Mirror of https://github.com/neondatabase/neon.git (synced 2026-03-04 17:00:37 +00:00)

Compare commits: mx_offset_...release-35 (184 commits)
Commits (SHA1):

b58445d855 36050e7f3d 762a8a7bb5 2e8a3afab1 4580f5085a e074ccf170 196943c78f 149dd36b6b
be271e3edf 7c85c7ea91 1066bca5e3 1aad8918e1 966213f429 35e73759f5 48936d44f8 2eae0a1fe5
53470ad12a edccef4514 982fce1e72 e767ced8d0 1309571f5d 9a69b6cb94 cc82cd1b07 c76b74c50d
ed938885ff db4d094afa 0626e0bfd3 33360ed96d 444d6e337f 39a28d1108 efa6aa134f 3a1be9b246
664d32eb7f ed845b644b 87dd37a2f2 1355bd0ac5 a1d6b1a4af 92aee7e07f 5e2f29491f 618d36ee6d
33c2d94ba6 08bfe1c826 65ff256bb8 5177c1e4b1 49efcc3773 76b1cdc17e 1f151d03d8 ac758e4f51
4f280c2953 2c724e56e2 feff887c6f 353d915fcf 2e38098cbc a6fe5ea1ac 05b0aed0c1 cd1705357d
6bc7561290 fbd3ac14b5 e437787c8f 3460dbf90b 6b89d99677 6cc8ea86e4 e62a492d6f a475cdf642
7002c79a47 ee6cf357b4 e5c2086b5f 5f1208296a 88e8e473cd b0a77844f6 1baf464307 e9b8e81cea
85d6194aa4 333a7a68ef 6aa4e41bee 840183e51f cbccc94b03 fce227df22 bd787e800f 4a7704b4a3
ff1119da66 4c3ba1627b 1407174fb2 ec9dcb1889 d11d781afc 4e44565b71 4ed51ad33b 1c1ebe5537
c19cb7f386 4b97d31b16 923ade3dd7 b04e711975 afd0a6b39a 99752286d8 15df93363c bc0ab741af
51d9dfeaa3 f63cb18155 0de603d88e 240913912a 91a4ea0de2 8608704f49 efef68ce99 8daefd24da
46cc8b7982 38cd90dd0c a51b269f15 43bf6d0a0f 15273a9b66 78aca668d0 acbf4148ea 6508540561
a41b5244a8 2b3189be95 248563c595 14cd6ca933 eb36403e71 3c6f779698 f67f0c1c11 edb02d3299
664a69e65b 478322ebf9 802f174072 47f9890bae 262265daad 300da5b872 7b22b5c433 ffca97bc1e
cb356f3259 c85374295f 4992160677 bd535b3371 d90c5a03af 2d02cc9079 49ad94b99f 948a217398
125381eae7 cd01bbc715 d8b5e3b88d 06d25f2186 f759b561f3 ece0555600 73ea0a0b01 d8f6d6fd6f
d24de169a7 0816168296 277b44d57a 68c2c3880e 49da498f65 2c76ba3dd7 dbe3dc69ad 8e5bb3ed49
ab0be7b8da b4c55f5d24 ede70d833c 70c3d18bb0 7a491f52c4 323c4ecb4f 3d2466607e ed478b39f4
91585a558d 93467eae1f f3aac81d19 979ad60c19 9316cb1b1f e7939a527a 36d26665e1 873347f977
e814ac16f9 ad3055d386 94e03eb452 380f26ef79 3c5b7f59d7 fee89f80b5 41cce8eaf1 f88fe0218d
cc856eca85 cf350c6002 0ce6b6a0a3 73f247d537 960be82183 806e5a6c19 8d5df07cce df7a9d1407

@@ -12,6 +12,11 @@ opt-level = 3
 # Turn on a small amount of optimization in Development mode.
 opt-level = 1
 
+[build]
+# This is only present for local builds, as it will be overridden
+# by the RUSTDOCFLAGS env var in CI.
+rustdocflags = ["-Arustdoc::private_intra_doc_links"]
+
 [alias]
 build_testing = ["build", "--features", "testing"]
 neon = ["run", "--bin", "neon_local"]
@@ -105,7 +105,7 @@ runs:
     # Get previously uploaded data for this run
     ZSTD_NBTHREADS=0
 
-    S3_FILEPATHS=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${RAW_PREFIX}/ | jq --raw-output '.Contents[].Key')
+    S3_FILEPATHS=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${RAW_PREFIX}/ | jq --raw-output '.Contents[]?.Key')
     if [ -z "$S3_FILEPATHS" ]; then
       # There's no previously uploaded data for this $GITHUB_RUN_ID
       exit 0
@@ -150,6 +150,14 @@ runs:
       EXTRA_PARAMS="--flaky-tests-json $TEST_OUTPUT/flaky.json $EXTRA_PARAMS"
     fi
 
+    # We use pytest-split plugin to run benchmarks in parallel on different CI runners
+    if [ "${TEST_SELECTION}" = "test_runner/performance" ] && [ "${{ inputs.build_type }}" != "remote" ]; then
+      mkdir -p $TEST_OUTPUT
+      poetry run ./scripts/benchmark_durations.py "${TEST_RESULT_CONNSTR}" --days 10 --output "$TEST_OUTPUT/benchmark_durations.json"
+
+      EXTRA_PARAMS="--durations-path $TEST_OUTPUT/benchmark_durations.json $EXTRA_PARAMS"
+    fi
+
     if [[ "${{ inputs.build_type }}" == "debug" ]]; then
       cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run)
     elif [[ "${{ inputs.build_type }}" == "release" ]]; then

55 .github/workflows/approved-for-ci-run.yml (vendored, new file)
@@ -0,0 +1,55 @@
+name: Handle `approved-for-ci-run` label
+# This workflow helps to run CI pipeline for PRs made by external contributors (from forks).
+
+on:
+  pull_request:
+    types:
+      # Default types that triggers a workflow ([1]):
+      # - [1] https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request
+      - opened
+      - synchronize
+      - reopened
+      # Types that we wand to handle in addition to keep labels tidy:
+      - closed
+      # Actual magic happens here:
+      - labeled
+
+env:
+  GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  PR_NUMBER: ${{ github.event.pull_request.number }}
+
+jobs:
+  remove-label:
+    # Remove `approved-for-ci-run` label if the workflow is triggered by changes in a PR.
+    # The PR should be reviewed and labelled manually again.
+
+    runs-on: [ ubuntu-latest ]
+
+    if: |
+      contains(fromJSON('["opened", "synchronize", "reopened", "closed"]'), github.event.action) &&
+      contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run')
+
+    steps:
+      - run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
+
+  create-branch:
+    # Create a local branch for an `approved-for-ci-run` labelled PR to run CI pipeline in it.
+
+    runs-on: [ ubuntu-latest ]
+
+    if: |
+      github.event.action == 'labeled' &&
+      contains(github.event.pull_request.labels.*.name, 'approved-for-ci-run')
+
+    steps:
+      - run: gh pr --repo "${GITHUB_REPOSITORY}" edit "${PR_NUMBER}" --remove-label "approved-for-ci-run"
+
+      - uses: actions/checkout@v3
+        with:
+          ref: main
+
+      - run: gh pr checkout "${PR_NUMBER}"
+
+      - run: git checkout -b "ci-run/pr-${PR_NUMBER}"
+
+      - run: git push --force origin "ci-run/pr-${PR_NUMBER}"

45 .github/workflows/build_and_test.yml (vendored)
@@ -5,6 +5,7 @@ on:
     branches:
       - main
       - release
+      - ci-run/pr-*
   pull_request:
 
 defaults:
@@ -127,6 +128,11 @@ jobs:
     - name: Run cargo clippy (release)
       run: cargo hack --feature-powerset clippy --release $CLIPPY_COMMON_ARGS
 
+    - name: Check documentation generation
+      run: cargo doc --workspace --no-deps --document-private-items
+      env:
+        RUSTDOCFLAGS: "-Dwarnings -Arustdoc::private_intra_doc_links"
+
     # Use `${{ !cancelled() }}` to run quck tests after the longer clippy run
     - name: Check formatting
       if: ${{ !cancelled() }}
@@ -155,7 +161,7 @@ jobs:
       build_type: [ debug, release ]
     env:
       BUILD_TYPE: ${{ matrix.build_type }}
-      GIT_VERSION: ${{ github.sha }}
+      GIT_VERSION: ${{ github.event.pull_request.head.sha || github.sha }}
 
     steps:
     - name: Fix git ownership
@@ -174,6 +180,27 @@ jobs:
         submodules: true
         fetch-depth: 1
 
+    - name: Check Postgres submodules revision
+      shell: bash -euo pipefail {0}
+      run: |
+        # This is a temporary solution to ensure that the Postgres submodules revision is correct (i.e. the updated intentionally).
+        # Eventually it will be replaced by a regression test https://github.com/neondatabase/neon/pull/4603
+
+        FAILED=false
+        for postgres in postgres-v14 postgres-v15; do
+          expected=$(cat vendor/revisions.json | jq --raw-output '."'"${postgres}"'"')
+          actual=$(git rev-parse "HEAD:vendor/${postgres}")
+          if [ "${expected}" != "${actual}" ]; then
+            echo >&2 "Expected ${postgres} rev to be at '${expected}', but it is at '${actual}'"
+            FAILED=true
+          fi
+        done
+
+        if [ "${FAILED}" = "true" ]; then
+          echo >&2 "Please update vendors/revisions.json if these changes are intentional"
+          exit 1
+        fi
+
     - name: Set pg 14 revision for caching
       id: pg_v14_rev
       run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
@@ -369,13 +396,11 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
+        pytest_split_group: [ 1, 2, 3, 4 ]
         build_type: [ release ]
     steps:
     - name: Checkout
       uses: actions/checkout@v3
-      with:
-        submodules: true
-        fetch-depth: 1
 
     - name: Pytest benchmarks
       uses: ./.github/actions/run-python-test-set
@@ -384,9 +409,11 @@ jobs:
         test_selection: performance
         run_in_parallel: false
         save_perf_report: ${{ github.ref_name == 'main' }}
+        extra_params: --splits ${{ strategy.job-total }} --group ${{ matrix.pytest_split_group }}
       env:
         VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
         PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
+        TEST_RESULT_CONNSTR: "${{ secrets.REGRESS_TEST_RESULT_CONNSTR }}"
       # XXX: no coverage data handling here, since benchmarks are run on release builds,
       # while coverage is currently collected for the debug ones
 
@@ -614,7 +641,7 @@ jobs:
         /kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
         --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
         --context .
-        --build-arg GIT_VERSION=${{ github.sha }}
+        --build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
         --build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
         --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
         --destination neondatabase/neon:${{needs.tag.outputs.build-tag}}
@@ -658,7 +685,7 @@ jobs:
         /kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
         --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
         --context .
-        --build-arg GIT_VERSION=${{ github.sha }}
+        --build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
        --build-arg BUILD_TAG=${{needs.tag.outputs.build-tag}}
        --build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
        --dockerfile Dockerfile.compute-tools
@@ -715,7 +742,7 @@ jobs:
        /kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true
        --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache
        --context .
-       --build-arg GIT_VERSION=${{ github.sha }}
+       --build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }}
        --build-arg PG_VERSION=${{ matrix.version }}
        --build-arg BUILD_TAG=${{needs.tag.outputs.build-tag}}
        --build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
@@ -742,7 +769,7 @@ jobs:
        /kaniko/executor --reproducible --snapshot-mode=redo --skip-unused-stages --cache=true \
        --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache \
        --context . \
-       --build-arg GIT_VERSION=${{ github.sha }} \
+       --build-arg GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }} \
        --build-arg PG_VERSION=${{ matrix.version }} \
        --build-arg BUILD_TAG=${{needs.tag.outputs.build-tag}} \
        --build-arg REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com \
@@ -767,7 +794,7 @@ jobs:
     run:
       shell: sh -eu {0}
     env:
-      VM_BUILDER_VERSION: v0.11.1
+      VM_BUILDER_VERSION: v0.13.1
 
     steps:
     - name: Checkout

3 .github/workflows/neon_extra_builds.yml (vendored)
@@ -3,7 +3,8 @@ name: Check neon with extra platform builds
 on:
   push:
     branches:
       - main
+      - ci-run/pr-*
   pull_request:
 
 defaults:

117 Cargo.lock (generated)
@@ -158,6 +158,19 @@ dependencies = [
  "syn 1.0.109",
 ]
 
+[[package]]
+name = "async-compression"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b0122885821398cc923ece939e24d1056a2384ee719432397fa9db87230ff11"
+dependencies = [
+ "flate2",
+ "futures-core",
+ "memchr",
+ "pin-project-lite",
+ "tokio",
+]
+
 [[package]]
 name = "async-stream"
 version = "0.3.5"
@@ -593,7 +606,7 @@ dependencies = [
  "cc",
  "cfg-if",
  "libc",
- "miniz_oxide",
+ "miniz_oxide 0.6.2",
  "object",
  "rustc-demangle",
 ]
@@ -882,9 +895,11 @@ name = "compute_tools"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "async-compression",
  "chrono",
  "clap",
  "compute_api",
+ "flate2",
  "futures",
  "hyper",
  "notify",
@@ -1367,6 +1382,16 @@ version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
 
+[[package]]
+name = "flate2"
+version = "1.0.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743"
+dependencies = [
+ "crc32fast",
+ "miniz_oxide 0.7.1",
+]
+
 [[package]]
 name = "fnv"
 version = "1.0.7"
@@ -2151,6 +2176,15 @@ dependencies = [
  "adler",
 ]
 
+[[package]]
+name = "miniz_oxide"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
+dependencies = [
+ "adler",
+]
+
 [[package]]
 name = "mio"
 version = "0.8.6"
@@ -2345,9 +2379,9 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry"
-version = "0.18.0"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e"
+checksum = "5f4b8347cc26099d3aeee044065ecc3ae11469796b4d65d065a23a584ed92a6f"
 dependencies = [
  "opentelemetry_api",
  "opentelemetry_sdk",
@@ -2355,9 +2389,9 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry-http"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1edc79add46364183ece1a4542592ca593e6421c60807232f5b8f7a31703825d"
+checksum = "a819b71d6530c4297b49b3cae2939ab3a8cc1b9f382826a1bc29dd0ca3864906"
 dependencies = [
  "async-trait",
  "bytes",
@@ -2368,9 +2402,9 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry-otlp"
-version = "0.11.0"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1c928609d087790fc936a1067bdc310ae702bdf3b090c3f281b713622c8bbde"
+checksum = "8af72d59a4484654ea8eb183fea5ae4eb6a41d7ac3e3bae5f4d2a282a3a7d3ca"
 dependencies = [
  "async-trait",
  "futures",
@@ -2386,48 +2420,47 @@ dependencies = [
 
 [[package]]
 name = "opentelemetry-proto"
-version = "0.1.0"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d61a2f56df5574508dd86aaca016c917489e589ece4141df1b5e349af8d66c28"
+checksum = "045f8eea8c0fa19f7d48e7bc3128a39c2e5c533d5c61298c548dfefc1064474c"
 dependencies = [
  "futures",
  "futures-util",
  "opentelemetry",
  "prost",
  "tonic 0.8.3",
- "tonic-build 0.8.4",
 ]
 
 [[package]]
 name = "opentelemetry-semantic-conventions"
-version = "0.10.0"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b02e0230abb0ab6636d18e2ba8fa02903ea63772281340ccac18e0af3ec9eeb"
+checksum = "24e33428e6bf08c6f7fcea4ddb8e358fab0fe48ab877a87c70c6ebe20f673ce5"
 dependencies = [
  "opentelemetry",
 ]
 
 [[package]]
 name = "opentelemetry_api"
-version = "0.18.0"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22"
+checksum = "ed41783a5bf567688eb38372f2b7a8530f5a607a4b49d38dd7573236c23ca7e2"
 dependencies = [
  "fnv",
  "futures-channel",
  "futures-util",
  "indexmap",
- "js-sys",
  "once_cell",
  "pin-project-lite",
  "thiserror",
+ "urlencoding",
 ]
 
 [[package]]
 name = "opentelemetry_sdk"
-version = "0.18.0"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113"
+checksum = "8b3a2a91fdbfdd4d212c0dcc2ab540de2c2bcbbd90be17de7a7daf8822d010c1"
 dependencies = [
  "async-trait",
  "crossbeam-channel",
@@ -2482,6 +2515,7 @@ name = "pageserver"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "async-compression",
  "async-stream",
  "async-trait",
  "byteorder",
@@ -2498,6 +2532,7 @@ dependencies = [
  "enum-map",
  "enumset",
  "fail",
+ "flate2",
  "futures",
  "git-version",
  "hex",
@@ -2901,9 +2936,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.58"
+version = "1.0.64"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8"
+checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
 dependencies = [
  "unicode-ident",
 ]
@@ -3292,9 +3327,9 @@ dependencies = [
 
 [[package]]
 name = "reqwest-tracing"
-version = "0.4.4"
+version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "783e8130d2427ddd7897dd3f814d4a3aea31b05deb42a4fdf8c18258fe5aefd1"
+checksum = "1b97ad83c2fc18113346b7158d79732242002427c30f620fa817c1f32901e0a8"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -3962,7 +3997,7 @@ dependencies = [
  "tokio",
  "tokio-stream",
  "tonic 0.9.2",
- "tonic-build 0.9.2",
+ "tonic-build",
 "tracing",
  "utils",
  "workspace_hack",
@@ -4063,7 +4098,7 @@ checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6"
 dependencies = [
  "filetime",
  "libc",
- "xattr",
+ "xattr 0.2.3",
 ]
 
 [[package]]
@@ -4344,16 +4379,17 @@ dependencies = [
 
 [[package]]
 name = "tokio-tar"
-version = "0.3.0"
+version = "0.3.1"
-source = "git+https://github.com/neondatabase/tokio-tar.git?rev=404df61437de0feef49ba2ccdbdd94eb8ad6e142#404df61437de0feef49ba2ccdbdd94eb8ad6e142"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d5714c010ca3e5c27114c1cdeb9d14641ace49874aa5626d7149e47aedace75"
 dependencies = [
  "filetime",
  "futures-core",
  "libc",
- "redox_syscall 0.2.16",
+ "redox_syscall 0.3.5",
  "tokio",
  "tokio-stream",
- "xattr",
+ "xattr 1.0.0",
 ]
 
 [[package]]
@@ -4480,19 +4516,6 @@ dependencies = [
  "tracing",
 ]
 
-[[package]]
-name = "tonic-build"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5bf5e9b9c0f7e0a7c027dcfaba7b2c60816c7049171f679d99ee2ff65d0de8c4"
-dependencies = [
- "prettyplease 0.1.25",
- "proc-macro2",
- "prost-build",
- "quote",
- "syn 1.0.109",
-]
-
 [[package]]
 name = "tonic-build"
 version = "0.9.2"
@@ -4616,9 +4639,9 @@ dependencies = [
 
 [[package]]
 name = "tracing-opentelemetry"
-version = "0.18.0"
+version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21ebb87a95ea13271332df069020513ab70bdb5637ca42d6e492dc3bbbad48de"
+checksum = "00a39dcf9bfc1742fa4d6215253b33a6e474be78275884c216fc2a06267b3600"
 dependencies = [
  "once_cell",
  "opentelemetry",
@@ -4817,6 +4840,7 @@ dependencies = [
  "byteorder",
  "bytes",
  "chrono",
+ "const_format",
  "criterion",
  "futures",
  "heapless",
@@ -5339,6 +5363,15 @@ dependencies = [
  "libc",
 ]
 
+[[package]]
+name = "xattr"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea263437ca03c1522846a4ddafbca2542d0ad5ed9b784909d4b27b76f62bc34a"
+dependencies = [
+ "libc",
+]
+
 [[package]]
 name = "xmlparser"
 version = "0.13.5"

14 Cargo.toml
@@ -32,6 +32,8 @@ license = "Apache-2.0"
 ## All dependency versions, used in the project
 [workspace.dependencies]
 anyhow = { version = "1.0", features = ["backtrace"] }
+async-compression = { version = "0.4.0", features = ["tokio", "gzip"] }
+flate2 = "1.0.26"
 async-stream = "0.3"
 async-trait = "0.1"
 aws-config = { version = "0.55", default-features = false, features=["rustls"] }
@@ -82,9 +84,9 @@ notify = "5.0.0"
 num_cpus = "1.15"
 num-traits = "0.2.15"
 once_cell = "1.13"
-opentelemetry = "0.18.0"
+opentelemetry = "0.19.0"
-opentelemetry-otlp = { version = "0.11.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
+opentelemetry-otlp = { version = "0.12.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
-opentelemetry-semantic-conventions = "0.10.0"
+opentelemetry-semantic-conventions = "0.11.0"
 parking_lot = "0.12"
 pbkdf2 = "0.12.1"
 pin-project-lite = "0.2"
@@ -93,7 +95,7 @@ prost = "0.11"
 rand = "0.8"
 regex = "1.4"
 reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
-reqwest-tracing = { version = "0.4.0", features = ["opentelemetry_0_18"] }
+reqwest-tracing = { version = "0.4.0", features = ["opentelemetry_0_19"] }
 reqwest-middleware = "0.2.0"
 reqwest-retry = "0.2.2"
 routerify = "3"
@@ -122,13 +124,14 @@ tokio-io-timeout = "1.2.0"
 tokio-postgres-rustls = "0.9.0"
 tokio-rustls = "0.23"
 tokio-stream = "0.1"
+tokio-tar = "0.3"
 tokio-util = { version = "0.7", features = ["io"] }
 toml = "0.7"
 toml_edit = "0.19"
 tonic = {version = "0.9", features = ["tls", "tls-roots"]}
 tracing = "0.1"
 tracing-error = "0.2.0"
-tracing-opentelemetry = "0.18.0"
+tracing-opentelemetry = "0.19.0"
 tracing-subscriber = { version = "0.3", default_features = false, features = ["smallvec", "fmt", "tracing-log", "std", "env-filter"] }
 url = "2.2"
 uuid = { version = "1.2", features = ["v4", "serde"] }
@@ -146,7 +149,6 @@ postgres-native-tls = { git = "https://github.com/neondatabase/rust-postgres.git
 postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="1aaedab101b23f7612042850d8f2036810fa7c7f" }
 postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", rev="1aaedab101b23f7612042850d8f2036810fa7c7f" }
 tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="1aaedab101b23f7612042850d8f2036810fa7c7f" }
-tokio-tar = { git = "https://github.com/neondatabase/tokio-tar.git", rev="404df61437de0feef49ba2ccdbdd94eb8ad6e142" }
 
 ## Other git libraries
 heapless = { default-features=false, features=[], git = "https://github.com/japaric/heapless.git", rev = "644653bf3b831c6bb4963be2de24804acf5e5001" } # upstream release pending

@@ -132,10 +132,20 @@ RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.5.tar.gz -O plv8.ta
 FROM build-deps AS h3-pg-build
 COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
 
-# packaged cmake is too old
-RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
+RUN case "$(uname -m)" in \
+    "x86_64") \
+        export CMAKE_CHECKSUM=739d372726cb23129d57a539ce1432453448816e345e1545f6127296926b6754 \
+        ;; \
+    "aarch64") \
+        export CMAKE_CHECKSUM=281b42627c9a1beed03e29706574d04c6c53fae4994472e90985ef018dd29c02 \
+        ;; \
+    *) \
+        echo "Unsupported architecture '$(uname -m)'. Supported are x86_64 and aarch64" && exit 1 \
+        ;; \
+    esac && \
+    wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-$(uname -m).sh \
     -q -O /tmp/cmake-install.sh \
-    && echo "739d372726cb23129d57a539ce1432453448816e345e1545f6127296926b6754 /tmp/cmake-install.sh" | sha256sum --check \
+    && echo "${CMAKE_CHECKSUM} /tmp/cmake-install.sh" | sha256sum --check \
     && chmod u+x /tmp/cmake-install.sh \
     && /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
     && rm /tmp/cmake-install.sh

@@ -6,8 +6,10 @@ license.workspace = true
 
 [dependencies]
 anyhow.workspace = true
+async-compression.workspace = true
 chrono.workspace = true
 clap.workspace = true
+flate2.workspace = true
 futures.workspace = true
 hyper = { workspace = true, features = ["full"] }
 notify.workspace = true

@@ -1,4 +1,5 @@
 use std::fs;
+use std::io::BufRead;
 use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
 use std::process::{Command, Stdio};
@@ -15,6 +16,7 @@ use utils::lsn::Lsn;
 
 use compute_api::responses::{ComputeMetrics, ComputeStatus};
 use compute_api::spec::{ComputeMode, ComputeSpec};
+use utils::measured_stream::MeasuredReader;
 
 use crate::config;
 use crate::pg_helpers::*;
@@ -140,14 +142,14 @@ fn create_neon_superuser(spec: &ComputeSpec, client: &mut Client) -> Result<()>
         .cluster
         .roles
         .iter()
-        .map(|r| format!("'{}'", escape_literal(&r.name)))
+        .map(|r| escape_literal(&r.name))
         .collect::<Vec<_>>();
 
     let dbs = spec
         .cluster
         .databases
         .iter()
-        .map(|db| format!("'{}'", escape_literal(&db.name)))
+        .map(|db| escape_literal(&db.name))
         .collect::<Vec<_>>();
 
     let roles_decl = if roles.is_empty() {
@@ -253,20 +255,52 @@ impl ComputeNode {
 
         let mut client = config.connect(NoTls)?;
         let basebackup_cmd = match lsn {
-            Lsn(0) => format!("basebackup {} {}", spec.tenant_id, spec.timeline_id), // First start of the compute
-            _ => format!("basebackup {} {} {}", spec.tenant_id, spec.timeline_id, lsn),
+            // HACK We don't use compression on first start (Lsn(0)) because there's no API for it
+            Lsn(0) => format!("basebackup {} {}", spec.tenant_id, spec.timeline_id),
+            _ => format!(
+                "basebackup {} {} {} --gzip",
+                spec.tenant_id, spec.timeline_id, lsn
+            ),
         };
 
         let copyreader = client.copy_out(basebackup_cmd.as_str())?;
+        let mut measured_reader = MeasuredReader::new(copyreader);
+
+        // Check the magic number to see if it's a gzip or not. Even though
+        // we might explicitly ask for gzip, an old pageserver with no implementation
+        // of gzip compression might send us uncompressed data. After some time
+        // passes we can assume all pageservers know how to compress and we can
+        // delete this check.
+        //
+        // If the data is not gzip, it will be tar. It will not be mistakenly
+        // recognized as gzip because tar starts with an ascii encoding of a filename,
+        // and 0x1f and 0x8b are unlikely first characters for any filename. Moreover,
+        // we send the "global" directory first from the pageserver, so it definitely
+        // won't be recognized as gzip.
+        let mut bufreader = std::io::BufReader::new(&mut measured_reader);
+        let gzip = {
+            let peek = bufreader.fill_buf().unwrap();
+            peek[0] == 0x1f && peek[1] == 0x8b
+        };
 
         // Read the archive directly from the `CopyOutReader`
         //
         // Set `ignore_zeros` so that unpack() reads all the Copy data and
        // doesn't stop at the end-of-archive marker. Otherwise, if the server
        // sends an Error after finishing the tarball, we will not notice it.
-        let mut ar = tar::Archive::new(copyreader);
-        ar.set_ignore_zeros(true);
-        ar.unpack(&self.pgdata)?;
+        if gzip {
+            let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader));
+            ar.set_ignore_zeros(true);
+            ar.unpack(&self.pgdata)?;
+        } else {
+            let mut ar = tar::Archive::new(&mut bufreader);
+            ar.set_ignore_zeros(true);
+            ar.unpack(&self.pgdata)?;
+        };
+
+        // Report metrics
+        self.state.lock().unwrap().metrics.basebackup_bytes =
+            measured_reader.get_byte_count() as u64;
         self.state.lock().unwrap().metrics.basebackup_ms = Utc::now()
             .signed_duration_since(start_time)
             .to_std()
@@ -549,6 +583,13 @@ impl ComputeNode {
             pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None")
         );
 
+        // Log metrics so that we can search for slow operations in logs
+        let metrics = {
+            let state = self.state.lock().unwrap();
+            state.metrics.clone()
+        };
+        info!(?metrics, "compute start finished");
+
         Ok(pg)
     }
 
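The branch above keys off the gzip magic bytes 0x1f 0x8b before deciding how to unpack the stream. As a standalone illustration of that peek-and-branch pattern (not part of the diff, and assuming only the `flate2` crate this change adds as a dependency), a minimal sketch could look like:

```rust
use std::io::{BufRead, BufReader, Read, Write};

use flate2::{read::GzDecoder, write::GzEncoder, Compression};

/// Peek at the first bytes of a stream and transparently decompress it if
/// they are the gzip magic number (0x1f 0x8b); otherwise pass it through.
fn read_maybe_gzipped(input: impl Read) -> std::io::Result<Vec<u8>> {
    let mut reader = BufReader::new(input);
    // fill_buf() exposes buffered bytes without consuming them.
    let is_gzip = {
        let peek = reader.fill_buf()?;
        peek.len() >= 2 && peek[0] == 0x1f && peek[1] == 0x8b
    };
    let mut out = Vec::new();
    if is_gzip {
        GzDecoder::new(reader).read_to_end(&mut out)?;
    } else {
        reader.read_to_end(&mut out)?;
    }
    Ok(out)
}

fn main() -> std::io::Result<()> {
    // A gzip-compressed payload is detected and decompressed...
    let mut enc = GzEncoder::new(Vec::new(), Compression::default());
    enc.write_all(b"basebackup payload")?;
    let gzipped = enc.finish()?;
    assert_eq!(read_maybe_gzipped(&gzipped[..])?, b"basebackup payload".to_vec());

    // ...while a plain (e.g. tar) stream passes through unchanged.
    assert_eq!(read_maybe_gzipped(&b"plain tar bytes"[..])?, b"plain tar bytes".to_vec());
    Ok(())
}
```

This mirrors the reasoning in the diff's comment: a tar stream begins with an ASCII filename, so it cannot start with 0x1f 0x8b and will never be misdetected as gzip.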
@@ -47,30 +47,22 @@ pub fn write_postgres_conf(path: &Path, spec: &ComputeSpec) -> Result<()> {
     // Add options for connecting to storage
     writeln!(file, "# Neon storage settings")?;
     if let Some(s) = &spec.pageserver_connstring {
-        writeln!(
-            file,
-            "neon.pageserver_connstring='{}'",
-            escape_conf_value(s)
-        )?;
+        writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?;
     }
     if !spec.safekeeper_connstrings.is_empty() {
         writeln!(
             file,
-            "neon.safekeepers='{}'",
+            "neon.safekeepers={}",
             escape_conf_value(&spec.safekeeper_connstrings.join(","))
         )?;
     }
     if let Some(s) = &spec.tenant_id {
-        writeln!(
-            file,
-            "neon.tenant_id='{}'",
-            escape_conf_value(&s.to_string())
-        )?;
+        writeln!(file, "neon.tenant_id={}", escape_conf_value(&s.to_string()))?;
     }
     if let Some(s) = &spec.timeline_id {
         writeln!(
             file,
-            "neon.timeline_id='{}'",
+            "neon.timeline_id={}",
             escape_conf_value(&s.to_string())
         )?;
     }
@@ -16,15 +16,26 @@ use compute_api::spec::{Database, GenericOption, GenericOptions, PgIdent, Role};
 
 const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds
 
-/// Escape a string for including it in a SQL literal
+/// Escape a string for including it in a SQL literal. Wrapping the result
+/// with `E'{}'` or `'{}'` is not required, as it returns a ready-to-use
+/// SQL string literal, e.g. `'db'''` or `E'db\\'`.
+/// See <https://github.com/postgres/postgres/blob/da98d005cdbcd45af563d0c4ac86d0e9772cd15f/src/backend/utils/adt/quote.c#L47>
+/// for the original implementation.
 pub fn escape_literal(s: &str) -> String {
-    s.replace('\'', "''").replace('\\', "\\\\")
+    let res = s.replace('\'', "''").replace('\\', "\\\\");
+
+    if res.contains('\\') {
+        format!("E'{}'", res)
+    } else {
+        format!("'{}'", res)
+    }
 }
 
-/// Escape a string so that it can be used in postgresql.conf.
-/// Same as escape_literal, currently.
+/// Escape a string so that it can be used in postgresql.conf. Wrapping the result
+/// with `'{}'` is not required, as it returns a ready-to-use config string.
 pub fn escape_conf_value(s: &str) -> String {
-    s.replace('\'', "''").replace('\\', "\\\\")
+    let res = s.replace('\'', "''").replace('\\', "\\\\");
+    format!("'{}'", res)
 }
 
 trait GenericOptionExt {
@@ -37,7 +48,7 @@ impl GenericOptionExt for GenericOption {
     fn to_pg_option(&self) -> String {
         if let Some(val) = &self.value {
             match self.vartype.as_ref() {
-                "string" => format!("{} '{}'", self.name, escape_literal(val)),
+                "string" => format!("{} {}", self.name, escape_literal(val)),
                 _ => format!("{} {}", self.name, val),
             }
         } else {
@@ -49,7 +60,7 @@ impl GenericOptionExt for GenericOption {
     fn to_pg_setting(&self) -> String {
         if let Some(val) = &self.value {
             match self.vartype.as_ref() {
-                "string" => format!("{} = '{}'", self.name, escape_conf_value(val)),
+                "string" => format!("{} = {}", self.name, escape_conf_value(val)),
                 _ => format!("{} = {}", self.name, val),
             }
         } else {
@@ -397,10 +397,44 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
             // We do not check either DB exists or not,
             // Postgres will take care of it for us
             "delete_db" => {
-                let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.pg_quote());
+                // In Postgres we can't drop a database if it is a template.
+                // So we need to unset the template flag first, but it could
+                // be a retry, so we could've already dropped the database.
+                // Check that database exists first to make it idempotent.
+                let unset_template_query: String = format!(
+                    "
+                    DO $$
+                    BEGIN
+                        IF EXISTS(
+                            SELECT 1
+                            FROM pg_catalog.pg_database
+                            WHERE datname = {}
+                        )
+                        THEN
+                            ALTER DATABASE {} is_template false;
+                        END IF;
+                    END
+                    $$;",
+                    escape_literal(&op.name),
+                    &op.name.pg_quote()
+                );
+                // Use FORCE to drop database even if there are active connections.
+                // We run this from `cloud_admin`, so it should have enough privileges.
+                // NB: there could be other db states, which prevent us from dropping
+                // the database. For example, if db is used by any active subscription
+                // or replication slot.
+                // TODO: deal with it once we allow logical replication. Proper fix should
+                // involve returning an error code to the control plane, so it could
+                // figure out that this is a non-retryable error, return it to the user
+                // and fail operation permanently.
+                let drop_db_query: String = format!(
+                    "DROP DATABASE IF EXISTS {} WITH (FORCE)",
+                    &op.name.pg_quote()
+                );
 
                 warn!("deleting database '{}'", &op.name);
-                client.execute(query.as_str(), &[])?;
+                client.execute(unset_template_query.as_str(), &[])?;
+                client.execute(drop_db_query.as_str(), &[])?;
             }
             "rename_db" => {
                 let new_name = op.new_name.as_ref().unwrap();
@@ -89,4 +89,12 @@ test.escaping = 'here''s a backslash \\ and a quote '' and a double-quote " hoor
         assert_eq!(none_generic_options.find("missed_value"), None);
         assert_eq!(none_generic_options.find("invalid_value"), None);
     }
+
+    #[test]
+    fn test_escape_literal() {
+        assert_eq!(escape_literal("test"), "'test'");
+        assert_eq!(escape_literal("test'"), "'test'''");
+        assert_eq!(escape_literal("test\\'"), "E'test\\\\'''");
+        assert_eq!(escape_literal("test\\'\\'"), "E'test\\\\''\\\\'''");
+    }
 }
@@ -10,7 +10,7 @@
 //! (non-Neon binaries don't necessarily follow our pidfile conventions).
 //! The pid stored in the file is later used to stop the service.
 //!
-//! See [`lock_file`] module for more info.
+//! See the [`lock_file`](utils::lock_file) module for more info.
 
 use std::ffi::OsStr;
 use std::io::Write;
@@ -2,8 +2,9 @@
 //!
 //! In the local test environment, the data for each safekeeper is stored in
 //!
+//! ```text
 //! .neon/safekeepers/<safekeeper id>
-//!
+//! ```
 use anyhow::Context;
 
 use std::path::PathBuf;
@@ -2,7 +2,9 @@
 //!
 //! In the local test environment, the data for each endpoint is stored in
 //!
+//! ```text
 //! .neon/endpoints/<endpoint id>
+//! ```
 //!
 //! Some basic information about the endpoint, like the tenant and timeline IDs,
 //! are stored in the `endpoint.json` file. The `endpoint.json` file is created
@@ -22,7 +24,7 @@
 //!
 //! Directory contents:
 //!
-//! ```ignore
+//! ```text
 //! .neon/endpoints/main/
 //!     compute.log - log output of `compute_ctl` and `postgres`
 //!     endpoint.json - serialized `EndpointConf` struct
@@ -2,8 +2,9 @@
 //!
 //! In the local test environment, the data for each safekeeper is stored in
 //!
+//! ```text
 //! .neon/safekeepers/<safekeeper id>
-//!
+//! ```
 use std::io::Write;
 use std::path::PathBuf;
 use std::process::Child;
84
docs/rfcs/024-user-mgmt.md
Normal file
84
docs/rfcs/024-user-mgmt.md
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
# Postgres user and database management
|
||||||
|
|
||||||
|
(This supersedes the previous proposal that looked too complicated and desynchronization-prone)
|
||||||
|
|
||||||
|
We've accumulated a bunch of problems with our approach to role and database management, namely:
|
||||||
|
|
||||||
|
1. we don't allow role and database creation from Postgres, and users are complaining about that
|
||||||
|
2. fine-grained role management is not possible both from Postgres and console
|
||||||
|
|
||||||
|
Right now, we do store users and databases both in console and Postgres, and there are two main reasons for
|
||||||
|
that:
|
||||||
|
|
||||||
|
* we want to be able to authenticate users in proxy against the console without Postgres' involvement. Otherwise,
|
||||||
|
malicious brute force attempts will wake up Postgres (expensive) and may exhaust the Postgres connections limit (deny of service).
|
||||||
|
* it is handy when we can render console UI without waking up compute (e.g., show database list)
|
||||||
|
|
||||||
|
This RFC doesn't talk about giving root access to the database, which is blocked by a secure runtime setup.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
* Add Postgres extension that sends an HTTP request each time transaction that modifies users/databases is about to commit.
|
||||||
|
* Add user management API to internal console API. Also, the console should put a JWT token into the compute so that it can access management API.
|
||||||
|
|
||||||
|
## Postgres behavior
|
||||||
|
|
||||||
|
The default user role (@username) should have `CREATE ROLE`, `CREATE DB`, and `BYPASSRLS` privileges. We expose the Postgres port
|
||||||
|
to the open internet, so we need to check password strength. Now console generates strong passwords, so there is no risk of having dumb passwords. With user-provided passwords, such risks exist.
|
||||||
|
|
||||||
|
Since we store passwords in the console we should also send unencrypted password when role is created/changed. Hence communication with the console must be encrypted. Postgres also supports creating roles using hashes, in that case, we will not be able to get a raw password. So I can see the following options here:
|
||||||
|
* roles created via SQL will *not* have raw passwords in the console
|
||||||
|
* roles created via SQL will have raw passwords in the console, except ones that were created using hashes
|
||||||
|
|
||||||
|
I'm leaning towards the second option here as it is a bit more consistent one -- if raw password storage is enabled then we store passwords in all cases where we can store them.
|
||||||
|
|
||||||
|
To send data about roles and databases from Postgres to the console we can create the following Postgres extension:
|
||||||
|
|
||||||
|
* Intercept role/database changes in `ProcessUtility_hook`. Here we have access to the query statement with the raw password. The hook handler itself should not dial the console immediately, but rather stash the info in a hashmap for later use.
|
||||||
|
* When the transaction is about to commit, we execute the collected role modifications (all as one -- the console should either accept all or reject all, hence the API shouldn't be REST-like). If the console request fails, we can roll back the transaction. This way, if the transaction is committed, we know for sure that the console has this information. We can use `XACT_EVENT_PRE_COMMIT` and `XACT_EVENT_PARALLEL_PRE_COMMIT` for that (a rough sketch of this collect-then-flush shape follows this list).
|
||||||
|
* The extension should be mindful of the fact that it is possible to create and delete roles within the same transaction.
|
||||||
|
* We also need to track who the database owner is; some extra coding may be needed to get the current user when the database is created.
|
||||||
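The extension itself would live in C, but the collect-then-flush shape described in the list above can be sketched roughly as follows (illustrative Rust only; the type names, the hashmap layout, and the `send_batch` callback are placeholders, not the actual extension):

```rust
// Illustrative bookkeeping only: hook handlers record pending role changes,
// and the pre-commit callback ships them to the console in a single batch,
// failing the transaction when the console rejects it.
use std::collections::HashMap;

#[allow(dead_code)]
#[derive(Clone)]
enum PendingChange {
    CreateRole { password: Option<String> },
    AlterRole { password: Option<String> },
    DropRole,
}

#[derive(Default)]
struct TxnState {
    // Keyed by role name: a later change to the same role overwrites an
    // earlier one, so "create then drop" in one transaction ends up as a
    // single drop (which the console is expected to tolerate).
    pending: HashMap<String, PendingChange>,
}

impl TxnState {
    /// Called from the (hypothetical) utility-hook handler.
    fn record(&mut self, role: &str, change: PendingChange) {
        self.pending.insert(role.to_string(), change);
    }

    /// Called from the (hypothetical) pre-commit callback; an Err here stands
    /// in for aborting the transaction.
    fn pre_commit_flush(
        &mut self,
        send_batch: impl Fn(&HashMap<String, PendingChange>) -> Result<(), String>,
    ) -> Result<(), String> {
        if self.pending.is_empty() {
            return Ok(());
        }
        send_batch(&self.pending)?;
        self.pending.clear();
        Ok(())
    }
}

fn main() {
    let mut txn = TxnState::default();
    txn.record(
        "kurt",
        PendingChange::CreateRole { password: Some("secret".into()) },
    );
    // Pretend the console accepted the batch.
    txn.pre_commit_flush(|batch| {
        assert_eq!(batch.len(), 1);
        Ok(())
    })
    .unwrap();
}
```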
|
|
||||||
|
## Console user management API
|
||||||
|
|
||||||
|
The current public API has a REST API for role management. We need some analog for the internal API (called the mgmt API in the console code). But unlike the public API, here we want an atomic way to create several roles/databases (in cases when several roles were created in the same transaction). So something like this may work:
|
||||||
|
|
||||||
|
```
|
||||||
|
curl -X PATCH /api/v1/roles_and_databases -d '
|
||||||
|
[
|
||||||
|
{"op":"create", "type":"role", "name": "kurt", "password":"lYgT3BlbkFJ2vBZrqv"},
|
||||||
|
{"op":"drop", "type":"role", "name": "trout"},
|
||||||
|
{"op":"alter", "type":"role", "name": "kilgore", "password":"3BlbkFJ2vB"},
|
||||||
|
{"op":"create", "type":"database", "name": "db2", "owner": "eliot"},
|
||||||
|
]
|
||||||
|
'
|
||||||
|
```
|
||||||
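For illustration, the batch above could be modeled on the Rust side roughly as follows (a sketch only; the struct and field names are assumptions based on the example payload, not existing console or compute code):

```rust
// Hypothetical types mirroring the JSON batch in the example above.
// Requires the serde (with the "derive" feature) and serde_json crates.
use serde::{Deserialize, Serialize};

/// One entry of the atomic batch sent to the mgmt endpoint.
#[derive(Serialize, Deserialize, Debug)]
struct Op {
    /// "create" | "drop" | "alter"
    op: String,
    /// "role" | "database"
    #[serde(rename = "type")]
    kind: String,
    name: String,
    /// Only for roles, and only when a raw password is known.
    #[serde(skip_serializing_if = "Option::is_none")]
    password: Option<String>,
    /// Only for databases.
    #[serde(skip_serializing_if = "Option::is_none")]
    owner: Option<String>,
}

fn main() -> serde_json::Result<()> {
    let batch = vec![
        Op {
            op: "create".into(),
            kind: "role".into(),
            name: "kurt".into(),
            password: Some("lYgT3BlbkFJ2vBZrqv".into()),
            owner: None,
        },
        Op {
            op: "drop".into(),
            kind: "role".into(),
            name: "trout".into(),
            password: None,
            owner: None,
        },
    ];
    // The whole Vec is one PATCH body: the console accepts or rejects it atomically.
    println!("{}", serde_json::to_string_pretty(&batch)?);
    Ok(())
}
```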
|
|
||||||
|
It makes sense not to error out on duplicated create/delete operations (see failure modes).
|
||||||
|
|
||||||
|
## Managing users from the console
|
||||||
|
|
||||||
|
Currently the console puts a spec file with the list of databases/roles and delta operations into all the compute pods. `compute_ctl` then picks up that file, stubbornly executes the deltas, and checks that the data in the spec file matches what is in Postgres. This way, if the user creates a role in the UI, we restart compute with a new spec file, and the databases/roles are created during startup. So if Postgres sends an HTTP call each time a role is created, we need to break the recursion in that case. We can do that based on application_name, some GUC, or the user (local == no HTTP hook).
|
||||||
|
|
||||||
|
Generally, we have several options when we are creating users via console:
|
||||||
|
|
||||||
|
1. restart compute with a new spec file, execute local SQL command; cut recursion in the extension
|
||||||
|
2. "push" spec files into running compute, execute local SQL command; cut recursion in the extension
|
||||||
|
3. "push" spec files into running compute, execute local SQL command; let extension create those roles in the console
|
||||||
|
4. avoid managing roles via spec files, send SQL commands to compute; let extension create those roles in the console
|
||||||
|
|
||||||
|
The last option is the most straightforward one, but with the raw password storage opt-out, we will not have the password to establish an SQL connection. Also, we need a spec for provisioning purposes and to address potential desync (but that is quite unlikely). So I think the easiest approach would be:
|
||||||
|
|
||||||
|
1. keep role management as it is now and cut the recursion in the extension when SQL is executed by compute_ctl
|
||||||
|
2. add "push" endpoint to the compute_ctl to avoid compute restart during the `apply_config` operation -- that can be done as a follow up to avoid increasing scope too much
|
||||||
|
|
||||||
|
## Failure modes
|
||||||
|
|
||||||
|
* during role creation via SQL, the role was created in the console but the connection was dropped before Postgres got the acknowledgment, or some error happened after the acknowledgment (out of disk space, deadlock, etc.):
|
||||||
|
|
||||||
|
in that case, Postgres won't have a role that exists in the console. A compute restart will heal it (due to the spec file). Also, if the console allows repeated creation/deletion, the user can repeat the transaction.
|
||||||
|
|
||||||
|
|
||||||
|
## Scalability
|
||||||
|
|
||||||
|
On my laptop, I can create 4200 roles per second, which corresponds to about 363 million roles per day. Since each role creation ends up in the console database, we can add a limit on the number of roles (it could be reasonably large so that users rarely hit it -- say 1k or 10k).
|
||||||
22
docs/tools.md
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Useful development tools
|
||||||
|
|
||||||
|
This readme contains some hints on how to set up some optional development tools.
|
||||||
|
|
||||||
|
## ccls
|
||||||
|
|
||||||
|
[ccls](https://github.com/MaskRay/ccls) is a C/C++ language server. It requires some setup
|
||||||
|
to work well. There are different ways to do it, but here's what works for me:
|
||||||
|
1. Make a common parent directory for all your neon projects (for example, `~/src/neondatabase/`)
|
||||||
|
2. Go to `vendor/postgres-v15`
|
||||||
|
3. Run `make clean && ./configure`
|
||||||
|
4. Install [bear](https://github.com/rizsotto/Bear), and run `bear -- make -j4`
|
||||||
|
5. Copy the generated `compile_commands.json` to `~/src/neondatabase` (or equivalent)
|
||||||
|
6. Run `touch ~/src/neondatabase/.ccls-root`; this will make the `compile_commands.json` file discoverable in all subdirectories
|
||||||
|
|
||||||
|
With this setup you will get decent LSP mileage inside the postgres repo, as well as in any postgres extensions that you put in `~/src/neondatabase/`, like `pg_embedding`, or inside `~/src/neondatabase/neon/pgxn`.
|
||||||
|
|
||||||
|
Some additional tips for various IDEs:
|
||||||
|
|
||||||
|
### Emacs
|
||||||
|
|
||||||
|
To improve performance: `(setq lsp-lens-enable nil)`
|
||||||
@@ -71,6 +71,7 @@ pub struct ComputeMetrics {
|
|||||||
pub wait_for_spec_ms: u64,
|
pub wait_for_spec_ms: u64,
|
||||||
pub sync_safekeepers_ms: u64,
|
pub sync_safekeepers_ms: u64,
|
||||||
pub basebackup_ms: u64,
|
pub basebackup_ms: u64,
|
||||||
|
pub basebackup_bytes: u64,
|
||||||
pub start_postgres_ms: u64,
|
pub start_postgres_ms: u64,
|
||||||
pub config_ms: u64,
|
pub config_ms: u64,
|
||||||
pub total_startup_ms: u64,
|
pub total_startup_ms: u64,
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
//! Helpers for observing duration on HistogramVec / CounterVec / GaugeVec / MetricVec<T>.
|
//! Helpers for observing duration on `HistogramVec` / `CounterVec` / `GaugeVec` / `MetricVec<T>`.
|
||||||
|
|
||||||
use std::{future::Future, time::Instant};
|
use std::{future::Future, time::Instant};
|
||||||
|
|
||||||
|
|||||||
@@ -411,12 +411,16 @@ pub struct LayerResidenceEvent {
|
|||||||
pub reason: LayerResidenceEventReason,
|
pub reason: LayerResidenceEventReason,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// The reason for recording a given [`ResidenceEvent`].
|
/// The reason for recording a given [`LayerResidenceEvent`].
|
||||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||||
pub enum LayerResidenceEventReason {
|
pub enum LayerResidenceEventReason {
|
||||||
/// The layer map is being populated, e.g. during timeline load or attach.
|
/// The layer map is being populated, e.g. during timeline load or attach.
|
||||||
/// This includes [`RemoteLayer`] objects created in [`reconcile_with_remote`].
|
/// This includes [`RemoteLayer`] objects created in [`reconcile_with_remote`].
|
||||||
/// We need to record such events because there is no persistent storage for the events.
|
/// We need to record such events because there is no persistent storage for the events.
|
||||||
|
///
|
||||||
|
// https://github.com/rust-lang/rust/issues/74481
|
||||||
|
/// [`RemoteLayer`]: ../../tenant/storage_layer/struct.RemoteLayer.html
|
||||||
|
/// [`reconcile_with_remote`]: ../../tenant/struct.Timeline.html#method.reconcile_with_remote
|
||||||
LayerLoad,
|
LayerLoad,
|
||||||
/// We just created the layer (e.g., freeze_and_flush or compaction).
|
/// We just created the layer (e.g., freeze_and_flush or compaction).
|
||||||
/// Such layers are always [`LayerResidenceStatus::Resident`].
|
/// Such layers are always [`LayerResidenceStatus::Resident`].
|
||||||
|
|||||||
@@ -60,8 +60,9 @@ impl Ord for RelTag {
|
|||||||
|
|
||||||
/// Display RelTag in the same format that's used in most PostgreSQL debug messages:
|
/// Display RelTag in the same format that's used in most PostgreSQL debug messages:
|
||||||
///
|
///
|
||||||
|
/// ```text
|
||||||
/// <spcnode>/<dbnode>/<relnode>[_fsm|_vm|_init]
|
/// <spcnode>/<dbnode>/<relnode>[_fsm|_vm|_init]
|
||||||
///
|
/// ```
|
||||||
impl fmt::Display for RelTag {
|
impl fmt::Display for RelTag {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
if let Some(forkname) = forknumber_to_name(self.forknum) {
|
if let Some(forkname) = forknumber_to_name(self.forknum) {
|
||||||
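To make the documented format concrete, here is a small self-contained illustration (a stand-in struct, not the crate's actual `RelTag`) that renders the same `<spcnode>/<dbnode>/<relnode>[_<fork>]` shape:

```rust
// Stand-in type for illustration; the real RelTag stores a fork number and
// maps it to a name via forknumber_to_name().
use std::fmt;

struct RelTagLike {
    spcnode: u32,
    dbnode: u32,
    relnode: u32,
    forkname: Option<&'static str>,
}

impl fmt::Display for RelTagLike {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.forkname {
            Some(name) => write!(f, "{}/{}/{}_{}", self.spcnode, self.dbnode, self.relnode, name),
            None => write!(f, "{}/{}/{}", self.spcnode, self.dbnode, self.relnode),
        }
    }
}

fn main() {
    let vm_fork = RelTagLike { spcnode: 1663, dbnode: 5, relnode: 16384, forkname: Some("vm") };
    assert_eq!(vm_fork.to_string(), "1663/5/16384_vm");
}
```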
|
|||||||
@@ -57,9 +57,9 @@ pub fn slru_may_delete_clogsegment(segpage: u32, cutoff_page: u32) -> bool {
|
|||||||
// Multixact utils
|
// Multixact utils
|
||||||
|
|
||||||
pub fn mx_offset_to_flags_offset(xid: MultiXactId) -> usize {
|
pub fn mx_offset_to_flags_offset(xid: MultiXactId) -> usize {
|
||||||
((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32)
|
((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32) as u16
|
||||||
% pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE as u32
|
% pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE
|
||||||
* pg_constants::MULTIXACT_MEMBERGROUP_SIZE as u32) as usize
|
* pg_constants::MULTIXACT_MEMBERGROUP_SIZE) as usize
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn mx_offset_to_flags_bitshift(xid: MultiXactId) -> u16 {
|
pub fn mx_offset_to_flags_bitshift(xid: MultiXactId) -> u16 {
|
||||||
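As a worked example of the arithmetic in `mx_offset_to_flags_offset` above, here is a self-contained sketch; the constant values are illustrative assumptions, the real ones live in `pg_constants` and may differ:

```rust
// Illustrative constants only -- stand-ins for pg_constants::*, not the real values.
const MULTIXACT_MEMBERS_PER_MEMBERGROUP: u16 = 4;
const MULTIXACT_MEMBERGROUPS_PER_PAGE: u16 = 409;
const MULTIXACT_MEMBERGROUP_SIZE: u16 = 20;

type MultiXactId = u32;

fn mx_offset_to_flags_offset(xid: MultiXactId) -> usize {
    ((xid / MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32) as u16
        % MULTIXACT_MEMBERGROUPS_PER_PAGE
        * MULTIXACT_MEMBERGROUP_SIZE) as usize
}

fn main() {
    // Member offset 10 falls into member group 10 / 4 = 2 on its page, so its
    // flag bytes start at byte offset 2 * 20 = 40 with these assumed constants.
    assert_eq!(mx_offset_to_flags_offset(10), 40);
}
```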
|
|||||||
@@ -49,14 +49,16 @@ pub fn forknumber_to_name(forknum: u8) -> Option<&'static str> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
///
|
|
||||||
/// Parse a filename of a relation file. Returns (relfilenode, forknum, segno) tuple.
|
/// Parse a filename of a relation file. Returns (relfilenode, forknum, segno) tuple.
|
||||||
///
|
///
|
||||||
/// Formats:
|
/// Formats:
|
||||||
|
///
|
||||||
|
/// ```text
|
||||||
/// <oid>
|
/// <oid>
|
||||||
/// <oid>_<fork name>
|
/// <oid>_<fork name>
|
||||||
/// <oid>.<segment number>
|
/// <oid>.<segment number>
|
||||||
/// <oid>_<fork name>.<segment number>
|
/// <oid>_<fork name>.<segment number>
|
||||||
|
/// ```
|
||||||
///
|
///
|
||||||
/// See functions relpath() and _mdfd_segpath() in PostgreSQL sources.
|
/// See functions relpath() and _mdfd_segpath() in PostgreSQL sources.
|
||||||
///
|
///
|
||||||
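As a rough illustration of the formats listed above, here is a standalone sketch that splits such a filename into its parts (not the crate's actual parsing function, whose signature and error handling differ):

```rust
// Split "<oid>[_<fork name>][.<segment number>]" into (relfilenode, fork, segno).
// A missing segment number is treated as segment 0, matching the formats above.
fn parse_rel_filename(name: &str) -> Option<(u32, Option<&str>, u32)> {
    // Peel off the optional ".<segment number>" suffix first.
    let (main, segno) = match name.split_once('.') {
        Some((main, seg)) => (main, seg.parse().ok()?),
        None => (name, 0),
    };
    // Then peel off the optional "_<fork name>" suffix.
    let (oid, fork) = match main.split_once('_') {
        Some((oid, fork)) => (oid, Some(fork)),
        None => (main, None),
    };
    Some((oid.parse().ok()?, fork, segno))
}

fn main() {
    assert_eq!(parse_rel_filename("16384"), Some((16384, None, 0)));
    assert_eq!(parse_rel_filename("16384_vm.2"), Some((16384, Some("vm"), 2)));
}
```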
|
|||||||
@@ -5,11 +5,11 @@
|
|||||||
//! It is similar to what tokio_util::codec::Framed with appropriate codec
|
//! It is similar to what tokio_util::codec::Framed with appropriate codec
|
||||||
//! provides, but `FramedReader` and `FramedWriter` read/write parts can be used
|
//! provides, but `FramedReader` and `FramedWriter` read/write parts can be used
|
||||||
//! separately without using split from futures::stream::StreamExt (which
|
//! separately without using split from futures::stream::StreamExt (which
|
||||||
//! allocates box[1] in polling internally). tokio::io::split is used for splitting
|
//! allocates a [Box] in polling internally). tokio::io::split is used for splitting
|
||||||
//! instead. Plus we customize error messages more than a single type for all io
|
//! instead. Plus we customize error messages more than a single type for all io
|
||||||
//! calls.
|
//! calls.
|
||||||
//!
|
//!
|
||||||
//! [1] https://docs.rs/futures-util/0.3.26/src/futures_util/lock/bilock.rs.html#107
|
//! [Box]: https://docs.rs/futures-util/0.3.26/src/futures_util/lock/bilock.rs.html#107
|
||||||
use bytes::{Buf, BytesMut};
|
use bytes::{Buf, BytesMut};
|
||||||
use std::{
|
use std::{
|
||||||
future::Future,
|
future::Future,
|
||||||
@@ -117,7 +117,7 @@ impl<S: AsyncWrite + Unpin> Framed<S> {
|
|||||||
impl<S: AsyncRead + AsyncWrite + Unpin> Framed<S> {
|
impl<S: AsyncRead + AsyncWrite + Unpin> Framed<S> {
|
||||||
/// Split into owned read and write parts. Beware of potential issues with
|
/// Split into owned read and write parts. Beware of potential issues with
|
||||||
/// using halves in different tasks on TLS stream:
|
/// using halves in different tasks on TLS stream:
|
||||||
/// https://github.com/tokio-rs/tls/issues/40
|
/// <https://github.com/tokio-rs/tls/issues/40>
|
||||||
pub fn split(self) -> (FramedReader<S>, FramedWriter<S>) {
|
pub fn split(self) -> (FramedReader<S>, FramedWriter<S>) {
|
||||||
let (read_half, write_half) = tokio::io::split(self.stream);
|
let (read_half, write_half) = tokio::io::split(self.stream);
|
||||||
let reader = FramedReader {
|
let reader = FramedReader {
|
||||||
|
|||||||
@@ -934,6 +934,15 @@ impl<'a> BeMessage<'a> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn terminate_code(code: &[u8; 5]) -> [u8; 6] {
|
||||||
|
let mut terminated = [0; 6];
|
||||||
|
for (i, &elem) in code.iter().enumerate() {
|
||||||
|
terminated[i] = elem;
|
||||||
|
}
|
||||||
|
|
||||||
|
terminated
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
@@ -965,12 +974,3 @@ mod tests {
|
|||||||
assert_eq!(split_options(¶ms), ["foo bar", " \\", "baz ", "lol"]);
|
assert_eq!(split_options(¶ms), ["foo bar", " \\", "baz ", "lol"]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn terminate_code(code: &[u8; 5]) -> [u8; 6] {
|
|
||||||
let mut terminated = [0; 6];
|
|
||||||
for (i, &elem) in code.iter().enumerate() {
|
|
||||||
terminated[i] = elem;
|
|
||||||
}
|
|
||||||
|
|
||||||
terminated
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -34,12 +34,12 @@ pub const DEFAULT_REMOTE_STORAGE_MAX_CONCURRENT_SYNCS: usize = 50;
|
|||||||
pub const DEFAULT_REMOTE_STORAGE_MAX_SYNC_ERRORS: u32 = 10;
|
pub const DEFAULT_REMOTE_STORAGE_MAX_SYNC_ERRORS: u32 = 10;
|
||||||
/// Currently, sync happens with AWS S3, that has two limits on requests per second:
|
/// Currently, sync happens with AWS S3, that has two limits on requests per second:
|
||||||
/// ~200 RPS for IAM services
|
/// ~200 RPS for IAM services
|
||||||
/// https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html
|
/// <https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html>
|
||||||
/// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
|
/// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
|
||||||
/// https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/
|
/// <https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/>
|
||||||
pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
|
pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
|
||||||
/// No limits on the client side, which currently means 1000 for AWS S3.
|
/// No limits on the client side, which currently means 1000 for AWS S3.
|
||||||
/// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax
|
/// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax>
|
||||||
pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;
|
pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;
|
||||||
|
|
||||||
const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';
|
const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';
|
||||||
@@ -50,6 +50,12 @@ const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';
|
|||||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||||
pub struct RemotePath(PathBuf);
|
pub struct RemotePath(PathBuf);
|
||||||
|
|
||||||
|
impl std::fmt::Display for RemotePath {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
write!(f, "{}", self.0.display())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl RemotePath {
|
impl RemotePath {
|
||||||
pub fn new(relative_path: &Path) -> anyhow::Result<Self> {
|
pub fn new(relative_path: &Path) -> anyhow::Result<Self> {
|
||||||
anyhow::ensure!(
|
anyhow::ensure!(
|
||||||
|
|||||||
@@ -7,6 +7,7 @@
|
|||||||
use std::{
|
use std::{
|
||||||
borrow::Cow,
|
borrow::Cow,
|
||||||
future::Future,
|
future::Future,
|
||||||
|
io::ErrorKind,
|
||||||
path::{Path, PathBuf},
|
path::{Path, PathBuf},
|
||||||
pin::Pin,
|
pin::Pin,
|
||||||
};
|
};
|
||||||
@@ -150,10 +151,7 @@ impl RemoteStorage for LocalFs {
|
|||||||
let mut files = vec![];
|
let mut files = vec![];
|
||||||
let mut directory_queue = vec![full_path.clone()];
|
let mut directory_queue = vec![full_path.clone()];
|
||||||
|
|
||||||
while !directory_queue.is_empty() {
|
while let Some(cur_folder) = directory_queue.pop() {
|
||||||
let cur_folder = directory_queue
|
|
||||||
.pop()
|
|
||||||
.expect("queue cannot be empty: we just checked");
|
|
||||||
let mut entries = fs::read_dir(cur_folder.clone()).await?;
|
let mut entries = fs::read_dir(cur_folder.clone()).await?;
|
||||||
while let Some(entry) = entries.next_entry().await? {
|
while let Some(entry) = entries.next_entry().await? {
|
||||||
let file_name: PathBuf = entry.file_name().into();
|
let file_name: PathBuf = entry.file_name().into();
|
||||||
@@ -343,18 +341,14 @@ impl RemoteStorage for LocalFs {
|
|||||||
|
|
||||||
async fn delete(&self, path: &RemotePath) -> anyhow::Result<()> {
|
async fn delete(&self, path: &RemotePath) -> anyhow::Result<()> {
|
||||||
let file_path = path.with_base(&self.storage_root);
|
let file_path = path.with_base(&self.storage_root);
|
||||||
if !file_path.exists() {
|
match fs::remove_file(&file_path).await {
|
||||||
|
Ok(()) => Ok(()),
|
||||||
|
// The file doesn't exist. This shouldn't yield an error to mirror S3's behaviour.
|
||||||
// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
|
// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html
|
||||||
// > If there isn't a null version, Amazon S3 does not remove any objects but will still respond that the command was successful.
|
// > If there isn't a null version, Amazon S3 does not remove any objects but will still respond that the command was successful.
|
||||||
return Ok(());
|
Err(e) if e.kind() == ErrorKind::NotFound => Ok(()),
|
||||||
|
Err(e) => Err(anyhow::anyhow!(e)),
|
||||||
}
|
}
|
||||||
|
|
||||||
if !file_path.is_file() {
|
|
||||||
anyhow::bail!("{file_path:?} is not a file");
|
|
||||||
}
|
|
||||||
Ok(fs::remove_file(file_path)
|
|
||||||
.await
|
|
||||||
.map_err(|e| anyhow::anyhow!(e))?)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn delete_objects<'a>(&self, paths: &'a [RemotePath]) -> anyhow::Result<()> {
|
async fn delete_objects<'a>(&self, paths: &'a [RemotePath]) -> anyhow::Result<()> {
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ use crate::{SegmentMethod, SegmentSizeResult, SizeResult, StorageModel};
|
|||||||
// 2. D+C+a+b
|
// 2. D+C+a+b
|
||||||
// 3. D+A+B
|
// 3. D+A+B
|
||||||
|
|
||||||
/// [`Segment`] which has had it's size calculated.
|
/// `Segment` which has had its size calculated.
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
struct SegmentSize {
|
struct SegmentSize {
|
||||||
method: SegmentMethod,
|
method: SegmentMethod,
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ pub enum OtelName<'a> {
|
|||||||
/// directly into HTTP servers. However, I couldn't find one for Hyper,
|
/// directly into HTTP servers. However, I couldn't find one for Hyper,
|
||||||
/// so I had to write our own. OpenTelemetry website has a registry of
|
/// so I had to write our own. OpenTelemetry website has a registry of
|
||||||
/// instrumentation libraries at:
|
/// instrumentation libraries at:
|
||||||
/// https://opentelemetry.io/registry/?language=rust&component=instrumentation
|
/// <https://opentelemetry.io/registry/?language=rust&component=instrumentation>
|
||||||
/// If a Hyper crate appears, consider switching to that.
|
/// If a Hyper crate appears, consider switching to that.
|
||||||
pub async fn tracing_handler<F, R>(
|
pub async fn tracing_handler<F, R>(
|
||||||
req: Request<Body>,
|
req: Request<Body>,
|
||||||
|
|||||||
@@ -40,6 +40,8 @@ pq_proto.workspace = true
|
|||||||
metrics.workspace = true
|
metrics.workspace = true
|
||||||
workspace_hack.workspace = true
|
workspace_hack.workspace = true
|
||||||
|
|
||||||
|
const_format.workspace = true
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
byteorder.workspace = true
|
byteorder.workspace = true
|
||||||
bytes.workspace = true
|
bytes.workspace = true
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ pub async fn json_request<T: for<'de> Deserialize<'de>>(
|
|||||||
.map_err(ApiError::BadRequest)
|
.map_err(ApiError::BadRequest)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Will be removed as part of https://github.com/neondatabase/neon/issues/4282
|
/// Will be removed as part of <https://github.com/neondatabase/neon/issues/4282>
|
||||||
pub async fn json_request_or_empty_body<T: for<'de> Deserialize<'de>>(
|
pub async fn json_request_or_empty_body<T: for<'de> Deserialize<'de>>(
|
||||||
request: &mut Request<Body>,
|
request: &mut Request<Body>,
|
||||||
) -> Result<Option<T>, ApiError> {
|
) -> Result<Option<T>, ApiError> {
|
||||||
|
|||||||
@@ -109,10 +109,16 @@ pub use failpoint_macro_helpers::failpoint_sleep_helper;
|
|||||||
/// * building in docker (either in CI or locally)
|
/// * building in docker (either in CI or locally)
|
||||||
///
|
///
|
||||||
/// One thing to note is that .git is not available in docker (and it is bad to include it there).
|
/// One thing to note is that .git is not available in docker (and it is bad to include it there).
|
||||||
/// So everything becides docker build is covered by git_version crate, and docker uses a `GIT_VERSION` argument to get the value required.
|
/// When building locally, the `git_version` is used to query .git. When building on CI and docker,
|
||||||
/// It takes variable from build process env and puts it to the rustc env. And then we can retrieve it here by using env! macro.
|
/// we don't build the actual PR branch commits, but always a "phantom" would be merge commit to
|
||||||
/// Git version received from environment variable used as a fallback in git_version invocation.
|
/// the target branch -- the actual PR commit from which we build from is supplied as GIT_VERSION
|
||||||
/// And to avoid running buildscript every recompilation, we use rerun-if-env-changed option.
|
/// environment variable.
|
||||||
|
///
|
||||||
|
/// We ended up with this compromise between phantom would be merge commits vs. pull request branch
|
||||||
|
/// heads due to old logs becoming more reliable (github could gc the phantom merge commit
|
||||||
|
/// anytime) in #4641.
|
||||||
|
///
|
||||||
|
/// To avoid running buildscript every recompilation, we use rerun-if-env-changed option.
|
||||||
/// So the build script will be run only when GIT_VERSION envvar has changed.
|
/// So the build script will be run only when GIT_VERSION envvar has changed.
|
||||||
///
|
///
|
||||||
/// Why not to use buildscript to get git commit sha directly without procmacro from different crate?
|
/// Why not to use buildscript to get git commit sha directly without procmacro from different crate?
|
||||||
@@ -124,25 +130,36 @@ pub use failpoint_macro_helpers::failpoint_sleep_helper;
|
|||||||
/// Note that with git_version prefix is `git:` and in case of git version from env its `git-env:`.
|
/// Note that with git_version prefix is `git:` and in case of git version from env its `git-env:`.
|
||||||
///
|
///
|
||||||
/// #############################################################################################
|
/// #############################################################################################
|
||||||
/// TODO this macro is not the way the library is intended to be used, see https://github.com/neondatabase/neon/issues/1565 for details.
|
/// TODO this macro is not the way the library is intended to be used, see <https://github.com/neondatabase/neon/issues/1565> for details.
|
||||||
/// We use `cachepot` to reduce our current CI build times: https://github.com/neondatabase/cloud/pull/1033#issuecomment-1100935036
|
/// We use `cachepot` to reduce our current CI build times: <https://github.com/neondatabase/cloud/pull/1033#issuecomment-1100935036>
|
||||||
/// Yet, it seems to ignore the GIT_VERSION env variable, passed to Docker build, even with build.rs that contains
|
/// Yet, it seems to ignore the GIT_VERSION env variable, passed to Docker build, even with build.rs that contains
|
||||||
/// `println!("cargo:rerun-if-env-changed=GIT_VERSION");` code for cachepot cache invalidation.
|
/// `println!("cargo:rerun-if-env-changed=GIT_VERSION");` code for cachepot cache invalidation.
|
||||||
/// The problem needs further investigation and regular `const` declaration instead of a macro.
|
/// The problem needs further investigation and regular `const` declaration instead of a macro.
|
||||||
#[macro_export]
|
#[macro_export]
|
||||||
macro_rules! project_git_version {
|
macro_rules! project_git_version {
|
||||||
($const_identifier:ident) => {
|
($const_identifier:ident) => {
|
||||||
const $const_identifier: &str = git_version::git_version!(
|
// this should try GIT_VERSION first only then git_version::git_version!
|
||||||
prefix = "git:",
|
const $const_identifier: &::core::primitive::str = {
|
||||||
fallback = concat!(
|
const __COMMIT_FROM_GIT: &::core::primitive::str = git_version::git_version! {
|
||||||
"git-env:",
|
prefix = "",
|
||||||
env!("GIT_VERSION", "Missing GIT_VERSION envvar")
|
fallback = "unknown",
|
||||||
),
|
args = ["--abbrev=40", "--always", "--dirty=-modified"] // always use full sha
|
||||||
args = ["--abbrev=40", "--always", "--dirty=-modified"] // always use full sha
|
};
|
||||||
);
|
|
||||||
|
const __ARG: &[&::core::primitive::str; 2] = &match ::core::option_env!("GIT_VERSION") {
|
||||||
|
::core::option::Option::Some(x) => ["git-env:", x],
|
||||||
|
::core::option::Option::None => ["git:", __COMMIT_FROM_GIT],
|
||||||
|
};
|
||||||
|
|
||||||
|
$crate::__const_format::concatcp!(__ARG[0], __ARG[1])
|
||||||
|
};
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Re-export for `project_git_version` macro
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub use const_format as __const_format;
|
||||||
|
|
||||||
/// Same as `assert!`, but evaluated during compilation and gets optimized out in runtime.
|
/// Same as `assert!`, but evaluated during compilation and gets optimized out in runtime.
|
||||||
#[macro_export]
|
#[macro_export]
|
||||||
macro_rules! const_assert {
|
macro_rules! const_assert {
|
||||||
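For context, a usage sketch of the rewritten `project_git_version!` macro above (the `utils` crate path and the constant name are assumptions for the example):

```rust
// Hypothetical call site: the macro expands to a &'static str constant whose
// value is "git-env:<sha>" when GIT_VERSION was set at build time, and
// otherwise "git:<sha from .git>" (or "git:unknown" when .git is unavailable).
use utils::project_git_version;

project_git_version!(GIT_VERSION);

fn main() {
    println!("version: {GIT_VERSION}");
}
```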
|
|||||||
@@ -1,9 +1,10 @@
|
|||||||
//! A module to create and read lock files.
|
//! A module to create and read lock files.
|
||||||
//!
|
//!
|
||||||
//! File locking is done using [`fcntl::flock`] exclusive locks.
|
//! File locking is done using [`fcntl::flock`] exclusive locks.
|
||||||
//! The only consumer of this module is currently [`pid_file`].
|
//! The only consumer of this module is currently
|
||||||
//! See the module-level comment there for potential pitfalls
|
//! [`pid_file`](crate::pid_file). See the module-level comment
|
||||||
//! with lock files that are used to store PIDs (pidfiles).
|
//! there for potential pitfalls with lock files that are used
|
||||||
|
//! to store PIDs (pidfiles).
|
||||||
|
|
||||||
use std::{
|
use std::{
|
||||||
fs,
|
fs,
|
||||||
@@ -81,7 +82,7 @@ pub fn create_exclusive(lock_file_path: &Path) -> anyhow::Result<UnwrittenLockFi
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returned by [`read_and_hold_lock_file`].
|
/// Returned by [`read_and_hold_lock_file`].
|
||||||
/// Check out the [`pid_file`] module for what the variants mean
|
/// Check out the [`pid_file`](crate::pid_file) module for what the variants mean
|
||||||
/// and potential caveats if the lock files that are used to store PIDs.
|
/// and potential caveats if the lock files that are used to store PIDs.
|
||||||
pub enum LockFileRead {
|
pub enum LockFileRead {
|
||||||
/// No file exists at the given path.
|
/// No file exists at the given path.
|
||||||
|
|||||||
@@ -112,7 +112,7 @@ pub fn init(
|
|||||||
///
|
///
|
||||||
/// When the return value is dropped, the hook is reverted to std default hook (prints to stderr).
|
/// When the return value is dropped, the hook is reverted to std default hook (prints to stderr).
|
||||||
/// If the assumptions about the initialization order are not held, use
|
/// If the assumptions about the initialization order are not held, use
|
||||||
/// [`TracingPanicHookGuard::disarm`] but keep in mind, if tracing is stopped, then panics will be
|
/// [`TracingPanicHookGuard::forget`] but keep in mind, if tracing is stopped, then panics will be
|
||||||
/// lost.
|
/// lost.
|
||||||
#[must_use]
|
#[must_use]
|
||||||
pub fn replace_panic_hook_with_tracing_panic_hook() -> TracingPanicHookGuard {
|
pub fn replace_panic_hook_with_tracing_panic_hook() -> TracingPanicHookGuard {
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
use pin_project_lite::pin_project;
|
use pin_project_lite::pin_project;
|
||||||
|
use std::io::Read;
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::{io, task};
|
use std::{io, task};
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
|
||||||
@@ -75,3 +76,34 @@ impl<S: AsyncWrite + Unpin, R, W: FnMut(usize)> AsyncWrite for MeasuredStream<S,
|
|||||||
self.project().stream.poll_shutdown(context)
|
self.project().stream.poll_shutdown(context)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Wrapper for a reader that counts bytes read.
|
||||||
|
///
|
||||||
|
/// Similar to MeasuredStream but it's one way and it's sync
|
||||||
|
pub struct MeasuredReader<R: Read> {
|
||||||
|
inner: R,
|
||||||
|
byte_count: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R: Read> MeasuredReader<R> {
|
||||||
|
pub fn new(reader: R) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: reader,
|
||||||
|
byte_count: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_byte_count(&self) -> usize {
|
||||||
|
self.byte_count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R: Read> Read for MeasuredReader<R> {
|
||||||
|
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
|
||||||
|
let result = self.inner.read(buf);
|
||||||
|
if let Ok(n_bytes) = result {
|
||||||
|
self.byte_count += n_bytes
|
||||||
|
}
|
||||||
|
result
|
||||||
|
}
|
||||||
|
}
|
||||||
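A brief usage sketch for the `MeasuredReader` added above (assuming the type from the hunk is in scope; it wraps any `std::io::Read`):

```rust
// Wrap an in-memory reader and check the byte count after draining it.
use std::io::Read;

fn main() -> std::io::Result<()> {
    let data = b"some bytes to count";
    let mut reader = MeasuredReader::new(&data[..]);

    let mut sink = Vec::new();
    reader.read_to_end(&mut sink)?;

    assert_eq!(reader.get_byte_count(), data.len());
    Ok(())
}
```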
|
|||||||
@@ -23,9 +23,9 @@ pub enum SeqWaitError {
|
|||||||
|
|
||||||
/// Monotonically increasing value
|
/// Monotonically increasing value
|
||||||
///
|
///
|
||||||
/// It is handy to store some other fields under the same mutex in SeqWait<S>
|
/// It is handy to store some other fields under the same mutex in `SeqWait<S>`
|
||||||
/// (e.g. store prev_record_lsn). So we allow SeqWait to be parametrized with
|
/// (e.g. store prev_record_lsn). So we allow SeqWait to be parametrized with
|
||||||
/// any type that can expose counter. <V> is the type of exposed counter.
|
/// any type that can expose counter. `V` is the type of exposed counter.
|
||||||
pub trait MonotonicCounter<V> {
|
pub trait MonotonicCounter<V> {
|
||||||
/// Bump counter value and check that it goes forward
|
/// Bump counter value and check that it goes forward
|
||||||
/// N.B.: new_val is an actual new value, not a difference.
|
/// N.B.: new_val is an actual new value, not a difference.
|
||||||
@@ -90,7 +90,7 @@ impl<T: Ord> Eq for Waiter<T> {}
|
|||||||
/// [`wait_for`]: SeqWait::wait_for
|
/// [`wait_for`]: SeqWait::wait_for
|
||||||
/// [`advance`]: SeqWait::advance
|
/// [`advance`]: SeqWait::advance
|
||||||
///
|
///
|
||||||
/// <S> means Storage, <V> is type of counter that this storage exposes.
|
/// `S` means Storage, `V` is type of counter that this storage exposes.
|
||||||
///
|
///
|
||||||
pub struct SeqWait<S, V>
|
pub struct SeqWait<S, V>
|
||||||
where
|
where
|
||||||
|
|||||||
@@ -1,8 +1,15 @@
|
|||||||
//! Assert that the current [`tracing::Span`] has a given set of fields.
|
//! Assert that the current [`tracing::Span`] has a given set of fields.
|
||||||
//!
|
//!
|
||||||
|
//! Can only produce meaningful positive results when tracing has been configured as in example.
|
||||||
|
//! Absence of `tracing_error::ErrorLayer` is not detected yet.
|
||||||
|
//!
|
||||||
|
//! `#[cfg(test)]` code will get a pass when using the `check_fields_present` macro in case tracing
|
||||||
|
//! is completly unconfigured.
|
||||||
|
//!
|
||||||
//! # Usage
|
//! # Usage
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```rust
|
||||||
|
//! # fn main() {
|
||||||
//! use tracing_subscriber::prelude::*;
|
//! use tracing_subscriber::prelude::*;
|
||||||
//! let registry = tracing_subscriber::registry()
|
//! let registry = tracing_subscriber::registry()
|
||||||
//! .with(tracing_error::ErrorLayer::default());
|
//! .with(tracing_error::ErrorLayer::default());
|
||||||
@@ -20,23 +27,18 @@
|
|||||||
//!
|
//!
|
||||||
//! use utils::tracing_span_assert::{check_fields_present, MultiNameExtractor};
|
//! use utils::tracing_span_assert::{check_fields_present, MultiNameExtractor};
|
||||||
//! let extractor = MultiNameExtractor::new("TestExtractor", ["test", "test_id"]);
|
//! let extractor = MultiNameExtractor::new("TestExtractor", ["test", "test_id"]);
|
||||||
//! match check_fields_present([&extractor]) {
|
//! if let Err(missing) = check_fields_present!([&extractor]) {
|
||||||
//! Ok(()) => {},
|
//! // if you copypaste this to a custom assert method, remember to add #[track_caller]
|
||||||
//! Err(missing) => {
|
//! // to get the "user" code location for the panic.
|
||||||
//! panic!("Missing fields: {:?}", missing.into_iter().map(|f| f.name() ).collect::<Vec<_>>());
|
//! panic!("Missing fields: {missing:?}");
|
||||||
//! }
|
|
||||||
//! }
|
//! }
|
||||||
|
//! # }
|
||||||
//! ```
|
//! ```
|
||||||
//!
|
//!
|
||||||
//! Recommended reading: https://docs.rs/tracing-subscriber/0.3.16/tracing_subscriber/layer/index.html#per-layer-filtering
|
//! Recommended reading: <https://docs.rs/tracing-subscriber/0.3.16/tracing_subscriber/layer/index.html#per-layer-filtering>
|
||||||
//!
|
//!
|
||||||
|
|
||||||
use std::{
|
#[derive(Debug)]
|
||||||
collections::HashSet,
|
|
||||||
fmt::{self},
|
|
||||||
hash::{Hash, Hasher},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub enum ExtractionResult {
|
pub enum ExtractionResult {
|
||||||
Present,
|
Present,
|
||||||
Absent,
|
Absent,
|
||||||
@@ -71,51 +73,105 @@ impl<const L: usize> Extractor for MultiNameExtractor<L> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct MemoryIdentity<'a>(&'a dyn Extractor);
|
/// Checks that the given extractors are satisfied with the current span hierarchy.
|
||||||
|
///
|
||||||
impl<'a> MemoryIdentity<'a> {
|
/// This should not be called directly, but used through [`check_fields_present`] which allows
|
||||||
fn as_ptr(&self) -> *const () {
|
/// `Summary::Unconfigured` only when the calling crate is being `#[cfg(test)]` as a conservative default.
|
||||||
self.0 as *const _ as *const ()
|
#[doc(hidden)]
|
||||||
}
|
pub fn check_fields_present0<const L: usize>(
|
||||||
}
|
|
||||||
impl<'a> PartialEq for MemoryIdentity<'a> {
|
|
||||||
fn eq(&self, other: &Self) -> bool {
|
|
||||||
self.as_ptr() == other.as_ptr()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<'a> Eq for MemoryIdentity<'a> {}
|
|
||||||
impl<'a> Hash for MemoryIdentity<'a> {
|
|
||||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
|
||||||
self.as_ptr().hash(state);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<'a> fmt::Debug for MemoryIdentity<'a> {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
write!(f, "{:p}: {}", self.as_ptr(), self.0.name())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The extractor names passed as keys to [`new`].
|
|
||||||
pub fn check_fields_present<const L: usize>(
|
|
||||||
must_be_present: [&dyn Extractor; L],
|
must_be_present: [&dyn Extractor; L],
|
||||||
) -> Result<(), Vec<&dyn Extractor>> {
|
) -> Result<Summary, Vec<&dyn Extractor>> {
|
||||||
let mut missing: HashSet<MemoryIdentity> =
|
let mut missing = must_be_present.into_iter().collect::<Vec<_>>();
|
||||||
HashSet::from_iter(must_be_present.into_iter().map(|r| MemoryIdentity(r)));
|
|
||||||
let trace = tracing_error::SpanTrace::capture();
|
let trace = tracing_error::SpanTrace::capture();
|
||||||
trace.with_spans(|md, _formatted_fields| {
|
trace.with_spans(|md, _formatted_fields| {
|
||||||
missing.retain(|extractor| match extractor.0.extract(md.fields()) {
|
// when trying to understand the inner workings of how does the matching work, note that
|
||||||
|
// this closure might be called zero times if the span is disabled. normally it is called
|
||||||
|
// once per span hierarchy level.
|
||||||
|
missing.retain(|extractor| match extractor.extract(md.fields()) {
|
||||||
ExtractionResult::Present => false,
|
ExtractionResult::Present => false,
|
||||||
ExtractionResult::Absent => true,
|
ExtractionResult::Absent => true,
|
||||||
});
|
});
|
||||||
!missing.is_empty() // continue walking up until we've found all missing
|
|
||||||
|
// continue walking up until we've found all missing
|
||||||
|
!missing.is_empty()
|
||||||
});
|
});
|
||||||
if missing.is_empty() {
|
if missing.is_empty() {
|
||||||
Ok(())
|
Ok(Summary::FoundEverything)
|
||||||
|
} else if !tracing_subscriber_configured() {
|
||||||
|
Ok(Summary::Unconfigured)
|
||||||
} else {
|
} else {
|
||||||
Err(missing.into_iter().map(|mi| mi.0).collect())
|
// we can still hit here if a tracing subscriber has been configured but the ErrorLayer is
|
||||||
|
// missing, which can be annoying. for this case, we could probably use
|
||||||
|
// SpanTrace::status().
|
||||||
|
//
|
||||||
|
// another way to end up here is with RUST_LOG=pageserver=off while configuring the
|
||||||
|
// logging, though I guess in that case the SpanTrace::status() == EMPTY would be valid.
|
||||||
|
// this case is covered by test `not_found_if_tracing_error_subscriber_has_wrong_filter`.
|
||||||
|
Err(missing)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Checks that the given extractors are satisfied with the current span hierarchy.
|
||||||
|
///
|
||||||
|
/// The macro is the preferred way of checking if fields exist while passing checks if a test does
|
||||||
|
/// not have tracing configured.
|
||||||
|
///
|
||||||
|
/// Why mangled name? Because #[macro_export] will expose it at utils::__check_fields_present.
|
||||||
|
/// However we can game a module namespaced macro for `use` purposes by re-exporting the
|
||||||
|
/// #[macro_export] exported name with an alias (below).
|
||||||
|
#[doc(hidden)]
|
||||||
|
#[macro_export]
|
||||||
|
macro_rules! __check_fields_present {
|
||||||
|
($extractors:expr) => {{
|
||||||
|
{
|
||||||
|
use $crate::tracing_span_assert::{check_fields_present0, Summary::*, Extractor};
|
||||||
|
|
||||||
|
match check_fields_present0($extractors) {
|
||||||
|
Ok(FoundEverything) => Ok(()),
|
||||||
|
Ok(Unconfigured) if cfg!(test) => {
|
||||||
|
// allow unconfigured in tests
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
Ok(Unconfigured) => {
|
||||||
|
panic!("utils::tracing_span_assert: outside of #[cfg(test)] expected tracing to be configured with tracing_error::ErrorLayer")
|
||||||
|
},
|
||||||
|
Err(missing) => Err(missing)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub use crate::__check_fields_present as check_fields_present;
|
||||||
|
|
||||||
|
/// Explanation for why the check was deemed ok.
|
||||||
|
///
|
||||||
|
/// Mainly useful for testing, or configuring per-crate behaviour as in with
|
||||||
|
/// [`check_fields_present`].
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum Summary {
|
||||||
|
/// All extractors were found.
|
||||||
|
///
|
||||||
|
/// Should only happen when tracing is properly configured.
|
||||||
|
FoundEverything,
|
||||||
|
|
||||||
|
/// Tracing has not been configured at all. This is ok for tests running without tracing set
|
||||||
|
/// up.
|
||||||
|
Unconfigured,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn tracing_subscriber_configured() -> bool {
|
||||||
|
let mut noop_configured = false;
|
||||||
|
tracing::dispatcher::get_default(|d| {
|
||||||
|
// it is possible that this closure will not be invoked, but the current implementation
|
||||||
|
// always invokes it
|
||||||
|
noop_configured = d
|
||||||
|
.downcast_ref::<tracing::subscriber::NoSubscriber>()
|
||||||
|
.is_some();
|
||||||
|
});
|
||||||
|
|
||||||
|
!noop_configured
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
@@ -123,6 +179,36 @@ mod tests {
|
|||||||
|
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
collections::HashSet,
|
||||||
|
fmt::{self},
|
||||||
|
hash::{Hash, Hasher},
|
||||||
|
};
|
||||||
|
|
||||||
|
struct MemoryIdentity<'a>(&'a dyn Extractor);
|
||||||
|
|
||||||
|
impl<'a> MemoryIdentity<'a> {
|
||||||
|
fn as_ptr(&self) -> *const () {
|
||||||
|
self.0 as *const _ as *const ()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<'a> PartialEq for MemoryIdentity<'a> {
|
||||||
|
fn eq(&self, other: &Self) -> bool {
|
||||||
|
self.as_ptr() == other.as_ptr()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<'a> Eq for MemoryIdentity<'a> {}
|
||||||
|
impl<'a> Hash for MemoryIdentity<'a> {
|
||||||
|
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||||
|
self.as_ptr().hash(state);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<'a> fmt::Debug for MemoryIdentity<'a> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
write!(f, "{:p}: {}", self.as_ptr(), self.0.name())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
struct Setup {
|
struct Setup {
|
||||||
_current_thread_subscriber_guard: tracing::subscriber::DefaultGuard,
|
_current_thread_subscriber_guard: tracing::subscriber::DefaultGuard,
|
||||||
tenant_extractor: MultiNameExtractor<2>,
|
tenant_extractor: MultiNameExtractor<2>,
|
||||||
@@ -159,7 +245,8 @@ mod tests {
|
|||||||
let setup = setup_current_thread();
|
let setup = setup_current_thread();
|
||||||
let span = tracing::info_span!("root", tenant_id = "tenant-1", timeline_id = "timeline-1");
|
let span = tracing::info_span!("root", tenant_id = "tenant-1", timeline_id = "timeline-1");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
check_fields_present([&setup.tenant_extractor, &setup.timeline_extractor]).unwrap();
|
let res = check_fields_present0([&setup.tenant_extractor, &setup.timeline_extractor]);
|
||||||
|
assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -167,8 +254,8 @@ mod tests {
|
|||||||
let setup = setup_current_thread();
|
let setup = setup_current_thread();
|
||||||
let span = tracing::info_span!("root", timeline_id = "timeline-1");
|
let span = tracing::info_span!("root", timeline_id = "timeline-1");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
let missing =
|
let missing = check_fields_present0([&setup.tenant_extractor, &setup.timeline_extractor])
|
||||||
check_fields_present([&setup.tenant_extractor, &setup.timeline_extractor]).unwrap_err();
|
.unwrap_err();
|
||||||
assert_missing(missing, vec![&setup.tenant_extractor]);
|
assert_missing(missing, vec![&setup.tenant_extractor]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -185,7 +272,8 @@ mod tests {
|
|||||||
let span = tracing::info_span!("grandchild", timeline_id = "timeline-1");
|
let span = tracing::info_span!("grandchild", timeline_id = "timeline-1");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
|
|
||||||
check_fields_present([&setup.tenant_extractor, &setup.timeline_extractor]).unwrap();
|
let res = check_fields_present0([&setup.tenant_extractor, &setup.timeline_extractor]);
|
||||||
|
assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -198,7 +286,7 @@ mod tests {
|
|||||||
let span = tracing::info_span!("child", timeline_id = "timeline-1");
|
let span = tracing::info_span!("child", timeline_id = "timeline-1");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
|
|
||||||
let missing = check_fields_present([&setup.tenant_extractor]).unwrap_err();
|
let missing = check_fields_present0([&setup.tenant_extractor]).unwrap_err();
|
||||||
assert_missing(missing, vec![&setup.tenant_extractor]);
|
assert_missing(missing, vec![&setup.tenant_extractor]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -207,7 +295,8 @@ mod tests {
|
|||||||
let setup = setup_current_thread();
|
let setup = setup_current_thread();
|
||||||
let span = tracing::info_span!("root", tenant_id = "tenant-1", timeline_id = "timeline-1");
|
let span = tracing::info_span!("root", tenant_id = "tenant-1", timeline_id = "timeline-1");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
check_fields_present([&setup.tenant_extractor]).unwrap();
|
let res = check_fields_present0([&setup.tenant_extractor]);
|
||||||
|
assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -223,7 +312,8 @@ mod tests {
|
|||||||
let span = tracing::info_span!("grandchild", timeline_id = "timeline-1");
|
let span = tracing::info_span!("grandchild", timeline_id = "timeline-1");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
|
|
||||||
check_fields_present([&setup.tenant_extractor]).unwrap();
|
let res = check_fields_present0([&setup.tenant_extractor]);
|
||||||
|
assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -231,7 +321,7 @@ mod tests {
|
|||||||
let setup = setup_current_thread();
|
let setup = setup_current_thread();
|
||||||
let span = tracing::info_span!("root", timeline_id = "timeline-1");
|
let span = tracing::info_span!("root", timeline_id = "timeline-1");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
let missing = check_fields_present([&setup.tenant_extractor]).unwrap_err();
|
let missing = check_fields_present0([&setup.tenant_extractor]).unwrap_err();
|
||||||
assert_missing(missing, vec![&setup.tenant_extractor]);
|
assert_missing(missing, vec![&setup.tenant_extractor]);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -245,43 +335,107 @@ mod tests {
|
|||||||
let span = tracing::info_span!("child", timeline_id = "timeline-1");
|
let span = tracing::info_span!("child", timeline_id = "timeline-1");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
|
|
||||||
let missing = check_fields_present([&setup.tenant_extractor]).unwrap_err();
|
let missing = check_fields_present0([&setup.tenant_extractor]).unwrap_err();
|
||||||
assert_missing(missing, vec![&setup.tenant_extractor]);
|
assert_missing(missing, vec![&setup.tenant_extractor]);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn tracing_error_subscriber_not_set_up() {
|
fn tracing_error_subscriber_not_set_up_straight_line() {
|
||||||
// no setup
|
// no setup
|
||||||
|
|
||||||
let span = tracing::info_span!("foo", e = "some value");
|
let span = tracing::info_span!("foo", e = "some value");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
|
|
||||||
let extractor = MultiNameExtractor::new("E", ["e"]);
|
let extractor = MultiNameExtractor::new("E", ["e"]);
|
||||||
let missing = check_fields_present([&extractor]).unwrap_err();
|
let res = check_fields_present0([&extractor]);
|
||||||
assert_missing(missing, vec![&extractor]);
|
assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}");
|
||||||
|
|
||||||
|
// similarly for a not found key
|
||||||
|
let extractor = MultiNameExtractor::new("F", ["foobar"]);
|
||||||
|
let res = check_fields_present0([&extractor]);
|
||||||
|
assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[should_panic]
|
fn tracing_error_subscriber_not_set_up_with_instrument() {
|
||||||
fn panics_if_tracing_error_subscriber_has_wrong_filter() {
|
// no setup
|
||||||
|
|
||||||
|
// demo a case where span entering is used to establish a parent child connection, but
|
||||||
|
// when we re-enter the subspan SpanTrace::with_spans iterates over nothing.
|
||||||
|
let span = tracing::info_span!("foo", e = "some value");
|
||||||
|
let _guard = span.enter();
|
||||||
|
|
||||||
|
let subspan = tracing::info_span!("bar", f = "foobar");
|
||||||
|
drop(_guard);
|
||||||
|
|
||||||
|
// normally this would work, but without any tracing-subscriber configured, both
|
||||||
|
// check_field_present find nothing
|
||||||
|
let _guard = subspan.enter();
|
||||||
|
let extractors: [&dyn Extractor; 2] = [
|
||||||
|
&MultiNameExtractor::new("E", ["e"]),
|
||||||
|
&MultiNameExtractor::new("F", ["f"]),
|
||||||
|
];
|
||||||
|
|
||||||
|
let res = check_fields_present0(extractors);
|
||||||
|
assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}");
|
||||||
|
|
||||||
|
// similarly for a not found key
|
||||||
|
let extractor = MultiNameExtractor::new("G", ["g"]);
|
||||||
|
let res = check_fields_present0([&extractor]);
|
||||||
|
assert!(matches!(res, Ok(Summary::Unconfigured)), "{res:?}");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn tracing_subscriber_configured() {
|
||||||
|
// this will fail if any utils::logging::init callers appear, but let's hope they do not
|
||||||
|
// appear.
|
||||||
|
assert!(!super::tracing_subscriber_configured());
|
||||||
|
|
||||||
|
let _g = setup_current_thread();
|
||||||
|
|
||||||
|
assert!(super::tracing_subscriber_configured());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn not_found_when_disabled_by_filter() {
|
||||||
let r = tracing_subscriber::registry().with({
|
let r = tracing_subscriber::registry().with({
|
||||||
tracing_error::ErrorLayer::default().with_filter(
|
tracing_error::ErrorLayer::default().with_filter(tracing_subscriber::filter::filter_fn(
|
||||||
tracing_subscriber::filter::dynamic_filter_fn(|md, _| {
|
|md| !(md.is_span() && *md.level() == tracing::Level::INFO),
|
||||||
if md.is_span() && *md.level() == tracing::Level::INFO {
|
))
|
||||||
return false;
|
|
||||||
}
|
|
||||||
true
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
});
|
});
|
||||||
|
|
||||||
let _guard = tracing::subscriber::set_default(r);
|
let _guard = tracing::subscriber::set_default(r);
|
||||||
|
|
||||||
|
// this test is a rather tricky one, it has a number of possible outcomes depending on the
|
||||||
|
// execution order when executed with other tests even if no test sets the global default
|
||||||
|
// subscriber.
|
||||||
|
|
||||||
let span = tracing::info_span!("foo", e = "some value");
|
let span = tracing::info_span!("foo", e = "some value");
|
||||||
let _guard = span.enter();
|
let _guard = span.enter();
|
||||||
|
|
||||||
let extractor = MultiNameExtractor::new("E", ["e"]);
|
let extractors: [&dyn Extractor; 1] = [&MultiNameExtractor::new("E", ["e"])];
|
||||||
let missing = check_fields_present([&extractor]).unwrap_err();
|
|
||||||
assert_missing(missing, vec![&extractor]);
|
if span.is_disabled() {
|
||||||
|
// the tests are running single threaded, or we got lucky and no other tests subscriber
|
||||||
|
// was got to register their per-CALLSITE::META interest between `set_default` and
|
||||||
|
// creation of the span, thus the filter got to apply and registered interest of Never,
|
||||||
|
// so the span was never created.
|
||||||
|
//
|
||||||
|
// as the span is disabled, no keys were recorded to it, leading check_fields_present0
|
||||||
|
// to find an error.
|
||||||
|
|
||||||
|
let missing = check_fields_present0(extractors).unwrap_err();
|
||||||
|
assert_missing(missing, vec![extractors[0]]);
|
||||||
|
} else {
|
||||||
|
// when the span is enabled, it is because some other test is running at the same time,
|
||||||
|
// and that tests registry has filters which are interested in our above span.
|
||||||
|
//
|
||||||
|
// because the span is now enabled, all keys will be found for it. the
|
||||||
|
// tracing_error::SpanTrace does not consider layer filters during the span hierarchy
|
||||||
|
// walk (SpanTrace::with_spans), nor is the SpanTrace::status a reliable indicator in
|
||||||
|
// this test-induced issue.
|
||||||
|
|
||||||
|
let res = check_fields_present0(extractors);
|
||||||
|
assert!(matches!(res, Ok(Summary::FoundEverything)), "{res:?}");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ testing = ["fail/failpoints"]
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow.workspace = true
|
anyhow.workspace = true
|
||||||
|
async-compression.workspace = true
|
||||||
async-stream.workspace = true
|
async-stream.workspace = true
|
||||||
async-trait.workspace = true
|
async-trait.workspace = true
|
||||||
byteorder.workspace = true
|
byteorder.workspace = true
|
||||||
@@ -24,6 +25,7 @@ consumption_metrics.workspace = true
|
|||||||
crc32c.workspace = true
|
crc32c.workspace = true
|
||||||
crossbeam-utils.workspace = true
|
crossbeam-utils.workspace = true
|
||||||
either.workspace = true
|
either.workspace = true
|
||||||
|
flate2.workspace = true
|
||||||
fail.workspace = true
|
fail.workspace = true
|
||||||
futures.workspace = true
|
futures.workspace = true
|
||||||
git-version.workspace = true
|
git-version.workspace = true
|
||||||
@@ -1,8 +1,8 @@
 use pageserver::keyspace::{KeyPartitioning, KeySpace};
 use pageserver::repository::Key;
 use pageserver::tenant::layer_map::LayerMap;
-use pageserver::tenant::storage_layer::{tests::LayerDescriptor, Layer, LayerFileName};
-use pageserver::tenant::storage_layer::{PersistentLayer, PersistentLayerDesc};
+use pageserver::tenant::storage_layer::LayerFileName;
+use pageserver::tenant::storage_layer::PersistentLayerDesc;
 use rand::prelude::{SeedableRng, SliceRandom, StdRng};
 use std::cmp::{max, min};
 use std::fs::File;
@@ -28,13 +28,13 @@ fn build_layer_map(filename_dump: PathBuf) -> LayerMap {
 for fname in filenames {
 let fname = fname.unwrap();
 let fname = LayerFileName::from_str(&fname).unwrap();
-let layer = LayerDescriptor::from(fname);
+let layer = PersistentLayerDesc::from(fname);

 let lsn_range = layer.get_lsn_range();
 min_lsn = min(min_lsn, lsn_range.start);
 max_lsn = max(max_lsn, Lsn(lsn_range.end.0 - 1));

-updates.insert_historic(layer.layer_desc().clone());
+updates.insert_historic(layer);
 }

 println!("min: {min_lsn}, max: {max_lsn}");
@@ -210,15 +210,15 @@ fn bench_sequential(c: &mut Criterion) {
 for i in 0..100_000 {
 let i32 = (i as u32) % 100;
 let zero = Key::from_hex("000000000000000000000000000000000000").unwrap();
-let layer = LayerDescriptor::from(PersistentLayerDesc::new_img(
+let layer = PersistentLayerDesc::new_img(
 TenantId::generate(),
 TimelineId::generate(),
 zero.add(10 * i32)..zero.add(10 * i32 + 1),
 Lsn(i),
 false,
 0,
-));
-updates.insert_historic(layer.layer_desc().clone());
+);
+updates.insert_historic(layer);
 }
 updates.flush();
 println!("Finished layer map init in {:?}", now.elapsed());
@@ -7,10 +7,10 @@
 //! - The y axis represents LSN, growing upwards.
 //!
 //! Coordinates in both axis are compressed for better readability.
-//! (see https://medium.com/algorithms-digest/coordinate-compression-2fff95326fb)
+//! (see <https://medium.com/algorithms-digest/coordinate-compression-2fff95326fb>)
 //!
 //! Example use:
-//! ```
+//! ```bash
 //! $ ls test_output/test_pgbench\[neon-45-684\]/repo/tenants/$TENANT/timelines/$TIMELINE | \
 //! $ grep "__" | cargo run --release --bin pagectl draw-timeline-dir > out.svg
 //! $ firefox out.svg
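The doc comment above leans on coordinate compression to keep the SVG readable but only links out for the definition. A minimal sketch of the idea, not taken from pagectl itself (the function and values are illustrative): sort the distinct coordinates and replace each one by its rank, so widely spaced keys or LSNs end up on adjacent grid positions.

fn compress(coords: &[u64]) -> std::collections::HashMap<u64, usize> {
    let mut sorted: Vec<u64> = coords.to_vec();
    sorted.sort_unstable();
    sorted.dedup();
    // each distinct coordinate gets its rank as the compressed value
    sorted.into_iter().enumerate().map(|(i, c)| (c, i)).collect()
}

fn main() {
    let compressed = compress(&[1_000_000, 5, 999, 1_000_000]);
    assert_eq!(compressed[&5], 0);
    assert_eq!(compressed[&999], 1);
    assert_eq!(compressed[&1_000_000], 2);
}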
@@ -20,7 +20,7 @@
 //! or from pageserver log files.
 //!
 //! TODO Consider shipping this as a grafana panel plugin:
-//! https://grafana.com/tutorials/build-a-panel-plugin/
+//! <https://grafana.com/tutorials/build-a-panel-plugin/>
 use anyhow::Result;
 use pageserver::repository::Key;
 use std::cmp::Ordering;
@@ -117,7 +117,8 @@ pub fn main() -> Result<()> {

 let mut lsn_diff = (lsn_end - lsn_start) as f32;
 let mut fill = Fill::None;
-let mut margin = 0.05 * lsn_diff; // Height-dependent margin to disambiguate overlapping deltas
+let mut ymargin = 0.05 * lsn_diff; // Height-dependent margin to disambiguate overlapping deltas
+let xmargin = 0.05; // Height-dependent margin to disambiguate overlapping deltas
 let mut lsn_offset = 0.0;

 // Fill in and thicken rectangle if it's an
@@ -128,7 +129,7 @@ pub fn main() -> Result<()> {
 num_images += 1;
 lsn_diff = 0.3;
 lsn_offset = -lsn_diff / 2.0;
-margin = 0.05;
+ymargin = 0.05;
 fill = Fill::Color(rgb(0, 0, 0));
 }
 Ordering::Greater => panic!("Invalid lsn range {}-{}", lsn_start, lsn_end),
@@ -137,10 +138,10 @@ pub fn main() -> Result<()> {
 println!(
 " {}",
 rectangle(
-key_start as f32 + stretch * margin,
-stretch * (lsn_max as f32 - (lsn_end as f32 - margin - lsn_offset)),
-key_diff as f32 - stretch * 2.0 * margin,
-stretch * (lsn_diff - 2.0 * margin)
+key_start as f32 + stretch * xmargin,
+stretch * (lsn_max as f32 - (lsn_end as f32 - ymargin - lsn_offset)),
+key_diff as f32 - stretch * 2.0 * xmargin,
+stretch * (lsn_diff - 2.0 * ymargin)
 )
 .fill(fill)
 .stroke(Stroke::Color(rgb(0, 0, 0), 0.1))
@@ -19,12 +19,6 @@ use tokio::io;
 use tokio::io::AsyncWrite;
 use tracing::*;

-/// NB: This relies on a modified version of tokio_tar that does *not* write the
-/// end-of-archive marker (1024 zero bytes), when the Builder struct is dropped
-/// without explicitly calling 'finish' or 'into_inner'!
-///
-/// See https://github.com/neondatabase/tokio-tar/pull/1
-///
 use tokio_tar::{Builder, EntryType, Header};

 use crate::context::RequestContext;
@@ -171,11 +171,13 @@ pub struct PageServerConf {

 pub log_format: LogFormat,

-/// Number of concurrent [`Tenant::gather_size_inputs`] allowed.
+/// Number of concurrent [`Tenant::gather_size_inputs`](crate::tenant::Tenant::gather_size_inputs) allowed.
 pub concurrent_tenant_size_logical_size_queries: ConfigurableSemaphore,
 /// Limit of concurrent [`Tenant::gather_size_inputs`] issued by module `eviction_task`.
 /// The number of permits is the same as `concurrent_tenant_size_logical_size_queries`.
 /// See the comment in `eviction_task` for details.
+///
+/// [`Tenant::gather_size_inputs`]: crate::tenant::Tenant::gather_size_inputs
 pub eviction_task_immitated_concurrent_logical_size_queries: ConfigurableSemaphore,

 // How often to collect metrics and send them to the metrics endpoint.
@@ -570,21 +572,21 @@ impl PageServerConf {
 .join(TENANT_ATTACHING_MARKER_FILENAME)
 }

-pub fn tenant_ignore_mark_file_path(&self, tenant_id: TenantId) -> PathBuf {
-self.tenant_path(&tenant_id).join(IGNORED_TENANT_FILE_NAME)
+pub fn tenant_ignore_mark_file_path(&self, tenant_id: &TenantId) -> PathBuf {
+self.tenant_path(tenant_id).join(IGNORED_TENANT_FILE_NAME)
 }

 /// Points to a place in pageserver's local directory,
 /// where certain tenant's tenantconf file should be located.
-pub fn tenant_config_path(&self, tenant_id: TenantId) -> PathBuf {
-self.tenant_path(&tenant_id).join(TENANT_CONFIG_NAME)
+pub fn tenant_config_path(&self, tenant_id: &TenantId) -> PathBuf {
+self.tenant_path(tenant_id).join(TENANT_CONFIG_NAME)
 }

 pub fn timelines_path(&self, tenant_id: &TenantId) -> PathBuf {
 self.tenant_path(tenant_id).join(TIMELINES_SEGMENT_NAME)
 }

-pub fn timeline_path(&self, timeline_id: &TimelineId, tenant_id: &TenantId) -> PathBuf {
+pub fn timeline_path(&self, tenant_id: &TenantId, timeline_id: &TimelineId) -> PathBuf {
 self.timelines_path(tenant_id).join(timeline_id.to_string())
 }

@@ -594,7 +596,7 @@ impl PageServerConf {
 timeline_id: TimelineId,
 ) -> PathBuf {
 path_with_suffix_extension(
-self.timeline_path(&timeline_id, &tenant_id),
+self.timeline_path(&tenant_id, &timeline_id),
 TIMELINE_UNINIT_MARK_SUFFIX,
 )
 }
@@ -617,8 +619,8 @@ impl PageServerConf {

 /// Points to a place in pageserver's local directory,
 /// where certain timeline's metadata file should be located.
-pub fn metadata_path(&self, timeline_id: TimelineId, tenant_id: TenantId) -> PathBuf {
-self.timeline_path(&timeline_id, &tenant_id)
+pub fn metadata_path(&self, tenant_id: &TenantId, timeline_id: &TimelineId) -> PathBuf {
+self.timeline_path(tenant_id, timeline_id)
 .join(METADATA_FILE_NAME)
 }

@@ -993,6 +995,8 @@ impl ConfigurableSemaphore {
 /// Require a non-zero initial permits, because using permits == 0 is a crude way to disable a
 /// feature such as [`Tenant::gather_size_inputs`]. Otherwise any semaphore using future will
 /// behave like [`futures::future::pending`], just waiting until new permits are added.
+///
+/// [`Tenant::gather_size_inputs`]: crate::tenant::Tenant::gather_size_inputs
 pub fn new(initial_permits: NonZeroUsize) -> Self {
 ConfigurableSemaphore {
 initial_permits,
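The doc-comment edits in this file either qualify the intra-doc link target inline or add a link reference definition at the end of the comment; rustdoc can only resolve `[`Item`]` if the path is reachable from where the comment lives. A hedged illustration with made-up items showing the reference-definition form the hunks use:

/// Counts calls to [`Widget::poke`].
///
/// [`Widget::poke`]: crate::widgets::Widget::poke
pub struct PokeCounter(pub u64);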
@@ -179,6 +179,9 @@ impl RequestContext {
 /// a context and you are unwilling to change all callers to provide one.
 ///
 /// Before we add cancellation, we should get rid of this method.
+///
+/// [`attached_child`]: Self::attached_child
+/// [`detached_child`]: Self::detached_child
 pub fn todo_child(task_kind: TaskKind, download_behavior: DownloadBehavior) -> Self {
 Self::new(task_kind, download_behavior)
 }
@@ -305,7 +305,7 @@ pub async fn disk_usage_eviction_task_iteration_impl<U: Usage>(
 let now = SystemTime::now();
 for (i, (partition, candidate)) in candidates.iter().enumerate() {
 debug!(
-"cand {}/{}: size={}, no_access_for={}us, parition={:?}, tenant={} timeline={} layer={}",
+"cand {}/{}: size={}, no_access_for={}us, partition={:?}, {}/{}/{}",
 i + 1,
 candidates.len(),
 candidate.layer.file_size(),
@@ -346,7 +346,7 @@ async fn timeline_create_handler(
 Err(tenant::CreateTimelineError::Other(err)) => Err(ApiError::InternalServerError(err)),
 }
 }
-.instrument(info_span!("timeline_create", tenant = %tenant_id, timeline_id = %new_timeline_id, lsn=?request_data.ancestor_start_lsn, pg_version=?request_data.pg_version))
+.instrument(info_span!("timeline_create", %tenant_id, timeline_id = %new_timeline_id, lsn=?request_data.ancestor_start_lsn, pg_version=?request_data.pg_version))
 .await
 }

@@ -381,7 +381,7 @@ async fn timeline_list_handler(
 }
 Ok::<Vec<TimelineInfo>, ApiError>(response_data)
 }
-.instrument(info_span!("timeline_list", tenant = %tenant_id))
+.instrument(info_span!("timeline_list", %tenant_id))
 .await?;

 json_response(StatusCode::OK, response_data)
@@ -418,7 +418,7 @@ async fn timeline_detail_handler(

 Ok::<_, ApiError>(timeline_info)
 }
-.instrument(info_span!("timeline_detail", tenant = %tenant_id, timeline = %timeline_id))
+.instrument(info_span!("timeline_detail", %tenant_id, %timeline_id))
 .await?;

 json_response(StatusCode::OK, timeline_info)
@@ -479,7 +479,7 @@ async fn tenant_attach_handler(
 remote_storage.clone(),
 &ctx,
 )
-.instrument(info_span!("tenant_attach", tenant = %tenant_id))
+.instrument(info_span!("tenant_attach", %tenant_id))
 .await?;
 } else {
 return Err(ApiError::BadRequest(anyhow!(
@@ -501,7 +501,7 @@ async fn timeline_delete_handler(
 let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);

 mgr::delete_timeline(tenant_id, timeline_id, &ctx)
-.instrument(info_span!("timeline_delete", tenant = %tenant_id, timeline = %timeline_id))
+.instrument(info_span!("timeline_delete", %tenant_id, %timeline_id))
 .await?;

 // FIXME: needs to be an error for console to retry it. Ideally Accepted should be used and retried until 404.
@@ -519,7 +519,7 @@ async fn tenant_detach_handler(
 let state = get_state(&request);
 let conf = state.conf;
 mgr::detach_tenant(conf, tenant_id, detach_ignored.unwrap_or(false))
-.instrument(info_span!("tenant_detach", tenant = %tenant_id))
+.instrument(info_span!("tenant_detach", %tenant_id))
 .await?;

 json_response(StatusCode::OK, ())
@@ -542,7 +542,7 @@ async fn tenant_load_handler(
 state.remote_storage.clone(),
 &ctx,
 )
-.instrument(info_span!("load", tenant = %tenant_id))
+.instrument(info_span!("load", %tenant_id))
 .await?;

 json_response(StatusCode::ACCEPTED, ())
@@ -558,7 +558,7 @@ async fn tenant_ignore_handler(
 let state = get_state(&request);
 let conf = state.conf;
 mgr::ignore_tenant(conf, tenant_id)
-.instrument(info_span!("ignore_tenant", tenant = %tenant_id))
+.instrument(info_span!("ignore_tenant", %tenant_id))
 .await?;

 json_response(StatusCode::OK, ())
@@ -611,7 +611,7 @@ async fn tenant_status(
 attachment_status: state.attachment_status(),
 })
 }
-.instrument(info_span!("tenant_status_handler", tenant = %tenant_id))
+.instrument(info_span!("tenant_status_handler", %tenant_id))
 .await?;

 json_response(StatusCode::OK, tenant_info)
@@ -850,7 +850,7 @@ async fn tenant_create_handler(
 state.remote_storage.clone(),
 &ctx,
 )
-.instrument(info_span!("tenant_create", tenant = ?target_tenant_id))
+.instrument(info_span!("tenant_create", tenant_id = %target_tenant_id))
 .await?;

 // We created the tenant. Existing API semantics are that the tenant
@@ -912,7 +912,7 @@ async fn update_tenant_config_handler(

 let state = get_state(&request);
 mgr::set_new_tenant_config(state.conf, tenant_conf, tenant_id)
-.instrument(info_span!("tenant_config", tenant = ?tenant_id))
+.instrument(info_span!("tenant_config", %tenant_id))
 .await?;

 json_response(StatusCode::OK, ())
@@ -1143,7 +1143,7 @@ async fn disk_usage_eviction_run(
 let Some(storage) = state.remote_storage.clone() else {
 return Err(ApiError::InternalServerError(anyhow::anyhow!(
 "remote storage not configured, cannot run eviction iteration"
-)))
+)));
 };

 let state = state.disk_usage_eviction_state.clone();
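The handler hunks above repeatedly swap `tenant = %tenant_id` for the bare `%tenant_id` shorthand. In tracing's span macros the shorthand names the field after the variable and records it through its Display implementation, so both spellings below should yield the same field; a small sketch with a placeholder value:

use tracing::info_span;

fn main() {
    let tenant_id = "ab0be7b8"; // hypothetical value standing in for a TenantId
    // `%tenant_id` is shorthand for `tenant_id = %tenant_id`
    let explicit = info_span!("tenant_status_handler", tenant_id = %tenant_id);
    let shorthand = info_span!("tenant_status_handler", %tenant_id);
    drop((explicit, shorthand));
}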
@@ -385,7 +385,7 @@ pub static UNEXPECTED_ONDEMAND_DOWNLOADS: Lazy<IntCounter> = Lazy::new(|| {
 .expect("failed to define a metric")
 });

-/// Each [`Timeline`]'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
+/// Each `Timeline`'s [`EVICTIONS_WITH_LOW_RESIDENCE_DURATION`] metric.
 #[derive(Debug)]
 pub struct EvictionsWithLowResidenceDuration {
 data_source: &'static str,
@@ -541,6 +541,17 @@ pub static SMGR_QUERY_TIME: Lazy<HistogramVec> = Lazy::new(|| {
 .expect("failed to define a metric")
 });

+// keep in sync with control plane Go code so that we can validate
+// compute's basebackup_ms metric with our perspective in the context of SLI/SLO.
+static COMPUTE_STARTUP_BUCKETS: Lazy<[f64; 28]> = Lazy::new(|| {
+// Go code uses milliseconds. Variable is called `computeStartupBuckets`
+[
+5, 10, 20, 30, 50, 70, 100, 120, 150, 200, 250, 300, 350, 400, 450, 500, 600, 800, 1000,
+1500, 2000, 2500, 3000, 5000, 10000, 20000, 40000, 60000,
+]
+.map(|ms| (ms as f64) / 1000.0)
+});
+
 pub struct BasebackupQueryTime(HistogramVec);
 pub static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
 BasebackupQueryTime({
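The new COMPUTE_STARTUP_BUCKETS above keeps its boundaries in milliseconds (to stay aligned with the control-plane Go code) and divides by 1000 because Prometheus histograms conventionally measure seconds. A standalone sketch of the same conversion with the prometheus crate; the metric name and boundaries here are invented for illustration only:

use prometheus::{HistogramOpts, HistogramVec};

fn main() {
    // Boundaries written in milliseconds for readability, converted to seconds.
    let buckets_ms = [5.0, 100.0, 1_000.0, 10_000.0, 60_000.0];
    let buckets_s: Vec<f64> = buckets_ms.iter().map(|ms| *ms / 1000.0).collect();

    let histogram = HistogramVec::new(
        HistogramOpts::new("example_query_seconds", "query duration by result").buckets(buckets_s),
        &["result"],
    )
    .expect("failed to define a metric");

    histogram.with_label_values(&["ok"]).observe(0.042);
}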
@@ -548,7 +559,7 @@ pub static BASEBACKUP_QUERY_TIME: Lazy<BasebackupQueryTime> = Lazy::new(|| {
 "pageserver_basebackup_query_seconds",
 "Histogram of basebackup queries durations, by result type",
 &["result"],
-CRITICAL_OP_BUCKETS.into(),
+COMPUTE_STARTUP_BUCKETS.to_vec(),
 )
 .expect("failed to define a metric")
 })
@@ -818,7 +829,7 @@ pub static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
 .unwrap()
 });

-/// Similar to [`prometheus::HistogramTimer`] but does not record on drop.
+/// Similar to `prometheus::HistogramTimer` but does not record on drop.
 pub struct StorageTimeMetricsTimer {
 metrics: StorageTimeMetrics,
 start: Instant,
@@ -876,7 +887,7 @@ impl StorageTimeMetrics {

 /// Starts timing a new operation.
 ///
-/// Note: unlike [`prometheus::HistogramTimer`] the returned timer does not record on drop.
+/// Note: unlike `prometheus::HistogramTimer` the returned timer does not record on drop.
 pub fn start_timer(&self) -> StorageTimeMetricsTimer {
 StorageTimeMetricsTimer::new(self.clone())
 }
@@ -1256,7 +1267,7 @@ impl RemoteTimelineClientMetrics {
 /// Update the metrics that change when a call to the remote timeline client instance starts.
 ///
 /// Drop the returned guard object once the operation is finished to updates corresponding metrics that track completions.
-/// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`] if that
+/// Or, use [`RemoteTimelineClientCallMetricGuard::will_decrement_manually`] and [`call_end`](Self::call_end) if that
 /// is more suitable.
 /// Never do both.
 pub(crate) fn call_begin(
@@ -1289,7 +1300,7 @@ impl RemoteTimelineClientMetrics {

 /// Manually udpate the metrics that track completions, instead of using the guard object.
 /// Using the guard object is generally preferable.
-/// See [`call_begin`] for more context.
+/// See [`call_begin`](Self::call_begin) for more context.
 pub(crate) fn call_end(
 &self,
 file_kind: &RemoteOpFileKind,
@@ -10,6 +10,7 @@
 //

 use anyhow::Context;
+use async_compression::tokio::write::GzipEncoder;
 use bytes::Buf;
 use bytes::Bytes;
 use futures::Stream;
@@ -31,8 +32,10 @@ use std::str;
 use std::str::FromStr;
 use std::sync::Arc;
 use std::time::Duration;
+use tokio::io::AsyncWriteExt;
 use tokio::io::{AsyncRead, AsyncWrite};
 use tokio_util::io::StreamReader;
+use tracing::field;
 use tracing::*;
 use utils::id::ConnectionId;
 use utils::{
@@ -51,6 +54,7 @@ use crate::metrics::{LIVE_CONNECTIONS_COUNT, SMGR_QUERY_TIME};
 use crate::task_mgr;
 use crate::task_mgr::TaskKind;
 use crate::tenant;
+use crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id;
 use crate::tenant::mgr;
 use crate::tenant::mgr::GetTenantError;
 use crate::tenant::{Tenant, Timeline};
@@ -238,6 +242,7 @@ pub async fn libpq_listener_main(
 Ok(())
 }

+#[instrument(skip_all, fields(peer_addr))]
 async fn page_service_conn_main(
 conf: &'static PageServerConf,
 broker_client: storage_broker::BrokerClientChannel,
@@ -260,6 +265,7 @@ async fn page_service_conn_main(
 .context("could not set TCP_NODELAY")?;

 let peer_addr = socket.peer_addr().context("get peer address")?;
+tracing::Span::current().record("peer_addr", field::display(peer_addr));

 // setup read timeout of 10 minutes. the timeout is rather arbitrary for requirements:
 // - long enough for most valid compute connections
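The hunk above declares peer_addr on the connection span up front and records it once the socket address is known. A minimal sketch of that declare-then-record pattern, assuming a recent tracing 0.1 release where Span::record takes the value by value; the span name and address are placeholders:

use tracing::{field, info_span};

fn main() {
    // Declare `peer_addr` as an empty field when the span is created; only fields
    // declared on the span at creation time can be recorded later.
    let span = info_span!("page_service_conn", peer_addr = field::Empty);
    let peer_addr = "127.0.0.1:5432"; // hypothetical value
    span.record("peer_addr", field::display(peer_addr));
}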
@@ -362,7 +368,7 @@ impl PageServerHandler {
 }
 }

-#[instrument(skip(self, pgb, ctx))]
+#[instrument(skip_all)]
 async fn handle_pagerequests<IO>(
 &self,
 pgb: &mut PostgresBackend<IO>,
@@ -373,6 +379,8 @@ impl PageServerHandler {
 where
 IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
 {
+debug_assert_current_span_has_tenant_and_timeline_id();
+
 // NOTE: pagerequests handler exits when connection is closed,
 // so there is no need to reset the association
 task_mgr::associate_with(Some(tenant_id), Some(timeline_id));
@@ -473,7 +481,7 @@ impl PageServerHandler {
 }

 #[allow(clippy::too_many_arguments)]
-#[instrument(skip(self, pgb, ctx))]
+#[instrument(skip_all, fields(%base_lsn, end_lsn=%_end_lsn, %pg_version))]
 async fn handle_import_basebackup<IO>(
 &self,
 pgb: &mut PostgresBackend<IO>,
@@ -487,6 +495,8 @@ impl PageServerHandler {
 where
 IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
 {
+debug_assert_current_span_has_tenant_and_timeline_id();
+
 task_mgr::associate_with(Some(tenant_id), Some(timeline_id));
 // Create empty timeline
 info!("creating new timeline");
@@ -531,7 +541,7 @@ impl PageServerHandler {
 Ok(())
 }

-#[instrument(skip(self, pgb, ctx))]
+#[instrument(skip_all, fields(%start_lsn, %end_lsn))]
 async fn handle_import_wal<IO>(
 &self,
 pgb: &mut PostgresBackend<IO>,
@@ -544,6 +554,7 @@ impl PageServerHandler {
 where
 IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
 {
+debug_assert_current_span_has_tenant_and_timeline_id();
 task_mgr::associate_with(Some(tenant_id), Some(timeline_id));

 let timeline = get_active_tenant_timeline(tenant_id, timeline_id, &ctx).await?;
@@ -738,7 +749,7 @@ impl PageServerHandler {
 }

 #[allow(clippy::too_many_arguments)]
-#[instrument(skip(self, pgb, ctx))]
+#[instrument(skip_all, fields(?lsn, ?prev_lsn, %full_backup))]
 async fn handle_basebackup_request<IO>(
 &mut self,
 pgb: &mut PostgresBackend<IO>,
@@ -747,11 +758,14 @@ impl PageServerHandler {
 lsn: Option<Lsn>,
 prev_lsn: Option<Lsn>,
 full_backup: bool,
+gzip: bool,
 ctx: RequestContext,
 ) -> anyhow::Result<()>
 where
 IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
 {
+debug_assert_current_span_has_tenant_and_timeline_id();
+
 let started = std::time::Instant::now();

 // check that the timeline exists
@@ -772,8 +786,9 @@ impl PageServerHandler {
 pgb.write_message_noflush(&BeMessage::CopyOutResponse)?;
 pgb.flush().await?;

-// Send a tarball of the latest layer on the timeline
-{
+// Send a tarball of the latest layer on the timeline. Compress if not
+// fullbackup. TODO Compress in that case too (tests need to be updated)
+if full_backup {
 let mut writer = pgb.copyout_writer();
 basebackup::send_basebackup_tarball(
 &mut writer,
@@ -784,6 +799,40 @@ impl PageServerHandler {
 &ctx,
 )
 .await?;
+} else {
+let mut writer = pgb.copyout_writer();
+if gzip {
+let mut encoder = GzipEncoder::with_quality(
+writer,
+// NOTE using fast compression because it's on the critical path
+// for compute startup. For an empty database, we get
+// <100KB with this method. The Level::Best compression method
+// gives us <20KB, but maybe we should add basebackup caching
+// on compute shutdown first.
+async_compression::Level::Fastest,
+);
+basebackup::send_basebackup_tarball(
+&mut encoder,
+&timeline,
+lsn,
+prev_lsn,
+full_backup,
+&ctx,
+)
+.await?;
+// shutdown the encoder to ensure the gzip footer is written
+encoder.shutdown().await?;
+} else {
+basebackup::send_basebackup_tarball(
+&mut writer,
+&timeline,
+lsn,
+prev_lsn,
+full_backup,
+&ctx,
+)
+.await?;
+}
 }

 pgb.write_message_noflush(&BeMessage::CopyDone)?;
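The gzip branch above wraps the CopyData writer in async-compression's GzipEncoder and has to shut the encoder down so the gzip footer is flushed. A self-contained sketch of the same pattern writing into a Vec<u8>, assuming the async-compression crate with its tokio and gzip features enabled:

use async_compression::tokio::write::GzipEncoder;
use tokio::io::AsyncWriteExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Any AsyncWrite works as the inner sink; a Vec<u8> keeps the sketch self-contained.
    let mut encoder = GzipEncoder::with_quality(Vec::<u8>::new(), async_compression::Level::Fastest);
    encoder.write_all(b"basebackup payload goes here").await?;
    // shutdown() flushes remaining data and writes the gzip footer; dropping the
    // encoder without it would leave a truncated stream.
    encoder.shutdown().await?;
    let compressed = encoder.into_inner();
    assert!(!compressed.is_empty());
    Ok(())
}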
@@ -862,6 +911,7 @@ where
 Ok(())
 }

+#[instrument(skip_all, fields(tenant_id, timeline_id))]
 async fn process_query(
 &mut self,
 pgb: &mut PostgresBackend<IO>,
@@ -883,6 +933,10 @@ where
 let timeline_id = TimelineId::from_str(params[1])
 .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;

+tracing::Span::current()
+.record("tenant_id", field::display(tenant_id))
+.record("timeline_id", field::display(timeline_id));
+
 self.check_permission(Some(tenant_id))?;

 self.handle_pagerequests(pgb, tenant_id, timeline_id, ctx)
@@ -902,6 +956,10 @@ where
 let timeline_id = TimelineId::from_str(params[1])
 .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;

+tracing::Span::current()
+.record("tenant_id", field::display(tenant_id))
+.record("timeline_id", field::display(timeline_id));
+
 self.check_permission(Some(tenant_id))?;

 let lsn = if params.len() >= 3 {
@@ -913,6 +971,19 @@ where
 None
 };

+let gzip = if params.len() >= 4 {
+if params[3] == "--gzip" {
+true
+} else {
+return Err(QueryError::Other(anyhow::anyhow!(
+"Parameter in position 3 unknown {}",
+params[3],
+)));
+}
+} else {
+false
+};
+
 metrics::metric_vec_duration::observe_async_block_duration_by_result(
 &*crate::metrics::BASEBACKUP_QUERY_TIME,
 async move {
@@ -923,6 +994,7 @@ where
 lsn,
 None,
 false,
+gzip,
 ctx,
 )
 .await?;
@@ -948,6 +1020,10 @@ where
 let timeline_id = TimelineId::from_str(params[1])
 .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;

+tracing::Span::current()
+.record("tenant_id", field::display(tenant_id))
+.record("timeline_id", field::display(timeline_id));
+
 self.check_permission(Some(tenant_id))?;
 let timeline = get_active_tenant_timeline(tenant_id, timeline_id, &ctx).await?;

@@ -979,6 +1055,10 @@ where
 let timeline_id = TimelineId::from_str(params[1])
 .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;

+tracing::Span::current()
+.record("tenant_id", field::display(tenant_id))
+.record("timeline_id", field::display(timeline_id));
+
 // The caller is responsible for providing correct lsn and prev_lsn.
 let lsn = if params.len() > 2 {
 Some(
@@ -1000,8 +1080,17 @@ where
 self.check_permission(Some(tenant_id))?;

 // Check that the timeline exists
-self.handle_basebackup_request(pgb, tenant_id, timeline_id, lsn, prev_lsn, true, ctx)
-.await?;
+self.handle_basebackup_request(
+pgb,
+tenant_id,
+timeline_id,
+lsn,
+prev_lsn,
+true,
+false,
+ctx,
+)
+.await?;
 pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
 } else if query_string.starts_with("import basebackup ") {
 // Import the `base` section (everything but the wal) of a basebackup.
@@ -1033,6 +1122,10 @@ where
 let pg_version = u32::from_str(params[4])
 .with_context(|| format!("Failed to parse pg_version from {}", params[4]))?;

+tracing::Span::current()
+.record("tenant_id", field::display(tenant_id))
+.record("timeline_id", field::display(timeline_id));
+
 self.check_permission(Some(tenant_id))?;

 match self
@@ -1077,6 +1170,10 @@ where
 let end_lsn = Lsn::from_str(params[3])
 .with_context(|| format!("Failed to parse Lsn from {}", params[3]))?;

+tracing::Span::current()
+.record("tenant_id", field::display(tenant_id))
+.record("timeline_id", field::display(timeline_id));
+
 self.check_permission(Some(tenant_id))?;

 match self
@@ -1108,6 +1205,8 @@ where
 let tenant_id = TenantId::from_str(params[0])
 .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;

+tracing::Span::current().record("tenant_id", field::display(tenant_id));
+
 self.check_permission(Some(tenant_id))?;

 let tenant = get_active_tenant_with_timeout(tenant_id, &ctx).await?;
@@ -1131,7 +1131,7 @@ impl<'a> DatadirModification<'a> {
 /// context, breaking the atomicity is OK. If the import is interrupted, the
 /// whole import fails and the timeline will be deleted anyway.
 /// (Or to be precise, it will be left behind for debugging purposes and
-/// ignored, see https://github.com/neondatabase/neon/pull/1809)
+/// ignored, see <https://github.com/neondatabase/neon/pull/1809>)
 ///
 /// Note: A consequence of flushing the pending operations is that they
 /// won't be visible to subsequent operations until `commit`. The function
@@ -205,7 +205,7 @@ pub enum TaskKind {
 ///
 /// Walreceiver uses its own abstraction called `TaskHandle` to represent the activity of establishing and handling a connection.
 /// That abstraction doesn't use `task_mgr`.
-/// The [`WalReceiverManager`] task ensures that this `TaskHandle` task does not outlive the [`WalReceiverManager`] task.
+/// The `WalReceiverManager` task ensures that this `TaskHandle` task does not outlive the `WalReceiverManager` task.
 /// For the `RequestContext` that we hand to the TaskHandle, we use the [`WalReceiverConnectionHandler`] task kind.
 ///
 /// Once the connection is established, the `TaskHandle` task creates a
@@ -213,16 +213,21 @@ pub enum TaskKind {
 /// the `Connection` object.
 /// A `CancellationToken` created by the `TaskHandle` task ensures
 /// that the [`WalReceiverConnectionPoller`] task will cancel soon after as the `TaskHandle` is dropped.
+///
+/// [`WalReceiverConnectionHandler`]: Self::WalReceiverConnectionHandler
+/// [`WalReceiverConnectionPoller`]: Self::WalReceiverConnectionPoller
 WalReceiverManager,

-/// The `TaskHandle` task that executes [`walreceiver_connection::handle_walreceiver_connection`].
+/// The `TaskHandle` task that executes `handle_walreceiver_connection`.
 /// Not a `task_mgr` task, but we use this `TaskKind` for its `RequestContext`.
 /// See the comment on [`WalReceiverManager`].
+///
+/// [`WalReceiverManager`]: Self::WalReceiverManager
 WalReceiverConnectionHandler,

 /// The task that polls the `tokio-postgres::Connection` object.
-/// Spawned by task [`WalReceiverConnectionHandler`].
-/// See the comment on [`WalReceiverManager`].
+/// Spawned by task [`WalReceiverConnectionHandler`](Self::WalReceiverConnectionHandler).
+/// See the comment on [`WalReceiverManager`](Self::WalReceiverManager).
 WalReceiverConnectionPoller,

 // Garbage collection worker. One per tenant
@@ -84,6 +84,25 @@ use utils::{
 lsn::{Lsn, RecordLsn},
 };

+/// Declare a failpoint that can use the `pause` failpoint action.
+/// We don't want to block the executor thread, hence, spawn_blocking + await.
+macro_rules! pausable_failpoint {
+($name:literal) => {
+if cfg!(feature = "testing") {
+tokio::task::spawn_blocking({
+let current = tracing::Span::current();
+move || {
+let _entered = current.entered();
+tracing::info!("at failpoint {}", $name);
+fail::fail_point!($name);
+}
+})
+.await
+.expect("spawn_blocking");
+}
+};
+}
+
 pub mod blob_io;
 pub mod block_io;
 pub mod disk_btree;
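The macro above exists so that a paused failpoint blocks a thread from the blocking pool rather than an async executor thread. A hedged, standalone rendering of the same idea as a plain async function; the failpoint name and the sleep action are illustrative, and it assumes the fail crate with its failpoints feature enabled:

// Same idea as the macro above, written out as a function for illustration.
async fn pausable_failpoint(name: &'static str) {
    // fail_point! may block (e.g. with the "pause" or "sleep" actions), so run it
    // on the blocking pool instead of an executor thread.
    tokio::task::spawn_blocking(move || {
        fail::fail_point!(name);
    })
    .await
    .expect("spawn_blocking");
}

#[tokio::main]
async fn main() {
    // A test would arm the failpoint first; "sleep(10)" stalls it for 10ms.
    fail::cfg("in_progress_delete", "sleep(10)").unwrap();
    pausable_failpoint("in_progress_delete").await;
}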
@@ -114,7 +133,7 @@ pub use timeline::{
|
|||||||
// re-export this function so that page_cache.rs can use it.
|
// re-export this function so that page_cache.rs can use it.
|
||||||
pub use crate::tenant::ephemeral_file::writeback as writeback_ephemeral_file;
|
pub use crate::tenant::ephemeral_file::writeback as writeback_ephemeral_file;
|
||||||
|
|
||||||
// re-export for use in storage_sync.rs
|
// re-export for use in remote_timeline_client.rs
|
||||||
pub use crate::tenant::metadata::save_metadata;
|
pub use crate::tenant::metadata::save_metadata;
|
||||||
|
|
||||||
// re-export for use in walreceiver
|
// re-export for use in walreceiver
|
||||||
@@ -410,7 +429,7 @@ impl Tenant {
|
|||||||
.layers
|
.layers
|
||||||
.read()
|
.read()
|
||||||
.await
|
.await
|
||||||
.0
|
.layer_map()
|
||||||
.iter_historic_layers()
|
.iter_historic_layers()
|
||||||
.next()
|
.next()
|
||||||
.is_some(),
|
.is_some(),
|
||||||
@@ -421,8 +440,8 @@ impl Tenant {
|
|||||||
if !picked_local {
|
if !picked_local {
|
||||||
save_metadata(
|
save_metadata(
|
||||||
self.conf,
|
self.conf,
|
||||||
timeline_id,
|
&tenant_id,
|
||||||
tenant_id,
|
&timeline_id,
|
||||||
up_to_date_metadata,
|
up_to_date_metadata,
|
||||||
first_save,
|
first_save,
|
||||||
)
|
)
|
||||||
@@ -451,7 +470,7 @@ impl Tenant {
|
|||||||
) -> anyhow::Result<Arc<Tenant>> {
|
) -> anyhow::Result<Arc<Tenant>> {
|
||||||
// TODO dedup with spawn_load
|
// TODO dedup with spawn_load
|
||||||
let tenant_conf =
|
let tenant_conf =
|
||||||
Self::load_tenant_config(conf, tenant_id).context("load tenant config")?;
|
Self::load_tenant_config(conf, &tenant_id).context("load tenant config")?;
|
||||||
|
|
||||||
let wal_redo_manager = Arc::new(PostgresRedoManager::new(conf, tenant_id));
|
let wal_redo_manager = Arc::new(PostgresRedoManager::new(conf, tenant_id));
|
||||||
let tenant = Arc::new(Tenant::new(
|
let tenant = Arc::new(Tenant::new(
|
||||||
@@ -560,7 +579,7 @@ impl Tenant {
|
|||||||
.map(move |res| {
|
.map(move |res| {
|
||||||
res.with_context(|| format!("download index part for timeline {timeline_id}"))
|
res.with_context(|| format!("download index part for timeline {timeline_id}"))
|
||||||
})
|
})
|
||||||
.instrument(info_span!("download_index_part", timeline=%timeline_id)),
|
.instrument(info_span!("download_index_part", %timeline_id)),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
// Wait for all the download tasks to complete & collect results.
|
// Wait for all the download tasks to complete & collect results.
|
||||||
@@ -646,7 +665,7 @@ impl Tenant {
|
|||||||
span::debug_assert_current_span_has_tenant_id();
|
span::debug_assert_current_span_has_tenant_id();
|
||||||
|
|
||||||
info!("downloading index file for timeline {}", timeline_id);
|
info!("downloading index file for timeline {}", timeline_id);
|
||||||
tokio::fs::create_dir_all(self.conf.timeline_path(&timeline_id, &self.tenant_id))
|
tokio::fs::create_dir_all(self.conf.timeline_path(&self.tenant_id, &timeline_id))
|
||||||
.await
|
.await
|
||||||
.context("Failed to create new timeline directory")?;
|
.context("Failed to create new timeline directory")?;
|
||||||
|
|
||||||
@@ -724,7 +743,7 @@ impl Tenant {
|
|||||||
) -> Arc<Tenant> {
|
) -> Arc<Tenant> {
|
||||||
span::debug_assert_current_span_has_tenant_id();
|
span::debug_assert_current_span_has_tenant_id();
|
||||||
|
|
||||||
let tenant_conf = match Self::load_tenant_config(conf, tenant_id) {
|
let tenant_conf = match Self::load_tenant_config(conf, &tenant_id) {
|
||||||
Ok(conf) => conf,
|
Ok(conf) => conf,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
error!("load tenant config failed: {:?}", e);
|
error!("load tenant config failed: {:?}", e);
|
||||||
@@ -835,7 +854,7 @@ impl Tenant {
|
|||||||
timeline_uninit_mark_file.display()
|
timeline_uninit_mark_file.display()
|
||||||
)
|
)
|
||||||
})?;
|
})?;
|
||||||
let timeline_dir = self.conf.timeline_path(&timeline_id, &self.tenant_id);
|
let timeline_dir = self.conf.timeline_path(&self.tenant_id, &timeline_id);
|
||||||
if let Err(e) =
|
if let Err(e) =
|
||||||
remove_timeline_and_uninit_mark(&timeline_dir, timeline_uninit_mark_file)
|
remove_timeline_and_uninit_mark(&timeline_dir, timeline_uninit_mark_file)
|
||||||
{
|
{
|
||||||
@@ -880,7 +899,7 @@ impl Tenant {
|
|||||||
if let Ok(timeline_id) =
|
if let Ok(timeline_id) =
|
||||||
file_name.to_str().unwrap_or_default().parse::<TimelineId>()
|
file_name.to_str().unwrap_or_default().parse::<TimelineId>()
|
||||||
{
|
{
|
||||||
let metadata = load_metadata(self.conf, timeline_id, self.tenant_id)
|
let metadata = load_metadata(self.conf, &self.tenant_id, &timeline_id)
|
||||||
.context("failed to load metadata")?;
|
.context("failed to load metadata")?;
|
||||||
timelines_to_load.insert(timeline_id, metadata);
|
timelines_to_load.insert(timeline_id, metadata);
|
||||||
} else {
|
} else {
|
||||||
@@ -1349,7 +1368,7 @@ impl Tenant {
|
|||||||
for (timeline_id, timeline) in &timelines_to_compact {
|
for (timeline_id, timeline) in &timelines_to_compact {
|
||||||
timeline
|
timeline
|
||||||
.compact(ctx)
|
.compact(ctx)
|
||||||
.instrument(info_span!("compact_timeline", timeline = %timeline_id))
|
.instrument(info_span!("compact_timeline", %timeline_id))
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1440,12 +1459,12 @@ impl Tenant {
|
|||||||
let layer_removal_guard = timeline.layer_removal_cs.lock().await;
|
let layer_removal_guard = timeline.layer_removal_cs.lock().await;
|
||||||
info!("got layer_removal_cs.lock(), deleting layer files");
|
info!("got layer_removal_cs.lock(), deleting layer files");
|
||||||
|
|
||||||
// NB: storage_sync upload tasks that reference these layers have been cancelled
|
// NB: remote_timeline_client upload tasks that reference these layers have been cancelled
|
||||||
// by the caller.
|
// by the caller.
|
||||||
|
|
||||||
let local_timeline_directory = self
|
let local_timeline_directory = self
|
||||||
.conf
|
.conf
|
||||||
.timeline_path(&timeline.timeline_id, &self.tenant_id);
|
.timeline_path(&self.tenant_id, &timeline.timeline_id);
|
||||||
|
|
||||||
fail::fail_point!("timeline-delete-before-rm", |_| {
|
fail::fail_point!("timeline-delete-before-rm", |_| {
|
||||||
Err(anyhow::anyhow!("failpoint: timeline-delete-before-rm"))?
|
Err(anyhow::anyhow!("failpoint: timeline-delete-before-rm"))?
|
||||||
@@ -1498,20 +1517,7 @@ impl Tenant {
|
|||||||
remote_client.delete_all().await.context("delete_all")?
|
remote_client.delete_all().await.context("delete_all")?
|
||||||
};
|
};
|
||||||
|
|
||||||
// Have a failpoint that can use the `pause` failpoint action.
|
pausable_failpoint!("in_progress_delete");
|
||||||
// We don't want to block the executor thread, hence, spawn_blocking + await.
|
|
||||||
if cfg!(feature = "testing") {
|
|
||||||
tokio::task::spawn_blocking({
|
|
||||||
let current = tracing::Span::current();
|
|
||||||
move || {
|
|
||||||
let _entered = current.entered();
|
|
||||||
tracing::info!("at failpoint in_progress_delete");
|
|
||||||
fail::fail_point!("in_progress_delete");
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.expect("spawn_blocking");
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
{
|
||||||
// Remove the timeline from the map.
|
// Remove the timeline from the map.
|
||||||
@@ -2226,7 +2232,7 @@ impl Tenant {
|
|||||||
/// Locate and load config
|
/// Locate and load config
|
||||||
pub(super) fn load_tenant_config(
|
pub(super) fn load_tenant_config(
|
||||||
conf: &'static PageServerConf,
|
conf: &'static PageServerConf,
|
||||||
tenant_id: TenantId,
|
tenant_id: &TenantId,
|
||||||
) -> anyhow::Result<TenantConfOpt> {
|
) -> anyhow::Result<TenantConfOpt> {
|
||||||
let target_config_path = conf.tenant_config_path(tenant_id);
|
let target_config_path = conf.tenant_config_path(tenant_id);
|
||||||
let target_config_display = target_config_path.display();
|
let target_config_display = target_config_path.display();
|
||||||
@@ -2813,7 +2819,7 @@ impl Tenant {
|
|||||||
timeline_struct.init_empty_layer_map(start_lsn);
|
timeline_struct.init_empty_layer_map(start_lsn);
|
||||||
|
|
||||||
if let Err(e) =
|
if let Err(e) =
|
||||||
self.create_timeline_files(&uninit_mark.timeline_path, new_timeline_id, new_metadata)
|
self.create_timeline_files(&uninit_mark.timeline_path, &new_timeline_id, new_metadata)
|
||||||
{
|
{
|
||||||
error!("Failed to create initial files for timeline {tenant_id}/{new_timeline_id}, cleaning up: {e:?}");
|
error!("Failed to create initial files for timeline {tenant_id}/{new_timeline_id}, cleaning up: {e:?}");
|
||||||
cleanup_timeline_directory(uninit_mark);
|
cleanup_timeline_directory(uninit_mark);
|
||||||
@@ -2832,7 +2838,7 @@ impl Tenant {
|
|||||||
fn create_timeline_files(
|
fn create_timeline_files(
|
||||||
&self,
|
&self,
|
||||||
timeline_path: &Path,
|
timeline_path: &Path,
|
||||||
new_timeline_id: TimelineId,
|
new_timeline_id: &TimelineId,
|
||||||
new_metadata: &TimelineMetadata,
|
new_metadata: &TimelineMetadata,
|
||||||
) -> anyhow::Result<()> {
|
) -> anyhow::Result<()> {
|
||||||
crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
|
crashsafe::create_dir(timeline_path).context("Failed to create timeline directory")?;
|
||||||
@@ -2843,8 +2849,8 @@ impl Tenant {
|
|||||||
|
|
||||||
save_metadata(
|
save_metadata(
|
||||||
self.conf,
|
self.conf,
|
||||||
|
&self.tenant_id,
|
||||||
new_timeline_id,
|
new_timeline_id,
|
||||||
self.tenant_id,
|
|
||||||
new_metadata,
|
new_metadata,
|
||||||
true,
|
true,
|
||||||
)
|
)
|
||||||
@@ -2867,7 +2873,7 @@ impl Tenant {
|
|||||||
timelines.get(&timeline_id).is_none(),
|
timelines.get(&timeline_id).is_none(),
|
||||||
"Timeline {tenant_id}/{timeline_id} already exists in pageserver's memory"
|
"Timeline {tenant_id}/{timeline_id} already exists in pageserver's memory"
|
||||||
);
|
);
|
||||||
let timeline_path = self.conf.timeline_path(&timeline_id, &tenant_id);
|
let timeline_path = self.conf.timeline_path(&tenant_id, &timeline_id);
|
||||||
anyhow::ensure!(
|
anyhow::ensure!(
|
||||||
!timeline_path.exists(),
|
!timeline_path.exists(),
|
||||||
"Timeline {} already exists, cannot create its uninit mark file",
|
"Timeline {} already exists, cannot create its uninit mark file",
|
||||||
@@ -2998,10 +3004,10 @@ pub(crate) enum CreateTenantFilesMode {
 pub(crate) fn create_tenant_files(
 conf: &'static PageServerConf,
 tenant_conf: TenantConfOpt,
-tenant_id: TenantId,
+tenant_id: &TenantId,
 mode: CreateTenantFilesMode,
 ) -> anyhow::Result<PathBuf> {
-let target_tenant_directory = conf.tenant_path(&tenant_id);
+let target_tenant_directory = conf.tenant_path(tenant_id);
 anyhow::ensure!(
 !target_tenant_directory
 .try_exists()
@@ -3052,7 +3058,7 @@ pub(crate) fn create_tenant_files(
 fn try_create_target_tenant_dir(
 conf: &'static PageServerConf,
 tenant_conf: TenantConfOpt,
-tenant_id: TenantId,
+tenant_id: &TenantId,
 mode: CreateTenantFilesMode,
 temporary_tenant_dir: &Path,
 target_tenant_directory: &Path,
@@ -3076,7 +3082,7 @@ fn try_create_target_tenant_dir(
 }

 let temporary_tenant_timelines_dir = rebase_directory(
-&conf.timelines_path(&tenant_id),
+&conf.timelines_path(tenant_id),
 target_tenant_directory,
 temporary_tenant_dir,
 )
@@ -3088,7 +3094,7 @@ fn try_create_target_tenant_dir(
 )
 .with_context(|| format!("resolve tenant {tenant_id} temporary config path"))?;

-Tenant::persist_tenant_config(&tenant_id, &temporary_tenant_config_path, tenant_conf, true)?;
+Tenant::persist_tenant_config(tenant_id, &temporary_tenant_config_path, tenant_conf, true)?;

 crashsafe::create_dir(&temporary_tenant_timelines_dir).with_context(|| {
 format!(
@@ -3376,7 +3382,7 @@ pub mod harness {
 }

 pub fn timeline_path(&self, timeline_id: &TimelineId) -> PathBuf {
-self.conf.timeline_path(timeline_id, &self.tenant_id)
+self.conf.timeline_path(&self.tenant_id, timeline_id)
 }
 }

@@ -4329,13 +4335,13 @@ mod tests {
 // assert freeze_and_flush exercised the initdb optimization
 {
 let state = tline.flush_loop_state.lock().unwrap();
-let
-timeline::FlushLoopState::Running {
-expect_initdb_optimization,
-initdb_optimization_count,
-} = *state else {
+let timeline::FlushLoopState::Running {
+expect_initdb_optimization,
+initdb_optimization_count,
+} = *state
+else {
 panic!("unexpected state: {:?}", *state);
 };
 assert!(expect_initdb_optimization);
 assert!(initdb_optimization_count > 0);
 }
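The hunk above only reflows an existing `let`-`else` binding; the pattern destructures one enum variant and must diverge otherwise. A standalone sketch of the same idiom (the enum and fields here are invented for illustration, not the real `FlushLoopState`):

```rust
#[derive(Debug)]
enum FlushLoopState {
    NotStarted,
    Running {
        expect_initdb_optimization: bool,
        initdb_optimization_count: usize,
    },
}

fn check(state: &FlushLoopState) {
    // `let ... else` binds the fields when the variant matches and must
    // diverge (here: panic) when it does not.
    let FlushLoopState::Running {
        expect_initdb_optimization,
        initdb_optimization_count,
    } = *state
    else {
        panic!("unexpected state: {state:?}");
    };
    assert!(expect_initdb_optimization);
    assert!(initdb_optimization_count > 0);
}

fn main() {
    check(&FlushLoopState::Running {
        expect_initdb_optimization: true,
        initdb_optimization_count: 1,
    });
}
```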
@@ -4370,7 +4376,7 @@ mod tests {

 assert!(!harness
 .conf
-.timeline_path(&TIMELINE_ID, &tenant.tenant_id)
+.timeline_path(&tenant.tenant_id, &TIMELINE_ID)
 .exists());

 assert!(!harness
@@ -442,7 +442,7 @@ where
 writer: W,

 ///
-/// stack[0] is the current root page, stack.last() is the leaf.
+/// `stack[0]` is the current root page, `stack.last()` is the leaf.
 ///
 /// We maintain the length of the stack to be always greater than zero.
 /// Two exceptions are:
@@ -55,7 +55,7 @@ impl EphemeralFile {
 l.next_file_id += 1;

 let filename = conf
-.timeline_path(&timeline_id, &tenant_id)
+.timeline_path(&tenant_id, &timeline_id)
 .join(PathBuf::from(format!("ephemeral-{}", file_id)));

 let file = VirtualFile::open_with_options(
@@ -346,7 +346,7 @@ mod tests {

 let tenant_id = TenantId::from_str("11000000000000000000000000000000").unwrap();
 let timeline_id = TimelineId::from_str("22000000000000000000000000000000").unwrap();
-fs::create_dir_all(conf.timeline_path(&timeline_id, &tenant_id))?;
+fs::create_dir_all(conf.timeline_path(&tenant_id, &timeline_id))?;

 Ok((conf, tenant_id, timeline_id))
 }
@@ -16,7 +16,7 @@
 //! Other read methods are less critical but still impact performance of background tasks.
 //!
 //! This data structure relies on a persistent/immutable binary search tree. See the
-//! following lecture for an introduction https://www.youtube.com/watch?v=WqCWghETNDc&t=581s
+//! following lecture for an introduction <https://www.youtube.com/watch?v=WqCWghETNDc&t=581s>
 //! Summary: A persistent/immutable BST (and persistent data structures in general) allows
 //! you to modify the tree in such a way that each modification creates a new "version"
 //! of the tree. When you modify it, you get a new version, but all previous versions are
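As a rough illustration of the "each modification creates a new version" property described in the doc comment above, here is a tiny example using the `rpds` persistent map that the layer map builds on. The constructor and method names (`default`, `insert`, `get`) are written from memory of the rpds API and should be checked against the crate docs; the key/value types are placeholders, not the real layer types:

```rust
use rpds::RedBlackTreeMapSync;

fn main() {
    // Version 0: empty map.
    let v0: RedBlackTreeMapSync<i128, &str> = RedBlackTreeMapSync::default();

    // Inserting returns a *new* version; v0 is left untouched and still readable.
    let v1 = v0.insert(10, "layer-a");
    let v2 = v1.insert(20, "layer-b");

    assert_eq!(v0.get(&10), None);
    assert_eq!(v1.get(&10), Some(&"layer-a"));
    assert_eq!(v2.get(&20), Some(&"layer-b"));
}
```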
@@ -40,7 +40,7 @@
 //! afterwards. We can add layers as long as they have larger LSNs than any previous layer in
 //! the map, but if we need to remove a layer, or insert anything with an older LSN, we need
 //! to throw away most of the persistent BST and build a new one, starting from the oldest
-//! LSN. See `LayerMap::flush_updates()`.
+//! LSN. See [`LayerMap::flush_updates()`].
 //!

 mod historic_layer_coverage;
@@ -60,7 +60,6 @@ use utils::lsn::Lsn;
 use historic_layer_coverage::BufferedHistoricLayerCoverage;
 pub use historic_layer_coverage::LayerKey;

-use super::storage_layer::range_eq;
 use super::storage_layer::PersistentLayerDesc;

 ///
@@ -365,7 +364,7 @@ impl LayerMap {
 }

 pub fn is_l0(layer: &PersistentLayerDesc) -> bool {
-range_eq(&layer.get_key_range(), &(Key::MIN..Key::MAX))
+layer.get_key_range() == (Key::MIN..Key::MAX)
 }

 /// This function determines which layers are counted in `count_deltas`:
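The `range_eq` helper is dropped here in favor of plain `==`, which works because `std::ops::Range<T>` already derives `PartialEq` when `T` does. A quick standalone check of the equivalence:

```rust
fn main() {
    let full = 0u64..u64::MAX;
    let partial = 0u64..100;

    // Range<T> derives PartialEq, so component-wise comparison comes for free.
    assert_eq!(full, 0..u64::MAX);
    assert_ne!(full, partial);

    // Equivalent to the removed helper: a.start == b.start && a.end == b.end
    assert!(full.start == 0 && full.end == u64::MAX);
}
```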
@@ -397,7 +396,7 @@ impl LayerMap {
 }

 // Case 2
-if range_eq(partition_range, &(Key::MIN..Key::MAX)) {
+if partition_range == &(Key::MIN..Key::MAX) {
 return true;
 }

@@ -652,19 +651,35 @@ impl LayerMap {
 #[cfg(test)]
 mod tests {
 use super::LayerMap;
-use crate::tenant::storage_layer::{tests::LayerDescriptor, LayerFileName};
+use crate::tenant::storage_layer::LayerFileName;
 use std::str::FromStr;
 use std::sync::Arc;

 mod l0_delta_layers_updated {

 use crate::tenant::{
-storage_layer::{PersistentLayer, PersistentLayerDesc},
-timeline::LayerFileManager,
+storage_layer::{AsLayerDesc, PersistentLayerDesc},
+timeline::layer_manager::LayerFileManager,
 };

 use super::*;

+struct LayerObject(PersistentLayerDesc);
+
+impl AsLayerDesc for LayerObject {
+fn layer_desc(&self) -> &PersistentLayerDesc {
+&self.0
+}
+}
+
+impl LayerObject {
+fn new(desc: PersistentLayerDesc) -> Self {
+LayerObject(desc)
+}
+}
+
+type TestLayerFileManager = LayerFileManager<LayerObject>;
+
 #[test]
 fn for_full_range_delta() {
 // l0_delta_layers are used by compaction, and should observe all buffered updates
@@ -701,18 +716,18 @@ mod tests {

 let layer = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000053423C21-0000000053424D69";
 let layer = LayerFileName::from_str(layer).unwrap();
-let layer = LayerDescriptor::from(layer);
+let layer = PersistentLayerDesc::from(layer);

 // same skeletan construction; see scenario below
-let not_found = Arc::new(layer.clone());
-let new_version = Arc::new(layer);
+let not_found = Arc::new(LayerObject::new(layer.clone()));
+let new_version = Arc::new(LayerObject::new(layer));

 // after the immutable storage state refactor, the replace operation
 // will not use layer map any more. We keep it here for consistency in test cases
 // and can remove it in the future.
 let _map = LayerMap::default();

-let mut mapping = LayerFileManager::new();
+let mut mapping = TestLayerFileManager::new();

 mapping
 .replace_and_verify(not_found, new_version)
@@ -721,10 +736,10 @@ mod tests {

 fn l0_delta_layers_updated_scenario(layer_name: &str, expected_l0: bool) {
 let name = LayerFileName::from_str(layer_name).unwrap();
-let skeleton = LayerDescriptor::from(name);
+let skeleton = PersistentLayerDesc::from(name);

-let remote = Arc::new(skeleton.clone());
-let downloaded = Arc::new(skeleton);
+let remote = Arc::new(LayerObject::new(skeleton.clone()));
+let downloaded = Arc::new(LayerObject::new(skeleton));

 let mut map = LayerMap::default();
 let mut mapping = LayerFileManager::new();
@@ -122,8 +122,7 @@ impl<Value: Clone> HistoricLayerCoverage<Value> {
 self.head = self
 .historic
 .iter()
-.rev()
-.next()
+.next_back()
 .map(|(_, v)| v.clone())
 .unwrap_or_default();
 }
@@ -412,7 +411,7 @@ fn test_persistent_overlapping() {
 /// still be more critical.
 ///
 /// See this for more on persistent and retroactive techniques:
-/// https://www.youtube.com/watch?v=WqCWghETNDc&t=581s
+/// <https://www.youtube.com/watch?v=WqCWghETNDc&t=581s>
 pub struct BufferedHistoricLayerCoverage<Value> {
 /// A persistent layer map that we rebuild when we need to retroactively update
 historic_coverage: HistoricLayerCoverage<Value>,
@@ -2,7 +2,7 @@ use std::ops::Range;

 // NOTE the `im` crate has 20x more downloads and also has
 // persistent/immutable BTree. But it's bugged so rpds is a
-// better choice https://github.com/neondatabase/neon/issues/3395
+// better choice <https://github.com/neondatabase/neon/issues/3395>
 use rpds::RedBlackTreeMapSync;

 /// Data structure that can efficiently:
@@ -11,7 +11,7 @@ use rpds::RedBlackTreeMapSync;
 /// - insert layers in non-decreasing lsn.start order
 ///
 /// For a detailed explanation and justification of this approach, see:
-/// https://neon.tech/blog/persistent-structures-in-neons-wal-indexing
+/// <https://neon.tech/blog/persistent-structures-in-neons-wal-indexing>
 ///
 /// NOTE The struct is parameterized over Value for easier
 /// testing, but in practice it's some sort of layer.
@@ -113,8 +113,7 @@ impl<Value: Clone> LayerCoverage<Value> {
 pub fn query(&self, key: i128) -> Option<Value> {
 self.nodes
 .range(..=key)
-.rev()
-.next()?
+.next_back()?
 .1
 .as_ref()
 .map(|(_, v)| v.clone())
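Both of the hunks above replace the `.rev().next()` combination with `next_back()`, which asks the double-ended iterator directly for its last element (and is what clippy suggests). A small standalone equivalent of the coverage query idea, using a std `BTreeMap` range:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut nodes = BTreeMap::new();
    nodes.insert(10_i128, "a");
    nodes.insert(20_i128, "b");
    nodes.insert(30_i128, "c");

    let key = 25_i128;
    // Greatest entry with key <= 25: `.next_back()` replaces `.rev().next()`.
    let hit = nodes.range(..=key).next_back().map(|(_, v)| *v);
    assert_eq!(hit, Some("b"));
}
```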
@@ -24,7 +24,7 @@
 //! Currently, this is not used in the system. Future refactors will ensure
 //! the storage state will be recorded in this file, and the system can be
 //! recovered from this file. This is tracked in
-//! https://github.com/neondatabase/neon/issues/4418
+//! <https://github.com/neondatabase/neon/issues/4418>

 use std::io::{self, Read, Write};

@@ -1,10 +1,12 @@
 //! Every image of a certain timeline from [`crate::tenant::Tenant`]
 //! has a metadata that needs to be stored persistently.
 //!
-//! Later, the file gets is used in [`crate::remote_storage::storage_sync`] as a part of
+//! Later, the file gets used in [`remote_timeline_client`] as a part of
 //! external storage import and export operations.
 //!
 //! The module contains all structs and related helper methods related to timeline metadata.
+//!
+//! [`remote_timeline_client`]: super::remote_timeline_client

 use std::fs::{File, OpenOptions};
 use std::io::Write;
@@ -232,13 +234,13 @@ impl TimelineMetadata {
 /// Save timeline metadata to file
 pub fn save_metadata(
 conf: &'static PageServerConf,
-timeline_id: TimelineId,
-tenant_id: TenantId,
+tenant_id: &TenantId,
+timeline_id: &TimelineId,
 data: &TimelineMetadata,
 first_save: bool,
 ) -> anyhow::Result<()> {
 let _enter = info_span!("saving metadata").entered();
-let path = conf.metadata_path(timeline_id, tenant_id);
+let path = conf.metadata_path(tenant_id, timeline_id);
 // use OpenOptions to ensure file presence is consistent with first_save
 let mut file = VirtualFile::open_with_options(
 &path,
@@ -267,10 +269,10 @@ pub fn save_metadata(

 pub fn load_metadata(
 conf: &'static PageServerConf,
-timeline_id: TimelineId,
-tenant_id: TenantId,
+tenant_id: &TenantId,
+timeline_id: &TimelineId,
 ) -> anyhow::Result<TimelineMetadata> {
-let metadata_path = conf.metadata_path(timeline_id, tenant_id);
+let metadata_path = conf.metadata_path(tenant_id, timeline_id);
 let metadata_bytes = std::fs::read(&metadata_path).with_context(|| {
 format!(
 "Failed to read metadata bytes from path {}",
@@ -184,9 +184,9 @@ pub fn schedule_local_tenant_processing(
 format!("Could not parse tenant id out of the tenant dir name in path {tenant_path:?}")
 })?;

-let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(tenant_id);
+let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_id);
 anyhow::ensure!(
-!conf.tenant_ignore_mark_file_path(tenant_id).exists(),
+!conf.tenant_ignore_mark_file_path(&tenant_id).exists(),
 "Cannot load tenant, ignore mark found at {tenant_ignore_mark:?}"
 );

@@ -310,7 +310,7 @@ pub async fn create_tenant(
 // We're holding the tenants lock in write mode while doing local IO.
 // If this section ever becomes contentious, introduce a new `TenantState::Creating`
 // and do the work in that state.
-let tenant_directory = super::create_tenant_files(conf, tenant_conf, tenant_id, CreateTenantFilesMode::Create)?;
+let tenant_directory = super::create_tenant_files(conf, tenant_conf, &tenant_id, CreateTenantFilesMode::Create)?;
 // TODO: tenant directory remains on disk if we bail out from here on.
 // See https://github.com/neondatabase/neon/issues/4233

@@ -344,14 +344,9 @@ pub async fn set_new_tenant_config(
 info!("configuring tenant {tenant_id}");
 let tenant = get_tenant(tenant_id, true).await?;

-let tenant_config_path = conf.tenant_config_path(tenant_id);
-Tenant::persist_tenant_config(
-&tenant.tenant_id(),
-&tenant_config_path,
-new_tenant_conf,
-false,
-)
-.map_err(SetNewTenantConfigError::Persist)?;
+let tenant_config_path = conf.tenant_config_path(&tenant_id);
+Tenant::persist_tenant_config(&tenant_id, &tenant_config_path, new_tenant_conf, false)
+.map_err(SetNewTenantConfigError::Persist)?;
 tenant.set_new_tenant_config(new_tenant_conf);
 Ok(())
 }
@@ -435,7 +430,7 @@ pub async fn detach_tenant(
 // Ignored tenants are not present in memory and will bail the removal from memory operation.
 // Before returning the error, check for ignored tenant removal case — we only need to clean its local files then.
 if detach_ignored && matches!(removal_result, Err(TenantStateError::NotFound(_))) {
-let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(tenant_id);
+let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_id);
 if tenant_ignore_mark.exists() {
 info!("Detaching an ignored tenant");
 local_files_cleanup_operation(tenant_id)
@@ -457,7 +452,7 @@ pub async fn load_tenant(
 ) -> Result<(), TenantMapInsertError> {
 tenant_map_insert(tenant_id, || {
 let tenant_path = conf.tenant_path(&tenant_id);
-let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(tenant_id);
+let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_id);
 if tenant_ignore_mark.exists() {
 std::fs::remove_file(&tenant_ignore_mark)
 .with_context(|| format!("Failed to remove tenant ignore mark {tenant_ignore_mark:?} during tenant loading"))?;
@@ -478,7 +473,7 @@ pub async fn ignore_tenant(
 tenant_id: TenantId,
 ) -> Result<(), TenantStateError> {
 remove_tenant_from_memory(tenant_id, async {
-let ignore_mark_file = conf.tenant_ignore_mark_file_path(tenant_id);
+let ignore_mark_file = conf.tenant_ignore_mark_file_path(&tenant_id);
 fs::File::create(&ignore_mark_file)
 .await
 .context("Failed to create ignore mark file")
@@ -525,7 +520,7 @@ pub async fn attach_tenant(
 ctx: &RequestContext,
 ) -> Result<(), TenantMapInsertError> {
 tenant_map_insert(tenant_id, || {
-let tenant_dir = create_tenant_files(conf, tenant_conf, tenant_id, CreateTenantFilesMode::Attach)?;
+let tenant_dir = create_tenant_files(conf, tenant_conf, &tenant_id, CreateTenantFilesMode::Attach)?;
 // TODO: tenant directory remains on disk if we bail out from here on.
 // See https://github.com/neondatabase/neon/issues/4233

@@ -695,7 +690,7 @@ pub async fn immediate_gc(
 fail::fail_point!("immediate_gc_task_pre");
 let result = tenant
 .gc_iteration(Some(timeline_id), gc_horizon, pitr, &ctx)
-.instrument(info_span!("manual_gc", tenant = %tenant_id, timeline = %timeline_id))
+.instrument(info_span!("manual_gc", %tenant_id, %timeline_id))
 .await;
 // FIXME: `gc_iteration` can return an error for multiple reasons; we should handle it
 // better once the types support it.
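The span construction above switches from explicitly named fields (`tenant = %tenant_id`) to tracing's shorthand, where `%tenant_id` records a field named after the variable using its `Display` form. A tiny self-contained sketch of the same macro usage, assuming the `tracing` and `tracing-subscriber` crates (the subscriber setup and values are illustrative only):

```rust
use tracing::{info, info_span};

fn main() {
    tracing_subscriber::fmt().init();

    let tenant_id = "1100000000000000";
    let timeline_id = "2200000000000000";

    // `%var` is shorthand for `var = %var`: the field name is taken from the
    // variable, and the value is recorded with its Display implementation.
    let span = info_span!("manual_gc", %tenant_id, %timeline_id);
    let _guard = span.enter();
    info!("running gc iteration");
}
```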
@@ -745,9 +740,7 @@ pub async fn immediate_compact(
 async move {
 let result = timeline
 .compact(&ctx)
-.instrument(
-info_span!("manual_compact", tenant = %tenant_id, timeline = %timeline_id),
-)
+.instrument(info_span!("manual_compact", %tenant_id, %timeline_id))
 .await;

 match task_done.send(result) {
@@ -135,7 +135,7 @@
 //! - Initiate upload queue with that [`IndexPart`].
 //! - Reschedule all lost operations by comparing the local filesystem state
 //! and remote state as per [`IndexPart`]. This is done in
-//! [`Timeline::timeline_init_and_sync`] and [`Timeline::reconcile_with_remote`].
+//! [`Tenant::timeline_init_and_sync`] and [`Timeline::reconcile_with_remote`].
 //!
 //! Note that if we crash during file deletion between the index update
 //! that removes the file from the list of files, and deleting the remote file,
@@ -163,8 +163,8 @@
 //! - download their remote [`IndexPart`]s
 //! - create `Timeline` struct and a `RemoteTimelineClient`
 //! - initialize the client's upload queue with its `IndexPart`
-//! - create [`RemoteLayer`] instances for layers that are referenced by `IndexPart`
-//! but not present locally
+//! - create [`RemoteLayer`](super::storage_layer::RemoteLayer) instances
+//! for layers that are referenced by `IndexPart` but not present locally
 //! - schedule uploads for layers that are only present locally.
 //! - if the remote `IndexPart`'s metadata was newer than the metadata in
 //! the local filesystem, write the remote metadata to the local filesystem
@@ -198,6 +198,8 @@
 //! in remote storage.
 //! But note that we don't test any of this right now.
 //!
+//! [`Tenant::timeline_init_and_sync`]: super::Tenant::timeline_init_and_sync
+//! [`Timeline::reconcile_with_remote`]: super::Timeline::reconcile_with_remote

 mod delete;
 mod download;
@@ -442,8 +444,8 @@ impl RemoteTimelineClient {
 let index_part = download::download_index_part(
 self.conf,
 &self.storage_impl,
-self.tenant_id,
-self.timeline_id,
+&self.tenant_id,
+&self.timeline_id,
 )
 .measure_remote_op(
 self.tenant_id,
@@ -748,25 +750,13 @@ impl RemoteTimelineClient {
 stopped.deleted_at = SetDeletedFlagProgress::NotRunning;
 });

-// Have a failpoint that can use the `pause` failpoint action.
-// We don't want to block the executor thread, hence, spawn_blocking + await.
-if cfg!(feature = "testing") {
-tokio::task::spawn_blocking({
-let current = tracing::Span::current();
-move || {
-let _entered = current.entered();
-tracing::info!("at failpoint persist_deleted_index_part");
-fail::fail_point!("persist_deleted_index_part");
-}
-})
-.await
-.expect("spawn_blocking");
-}
+pausable_failpoint!("persist_deleted_index_part");
 upload::upload_index_part(
 self.conf,
 &self.storage_impl,
-self.tenant_id,
-self.timeline_id,
+&self.tenant_id,
+&self.timeline_id,
 &index_part_with_deleted_at,
 )
 .await?;
@@ -841,7 +831,7 @@ impl RemoteTimelineClient {

 // Do not delete index part yet, it is needed for possible retry. If we remove it first
 // and retry will arrive to different pageserver there wont be any traces of it on remote storage
-let timeline_path = self.conf.timeline_path(&self.timeline_id, &self.tenant_id);
+let timeline_path = self.conf.timeline_path(&self.tenant_id, &self.timeline_id);
 let timeline_storage_path = self.conf.remote_path(&timeline_path)?;

 let remaining = self
@@ -852,14 +842,16 @@ impl RemoteTimelineClient {
 let remaining: Vec<RemotePath> = remaining
 .into_iter()
 .filter(|p| p.object_name() != Some(IndexPart::FILE_NAME))
+.inspect(|path| {
+if let Some(name) = path.object_name() {
+info!(%name, "deleting a file not referenced from index_part.json");
+} else {
+warn!(%path, "deleting a nameless or non-utf8 object not referenced from index_part.json");
+}
+})
 .collect();

 if !remaining.is_empty() {
-warn!(
-"Found {} files not bound to index_file.json, proceeding with their deletion",
-remaining.len()
-);
-warn!("About to remove {} files", remaining.len());
 self.storage_impl.delete_objects(&remaining).await?;
 }

@@ -868,7 +860,7 @@ impl RemoteTimelineClient {
 debug!("deleting index part");
 self.storage_impl.delete(&index_file_path).await?;

-info!(deletions_queued, "done deleting, including index_part.json");
+info!(prefix=%timeline_storage_path, referenced=deletions_queued, not_referenced=%remaining.len(), "done deleting in timeline prefix, including index_part.json");

 Ok(())
 }
@@ -933,11 +925,11 @@ impl RemoteTimelineClient {

 // Assign unique ID to this task
 upload_queue.task_counter += 1;
-let task_id = upload_queue.task_counter;
+let upload_task_id = upload_queue.task_counter;

 // Add it to the in-progress map
 let task = Arc::new(UploadTask {
-task_id,
+task_id: upload_task_id,
 op: next_op,
 retries: AtomicU32::new(0),
 });
@@ -947,6 +939,8 @@ impl RemoteTimelineClient {

 // Spawn task to perform the task
 let self_rc = Arc::clone(self);
+let tenant_id = self.tenant_id;
+let timeline_id = self.timeline_id;
 task_mgr::spawn(
 self.runtime.handle(),
 TaskKind::RemoteUploadTask,
@@ -958,7 +952,7 @@ impl RemoteTimelineClient {
 self_rc.perform_upload_task(task).await;
 Ok(())
 }
-.instrument(info_span!(parent: None, "remote_upload", tenant = %self.tenant_id, timeline = %self.timeline_id, upload_task_id = %task_id)),
+.instrument(info_span!(parent: None, "remote_upload", %tenant_id, %timeline_id, %upload_task_id)),
 );

 // Loop back to process next task
@@ -1003,7 +997,7 @@ impl RemoteTimelineClient {
 UploadOp::UploadLayer(ref layer_file_name, ref layer_metadata) => {
 let path = &self
 .conf
-.timeline_path(&self.timeline_id, &self.tenant_id)
+.timeline_path(&self.tenant_id, &self.timeline_id)
 .join(layer_file_name.file_name());
 upload::upload_timeline_layer(
 self.conf,
@@ -1024,8 +1018,8 @@ impl RemoteTimelineClient {
 let res = upload::upload_index_part(
 self.conf,
 &self.storage_impl,
-self.tenant_id,
-self.timeline_id,
+&self.tenant_id,
+&self.timeline_id,
 index_part,
 )
 .measure_remote_op(
@@ -1044,7 +1038,7 @@ impl RemoteTimelineClient {
 UploadOp::Delete(delete) => {
 let path = &self
 .conf
-.timeline_path(&self.timeline_id, &self.tenant_id)
+.timeline_path(&self.tenant_id, &self.timeline_id)
 .join(delete.layer_file_name.file_name());
 delete::delete_layer(self.conf, &self.storage_impl, path)
 .measure_remote_op(
@@ -19,9 +19,10 @@ pub(super) async fn delete_layer<'a>(

 let path_to_delete = conf.remote_path(local_layer_path)?;

-// XXX: If the deletion fails because the object already didn't exist,
-// it would be good to just issue a warning but consider it success.
-// https://github.com/neondatabase/neon/issues/2934
+// We don't want to print an error if the delete failed if the file has
+// already been deleted. Thankfully, in this situation S3 already
+// does not yield an error. While OS-provided local file system APIs do yield
+// errors, we avoid them in the `LocalFs` wrapper.
 storage.delete(&path_to_delete).await.with_context(|| {
 format!("Failed to delete remote layer from storage at {path_to_delete:?}")
 })
@@ -46,7 +46,7 @@ pub async fn download_layer_file<'a>(
 ) -> Result<u64, DownloadError> {
 debug_assert_current_span_has_tenant_and_timeline_id();

-let timeline_path = conf.timeline_path(&timeline_id, &tenant_id);
+let timeline_path = conf.timeline_path(&tenant_id, &timeline_id);

 let local_path = timeline_path.join(layer_file_name.file_name());

@@ -229,11 +229,11 @@ pub async fn list_remote_timelines<'a>(
 pub(super) async fn download_index_part(
 conf: &'static PageServerConf,
 storage: &GenericRemoteStorage,
-tenant_id: TenantId,
-timeline_id: TimelineId,
+tenant_id: &TenantId,
+timeline_id: &TimelineId,
 ) -> Result<IndexPart, DownloadError> {
 let index_part_path = conf
-.metadata_path(timeline_id, tenant_id)
+.metadata_path(tenant_id, timeline_id)
 .with_file_name(IndexPart::FILE_NAME);
 let part_storage_path = conf
 .remote_path(&index_part_path)
@@ -2,7 +2,7 @@

 use anyhow::{bail, Context};
 use fail::fail_point;
-use std::path::Path;
+use std::{io::ErrorKind, path::Path};
 use tokio::fs;

 use crate::{config::PageServerConf, tenant::remote_timeline_client::index::IndexPart};
@@ -11,12 +11,14 @@ use utils::id::{TenantId, TimelineId};

 use super::index::LayerFileMetadata;

+use tracing::info;
+
 /// Serializes and uploads the given index part data to the remote storage.
 pub(super) async fn upload_index_part<'a>(
 conf: &'static PageServerConf,
 storage: &'a GenericRemoteStorage,
-tenant_id: TenantId,
-timeline_id: TimelineId,
+tenant_id: &TenantId,
+timeline_id: &TimelineId,
 index_part: &'a IndexPart,
 ) -> anyhow::Result<()> {
 tracing::trace!("uploading new index part");
@@ -31,7 +33,7 @@ pub(super) async fn upload_index_part<'a>(
 let index_part_bytes = tokio::io::BufReader::new(std::io::Cursor::new(index_part_bytes));

 let index_part_path = conf
-.metadata_path(timeline_id, tenant_id)
+.metadata_path(tenant_id, timeline_id)
 .with_file_name(IndexPart::FILE_NAME);
 let storage_path = conf.remote_path(&index_part_path)?;

@@ -56,9 +58,21 @@ pub(super) async fn upload_timeline_layer<'a>(
 });
 let storage_path = conf.remote_path(source_path)?;

-let source_file = fs::File::open(&source_path)
-.await
-.with_context(|| format!("Failed to open a source file for layer {source_path:?}"))?;
+let source_file_res = fs::File::open(&source_path).await;
+let source_file = match source_file_res {
+Ok(source_file) => source_file,
+Err(e) if e.kind() == ErrorKind::NotFound => {
+// If we encounter this arm, it wasn't intended, but it's also not
+// a big problem, if it's because the file was deleted before an
+// upload. However, a nonexistent file can also be indicative of
+// something worse, like when a file is scheduled for upload before
+// it has been written to disk yet.
+info!(path = %source_path.display(), "File to upload doesn't exist. Likely the file has been deleted and an upload is not required any more.");
+return Ok(());
+}
+Err(e) => Err(e)
+.with_context(|| format!("Failed to open a source file for layer {source_path:?}"))?,
+};

 let fs_size = source_file
 .metadata()
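The new `upload_timeline_layer` arm above treats a missing source file as a benign no-op instead of an error. A minimal standalone sketch of the same match shape on `tokio::fs::File::open`; the function name, log message, and return type are placeholders, not the real helper:

```rust
use std::io::ErrorKind;
use tokio::fs;

async fn open_or_skip(path: &std::path::Path) -> anyhow::Result<Option<fs::File>> {
    match fs::File::open(path).await {
        Ok(file) => Ok(Some(file)),
        // A vanished file means there is nothing left to upload; report success.
        Err(e) if e.kind() == ErrorKind::NotFound => {
            tracing::info!(path = %path.display(), "file is gone, skipping upload");
            Ok(None)
        }
        // Any other I/O error is still a real failure.
        Err(e) => Err(anyhow::Error::new(e).context("failed to open source file")),
    }
}
```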
@@ -110,11 +110,11 @@ pub struct TimelineInputs {
 ///
 /// Tenant size does not consider the latest state, but only the state until next_gc_cutoff, which
 /// is updated on-demand, during the start of this calculation and separate from the
-/// [`Timeline::latest_gc_cutoff`].
+/// [`TimelineInputs::latest_gc_cutoff`].
 ///
 /// For timelines in general:
 ///
-/// ```ignore
+/// ```text
 /// 0-----|---------|----|------------| · · · · · |·> lsn
 /// initdb_lsn branchpoints* next_gc_cutoff latest
 /// ```
@@ -5,16 +5,13 @@ use utils::tracing_span_assert::{check_fields_present, MultiNameExtractor};
 pub(crate) fn debug_assert_current_span_has_tenant_id() {}

 #[cfg(debug_assertions)]
-pub(crate) static TENANT_ID_EXTRACTOR: once_cell::sync::Lazy<MultiNameExtractor<2>> =
-once_cell::sync::Lazy::new(|| MultiNameExtractor::new("TenantId", ["tenant_id", "tenant"]));
+pub(crate) static TENANT_ID_EXTRACTOR: once_cell::sync::Lazy<MultiNameExtractor<1>> =
+once_cell::sync::Lazy::new(|| MultiNameExtractor::new("TenantId", ["tenant_id"]));

 #[cfg(debug_assertions)]
 #[track_caller]
 pub(crate) fn debug_assert_current_span_has_tenant_id() {
-if let Err(missing) = check_fields_present([&*TENANT_ID_EXTRACTOR]) {
-panic!(
-"missing extractors: {:?}",
-missing.into_iter().map(|e| e.name()).collect::<Vec<_>>()
-)
+if let Err(missing) = check_fields_present!([&*TENANT_ID_EXTRACTOR]) {
+panic!("missing extractors: {missing:?}")
 }
 }
@@ -41,7 +41,7 @@ pub use inmemory_layer::InMemoryLayer;
 pub use layer_desc::{PersistentLayerDesc, PersistentLayerKey};
 pub use remote_layer::RemoteLayer;

-use super::layer_map::BatchedUpdates;
+use super::timeline::layer_manager::LayerManager;

 pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool
 where
@@ -54,13 +54,6 @@ where
 }
 }

-pub fn range_eq<T>(a: &Range<T>, b: &Range<T>) -> bool
-where
-T: PartialEq<T>,
-{
-a.start == b.start && a.end == b.end
-}
-
 /// Struct used to communicate across calls to 'get_value_reconstruct_data'.
 ///
 /// Before first call, you can fill in 'page_img' if you have an older cached
@@ -169,6 +162,9 @@ impl LayerAccessStats {
 /// The caller is responsible for recording a residence event
 /// using [`record_residence_event`] before calling `latest_activity`.
 /// If they don't, [`latest_activity`] will return `None`.
+///
+/// [`record_residence_event`]: Self::record_residence_event
+/// [`latest_activity`]: Self::latest_activity
 pub(crate) fn empty_will_record_residence_event_later() -> Self {
 LayerAccessStats(Mutex::default())
 }
@@ -176,8 +172,11 @@ impl LayerAccessStats {
 /// Create an empty stats object and record a [`LayerLoad`] event with the given residence status.
 ///
 /// See [`record_residence_event`] for why you need to do this while holding the layer map lock.
+///
+/// [`LayerLoad`]: LayerResidenceEventReason::LayerLoad
+/// [`record_residence_event`]: Self::record_residence_event
 pub(crate) fn for_loading_layer(
-layer_map_lock_held_witness: &BatchedUpdates<'_>,
+layer_map_lock_held_witness: &LayerManager,
 status: LayerResidenceStatus,
 ) -> Self {
 let new = LayerAccessStats(Mutex::new(LayerAccessStatsLocked::default()));
@@ -194,9 +193,11 @@ impl LayerAccessStats {
 /// The `new_status` is not recorded in `self`.
 ///
 /// See [`record_residence_event`] for why you need to do this while holding the layer map lock.
+///
+/// [`record_residence_event`]: Self::record_residence_event
 pub(crate) fn clone_for_residence_change(
 &self,
-layer_map_lock_held_witness: &BatchedUpdates<'_>,
+layer_map_lock_held_witness: &LayerManager,
 new_status: LayerResidenceStatus,
 ) -> LayerAccessStats {
 let clone = {
@@ -228,7 +229,7 @@ impl LayerAccessStats {
 ///
 pub(crate) fn record_residence_event(
 &self,
-_layer_map_lock_held_witness: &BatchedUpdates<'_>,
+_layer_map_lock_held_witness: &LayerManager,
 status: LayerResidenceStatus,
 reason: LayerResidenceEventReason,
 ) {
@@ -301,11 +302,13 @@ impl LayerAccessStats {
 /// implementation error. This function logs a rate-limited warning in that case.
 ///
 /// TODO: use type system to avoid the need for `fallback`.
-/// The approach in https://github.com/neondatabase/neon/pull/3775
+/// The approach in <https://github.com/neondatabase/neon/pull/3775>
 /// could be used to enforce that a residence event is recorded
 /// before a layer is added to the layer map. We could also have
 /// a layer wrapper type that holds the LayerAccessStats, and ensure
 /// that that type can only be produced by inserting into the layer map.
+///
+/// [`record_residence_event`]: Self::record_residence_event
 pub(crate) fn latest_activity(&self) -> Option<SystemTime> {
 let locked = self.0.lock().unwrap();
 let inner = &locked.for_eviction_policy;
@@ -330,7 +333,7 @@ impl LayerAccessStats {
 }

 /// Supertrait of the [`Layer`] trait that captures the bare minimum interface
-/// required by [`LayerMap`].
+/// required by [`LayerMap`](super::layer_map::LayerMap).
 ///
 /// All layers should implement a minimal `std::fmt::Debug` without tenant or
 /// timeline names, because those are known in the context of which the layers
@@ -377,12 +380,18 @@ pub trait Layer: std::fmt::Debug + std::fmt::Display + Send + Sync {
 fn dump(&self, verbose: bool, ctx: &RequestContext) -> Result<()>;
 }

-/// Returned by [`Layer::iter`]
+/// Returned by [`PersistentLayer::iter`]
 pub type LayerIter<'i> = Box<dyn Iterator<Item = Result<(Key, Lsn, Value)>> + 'i + Send>;

-/// Returned by [`Layer::key_iter`]
+/// Returned by [`PersistentLayer::key_iter`]
 pub type LayerKeyIter<'i> = Box<dyn Iterator<Item = (Key, Lsn, u64)> + 'i + Send>;

+/// Get a layer descriptor from a layer.
+pub trait AsLayerDesc {
+/// Get the layer descriptor.
+fn layer_desc(&self) -> &PersistentLayerDesc;
+}
+
 /// A Layer contains all data in a "rectangle" consisting of a range of keys and
 /// range of LSNs.
 ///
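The new `AsLayerDesc` trait factors the descriptor accessor out of `PersistentLayer`, so anything that can hand out a `PersistentLayerDesc` (real layers, or thin test wrappers like the `LayerObject` added in the layer-map tests) can be handled generically. A rough sketch of the idea with stand-in types; the `file_size` field and helper function are invented for illustration:

```rust
// Stand-ins for the pageserver types; only the shape matters here.
#[derive(Clone, Debug)]
struct PersistentLayerDesc {
    file_size: u64,
}

trait AsLayerDesc {
    fn layer_desc(&self) -> &PersistentLayerDesc;
}

// A test-only wrapper can implement the trait without being a full layer.
struct LayerObject(PersistentLayerDesc);

impl AsLayerDesc for LayerObject {
    fn layer_desc(&self) -> &PersistentLayerDesc {
        &self.0
    }
}

// Generic code can now work off descriptors alone.
fn total_size<L: AsLayerDesc>(layers: &[L]) -> u64 {
    layers.iter().map(|l| l.layer_desc().file_size).sum()
}

fn main() {
    let layers = vec![
        LayerObject(PersistentLayerDesc { file_size: 233 }),
        LayerObject(PersistentLayerDesc { file_size: 42 }),
    ];
    assert_eq!(total_size(&layers), 275);
}
```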
@@ -396,10 +405,8 @@ pub type LayerKeyIter<'i> = Box<dyn Iterator<Item = (Key, Lsn, u64)> + 'i + Send
|
|||||||
/// A delta layer contains all modifications within a range of LSNs and keys.
|
/// A delta layer contains all modifications within a range of LSNs and keys.
|
||||||
/// An image layer is a snapshot of all the data in a key-range, at a single
|
/// An image layer is a snapshot of all the data in a key-range, at a single
|
||||||
/// LSN.
|
/// LSN.
|
||||||
pub trait PersistentLayer: Layer {
|
pub trait PersistentLayer: Layer + AsLayerDesc {
|
||||||
/// Get the layer descriptor.
|
/// Identify the tenant this layer belongs to
|
||||||
fn layer_desc(&self) -> &PersistentLayerDesc;
|
|
||||||
|
|
||||||
fn get_tenant_id(&self) -> TenantId {
|
fn get_tenant_id(&self) -> TenantId {
|
||||||
self.layer_desc().tenant_id
|
self.layer_desc().tenant_id
|
||||||
}
|
}
|
||||||
@@ -435,6 +442,10 @@ pub trait PersistentLayer: Layer {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn downcast_delta_layer(self: Arc<Self>) -> Option<std::sync::Arc<DeltaLayer>> {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
fn is_remote_layer(&self) -> bool {
|
fn is_remote_layer(&self) -> bool {
|
||||||
false
|
false
|
||||||
}
|
}
|
||||||
@@ -465,119 +476,32 @@ pub fn downcast_remote_layer(
|
|||||||
pub mod tests {
|
pub mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
/// Holds metadata about a layer without any content. Used mostly for testing.
|
impl From<DeltaFileName> for PersistentLayerDesc {
|
||||||
///
|
|
||||||
/// To use filenames as fixtures, parse them as [`LayerFileName`] then convert from that to a
|
|
||||||
/// LayerDescriptor.
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct LayerDescriptor {
|
|
||||||
base: PersistentLayerDesc,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<PersistentLayerDesc> for LayerDescriptor {
|
|
||||||
fn from(base: PersistentLayerDesc) -> Self {
|
|
||||||
Self { base }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Layer for LayerDescriptor {
|
|
||||||
fn get_value_reconstruct_data(
|
|
||||||
&self,
|
|
||||||
_key: Key,
|
|
||||||
_lsn_range: Range<Lsn>,
|
|
||||||
_reconstruct_data: &mut ValueReconstructState,
|
|
||||||
_ctx: &RequestContext,
|
|
||||||
) -> Result<ValueReconstructResult> {
|
|
||||||
todo!("This method shouldn't be part of the Layer trait")
|
|
||||||
}
|
|
||||||
|
|
||||||
fn dump(&self, _verbose: bool, _ctx: &RequestContext) -> Result<()> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Boilerplate to implement the Layer trait, always use layer_desc for persistent layers.
|
|
||||||
fn get_key_range(&self) -> Range<Key> {
|
|
||||||
self.layer_desc().key_range.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Boilerplate to implement the Layer trait, always use layer_desc for persistent layers.
|
|
||||||
fn get_lsn_range(&self) -> Range<Lsn> {
|
|
||||||
self.layer_desc().lsn_range.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Boilerplate to implement the Layer trait, always use layer_desc for persistent layers.
|
|
||||||
fn is_incremental(&self) -> bool {
|
|
||||||
self.layer_desc().is_incremental
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Boilerplate to implement the Layer trait, always use layer_desc for persistent layers.
|
|
||||||
impl std::fmt::Display for LayerDescriptor {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
write!(f, "{}", self.layer_desc().short_id())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PersistentLayer for LayerDescriptor {
|
|
||||||
fn layer_desc(&self) -> &PersistentLayerDesc {
|
|
||||||
&self.base
|
|
||||||
}
|
|
||||||
|
|
||||||
fn local_path(&self) -> Option<PathBuf> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn iter(&self, _: &RequestContext) -> Result<LayerIter<'_>> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn key_iter(&self, _: &RequestContext) -> Result<LayerKeyIter<'_>> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn delete_resident_layer_file(&self) -> Result<()> {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn info(&self, _: LayerAccessStatsReset) -> HistoricLayerInfo {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn access_stats(&self) -> &LayerAccessStats {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<DeltaFileName> for LayerDescriptor {
|
|
||||||
fn from(value: DeltaFileName) -> Self {
|
fn from(value: DeltaFileName) -> Self {
|
||||||
LayerDescriptor {
|
PersistentLayerDesc::new_delta(
|
||||||
base: PersistentLayerDesc::new_delta(
|
TenantId::from_array([0; 16]),
|
||||||
TenantId::from_array([0; 16]),
|
TimelineId::from_array([0; 16]),
|
||||||
TimelineId::from_array([0; 16]),
|
value.key_range,
|
||||||
value.key_range,
|
value.lsn_range,
|
||||||
value.lsn_range,
|
233,
|
||||||
233,
|
)
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<ImageFileName> for LayerDescriptor {
|
impl From<ImageFileName> for PersistentLayerDesc {
|
||||||
fn from(value: ImageFileName) -> Self {
|
fn from(value: ImageFileName) -> Self {
|
||||||
LayerDescriptor {
|
PersistentLayerDesc::new_img(
|
||||||
base: PersistentLayerDesc::new_img(
|
TenantId::from_array([0; 16]),
|
||||||
TenantId::from_array([0; 16]),
|
TimelineId::from_array([0; 16]),
|
||||||
TimelineId::from_array([0; 16]),
|
value.key_range,
|
||||||
value.key_range,
|
value.lsn,
|
||||||
value.lsn,
|
false,
|
||||||
false,
|
233,
|
||||||
233,
|
)
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<LayerFileName> for LayerDescriptor {
|
impl From<LayerFileName> for PersistentLayerDesc {
|
||||||
fn from(value: LayerFileName) -> Self {
|
fn from(value: LayerFileName) -> Self {
|
||||||
match value {
|
match value {
|
||||||
LayerFileName::Delta(d) => Self::from(d),
|
LayerFileName::Delta(d) => Self::from(d),
|
||||||
|
|||||||
@@ -7,14 +7,18 @@
 //! must be page images or WAL records with the 'will_init' flag set, so that
 //! they can be replayed without referring to an older page version.
 //!
-//! The delta files are stored in timelines/<timeline_id> directory. Currently,
+//! The delta files are stored in `timelines/<timeline_id>` directory. Currently,
 //! there are no subdirectories, and each delta file is named like this:
 //!
-//! <key start>-<key end>__<start LSN>-<end LSN
+//! ```text
+//! <key start>-<key end>__<start LSN>-<end LSN>
+//! ```
 //!
 //! For example:
 //!
+//! ```text
 //! 000000067F000032BE0000400000000020B6-000000067F000032BE0000400000000030B6__000000578C6B29-0000000057A50051
+//! ```
 //!
 //! Every delta file consists of three parts: "summary", "index", and
 //! "values". The summary is a fixed size header at the beginning of the file,
@@ -47,6 +51,7 @@ use std::io::{Seek, SeekFrom};
 use std::ops::Range;
 use std::os::unix::fs::FileExt;
 use std::path::{Path, PathBuf};
+use std::sync::Arc;
 use tracing::*;
 
 use utils::{
@@ -56,8 +61,8 @@ use utils::{
 };
 
 use super::{
-    DeltaFileName, Layer, LayerAccessStats, LayerAccessStatsReset, LayerIter, LayerKeyIter,
-    PathOrConf, PersistentLayerDesc,
+    AsLayerDesc, DeltaFileName, Layer, LayerAccessStats, LayerAccessStatsReset, LayerIter,
+    LayerKeyIter, PathOrConf, PersistentLayerDesc,
 };
 
 ///
@@ -403,10 +408,16 @@ impl std::fmt::Display for DeltaLayer {
     }
 }
 
-impl PersistentLayer for DeltaLayer {
+impl AsLayerDesc for DeltaLayer {
     fn layer_desc(&self) -> &PersistentLayerDesc {
         &self.desc
     }
+}
+
+impl PersistentLayer for DeltaLayer {
+    fn downcast_delta_layer(self: Arc<Self>) -> Option<std::sync::Arc<DeltaLayer>> {
+        Some(self)
+    }
 
     fn local_path(&self) -> Option<PathBuf> {
         Some(self.path())
@@ -459,22 +470,22 @@ impl PersistentLayer for DeltaLayer {
 impl DeltaLayer {
     fn path_for(
         path_or_conf: &PathOrConf,
-        timeline_id: TimelineId,
-        tenant_id: TenantId,
+        tenant_id: &TenantId,
+        timeline_id: &TimelineId,
         fname: &DeltaFileName,
     ) -> PathBuf {
         match path_or_conf {
             PathOrConf::Path(path) => path.clone(),
             PathOrConf::Conf(conf) => conf
-                .timeline_path(&timeline_id, &tenant_id)
+                .timeline_path(tenant_id, timeline_id)
                 .join(fname.to_string()),
         }
     }
 
     fn temp_path_for(
         conf: &PageServerConf,
-        timeline_id: TimelineId,
-        tenant_id: TenantId,
+        tenant_id: &TenantId,
+        timeline_id: &TimelineId,
         key_start: Key,
         lsn_range: &Range<Lsn>,
     ) -> PathBuf {
@@ -484,7 +495,7 @@ impl DeltaLayer {
             .map(char::from)
             .collect();
 
-        conf.timeline_path(&timeline_id, &tenant_id).join(format!(
+        conf.timeline_path(tenant_id, timeline_id).join(format!(
             "{}-XXX__{:016X}-{:016X}.{}.{}",
             key_start,
             u64::from(lsn_range.start),
@@ -606,8 +617,8 @@ impl DeltaLayer {
     pub fn path(&self) -> PathBuf {
         Self::path_for(
            &self.path_or_conf,
-            self.desc.timeline_id,
-            self.desc.tenant_id,
+            &self.desc.tenant_id,
+            &self.desc.timeline_id,
            &self.layer_name(),
        )
     }
@@ -655,7 +666,7 @@ impl DeltaLayerWriterInner {
         //
         // Note: This overwrites any existing file. There shouldn't be any.
         // FIXME: throw an error instead?
-        let path = DeltaLayer::temp_path_for(conf, timeline_id, tenant_id, key_start, &lsn_range);
+        let path = DeltaLayer::temp_path_for(conf, &tenant_id, &timeline_id, key_start, &lsn_range);
 
         let mut file = VirtualFile::create(&path)?;
         // make room for the header block
@@ -770,8 +781,8 @@ impl DeltaLayerWriterInner {
         // FIXME: throw an error instead?
         let final_path = DeltaLayer::path_for(
             &PathOrConf::Conf(self.conf),
-            self.timeline_id,
-            self.tenant_id,
+            &self.tenant_id,
+            &self.timeline_id,
             &DeltaFileName {
                 key_range: self.key_start..key_end,
                 lsn_range: self.lsn_range,
@@ -798,7 +809,7 @@ impl DeltaLayerWriterInner {
 ///
 /// # Note
 ///
-/// As described in https://github.com/neondatabase/neon/issues/2650, it's
+/// As described in <https://github.com/neondatabase/neon/issues/2650>, it's
 /// possible for the writer to drop before `finish` is actually called. So this
 /// could lead to odd temporary files in the directory, exhausting file system.
 /// This structure wraps `DeltaLayerWriterInner` and also contains `Drop`
@@ -57,8 +57,9 @@ impl Ord for DeltaFileName
 
 /// Represents the filename of a DeltaLayer
 ///
+/// ```text
 /// <key start>-<key end>__<LSN start>-<LSN end>
-///
+/// ```
 impl DeltaFileName {
     ///
     /// Parse a string as a delta file name. Returns None if the filename does not
@@ -162,7 +163,9 @@ impl ImageFileName
 ///
 /// Represents the filename of an ImageLayer
 ///
+/// ```text
 /// <key start>-<key end>__<LSN>
+/// ```
 impl ImageFileName {
     ///
     /// Parse a string as an image file name. Returns None if the filename does not
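Both name formats above are plain string encodings with a `__` separator between the key range and the LSN part. The following is a standalone sketch of pulling such a name apart; it is illustrative only and is not the pageserver's own parser (the helper name is made up):

// Illustrative sketch: split a layer file name of the form
// `<key start>-<key end>__<LSN start>-<LSN end>` (delta) or
// `<key start>-<key end>__<LSN>` (image) into its raw parts.
fn split_layer_file_name(name: &str) -> Option<(&str, &str, &str)> {
    let (keys, lsns) = name.split_once("__")?;
    let (key_start, key_end) = keys.split_once('-')?;
    // For an image layer the part after `__` is a single LSN; for a delta layer
    // it still contains a `-` separating the start and end LSN.
    Some((key_start, key_end, lsns))
}

fn main() {
    let delta = "000000067F000032BE0000400000000020B6-000000067F000032BE0000400000000030B6__000000578C6B29-0000000057A50051";
    let (ks, ke, lsns) = split_layer_file_name(delta).unwrap();
    println!("key range {ks}..{ke}, lsn part {lsns}");
}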
@@ -7,11 +7,15 @@
 //! timelines/<timeline_id> directory. Currently, there are no
 //! subdirectories, and each image layer file is named like this:
 //!
+//! ```text
 //! <key start>-<key end>__<LSN>
+//! ```
 //!
 //! For example:
 //!
+//! ```text
 //! 000000067F000032BE0000400000000070B6-000000067F000032BE0000400000000080B6__00000000346BC568
+//! ```
 //!
 //! Every image layer file consists of three parts: "summary",
 //! "index", and "values". The summary is a fixed size header at the
@@ -53,7 +57,9 @@ use utils::{
 };
 
 use super::filename::ImageFileName;
-use super::{Layer, LayerAccessStatsReset, LayerIter, PathOrConf, PersistentLayerDesc};
+use super::{
+    AsLayerDesc, Layer, LayerAccessStatsReset, LayerIter, PathOrConf, PersistentLayerDesc,
+};
 
 ///
 /// Header stored in the beginning of the file
@@ -241,11 +247,13 @@ impl std::fmt::Display for ImageLayer {
     }
 }
 
-impl PersistentLayer for ImageLayer {
+impl AsLayerDesc for ImageLayer {
     fn layer_desc(&self) -> &PersistentLayerDesc {
         &self.desc
     }
+}
 
+impl PersistentLayer for ImageLayer {
     fn local_path(&self) -> Option<PathBuf> {
         Some(self.path())
     }
@@ -288,7 +296,7 @@ impl ImageLayer {
         match path_or_conf {
             PathOrConf::Path(path) => path.to_path_buf(),
             PathOrConf::Conf(conf) => conf
-                .timeline_path(&timeline_id, &tenant_id)
+                .timeline_path(&tenant_id, &timeline_id)
                 .join(fname.to_string()),
         }
     }
@@ -305,7 +313,7 @@ impl ImageLayer {
             .map(char::from)
             .collect();
 
-        conf.timeline_path(&timeline_id, &tenant_id)
+        conf.timeline_path(&tenant_id, &timeline_id)
             .join(format!("{fname}.{rand_string}.{TEMP_FILE_SUFFIX}"))
     }
 
@@ -656,7 +664,7 @@ impl ImageLayerWriterInner {
 ///
 /// # Note
 ///
-/// As described in https://github.com/neondatabase/neon/issues/2650, it's
+/// As described in <https://github.com/neondatabase/neon/issues/2650>, it's
 /// possible for the writer to drop before `finish` is actually called. So this
 /// could lead to odd temporary files in the directory, exhausting file system.
 /// This structure wraps `ImageLayerWriterInner` and also contains `Drop`
@@ -4,9 +4,9 @@
 use crate::config::PageServerConf;
 use crate::context::RequestContext;
 use crate::repository::Key;
-use crate::tenant::layer_map::BatchedUpdates;
 use crate::tenant::remote_timeline_client::index::LayerFileMetadata;
 use crate::tenant::storage_layer::{Layer, ValueReconstructResult, ValueReconstructState};
+use crate::tenant::timeline::layer_manager::LayerManager;
 use anyhow::{bail, Result};
 use pageserver_api::models::HistoricLayerInfo;
 use std::ops::Range;
@@ -20,12 +20,12 @@ use utils::{
 
 use super::filename::{DeltaFileName, ImageFileName};
 use super::{
-    DeltaLayer, ImageLayer, LayerAccessStats, LayerAccessStatsReset, LayerIter, LayerKeyIter,
-    LayerResidenceStatus, PersistentLayer, PersistentLayerDesc,
+    AsLayerDesc, DeltaLayer, ImageLayer, LayerAccessStats, LayerAccessStatsReset, LayerIter,
+    LayerKeyIter, LayerResidenceStatus, PersistentLayer, PersistentLayerDesc,
 };
 
 /// RemoteLayer is a not yet downloaded [`ImageLayer`] or
-/// [`crate::storage_layer::DeltaLayer`].
+/// [`DeltaLayer`](super::DeltaLayer).
 ///
 /// RemoteLayer might be downloaded on-demand during operations which are
 /// allowed download remote layers and during which, it gets replaced with a
@@ -50,6 +50,8 @@ pub struct RemoteLayer {
     /// It is very unlikely to accumulate these in the Timeline's LayerMap, but having this avoids
     /// a possible fast loop between `Timeline::get_reconstruct_data` and
     /// `Timeline::download_remote_layer`, which also logs.
+    ///
+    /// [`ongoing_download`]: Self::ongoing_download
     pub(crate) download_replacement_failure: std::sync::atomic::AtomicBool,
 }
 
@@ -115,11 +117,13 @@ impl std::fmt::Display for RemoteLayer {
     }
 }
 
-impl PersistentLayer for RemoteLayer {
+impl AsLayerDesc for RemoteLayer {
     fn layer_desc(&self) -> &PersistentLayerDesc {
         &self.desc
     }
+}
 
+impl PersistentLayer for RemoteLayer {
     fn local_path(&self) -> Option<PathBuf> {
         None
     }
@@ -222,7 +226,7 @@ impl RemoteLayer {
     /// Create a Layer struct representing this layer, after it has been downloaded.
     pub fn create_downloaded_layer(
         &self,
-        layer_map_lock_held_witness: &BatchedUpdates<'_>,
+        layer_map_lock_held_witness: &LayerManager,
         conf: &'static PageServerConf,
         file_size: u64,
     ) -> Arc<dyn PersistentLayer> {
File diff suppressed because it is too large.
@@ -198,10 +198,10 @@ impl Timeline {
         // So, we just need to deal with this.
         let candidates: Vec<Arc<dyn PersistentLayer>> = {
             let guard = self.layers.read().await;
-            let (layers, mapping) = &*guard;
+            let layers = guard.layer_map();
             let mut candidates = Vec::new();
             for hist_layer in layers.iter_historic_layers() {
-                let hist_layer = mapping.get_from_desc(&hist_layer);
+                let hist_layer = guard.get_from_desc(&hist_layer);
                 if hist_layer.is_remote_layer() {
                     continue;
                 }
pageserver/src/tenant/timeline/layer_manager.rs (new file, 378 lines)
@@ -0,0 +1,378 @@
use anyhow::{bail, ensure, Context, Result};
use std::{collections::HashMap, sync::Arc};
use tracing::trace;
use utils::{
    id::{TenantId, TimelineId},
    lsn::{AtomicLsn, Lsn},
};

use crate::{
    config::PageServerConf,
    metrics::TimelineMetrics,
    tenant::{
        layer_map::{BatchedUpdates, LayerMap},
        storage_layer::{
            AsLayerDesc, DeltaLayer, ImageLayer, InMemoryLayer, Layer, PersistentLayer,
            PersistentLayerDesc, PersistentLayerKey, RemoteLayer,
        },
        timeline::compare_arced_layers,
    },
};

/// Provides semantic APIs to manipulate the layer map.
pub struct LayerManager {
    layer_map: LayerMap,
    layer_fmgr: LayerFileManager,
}

/// After GC, the layer map changes will not be applied immediately. Users should manually apply the changes after
/// scheduling deletes in remote client.
pub struct ApplyGcResultGuard<'a>(BatchedUpdates<'a>);

impl ApplyGcResultGuard<'_> {
    pub fn flush(self) {
        self.0.flush();
    }
}

impl LayerManager {
    pub fn create() -> Self {
        Self {
            layer_map: LayerMap::default(),
            layer_fmgr: LayerFileManager::new(),
        }
    }

    pub fn get_from_desc(&self, desc: &PersistentLayerDesc) -> Arc<dyn PersistentLayer> {
        self.layer_fmgr.get_from_desc(desc)
    }

    /// Get an immutable reference to the layer map.
    ///
    /// We expect users only to be able to get an immutable layer map. If users want to make modifications,
    /// they should use the below semantic APIs. This design makes us step closer to immutable storage state.
    pub fn layer_map(&self) -> &LayerMap {
        &self.layer_map
    }

    /// Get a mutable reference to the layer map. This function will be removed once `flush_frozen_layer`
    /// gets a refactor.
    pub fn layer_map_mut(&mut self) -> &mut LayerMap {
        &mut self.layer_map
    }

    /// Replace layers in the layer file manager, used in evictions and layer downloads.
    pub fn replace_and_verify(
        &mut self,
        expected: Arc<dyn PersistentLayer>,
        new: Arc<dyn PersistentLayer>,
    ) -> Result<()> {
        self.layer_fmgr.replace_and_verify(expected, new)
    }

    /// Called from `load_layer_map`. Initialize the layer manager with:
    /// 1. all on-disk layers
    /// 2. next open layer (with disk disk_consistent_lsn LSN)
    pub fn initialize_local_layers(
        &mut self,
        on_disk_layers: Vec<Arc<dyn PersistentLayer>>,
        next_open_layer_at: Lsn,
    ) {
        let mut updates = self.layer_map.batch_update();
        for layer in on_disk_layers {
            Self::insert_historic_layer(layer, &mut updates, &mut self.layer_fmgr);
        }
        updates.flush();
        self.layer_map.next_open_layer_at = Some(next_open_layer_at);
    }

    /// Initialize when creating a new timeline, called in `init_empty_layer_map`.
    pub fn initialize_empty(&mut self, next_open_layer_at: Lsn) {
        self.layer_map.next_open_layer_at = Some(next_open_layer_at);
    }

    pub fn initialize_remote_layers(
        &mut self,
        corrupted_local_layers: Vec<Arc<dyn PersistentLayer>>,
        remote_layers: Vec<Arc<RemoteLayer>>,
    ) {
        let mut updates = self.layer_map.batch_update();
        for layer in corrupted_local_layers {
            Self::remove_historic_layer(layer, &mut updates, &mut self.layer_fmgr);
        }
        for layer in remote_layers {
            Self::insert_historic_layer(layer, &mut updates, &mut self.layer_fmgr);
        }
        updates.flush();
    }

    /// Open a new writable layer to append data if there is no open layer, otherwise return the current open layer,
    /// called within `get_layer_for_write`.
    pub fn get_layer_for_write(
        &mut self,
        lsn: Lsn,
        last_record_lsn: Lsn,
        conf: &'static PageServerConf,
        timeline_id: TimelineId,
        tenant_id: TenantId,
    ) -> Result<Arc<InMemoryLayer>> {
        ensure!(lsn.is_aligned());

        ensure!(
            lsn > last_record_lsn,
            "cannot modify relation after advancing last_record_lsn (incoming_lsn={}, last_record_lsn={})\n{}",
            lsn,
            last_record_lsn,
            std::backtrace::Backtrace::force_capture(),
        );

        // Do we have a layer open for writing already?
        let layer = if let Some(open_layer) = &self.layer_map.open_layer {
            if open_layer.get_lsn_range().start > lsn {
                bail!(
                    "unexpected open layer in the future: open layers starts at {}, write lsn {}",
                    open_layer.get_lsn_range().start,
                    lsn
                );
            }

            Arc::clone(open_layer)
        } else {
            // No writeable layer yet. Create one.
            let start_lsn = self
                .layer_map
                .next_open_layer_at
                .context("No next open layer found")?;

            trace!(
                "creating in-memory layer at {}/{} for record at {}",
                timeline_id,
                start_lsn,
                lsn
            );

            let new_layer = InMemoryLayer::create(conf, timeline_id, tenant_id, start_lsn)?;
            let layer = Arc::new(new_layer);

            self.layer_map.open_layer = Some(layer.clone());
            self.layer_map.next_open_layer_at = None;

            layer
        };

        Ok(layer)
    }

    /// Called from `freeze_inmem_layer`, returns true if successfully frozen.
    pub fn try_freeze_in_memory_layer(
        &mut self,
        Lsn(last_record_lsn): Lsn,
        last_freeze_at: &AtomicLsn,
    ) {
        let end_lsn = Lsn(last_record_lsn + 1);

        if let Some(open_layer) = &self.layer_map.open_layer {
            let open_layer_rc = Arc::clone(open_layer);
            // Does this layer need freezing?
            open_layer.freeze(end_lsn);

            // The layer is no longer open, update the layer map to reflect this.
            // We will replace it with on-disk historics below.
            self.layer_map.frozen_layers.push_back(open_layer_rc);
            self.layer_map.open_layer = None;
            self.layer_map.next_open_layer_at = Some(end_lsn);
            last_freeze_at.store(end_lsn);
        }
    }

    /// Add image layers to the layer map, called from `create_image_layers`.
    pub fn track_new_image_layers(&mut self, image_layers: Vec<ImageLayer>) {
        let mut updates = self.layer_map.batch_update();
        for layer in image_layers {
            Self::insert_historic_layer(Arc::new(layer), &mut updates, &mut self.layer_fmgr);
        }
        updates.flush();
    }

    /// Insert into the layer map when a new delta layer is created, called from `create_delta_layer`.
    pub fn track_new_l0_delta_layer(&mut self, delta_layer: Arc<DeltaLayer>) {
        let mut updates = self.layer_map.batch_update();
        Self::insert_historic_layer(delta_layer, &mut updates, &mut self.layer_fmgr);
        updates.flush();
    }

    /// Called when compaction is completed.
    pub fn finish_compact_l0(
        &mut self,
        layer_removal_cs: Arc<tokio::sync::OwnedMutexGuard<()>>,
        compact_from: Vec<Arc<dyn PersistentLayer>>,
        compact_to: Vec<Arc<dyn PersistentLayer>>,
        metrics: &TimelineMetrics,
    ) -> Result<()> {
        let mut updates = self.layer_map.batch_update();
        for l in compact_to {
            Self::insert_historic_layer(l, &mut updates, &mut self.layer_fmgr);
        }
        for l in compact_from {
            // NB: the layer file identified by descriptor `l` is guaranteed to be present
            // in the LayerFileManager because compaction kept holding `layer_removal_cs` the entire
            // time, even though we dropped `Timeline::layers` inbetween.
            Self::delete_historic_layer(
                layer_removal_cs.clone(),
                l,
                &mut updates,
                metrics,
                &mut self.layer_fmgr,
            )?;
        }
        updates.flush();
        Ok(())
    }

    /// Called when garbage collect the timeline. Returns a guard that will apply the updates to the layer map.
    pub fn finish_gc_timeline(
        &mut self,
        layer_removal_cs: Arc<tokio::sync::OwnedMutexGuard<()>>,
        gc_layers: Vec<Arc<dyn PersistentLayer>>,
        metrics: &TimelineMetrics,
    ) -> Result<ApplyGcResultGuard> {
        let mut updates = self.layer_map.batch_update();
        for doomed_layer in gc_layers {
            Self::delete_historic_layer(
                layer_removal_cs.clone(),
                doomed_layer,
                &mut updates,
                metrics,
                &mut self.layer_fmgr,
            )?; // FIXME: schedule succeeded deletions in timeline.rs `gc_timeline` instead of in batch?
        }
        Ok(ApplyGcResultGuard(updates))
    }

    /// Helper function to insert a layer into the layer map and file manager.
    fn insert_historic_layer(
        layer: Arc<dyn PersistentLayer>,
        updates: &mut BatchedUpdates<'_>,
        mapping: &mut LayerFileManager,
    ) {
        updates.insert_historic(layer.layer_desc().clone());
        mapping.insert(layer);
    }

    /// Helper function to remove a layer into the layer map and file manager
    fn remove_historic_layer(
        layer: Arc<dyn PersistentLayer>,
        updates: &mut BatchedUpdates<'_>,
        mapping: &mut LayerFileManager,
    ) {
        updates.remove_historic(layer.layer_desc().clone());
        mapping.remove(layer);
    }

    /// Removes the layer from local FS (if present) and from memory.
    /// Remote storage is not affected by this operation.
    fn delete_historic_layer(
        // we cannot remove layers otherwise, since gc and compaction will race
        _layer_removal_cs: Arc<tokio::sync::OwnedMutexGuard<()>>,
        layer: Arc<dyn PersistentLayer>,
        updates: &mut BatchedUpdates<'_>,
        metrics: &TimelineMetrics,
        mapping: &mut LayerFileManager,
    ) -> anyhow::Result<()> {
        if !layer.is_remote_layer() {
            layer.delete_resident_layer_file()?;
            let layer_file_size = layer.file_size();
            metrics.resident_physical_size_gauge.sub(layer_file_size);
        }

        // TODO Removing from the bottom of the layer map is expensive.
        // Maybe instead discard all layer map historic versions that
        // won't be needed for page reconstruction for this timeline,
        // and mark what we can't delete yet as deleted from the layer
        // map index without actually rebuilding the index.
        updates.remove_historic(layer.layer_desc().clone());
        mapping.remove(layer);

        Ok(())
    }

    pub(crate) fn contains(&self, layer: &Arc<dyn PersistentLayer>) -> bool {
        self.layer_fmgr.contains(layer)
    }
}

pub struct LayerFileManager<T: AsLayerDesc + ?Sized = dyn PersistentLayer>(
    HashMap<PersistentLayerKey, Arc<T>>,
);

impl<T: AsLayerDesc + ?Sized> LayerFileManager<T> {
    fn get_from_desc(&self, desc: &PersistentLayerDesc) -> Arc<T> {
        // The assumption for the `expect()` is that all code maintains the following invariant:
        // A layer's descriptor is present in the LayerMap => the LayerFileManager contains a layer for the descriptor.
        self.0
            .get(&desc.key())
            .with_context(|| format!("get layer from desc: {}", desc.filename()))
            .expect("not found")
            .clone()
    }

    pub(crate) fn insert(&mut self, layer: Arc<T>) {
        let present = self.0.insert(layer.layer_desc().key(), layer.clone());
        if present.is_some() && cfg!(debug_assertions) {
            panic!("overwriting a layer: {:?}", layer.layer_desc())
        }
    }

    pub(crate) fn contains(&self, layer: &Arc<T>) -> bool {
        self.0.contains_key(&layer.layer_desc().key())
    }

    pub(crate) fn new() -> Self {
        Self(HashMap::new())
    }

    pub(crate) fn remove(&mut self, layer: Arc<T>) {
        let present = self.0.remove(&layer.layer_desc().key());
        if present.is_none() && cfg!(debug_assertions) {
            panic!(
                "removing layer that is not present in layer mapping: {:?}",
                layer.layer_desc()
            )
        }
    }

    pub(crate) fn replace_and_verify(&mut self, expected: Arc<T>, new: Arc<T>) -> Result<()> {
        let key = expected.layer_desc().key();
        let other = new.layer_desc().key();

        let expected_l0 = LayerMap::is_l0(expected.layer_desc());
        let new_l0 = LayerMap::is_l0(new.layer_desc());

        fail::fail_point!("layermap-replace-notfound", |_| anyhow::bail!(
            "layermap-replace-notfound"
        ));

        anyhow::ensure!(
            key == other,
            "expected and new layer have different keys: {key:?} != {other:?}"
        );

        anyhow::ensure!(
            expected_l0 == new_l0,
            "one layer is l0 while the other is not: {expected_l0} != {new_l0}"
        );

        if let Some(layer) = self.0.get_mut(&key) {
            anyhow::ensure!(
                compare_arced_layers(&expected, layer),
                "another layer was found instead of expected, expected={expected:?}, new={new:?}",
                expected = Arc::as_ptr(&expected),
                new = Arc::as_ptr(layer),
            );
            *layer = new;
            Ok(())
        } else {
            anyhow::bail!("layer was not found");
        }
    }
}
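For orientation, here is a minimal sketch of how timeline code is expected to drive the new LayerManager, based only on the calls visible in the hunks above; the `Timeline` receiver and its RwLock-style `layers` handle are assumptions, not the exact code:

// Sketch only: illustrates the intended call pattern around LayerManager.
// `timeline.layers` is assumed to be an async RwLock around a LayerManager,
// as suggested by the eviction-candidates hunk earlier in this diff.
async fn count_local_layers(timeline: &Timeline) -> usize {
    let guard = timeline.layers.read().await;
    let layer_map = guard.layer_map(); // immutable view of the map
    layer_map
        .iter_historic_layers()
        // resolve each descriptor back to the concrete layer object
        .map(|desc| guard.get_from_desc(&desc))
        .filter(|layer| !layer.is_remote_layer())
        .count()
}

Mutations, by contrast, go through the semantic methods (`track_new_image_layers`, `finish_compact_l0`, `finish_gc_timeline`, ...) rather than through a mutable layer map reference.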
@@ -7,19 +7,14 @@ pub(crate) fn debug_assert_current_span_has_tenant_and_timeline_id() {}
 #[cfg(debug_assertions)]
 #[track_caller]
 pub(crate) fn debug_assert_current_span_has_tenant_and_timeline_id() {
-    static TIMELINE_ID_EXTRACTOR: once_cell::sync::Lazy<MultiNameExtractor<2>> =
-        once_cell::sync::Lazy::new(|| {
-            MultiNameExtractor::new("TimelineId", ["timeline_id", "timeline"])
-        });
+    static TIMELINE_ID_EXTRACTOR: once_cell::sync::Lazy<MultiNameExtractor<1>> =
+        once_cell::sync::Lazy::new(|| MultiNameExtractor::new("TimelineId", ["timeline_id"]));
 
     let fields: [&dyn Extractor; 2] = [
         &*crate::tenant::span::TENANT_ID_EXTRACTOR,
         &*TIMELINE_ID_EXTRACTOR,
     ];
-    if let Err(missing) = check_fields_present(fields) {
-        panic!(
-            "missing extractors: {:?}",
-            missing.into_iter().map(|e| e.name()).collect::<Vec<_>>()
-        )
-    }
+    if let Err(missing) = check_fields_present!(fields) {
+        panic!("missing extractors: {missing:?}")
+    }
 }
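The assertion above only checks that the current tracing span carries `tenant_id` and `timeline_id` fields. A hedged example of the calling convention it implies, mirroring the `info_span!` call in the next hunk (the span name here is made up, and `tenant_id`/`timeline_id` are assumed to be Display-able values in scope):

// Illustrative only: enter a span that carries the fields the assertion checks for.
let _guard = tracing::info_span!(
    "compact_timeline", // hypothetical operation name
    tenant_id = %tenant_id,
    timeline_id = %timeline_id,
)
.entered();

// Functions called from here may invoke
// debug_assert_current_span_has_tenant_and_timeline_id() without panicking.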
@@ -132,7 +132,7 @@ impl<'t> UninitializedTimeline<'t> {
 impl Drop for UninitializedTimeline<'_> {
     fn drop(&mut self) {
         if let Some((_, uninit_mark)) = self.raw_timeline.take() {
-            let _entered = info_span!("drop_uninitialized_timeline", tenant = %self.owning_tenant.tenant_id, timeline = %self.timeline_id).entered();
+            let _entered = info_span!("drop_uninitialized_timeline", tenant_id = %self.owning_tenant.tenant_id, timeline_id = %self.timeline_id).entered();
             error!("Timeline got dropped without initializing, cleaning its files");
             cleanup_timeline_directory(uninit_mark);
         }
@@ -6,7 +6,7 @@
 //! Current connection state is tracked too, to ensure it's not getting stale.
 //!
 //! After every connection or storage broker update fetched, the state gets updated correspondingly and rechecked for the new conneciton leader,
-//! then a [re]connection happens, if necessary.
+//! then a (re)connection happens, if necessary.
 //! Only WAL streaming task expects to be finished, other loops (storage broker, connection management) never exit unless cancelled explicitly via the dedicated channel.
 
 use std::{collections::HashMap, num::NonZeroU64, ops::ControlFlow, sync::Arc, time::Duration};
@@ -266,7 +266,7 @@ pub struct ConnectionManagerStatus {
 impl ConnectionManagerStatus {
     /// Generates a string, describing current connection status in a form, suitable for logging.
     pub fn to_human_readable_string(&self) -> String {
-        let mut resulting_string = "WalReceiver status".to_string();
+        let mut resulting_string = String::new();
         match &self.existing_connection {
             Some(connection) => {
                 if connection.has_processed_wal {
@@ -175,8 +175,8 @@ impl WalRedoManager for PostgresRedoManager {
         let mut img = base_img.map(|p| p.1);
         let mut batch_neon = can_apply_in_neon(&records[0].1);
         let mut batch_start = 0;
-        for i in 1..records.len() {
-            let rec_neon = can_apply_in_neon(&records[i].1);
+        for (i, record) in records.iter().enumerate().skip(1) {
+            let rec_neon = can_apply_in_neon(&record.1);
 
             if rec_neon != batch_neon {
                 let result = if batch_neon {
@@ -685,7 +685,7 @@ impl PostgresRedoManager {
             // as close-on-exec by default, but that's not enough, since we use
             // libraries that directly call libc open without setting that flag.
             .close_fds()
-            .spawn_no_leak_child()
+            .spawn_no_leak_child(self.tenant_id)
             .map_err(|e| {
                 Error::new(
                     e.kind(),
@@ -989,6 +989,7 @@ impl PostgresRedoManager {
 /// Wrapper type around `std::process::Child` which guarantees that the child
 /// will be killed and waited-for by this process before being dropped.
 struct NoLeakChild {
+    tenant_id: TenantId,
     child: Option<Child>,
 }
 
@@ -1007,9 +1008,12 @@ impl DerefMut for NoLeakChild {
 }
 
 impl NoLeakChild {
-    fn spawn(command: &mut Command) -> io::Result<Self> {
+    fn spawn(tenant_id: TenantId, command: &mut Command) -> io::Result<Self> {
         let child = command.spawn()?;
-        Ok(NoLeakChild { child: Some(child) })
+        Ok(NoLeakChild {
+            tenant_id,
+            child: Some(child),
+        })
     }
 
     fn kill_and_wait(mut self) {
@@ -1056,11 +1060,16 @@ impl Drop for NoLeakChild {
             Some(child) => child,
             None => return,
         };
+        let tenant_id = self.tenant_id;
         // Offload the kill+wait of the child process into the background.
         // If someone stops the runtime, we'll leak the child process.
         // We can ignore that case because we only stop the runtime on pageserver exit.
         BACKGROUND_RUNTIME.spawn(async move {
             tokio::task::spawn_blocking(move || {
+                // Intentionally don't inherit the tracing context from whoever is dropping us.
+                // This thread here is going to outlive of our dropper.
+                let span = tracing::info_span!("walredo", %tenant_id);
+                let _entered = span.enter();
                 Self::kill_and_wait_impl(child);
             })
             .await
@@ -1069,12 +1078,12 @@ impl Drop for NoLeakChild {
 }
 
 trait NoLeakChildCommandExt {
-    fn spawn_no_leak_child(&mut self) -> io::Result<NoLeakChild>;
+    fn spawn_no_leak_child(&mut self, tenant_id: TenantId) -> io::Result<NoLeakChild>;
 }
 
 impl NoLeakChildCommandExt for Command {
-    fn spawn_no_leak_child(&mut self) -> io::Result<NoLeakChild> {
-        NoLeakChild::spawn(self)
+    fn spawn_no_leak_child(&mut self, tenant_id: TenantId) -> io::Result<NoLeakChild> {
+        NoLeakChild::spawn(tenant_id, self)
     }
 }
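The guarantee described in the NoLeakChild doc comment above is the usual kill-on-drop wrapper. A self-contained sketch of the same idea, simplified to leave out the tenant_id bookkeeping and the background-runtime offload that the real code uses:

use std::process::{Child, Command};

/// Simplified sketch of a kill-on-drop child wrapper; the NoLeakChild in the
/// diff above additionally records a tenant_id and offloads kill+wait to a runtime.
struct KillOnDrop {
    child: Option<Child>,
}

impl KillOnDrop {
    fn spawn(command: &mut Command) -> std::io::Result<Self> {
        Ok(Self {
            child: Some(command.spawn()?),
        })
    }
}

impl Drop for KillOnDrop {
    fn drop(&mut self) {
        if let Some(mut child) = self.child.take() {
            // Best effort: the child may already have exited.
            let _ = child.kill();
            // Reap the process so it does not linger as a zombie.
            let _ = child.wait();
        }
    }
}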
@@ -149,7 +149,7 @@ hnsw_check_available_memory(Size requested)
 	struct sysinfo si;
 	Size total;
 	if (sysinfo(&si) < 0)
-		elog(ERROR, "Failed to get amount of RAM: %m");
+		elog(ERROR, "Failed to get amount of RAM: %n");
 
 	total = si.totalram*si.mem_unit;
 	if ((Size)NBuffers*BLCKSZ + requested >= total)
@@ -580,7 +580,6 @@ l2_distance(PG_FUNCTION_ARGS)
 				 errmsg("different array dimensions %d and %d", a_dim, b_dim)));
 	}
 
-	#pragma clang loop vectorize(enable)
 	for (int i = 0; i < a_dim; i++)
 	{
 		diff = ax[i] - bx[i];
@@ -223,7 +223,6 @@ dist_t fstdistfunc_scalar(const coord_t *x, const coord_t *y, size_t n)
 {
 	dist_t distance = 0.0;
 
-	#pragma clang loop vectorize(enable)
 	for (size_t i = 0; i < n; i++)
 	{
 		dist_t diff = x[i] - y[i];
@@ -2231,6 +2231,18 @@ HandleSafekeeperResponse(void)
 	if (n_synced >= quorum)
 	{
 		/* All safekeepers synced! */
+
+		/*
+		 * Send empty message to broadcast latest truncateLsn to all safekeepers.
+		 * This helps to finish next sync-safekeepers eailier, by skipping recovery
+		 * step.
+		 *
+		 * We don't need to wait for response because it doesn't affect correctness,
+		 * and TCP should be able to deliver the message to safekeepers in case of
+		 * network working properly.
+		 */
+		BroadcastAppendRequest();
+
 		fprintf(stdout, "%X/%X\n", LSN_FORMAT_ARGS(propEpochStartLsn));
 		exit(0);
 	}
poetry.lock (generated, 168 lines changed)
@@ -1,10 +1,9 @@
-# This file is automatically @generated by Poetry and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
 
 [[package]]
 name = "aiohttp"
 version = "3.7.4"
 description = "Async http client/server framework (asyncio)"
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -62,7 +61,6 @@ speedups = ["aiodns", "brotlipy", "cchardet"]
 name = "aiopg"
 version = "1.3.4"
 description = "Postgres integration with asyncio."
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -81,7 +79,6 @@ sa = ["sqlalchemy[postgresql-psycopg2binary] (>=1.3,<1.5)"]
 name = "allure-pytest"
 version = "2.13.2"
 description = "Allure pytest integration"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -97,7 +94,6 @@ pytest = ">=4.5.0"
 name = "allure-python-commons"
 version = "2.13.2"
 description = "Common module for integrate allure with python-based frameworks"
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -113,7 +109,6 @@ pluggy = ">=0.4.0"
 name = "async-timeout"
 version = "3.0.1"
 description = "Timeout context manager for asyncio programs"
-category = "main"
 optional = false
 python-versions = ">=3.5.3"
 files = [
@@ -125,7 +120,6 @@ files = [
 name = "asyncpg"
 version = "0.27.0"
 description = "An asyncio PostgreSQL driver"
-category = "main"
 optional = false
 python-versions = ">=3.7.0"
 files = [
@@ -176,7 +170,6 @@ test = ["flake8 (>=5.0.4,<5.1.0)", "uvloop (>=0.15.3)"]
 name = "attrs"
 version = "21.4.0"
 description = "Classes Without Boilerplate"
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
 files = [
@@ -194,7 +187,6 @@ tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy"
 name = "aws-sam-translator"
 version = "1.48.0"
 description = "AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates"
-category = "main"
 optional = false
 python-versions = ">=3.7, <=4.0, !=4.0"
 files = [
@@ -204,7 +196,7 @@ files = [
 ]
 
 [package.dependencies]
-boto3 = ">=1.19.5,<2.0.0"
+boto3 = ">=1.19.5,<2.dev0"
 jsonschema = ">=3.2,<4.0"
 
 [package.extras]
@@ -214,7 +206,6 @@ dev = ["black (==20.8b1)", "boto3 (>=1.23,<2)", "click (>=7.1,<8.0)", "coverage
 name = "aws-xray-sdk"
 version = "2.10.0"
 description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers to record and emit information from within their applications to the AWS X-Ray service."
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -230,7 +221,6 @@ wrapt = "*"
 name = "backoff"
 version = "2.2.1"
 description = "Function decoration for backoff and retry"
-category = "main"
 optional = false
 python-versions = ">=3.7,<4.0"
 files = [
@@ -242,7 +232,6 @@ files = [
 name = "black"
 version = "23.3.0"
 description = "The uncompromising code formatter."
-category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -292,7 +281,6 @@ uvloop = ["uvloop (>=0.15.2)"]
 name = "boto3"
 version = "1.26.16"
 description = "The AWS SDK for Python"
-category = "main"
 optional = false
 python-versions = ">= 3.7"
 files = [
@@ -312,7 +300,6 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
 name = "boto3-stubs"
 version = "1.26.16"
 description = "Type annotations for boto3 1.26.16 generated with mypy-boto3-builder 7.11.11"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -657,7 +644,6 @@ xray = ["mypy-boto3-xray (>=1.26.0,<1.27.0)"]
 name = "botocore"
 version = "1.29.16"
 description = "Low-level, data-driven core of boto 3."
-category = "main"
 optional = false
 python-versions = ">= 3.7"
 files = [
@@ -677,7 +663,6 @@ crt = ["awscrt (==0.14.0)"]
 name = "botocore-stubs"
 version = "1.27.38"
 description = "Type annotations for botocore 1.27.38 generated with mypy-boto3-builder 7.10.1"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -692,7 +677,6 @@ typing-extensions = ">=4.1.0"
 name = "certifi"
 version = "2022.12.7"
 description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -704,7 +688,6 @@ files = [
 name = "cffi"
 version = "1.15.1"
 description = "Foreign Function Interface for Python calling C code."
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -781,7 +764,6 @@ pycparser = "*"
 name = "cfn-lint"
 version = "0.61.3"
 description = "Checks CloudFormation templates for practices and behaviour that could potentially be improved"
-category = "main"
 optional = false
 python-versions = ">=3.6, <=4.0, !=4.0"
 files = [
@@ -803,7 +785,6 @@ sarif-om = ">=1.0.4,<1.1.0"
 name = "chardet"
 version = "3.0.4"
 description = "Universal encoding detector for Python 2 and 3"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -815,7 +796,6 @@ files = [
 name = "charset-normalizer"
 version = "2.1.0"
 description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
 optional = false
 python-versions = ">=3.6.0"
 files = [
@@ -830,7 +810,6 @@ unicode-backport = ["unicodedata2"]
 name = "click"
 version = "8.1.3"
 description = "Composable command line interface toolkit"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -845,7 +824,6 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""}
 name = "colorama"
 version = "0.4.5"
 description = "Cross-platform colored terminal text."
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
 files = [
@@ -855,31 +833,34 @@ files = [
 
 [[package]]
 name = "cryptography"
-version = "41.0.0"
+version = "41.0.2"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "cryptography-41.0.0.tar.gz", hash = "sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78"},
+    {file = "cryptography-41.0.2.tar.gz", hash = "sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c"},
 ]
 
 [package.dependencies]
@@ -899,7 +880,6 @@ test-randomorder = ["pytest-randomly"]
 name = "docker"
 version = "4.2.2"
 description = "A Python library for the Docker Engine API."
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
 files = [
@@ -921,7 +901,6 @@ tls = ["cryptography (>=1.3.4)", "idna (>=2.0.0)", "pyOpenSSL (>=17.5.0)"]
 name = "ecdsa"
 version = "0.18.0"
 description = "ECDSA cryptographic signature library (pure python)"
-category = "main"
 optional = false
 python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
 files = [
@@ -940,7 +919,6 @@ gmpy2 = ["gmpy2"]
 name = "exceptiongroup"
 version = "1.1.1"
 description = "Backport of PEP 654 (exception groups)"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -955,7 +933,6 @@ test = ["pytest (>=6)"]
 name = "execnet"
 version = "1.9.0"
 description = "execnet: rapid multi-Python deployment"
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
 files = [
@@ -970,7 +947,6 @@ testing = ["pre-commit"]
 name = "flask"
 version = "2.2.5"
 description = "A simple framework for building complex web applications."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -993,7 +969,6 @@ dotenv = ["python-dotenv"]
 name = "flask-cors"
 version = "3.0.10"
 description = "A Flask extension adding a decorator for CORS support"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -1009,7 +984,6 @@ Six = "*"
 name = "graphql-core"
 version = "3.2.1"
 description = "GraphQL implementation for Python, a port of GraphQL.js, the JavaScript reference implementation for GraphQL."
-category = "main"
 optional = false
 python-versions = ">=3.6,<4"
 files = [
@@ -1021,7 +995,6 @@ files = [
 name = "idna"
 version = "3.3"
 description = "Internationalized Domain Names in Applications (IDNA)"
-category = "main"
 optional = false
 python-versions = ">=3.5"
 files = [
@@ -1033,7 +1006,6 @@ files = [
 name = "importlib-metadata"
 version = "4.12.0"
 description = "Read metadata from Python packages"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1053,7 +1025,6 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs
 name = "iniconfig"
 version = "1.1.1"
 description = "iniconfig: brain-dead simple config-ini parsing"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -1065,7 +1036,6 @@ files = [
 name = "itsdangerous"
 version = "2.1.2"
 description = "Safely pass data to untrusted environments and back."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1077,7 +1047,6 @@ files = [
 name = "jinja2"
 version = "3.1.2"
 description = "A very fast and expressive template engine."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1095,7 +1064,6 @@ i18n = ["Babel (>=2.7)"]
 name = "jmespath"
 version = "1.0.1"
 description = "JSON Matching Expressions"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1107,7 +1075,6 @@ files = [
 name = "jschema-to-python"
 version = "1.2.3"
 description = "Generate source code for Python classes from a JSON schema."
-category = "main"
 optional = false
 python-versions = ">= 2.7"
 files = [
@@ -1124,7 +1091,6 @@ pbr = "*"
 name = "jsondiff"
 version = "2.0.0"
 description = "Diff JSON and JSON-like structures in Python"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -1136,7 +1102,6 @@ files = [
 name = "jsonpatch"
 version = "1.32"
 description = "Apply JSON-Patches (RFC 6902)"
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
 files = [
@@ -1151,7 +1116,6 @@ jsonpointer = ">=1.9"
 name = "jsonpickle"
 version = "2.2.0"
 description = "Python library for serializing any arbitrary object graph into JSON"
-category = "main"
 optional = false
 python-versions = ">=2.7"
 files = [
@@ -1168,7 +1132,6 @@ testing-libs = ["simplejson", "ujson", "yajl"]
 name = "jsonpointer"
 version = "2.3"
 description = "Identify specific nodes in a JSON document (RFC 6901)"
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
 files = [
@@ -1180,7 +1143,6 @@ files = [
 name = "jsonschema"
 version = "3.2.0"
 description = "An implementation of JSON Schema validation for Python"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -1202,7 +1164,6 @@ format-nongpl = ["idna", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-va
 name = "junit-xml"
 version = "1.9"
 description = "Creates JUnit XML test result documents that can be read by tools such as Jenkins"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -1217,7 +1178,6 @@ six = "*"
 name = "markupsafe"
 version = "2.1.1"
 description = "Safely add untrusted strings to HTML/XML markup."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1267,7 +1227,6 @@ files = [
 name = "moto"
 version = "4.1.2"
 description = ""
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1328,7 +1287,6 @@ xray = ["aws-xray-sdk (>=0.93,!=0.96)", "setuptools"]
 name = "multidict"
 version = "6.0.4"
 description = "multidict implementation"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1412,7 +1370,6 @@ files = [
 name = "mypy"
 version = "1.3.0"
 description = "Optional static typing for Python"
-category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1459,7 +1416,6 @@ reports = ["lxml"]
 name = "mypy-boto3-s3"
 version = "1.26.0.post1"
 description = "Type annotations for boto3.S3 1.26.0 service generated with mypy-boto3-builder 7.11.10"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1474,7 +1430,6 @@ typing-extensions = ">=4.1.0"
 name = "mypy-extensions"
 version = "1.0.0"
 description = "Type system extensions for programs checked with the mypy type checker."
-category = "dev"
 optional = false
 python-versions = ">=3.5"
 files = [
@@ -1486,7 +1441,6 @@ files = [
 name = "networkx"
 version = "2.8.5"
 description = "Python package for creating and manipulating graphs and networks"
-category = "main"
 optional = false
 python-versions = ">=3.8"
 files = [
@@ -1505,7 +1459,6 @@ test = ["codecov (>=2.1)", "pytest (>=7.1)", "pytest-cov (>=3.0)"]
 name = "openapi-schema-validator"
 version = "0.2.3"
 description = "OpenAPI schema validation for Python"
-category = "main"
 optional = false
 python-versions = ">=3.7.0,<4.0.0"
 files = [
@@ -1525,7 +1478,6 @@ strict-rfc3339 = ["strict-rfc3339"]
 name = "openapi-spec-validator"
 version = "0.4.0"
 description = "OpenAPI 2.0 (aka Swagger) and OpenAPI 3.0 spec validator"
-category = "main"
 optional = false
 python-versions = ">=3.7.0,<4.0.0"
 files = [
@@ -1546,7 +1498,6 @@ requests = ["requests"]
 name = "packaging"
 version = "23.0"
 description = "Core utilities for Python packages"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1558,7 +1509,6 @@ files = [
 name = "pathspec"
 version = "0.9.0"
 description = "Utility library for gitignore style pattern matching of file paths."
-category = "dev"
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
 files = [
@@ -1570,7 +1520,6 @@ files = [
 name = "pbr"
 version = "5.9.0"
 description = "Python Build Reasonableness"
-category = "main"
 optional = false
 python-versions = ">=2.6"
 files = [
@@ -1582,7 +1531,6 @@ files = [
 name = "platformdirs"
 version = "2.5.2"
 description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
-category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1598,7 +1546,6 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock
 name = "pluggy"
 version = "1.0.0"
 description = "plugin and hook calling mechanisms for python"
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -1614,7 +1561,6 @@ testing = ["pytest", "pytest-benchmark"]
 name = "prometheus-client"
 version = "0.14.1"
 description = "Python client for the Prometheus monitoring system."
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -1629,7 +1575,6 @@ twisted = ["twisted"]
 name = "psutil"
 version = "5.9.4"
 description = "Cross-platform lib for process and system monitoring in Python."
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
 files = [
@@ -1656,7 +1601,6 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
 name = "psycopg2-binary"
 version = "2.9.6"
 description = "psycopg2 - Python-PostgreSQL Database Adapter"
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -1728,7 +1672,6 @@ files = [
 name = "pyasn1"
 version = "0.4.8"
 description = "ASN.1 types and codecs"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -1740,7 +1683,6 @@ files = [
 name = "pycparser"
 version = "2.21"
 description = "C parser in Python"
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
 files = [
@@ -1752,7 +1694,6 @@ files = [
 name = "pyjwt"
 version = "2.4.0"
 description = "JSON Web Token implementation in Python"
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -1773,7 +1714,6 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
 name = "pyparsing"
 version = "3.0.9"
 description = "pyparsing module - Classes and methods to define and execute parsing grammars"
-category = "main"
 optional = false
 python-versions = ">=3.6.8"
 files = [
@@ -1788,7 +1728,6 @@ diagrams = ["jinja2", "railroad-diagrams"]
 name = "pypiwin32"
 version = "223"
 description = ""
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -1803,7 +1742,6 @@ pywin32 = ">=223"
 name = "pyrsistent"
 version = "0.18.1"
 description = "Persistent/Functional/Immutable data structures"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1834,7 +1772,6 @@ files = [
 name = "pytest"
 version = "7.3.1"
 description = "pytest: simple powerful testing with Python"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1857,7 +1794,6 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no
 name = "pytest-asyncio"
 version = "0.21.0"
 description = "Pytest support for asyncio"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1876,7 +1812,6 @@ testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy
 name = "pytest-httpserver"
 version = "1.0.8"
 description = "pytest-httpserver is a httpserver for pytest"
-category = "main"
 optional = false
 python-versions = ">=3.8,<4.0"
 files = [
@@ -1891,7 +1826,6 @@ Werkzeug = ">=2.0.0"
 name = "pytest-lazy-fixture"
 version = "0.6.3"
 description = "It helps to use fixtures in pytest.mark.parametrize"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -1906,7 +1840,6 @@ pytest = ">=3.2.5"
 name = "pytest-order"
 version = "1.1.0"
 description = "pytest plugin to run your tests in a specific order"
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -1924,7 +1857,6 @@ pytest = [
 name = "pytest-rerunfailures"
 version = "11.1.2"
 description = "pytest plugin to re-run tests to eliminate flaky failures"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1936,11 +1868,24 @@ files = [
 packaging = ">=17.1"
 pytest = ">=5.3"
 
+[[package]]
+name = "pytest-split"
+version = "0.8.1"
+description = "Pytest plugin which splits the test suite to equally sized sub suites based on test execution time."
+optional = false
+python-versions = ">=3.7.1,<4.0"
+files = [
+    {file = "pytest_split-0.8.1-py3-none-any.whl", hash = "sha256:74b110ea091bd147cc1c5f9665a59506e5cedfa66f96a89fb03e4ab447c2c168"},
+    {file = "pytest_split-0.8.1.tar.gz", hash = "sha256:2d88bd3dc528689a7a3f58fc12ea165c3aa62e90795e420dfad920afe5612d6d"},
+]
+
+[package.dependencies]
+pytest = ">=5,<8"
+
 [[package]]
 name = "pytest-timeout"
 version = "2.1.0"
 description = "pytest plugin to abort hanging tests"
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -1955,7 +1900,6 @@ pytest = ">=5.0.0"
 name = "pytest-xdist"
 version = "3.3.1"
 description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -1976,7 +1920,6 @@ testing = ["filelock"]
 name = "python-dateutil"
 version = "2.8.2"
 description = "Extensions to the standard Python datetime module"
-category = "main"
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
 files = [
@@ -1991,7 +1934,6 @@ six = ">=1.5"
 name = "python-jose"
 version = "3.3.0"
 description = "JOSE implementation in Python"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -2014,7 +1956,6 @@ pycryptodome = ["pyasn1", "pycryptodome (>=3.3.1,<4.0.0)"]
 name = "pywin32"
 version = "301"
 description = "Python for Window Extensions"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -2034,7 +1975,6 @@ files = [
 name = "pyyaml"
 version = "6.0"
 description = "YAML parser and emitter for Python"
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -2084,7 +2024,6 @@ files = [
 name = "requests"
 version = "2.31.0"
 description = "Python HTTP for Humans."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2106,7 +2045,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
 name = "responses"
 version = "0.21.0"
 description = "A utility library for mocking out the `requests` Python library."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2125,7 +2063,6 @@ tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asy
 name = "rsa"
 version = "4.9"
 description = "Pure-Python RSA implementation"
-category = "main"
 optional = false
 python-versions = ">=3.6,<4"
 files = [
@@ -2140,7 +2077,6 @@ pyasn1 = ">=0.1.3"
 name = "ruff"
 version = "0.0.269"
 description = "An extremely fast Python linter, written in Rust."
-category = "dev"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2167,7 +2103,6 @@ files = [
 name = "s3transfer"
 version = "0.6.0"
 description = "An Amazon S3 Transfer Manager"
-category = "main"
 optional = false
 python-versions = ">= 3.7"
 files = [
@@ -2185,7 +2120,6 @@ crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"]
 name = "sarif-om"
 version = "1.0.4"
 description = "Classes implementing the SARIF 2.1.0 object model."
-category = "main"
 optional = false
 python-versions = ">= 2.7"
 files = [
@@ -2201,7 +2135,6 @@ pbr = "*"
 name = "setuptools"
 version = "65.5.1"
 description = "Easily download, build, install, upgrade, and uninstall Python packages"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2218,7 +2151,6 @@ testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (
 name = "six"
 version = "1.16.0"
 description = "Python 2 and 3 compatibility utilities"
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
 files = [
@@ -2230,7 +2162,6 @@ files = [
 name = "sshpubkeys"
 version = "3.3.1"
 description = "SSH public key parser"
-category = "main"
 optional = false
 python-versions = ">=3"
 files = [
@@ -2249,7 +2180,6 @@ dev = ["twine", "wheel", "yapf"]
 name = "toml"
 version = "0.10.2"
 description = "Python Library for Tom's Obvious, Minimal Language"
-category = "main"
 optional = false
 python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
 files = [
@@ -2261,7 +2191,6 @@ files = [
 name = "tomli"
 version = "2.0.1"
 description = "A lil' TOML parser"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2273,7 +2202,6 @@ files = [
 name = "types-psutil"
 version = "5.9.5.12"
 description = "Typing stubs for psutil"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -2285,7 +2213,6 @@ files = [
 name = "types-psycopg2"
 version = "2.9.21.10"
 description = "Typing stubs for psycopg2"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -2297,7 +2224,6 @@ files = [
 name = "types-pytest-lazy-fixture"
 version = "0.6.3.3"
 description = "Typing stubs for pytest-lazy-fixture"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -2309,7 +2235,6 @@ files = [
 name = "types-requests"
 version = "2.31.0.0"
 description = "Typing stubs for requests"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -2324,7 +2249,6 @@ types-urllib3 = "*"
 name = "types-s3transfer"
 version = "0.6.0.post3"
 description = "Type annotations and code completion for s3transfer"
-category = "main"
 optional = false
 python-versions = ">=3.7,<4.0"
 files = [
@@ -2336,7 +2260,6 @@ files = [
 name = "types-toml"
 version = "0.10.8.6"
 description = "Typing stubs for toml"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -2348,7 +2271,6 @@ files = [
 name = "types-urllib3"
 version = "1.26.17"
 description = "Typing stubs for urllib3"
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -2360,7 +2282,6 @@ files = [
 name = "typing-extensions"
 version = "4.6.1"
 description = "Backported and Experimental Type Hints for Python 3.7+"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2372,7 +2293,6 @@ files = [
 name = "urllib3"
 version = "1.26.11"
 description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
 files = [
@@ -2389,7 +2309,6 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
 name = "websocket-client"
 version = "1.3.3"
 description = "WebSocket client for Python with low level API options"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2406,7 +2325,6 @@ test = ["websockets"]
 name = "werkzeug"
 version = "2.2.3"
 description = "The comprehensive WSGI web application library."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2424,7 +2342,6 @@ watchdog = ["watchdog"]
 name = "wrapt"
 version = "1.14.1"
 description = "Module for decorators, wrappers and monkey patching."
-category = "main"
 optional = false
 python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
 files = [
@@ -2498,7 +2415,6 @@ files = [
 name = "xmltodict"
 version = "0.13.0"
 description = "Makes working with XML feel like you are working with JSON"
-category = "main"
 optional = false
 python-versions = ">=3.4"
 files = [
@@ -2510,7 +2426,6 @@ files = [
 name = "yarl"
 version = "1.8.2"
 description = "Yet another URL library"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2598,7 +2513,6 @@ multidict = ">=4.0"
 name = "zipp"
 version = "3.8.1"
 description = "Backport of pathlib-compatible object wrapper for zip files"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -2613,4 +2527,4 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "c6c217033f50430c31b0979b74db222e6bab2301abd8b9f0cce5a9d5bccc578f"
+content-hash = "e16a65d8fdff4e2173610e552e0e7306e301de2c640ae6082ef6cc5755f566d2"
@@ -5,6 +5,7 @@
 /// the outside. Similar to an ingress controller for HTTPS.
 use std::{net::SocketAddr, sync::Arc};
 
+use futures::future::Either;
 use tokio::net::TcpListener;
 
 use anyhow::{anyhow, bail, ensure, Context};
@@ -109,20 +110,25 @@ async fn main() -> anyhow::Result<()> {
 
     let cancellation_token = CancellationToken::new();
 
-    let main = proxy::flatten_err(tokio::spawn(task_main(
+    let main = tokio::spawn(task_main(
         Arc::new(destination),
         tls_config,
         proxy_listener,
         cancellation_token.clone(),
-    )));
-    let signals_task = proxy::flatten_err(tokio::spawn(proxy::handle_signals(cancellation_token)));
+    ));
+    let signals_task = tokio::spawn(proxy::handle_signals(cancellation_token));
 
-    tokio::select! {
-        res = main => { res?; },
-        res = signals_task => { res?; },
-    }
+    // the signal task cant ever succeed.
+    // the main task can error, or can succeed on cancellation.
+    // we want to immediately exit on either of these cases
+    let signal = match futures::future::select(signals_task, main).await {
+        Either::Left((res, _)) => proxy::flatten_err(res)?,
+        Either::Right((res, _)) => return proxy::flatten_err(res),
+    };
 
-    Ok(())
+    // maintenance tasks return `Infallible` success values, this is an impossible value
+    // so this match statically ensures that there are no possibilities for that value
+    match signal {}
 }
 
 async fn task_main(
@@ -1,13 +1,15 @@
+use futures::future::Either;
 use proxy::auth;
 use proxy::console;
 use proxy::http;
 use proxy::metrics;
 
 use anyhow::bail;
-use clap::{self, Arg};
 use proxy::config::{self, ProxyConfig};
+use std::pin::pin;
 use std::{borrow::Cow, net::SocketAddr};
 use tokio::net::TcpListener;
+use tokio::task::JoinSet;
 use tokio_util::sync::CancellationToken;
 use tracing::info;
 use tracing::warn;
@@ -15,6 +17,70 @@ use utils::{project_git_version, sentry_init::init_sentry};
 
 project_git_version!(GIT_VERSION);
 
+use clap::{Parser, ValueEnum};
+
+#[derive(Clone, Debug, ValueEnum)]
+enum AuthBackend {
+    Console,
+    Postgres,
+    Link,
+}
+
+/// Neon proxy/router
+#[derive(Parser)]
+#[command(version = GIT_VERSION, about)]
+struct ProxyCliArgs {
+    /// listen for incoming client connections on ip:port
+    #[clap(short, long, default_value = "127.0.0.1:4432")]
+    proxy: String,
+    #[clap(value_enum, long, default_value_t = AuthBackend::Link)]
+    auth_backend: AuthBackend,
+    /// listen for management callback connection on ip:port
+    #[clap(short, long, default_value = "127.0.0.1:7000")]
+    mgmt: String,
+    /// listen for incoming http connections (metrics, etc) on ip:port
+    #[clap(long, default_value = "127.0.0.1:7001")]
+    http: String,
+    /// listen for incoming wss connections on ip:port
+    #[clap(long)]
+    wss: Option<String>,
+    /// redirect unauthenticated users to the given uri in case of link auth
+    #[clap(short, long, default_value = "http://localhost:3000/psql_session/")]
+    uri: String,
+    /// cloud API endpoint for authenticating users
+    #[clap(
+        short,
+        long,
+        default_value = "http://localhost:3000/authenticate_proxy_request/"
+    )]
+    auth_endpoint: String,
+    /// path to TLS key for client postgres connections
+    ///
+    /// tls-key and tls-cert are for backwards compatibility, we can put all certs in one dir
+    #[clap(short = 'k', long, alias = "ssl-key")]
+    tls_key: Option<String>,
+    /// path to TLS cert for client postgres connections
+    ///
+    /// tls-key and tls-cert are for backwards compatibility, we can put all certs in one dir
+    #[clap(short = 'c', long, alias = "ssl-cert")]
+    tls_cert: Option<String>,
+    /// path to directory with TLS certificates for client postgres connections
+    #[clap(long)]
+    certs_dir: Option<String>,
+    /// http endpoint to receive periodic metric updates
+    #[clap(long)]
+    metric_collection_endpoint: Option<String>,
+    /// how often metrics should be sent to a collection endpoint
+    #[clap(long)]
+    metric_collection_interval: Option<String>,
+    /// cache for `wake_compute` api method (use `size=0` to disable)
+    #[clap(long, default_value = config::CacheOptions::DEFAULT_OPTIONS_NODE_INFO)]
+    wake_compute_cache: String,
+    /// Allow self-signed certificates for compute nodes (for testing)
+    #[clap(long, default_value_t = false, value_parser = clap::builder::BoolishValueParser::new(), action = clap::ArgAction::Set)]
+    allow_self_signed_compute: bool,
+}
+
 #[tokio::main]
 async fn main() -> anyhow::Result<()> {
     let _logging_guard = proxy::logging::init().await?;
@@ -24,90 +90,99 @@ async fn main() -> anyhow::Result<()> {
     info!("Version: {GIT_VERSION}");
     ::metrics::set_build_info_metric(GIT_VERSION);
 
-    let args = cli().get_matches();
+    let args = ProxyCliArgs::parse();
     let config = build_config(&args)?;
 
     info!("Authentication backend: {}", config.auth_backend);
 
     // Check that we can bind to address before further initialization
-    let http_address: SocketAddr = args.get_one::<String>("http").unwrap().parse()?;
+    let http_address: SocketAddr = args.http.parse()?;
     info!("Starting http on {http_address}");
     let http_listener = TcpListener::bind(http_address).await?.into_std()?;
 
-    let mgmt_address: SocketAddr = args.get_one::<String>("mgmt").unwrap().parse()?;
+    let mgmt_address: SocketAddr = args.mgmt.parse()?;
     info!("Starting mgmt on {mgmt_address}");
     let mgmt_listener = TcpListener::bind(mgmt_address).await?;
 
-    let proxy_address: SocketAddr = args.get_one::<String>("proxy").unwrap().parse()?;
+    let proxy_address: SocketAddr = args.proxy.parse()?;
     info!("Starting proxy on {proxy_address}");
     let proxy_listener = TcpListener::bind(proxy_address).await?;
     let cancellation_token = CancellationToken::new();
 
-    let mut client_tasks = vec![tokio::spawn(proxy::proxy::task_main(
+    // client facing tasks. these will exit on error or on cancellation
+    // cancellation returns Ok(())
+    let mut client_tasks = JoinSet::new();
+    client_tasks.spawn(proxy::proxy::task_main(
         config,
         proxy_listener,
         cancellation_token.clone(),
-    ))];
+    ));
 
-    if let Some(wss_address) = args.get_one::<String>("wss") {
+    if let Some(wss_address) = args.wss {
         let wss_address: SocketAddr = wss_address.parse()?;
         info!("Starting wss on {wss_address}");
         let wss_listener = TcpListener::bind(wss_address).await?;
 
-        client_tasks.push(tokio::spawn(http::websocket::task_main(
+        client_tasks.spawn(http::websocket::task_main(
             config,
             wss_listener,
             cancellation_token.clone(),
-        )));
+        ));
     }
 
-    let mut tasks = vec![
-        tokio::spawn(proxy::handle_signals(cancellation_token)),
-        tokio::spawn(http::server::task_main(http_listener)),
-        tokio::spawn(console::mgmt::task_main(mgmt_listener)),
-    ];
+    // maintenance tasks. these never return unless there's an error
+    let mut maintenance_tasks = JoinSet::new();
+    maintenance_tasks.spawn(proxy::handle_signals(cancellation_token));
+    maintenance_tasks.spawn(http::server::task_main(http_listener));
+    maintenance_tasks.spawn(console::mgmt::task_main(mgmt_listener));
 
     if let Some(metrics_config) = &config.metric_collection {
-        tasks.push(tokio::spawn(metrics::task_main(metrics_config)));
+        maintenance_tasks.spawn(metrics::task_main(metrics_config));
     }
 
-    let tasks = futures::future::try_join_all(tasks.into_iter().map(proxy::flatten_err));
-    let client_tasks =
-        futures::future::try_join_all(client_tasks.into_iter().map(proxy::flatten_err));
-    tokio::select! {
-        // We are only expecting an error from these forever tasks
-        res = tasks => { res?; },
-        res = client_tasks => { res?; },
-    }
-    Ok(())
+    let maintenance = loop {
+        // get one complete task
+        match futures::future::select(
+            pin!(maintenance_tasks.join_next()),
+            pin!(client_tasks.join_next()),
+        )
+        .await
+        {
+            // exit immediately on maintenance task completion
+            Either::Left((Some(res), _)) => break proxy::flatten_err(res)?,
+            // exit with error immediately if all maintenance tasks have ceased (should be caught by branch above)
+            Either::Left((None, _)) => bail!("no maintenance tasks running. invalid state"),
+            // exit immediately on client task error
+            Either::Right((Some(res), _)) => proxy::flatten_err(res)?,
+            // exit if all our client tasks have shutdown gracefully
+            Either::Right((None, _)) => return Ok(()),
+        }
+    };
+
+    // maintenance tasks return Infallible success values, this is an impossible value
+    // so this match statically ensures that there are no possibilities for that value
+    match maintenance {}
 }
 
 /// ProxyConfig is created at proxy startup, and lives forever.
-fn build_config(args: &clap::ArgMatches) -> anyhow::Result<&'static ProxyConfig> {
-    let tls_config = match (
-        args.get_one::<String>("tls-key"),
-        args.get_one::<String>("tls-cert"),
-    ) {
+fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> {
+    let tls_config = match (&args.tls_key, &args.tls_cert) {
         (Some(key_path), Some(cert_path)) => Some(config::configure_tls(
             key_path,
             cert_path,
-            args.get_one::<String>("certs-dir"),
+            args.certs_dir.as_ref(),
         )?),
         (None, None) => None,
         _ => bail!("either both or neither tls-key and tls-cert must be specified"),
     };
 
-    let allow_self_signed_compute: bool = args
-        .get_one::<String>("allow-self-signed-compute")
-        .unwrap()
-        .parse()?;
-    if allow_self_signed_compute {
+    if args.allow_self_signed_compute {
         warn!("allowing self-signed compute certificates");
     }
 
     let metric_collection = match (
-        args.get_one::<String>("metric-collection-endpoint"),
-        args.get_one::<String>("metric-collection-interval"),
+        &args.metric_collection_endpoint,
+        &args.metric_collection_interval,
     ) {
         (Some(endpoint), Some(interval)) => Some(config::MetricCollectionConfig {
             endpoint: endpoint.parse()?,
@@ -120,145 +195,38 @@ fn build_config(args: &clap::ArgMatches) -> anyhow::Result<&'static ProxyConfig>
|
|||||||
),
|
),
|
||||||
};
|
};
|
||||||
|
|
||||||
let auth_backend = match args.get_one::<String>("auth-backend").unwrap().as_str() {
|
let auth_backend = match &args.auth_backend {
|
||||||
"console" => {
|
AuthBackend::Console => {
|
||||||
let config::CacheOptions { size, ttl } = args
|
let config::CacheOptions { size, ttl } = args.wake_compute_cache.parse()?;
|
||||||
.get_one::<String>("wake-compute-cache")
|
|
||||||
.unwrap()
|
|
||||||
.parse()?;
|
|
||||||
|
|
||||||
info!("Using NodeInfoCache (wake_compute) with size={size} ttl={ttl:?}");
|
info!("Using NodeInfoCache (wake_compute) with size={size} ttl={ttl:?}");
|
||||||
let caches = Box::leak(Box::new(console::caches::ApiCaches {
|
let caches = Box::leak(Box::new(console::caches::ApiCaches {
|
||||||
node_info: console::caches::NodeInfoCache::new("node_info_cache", size, ttl),
|
node_info: console::caches::NodeInfoCache::new("node_info_cache", size, ttl),
|
||||||
}));
|
}));
|
||||||
|
|
||||||
let url = args.get_one::<String>("auth-endpoint").unwrap().parse()?;
|
let url = args.auth_endpoint.parse()?;
|
||||||
let endpoint = http::Endpoint::new(url, http::new_client());
|
let endpoint = http::Endpoint::new(url, http::new_client());
|
||||||
|
|
||||||
let api = console::provider::neon::Api::new(endpoint, caches);
|
let api = console::provider::neon::Api::new(endpoint, caches);
|
||||||
auth::BackendType::Console(Cow::Owned(api), ())
|
auth::BackendType::Console(Cow::Owned(api), ())
|
||||||
}
|
}
|
||||||
"postgres" => {
|
AuthBackend::Postgres => {
|
||||||
let url = args.get_one::<String>("auth-endpoint").unwrap().parse()?;
|
let url = args.auth_endpoint.parse()?;
|
||||||
let api = console::provider::mock::Api::new(url);
|
let api = console::provider::mock::Api::new(url);
|
||||||
auth::BackendType::Postgres(Cow::Owned(api), ())
|
auth::BackendType::Postgres(Cow::Owned(api), ())
|
||||||
}
|
}
|
||||||
"link" => {
|
AuthBackend::Link => {
|
||||||
let url = args.get_one::<String>("uri").unwrap().parse()?;
|
let url = args.uri.parse()?;
|
||||||
auth::BackendType::Link(Cow::Owned(url))
|
auth::BackendType::Link(Cow::Owned(url))
|
||||||
}
|
}
|
||||||
other => bail!("unsupported auth backend: {other}"),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let config = Box::leak(Box::new(ProxyConfig {
|
let config = Box::leak(Box::new(ProxyConfig {
|
||||||
tls_config,
|
tls_config,
|
||||||
auth_backend,
|
auth_backend,
|
||||||
metric_collection,
|
metric_collection,
|
||||||
allow_self_signed_compute,
|
allow_self_signed_compute: args.allow_self_signed_compute,
|
||||||
}));
|
}));
|
||||||
|
|
||||||
Ok(config)
|
Ok(config)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn cli() -> clap::Command {
|
|
||||||
clap::Command::new("Neon proxy/router")
|
|
||||||
.disable_help_flag(true)
|
|
||||||
.version(GIT_VERSION)
|
|
||||||
.arg(
|
|
||||||
Arg::new("proxy")
|
|
||||||
.short('p')
|
|
||||||
.long("proxy")
|
|
||||||
.help("listen for incoming client connections on ip:port")
|
|
||||||
.default_value("127.0.0.1:4432"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("auth-backend")
|
|
||||||
.long("auth-backend")
|
|
||||||
.value_parser(["console", "postgres", "link"])
|
|
||||||
.default_value("link"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("mgmt")
|
|
||||||
.short('m')
|
|
||||||
.long("mgmt")
|
|
||||||
.help("listen for management callback connection on ip:port")
|
|
||||||
.default_value("127.0.0.1:7000"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("http")
|
|
||||||
.long("http")
|
|
||||||
.help("listen for incoming http connections (metrics, etc) on ip:port")
|
|
||||||
.default_value("127.0.0.1:7001"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("wss")
|
|
||||||
.long("wss")
|
|
||||||
.help("listen for incoming wss connections on ip:port"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("uri")
|
|
||||||
.short('u')
|
|
||||||
.long("uri")
|
|
||||||
.help("redirect unauthenticated users to the given uri in case of link auth")
|
|
||||||
.default_value("http://localhost:3000/psql_session/"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("auth-endpoint")
|
|
||||||
.short('a')
|
|
||||||
.long("auth-endpoint")
|
|
||||||
.help("cloud API endpoint for authenticating users")
|
|
||||||
.default_value("http://localhost:3000/authenticate_proxy_request/"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("tls-key")
|
|
||||||
.short('k')
|
|
||||||
.long("tls-key")
|
|
||||||
.alias("ssl-key") // backwards compatibility
|
|
||||||
.help("path to TLS key for client postgres connections"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("tls-cert")
|
|
||||||
.short('c')
|
|
||||||
.long("tls-cert")
|
|
||||||
.alias("ssl-cert") // backwards compatibility
|
|
||||||
.help("path to TLS cert for client postgres connections"),
|
|
||||||
)
|
|
||||||
// tls-key and tls-cert are for backwards compatibility, we can put all certs in one dir
|
|
||||||
.arg(
|
|
||||||
Arg::new("certs-dir")
|
|
||||||
.long("certs-dir")
|
|
||||||
.help("path to directory with TLS certificates for client postgres connections"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("metric-collection-endpoint")
|
|
||||||
.long("metric-collection-endpoint")
|
|
||||||
.help("http endpoint to receive periodic metric updates"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("metric-collection-interval")
|
|
||||||
.long("metric-collection-interval")
|
|
||||||
.help("how often metrics should be sent to a collection endpoint"),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("wake-compute-cache")
|
|
||||||
.long("wake-compute-cache")
|
|
||||||
.help("cache for `wake_compute` api method (use `size=0` to disable)")
|
|
||||||
.default_value(config::CacheOptions::DEFAULT_OPTIONS_NODE_INFO),
|
|
||||||
)
|
|
||||||
.arg(
|
|
||||||
Arg::new("allow-self-signed-compute")
|
|
||||||
.long("allow-self-signed-compute")
|
|
||||||
.help("Allow self-signed certificates for compute nodes (for testing)")
|
|
||||||
.default_value("false"),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn verify_cli() {
|
|
||||||
cli().debug_assert();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
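The removed builder-style `cli()` gives way to a derived `ProxyCliArgs` struct, whose definition is not shown in this excerpt. The sketch below shows the general clap-derive shape that `build_config()` relies on; every field name and attribute here is an assumption inferred from how the new `build_config()` reads them, not the repository's actual struct.

```rust
// Hypothetical sketch of a clap-derive argument struct replacing the builder API.
use clap::{Parser, ValueEnum};

#[derive(Clone, Copy, Debug, ValueEnum)]
enum AuthBackend {
    Console,
    Postgres,
    Link,
}

#[derive(Parser)]
#[command(name = "Neon proxy/router", disable_help_flag = true)]
struct ProxyCliArgs {
    /// listen for incoming client connections on ip:port
    #[arg(short, long, default_value = "127.0.0.1:4432")]
    proxy: String,
    #[arg(long, value_enum, default_value = "link")]
    auth_backend: AuthBackend,
    /// path to TLS key for client postgres connections
    #[arg(short = 'k', long, alias = "ssl-key")]
    tls_key: Option<String>,
    /// path to TLS cert for client postgres connections
    #[arg(short = 'c', long, alias = "ssl-cert")]
    tls_cert: Option<String>,
    /// Allow self-signed certificates for compute nodes (for testing)
    #[arg(long)]
    allow_self_signed_compute: bool,
}

fn main() {
    let args = ProxyCliArgs::parse();
    if args.allow_self_signed_compute {
        eprintln!("allowing self-signed compute certificates");
    }
}
```

With derive, `clap` generates the parser and help text from the struct, so typed fields such as `args.tls_key` replace the string lookups (`get_one::<String>(...)`) used before.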
@@ -262,24 +262,21 @@ pub mod timed_lru {
         token: Option<(C, C::LookupInfo<C::Key>)>,

         /// The value itself.
-        pub value: C::Value,
+        value: C::Value,
     }

     impl<C: Cache> Cached<C> {
         /// Place any entry into this wrapper; invalidation will be a no-op.
-        /// Unfortunately, rust doesn't let us implement [`From`] or [`Into`].
-        pub fn new_uncached(value: impl Into<C::Value>) -> Self {
-            Self {
-                token: None,
-                value: value.into(),
-            }
+        pub fn new_uncached(value: C::Value) -> Self {
+            Self { token: None, value }
         }

         /// Drop this entry from a cache if it's still there.
-        pub fn invalidate(&self) {
+        pub fn invalidate(self) -> C::Value {
             if let Some((cache, info)) = &self.token {
                 cache.invalidate(info);
             }
+            self.value
         }

         /// Tell if this entry is actually cached.
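The `invalidate` change above is an ownership change: instead of borrowing the wrapper, invalidation now consumes it and hands the value back to the caller. A stand-alone sketch of that pattern, with simplified stand-in types rather than the proxy's real `Cache` trait:

```rust
// Sketch: a cached-entry wrapper whose invalidate() consumes self and returns the value.
struct CachedEntry<V> {
    token: Option<String>, // stand-in for (cache handle, lookup info)
    value: V,
}

impl<V> CachedEntry<V> {
    fn new_uncached(value: V) -> Self {
        Self { token: None, value }
    }

    /// Invalidate the cache entry (if any) and return the owned value.
    fn invalidate(self) -> V {
        if let Some(key) = &self.token {
            println!("evicting cache entry {key}");
        }
        self.value
    }
}

fn main() {
    let entry = CachedEntry::new_uncached(vec![1, 2, 3]);
    let owned: Vec<i32> = entry.invalidate(); // wrapper is gone, value survives
    assert_eq!(owned.len(), 3);
}
```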
@@ -110,7 +110,7 @@ impl<'a> Session<'a> {

 impl Session<'_> {
     /// Store the cancel token for the given session.
-    /// This enables query cancellation in [`crate::proxy::handshake`].
+    /// This enables query cancellation in `crate::proxy::prepare_client_connection`.
     pub fn enable_query_cancellation(self, cancel_closure: CancelClosure) -> CancelKeyData {
         info!("enabling query cancellation for this session");
         self.cancel_map
@@ -1,4 +1,9 @@
-use crate::{auth::parse_endpoint_param, cancellation::CancelClosure, error::UserFacingError};
+use crate::{
+    auth::parse_endpoint_param,
+    cancellation::CancelClosure,
+    console::errors::WakeComputeError,
+    error::{io_error, UserFacingError},
+};
 use futures::{FutureExt, TryFutureExt};
 use itertools::Itertools;
 use pq_proto::StartupMessageParams;
@@ -13,7 +18,7 @@ const COULD_NOT_CONNECT: &str = "Couldn't connect to compute node";
 #[derive(Debug, Error)]
 pub enum ConnectionError {
     /// This error doesn't seem to reveal any secrets; for instance,
-    /// [`tokio_postgres::error::Kind`] doesn't contain ip addresses and such.
+    /// `tokio_postgres::error::Kind` doesn't contain ip addresses and such.
     #[error("{COULD_NOT_CONNECT}: {0}")]
     Postgres(#[from] tokio_postgres::Error),
@@ -24,6 +29,12 @@ pub enum ConnectionError {
     TlsError(#[from] native_tls::Error),
 }

+impl From<WakeComputeError> for ConnectionError {
+    fn from(value: WakeComputeError) -> Self {
+        io_error(value).into()
+    }
+}
+
 impl UserFacingError for ConnectionError {
     fn to_string_client(&self) -> String {
         use ConnectionError::*;
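The new `From<WakeComputeError>` impl funnels control-plane errors into the existing io-backed variant of `ConnectionError` via the crate's `io_error` helper. A generic sketch of that wrapping trick, with stand-in error types and an assumed `thiserror` dependency (the real helper lives in the proxy's error module):

```rust
// Sketch: convert a domain error by wrapping it in std::io::Error so an existing
// #[from] variant can absorb it.
use std::io;

#[derive(Debug, thiserror::Error)]
#[error("could not wake compute: {0}")]
struct WakeComputeError(String);

#[derive(Debug, thiserror::Error)]
enum ConnectionError {
    #[error("couldn't connect: {0}")]
    Io(#[from] io::Error),
}

fn io_error(e: impl Into<Box<dyn std::error::Error + Send + Sync>>) -> io::Error {
    io::Error::new(io::ErrorKind::Other, e)
}

impl From<WakeComputeError> for ConnectionError {
    fn from(value: WakeComputeError) -> Self {
        io_error(value).into()
    }
}

fn main() {
    let err: ConnectionError = WakeComputeError("control plane timed out".into()).into();
    println!("{err}");
}
```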
@@ -211,7 +211,7 @@ pub struct CacheOptions {
 }

 impl CacheOptions {
-    /// Default options for [`crate::auth::caches::NodeInfoCache`].
+    /// Default options for [`crate::console::provider::NodeInfoCache`].
     pub const DEFAULT_OPTIONS_NODE_INFO: &str = "size=4000,ttl=4m";

     /// Parse cache options passed via cmdline.
@@ -6,7 +6,7 @@ use anyhow::Context;
 use once_cell::sync::Lazy;
 use postgres_backend::{self, AuthType, PostgresBackend, PostgresBackendTCP, QueryError};
 use pq_proto::{BeMessage, SINGLE_COL_ROWDESC};
-use std::future;
+use std::{convert::Infallible, future};
 use tokio::net::{TcpListener, TcpStream};
 use tracing::{error, info, info_span, Instrument};
@@ -31,7 +31,7 @@ pub fn notify(psql_session_id: &str, msg: ComputeReady) -> Result<(), waiters::N

 /// Console management API listener task.
 /// It spawns console response handlers needed for the link auth.
-pub async fn task_main(listener: TcpListener) -> anyhow::Result<()> {
+pub async fn task_main(listener: TcpListener) -> anyhow::Result<Infallible> {
     scopeguard::defer! {
         info!("mgmt has shut down");
     }
@@ -186,18 +186,18 @@ pub trait Api {
     async fn get_auth_info(
         &self,
         extra: &ConsoleReqExtra<'_>,
-        creds: &ClientCredentials<'_>,
+        creds: &ClientCredentials,
     ) -> Result<Option<AuthInfo>, errors::GetAuthInfoError>;

     /// Wake up the compute node and return the corresponding connection info.
     async fn wake_compute(
         &self,
         extra: &ConsoleReqExtra<'_>,
-        creds: &ClientCredentials<'_>,
+        creds: &ClientCredentials,
     ) -> Result<CachedNodeInfo, errors::WakeComputeError>;
 }

-/// Various caches for [`console`].
+/// Various caches for [`console`](super).
 pub struct ApiCaches {
     /// Cache for the `wake_compute` API method.
     pub node_info: NodeInfoCache,
@@ -106,7 +106,7 @@ impl super::Api for Api {
     async fn get_auth_info(
         &self,
         _extra: &ConsoleReqExtra<'_>,
-        creds: &ClientCredentials<'_>,
+        creds: &ClientCredentials,
     ) -> Result<Option<AuthInfo>, GetAuthInfoError> {
         self.do_get_auth_info(creds).await
     }
@@ -115,7 +115,7 @@ impl super::Api for Api {
     async fn wake_compute(
         &self,
         _extra: &ConsoleReqExtra<'_>,
-        _creds: &ClientCredentials<'_>,
+        _creds: &ClientCredentials,
     ) -> Result<CachedNodeInfo, WakeComputeError> {
         self.do_wake_compute()
             .map_ok(CachedNodeInfo::new_uncached)
@@ -123,7 +123,7 @@ impl super::Api for Api {
     async fn get_auth_info(
         &self,
         extra: &ConsoleReqExtra<'_>,
-        creds: &ClientCredentials<'_>,
+        creds: &ClientCredentials,
     ) -> Result<Option<AuthInfo>, GetAuthInfoError> {
         self.do_get_auth_info(extra, creds).await
     }
@@ -132,7 +132,7 @@ impl super::Api for Api {
     async fn wake_compute(
         &self,
         extra: &ConsoleReqExtra<'_>,
-        creds: &ClientCredentials<'_>,
+        creds: &ClientCredentials,
     ) -> Result<CachedNodeInfo, WakeComputeError> {
         let key = creds.project().expect("impossible");

@@ -1,17 +1,17 @@
+use anyhow::Context;
+use async_trait::async_trait;
 use parking_lot::Mutex;
 use pq_proto::StartupMessageParams;
 use std::fmt;
 use std::{collections::HashMap, sync::Arc};
+use tokio::time;

-use futures::TryFutureExt;
-
-use crate::config;
 use crate::{auth, console};
+use crate::{compute, config};

 use super::sql_over_http::MAX_RESPONSE_SIZE;

-use crate::proxy::invalidate_cache;
-use crate::proxy::NUM_RETRIES_WAKE_COMPUTE;
+use crate::proxy::ConnectMechanism;

 use tracing::error;
 use tracing::info;
@@ -185,11 +185,31 @@ impl GlobalConnPool {
     }
 }

-//
+struct TokioMechanism<'a> {
+    conn_info: &'a ConnInfo,
+}
+
+#[async_trait]
+impl ConnectMechanism for TokioMechanism<'_> {
+    type Connection = tokio_postgres::Client;
+    type ConnectError = tokio_postgres::Error;
+    type Error = anyhow::Error;
+
+    async fn connect_once(
+        &self,
+        node_info: &console::CachedNodeInfo,
+        timeout: time::Duration,
+    ) -> Result<Self::Connection, Self::ConnectError> {
+        connect_to_compute_once(node_info, self.conn_info, timeout).await
+    }
+
+    fn update_connect_config(&self, _config: &mut compute::ConnCfg) {}
+}
+
 // Wake up the destination if needed. Code here is a bit involved because
 // we reuse the code from the usual proxy and we need to prepare few structures
 // that this code expects.
-//
+#[tracing::instrument(skip_all)]
 async fn connect_to_compute(
     config: &config::ProxyConfig,
     conn_info: &ConnInfo,
@@ -219,36 +239,19 @@ async fn connect_to_compute(
         application_name: Some(APP_NAME),
     };

-    let node_info = &mut creds.wake_compute(&extra).await?.expect("msg");
+    let node_info = creds
+        .wake_compute(&extra)
+        .await?
+        .context("missing cache entry from wake_compute")?;

-    // This code is a copy of `connect_to_compute` from `src/proxy.rs` with
-    // the difference that it uses `tokio_postgres` for the connection.
-    let mut num_retries: usize = NUM_RETRIES_WAKE_COMPUTE;
-    loop {
-        match connect_to_compute_once(node_info, conn_info).await {
-            Err(e) if num_retries > 0 => {
-                info!("compute node's state has changed; requesting a wake-up");
-                match creds.wake_compute(&extra).await? {
-                    // Update `node_info` and try one more time.
-                    Some(new) => {
-                        *node_info = new;
-                    }
-                    // Link auth doesn't work that way, so we just exit.
-                    None => return Err(e),
-                }
-            }
-            other => return other,
-        }
-
-        num_retries -= 1;
-        info!("retrying after wake-up ({num_retries} attempts left)");
-    }
+    crate::proxy::connect_to_compute(&TokioMechanism { conn_info }, node_info, &extra, &creds).await
 }

 async fn connect_to_compute_once(
     node_info: &console::CachedNodeInfo,
     conn_info: &ConnInfo,
-) -> anyhow::Result<tokio_postgres::Client> {
+    timeout: time::Duration,
+) -> Result<tokio_postgres::Client, tokio_postgres::Error> {
     let mut config = (*node_info.config).clone();

     let (client, connection) = config
@@ -256,16 +259,8 @@ async fn connect_to_compute_once(
         .password(&conn_info.password)
         .dbname(&conn_info.dbname)
         .max_backend_message_size(MAX_RESPONSE_SIZE)
+        .connect_timeout(timeout)
         .connect(tokio_postgres::NoTls)
-        .inspect_err(|e: &tokio_postgres::Error| {
-            error!(
-                "failed to connect to compute node hosts={:?} ports={:?}: {}",
-                node_info.config.get_hosts(),
-                node_info.config.get_ports(),
-                e
-            );
-            invalidate_cache(node_info)
-        })
        .await?;

     tokio::spawn(async move {
@@ -1,6 +1,6 @@
-use anyhow::anyhow;
+use anyhow::{anyhow, bail};
 use hyper::{Body, Request, Response, StatusCode};
-use std::net::TcpListener;
+use std::{convert::Infallible, net::TcpListener};
 use tracing::info;
 use utils::http::{endpoint, error::ApiError, json::json_response, RouterBuilder, RouterService};

@@ -12,7 +12,7 @@ fn make_router() -> RouterBuilder<hyper::Body, ApiError> {
     endpoint::make_router().get("/v1/status", status_handler)
 }

-pub async fn task_main(http_listener: TcpListener) -> anyhow::Result<()> {
+pub async fn task_main(http_listener: TcpListener) -> anyhow::Result<Infallible> {
     scopeguard::defer! {
         info!("http has shut down");
     }
@@ -23,5 +23,5 @@ pub async fn task_main(http_listener: TcpListener) -> anyhow::Result<()> {
         .serve(service().map_err(|e| anyhow!(e))?)
         .await?;

-    Ok(())
+    bail!("hyper server without shutdown handling cannot shutdown successfully");
 }
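Several task entry points above switch their return type from `anyhow::Result<()>` to `anyhow::Result<Infallible>`: since `Infallible` has no values, the signature alone proves the task can only ever come back with an error. A tiny self-contained sketch of the idea, assuming a standard tokio setup (names illustrative):

```rust
// Sketch: a "runs forever" task whose Ok type has no inhabitants.
use std::convert::Infallible;
use std::time::Duration;

async fn forever(name: &str) -> anyhow::Result<Infallible> {
    loop {
        tokio::time::sleep(Duration::from_secs(1)).await;
        if name.is_empty() {
            anyhow::bail!("bad config"); // the only way out is an error
        }
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // If this ever yields Ok(i), `match i {}` is statically unreachable.
    let i: Infallible = forever("metrics").await?;
    match i {}
}
```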
@@ -1,5 +1,8 @@
 use crate::{
-    cancellation::CancelMap, config::ProxyConfig, error::io_error, proxy::handle_ws_client,
+    cancellation::CancelMap,
+    config::ProxyConfig,
+    error::io_error,
+    proxy::{handle_client, ClientMode},
 };
 use bytes::{Buf, Bytes};
 use futures::{Sink, Stream, StreamExt};
@@ -150,12 +153,12 @@ async fn serve_websocket(
     hostname: Option<String>,
 ) -> anyhow::Result<()> {
     let websocket = websocket.await?;
-    handle_ws_client(
+    handle_client(
         config,
         cancel_map,
         session_id,
         WebSocketRw::new(websocket),
-        hostname,
+        ClientMode::Websockets { hostname },
     )
     .await?;
     Ok(())
@@ -221,6 +224,18 @@ async fn ws_handler(
             );
             r
         })
+    } else if request.uri().path() == "/sql" && request.method() == Method::OPTIONS {
+        Response::builder()
+            .header("Allow", "OPTIONS, POST")
+            .header("Access-Control-Allow-Origin", "*")
+            .header(
+                "Access-Control-Allow-Headers",
+                "Neon-Connection-String, Neon-Raw-Text-Output, Neon-Array-Mode, Neon-Pool-Opt-In",
+            )
+            .header("Access-Control-Max-Age", "86400" /* 24 hours */)
+            .status(StatusCode::OK) // 204 is also valid, but see: https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/OPTIONS#status_code
+            .body(Body::empty())
+            .map_err(|e| ApiError::BadRequest(e.into()))
     } else {
         json_response(StatusCode::BAD_REQUEST, "query is not supported")
     }
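The new `/sql` OPTIONS branch above answers CORS preflight requests so browsers may POST queries. A stand-alone sketch of the same preflight response using hyper 0.14 types directly (the proxy's `ApiError` wrapper is omitted; the handler itself is illustrative):

```rust
// Sketch: reply to an OPTIONS preflight for the /sql endpoint.
use hyper::{Body, Method, Request, Response, StatusCode};

fn preflight(request: &Request<Body>) -> Option<Response<Body>> {
    if request.uri().path() != "/sql" || request.method() != Method::OPTIONS {
        return None;
    }
    Response::builder()
        .header("Allow", "OPTIONS, POST")
        .header("Access-Control-Allow-Origin", "*")
        .header(
            "Access-Control-Allow-Headers",
            "Neon-Connection-String, Neon-Raw-Text-Output, Neon-Array-Mode, Neon-Pool-Opt-In",
        )
        .header("Access-Control-Max-Age", "86400") // cache the preflight for 24 hours
        .status(StatusCode::OK)
        .body(Body::empty())
        .ok()
}
```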
@@ -1,5 +1,6 @@
+use std::convert::Infallible;
+
 use anyhow::{bail, Context};
-use futures::{Future, FutureExt};
 use tokio::task::JoinError;
 use tokio_util::sync::CancellationToken;
 use tracing::warn;
@@ -23,7 +24,7 @@ pub mod url;
 pub mod waiters;

 /// Handle unix signals appropriately.
-pub async fn handle_signals(token: CancellationToken) -> anyhow::Result<()> {
+pub async fn handle_signals(token: CancellationToken) -> anyhow::Result<Infallible> {
     use tokio::signal::unix::{signal, SignalKind};

     let mut hangup = signal(SignalKind::hangup())?;
@@ -50,8 +51,6 @@ pub async fn handle_signals(token: CancellationToken) -> anyhow::Result<()> {
 }

 /// Flattens `Result<Result<T>>` into `Result<T>`.
-pub async fn flatten_err(
-    f: impl Future<Output = Result<anyhow::Result<()>, JoinError>>,
-) -> anyhow::Result<()> {
-    f.map(|r| r.context("join error").and_then(|x| x)).await
+pub fn flatten_err<T>(r: Result<anyhow::Result<T>, JoinError>) -> anyhow::Result<T> {
+    r.context("join error").and_then(|x| x)
 }
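`flatten_err` becomes a synchronous, generic helper because `JoinSet::join_next()` already hands back a completed `Result<anyhow::Result<T>, JoinError>`. A minimal stand-alone usage sketch (trivial spawned task; tokio/anyhow assumed as dependencies):

```rust
// Sketch: collapse a JoinError layer and a task-level error layer into one Result.
use anyhow::Context;
use tokio::task::JoinError;

pub fn flatten_err<T>(r: Result<anyhow::Result<T>, JoinError>) -> anyhow::Result<T> {
    // a panic/cancellation (JoinError) and a task error both become anyhow errors
    r.context("join error").and_then(|x| x)
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let mut set = tokio::task::JoinSet::new();
    set.spawn(async { anyhow::Ok(41 + 1) });
    if let Some(res) = set.join_next().await {
        let value = flatten_err(res)?;
        assert_eq!(value, 42);
    }
    Ok(())
}
```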
@@ -4,7 +4,7 @@ use crate::{config::MetricCollectionConfig, http};
 use chrono::{DateTime, Utc};
 use consumption_metrics::{idempotency_key, Event, EventChunk, EventType, CHUNK_SIZE};
 use serde::Serialize;
-use std::{collections::HashMap, time::Duration};
+use std::{collections::HashMap, convert::Infallible, time::Duration};
 use tracing::{error, info, instrument, trace, warn};

 const PROXY_IO_BYTES_PER_CLIENT: &str = "proxy_io_bytes_per_client";
@@ -26,7 +26,7 @@ pub struct Ids {
     pub branch_id: String,
 }

-pub async fn task_main(config: &MetricCollectionConfig) -> anyhow::Result<()> {
+pub async fn task_main(config: &MetricCollectionConfig) -> anyhow::Result<Infallible> {
     info!("metrics collector config: {config:?}");
     scopeguard::defer! {
         info!("metrics collector has shut down");
@@ -6,16 +6,21 @@ use crate::{
     cancellation::{self, CancelMap},
     compute::{self, PostgresConnection},
     config::{ProxyConfig, TlsConfig},
-    console::{self, messages::MetricsAuxInfo},
-    error::io_error,
+    console::{
+        self,
+        errors::{ApiError, WakeComputeError},
+        messages::MetricsAuxInfo,
+    },
     stream::{PqStream, Stream},
 };
 use anyhow::{bail, Context};
+use async_trait::async_trait;
 use futures::TryFutureExt;
+use hyper::StatusCode;
 use metrics::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec};
 use once_cell::sync::Lazy;
 use pq_proto::{BeMessage as Be, FeStartupPacket, StartupMessageParams};
-use std::sync::Arc;
+use std::{error::Error, io, ops::ControlFlow, sync::Arc};
 use tokio::{
     io::{AsyncRead, AsyncWrite, AsyncWriteExt},
     time,
@@ -25,7 +30,10 @@ use tracing::{error, info, warn};
 use utils::measured_stream::MeasuredStream;

 /// Number of times we should retry the `/proxy_wake_compute` http request.
-pub const NUM_RETRIES_WAKE_COMPUTE: usize = 1;
+/// Retry duration is BASE_RETRY_WAIT_DURATION * 1.5^n
+const NUM_RETRIES_CONNECT: u32 = 10;
+const CONNECT_TIMEOUT: time::Duration = time::Duration::from_secs(2);
+const BASE_RETRY_WAIT_DURATION: time::Duration = time::Duration::from_millis(100);

 const ERR_INSECURE_CONNECTION: &str = "connection is insecure (try using `sslmode=require`)";
 const ERR_PROTO_VIOLATION: &str = "protocol violation";
@@ -96,7 +104,8 @@ pub async fn task_main(
             .set_nodelay(true)
             .context("failed to set socket option")?;

-            handle_client(config, &cancel_map, session_id, socket).await
+            handle_client(config, &cancel_map, session_id, socket, ClientMode::Tcp)
+                .await
         }
         .unwrap_or_else(move |e| {
             // Acknowledge that the task has finished with an error.
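The new constants encode the retry policy named in the comment above: up to ten connect attempts, a two-second per-attempt timeout, and waits that grow by a factor of 1.5 per retry. A quick numeric sketch of that schedule, using the same integer trick (3^n / 2^n) the diff later applies in `retry_after`; the values only illustrate the formula:

```rust
// Sketch: the 100ms * 1.5^n backoff schedule, computed with integer arithmetic.
use std::time::Duration;

const BASE_RETRY_WAIT_DURATION: Duration = Duration::from_millis(100);

fn retry_after(num_retries: u32) -> Duration {
    match num_retries {
        0 => Duration::ZERO, // the first retry is immediate
        n => BASE_RETRY_WAIT_DURATION * 3_u32.pow(n) / 2_u32.pow(n),
    }
}

fn main() {
    for n in 0..=5 {
        // prints 0ns, 150ms, 225ms, 337.5ms, 506.25ms, 759.375ms
        println!("retry {n}: wait {:?}", retry_after(n));
    }
}
```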
@@ -121,14 +130,50 @@
     Ok(())
 }

-// TODO(tech debt): unite this with its twin below.
+pub enum ClientMode {
+    Tcp,
+    Websockets { hostname: Option<String> },
+}
+
+/// Abstracts the logic of handling TCP vs WS clients
+impl ClientMode {
+    fn allow_cleartext(&self) -> bool {
+        match self {
+            ClientMode::Tcp => false,
+            ClientMode::Websockets { .. } => true,
+        }
+    }
+
+    fn allow_self_signed_compute(&self, config: &ProxyConfig) -> bool {
+        match self {
+            ClientMode::Tcp => config.allow_self_signed_compute,
+            ClientMode::Websockets { .. } => false,
+        }
+    }
+
+    fn hostname<'a, S>(&'a self, s: &'a Stream<S>) -> Option<&'a str> {
+        match self {
+            ClientMode::Tcp => s.sni_hostname(),
+            ClientMode::Websockets { hostname } => hostname.as_deref(),
+        }
+    }
+
+    fn handshake_tls<'a>(&self, tls: Option<&'a TlsConfig>) -> Option<&'a TlsConfig> {
+        match self {
+            ClientMode::Tcp => tls,
+            // TLS is None here if using websockets, because the connection is already encrypted.
+            ClientMode::Websockets { .. } => None,
+        }
+    }
+}
+
 #[tracing::instrument(fields(session_id = ?session_id), skip_all)]
-pub async fn handle_ws_client(
+pub async fn handle_client<S: AsyncRead + AsyncWrite + Unpin>(
     config: &'static ProxyConfig,
     cancel_map: &CancelMap,
     session_id: uuid::Uuid,
-    stream: impl AsyncRead + AsyncWrite + Unpin,
-    hostname: Option<String>,
+    stream: S,
+    mode: ClientMode,
 ) -> anyhow::Result<()> {
     // The `closed` counter will increase when this future is destroyed.
     NUM_CONNECTIONS_ACCEPTED_COUNTER.inc();
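`ClientMode` lets a single `handle_client` serve both raw TCP and WebSocket clients by centralizing the per-transport policy. A stand-alone sketch of the same idea with simplified stand-in types (the config and stream types here are not the proxy's):

```rust
// Sketch: one enum carries per-transport policy so the handler stays a single function.
struct Config {
    allow_self_signed_compute: bool,
}

enum ClientMode {
    Tcp,
    Websockets { hostname: Option<String> },
}

impl ClientMode {
    // cleartext auth is only acceptable when TLS is already terminated (websockets)
    fn allow_cleartext(&self) -> bool {
        matches!(self, ClientMode::Websockets { .. })
    }

    fn allow_self_signed_compute(&self, config: &Config) -> bool {
        match self {
            ClientMode::Tcp => config.allow_self_signed_compute,
            ClientMode::Websockets { .. } => false,
        }
    }

    fn hostname<'a>(&'a self, sni: Option<&'a str>) -> Option<&'a str> {
        match self {
            ClientMode::Tcp => sni,
            ClientMode::Websockets { hostname } => hostname.as_deref(),
        }
    }
}

fn main() {
    let config = Config { allow_self_signed_compute: true };
    let mode = ClientMode::Websockets { hostname: Some("ep-example".into()) };
    assert!(mode.allow_cleartext());
    assert!(!mode.allow_self_signed_compute(&config));
    assert_eq!(mode.hostname(None), Some("ep-example"));
}
```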
@@ -137,10 +182,8 @@ pub async fn handle_ws_client(
     }

     let tls = config.tls_config.as_ref();
-    let hostname = hostname.as_deref();

-    // TLS is None here, because the connection is already encrypted.
-    let do_handshake = handshake(stream, None, cancel_map);
+    let do_handshake = handshake(stream, mode.handshake_tls(tls), cancel_map);
     let (mut stream, params) = match do_handshake.await? {
         Some(x) => x,
         None => return Ok(()), // it's a cancellation request
@@ -148,6 +191,7 @@

     // Extract credentials which we're going to use for auth.
     let creds = {
+        let hostname = mode.hostname(stream.get_ref());
         let common_names = tls.and_then(|tls| tls.common_names.clone());
         let result = config
             .auth_backend
@@ -155,59 +199,21 @@
             .map(|_| auth::ClientCredentials::parse(&params, hostname, common_names))
             .transpose();

-        async { result }.or_else(|e| stream.throw_error(e)).await?
+        match result {
+            Ok(creds) => creds,
+            Err(e) => stream.throw_error(e).await?,
+        }
     };

-    let client = Client::new(stream, creds, &params, session_id, false);
-    cancel_map
-        .with_session(|session| client.connect_to_db(session, true))
-        .await
-}
-
-#[tracing::instrument(fields(session_id = ?session_id), skip_all)]
-async fn handle_client(
-    config: &'static ProxyConfig,
-    cancel_map: &CancelMap,
-    session_id: uuid::Uuid,
-    stream: impl AsyncRead + AsyncWrite + Unpin,
-) -> anyhow::Result<()> {
-    // The `closed` counter will increase when this future is destroyed.
-    NUM_CONNECTIONS_ACCEPTED_COUNTER.inc();
-    scopeguard::defer! {
-        NUM_CONNECTIONS_CLOSED_COUNTER.inc();
-    }
-
-    let tls = config.tls_config.as_ref();
-    let do_handshake = handshake(stream, tls, cancel_map);
-    let (mut stream, params) = match do_handshake.await? {
-        Some(x) => x,
-        None => return Ok(()), // it's a cancellation request
-    };
-
-    // Extract credentials which we're going to use for auth.
-    let creds = {
-        let sni = stream.get_ref().sni_hostname();
-        let common_names = tls.and_then(|tls| tls.common_names.clone());
-        let result = config
-            .auth_backend
-            .as_ref()
-            .map(|_| auth::ClientCredentials::parse(&params, sni, common_names))
-            .transpose();
-
-        async { result }.or_else(|e| stream.throw_error(e)).await?
-    };
-
-    let allow_self_signed_compute = config.allow_self_signed_compute;
-
     let client = Client::new(
         stream,
         creds,
         &params,
         session_id,
-        allow_self_signed_compute,
+        mode.allow_self_signed_compute(config),
     );
     cancel_map
-        .with_session(|session| client.connect_to_db(session, false))
+        .with_session(|session| client.connect_to_db(session, mode.allow_cleartext()))
         .await
 }
@@ -290,18 +296,18 @@ async fn handshake<S: AsyncRead + AsyncWrite + Unpin>(
 /// (e.g. the compute node's address might've changed at the wrong time).
 /// Invalidate the cache entry (if any) to prevent subsequent errors.
 #[tracing::instrument(name = "invalidate_cache", skip_all)]
-pub fn invalidate_cache(node_info: &console::CachedNodeInfo) {
+pub fn invalidate_cache(node_info: console::CachedNodeInfo) -> compute::ConnCfg {
     let is_cached = node_info.cached();
     if is_cached {
         warn!("invalidating stalled compute node info cache entry");
-        node_info.invalidate();
     }

     let label = match is_cached {
         true => "compute_cached",
         false => "compute_uncached",
     };
     NUM_CONNECTION_FAILURES.with_label_values(&[label]).inc();
+
+    node_info.invalidate().config
 }

 /// Try to connect to the compute node once.
@@ -315,61 +321,214 @@ async fn connect_to_compute_once(
     node_info
         .config
         .connect(allow_self_signed_compute, timeout)
-        .inspect_err(|_: &compute::ConnectionError| invalidate_cache(node_info))
         .await
 }

+enum ConnectionState<E> {
+    Cached(console::CachedNodeInfo),
+    Invalid(compute::ConnCfg, E),
+}
+
+#[async_trait]
+pub trait ConnectMechanism {
+    type Connection;
+    type ConnectError;
+    type Error: From<Self::ConnectError>;
+    async fn connect_once(
+        &self,
+        node_info: &console::CachedNodeInfo,
+        timeout: time::Duration,
+    ) -> Result<Self::Connection, Self::ConnectError>;
+
+    fn update_connect_config(&self, conf: &mut compute::ConnCfg);
+}
+
+pub struct TcpMechanism<'a> {
+    /// KV-dictionary with PostgreSQL connection params.
+    pub params: &'a StartupMessageParams,
+}
+
+#[async_trait]
+impl ConnectMechanism for TcpMechanism<'_> {
+    type Connection = PostgresConnection;
+    type ConnectError = compute::ConnectionError;
+    type Error = compute::ConnectionError;
+
+    async fn connect_once(
+        &self,
+        node_info: &console::CachedNodeInfo,
+        timeout: time::Duration,
+    ) -> Result<PostgresConnection, Self::Error> {
+        connect_to_compute_once(node_info, timeout).await
+    }
+
+    fn update_connect_config(&self, config: &mut compute::ConnCfg) {
+        config.set_startup_params(self.params);
+    }
+}
+
 /// Try to connect to the compute node, retrying if necessary.
 /// This function might update `node_info`, so we take it by `&mut`.
 #[tracing::instrument(skip_all)]
-async fn connect_to_compute(
-    node_info: &mut console::CachedNodeInfo,
-    params: &StartupMessageParams,
+pub async fn connect_to_compute<M: ConnectMechanism>(
+    mechanism: &M,
+    mut node_info: console::CachedNodeInfo,
     extra: &console::ConsoleReqExtra<'_>,
     creds: &auth::BackendType<'_, auth::ClientCredentials<'_>>,
-) -> Result<PostgresConnection, compute::ConnectionError> {
-    let mut num_retries: usize = NUM_RETRIES_WAKE_COMPUTE;
+) -> Result<M::Connection, M::Error>
+where
+    M::ConnectError: ShouldRetry + std::fmt::Debug,
+    M::Error: From<WakeComputeError>,
+{
+    mechanism.update_connect_config(&mut node_info.config);
+
+    let mut num_retries = 0;
+    let mut state = ConnectionState::<M::ConnectError>::Cached(node_info);
+
     loop {
-        // Apply startup params to the (possibly, cached) compute node info.
-        node_info.config.set_startup_params(params);
-
-        // Set a shorter timeout for the initial connection attempt.
-        //
-        // In case we try to connect to an outdated address that is no longer valid, the
-        // default behavior of Kubernetes is to drop the packets, causing us to wait for
-        // the entire timeout period. We want to fail fast in such cases.
-        //
-        // A specific case to consider is when we have cached compute node information
-        // with a 4-minute TTL (Time To Live), but the user has executed a `/suspend` API
-        // call, resulting in the nonexistence of the compute node.
-        //
-        // We only use caching in case of scram proxy backed by the console, so reduce
-        // the timeout only in that case.
-        let is_scram_proxy = matches!(creds, auth::BackendType::Console(_, _));
-        let timeout = if is_scram_proxy && num_retries == NUM_RETRIES_WAKE_COMPUTE {
-            time::Duration::from_secs(2)
-        } else {
-            time::Duration::from_secs(10)
-        };
-
-        match connect_to_compute_once(node_info, timeout).await {
-            Err(e) if num_retries > 0 => {
-                info!("compute node's state has changed; requesting a wake-up");
-                match creds.wake_compute(extra).map_err(io_error).await? {
-                    // Update `node_info` and try one more time.
-                    Some(mut new) => {
-                        new.config.reuse_password(&node_info.config);
-                        *node_info = new;
-                    }
-                    // Link auth doesn't work that way, so we just exit.
-                    None => return Err(e),
-                }
-            }
-            other => return other,
-        }
-
-        num_retries -= 1;
-        info!("retrying after wake-up ({num_retries} attempts left)");
+        match state {
+            ConnectionState::Invalid(config, err) => {
+                match try_wake(&config, extra, creds).await {
+                    // we can't wake up the compute node
+                    Ok(None) => return Err(err.into()),
+                    // there was an error communicating with the control plane
+                    Err(e) => return Err(e.into()),
+                    // failed to wake up but we can continue to retry
+                    Ok(Some(ControlFlow::Continue(()))) => {
+                        state = ConnectionState::Invalid(config, err);
+                        let wait_duration = retry_after(num_retries);
+                        num_retries += 1;
+
+                        info!(num_retries, "retrying wake compute");
+                        time::sleep(wait_duration).await;
+                        continue;
+                    }
+                    // successfully woke up a compute node and can break the wakeup loop
+                    Ok(Some(ControlFlow::Break(mut node_info))) => {
+                        mechanism.update_connect_config(&mut node_info.config);
+                        state = ConnectionState::Cached(node_info)
+                    }
+                }
+            }
+            ConnectionState::Cached(node_info) => {
+                match mechanism.connect_once(&node_info, CONNECT_TIMEOUT).await {
+                    Ok(res) => return Ok(res),
+                    Err(e) => {
+                        error!(error = ?e, "could not connect to compute node");
+                        if !e.should_retry(num_retries) {
+                            return Err(e.into());
+                        }
+
+                        // after the first connect failure,
+                        // we should invalidate the cache and wake up a new compute node
+                        if num_retries == 0 {
+                            state = ConnectionState::Invalid(invalidate_cache(node_info), e);
+                        } else {
+                            state = ConnectionState::Cached(node_info);
+                        }
+
+                        let wait_duration = retry_after(num_retries);
+                        num_retries += 1;
+
+                        info!(num_retries, "retrying wake compute");
+                        time::sleep(wait_duration).await;
+                    }
+                }
+            }
+        }
     }
 }
+
+/// Attempts to wake up the compute node.
+/// * Returns Ok(Some(true)) if there was an error waking but retries are acceptable
+/// * Returns Ok(Some(false)) if the wakeup succeeded
+/// * Returns Ok(None) or Err(e) if there was an error
+async fn try_wake(
+    config: &compute::ConnCfg,
+    extra: &console::ConsoleReqExtra<'_>,
+    creds: &auth::BackendType<'_, auth::ClientCredentials<'_>>,
+) -> Result<Option<ControlFlow<console::CachedNodeInfo>>, WakeComputeError> {
+    info!("compute node's state has likely changed; requesting a wake-up");
+    match creds.wake_compute(extra).await {
+        // retry wake if the compute was in an invalid state
+        Err(WakeComputeError::ApiError(ApiError::Console {
+            status: StatusCode::BAD_REQUEST,
+            ..
+        })) => Ok(Some(ControlFlow::Continue(()))),
+        // Update `node_info` and try again.
+        Ok(Some(mut new)) => {
+            new.config.reuse_password(config);
+            Ok(Some(ControlFlow::Break(new)))
+        }
+        Err(e) => Err(e),
+        Ok(None) => Ok(None),
+    }
+}
+
+pub trait ShouldRetry {
+    fn could_retry(&self) -> bool;
+    fn should_retry(&self, num_retries: u32) -> bool {
+        match self {
+            // retry all errors at least once
+            _ if num_retries == 0 => true,
+            _ if num_retries >= NUM_RETRIES_CONNECT => false,
+            err => err.could_retry(),
+        }
+    }
+}
+
+impl ShouldRetry for io::Error {
+    fn could_retry(&self) -> bool {
+        use std::io::ErrorKind;
+        matches!(
+            self.kind(),
+            ErrorKind::ConnectionRefused | ErrorKind::AddrNotAvailable | ErrorKind::TimedOut
+        )
+    }
+}
+
+impl ShouldRetry for tokio_postgres::error::DbError {
+    fn could_retry(&self) -> bool {
+        use tokio_postgres::error::SqlState;
+        matches!(
+            self.code(),
+            &SqlState::CONNECTION_FAILURE
+                | &SqlState::CONNECTION_EXCEPTION
+                | &SqlState::CONNECTION_DOES_NOT_EXIST
+                | &SqlState::SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION,
+        )
+    }
+}
+
+impl ShouldRetry for tokio_postgres::Error {
+    fn could_retry(&self) -> bool {
+        if let Some(io_err) = self.source().and_then(|x| x.downcast_ref()) {
+            io::Error::could_retry(io_err)
+        } else if let Some(db_err) = self.source().and_then(|x| x.downcast_ref()) {
+            tokio_postgres::error::DbError::could_retry(db_err)
+        } else {
+            false
+        }
+    }
+}
+
+impl ShouldRetry for compute::ConnectionError {
+    fn could_retry(&self) -> bool {
+        match self {
+            compute::ConnectionError::Postgres(err) => err.could_retry(),
+            compute::ConnectionError::CouldNotConnect(err) => err.could_retry(),
+            _ => false,
+        }
+    }
+}
+
+pub fn retry_after(num_retries: u32) -> time::Duration {
+    match num_retries {
+        0 => time::Duration::ZERO,
+        _ => {
+            // 3/2 = 1.5 which seems to be an ok growth factor heuristic
+            BASE_RETRY_WAIT_DURATION * 3_u32.pow(num_retries) / 2_u32.pow(num_retries)
+        }
     }
 }
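The hunk above generalizes compute-node connection into a `ConnectMechanism` trait plus a `ShouldRetry` policy, so TCP and SQL-over-HTTP paths share one retry loop. The following self-contained distillation shows the shape of that loop with toy types standing in for the proxy's console/compute structures; the error strings, the synchronous stand-in for wake-up, and the sleep are all illustrative assumptions.

```rust
// Sketch: connect with a cached config; on a retryable failure, invalidate and
// "wake" a fresh config, with capped, exponentially growing waits.
use std::time::Duration;

const NUM_RETRIES_CONNECT: u32 = 10;
const BASE_RETRY_WAIT_DURATION: Duration = Duration::from_millis(100);

fn retry_after(num_retries: u32) -> Duration {
    match num_retries {
        0 => Duration::ZERO,
        n => BASE_RETRY_WAIT_DURATION * 3_u32.pow(n) / 2_u32.pow(n),
    }
}

trait ShouldRetry {
    fn could_retry(&self) -> bool;
    fn should_retry(&self, num_retries: u32) -> bool {
        match self {
            _ if num_retries == 0 => true, // retry every error at least once
            _ if num_retries >= NUM_RETRIES_CONNECT => false,
            err => err.could_retry(),
        }
    }
}

#[derive(Debug)]
struct ConnectError(&'static str);
impl ShouldRetry for ConnectError {
    fn could_retry(&self) -> bool {
        self.0 == "connection refused"
    }
}

// Toy stand-ins for the cached node info and the wake-up call.
struct NodeInfo { addr: &'static str }
fn wake_compute() -> NodeInfo { NodeInfo { addr: "fresh-node:5432" } }
fn connect_once(node: &NodeInfo, attempt: u32) -> Result<String, ConnectError> {
    if attempt < 2 { Err(ConnectError("connection refused")) } else { Ok(format!("connected to {}", node.addr)) }
}

fn connect_with_retries(mut node: NodeInfo) -> Result<String, ConnectError> {
    let mut num_retries = 0;
    loop {
        match connect_once(&node, num_retries) {
            Ok(conn) => return Ok(conn),
            Err(e) if e.should_retry(num_retries) => {
                if num_retries == 0 {
                    // first failure: assume the cached address is stale and wake a new node
                    node = wake_compute();
                }
                std::thread::sleep(retry_after(num_retries));
                num_retries += 1;
            }
            Err(e) => return Err(e),
        }
    }
}

fn main() {
    println!("{}", connect_with_retries(NodeInfo { addr: "cached-node:5432" }).unwrap());
}
```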
@@ -501,15 +660,13 @@ impl<S: AsyncRead + AsyncWrite + Unpin> Client<'_, S> {
         application_name: params.get("application_name"),
     };

-    let auth_result = async {
-        // `&mut stream` doesn't let us merge those 2 lines.
-        let res = creds
-            .authenticate(&extra, &mut stream, allow_cleartext)
-            .await;
-
-        async { res }.or_else(|e| stream.throw_error(e)).await
-    }
-    .await?;
+    let auth_result = match creds
+        .authenticate(&extra, &mut stream, allow_cleartext)
+        .await
+    {
+        Ok(auth_result) => auth_result,
+        Err(e) => return stream.throw_error(e).await,
+    };

     let AuthSuccess {
         reported_auth_ok,
@@ -518,7 +675,8 @@

     node_info.allow_self_signed_compute = allow_self_signed_compute;

-    let mut node = connect_to_compute(&mut node_info, params, &extra, &creds)
+    let aux = node_info.aux.clone();
+    let mut node = connect_to_compute(&TcpMechanism { params }, node_info, &extra, &creds)
         .or_else(|e| stream.throw_error(e))
         .await?;

@@ -529,6 +687,6 @@
     // immediately after opening the connection.
     let (stream, read_buf) = stream.into_inner();
     node.stream.write_all(&read_buf).await?;
-    proxy_pass(stream, node.stream, &node_info.aux).await
+    proxy_pass(stream, node.stream, &aux).await
 }
Some files were not shown because too many files have changed in this diff.