Mirror of https://github.com/neondatabase/neon.git
Synced 2026-02-09 05:30:37 +00:00

Compare commits: docker-bui...layer_map_ (22 commits)
| SHA1 |
|---|
| c471c25744 |
| e030830397 |
| 58fa4f0eb7 |
| 877a2d70e3 |
| 959f5c6f40 |
| 678fe0684f |
| c9821f13e0 |
| 121d535068 |
| ec3a3aed37 |
| 87cd2bae77 |
| be81db21b9 |
| f2d89761c2 |
| a0372158a0 |
| 83048a4adc |
| f71b1b174d |
| 96e78394f5 |
| ada933eb42 |
| f6a10f4693 |
| d25307dced |
| 2759f1a22e |
| f474495ba0 |
| bf1c36a30c |
.dockerignore

@@ -15,6 +15,7 @@
 !proxy/
 !safekeeper/
 !storage_broker/
+!trace/
 !vendor/postgres-v14/
 !vendor/postgres-v15/
 !workspace_hack/
.github/helm-values/production.proxy-scram.yaml (vendored, 54 changed lines)

@@ -1,54 +0,0 @@
-settings:
-  authBackend: "console"
-  authEndpoint: "http://console-release.local/management/api/v2"
-  domain: "*.cloud.neon.tech"
-  sentryEnvironment: "production"
-  wssPort: 8443
-  metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
-  metricCollectionInterval: "10min"
-
-podLabels:
-  zenith_service: proxy-scram
-  zenith_env: production
-  zenith_region: us-west-2
-  zenith_region_slug: oregon
-
-exposedService:
-  annotations:
-    service.beta.kubernetes.io/aws-load-balancer-type: external
-    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
-    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
-    external-dns.alpha.kubernetes.io/hostname: '*.cloud.neon.tech'
-  httpsPort: 443
-
-metrics:
-  enabled: true
-  serviceMonitor:
-    enabled: true
-    selector:
-      release: kube-prometheus-stack
-
-extraManifests:
-  - apiVersion: operator.victoriametrics.com/v1beta1
-    kind: VMServiceScrape
-    metadata:
-      name: "{{ include \"neon-proxy.fullname\" . }}"
-      labels:
-        helm.sh/chart: neon-proxy-{{ .Chart.Version }}
-        app.kubernetes.io/name: neon-proxy
-        app.kubernetes.io/instance: "{{ include \"neon-proxy.fullname\" . }}"
-        app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
-        app.kubernetes.io/managed-by: Helm
-      namespace: "{{ .Release.Namespace }}"
-    spec:
-      selector:
-        matchLabels:
-          app.kubernetes.io/name: "neon-proxy"
-      endpoints:
-        - port: http
-          path: /metrics
-          interval: 10s
-          scrapeTimeout: 10s
-      namespaceSelector:
-        matchNames:
-          - "{{ .Release.Namespace }}"
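This values file appears to have been consumed only by the `deploy-proxy` job that the next diff removes (it was passed via `-f .github/helm-values/production.proxy-scram.yaml`). A minimal post-removal sanity check, assuming the chart's `neon-proxy` namespace and that the VictoriaMetrics operator CRDs are installed; the object and release names below are taken from the diff, the check itself is an assumption:

```sh
# Hypothetical cleanup check after the release stops rendering these values:
# confirm no stray VMServiceScrape object remains for the proxy.
kubectl get vmservicescrapes.operator.victoriametrics.com -n neon-proxy
# Inspect what values the release currently carries, if it still exists.
helm get values neon-proxy-scram -n neon-proxy
```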
.github/workflows/deploy-prod.yml (vendored, 35 changed lines)

@@ -204,41 +204,6 @@ jobs:
       - name: Cleanup ansible folder
         run: rm -rf ~/.ansible
 
-  deploy-proxy:
-    runs-on: [ self-hosted, gen3, small ]
-    container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
-    if: inputs.deployProxy && inputs.disclamerAcknowledged
-    defaults:
-      run:
-        shell: bash
-    environment:
-      name: prod-old
-    env:
-      KUBECONFIG: .kubeconfig
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          submodules: true
-          fetch-depth: 0
-          ref: ${{ inputs.branch }}
-
-      - name: Store kubeconfig file
-        run: |
-          echo "${{ secrets.PRODUCTION_KUBECONFIG_DATA }}" | base64 --decode > ${KUBECONFIG}
-          chmod 0600 ${KUBECONFIG}
-
-      - name: Add neon helm chart
-        run: helm repo add neondatabase https://neondatabase.github.io/helm-charts
-
-      - name: Re-deploy proxy
-        run: |
-          DOCKER_TAG=${{ inputs.dockerTag }}
-          helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --install --atomic -f .github/helm-values/production.proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
-
-      - name: Cleanup helm folder
-        run: rm -rf ~/.cache
-
   deploy-storage-broker:
     name: deploy storage broker on old staging and old prod
     runs-on: [ self-hosted, gen3, small ]
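For readability, here is the `helm upgrade` invocation from the removed `Re-deploy proxy` step with one flag per line; `DOCKER_TAG` and `SENTRY_URL_PROXY` stand in for the workflow input and secret, so this is a sketch of the same command rather than a new one:

```sh
helm upgrade neon-proxy-scram neondatabase/neon-proxy \
  --install --atomic \
  --namespace neon-proxy \
  -f .github/helm-values/production.proxy-scram.yaml \
  --set image.tag="${DOCKER_TAG}" \
  --set settings.sentryUrl="${SENTRY_URL_PROXY}" \
  --wait --timeout 15m0s
```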
.github/workflows/neon_extra_builds.yml (vendored, 46 changed lines)

@@ -4,6 +4,7 @@ on:
   push:
     branches:
       - main
+  pull_request:
 
 defaults:
   run:
@@ -20,6 +21,7 @@ env:
 
 jobs:
   check-macos-build:
+    if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos')
     timeout-minutes: 90
     runs-on: macos-latest
 
@@ -93,11 +95,16 @@ jobs:
         run: ./run_clippy.sh
 
   gather-rust-build-stats:
-    timeout-minutes: 90
-    runs-on: ubuntu-latest
+    if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats')
+    runs-on: [ self-hosted, gen3, large ]
+    container:
+      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
+      options: --init
 
     env:
       BUILD_TYPE: release
+      # remove the cachepot wrapper and build without crate caches
+      RUSTC_WRAPPER: ""
       # build with incremental compilation produce partial results
       # so do not attempt to cache this build, also disable the incremental compilation
       CARGO_INCREMENTAL: 0
@@ -109,11 +116,6 @@ jobs:
           submodules: true
           fetch-depth: 1
 
-      - name: Install Ubuntu postgres dependencies
-        run: |
-          sudo apt update
-          sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev libssl-dev protobuf-compiler
-
       # Some of our rust modules use FFI and need those to be checked
       - name: Get postgres headers
         run: make postgres-headers -j$(nproc)
@@ -122,7 +124,31 @@
         run: cargo build --all --release --timings
 
       - name: Upload the build stats
-        uses: actions/upload-artifact@v3
+        id: upload-stats
+        env:
+          BUCKET: neon-github-public-dev
+          SHA: ${{ github.event.pull_request.head.sha || github.sha }}
+          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
+        run: |
+          REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
+          aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"
+          echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
+
+      - name: Publish build stats report
+        uses: actions/github-script@v6
+        env:
+          REPORT_URL: ${{ steps.upload-stats.outputs.report-url }}
+          SHA: ${{ github.event.pull_request.head.sha || github.sha }}
         with:
-          name: neon-${{ runner.os }}-release-build-stats
-          path: ./target/cargo-timings/
+          script: |
+            const { REPORT_URL, SHA } = process.env
+
+            await github.rest.repos.createCommitStatus({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              sha: `${SHA}`,
+              state: 'success',
+              target_url: `${REPORT_URL}`,
+              context: `Build stats (release)`,
+            })
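The reworked stats job is essentially a clean release build with `--timings` plus an S3 upload. A rough local equivalent, assuming a full checkout with submodules and the `postgres-headers` make target available:

```sh
# Approximate the gather-rust-build-stats job locally. Cargo writes the
# timings report to target/cargo-timings/cargo-timing.html.
make postgres-headers -j"$(nproc)"
CARGO_INCREMENTAL=0 RUSTC_WRAPPER="" cargo build --all --release --timings
echo "report: target/cargo-timings/cargo-timing.html"
```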
395
Cargo.lock
generated
395
Cargo.lock
generated
@@ -30,9 +30,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ahash"
|
name = "ahash"
|
||||||
version = "0.8.2"
|
version = "0.8.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "bf6ccdb167abbf410dcb915cabd428929d7f6a04980b54a11f26a39f1c7f7107"
|
checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
@@ -143,15 +143,24 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "async-trait"
|
name = "async-trait"
|
||||||
version = "0.1.61"
|
version = "0.1.64"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282"
|
checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "atomic-polyfill"
|
||||||
|
version = "1.0.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d299f547288d6db8d5c3a2916f7b2f66134b15b8c1ac1c4357dd3b8752af7bb2"
|
||||||
|
dependencies = [
|
||||||
|
"critical-section",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "atty"
|
name = "atty"
|
||||||
version = "0.2.14"
|
version = "0.2.14"
|
||||||
@@ -498,9 +507,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "axum"
|
name = "axum"
|
||||||
version = "0.6.2"
|
version = "0.6.4"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1304eab461cf02bd70b083ed8273388f9724c549b316ba3d1e213ce0e9e7fb7e"
|
checksum = "e5694b64066a2459918d8074c2ce0d5a88f409431994c2356617c8ae0c4721fc"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"axum-core",
|
"axum-core",
|
||||||
@@ -527,9 +536,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "axum-core"
|
name = "axum-core"
|
||||||
version = "0.3.1"
|
version = "0.3.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "f487e40dc9daee24d8a1779df88522f159a54a980f99cfbe43db0be0bd3444a8"
|
checksum = "1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-trait",
|
"async-trait",
|
||||||
"bytes",
|
"bytes",
|
||||||
@@ -623,9 +632,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "bstr"
|
name = "bstr"
|
||||||
version = "1.1.0"
|
version = "1.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b45ea9b00a7b3f2988e9a65ad3917e62123c38dba709b666506207be96d1790b"
|
checksum = "b7f0778972c64420fdedc63f09919c8a88bda7b25135357fd25a5d9f3257e832"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"memchr",
|
"memchr",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
@@ -647,9 +656,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "bytes"
|
name = "bytes"
|
||||||
version = "1.3.0"
|
version = "1.4.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c"
|
checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"serde",
|
"serde",
|
||||||
]
|
]
|
||||||
@@ -672,9 +681,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cc"
|
name = "cc"
|
||||||
version = "1.0.78"
|
version = "1.0.79"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d"
|
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cexpr"
|
name = "cexpr"
|
||||||
@@ -756,9 +765,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "clap"
|
name = "clap"
|
||||||
version = "4.1.1"
|
version = "4.1.4"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "4ec7a4128863c188deefe750ac1d1dfe66c236909f845af04beed823638dc1b2"
|
checksum = "f13b9c79b5d1dd500d20ef541215a6423c75829ef43117e1b4d17fd8af0b5d76"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags",
|
||||||
"clap_derive",
|
"clap_derive",
|
||||||
@@ -838,7 +847,7 @@ version = "0.1.0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"anyhow",
|
"anyhow",
|
||||||
"chrono",
|
"chrono",
|
||||||
"clap 4.1.1",
|
"clap 4.1.4",
|
||||||
"futures",
|
"futures",
|
||||||
"hyper",
|
"hyper",
|
||||||
"notify",
|
"notify",
|
||||||
@@ -896,7 +905,7 @@ name = "control_plane"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"anyhow",
|
"anyhow",
|
||||||
"clap 4.1.1",
|
"clap 4.1.4",
|
||||||
"comfy-table",
|
"comfy-table",
|
||||||
"git-version",
|
"git-version",
|
||||||
"nix",
|
"nix",
|
||||||
@@ -997,6 +1006,12 @@ dependencies = [
|
|||||||
"itertools",
|
"itertools",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "critical-section"
|
||||||
|
version = "1.1.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "crossbeam-channel"
|
name = "crossbeam-channel"
|
||||||
version = "0.5.6"
|
version = "0.5.6"
|
||||||
@@ -1077,9 +1092,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cxx"
|
name = "cxx"
|
||||||
version = "1.0.86"
|
version = "1.0.89"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "51d1075c37807dcf850c379432f0df05ba52cc30f279c5cfc43cc221ce7f8579"
|
checksum = "bc831ee6a32dd495436e317595e639a587aa9907bef96fe6e6abc290ab6204e9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cc",
|
"cc",
|
||||||
"cxxbridge-flags",
|
"cxxbridge-flags",
|
||||||
@@ -1089,9 +1104,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cxx-build"
|
name = "cxx-build"
|
||||||
version = "1.0.86"
|
version = "1.0.89"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "5044281f61b27bc598f2f6647d480aed48d2bf52d6eb0b627d84c0361b17aa70"
|
checksum = "94331d54f1b1a8895cd81049f7eaaaef9d05a7dcb4d1fd08bf3ff0806246789d"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cc",
|
"cc",
|
||||||
"codespan-reporting",
|
"codespan-reporting",
|
||||||
@@ -1104,15 +1119,15 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cxxbridge-flags"
|
name = "cxxbridge-flags"
|
||||||
version = "1.0.86"
|
version = "1.0.89"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "61b50bc93ba22c27b0d31128d2d130a0a6b3d267ae27ef7e4fae2167dfe8781c"
|
checksum = "48dcd35ba14ca9b40d6e4b4b39961f23d835dbb8eed74565ded361d93e1feb8a"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cxxbridge-macro"
|
name = "cxxbridge-macro"
|
||||||
version = "1.0.86"
|
version = "1.0.89"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "39e61fda7e62115119469c7b3591fd913ecca96fb766cfd3f2e2502ab7bc87a5"
|
checksum = "81bbeb29798b407ccd82a3324ade1a7286e0d29851475990b612670f6f5124d2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
@@ -1221,19 +1236,60 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "either"
|
name = "either"
|
||||||
version = "1.8.0"
|
version = "1.8.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
|
checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "encoding_rs"
|
name = "encoding_rs"
|
||||||
version = "0.8.31"
|
version = "0.8.32"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b"
|
checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "enum-map"
|
||||||
|
version = "2.4.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "50c25992259941eb7e57b936157961b217a4fc8597829ddef0596d6c3cd86e1a"
|
||||||
|
dependencies = [
|
||||||
|
"enum-map-derive",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "enum-map-derive"
|
||||||
|
version = "0.11.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2a4da76b3b6116d758c7ba93f7ec6a35d2e2cf24feda76c6e38a375f4d5c59f2"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "enumset"
|
||||||
|
version = "1.0.12"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753"
|
||||||
|
dependencies = [
|
||||||
|
"enumset_derive",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "enumset_derive"
|
||||||
|
version = "0.6.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0"
|
||||||
|
dependencies = [
|
||||||
|
"darling",
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "env_logger"
|
name = "env_logger"
|
||||||
version = "0.10.0"
|
version = "0.10.0"
|
||||||
@@ -1303,7 +1359,7 @@ dependencies = [
|
|||||||
"cfg-if",
|
"cfg-if",
|
||||||
"libc",
|
"libc",
|
||||||
"redox_syscall",
|
"redox_syscall",
|
||||||
"windows-sys",
|
"windows-sys 0.42.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1348,9 +1404,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures"
|
name = "futures"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0"
|
checksum = "13e2792b0ff0340399d58445b88fd9770e3489eff258a4cbc1523418f12abf84"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"futures-channel",
|
"futures-channel",
|
||||||
"futures-core",
|
"futures-core",
|
||||||
@@ -1363,9 +1419,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-channel"
|
name = "futures-channel"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed"
|
checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"futures-core",
|
"futures-core",
|
||||||
"futures-sink",
|
"futures-sink",
|
||||||
@@ -1373,15 +1429,15 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-core"
|
name = "futures-core"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac"
|
checksum = "ec90ff4d0fe1f57d600049061dc6bb68ed03c7d2fbd697274c41805dcb3f8608"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-executor"
|
name = "futures-executor"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2"
|
checksum = "e8de0a35a6ab97ec8869e32a2473f4b1324459e14c29275d14b10cb1fd19b50e"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"futures-core",
|
"futures-core",
|
||||||
"futures-task",
|
"futures-task",
|
||||||
@@ -1390,15 +1446,15 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-io"
|
name = "futures-io"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb"
|
checksum = "bfb8371b6fb2aeb2d280374607aeabfc99d95c72edfe51692e42d3d7f0d08531"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-macro"
|
name = "futures-macro"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d"
|
checksum = "95a73af87da33b5acf53acfebdc339fe592ecf5357ac7c0a7734ab9d8c876a70"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
@@ -1407,15 +1463,15 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-sink"
|
name = "futures-sink"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9"
|
checksum = "f310820bb3e8cfd46c80db4d7fb8353e15dfff853a127158425f31e0be6c8364"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-task"
|
name = "futures-task"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea"
|
checksum = "dcf79a1bf610b10f42aea489289c5a2c478a786509693b80cd39c44ccd936366"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-timer"
|
name = "futures-timer"
|
||||||
@@ -1425,9 +1481,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures-util"
|
name = "futures-util"
|
||||||
version = "0.3.25"
|
version = "0.3.26"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6"
|
checksum = "9c1d6de3acfef38d2be4b1f543f553131788603495be83da675e180c8d6b7bd1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"futures-channel",
|
"futures-channel",
|
||||||
"futures-core",
|
"futures-core",
|
||||||
@@ -1464,9 +1520,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "gimli"
|
name = "gimli"
|
||||||
version = "0.27.0"
|
version = "0.27.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793"
|
checksum = "221996f774192f0f718773def8201c4ae31f02616a54ccfc2d358bb0e5cefdec"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "git-version"
|
name = "git-version"
|
||||||
@@ -1521,6 +1577,15 @@ version = "1.8.2"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
|
checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "hash32"
|
||||||
|
version = "0.3.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606"
|
||||||
|
dependencies = [
|
||||||
|
"byteorder",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "hashbrown"
|
name = "hashbrown"
|
||||||
version = "0.12.3"
|
version = "0.12.3"
|
||||||
@@ -1536,7 +1601,7 @@ version = "0.13.2"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
|
checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"ahash 0.8.2",
|
"ahash 0.8.3",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1548,11 +1613,23 @@ dependencies = [
|
|||||||
"hashbrown 0.12.3",
|
"hashbrown 0.12.3",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "heapless"
|
||||||
|
version = "0.8.0"
|
||||||
|
source = "git+https://github.com/japaric/heapless.git?rev=644653bf3b831c6bb4963be2de24804acf5e5001#644653bf3b831c6bb4963be2de24804acf5e5001"
|
||||||
|
dependencies = [
|
||||||
|
"atomic-polyfill",
|
||||||
|
"hash32",
|
||||||
|
"rustc_version",
|
||||||
|
"spin 0.9.4",
|
||||||
|
"stable_deref_trait",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "heck"
|
name = "heck"
|
||||||
version = "0.4.0"
|
version = "0.4.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
|
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "hermit-abi"
|
name = "hermit-abi"
|
||||||
@@ -1814,7 +1891,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e"
|
checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
"windows-sys",
|
"windows-sys 0.42.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1832,7 +1909,7 @@ dependencies = [
|
|||||||
"hermit-abi 0.2.6",
|
"hermit-abi 0.2.6",
|
||||||
"io-lifetimes",
|
"io-lifetimes",
|
||||||
"rustix",
|
"rustix",
|
||||||
"windows-sys",
|
"windows-sys 0.42.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1852,9 +1929,9 @@ checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "js-sys"
|
name = "js-sys"
|
||||||
version = "0.3.60"
|
version = "0.3.61"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47"
|
checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
]
|
]
|
||||||
@@ -2019,6 +2096,7 @@ dependencies = [
|
|||||||
name = "metrics"
|
name = "metrics"
|
||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"chrono",
|
||||||
"libc",
|
"libc",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"prometheus",
|
"prometheus",
|
||||||
@@ -2039,9 +2117,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "miniz_oxide"
|
name = "miniz_oxide"
|
||||||
version = "0.6.2"
|
version = "0.6.4"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
|
checksum = "f2e212582ede878b109755efd0773a4f0f4ec851584cf0aefbeb4d9ecc114822"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"adler",
|
"adler",
|
||||||
]
|
]
|
||||||
@@ -2055,7 +2133,7 @@ dependencies = [
|
|||||||
"libc",
|
"libc",
|
||||||
"log",
|
"log",
|
||||||
"wasi",
|
"wasi",
|
||||||
"windows-sys",
|
"windows-sys 0.42.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2099,9 +2177,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "notify"
|
name = "notify"
|
||||||
version = "5.0.0"
|
version = "5.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "ed2c66da08abae1c024c01d635253e402341b4060a12e99b31c7594063bf490a"
|
checksum = "58ea850aa68a06e48fdb069c0ec44d0d64c8dbffa49bf3b6f7f0a901fdea1ba9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags",
|
||||||
"crossbeam-channel",
|
"crossbeam-channel",
|
||||||
@@ -2112,7 +2190,7 @@ dependencies = [
|
|||||||
"libc",
|
"libc",
|
||||||
"mio",
|
"mio",
|
||||||
"walkdir",
|
"walkdir",
|
||||||
"winapi",
|
"windows-sys 0.42.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2167,9 +2245,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "object"
|
name = "object"
|
||||||
version = "0.30.2"
|
version = "0.30.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2b8c786513eb403643f2a88c244c2aaa270ef2153f55094587d0c48a3cf22a83"
|
checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"memchr",
|
"memchr",
|
||||||
]
|
]
|
||||||
@@ -2305,9 +2383,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "os_info"
|
name = "os_info"
|
||||||
version = "3.5.1"
|
version = "3.6.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "c4750134fb6a5d49afc80777394ad5d95b04bc12068c6abb92fae8f43817270f"
|
checksum = "5c424bc68d15e0778838ac013b5b3449544d8133633d8016319e7e05a820b8c0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"log",
|
"log",
|
||||||
"serde",
|
"serde",
|
||||||
@@ -2336,13 +2414,15 @@ dependencies = [
|
|||||||
"byteorder",
|
"byteorder",
|
||||||
"bytes",
|
"bytes",
|
||||||
"chrono",
|
"chrono",
|
||||||
"clap 4.1.1",
|
"clap 4.1.4",
|
||||||
"close_fds",
|
"close_fds",
|
||||||
"const_format",
|
"const_format",
|
||||||
"consumption_metrics",
|
"consumption_metrics",
|
||||||
"crc32c",
|
"crc32c",
|
||||||
"criterion",
|
"criterion",
|
||||||
"crossbeam-utils",
|
"crossbeam-utils",
|
||||||
|
"enum-map",
|
||||||
|
"enumset",
|
||||||
"fail",
|
"fail",
|
||||||
"futures",
|
"futures",
|
||||||
"git-version",
|
"git-version",
|
||||||
@@ -2375,6 +2455,8 @@ dependencies = [
|
|||||||
"serde_with",
|
"serde_with",
|
||||||
"signal-hook",
|
"signal-hook",
|
||||||
"storage_broker",
|
"storage_broker",
|
||||||
|
"strum",
|
||||||
|
"strum_macros",
|
||||||
"svg_fmt",
|
"svg_fmt",
|
||||||
"tempfile",
|
"tempfile",
|
||||||
"tenant_size_model",
|
"tenant_size_model",
|
||||||
@@ -2399,6 +2481,7 @@ dependencies = [
|
|||||||
"byteorder",
|
"byteorder",
|
||||||
"bytes",
|
"bytes",
|
||||||
"const_format",
|
"const_format",
|
||||||
|
"enum-map",
|
||||||
"postgres_ffi",
|
"postgres_ffi",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_with",
|
"serde_with",
|
||||||
@@ -2418,15 +2501,15 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "parking_lot_core"
|
name = "parking_lot_core"
|
||||||
version = "0.9.6"
|
version = "0.9.7"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "ba1ef8814b5c993410bb3adfad7a5ed269563e4a2f90c41f5d85be7fb47133bf"
|
checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"libc",
|
"libc",
|
||||||
"redox_syscall",
|
"redox_syscall",
|
||||||
"smallvec",
|
"smallvec",
|
||||||
"windows-sys",
|
"windows-sys 0.45.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2782,7 +2865,7 @@ dependencies = [
|
|||||||
"bstr",
|
"bstr",
|
||||||
"bytes",
|
"bytes",
|
||||||
"chrono",
|
"chrono",
|
||||||
"clap 4.1.1",
|
"clap 4.1.4",
|
||||||
"consumption_metrics",
|
"consumption_metrics",
|
||||||
"futures",
|
"futures",
|
||||||
"git-version",
|
"git-version",
|
||||||
@@ -2882,9 +2965,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rayon-core"
|
name = "rayon-core"
|
||||||
version = "1.10.1"
|
version = "1.10.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3"
|
checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crossbeam-channel",
|
"crossbeam-channel",
|
||||||
"crossbeam-deque",
|
"crossbeam-deque",
|
||||||
@@ -2974,11 +3057,11 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "reqwest"
|
name = "reqwest"
|
||||||
version = "0.11.13"
|
version = "0.11.14"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c"
|
checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"base64 0.13.1",
|
"base64 0.21.0",
|
||||||
"bytes",
|
"bytes",
|
||||||
"encoding_rs",
|
"encoding_rs",
|
||||||
"futures-core",
|
"futures-core",
|
||||||
@@ -3020,7 +3103,7 @@ dependencies = [
|
|||||||
"cc",
|
"cc",
|
||||||
"libc",
|
"libc",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"spin",
|
"spin 0.5.2",
|
||||||
"untrusted",
|
"untrusted",
|
||||||
"web-sys",
|
"web-sys",
|
||||||
"winapi",
|
"winapi",
|
||||||
@@ -3115,7 +3198,7 @@ dependencies = [
|
|||||||
"io-lifetimes",
|
"io-lifetimes",
|
||||||
"libc",
|
"libc",
|
||||||
"linux-raw-sys",
|
"linux-raw-sys",
|
||||||
"windows-sys",
|
"windows-sys 0.42.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -3181,7 +3264,7 @@ dependencies = [
|
|||||||
"async-trait",
|
"async-trait",
|
||||||
"byteorder",
|
"byteorder",
|
||||||
"bytes",
|
"bytes",
|
||||||
"clap 4.1.1",
|
"clap 4.1.4",
|
||||||
"const_format",
|
"const_format",
|
||||||
"crc32c",
|
"crc32c",
|
||||||
"fs2",
|
"fs2",
|
||||||
@@ -3242,7 +3325,7 @@ version = "0.1.21"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3"
|
checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"windows-sys",
|
"windows-sys 0.42.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -3269,9 +3352,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "security-framework"
|
name = "security-framework"
|
||||||
version = "2.7.0"
|
version = "2.8.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c"
|
checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags",
|
||||||
"core-foundation",
|
"core-foundation",
|
||||||
@@ -3282,9 +3365,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "security-framework-sys"
|
name = "security-framework-sys"
|
||||||
version = "2.6.1"
|
version = "2.8.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556"
|
checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"core-foundation-sys",
|
"core-foundation-sys",
|
||||||
"libc",
|
"libc",
|
||||||
@@ -3298,9 +3381,9 @@ checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sentry"
|
name = "sentry"
|
||||||
version = "0.29.1"
|
version = "0.29.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "17ad137b9df78294b98cab1a650bef237cc6c950e82e5ce164655e674d07c5cc"
|
checksum = "a6097dc270a9c4555c5d6222ed243eaa97ff38e29299ed7c5cb36099033c604e"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"httpdate",
|
"httpdate",
|
||||||
"reqwest",
|
"reqwest",
|
||||||
@@ -3316,9 +3399,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sentry-backtrace"
|
name = "sentry-backtrace"
|
||||||
version = "0.29.1"
|
version = "0.29.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "afe4800806552aab314129761d5d3b3d422284eca3de2ab59e9fd133636cbd3d"
|
checksum = "9d92d1e4d591534ae4f872d6142f3b500f4ffc179a6aed8a3e86c7cc96d10a6a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"backtrace",
|
"backtrace",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
@@ -3328,9 +3411,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sentry-contexts"
|
name = "sentry-contexts"
|
||||||
version = "0.29.1"
|
version = "0.29.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a42938426670f6e7974989cd1417837a96dd8bbb01567094f567d6acb360bf88"
|
checksum = "3afa877b1898ff67dd9878cf4bec4e53cef7d3be9f14b1fc9e4fcdf36f8e4259"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"hostname",
|
"hostname",
|
||||||
"libc",
|
"libc",
|
||||||
@@ -3342,9 +3425,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sentry-core"
|
name = "sentry-core"
|
||||||
version = "0.29.1"
|
version = "0.29.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "4df9b9d8de2658a1ecd4e45f7b06c80c5dd97b891bfbc7c501186189b7e9bbdf"
|
checksum = "fc43eb7e4e3a444151a0fe8a0e9ce60eabd905dae33d66e257fa26f1b509c1bd"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"rand",
|
"rand",
|
||||||
@@ -3355,9 +3438,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sentry-panic"
|
name = "sentry-panic"
|
||||||
version = "0.29.1"
|
version = "0.29.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "0af37b8500f273e511ebd6eb0d342ff7937d64ce3f134764b2b4653112d48cb4"
|
checksum = "ccab4fab11e3e63c45f4524bee2e75cde39cdf164cb0b0cbe6ccd1948ceddf66"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"sentry-backtrace",
|
"sentry-backtrace",
|
||||||
"sentry-core",
|
"sentry-core",
|
||||||
@@ -3365,9 +3448,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sentry-types"
|
name = "sentry-types"
|
||||||
version = "0.29.1"
|
version = "0.29.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "ccc95faa4078768a6bf8df45e2b894bbf372b3dbbfb364e9429c1c58ab7545c6"
|
checksum = "f63708ec450b6bdcb657af760c447416d69c38ce421f34e5e2e9ce8118410bc7"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"debugid",
|
"debugid",
|
||||||
"getrandom",
|
"getrandom",
|
||||||
@@ -3567,6 +3650,21 @@ version = "0.5.2"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
|
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "spin"
|
||||||
|
version = "0.9.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
|
||||||
|
dependencies = [
|
||||||
|
"lock_api",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "stable_deref_trait"
|
||||||
|
version = "1.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "static_assertions"
|
name = "static_assertions"
|
||||||
version = "1.1.0"
|
version = "1.1.0"
|
||||||
@@ -3580,7 +3678,7 @@ dependencies = [
|
|||||||
"anyhow",
|
"anyhow",
|
||||||
"async-stream",
|
"async-stream",
|
||||||
"bytes",
|
"bytes",
|
||||||
"clap 4.1.1",
|
"clap 4.1.4",
|
||||||
"const_format",
|
"const_format",
|
||||||
"futures",
|
"futures",
|
||||||
"futures-core",
|
"futures-core",
|
||||||
@@ -3661,9 +3759,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sync_wrapper"
|
name = "sync_wrapper"
|
||||||
version = "0.1.1"
|
version = "0.1.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8"
|
checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "synstructure"
|
name = "synstructure"
|
||||||
@@ -3822,9 +3920,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tokio"
|
name = "tokio"
|
||||||
version = "1.24.2"
|
version = "1.25.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "597a12a59981d9e3c38d216785b0c37399f6e415e8d0712047620f189371b0bb"
|
checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"autocfg",
|
"autocfg",
|
||||||
"bytes",
|
"bytes",
|
||||||
@@ -3836,7 +3934,7 @@ dependencies = [
|
|||||||
"signal-hook-registry",
|
"signal-hook-registry",
|
||||||
"socket2",
|
"socket2",
|
||||||
"tokio-macros",
|
"tokio-macros",
|
||||||
"windows-sys",
|
"windows-sys 0.42.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -3961,18 +4059,18 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml"
|
name = "toml"
|
||||||
version = "0.5.10"
|
version = "0.5.11"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f"
|
checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"serde",
|
"serde",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml_datetime"
|
name = "toml_datetime"
|
||||||
version = "0.5.0"
|
version = "0.5.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "808b51e57d0ef8f71115d8f3a01e7d3750d01c79cac4b3eda910f4389fdf92fd"
|
checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"serde",
|
"serde",
|
||||||
]
|
]
|
||||||
@@ -4089,6 +4187,17 @@ version = "0.3.2"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
|
checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "trace"
|
||||||
|
version = "0.1.0"
|
||||||
|
dependencies = [
|
||||||
|
"anyhow",
|
||||||
|
"clap 4.1.4",
|
||||||
|
"pageserver_api",
|
||||||
|
"utils",
|
||||||
|
"workspace_hack",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tracing"
|
name = "tracing"
|
||||||
version = "0.1.37"
|
version = "0.1.37"
|
||||||
@@ -4247,9 +4356,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "unicode-bidi"
|
name = "unicode-bidi"
|
||||||
version = "0.3.8"
|
version = "0.3.10"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992"
|
checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "unicode-ident"
|
name = "unicode-ident"
|
||||||
@@ -4335,6 +4444,7 @@ dependencies = [
|
|||||||
"bytes",
|
"bytes",
|
||||||
"criterion",
|
"criterion",
|
||||||
"git-version",
|
"git-version",
|
||||||
|
"heapless",
|
||||||
"hex",
|
"hex",
|
||||||
"hex-literal",
|
"hex-literal",
|
||||||
"hyper",
|
"hyper",
|
||||||
@@ -4367,9 +4477,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "uuid"
|
name = "uuid"
|
||||||
version = "1.2.2"
|
version = "1.3.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c"
|
checksum = "1674845326ee10d37ca60470760d4288a6f80f304007d92e5c53bab78c9cfd79"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"getrandom",
|
"getrandom",
|
||||||
"serde",
|
"serde",
|
||||||
@@ -4392,7 +4502,7 @@ name = "wal_craft"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"anyhow",
|
"anyhow",
|
||||||
"clap 4.1.1",
|
"clap 4.1.4",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
"log",
|
"log",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
@@ -4431,9 +4541,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen"
|
name = "wasm-bindgen"
|
||||||
version = "0.2.83"
|
version = "0.2.84"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268"
|
checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"wasm-bindgen-macro",
|
"wasm-bindgen-macro",
|
||||||
@@ -4441,9 +4551,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-backend"
|
name = "wasm-bindgen-backend"
|
||||||
version = "0.2.83"
|
version = "0.2.84"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142"
|
checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bumpalo",
|
"bumpalo",
|
||||||
"log",
|
"log",
|
||||||
@@ -4456,9 +4566,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-futures"
|
name = "wasm-bindgen-futures"
|
||||||
version = "0.4.33"
|
version = "0.4.34"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d"
|
checksum = "f219e0d211ba40266969f6dbdd90636da12f75bee4fc9d6c23d1260dadb51454"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"js-sys",
|
"js-sys",
|
||||||
@@ -4468,9 +4578,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-macro"
|
name = "wasm-bindgen-macro"
|
||||||
version = "0.2.83"
|
version = "0.2.84"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810"
|
checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"quote",
|
"quote",
|
||||||
"wasm-bindgen-macro-support",
|
"wasm-bindgen-macro-support",
|
||||||
@@ -4478,9 +4588,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-macro-support"
|
name = "wasm-bindgen-macro-support"
|
||||||
version = "0.2.83"
|
version = "0.2.84"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c"
|
checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
@@ -4491,15 +4601,15 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-shared"
|
name = "wasm-bindgen-shared"
|
||||||
version = "0.2.83"
|
version = "0.2.84"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f"
|
checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "web-sys"
|
name = "web-sys"
|
||||||
version = "0.3.60"
|
version = "0.3.61"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f"
|
checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js-sys",
|
"js-sys",
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
@@ -4526,9 +4636,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "which"
|
name = "which"
|
||||||
version = "4.3.0"
|
version = "4.4.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b"
|
checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"either",
|
"either",
|
||||||
"libc",
|
"libc",
|
||||||
@@ -4581,6 +4691,30 @@ dependencies = [
|
|||||||
"windows_x86_64_msvc",
|
"windows_x86_64_msvc",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-sys"
|
||||||
|
version = "0.45.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
|
||||||
|
dependencies = [
|
||||||
|
"windows-targets",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-targets"
|
||||||
|
version = "0.42.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7"
|
||||||
|
dependencies = [
|
||||||
|
"windows_aarch64_gnullvm",
|
||||||
|
"windows_aarch64_msvc",
|
||||||
|
"windows_i686_gnu",
|
||||||
|
"windows_i686_msvc",
|
||||||
|
"windows_x86_64_gnu",
|
||||||
|
"windows_x86_64_gnullvm",
|
||||||
|
"windows_x86_64_msvc",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows_aarch64_gnullvm"
|
name = "windows_aarch64_gnullvm"
|
||||||
version = "0.42.1"
|
version = "0.42.1"
|
||||||
@@ -4639,14 +4773,13 @@ dependencies = [
|
|||||||
"anyhow",
|
"anyhow",
|
||||||
"bytes",
|
"bytes",
|
||||||
"chrono",
|
"chrono",
|
||||||
"clap 4.1.1",
|
"clap 4.1.4",
|
||||||
"crossbeam-utils",
|
"crossbeam-utils",
|
||||||
"either",
|
"either",
|
||||||
"fail",
|
"fail",
|
||||||
"futures",
|
"futures",
|
||||||
"futures-channel",
|
"futures-channel",
|
||||||
"futures-executor",
|
"futures-executor",
|
||||||
"futures-task",
|
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"hashbrown 0.12.3",
|
"hashbrown 0.12.3",
|
||||||
"indexmap",
|
"indexmap",
|
||||||
@@ -7,6 +7,7 @@ members = [
     "safekeeper",
     "storage_broker",
     "workspace_hack",
+    "trace",
     "libs/*",
 ]
 
@@ -31,12 +32,14 @@ bstr = "1.0"
 byteorder = "1.4"
 bytes = "1.0"
 chrono = { version = "0.4", default-features = false, features = ["clock"] }
-clap = "4.0"
+clap = { version = "4.0", features = ["derive"] }
 close_fds = "0.3.2"
 comfy-table = "6.1"
 const_format = "0.2"
 crc32c = "0.6"
 crossbeam-utils = "0.8.5"
+enum-map = "2.4.2"
+enumset = "1.0.12"
 fail = "0.5.0"
 fs2 = "0.4.3"
 futures = "0.3"
@@ -119,6 +122,9 @@ postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", re
 tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
 tokio-tar = { git = "https://github.com/neondatabase/tokio-tar.git", rev="404df61437de0feef49ba2ccdbdd94eb8ad6e142" }
 
+## Other git libraries
+heapless = { default-features=false, features=[], git = "https://github.com/japaric/heapless.git", rev = "644653bf3b831c6bb4963be2de24804acf5e5001" } # upstream release pending
+
 ## Local libraries
 consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
 metrics = { version = "0.1", path = "./libs/metrics/" }
@@ -10,7 +10,8 @@ ARG TAG=pinned
 FROM debian:bullseye-slim AS build-deps
 RUN apt update && \
     apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
-    zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev
+    zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev \
+    libicu-dev
 
 #########################################################################################
 #
@@ -22,7 +23,7 @@ FROM build-deps AS pg-build
 ARG PG_VERSION
 COPY vendor/postgres-${PG_VERSION} postgres
 RUN cd postgres && \
-    ./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp && \
+    ./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp --with-icu && \
     make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
     make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
     # Install headers
@@ -234,10 +235,13 @@ COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-deb
 
 # Install:
 #   libreadline8 for psql
+#   libicu67, locales for collations (including ICU)
 #   libossp-uuid16 for extension ossp-uuid
 #   libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS
 RUN apt update && \
     apt install --no-install-recommends -y \
+        locales \
+        libicu67 \
         libreadline8 \
         libossp-uuid16 \
         libgeos-c1v5 \
@@ -246,7 +250,9 @@ RUN apt update && \
         libprotobuf-c1 \
         libsfcgal1 \
         gdb && \
-    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+    rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
+    localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
+
+ENV LANG en_US.utf8
 USER postgres
 ENTRYPOINT ["/usr/local/bin/compute_ctl"]
@@ -8,5 +8,6 @@ license.workspace = true
 prometheus.workspace = true
 libc.workspace = true
 once_cell.workspace = true
+chrono.workspace = true
 
 workspace_hack.workspace = true
libs/metrics/src/launch_timestamp.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
+//! A timestamp captured at process startup to identify restarts of the process, e.g., in logs and metrics.
+
+use chrono::Utc;
+
+use super::register_uint_gauge;
+use std::fmt::Display;
+
+pub struct LaunchTimestamp(chrono::DateTime<Utc>);
+
+impl LaunchTimestamp {
+    pub fn generate() -> Self {
+        LaunchTimestamp(Utc::now())
+    }
+}
+
+impl Display for LaunchTimestamp {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+pub fn set_launch_timestamp_metric(launch_ts: &'static LaunchTimestamp) {
+    let millis_since_epoch: u64 = launch_ts
+        .0
+        .timestamp_millis()
+        .try_into()
+        .expect("we're after the epoch, this should be positive");
+    let metric = register_uint_gauge!(
+        "libmetrics_launch_timestamp",
+        "Timestamp (millis since epoch) at which the process launched."
+    )
+    .unwrap();
+    metric.set(millis_since_epoch);
+}
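
As a usage sketch (the binary and wiring here are illustrative; only the `metrics::launch_timestamp` API itself comes from this diff), a process would typically generate the timestamp once, leak it to obtain a `'static` reference, and register the gauge:

    use metrics::launch_timestamp::{set_launch_timestamp_metric, LaunchTimestamp};

    fn main() {
        // Leak a single allocation so the timestamp lives for the whole
        // process, matching the &'static parameter of the setter below.
        let launch_ts: &'static LaunchTimestamp = Box::leak(Box::new(LaunchTimestamp::generate()));

        // Registers the libmetrics_launch_timestamp gauge and sets it to
        // milliseconds since the Unix epoch.
        set_launch_timestamp_metric(launch_ts);

        // LaunchTimestamp implements Display, so it can go straight into logs.
        println!("launch_timestamp: {launch_ts}");
    }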
@@ -20,6 +20,7 @@ pub use prometheus::{register_int_gauge_vec, IntGaugeVec};
 pub use prometheus::{Encoder, TextEncoder};
 use prometheus::{Registry, Result};
 
+pub mod launch_timestamp;
 mod wrappers;
 pub use wrappers::{CountedReader, CountedWriter};
 
@@ -34,6 +35,14 @@ macro_rules! register_uint_gauge_vec {
     }};
 }
 
+#[macro_export]
+macro_rules! register_uint_gauge {
+    ($NAME:expr, $HELP:expr $(,)?) => {{
+        let gauge = $crate::UIntGauge::new($NAME, $HELP).unwrap();
+        $crate::register(Box::new(gauge.clone())).map(|_| gauge)
+    }};
+}
+
 /// Special internal registry, to collect metrics independently from the default registry.
 /// Was introduced to fix deadlock with lazy registration of metrics in the default registry.
 static INTERNAL_REGISTRY: Lazy<Registry> = Lazy::new(Registry::new);
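
The new `register_uint_gauge!` macro mirrors the existing `register_uint_gauge_vec!`, but for a single unlabelled gauge. A minimal sketch, with a made-up metric name:

    use metrics::register_uint_gauge;

    fn main() {
        // Registering the same name twice makes the registry return an
        // error, hence the Result; on success the macro hands back the
        // live gauge for the caller to update.
        let gauge = register_uint_gauge!("example_items_total", "A hypothetical example gauge.")
            .expect("metric registered exactly once");
        gauge.set(42);
    }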
@@ -13,5 +13,6 @@ bytes.workspace = true
 byteorder.workspace = true
 utils.workspace = true
 postgres_ffi.workspace = true
+enum-map.workspace = true
 
 workspace_hack.workspace = true
@@ -1,9 +1,14 @@
-use std::num::{NonZeroU64, NonZeroUsize};
+use std::{
+    collections::HashMap,
+    num::{NonZeroU64, NonZeroUsize},
+    time::SystemTime,
+};
 
 use byteorder::{BigEndian, ReadBytesExt};
 use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, DisplayFromStr};
 use utils::{
+    history_buffer::HistoryBufferWithDropCounter,
     id::{NodeId, TenantId, TimelineId},
     lsn::Lsn,
 };
@@ -137,7 +142,6 @@ pub struct TenantConfigRequest {
     #[serde_as(as = "DisplayFromStr")]
     pub tenant_id: TenantId,
     #[serde(default)]
-    #[serde_as(as = "Option<DisplayFromStr>")]
     pub checkpoint_distance: Option<u64>,
     pub checkpoint_timeout: Option<String>,
     pub compaction_target_size: Option<u64>,
@@ -227,6 +231,130 @@ pub struct TimelineInfo {
     pub state: TimelineState,
 }
 
+#[derive(Debug, Clone, Serialize)]
+pub struct LayerMapInfo {
+    pub in_memory_layers: Vec<InMemoryLayerInfo>,
+    pub historic_layers: Vec<HistoricLayerInfo>,
+}
+
+#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, enum_map::Enum)]
+#[repr(usize)]
+pub enum LayerAccessKind {
+    GetValueReconstructData,
+    Iter,
+    KeyIter,
+    Dump,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LayerAccessStatFullDetails {
+    pub when_millis_since_epoch: u64,
+    pub task_kind: &'static str,
+    pub access_kind: LayerAccessKind,
+}
+
+/// An event that impacts the layer's residence status.
+#[serde_as]
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LayerResidenceEvent {
+    /// The time when the event occurred.
+    /// NB: this timestamp is captured while the residence status changes.
+    /// So, it might be behind/ahead of the actual residence change by a short amount of time.
+    ///
+    #[serde(rename = "timestamp_millis_since_epoch")]
+    #[serde_as(as = "serde_with::TimestampMilliSeconds")]
+    timestamp: SystemTime,
+    /// The new residence status of the layer.
+    status: LayerResidenceStatus,
+    /// The reason why we had to record this event.
+    reason: LayerResidenceEventReason,
+}
+
+/// The reason for recording a given [`ResidenceEvent`].
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+pub enum LayerResidenceEventReason {
+    /// The layer map is being populated, e.g. during timeline load or attach.
+    /// This includes [`RemoteLayer`] objects created in [`reconcile_with_remote`].
+    /// We need to record such events because there is no persistent storage for the events.
+    LayerLoad,
+    /// We just created the layer (e.g., freeze_and_flush or compaction).
+    /// Such layers are always [`LayerResidenceStatus::Resident`].
+    LayerCreate,
+    /// We on-demand downloaded or evicted the given layer.
+    ResidenceChange,
+}
+
+/// The residence status of the layer, after the given [`LayerResidenceEvent`].
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+pub enum LayerResidenceStatus {
+    /// Residence status for a layer file that exists locally.
+    /// It may also exist on the remote, we don't care here.
+    Resident,
+    /// Residence status for a layer file that only exists on the remote.
+    Evicted,
+}
+
+impl LayerResidenceEvent {
+    pub fn new(status: LayerResidenceStatus, reason: LayerResidenceEventReason) -> Self {
+        Self {
+            status,
+            reason,
+            timestamp: SystemTime::now(),
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct LayerAccessStats {
+    pub access_count_by_access_kind: HashMap<LayerAccessKind, u64>,
+    pub task_kind_access_flag: Vec<&'static str>,
+    pub first: Option<LayerAccessStatFullDetails>,
+    pub accesses_history: HistoryBufferWithDropCounter<LayerAccessStatFullDetails, 16>,
+    pub residence_events_history: HistoryBufferWithDropCounter<LayerResidenceEvent, 16>,
+}
+
+#[serde_as]
+#[derive(Debug, Clone, Serialize)]
+#[serde(tag = "kind")]
+pub enum InMemoryLayerInfo {
+    Open {
+        #[serde_as(as = "DisplayFromStr")]
+        lsn_start: Lsn,
+    },
+    Frozen {
+        #[serde_as(as = "DisplayFromStr")]
+        lsn_start: Lsn,
+        #[serde_as(as = "DisplayFromStr")]
+        lsn_end: Lsn,
+    },
+}
+
+#[serde_as]
+#[derive(Debug, Clone, Serialize)]
+#[serde(tag = "kind")]
+pub enum HistoricLayerInfo {
+    Delta {
+        layer_file_name: String,
+        layer_file_size: Option<u64>,
+
+        #[serde_as(as = "DisplayFromStr")]
+        lsn_start: Lsn,
+        #[serde_as(as = "DisplayFromStr")]
+        lsn_end: Lsn,
+        remote: bool,
+        access_stats: LayerAccessStats,
+    },
+    Image {
+        layer_file_name: String,
+        layer_file_size: Option<u64>,
+
+        #[serde_as(as = "DisplayFromStr")]
+        lsn_start: Lsn,
+        remote: bool,
+        access_stats: LayerAccessStats,
+    },
+}
+
 #[derive(Debug, Serialize, Deserialize)]
 pub struct DownloadRemoteLayersTaskSpawnRequest {
     pub max_concurrent_downloads: NonZeroUsize,
@@ -267,7 +395,7 @@ pub struct TimelineGcRequest {
 }
 
 // Wrapped in libpq CopyData
-#[derive(PartialEq, Eq)]
+#[derive(PartialEq, Eq, Debug)]
 pub enum PagestreamFeMessage {
     Exists(PagestreamExistsRequest),
     Nblocks(PagestreamNblocksRequest),
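
The `InMemoryLayerInfo` and `HistoricLayerInfo` enums above use serde's internally tagged representation, so the variant name is emitted as a `kind` field inside the object. A self-contained sketch of that shape, using a simplified stand-in enum (the real types need `Lsn` from the `utils` crate) and assuming `serde` with the `derive` feature plus `serde_json`:

    use serde::Serialize;

    #[derive(Serialize)]
    #[serde(tag = "kind")]
    enum LayerInfoDemo {
        Open { lsn_start: String },
        Frozen { lsn_start: String, lsn_end: String },
    }

    fn main() {
        let open = LayerInfoDemo::Open { lsn_start: "0/16B59A8".into() };
        // Prints: {"kind":"Open","lsn_start":"0/16B59A8"}
        println!("{}", serde_json::to_string(&open).unwrap());
    }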
@@ -11,6 +11,7 @@ async-trait.workspace = true
 anyhow.workspace = true
 bincode.workspace = true
 bytes.workspace = true
+heapless.workspace = true
 hyper = { workspace = true, features = ["full"] }
 routerify.workspace = true
 serde.workspace = true
libs/utils/src/history_buffer.rs (new file, 161 lines)
@@ -0,0 +1,161 @@
+//! A heapless buffer for events of sorts.
+
+use std::ops;
+
+use heapless::HistoryBuffer;
+
+#[derive(Debug, Clone)]
+pub struct HistoryBufferWithDropCounter<T, const L: usize> {
+    buffer: HistoryBuffer<T, L>,
+    drop_count: u64,
+}
+
+impl<T, const L: usize> HistoryBufferWithDropCounter<T, L> {
+    pub fn write(&mut self, data: T) {
+        let len_before = self.buffer.len();
+        self.buffer.write(data);
+        let len_after = self.buffer.len();
+        self.drop_count += u64::from(len_before == len_after);
+    }
+    pub fn drop_count(&self) -> u64 {
+        self.drop_count
+    }
+    pub fn map<U, F: Fn(&T) -> U>(&self, f: F) -> HistoryBufferWithDropCounter<U, L> {
+        let mut buffer = HistoryBuffer::new();
+        buffer.extend(self.buffer.oldest_ordered().map(f));
+        HistoryBufferWithDropCounter::<U, L> {
+            buffer,
+            drop_count: self.drop_count,
+        }
+    }
+}
+
+impl<T, const L: usize> Default for HistoryBufferWithDropCounter<T, L> {
+    fn default() -> Self {
+        Self {
+            buffer: HistoryBuffer::default(),
+            drop_count: 0,
+        }
+    }
+}
+
+impl<T, const L: usize> ops::Deref for HistoryBufferWithDropCounter<T, L> {
+    type Target = HistoryBuffer<T, L>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.buffer
+    }
+}
+
+#[derive(serde::Serialize)]
+struct SerdeRepr<T> {
+    buffer: Vec<T>,
+    drop_count: u64,
+}
+
+impl<'a, T, const L: usize> From<&'a HistoryBufferWithDropCounter<T, L>> for SerdeRepr<T>
+where
+    T: Clone + serde::Serialize,
+{
+    fn from(value: &'a HistoryBufferWithDropCounter<T, L>) -> Self {
+        let HistoryBufferWithDropCounter { buffer, drop_count } = value;
+        SerdeRepr {
+            buffer: buffer.iter().cloned().collect(),
+            drop_count: *drop_count,
+        }
+    }
+}
+
+impl<T, const L: usize> serde::Serialize for HistoryBufferWithDropCounter<T, L>
+where
+    T: Clone + serde::Serialize,
+{
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        SerdeRepr::from(self).serialize(serializer)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::HistoryBufferWithDropCounter;
+
+    #[test]
+    fn test_basics() {
+        let mut b = HistoryBufferWithDropCounter::<_, 2>::default();
+        b.write(1);
+        b.write(2);
+        b.write(3);
+        assert!(b.iter().any(|e| *e == 2));
+        assert!(b.iter().any(|e| *e == 3));
+        assert!(!b.iter().any(|e| *e == 1));
+    }
+
+    #[test]
+    fn test_drop_count_works() {
+        let mut b = HistoryBufferWithDropCounter::<_, 2>::default();
+        b.write(1);
+        assert_eq!(b.drop_count(), 0);
+        b.write(2);
+        assert_eq!(b.drop_count(), 0);
+        b.write(3);
+        assert_eq!(b.drop_count(), 1);
+        b.write(4);
+        assert_eq!(b.drop_count(), 2);
+    }
+
+    #[test]
+    fn test_clone_works() {
+        let mut b = HistoryBufferWithDropCounter::<_, 2>::default();
+        b.write(1);
+        b.write(2);
+        b.write(3);
+        assert_eq!(b.drop_count(), 1);
+        let mut c = b.clone();
+        assert_eq!(c.drop_count(), 1);
+        assert!(c.iter().any(|e| *e == 2));
+        assert!(c.iter().any(|e| *e == 3));
+        assert!(!c.iter().any(|e| *e == 1));
+
+        c.write(4);
+        assert!(c.iter().any(|e| *e == 4));
+        assert!(!b.iter().any(|e| *e == 4));
+    }
+
+    #[test]
+    fn test_map() {
+        let mut b = HistoryBufferWithDropCounter::<_, 2>::default();
+
+        b.write(1);
+        assert_eq!(b.drop_count(), 0);
+        {
+            let c = b.map(|i| i + 10);
+            assert_eq!(c.oldest_ordered().cloned().collect::<Vec<_>>(), vec![11]);
+            assert_eq!(c.drop_count(), 0);
+        }
+
+        b.write(2);
+        assert_eq!(b.drop_count(), 0);
+        {
+            let c = b.map(|i| i + 10);
+            assert_eq!(
+                c.oldest_ordered().cloned().collect::<Vec<_>>(),
+                vec![11, 12]
+            );
+            assert_eq!(c.drop_count(), 0);
+        }
+
+        b.write(3);
+        assert_eq!(b.drop_count(), 1);
+        {
+            let c = b.map(|i| i + 10);
+            assert_eq!(
+                c.oldest_ordered().cloned().collect::<Vec<_>>(),
+                vec![12, 13]
+            );
+            assert_eq!(c.drop_count(), 1);
+        }
+    }
+}
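
To make the buffer's behavior concrete: writing a third event into a two-slot buffer evicts the oldest one and bumps the drop counter, and the `Serialize` impl above flattens that into a plain object. A sketch, assuming the `utils` crate and `serde_json` are on the path:

    use utils::history_buffer::HistoryBufferWithDropCounter;

    fn main() {
        let mut events = HistoryBufferWithDropCounter::<u32, 2>::default();
        events.write(1);
        events.write(2);
        events.write(3); // capacity exceeded: 1 is evicted, drop_count becomes 1

        // Serializes via SerdeRepr, roughly: {"buffer":[2,3],"drop_count":1}
        // (element order follows heapless's internal iteration order).
        println!("{}", serde_json::to_string(&events).unwrap());
    }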
@@ -1,7 +1,8 @@
 use crate::auth::{Claims, JwtAuth};
 use crate::http::error;
-use anyhow::anyhow;
-use hyper::header::AUTHORIZATION;
+use anyhow::{anyhow, Context};
+use hyper::header::{HeaderName, AUTHORIZATION};
+use hyper::http::HeaderValue;
 use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server};
 use metrics::{register_int_counter, Encoder, IntCounter, TextEncoder};
 use once_cell::sync::Lazy;
@@ -13,6 +14,7 @@ use tracing::info;
 
 use std::future::Future;
 use std::net::TcpListener;
+use std::str::FromStr;
 
 use super::error::ApiError;
 
@@ -143,6 +145,38 @@ pub fn auth_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
     })
 }
 
+pub fn add_response_header_middleware<B>(
+    header: &str,
+    value: &str,
+) -> anyhow::Result<Middleware<B, ApiError>>
+where
+    B: hyper::body::HttpBody + Send + Sync + 'static,
+{
+    let name =
+        HeaderName::from_str(header).with_context(|| format!("invalid header name: {header}"))?;
+    let value =
+        HeaderValue::from_str(value).with_context(|| format!("invalid header value: {value}"))?;
+    Ok(Middleware::post_with_info(
+        move |mut response, request_info| {
+            let name = name.clone();
+            let value = value.clone();
+            async move {
+                let headers = response.headers_mut();
+                if headers.contains_key(&name) {
+                    tracing::warn!(
+                        "{} response already contains header {:?}",
+                        request_info.uri(),
+                        &name,
+                    );
+                } else {
+                    headers.insert(name, value);
+                }
+                Ok(response)
+            }
+        },
+    ))
+}
+
 pub fn check_permission_with(
     req: &Request<Body>,
     check_permission: impl Fn(&Claims) -> Result<(), anyhow::Error>,
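
For context, a sketch of how such a post-middleware would typically be attached to a routerify router. The header name and value are invented, and the module path and builder wiring are assumptions about the caller rather than something this diff establishes:

    use hyper::Body;
    use routerify::{Router, RouterBuilder};
    use utils::http::endpoint::add_response_header_middleware;
    use utils::http::error::ApiError;

    fn build_router() -> anyhow::Result<RouterBuilder<Body, ApiError>> {
        // Invalid header names/values are rejected once, up front,
        // instead of on every request.
        Ok(Router::builder()
            .middleware(add_response_header_middleware("X-Example-Build", "abc123")?))
    }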
@@ -52,6 +52,8 @@ pub mod signals;
 
 pub mod fs_ext;
 
+pub mod history_buffer;
+
 /// use with fail::cfg("$name", "return(2000)")
 #[macro_export]
 macro_rules! failpoint_sleep_millis_async {
@@ -67,6 +67,10 @@ utils.workspace = true
 workspace_hack.workspace = true
 reqwest.workspace = true
 rpds.workspace = true
+enum-map.workspace = true
+enumset.workspace = true
+strum.workspace = true
+strum_macros.workspace = true
 
 [dev-dependencies]
 criterion.workspace = true
@@ -1,8 +1,7 @@
 use pageserver::keyspace::{KeyPartitioning, KeySpace};
 use pageserver::repository::Key;
 use pageserver::tenant::layer_map::LayerMap;
-use pageserver::tenant::storage_layer::Layer;
-use pageserver::tenant::storage_layer::{DeltaFileName, ImageFileName, LayerDescriptor};
+use pageserver::tenant::storage_layer::{Layer, LayerDescriptor, LayerFileName};
 use rand::prelude::{SeedableRng, SliceRandom, StdRng};
 use std::cmp::{max, min};
 use std::fs::File;
@@ -26,30 +25,15 @@ fn build_layer_map(filename_dump: PathBuf) -> LayerMap<LayerDescriptor> {
 
     let mut updates = layer_map.batch_update();
     for fname in filenames {
-        let fname = &fname.unwrap();
-        if let Some(imgfilename) = ImageFileName::parse_str(fname) {
-            let layer = LayerDescriptor {
-                key: imgfilename.key_range,
-                lsn: imgfilename.lsn..(imgfilename.lsn + 1),
-                is_incremental: false,
-                short_id: fname.to_string(),
-            };
-            updates.insert_historic(Arc::new(layer));
-            min_lsn = min(min_lsn, imgfilename.lsn);
-            max_lsn = max(max_lsn, imgfilename.lsn);
-        } else if let Some(deltafilename) = DeltaFileName::parse_str(fname) {
-            let layer = LayerDescriptor {
-                key: deltafilename.key_range.clone(),
-                lsn: deltafilename.lsn_range.clone(),
-                is_incremental: true,
-                short_id: fname.to_string(),
-            };
-            updates.insert_historic(Arc::new(layer));
-            min_lsn = min(min_lsn, deltafilename.lsn_range.start);
-            max_lsn = max(max_lsn, deltafilename.lsn_range.end);
-        } else {
-            panic!("unexpected filename {fname}");
-        }
+        let fname = fname.unwrap();
+        let fname = LayerFileName::from_str(&fname).unwrap();
+        let layer = LayerDescriptor::from(fname);
+
+        let lsn_range = layer.get_lsn_range();
+        min_lsn = min(min_lsn, lsn_range.start);
+        max_lsn = max(max_lsn, Lsn(lsn_range.end.0 - 1));
+
+        updates.insert_historic(Arc::new(layer));
     }
 
     println!("min: {min_lsn}, max: {max_lsn}");
@@ -7,6 +7,7 @@ use std::{env, ops::ControlFlow, path::Path, str::FromStr};
 use anyhow::{anyhow, Context};
 use clap::{Arg, ArgAction, Command};
 use fail::FailScenario;
+use metrics::launch_timestamp::{set_launch_timestamp_metric, LaunchTimestamp};
 use remote_storage::GenericRemoteStorage;
 use tracing::*;
 
@@ -52,6 +53,8 @@ fn version() -> String {
 }
 
 fn main() -> anyhow::Result<()> {
+    let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate()));
+
     let arg_matches = cli().get_matches();
 
     if arg_matches.get_flag("enabled-features") {
@@ -108,7 +111,7 @@ fn main() -> anyhow::Result<()> {
     virtual_file::init(conf.max_file_descriptors);
     page_cache::init(conf.page_cache_size);
 
-    start_pageserver(conf).context("Failed to start pageserver")?;
+    start_pageserver(launch_ts, conf).context("Failed to start pageserver")?;
 
     scenario.teardown();
     Ok(())
@@ -203,13 +206,24 @@ fn initialize_config(
     })
 }
 
-fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
+fn start_pageserver(
+    launch_ts: &'static LaunchTimestamp,
+    conf: &'static PageServerConf,
+) -> anyhow::Result<()> {
     // Initialize logging
     logging::init(conf.log_format)?;
 
-    // Print version to the log, and expose it as a prometheus metric too.
-    info!("version: {}", version());
+    // Print version and launch timestamp to the log,
+    // and expose them as prometheus metrics.
+    // A changed version string indicates changed software.
+    // A changed launch timestamp indicates a pageserver restart.
+    info!(
+        "version: {} launch_timestamp: {}",
+        version(),
+        launch_ts.to_string()
+    );
     set_build_info_metric(GIT_VERSION);
+    set_launch_timestamp_metric(launch_ts);
 
     // If any failpoints were set from FAILPOINTS environment variable,
     // print them to the log for debugging purposes
@@ -307,7 +321,7 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
     {
         let _rt_guard = MGMT_REQUEST_RUNTIME.enter();
 
-        let router = http::make_router(conf, auth.clone(), remote_storage)?
+        let router = http::make_router(conf, launch_ts, auth.clone(), remote_storage)?
             .build()
             .map_err(|err| anyhow!(err))?;
         let service = utils::http::RouterService::new(router).unwrap();
@@ -347,6 +361,7 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
     pageserver::consumption_metrics::collect_metrics(
         metric_collection_endpoint,
         conf.metric_collection_interval,
+        conf.cached_metric_collection_interval,
         conf.synthetic_size_calculation_interval,
         conf.id,
        metrics_ctx,
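
One detail worth noting in the hunk above: `Box::leak(Box::new(...))` is the standard way to mint a `&'static T` for a value that should live as long as the process, which is what the new `start_pageserver` signature demands. A tiny self-contained sketch of the pattern:

    struct Config {
        verbose: bool,
    }

    fn needs_static(conf: &'static Config) {
        assert!(conf.verbose);
    }

    fn main() {
        // One small allocation is deliberately never freed; that is fine
        // for a value whose lifetime is the lifetime of the process.
        let conf: &'static Config = Box::leak(Box::new(Config { verbose: true }));
        needs_static(conf);
    }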
@@ -58,6 +58,7 @@ pub mod defaults {
         super::ConfigurableSemaphore::DEFAULT_INITIAL.get();
 
     pub const DEFAULT_METRIC_COLLECTION_INTERVAL: &str = "10 min";
+    pub const DEFAULT_CACHED_METRIC_COLLECTION_INTERVAL: &str = "1 hour";
     pub const DEFAULT_METRIC_COLLECTION_ENDPOINT: Option<reqwest::Url> = None;
     pub const DEFAULT_SYNTHETIC_SIZE_CALCULATION_INTERVAL: &str = "10 min";
 
@@ -85,6 +86,7 @@ pub mod defaults {
 #concurrent_tenant_size_logical_size_queries = '{DEFAULT_CONCURRENT_TENANT_SIZE_LOGICAL_SIZE_QUERIES}'
 
 #metric_collection_interval = '{DEFAULT_METRIC_COLLECTION_INTERVAL}'
+#cached_metric_collection_interval = '{DEFAULT_CACHED_METRIC_COLLECTION_INTERVAL}'
 #synthetic_size_calculation_interval = '{DEFAULT_SYNTHETIC_SIZE_CALCULATION_INTERVAL}'
 
 # [tenant_config]
@@ -154,6 +156,8 @@ pub struct PageServerConf {
 
     // How often to collect metrics and send them to the metrics endpoint.
     pub metric_collection_interval: Duration,
+    // How often to send unchanged cached metrics to the metrics endpoint.
+    pub cached_metric_collection_interval: Duration,
     pub metric_collection_endpoint: Option<Url>,
     pub synthetic_size_calculation_interval: Duration,
 
@@ -220,6 +224,7 @@ struct PageServerConfigBuilder {
     concurrent_tenant_size_logical_size_queries: BuilderValue<ConfigurableSemaphore>,
 
     metric_collection_interval: BuilderValue<Duration>,
+    cached_metric_collection_interval: BuilderValue<Duration>,
     metric_collection_endpoint: BuilderValue<Option<Url>>,
     synthetic_size_calculation_interval: BuilderValue<Duration>,
 
@@ -264,6 +269,10 @@ impl Default for PageServerConfigBuilder {
                 DEFAULT_METRIC_COLLECTION_INTERVAL,
             )
             .expect("cannot parse default metric collection interval")),
+            cached_metric_collection_interval: Set(humantime::parse_duration(
+                DEFAULT_CACHED_METRIC_COLLECTION_INTERVAL,
+            )
+            .expect("cannot parse default cached_metric_collection_interval")),
             synthetic_size_calculation_interval: Set(humantime::parse_duration(
                 DEFAULT_SYNTHETIC_SIZE_CALCULATION_INTERVAL,
             )
@@ -353,6 +362,14 @@ impl PageServerConfigBuilder {
         self.metric_collection_interval = BuilderValue::Set(metric_collection_interval)
     }
 
+    pub fn cached_metric_collection_interval(
+        &mut self,
+        cached_metric_collection_interval: Duration,
+    ) {
+        self.cached_metric_collection_interval =
+            BuilderValue::Set(cached_metric_collection_interval)
+    }
+
     pub fn metric_collection_endpoint(&mut self, metric_collection_endpoint: Option<Url>) {
         self.metric_collection_endpoint = BuilderValue::Set(metric_collection_endpoint)
     }
@@ -427,6 +444,9 @@ impl PageServerConfigBuilder {
             metric_collection_interval: self
                 .metric_collection_interval
                 .ok_or(anyhow!("missing metric_collection_interval"))?,
+            cached_metric_collection_interval: self
+                .cached_metric_collection_interval
+                .ok_or(anyhow!("missing cached_metric_collection_interval"))?,
             metric_collection_endpoint: self
                 .metric_collection_endpoint
                 .ok_or(anyhow!("missing metric_collection_endpoint"))?,
@@ -612,6 +632,7 @@ impl PageServerConf {
                 ConfigurableSemaphore::new(permits)
             }),
            "metric_collection_interval" => builder.metric_collection_interval(parse_toml_duration(key, item)?),
+           "cached_metric_collection_interval" => builder.cached_metric_collection_interval(parse_toml_duration(key, item)?),
            "metric_collection_endpoint" => {
                let endpoint = parse_toml_string(key, item)?.parse().context("failed to parse metric_collection_endpoint")?;
                builder.metric_collection_endpoint(Some(endpoint));
@@ -741,6 +762,7 @@ impl PageServerConf {
             log_format: LogFormat::from_str(defaults::DEFAULT_LOG_FORMAT).unwrap(),
             concurrent_tenant_size_logical_size_queries: ConfigurableSemaphore::default(),
             metric_collection_interval: Duration::from_secs(60),
+            cached_metric_collection_interval: Duration::from_secs(60 * 60),
             metric_collection_endpoint: defaults::DEFAULT_METRIC_COLLECTION_ENDPOINT,
             synthetic_size_calculation_interval: Duration::from_secs(60),
             test_remote_failures: 0,
@@ -881,6 +903,7 @@ initial_superuser_name = 'zzzz'
 id = 10
 
 metric_collection_interval = '222 s'
+cached_metric_collection_interval = '22200 s'
 metric_collection_endpoint = 'http://localhost:80/metrics'
 synthetic_size_calculation_interval = '333 s'
 log_format = 'json'
@@ -928,6 +951,9 @@ log_format = 'json'
                 metric_collection_interval: humantime::parse_duration(
                     defaults::DEFAULT_METRIC_COLLECTION_INTERVAL
                 )?,
+                cached_metric_collection_interval: humantime::parse_duration(
+                    defaults::DEFAULT_CACHED_METRIC_COLLECTION_INTERVAL
+                )?,
                 metric_collection_endpoint: defaults::DEFAULT_METRIC_COLLECTION_ENDPOINT,
                 synthetic_size_calculation_interval: humantime::parse_duration(
                     defaults::DEFAULT_SYNTHETIC_SIZE_CALCULATION_INTERVAL
@@ -978,6 +1004,7 @@ log_format = 'json'
                 log_format: LogFormat::Json,
                 concurrent_tenant_size_logical_size_queries: ConfigurableSemaphore::default(),
                 metric_collection_interval: Duration::from_secs(222),
+                cached_metric_collection_interval: Duration::from_secs(22200),
                 metric_collection_endpoint: Some(Url::parse("http://localhost:80/metrics")?),
                 synthetic_size_calculation_interval: Duration::from_secs(333),
                 test_remote_failures: 0,
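
The `'1 hour'` default and the `'22200 s'` test value above both go through `humantime::parse_duration`, the same parser already used for the other intervals. A minimal sketch of what it accepts:

    use std::time::Duration;

    fn main() {
        // The same spellings that appear in the config above.
        assert_eq!(humantime::parse_duration("1 hour").unwrap(), Duration::from_secs(3600));
        assert_eq!(humantime::parse_duration("22200 s").unwrap(), Duration::from_secs(22200));
    }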
@@ -46,12 +46,12 @@ pub struct PageserverConsumptionMetricsKey {
 pub async fn collect_metrics(
     metric_collection_endpoint: &Url,
     metric_collection_interval: Duration,
+    cached_metric_collection_interval: Duration,
     synthetic_size_calculation_interval: Duration,
     node_id: NodeId,
     ctx: RequestContext,
 ) -> anyhow::Result<()> {
     let mut ticker = tokio::time::interval(metric_collection_interval);
 
     info!("starting collect_metrics");
 
     // spin up background worker that calculates tenant sizes
@@ -75,6 +75,7 @@ pub async fn collect_metrics(
     // define client here to reuse it for all requests
     let client = reqwest::Client::new();
     let mut cached_metrics: HashMap<PageserverConsumptionMetricsKey, u64> = HashMap::new();
+    let mut prev_iteration_time: Option<std::time::Instant> = None;
 
     loop {
         tokio::select! {
@@ -83,10 +84,15 @@ pub async fn collect_metrics(
                 return Ok(());
             },
             _ = ticker.tick() => {
-                if let Err(err) = collect_metrics_iteration(&client, &mut cached_metrics, metric_collection_endpoint, node_id, &ctx).await
-                {
-                    error!("metrics collection failed: {err:?}");
-                }
+                // send cached metrics every cached_metric_collection_interval
+                let send_cached = prev_iteration_time
+                    .map(|x| x.elapsed() >= cached_metric_collection_interval)
+                    .unwrap_or(false);
+
+                prev_iteration_time = Some(std::time::Instant::now());
+
+                collect_metrics_iteration(&client, &mut cached_metrics, metric_collection_endpoint, node_id, &ctx, send_cached).await;
             }
         }
     }
@@ -97,17 +103,19 @@ pub async fn collect_metrics(
 /// Gather per-tenant and per-timeline metrics and send them to the `metric_collection_endpoint`.
 /// Cache metrics to avoid sending the same metrics multiple times.
 ///
+/// This function handles all errors internally
+/// and doesn't break iteration if just one tenant fails.
+///
 /// TODO
 /// - refactor this function (chunking+sending part) to reuse it in proxy module;
-/// - improve error handling. Now if one tenant fails to collect metrics,
-///   the whole iteration fails and metrics for other tenants are not collected.
 pub async fn collect_metrics_iteration(
     client: &reqwest::Client,
     cached_metrics: &mut HashMap<PageserverConsumptionMetricsKey, u64>,
     metric_collection_endpoint: &reqwest::Url,
     node_id: NodeId,
     ctx: &RequestContext,
-) -> anyhow::Result<()> {
+    send_cached: bool,
+) {
     let mut current_metrics: Vec<(PageserverConsumptionMetricsKey, u64)> = Vec::new();
     trace!(
         "starting collect_metrics_iteration. metric_collection_endpoint: {}",
@@ -115,7 +123,13 @@ pub async fn collect_metrics_iteration(
     );
 
     // get list of tenants
-    let tenants = mgr::list_tenants().await?;
+    let tenants = match mgr::list_tenants().await {
+        Ok(tenants) => tenants,
+        Err(err) => {
+            error!("failed to list tenants: {:?}", err);
+            return;
+        }
+    };
 
     // iterate through list of Active tenants and collect metrics
     for (tenant_id, tenant_state) in tenants {
@@ -123,7 +137,15 @@ pub async fn collect_metrics_iteration(
             continue;
         }
 
-        let tenant = mgr::get_tenant(tenant_id, true).await?;
+        let tenant = match mgr::get_tenant(tenant_id, true).await {
+            Ok(tenant) => tenant,
+            Err(err) => {
+                // It is possible that tenant was deleted between
+                // `list_tenants` and `get_tenant`, so just warn about it.
+                warn!("failed to get tenant {tenant_id:?}: {err:?}");
+                continue;
+            }
+        };
 
         let mut tenant_resident_size = 0;
 
@@ -142,29 +164,51 @@ pub async fn collect_metrics_iteration(
                 timeline_written_size,
             ));
 
-            let (timeline_logical_size, is_exact) = timeline.get_current_logical_size(ctx)?;
-            // Only send timeline logical size when it is fully calculated.
-            if is_exact {
-                current_metrics.push((
-                    PageserverConsumptionMetricsKey {
-                        tenant_id,
-                        timeline_id: Some(timeline.timeline_id),
-                        metric: TIMELINE_LOGICAL_SIZE,
-                    },
-                    timeline_logical_size,
-                ));
-            }
+            match timeline.get_current_logical_size(ctx) {
+                // Only send timeline logical size when it is fully calculated.
+                Ok((size, is_exact)) if is_exact => {
+                    current_metrics.push((
+                        PageserverConsumptionMetricsKey {
+                            tenant_id,
+                            timeline_id: Some(timeline.timeline_id),
+                            metric: TIMELINE_LOGICAL_SIZE,
+                        },
+                        size,
+                    ));
+                }
+                Ok((_, _)) => {}
+                Err(err) => {
+                    error!(
+                        "failed to get current logical size for timeline {}: {err:?}",
+                        timeline.timeline_id
+                    );
+                    continue;
+                }
+            };
 
             let timeline_resident_size = timeline.get_resident_physical_size();
             tenant_resident_size += timeline_resident_size;
         }
 
-        let tenant_remote_size = tenant.get_remote_size().await?;
-        debug!(
-            "collected current metrics for tenant: {}: state={:?} resident_size={} remote_size={}",
-            tenant_id, tenant_state, tenant_resident_size, tenant_remote_size
-        );
+        match tenant.get_remote_size().await {
+            Ok(tenant_remote_size) => {
+                current_metrics.push((
+                    PageserverConsumptionMetricsKey {
+                        tenant_id,
+                        timeline_id: None,
+                        metric: REMOTE_STORAGE_SIZE,
+                    },
+                    tenant_remote_size,
+                ));
+            }
+            Err(err) => {
+                error!(
+                    "failed to get remote size for tenant {}: {err:?}",
+                    tenant_id
+                );
+            }
+        }
 
         current_metrics.push((
             PageserverConsumptionMetricsKey {
@@ -175,15 +219,6 @@ pub async fn collect_metrics_iteration(
             tenant_resident_size,
         ));
 
-        current_metrics.push((
-            PageserverConsumptionMetricsKey {
-                tenant_id,
-                timeline_id: None,
-                metric: REMOTE_STORAGE_SIZE,
-            },
-            tenant_remote_size,
-        ));
-
         // Note that this metric is calculated in a separate bgworker
         // Here we only use cached value, which may lag behind the real latest one
         let tenant_synthetic_size = tenant.get_cached_synthetic_size();
@@ -197,15 +232,18 @@ pub async fn collect_metrics_iteration(
         ));
     }
 
-    // Filter metrics
-    current_metrics.retain(|(curr_key, curr_val)| match cached_metrics.get(curr_key) {
-        Some(val) => val != curr_val,
-        None => true,
-    });
+    // Filter metrics, unless we want to send all metrics, including cached ones.
+    // See: https://github.com/neondatabase/neon/issues/3485
+    if !send_cached {
+        current_metrics.retain(|(curr_key, curr_val)| match cached_metrics.get(curr_key) {
+            Some(val) => val != curr_val,
+            None => true,
+        });
+    }
 
     if current_metrics.is_empty() {
         trace!("no new metrics to send");
-        return Ok(());
+        return;
     }
 
     // Send metrics.
@@ -256,8 +294,6 @@ pub async fn collect_metrics_iteration(
             }
         }
     }
-
-    Ok(())
 }
 
 /// Calculate synthetic size for each active tenant
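
The caching decision above boils down to a small pattern: remember when the previous iteration ran and force a full (uncached) send once the configured interval has elapsed. A stripped-down, self-contained sketch of just that logic:

    use std::time::{Duration, Instant};

    fn main() {
        let cached_interval = Duration::from_secs(3600);
        let mut prev_iteration_time: Option<Instant> = None;

        for _tick in 0..3 {
            // First iteration: nothing recorded yet, so send_cached is false.
            let send_cached = prev_iteration_time
                .map(|t| t.elapsed() >= cached_interval)
                .unwrap_or(false);
            prev_iteration_time = Some(Instant::now());
            println!("send_cached = {send_cached}");
        }
    }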
@@ -664,6 +664,55 @@ paths:
             application/json:
               schema:
                 $ref: "#/components/schemas/Error"
+  /v1/tenant/{tenant_id}/config/:
+    parameters:
+      - name: tenant_id
+        in: path
+        required: true
+        schema:
+          type: string
+          format: hex
+    get:
+      description: |
+        Returns tenant's config description: specific config overrides a tenant has
+        and the effective config.
+      responses:
+        "200":
+          description: Tenant config, specific and effective
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/TenantConfig"
+        "400":
+          description: Malformed get tenant config request
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Error"
+        "401":
+          description: Unauthorized Error
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/UnauthorizedError"
+        "403":
+          description: Forbidden Error
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ForbiddenError"
+        "404":
+          description: Tenant or timeline were not found
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/NotFoundError"
+        "500":
+          description: Generic operation error
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Error"
 components:
   securitySchemes:
     JWT:
@@ -724,10 +773,33 @@ components:
           type: integer
         checkpoint_timeout:
           type: string
+        compaction_target_size:
+          type: integer
         compaction_period:
           type: string
         compaction_threshold:
           type: string
+        image_creation_threshold:
+          type: integer
+        walreceiver_connect_timeout:
+          type: string
+        lagging_wal_timeout:
+          type: string
+        max_lsn_wal_lag:
+          type: integer
+        trace_read_requests:
+          type: boolean
+    TenantConfig:
+      type: object
+      properties:
+        tenant_specific_overrides:
+          type: object
+          schema:
+            $ref: "#/components/schemas/TenantConfigInfo"
+        effective_config:
+          type: object
+          schema:
+            $ref: "#/components/schemas/TenantConfigInfo"
     TimelineInfo:
      type: object
      required:
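
A sketch of calling the new endpoint with `reqwest`; the host, port, and tenant id are placeholders, only the path comes from the spec above (assumes `tokio`, `reqwest` with the `json` feature, `serde_json`, and `anyhow`):

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let tenant_id = "00000000000000000000000000000000"; // placeholder hex id
        let url = format!("http://127.0.0.1:9898/v1/tenant/{tenant_id}/config/");

        // Expects the TenantConfig object described above:
        // { "tenant_specific_overrides": {...}, "effective_config": {...} }
        let config: serde_json::Value = reqwest::get(&url).await?.json().await?;
        println!("{config:#}");
        Ok(())
    }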
|
@@ -1,13 +1,15 @@
+use std::collections::HashMap;
 use std::sync::Arc;
 
 use anyhow::{anyhow, Context, Result};
 use hyper::StatusCode;
 use hyper::{Body, Request, Response, Uri};
+use metrics::launch_timestamp::LaunchTimestamp;
 use pageserver_api::models::DownloadRemoteLayersTaskSpawnRequest;
 use remote_storage::GenericRemoteStorage;
 use tokio_util::sync::CancellationToken;
 use tracing::*;
-use utils::http::request::{must_get_query_param, parse_query_param};
+use utils::http::request::{get_request_param, must_get_query_param, parse_query_param};
 
 use super::models::{
     StatusResponse, TenantConfigRequest, TenantCreateRequest, TenantCreateResponse, TenantInfo,
@@ -18,6 +20,7 @@ use crate::pgdatadir_mapping::LsnForTimestamp;
 use crate::task_mgr::TaskKind;
 use crate::tenant::config::TenantConfOpt;
 use crate::tenant::mgr::TenantMapInsertError;
+use crate::tenant::storage_layer::LayerAccessStatsReset;
 use crate::tenant::{PageReconstructError, Timeline};
 use crate::{config::PageServerConf, tenant::mgr};
 use utils::{
@@ -317,10 +320,7 @@ async fn get_lsn_by_timestamp_handler(request: Request<Body>) -> Result<Response
     let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp);
 
     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
-    let timeline = mgr::get_tenant(tenant_id, true)
-        .await
-        .and_then(|tenant| tenant.get_timeline(timeline_id, true))
-        .map_err(ApiError::NotFound)?;
+    let timeline = active_timeline_of_active_tenant(tenant_id, timeline_id).await?;
     let result = timeline
         .find_lsn_for_timestamp(timestamp_pg, &ctx)
         .await
@@ -497,7 +497,11 @@ async fn tenant_size_handler(request: Request<Body>) -> Result<Response<Body>, A
         .map_err(ApiError::InternalServerError)?;
 
     let size = if !inputs_only.unwrap_or(false) {
-        Some(inputs.calculate().map_err(ApiError::InternalServerError)?)
+        Some(
+            tenant
+                .calc_and_update_cached_synthetic_size(&inputs)
+                .map_err(ApiError::InternalServerError)?,
+        )
     } else {
         None
     };
@@ -528,6 +532,65 @@ async fn tenant_size_handler(request: Request<Body>) -> Result<Response<Body>, A
     )
 }
 
+async fn layer_map_info_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
+    let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
+    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
+    let reset: LayerAccessStatsReset =
+        parse_query_param(&request, "reset")?.unwrap_or(LayerAccessStatsReset::NoReset);
+
+    check_permission(&request, Some(tenant_id))?;
+
+    let timeline = active_timeline_of_active_tenant(tenant_id, timeline_id).await?;
+    let layer_map_info = timeline.layer_map_info(reset);
+
+    json_response(StatusCode::OK, layer_map_info)
+}
+
+async fn layer_download_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
+    let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
+    check_permission(&request, Some(tenant_id))?;
+    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
+    let layer_file_name = get_request_param(&request, "layer_file_name")?;
+    check_permission(&request, Some(tenant_id))?;
+
+    let timeline = active_timeline_of_active_tenant(tenant_id, timeline_id).await?;
+    let downloaded = timeline
+        .download_layer(layer_file_name)
+        .await
+        .map_err(ApiError::InternalServerError)?;
+
+    match downloaded {
+        Some(true) => json_response(StatusCode::OK, ()),
+        Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
+        None => json_response(
+            StatusCode::BAD_REQUEST,
+            format!("Layer {tenant_id}/{timeline_id}/{layer_file_name} not found"),
+        ),
+    }
+}
+
+async fn evict_timeline_layer_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
+    let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
+    check_permission(&request, Some(tenant_id))?;
+    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
+    let layer_file_name = get_request_param(&request, "layer_file_name")?;
+
+    let timeline = active_timeline_of_active_tenant(tenant_id, timeline_id).await?;
+    let evicted = timeline
+        .evict_layer(layer_file_name)
+        .await
+        .map_err(ApiError::InternalServerError)?;
+
+    match evicted {
+        Some(true) => json_response(StatusCode::OK, ()),
+        Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
+        None => json_response(
+            StatusCode::BAD_REQUEST,
+            format!("Layer {tenant_id}/{timeline_id}/{layer_file_name} not found"),
+        ),
+    }
+}
+
 // Helper function to standardize the error messages we produce on bad durations
 //
 // Intended to be used with anyhow's `with_context`, e.g.:
@@ -644,12 +707,40 @@ async fn tenant_create_handler(mut request: Request<Body>) -> Result<Response<Bo
     )
 }
 
-async fn tenant_config_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
+async fn get_tenant_config_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
+    let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
+    check_permission(&request, Some(tenant_id))?;
+
+    let tenant = mgr::get_tenant(tenant_id, false)
+        .await
+        .map_err(ApiError::NotFound)?;
+
+    let response = HashMap::from([
+        (
+            "tenant_specific_overrides",
+            serde_json::to_value(tenant.tenant_specific_overrides())
+                .context("serializing tenant specific overrides")
+                .map_err(ApiError::InternalServerError)?,
+        ),
+        (
+            "effective_config",
+            serde_json::to_value(tenant.effective_config())
+                .context("serializing effective config")
+                .map_err(ApiError::InternalServerError)?,
+        ),
+    ]);
+
+    json_response(StatusCode::OK, response)
+}
+
+async fn update_tenant_config_handler(
+    mut request: Request<Body>,
+) -> Result<Response<Body>, ApiError> {
     let request_data: TenantConfigRequest = json_request(&mut request).await?;
     let tenant_id = request_data.tenant_id;
     check_permission(&request, Some(tenant_id))?;
 
-    let mut tenant_conf: TenantConfOpt = Default::default();
+    let mut tenant_conf = TenantConfOpt::default();
     if let Some(gc_period) = request_data.gc_period {
         tenant_conf.gc_period = Some(
             humantime::parse_duration(&gc_period)
@@ -684,12 +775,8 @@ async fn tenant_config_handler(mut request: Request<Body>) -> Result<Response<Bo
                 .map_err(ApiError::BadRequest)?,
         );
     }
-    if let Some(max_lsn_wal_lag) = request_data.max_lsn_wal_lag {
-        tenant_conf.max_lsn_wal_lag = Some(max_lsn_wal_lag);
-    }
-    if let Some(trace_read_requests) = request_data.trace_read_requests {
-        tenant_conf.trace_read_requests = Some(trace_read_requests);
-    }
+    tenant_conf.max_lsn_wal_lag = request_data.max_lsn_wal_lag;
+    tenant_conf.trace_read_requests = request_data.trace_read_requests;
 
     tenant_conf.checkpoint_distance = request_data.checkpoint_distance;
     if let Some(checkpoint_timeout) = request_data.checkpoint_timeout {
@@ -711,7 +798,7 @@ async fn tenant_config_handler(mut request: Request<Body>) -> Result<Response<Bo
     }
 
     let state = get_state(&request);
-    mgr::update_tenant_config(state.conf, tenant_conf, tenant_id)
+    mgr::set_new_tenant_config(state.conf, tenant_conf, tenant_id)
         .instrument(info_span!("tenant_config", tenant = ?tenant_id))
        .await
        // FIXME: `update_tenant_config` can fail because of both user and internal errors.
@@ -804,12 +891,7 @@ async fn timeline_checkpoint_handler(request: Request<Body>) -> Result<Response<
     check_permission(&request, Some(tenant_id))?;
 
     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
-    let tenant = mgr::get_tenant(tenant_id, true)
-        .await
-        .map_err(ApiError::NotFound)?;
-    let timeline = tenant
-        .get_timeline(timeline_id, true)
-        .map_err(ApiError::NotFound)?;
+    let timeline = active_timeline_of_active_tenant(tenant_id, timeline_id).await?;
     timeline
         .freeze_and_flush()
         .await
@@ -830,12 +912,7 @@ async fn timeline_download_remote_layers_handler_post(
     let body: DownloadRemoteLayersTaskSpawnRequest = json_request(&mut request).await?;
     check_permission(&request, Some(tenant_id))?;
 
-    let tenant = mgr::get_tenant(tenant_id, true)
-        .await
-        .map_err(ApiError::NotFound)?;
-    let timeline = tenant
-        .get_timeline(timeline_id, true)
-        .map_err(ApiError::NotFound)?;
+    let timeline = active_timeline_of_active_tenant(tenant_id, timeline_id).await?;
     match timeline.spawn_download_all_remote_layers(body).await {
         Ok(st) => json_response(StatusCode::ACCEPTED, st),
         Err(st) => json_response(StatusCode::CONFLICT, st),
@@ -846,15 +923,10 @@ async fn timeline_download_remote_layers_handler_get(
     request: Request<Body>,
 ) -> Result<Response<Body>, ApiError> {
     let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
-    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
     check_permission(&request, Some(tenant_id))?;
+    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
 
-    let tenant = mgr::get_tenant(tenant_id, true)
-        .await
-        .map_err(ApiError::NotFound)?;
-    let timeline = tenant
-        .get_timeline(timeline_id, true)
-        .map_err(ApiError::NotFound)?;
+    let timeline = active_timeline_of_active_tenant(tenant_id, timeline_id).await?;
     let info = timeline
         .get_download_all_remote_layers_task_info()
         .context("task never started since last pageserver process start")
@@ -862,6 +934,18 @@ async fn timeline_download_remote_layers_handler_get(
     json_response(StatusCode::OK, info)
 }
 
+async fn active_timeline_of_active_tenant(
+    tenant_id: TenantId,
+    timeline_id: TimelineId,
+) -> Result<Arc<Timeline>, ApiError> {
+    let tenant = mgr::get_tenant(tenant_id, true)
+        .await
+        .map_err(ApiError::NotFound)?;
+    tenant
+        .get_timeline(timeline_id, true)
+        .map_err(ApiError::NotFound)
+}
+
 async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
     json_response(
         StatusCode::NOT_FOUND,
@@ -871,6 +955,7 @@ async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
 
 pub fn make_router(
     conf: &'static PageServerConf,
+    launch_ts: &'static LaunchTimestamp,
     auth: Option<Arc<JwtAuth>>,
     remote_storage: Option<GenericRemoteStorage>,
 ) -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
@@ -887,6 +972,14 @@ pub fn make_router(
         }))
     }
 
+    router = router.middleware(
+        endpoint::add_response_header_middleware(
+            "PAGESERVER_LAUNCH_TIMESTAMP",
+            &launch_ts.to_string(),
+        )
+        .expect("construct launch timestamp header middleware"),
+    );
+
     macro_rules! testing_api {
         ($handler_desc:literal, $handler:path $(,)?) => {{
             #[cfg(not(feature = "testing"))]
@@ -919,7 +1012,8 @@ pub fn make_router(
         .post("/v1/tenant", tenant_create_handler)
         .get("/v1/tenant/:tenant_id", tenant_status)
         .get("/v1/tenant/:tenant_id/size", tenant_size_handler)
-        .put("/v1/tenant/config", tenant_config_handler)
+        .put("/v1/tenant/config", update_tenant_config_handler)
+        .get("/v1/tenant/:tenant_id/config", get_tenant_config_handler)
         .get("/v1/tenant/:tenant_id/timeline", timeline_list_handler)
         .post("/v1/tenant/:tenant_id/timeline", timeline_create_handler)
         .post("/v1/tenant/:tenant_id/attach", tenant_attach_handler)
@@ -958,5 +1052,17 @@ pub fn make_router(
             "/v1/tenant/:tenant_id/timeline/:timeline_id",
             timeline_delete_handler,
         )
+        .get(
+            "/v1/tenant/:tenant_id/timeline/:timeline_id/layer",
+            layer_map_info_handler,
+        )
+        .get(
+            "/v1/tenant/:tenant_id/timeline/:timeline_id/layer/:layer_file_name",
+            layer_download_handler,
+        )
+        .delete(
+            "/v1/tenant/:tenant_id/timeline/:timeline_id/layer/:layer_file_name",
+            evict_timeline_layer_handler,
+        )
         .any(handler_404))
 }
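The hunks above (evidently the pageserver's HTTP routes module) add on-demand layer endpoints and fold the previously repeated tenant-then-timeline lookup into `active_timeline_of_active_tenant`. A rough sketch of how a client might drive the new routes; the address, ids, and layer file name are placeholders, and a `reset` query parameter can additionally be passed to the layer map endpoint:

```rust
use anyhow::Result;

#[tokio::main]
async fn main() -> Result<()> {
    let base = "http://127.0.0.1:9898/v1"; // placeholder pageserver address
    let (tenant, timeline) = ("<tenant_id>", "<timeline_id>"); // placeholders
    let client = reqwest::Client::new();

    // GET .../layer returns the layer map description.
    let layers = client
        .get(format!("{base}/tenant/{tenant}/timeline/{timeline}/layer"))
        .send()
        .await?;
    println!("layer map: {}", layers.status());

    // DELETE .../layer/:layer_file_name asks the pageserver to evict one layer:
    // 200 = evicted, 304 = was already evicted, 400 = no such layer.
    let evicted = client
        .delete(format!(
            "{base}/tenant/{tenant}/timeline/{timeline}/layer/<layer_file_name>"
        ))
        .send()
        .await?;
    println!("evict: {}", evicted.status());
    Ok(())
}
```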
@@ -150,6 +150,15 @@ pub static TENANT_STATE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
     .expect("Failed to register pageserver_tenant_states_count metric")
 });
 
+pub static TENANT_SYNTHETIC_SIZE_METRIC: Lazy<UIntGaugeVec> = Lazy::new(|| {
+    register_uint_gauge_vec!(
+        "pageserver_tenant_synthetic_size",
+        "Synthetic size of each tenant",
+        &["tenant_id"]
+    )
+    .expect("Failed to register pageserver_tenant_synthetic_size metric")
+});
+
 // Metrics for cloud upload. These metrics reflect data uploaded to cloud storage,
 // or in testing they estimate how much we would upload if we did.
 static NUM_PERSISTENT_FILES_CREATED: Lazy<IntCounterVec> = Lazy::new(|| {
@@ -593,6 +602,7 @@ impl Drop for TimelineMetrics {
 
 pub fn remove_tenant_metrics(tenant_id: &TenantId) {
     let tid = tenant_id.to_string();
+    let _ = TENANT_SYNTHETIC_SIZE_METRIC.remove_label_values(&[&tid]);
     for state in TENANT_STATE_OPTIONS {
         let _ = TENANT_STATE_METRIC.remove_label_values(&[&tid, state]);
     }
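The new `pageserver_tenant_synthetic_size` gauge follows the usual labelled-gauge lifecycle: register once, set per tenant, and remove the label series when the tenant goes away so the exporter stops reporting a stale value. A minimal sketch against the plain `prometheus` crate; the in-tree `metrics` crate wraps it, and the u64 gauge variant used above comes from that wrapper, so this sketch substitutes the signed `IntGaugeVec`:

```rust
use once_cell::sync::Lazy;
use prometheus::{register_int_gauge_vec, IntGaugeVec};

static SYNTHETIC_SIZE: Lazy<IntGaugeVec> = Lazy::new(|| {
    register_int_gauge_vec!(
        "pageserver_tenant_synthetic_size",
        "Synthetic size of each tenant",
        &["tenant_id"]
    )
    .expect("failed to register gauge")
});

fn main() {
    let tid = "some-tenant-id";
    // Set whenever the size model is recomputed.
    SYNTHETIC_SIZE.with_label_values(&[tid]).set(123_456);
    // Drop the series when the tenant is removed (mirrors remove_tenant_metrics).
    let _ = SYNTHETIC_SIZE.remove_label_values(&[tid]);
}
```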
@@ -169,7 +169,14 @@ task_local! {
 /// Note that we don't try to limit how many task of a certain kind can be running
 /// at the same time.
 ///
-#[derive(Debug, PartialEq, Eq, Clone, Copy)]
+#[derive(
+    Debug,
+    // NB: enumset::EnumSetType derives PartialEq, Eq, Clone, Copy
+    enumset::EnumSetType,
+    serde::Serialize,
+    serde::Deserialize,
+    strum_macros::IntoStaticStr,
+)]
 pub enum TaskKind {
     // Pageserver startup, i.e., `main`
     Startup,
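Swapping the manual `PartialEq, Eq, Clone, Copy` derives for `enumset::EnumSetType` keeps those impls (the derive generates them itself, as the NB comment notes) and additionally lets a set of task kinds be stored and tested as a bitset. A small illustration with the `enumset` crate, using a made-up enum rather than the real `TaskKind`:

```rust
use enumset::{EnumSet, EnumSetType};

#[derive(EnumSetType, Debug)]
enum Kind {
    Startup,
    Compaction,
    GarbageCollector,
}

fn main() {
    // `|` on the variants builds an EnumSet<Kind> backed by a bitmask.
    let background: EnumSet<Kind> = Kind::Compaction | Kind::GarbageCollector;
    assert!(background.contains(Kind::Compaction));
    assert!(!background.contains(Kind::Startup));
    // The Copy/Eq impls still exist, courtesy of the derive.
    let k = Kind::Startup;
    assert_eq!(k, k);
}
```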
@@ -45,13 +45,14 @@ use std::sync::MutexGuard;
 use std::sync::{Mutex, RwLock};
 use std::time::{Duration, Instant};
 
+use self::config::TenantConf;
 use self::metadata::TimelineMetadata;
 use self::remote_timeline_client::RemoteTimelineClient;
 use crate::config::PageServerConf;
 use crate::context::{DownloadBehavior, RequestContext};
 use crate::import_datadir;
 use crate::is_uninit_mark;
-use crate::metrics::{remove_tenant_metrics, TENANT_STATE_METRIC};
+use crate::metrics::{remove_tenant_metrics, TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC};
 use crate::repository::GcResult;
 use crate::task_mgr;
 use crate::task_mgr::TaskKind;
@@ -1618,8 +1619,16 @@ fn tree_sort_timelines(
     Ok(result)
 }
 
-/// Private functions
 impl Tenant {
+    pub fn tenant_specific_overrides(&self) -> TenantConfOpt {
+        *self.tenant_conf.read().unwrap()
+    }
+
+    pub fn effective_config(&self) -> TenantConf {
+        self.tenant_specific_overrides()
+            .merge(self.conf.default_tenant_conf)
+    }
+
     pub fn get_checkpoint_distance(&self) -> u64 {
         let tenant_conf = self.tenant_conf.read().unwrap();
         tenant_conf
@@ -1690,8 +1699,8 @@ impl Tenant {
             .unwrap_or(self.conf.default_tenant_conf.trace_read_requests)
     }
 
-    pub fn update_tenant_config(&self, new_tenant_conf: TenantConfOpt) {
-        self.tenant_conf.write().unwrap().update(&new_tenant_conf);
+    pub fn set_new_tenant_config(&self, new_tenant_conf: TenantConfOpt) {
+        *self.tenant_conf.write().unwrap() = new_tenant_conf;
     }
 
     fn create_timeline_data(
@@ -2432,13 +2441,27 @@ impl Tenant {
     pub async fn calculate_synthetic_size(&self, ctx: &RequestContext) -> anyhow::Result<u64> {
         let inputs = self.gather_size_inputs(ctx).await?;
+
+        self.calc_and_update_cached_synthetic_size(&inputs)
+    }
+
+    /// Calculate synthetic size, cache it and set metric value
+    pub fn calc_and_update_cached_synthetic_size(
+        &self,
+        inputs: &size::ModelInputs,
+    ) -> anyhow::Result<u64> {
         let size = inputs.calculate()?;
+
         self.cached_synthetic_tenant_size
             .store(size, Ordering::Relaxed);
+
+        TENANT_SYNTHETIC_SIZE_METRIC
+            .get_metric_with_label_values(&[&self.tenant_id.to_string()])
+            .unwrap()
+            .set(size);
+
         Ok(size)
     }
 
     pub fn get_cached_synthetic_size(&self) -> u64 {
         self.cached_synthetic_tenant_size.load(Ordering::Relaxed)
     }
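`tenant_specific_overrides` and `effective_config` make the two views of tenant configuration explicit: the stored per-tenant overrides, and those overrides merged over the global defaults. A compact sketch of the assumed `merge` semantics, with field names trimmed down for illustration:

```rust
// Sketch of the override/default split, assuming `merge` means: every
// per-tenant field falls back to the global default when the override is unset.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Conf {
    checkpoint_distance: u64,
    gc_horizon: u64,
}

#[derive(Clone, Copy, Default)]
struct ConfOpt {
    checkpoint_distance: Option<u64>,
    gc_horizon: Option<u64>,
}

impl ConfOpt {
    fn merge(self, default: Conf) -> Conf {
        Conf {
            checkpoint_distance: self
                .checkpoint_distance
                .unwrap_or(default.checkpoint_distance),
            gc_horizon: self.gc_horizon.unwrap_or(default.gc_horizon),
        }
    }
}

fn main() {
    let default = Conf { checkpoint_distance: 256 * 1024 * 1024, gc_horizon: 64 * 1024 * 1024 };
    let overrides = ConfOpt { gc_horizon: Some(1024), ..Default::default() };
    // `effective_config()` above is this merge applied to the stored overrides.
    assert_eq!(overrides.merge(default).gc_horizon, 1024);
    assert_eq!(overrides.merge(default).checkpoint_distance, default.checkpoint_distance);
}
```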
@@ -51,6 +51,7 @@ pub struct TenantConf {
     pub checkpoint_distance: u64,
     // Inmemory layer is also flushed at least once in checkpoint_timeout to
     // eventually upload WAL after activity is stopped.
+    #[serde(with = "humantime_serde")]
     pub checkpoint_timeout: Duration,
     // Target file size, when creating image and delta layers.
     // This parameter determines L1 layer file size.
@@ -96,23 +97,61 @@ pub struct TenantConf {
 /// which parameters are set and which are not.
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
 pub struct TenantConfOpt {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default)]
     pub checkpoint_distance: Option<u64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default)]
     pub checkpoint_timeout: Option<Duration>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default)]
     pub compaction_target_size: Option<u64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(with = "humantime_serde")]
+    #[serde(default)]
     pub compaction_period: Option<Duration>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default)]
     pub compaction_threshold: Option<usize>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default)]
     pub gc_horizon: Option<u64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(with = "humantime_serde")]
+    #[serde(default)]
     pub gc_period: Option<Duration>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default)]
     pub image_creation_threshold: Option<usize>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(with = "humantime_serde")]
+    #[serde(default)]
     pub pitr_interval: Option<Duration>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(with = "humantime_serde")]
+    #[serde(default)]
     pub walreceiver_connect_timeout: Option<Duration>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(with = "humantime_serde")]
+    #[serde(default)]
     pub lagging_wal_timeout: Option<Duration>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default)]
     pub max_lsn_wal_lag: Option<NonZeroU64>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[serde(default)]
     pub trace_read_requests: Option<bool>,
 }
 
@@ -225,3 +264,24 @@ impl Default for TenantConf {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn de_serializing_pageserver_config_omits_empty_values() {
+        let small_conf = TenantConfOpt {
+            gc_horizon: Some(42),
+            ..TenantConfOpt::default()
+        };
+
+        let toml_form = toml_edit::easy::to_string(&small_conf).unwrap();
+        assert_eq!(toml_form, "gc_horizon = 42\n");
+        assert_eq!(small_conf, toml_edit::easy::from_str(&toml_form).unwrap());
+
+        let json_form = serde_json::to_string(&small_conf).unwrap();
+        assert_eq!(json_form, "{\"gc_horizon\":42}");
+        assert_eq!(small_conf, serde_json::from_str(&json_form).unwrap());
+    }
+}
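The `skip_serializing_if = "Option::is_none"` / `default` pair added to every `TenantConfOpt` field is what makes the new round-trip test pass: unset options vanish from the serialized form and come back as `None`. The same behavior in isolation, on a two-field stand-in struct:

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, Default, PartialEq, Serialize, Deserialize)]
struct Opt {
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    gc_horizon: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    compaction_threshold: Option<usize>,
}

fn main() {
    let conf = Opt { gc_horizon: Some(42), ..Default::default() };
    let json = serde_json::to_string(&conf).unwrap();
    // Only the set field is emitted.
    assert_eq!(json, "{\"gc_horizon\":42}");
    // And the omitted field deserializes back to None.
    assert_eq!(serde_json::from_str::<Opt>(&json).unwrap(), conf);
}
```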
@@ -53,12 +53,16 @@ use crate::repository::Key;
 use crate::tenant::storage_layer::InMemoryLayer;
 use crate::tenant::storage_layer::Layer;
 use anyhow::Result;
+use std::collections::HashMap;
 use std::collections::VecDeque;
 use std::ops::Range;
 use std::sync::Arc;
 use utils::lsn::Lsn;
 
 use historic_layer_coverage::BufferedHistoricLayerCoverage;
+pub use historic_layer_coverage::Replacement;
+
+use self::historic_layer_coverage::LayerKey;
 
 use super::storage_layer::range_eq;
 
@@ -86,11 +90,18 @@ pub struct LayerMap<L: ?Sized> {
     pub frozen_layers: VecDeque<Arc<InMemoryLayer>>,
 
     /// Index of the historic layers optimized for search
-    historic: BufferedHistoricLayerCoverage<Arc<L>>,
+    historic: BufferedHistoricLayerCoverage<LayerKey>,
+
+    /// All layers accessible by key. Useful for:
+    /// 1. Iterating all layers
+    /// 2. Dereferencing a self.historic search result
+    /// 3. Replacing a layer with a remote/local version without
+    ///    rebuilding the self.historic index.
+    mapping: HashMap<LayerKey, Arc<L>>,
 
     /// L0 layers have key range Key::MIN..Key::MAX, and locating them using R-Tree search is very inefficient.
     /// So L0 layers are held in l0_delta_layers vector, in addition to the R-tree.
-    l0_delta_layers: Vec<Arc<L>>,
+    l0_delta_layers: HashMap<LayerKey, Arc<L>>,
 }
 
 impl<L: ?Sized> Default for LayerMap<L> {
@@ -99,8 +110,9 @@ impl<L: ?Sized> Default for LayerMap<L> {
             open_layer: None,
             next_open_layer_at: None,
             frozen_layers: VecDeque::default(),
-            l0_delta_layers: Vec::default(),
+            l0_delta_layers: HashMap::default(),
             historic: BufferedHistoricLayerCoverage::default(),
+            mapping: HashMap::default(),
         }
     }
 }
@@ -209,33 +221,38 @@ where
         match (latest_delta, latest_image) {
             (None, None) => None,
             (None, Some(image)) => {
+                let image = self.mapping.get(&image).unwrap();
                 let lsn_floor = image.get_lsn_range().start;
                 Some(SearchResult {
-                    layer: image,
+                    layer: image.clone(),
                     lsn_floor,
                 })
             }
             (Some(delta), None) => {
+                let delta = self.mapping.get(&delta).unwrap();
                 let lsn_floor = delta.get_lsn_range().start;
                 Some(SearchResult {
-                    layer: delta,
+                    layer: delta.clone(),
                     lsn_floor,
                 })
             }
             (Some(delta), Some(image)) => {
+                let image = self.mapping.get(&image).unwrap();
+                let delta = self.mapping.get(&delta).unwrap();
+
                 let img_lsn = image.get_lsn_range().start;
                 let image_is_newer = image.get_lsn_range().end >= delta.get_lsn_range().end;
                 let image_exact_match = img_lsn + 1 == end_lsn;
                 if image_is_newer || image_exact_match {
                     Some(SearchResult {
-                        layer: image,
+                        layer: image.clone(),
                         lsn_floor: img_lsn,
                     })
                 } else {
                     let lsn_floor =
                         std::cmp::max(delta.get_lsn_range().start, image.get_lsn_range().start + 1);
                     Some(SearchResult {
-                        layer: delta,
+                        layer: delta.clone(),
                         lsn_floor,
                     })
                 }
@@ -254,19 +271,12 @@ where
     /// Helper function for BatchedUpdates::insert_historic
     ///
     pub(self) fn insert_historic_noflush(&mut self, layer: Arc<L>) {
-        let kr = layer.get_key_range();
-        let lr = layer.get_lsn_range();
-        self.historic.insert(
-            historic_layer_coverage::LayerKey {
-                key: kr.start.to_i128()..kr.end.to_i128(),
-                lsn: lr.start.0..lr.end.0,
-                is_image: !layer.is_incremental(),
-            },
-            Arc::clone(&layer),
-        );
+        let key = LayerKey::from(&*layer);
+        self.historic.insert(key.clone(), key.clone());
+        self.mapping.insert(key.clone(), layer.clone());
 
         if Self::is_l0(&layer) {
-            self.l0_delta_layers.push(layer);
+            self.l0_delta_layers.insert(key, layer.clone());
         }
 
         NUM_ONDISK_LAYERS.inc();
@@ -278,30 +288,67 @@ where
     /// Helper function for BatchedUpdates::remove_historic
     ///
     pub fn remove_historic_noflush(&mut self, layer: Arc<L>) {
-        let kr = layer.get_key_range();
-        let lr = layer.get_lsn_range();
-        self.historic.remove(historic_layer_coverage::LayerKey {
-            key: kr.start.to_i128()..kr.end.to_i128(),
-            lsn: lr.start.0..lr.end.0,
-            is_image: !layer.is_incremental(),
-        });
+        let key = historic_layer_coverage::LayerKey::from(&*layer);
+        self.historic.remove(key.clone());
+        self.mapping.remove(&key.clone());
 
         if Self::is_l0(&layer) {
-            let len_before = self.l0_delta_layers.len();
-
-            // FIXME: ptr_eq might fail to return true for 'dyn'
-            // references. Clippy complains about this. In practice it
-            // seems to work, the assertion below would be triggered
-            // otherwise but this ought to be fixed.
-            #[allow(clippy::vtable_address_comparisons)]
-            self.l0_delta_layers
-                .retain(|other| !Arc::ptr_eq(other, &layer));
-            assert_eq!(self.l0_delta_layers.len(), len_before - 1);
+            self.l0_delta_layers.remove(&key);
         }
 
         NUM_ONDISK_LAYERS.dec();
     }
+
+    /// Replaces existing layer iff it is the `expected`.
+    ///
+    /// If the expected layer has been removed it will not be inserted by this function.
+    ///
+    /// Returned `Replacement` describes succeeding in replacement or the reason why it could not
+    /// be done.
+    ///
+    /// TODO replacement can be done without buffering and rebuilding layer map updates.
+    /// One way to do that is to add a layer of indirection for returned values, so
+    /// that we can replace values only by updating a hashmap.
+    pub fn replace_historic(
+        &mut self,
+        expected: &Arc<L>,
+        new: Arc<L>,
+    ) -> anyhow::Result<Replacement<Arc<L>>> {
+        let key = historic_layer_coverage::LayerKey::from(&**expected);
+        let other = historic_layer_coverage::LayerKey::from(&*new);
+
+        let expected_l0 = Self::is_l0(expected);
+        let new_l0 = Self::is_l0(&new);
+
+        anyhow::ensure!(
+            key == other,
+            "expected and new must have equal LayerKeys: {key:?} != {other:?}"
+        );
+
+        anyhow::ensure!(
+            expected_l0 == new_l0,
+            "expected and new must both be l0 deltas or neither should be: {expected_l0} != {new_l0}"
+        );
+
+        use std::collections::hash_map::Entry;
+
+        if expected_l0 {
+            match self.mapping.entry(key.clone()) {
+                Entry::Occupied(mut entry) => entry.insert(new.clone()),
+                Entry::Vacant(_) => anyhow::bail!("layer doesn't exist"),
+            };
+        };
+
+        match self.mapping.entry(key.clone()) {
+            Entry::Occupied(mut entry) => entry.insert(new.clone()),
+            Entry::Vacant(_) => anyhow::bail!("layer doesn't exist"),
+        };
+
+        Ok(Replacement::Replaced {
+            in_buffered: false,
+        })
+    }
 
     /// Helper function for BatchedUpdates::drop.
     pub(self) fn flush_updates(&mut self) {
         self.historic.rebuild();
@@ -327,8 +374,8 @@ where
         let start = key.start.to_i128();
         let end = key.end.to_i128();
 
-        let layer_covers = |layer: Option<Arc<L>>| match layer {
-            Some(layer) => layer.get_lsn_range().start >= lsn.start,
+        let layer_covers = |key: Option<&LayerKey>| match key {
+            Some(key) => self.mapping.get(key).unwrap().get_lsn_range().start >= lsn.start,
             None => false,
         };
 
@@ -348,7 +395,7 @@ where
     }
 
     pub fn iter_historic_layers(&self) -> impl '_ + Iterator<Item = Arc<L>> {
-        self.historic.iter()
+        self.mapping.values().cloned()
     }
 
     ///
@@ -375,10 +422,13 @@ where
         // Initialize loop variables
        let mut coverage: Vec<(Range<Key>, Option<Arc<L>>)> = vec![];
        let mut current_key = start;
-        let mut current_val = version.image_coverage.query(start);
+        let mut current_val = version.image_coverage.query(start)
+            .map(|key| self.mapping.get(&key).unwrap().clone());
 
         // Loop through the change events and push intervals
         for (change_key, change_val) in version.image_coverage.range(start..end) {
+            let change_val = change_val.map(|key| self.mapping.get(&key).unwrap().clone());
+
             let kr = Key::from_i128(current_key)..Key::from_i128(change_key);
             coverage.push((kr, current_val.take()));
             current_key = change_key;
@@ -472,6 +522,7 @@ where
         for (change_key, change_val) in version.delta_coverage.range(start..end) {
             // If there's a relevant delta in this part, add 1 and recurse down
             if let Some(val) = current_val {
+                let val = self.mapping.get(&val).unwrap().clone();
                 if val.get_lsn_range().end > lsn.start {
                     let kr = Key::from_i128(current_key)..Key::from_i128(change_key);
                     let lr = lsn.start..val.get_lsn_range().start;
@@ -494,6 +545,7 @@ where
 
         // Consider the last part
         if let Some(val) = current_val {
+            let val = self.mapping.get(&val).unwrap().clone();
             if val.get_lsn_range().end > lsn.start {
                 let kr = Key::from_i128(current_key)..Key::from_i128(end);
                 let lr = lsn.start..val.get_lsn_range().start;
@@ -650,7 +702,7 @@ where
 
     /// Return all L0 delta layers
     pub fn get_level0_deltas(&self) -> Result<Vec<Arc<L>>> {
-        Ok(self.l0_delta_layers.clone())
+        Ok(self.l0_delta_layers.values().cloned().collect())
     }
 
     /// debugging function to print out the contents of the layer map
@@ -676,3 +728,91 @@ where
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::{LayerMap, Replacement};
+    use crate::tenant::storage_layer::{Layer, LayerDescriptor, LayerFileName};
+    use std::str::FromStr;
+    use std::sync::Arc;
+
+    mod l0_delta_layers_updated {
+
+        use super::*;
+
+        #[test]
+        fn for_full_range_delta() {
+            // l0_delta_layers are used by compaction, and should observe all buffered updates
+            l0_delta_layers_updated_scenario(
+                "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000053423C21-0000000053424D69",
+                true
+            )
+        }
+
+        #[test]
+        fn for_non_full_range_delta() {
+            // has minimal uncovered areas compared to l0_delta_layers_updated_on_insert_replace_remove_for_full_range_delta
+            l0_delta_layers_updated_scenario(
+                "000000000000000000000000000000000001-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE__0000000053423C21-0000000053424D69",
+                // because not full range
+                false
+            )
+        }
+
+        #[test]
+        fn for_image() {
+            l0_delta_layers_updated_scenario(
+                "000000000000000000000000000000000000-000000000000000000000000000000010000__0000000053424D69",
+                // code only checks if it is a full range layer, doesn't care about images, which must
+                // mean we should in practice never have full range images
+                false
+            )
+        }
+
+        fn l0_delta_layers_updated_scenario(layer_name: &str, expected_l0: bool) {
+            let name = LayerFileName::from_str(layer_name).unwrap();
+            let skeleton = LayerDescriptor::from(name);
+
+            let remote: Arc<dyn Layer> = Arc::new(skeleton.clone());
+            let downloaded: Arc<dyn Layer> = Arc::new(skeleton);
+
+            let mut map = LayerMap::default();
+
+            // two disjoint Arcs in different lifecycle phases.
+            assert!(!LayerMap::compare_arced_layers(&remote, &downloaded));
+
+            let expected_in_counts = (1, usize::from(expected_l0));
+
+            map.batch_update().insert_historic(remote.clone());
+            assert_eq!(count_layer_in(&map, &remote), expected_in_counts);
+
+            let replaced = map
+                .replace_historic(&remote, downloaded.clone())
+                .expect("name derived attributes are the same");
+            assert!(
+                matches!(replaced, Replacement::Replaced { .. }),
+                "{replaced:?}"
+            );
+            assert_eq!(count_layer_in(&map, &downloaded), expected_in_counts);
+
+            map.batch_update().remove_historic(downloaded.clone());
+            assert_eq!(count_layer_in(&map, &downloaded), (0, 0));
+        }
+
+        fn count_layer_in(map: &LayerMap<dyn Layer>, layer: &Arc<dyn Layer>) -> (usize, usize) {
+            let historic = map
+                .iter_historic_layers()
+                .filter(|x| LayerMap::compare_arced_layers(x, layer))
+                .count();
+            let l0s = map
+                .get_level0_deltas()
+                .expect("why does this return a result");
+            let l0 = l0s
+                .iter()
+                .filter(|x| LayerMap::compare_arced_layers(x, layer))
+                .count();
+
+            (historic, l0)
+        }
+    }
+}
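The structural change in this file is an indirection: the historic coverage index now stores only `LayerKey`s, and a side `HashMap` resolves each key to the `Arc<L>` payload, so swapping a remote layer for its downloaded twin is a single map update instead of an index rebuild. A toy version of the pattern, with illustrative names:

```rust
use std::collections::HashMap;
use std::sync::Arc;

struct Index {
    // Stand-in for BufferedHistoricLayerCoverage<LayerKey>.
    keys: Vec<String>,
}

struct Map<L> {
    index: Index,
    mapping: HashMap<String, Arc<L>>,
}

impl<L> Map<L> {
    fn insert(&mut self, key: String, layer: Arc<L>) {
        self.index.keys.push(key.clone()); // an index rebuild would happen here
        self.mapping.insert(key, layer);
    }
    fn replace(&mut self, key: &str, new: Arc<L>) -> bool {
        // No index rebuild: only the key -> payload binding changes.
        match self.mapping.get_mut(key) {
            Some(slot) => { *slot = new; true }
            None => false,
        }
    }
}

fn main() {
    let mut m = Map { index: Index { keys: vec![] }, mapping: HashMap::new() };
    m.insert("layer-a".into(), Arc::new("remote"));
    assert!(m.replace("layer-a", Arc::new("downloaded")));
    assert_eq!(**m.mapping.get("layer-a").unwrap(), "downloaded");
}
```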
@@ -12,7 +12,7 @@ use super::layer_coverage::LayerCoverageTuple;
 /// These three values are enough to uniquely identify a layer, since
 /// a layer is obligated to contain all contents within range, so two
 /// deltas (or images) with the same range have identical content.
-#[derive(Debug, PartialEq, Eq, Clone)]
+#[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct LayerKey {
     // TODO I use i128 and u64 because it was easy for prototyping,
     // testing, and benchmarking. If we can use the Lsn and Key
@@ -41,6 +41,18 @@ impl Ord for LayerKey {
     }
 }
 
+impl<'a, L: crate::tenant::storage_layer::Layer + ?Sized> From<&'a L> for LayerKey {
+    fn from(layer: &'a L) -> Self {
+        let kr = layer.get_key_range();
+        let lr = layer.get_lsn_range();
+        LayerKey {
+            key: kr.start.to_i128()..kr.end.to_i128(),
+            lsn: lr.start.0..lr.end.0,
+            is_image: !layer.is_incremental(),
+        }
+    }
+}
+
 /// Efficiently queryable layer coverage for each LSN.
 ///
 /// Allows answering layer map queries very efficiently,
@@ -82,15 +94,13 @@ impl<Value: Clone> HistoricLayerCoverage<Value> {
         }
 
         // Insert into data structure
-        if layer_key.is_image {
-            self.head
-                .image_coverage
-                .insert(layer_key.key, layer_key.lsn.clone(), value);
+        let target = if layer_key.is_image {
+            &mut self.head.image_coverage
         } else {
-            self.head
-                .delta_coverage
-                .insert(layer_key.key, layer_key.lsn.clone(), value);
-        }
+            &mut self.head.delta_coverage
+        };
+        target.insert(layer_key.key, layer_key.lsn.clone(), value);
 
         // Remember history. Clone is O(1)
         self.historic.insert(layer_key.lsn.start, self.head.clone());
@@ -415,6 +425,19 @@ impl<Value: Clone> BufferedHistoricLayerCoverage<Value> {
         self.buffer.insert(layer_key, None);
     }
 
+    /// Replaces a previous layer with a new layer value.
+    ///
+    /// The replacement is conditional on:
+    /// - there is an existing `LayerKey` record
+    /// - there is no buffered removal for the given `LayerKey`
+    /// - the given closure returns true for the current `Value`
+    ///
+    /// The closure is used to compare the latest value (buffered insert, or existing layer)
+    /// against some expectation. This allows to use `Arc::ptr_eq` or similar which would be
+    /// inaccessible via `PartialEq` trait.
+    ///
+    /// Returns a `Replacement` value describing the outcome; only the case of
+    /// `Replacement::Replaced` modifies the map and requires a rebuild.
     pub fn rebuild(&mut self) {
         // Find the first LSN that needs to be rebuilt
         let rebuild_since: u64 = match self.buffer.iter().next() {
@@ -458,17 +481,6 @@ impl<Value: Clone> BufferedHistoricLayerCoverage<Value> {
         )
     }
 
-    /// Iterate all the layers
-    pub fn iter(&self) -> impl '_ + Iterator<Item = Value> {
-        // NOTE we can actually perform this without rebuilding,
-        // but it's not necessary for now.
-        if !self.buffer.is_empty() {
-            panic!("rebuild pls")
-        }
-
-        self.layers.values().cloned()
-    }
-
     /// Return a reference to a queryable map, assuming all updates
     /// have already been processed using self.rebuild()
     pub fn get(&self) -> anyhow::Result<&HistoricLayerCoverage<Value>> {
@@ -483,6 +495,22 @@ impl<Value: Clone> BufferedHistoricLayerCoverage<Value> {
     }
 }
 
+/// Outcome of the replace operation.
+#[derive(Debug)]
+pub enum Replacement<Value> {
+    /// Previous value was replaced with the new value.
+    Replaced {
+        /// Replacement happened for a scheduled insert.
+        in_buffered: bool,
+    },
+    /// Key was not found buffered updates or existing layers.
+    NotFound,
+    /// Key has been scheduled for removal, it was not replaced.
+    RemovalBuffered,
+    /// Previous value was rejected by the closure.
+    Unexpected(Value),
+}
+
 #[test]
 fn test_retroactive_regression_1() {
     let mut map = BufferedHistoricLayerCoverage::new();
@@ -548,7 +576,7 @@ fn test_retroactive_simple() {
         LayerKey {
             key: 2..5,
             lsn: 105..106,
-            is_image: true,
+            is_image: false,
         },
         "Delta 1".to_string(),
     );
@@ -556,17 +584,24 @@ fn test_retroactive_simple() {
     // Rebuild so we can start querying
     map.rebuild();
 
-    // Query key 4
-    let version = map.get().unwrap().get_version(90);
-    assert!(version.is_none());
-    let version = map.get().unwrap().get_version(102).unwrap();
-    assert_eq!(version.image_coverage.query(4), Some("Image 1".to_string()));
-    let version = map.get().unwrap().get_version(107).unwrap();
-    assert_eq!(version.image_coverage.query(4), Some("Delta 1".to_string()));
-    let version = map.get().unwrap().get_version(115).unwrap();
-    assert_eq!(version.image_coverage.query(4), Some("Image 2".to_string()));
-    let version = map.get().unwrap().get_version(125).unwrap();
-    assert_eq!(version.image_coverage.query(4), Some("Image 3".to_string()));
+    {
+        let map = map.get().expect("rebuilt");
+
+        let version = map.get_version(90);
+        assert!(version.is_none());
+        let version = map.get_version(102).unwrap();
+        assert_eq!(version.image_coverage.query(4), Some("Image 1".to_string()));
+
+        let version = map.get_version(107).unwrap();
+        assert_eq!(version.image_coverage.query(4), Some("Image 1".to_string()));
+        assert_eq!(version.delta_coverage.query(4), Some("Delta 1".to_string()));
+
+        let version = map.get_version(115).unwrap();
+        assert_eq!(version.image_coverage.query(4), Some("Image 2".to_string()));
+
+        let version = map.get_version(125).unwrap();
+        assert_eq!(version.image_coverage.query(4), Some("Image 3".to_string()));
+    }
 
     // Remove Image 3
     map.remove(LayerKey {
@@ -576,8 +611,11 @@ fn test_retroactive_simple() {
     });
     map.rebuild();
 
-    // Check deletion worked
-    let version = map.get().unwrap().get_version(125).unwrap();
-    assert_eq!(version.image_coverage.query(4), Some("Image 2".to_string()));
-    assert_eq!(version.image_coverage.query(8), Some("Image 4".to_string()));
+    {
+        // Check deletion worked
+        let map = map.get().expect("rebuilt");
+        let version = map.get_version(125).unwrap();
+        assert_eq!(version.image_coverage.query(4), Some("Image 2".to_string()));
+        assert_eq!(version.image_coverage.query(8), Some("Image 4".to_string()));
+    }
 }
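Callers of the replace operation get back a `Replacement` describing why nothing changed, rather than a bare boolean. A sketch of how a caller might branch on it; the enum is restated so the example stands alone, and the handling policy here is an assumption, not actual pageserver behavior:

```rust
#[derive(Debug)]
enum Replacement<V> {
    Replaced { in_buffered: bool },
    NotFound,
    RemovalBuffered,
    Unexpected(V),
}

fn handle(outcome: Replacement<&'static str>) {
    match outcome {
        Replacement::Replaced { in_buffered } => {
            // Only this case mutated the map; a rebuild may still be pending.
            println!("replaced (buffered insert: {in_buffered})");
        }
        Replacement::NotFound => println!("no such layer; racing delete?"),
        Replacement::RemovalBuffered => println!("layer queued for removal; skipping"),
        Replacement::Unexpected(v) => println!("current value {v:?} was not the expected one"),
    }
}

fn main() {
    handle(Replacement::Replaced { in_buffered: false });
    handle(Replacement::Unexpected("stale"));
}
```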
@@ -101,24 +101,24 @@ impl<Value: Clone> LayerCoverage<Value> {
     /// Get the latest (by lsn.end) layer at a given key
     ///
     /// Complexity: O(log N)
-    pub fn query(&self, key: i128) -> Option<Value> {
+    pub fn query(&self, key: i128) -> Option<&Value> {
         self.nodes
             .range(..=key)
             .rev()
             .next()?
             .1
             .as_ref()
-            .map(|(_, v)| v.clone())
+            .map(|(_, v)| v)
     }
 
     /// Iterate the changes in layer coverage in a given range. You will likely
     /// want to start with self.query(key.start), and then follow up with self.range
     ///
     /// Complexity: O(log N + result_size)
-    pub fn range(&self, key: Range<i128>) -> impl '_ + Iterator<Item = (i128, Option<Value>)> {
+    pub fn range(&self, key: Range<i128>) -> impl '_ + Iterator<Item = (i128, Option<&Value>)> {
         self.nodes
             .range(key)
-            .map(|(k, v)| (*k, v.as_ref().map(|x| x.1.clone())))
+            .map(|(k, v)| (*k, v.as_ref().map(|x| &x.1)))
     }
 
     /// O(1) clone
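`query` is a predecessor lookup on a `BTreeMap`: find the greatest change point at or below the key and return the layer recorded there, if any. The hunk above only changes the return type from an owned `Value` to a borrow, leaving the clone decision to callers. A standalone sketch of the same lookup, simplified to store the value directly where the real map stores an `(lsn_end, value)` pair:

```rust
use std::collections::BTreeMap;

fn query<V>(nodes: &BTreeMap<i128, Option<V>>, key: i128) -> Option<&V> {
    // Greatest entry with a change point <= `key`; None when that slot is a gap.
    nodes.range(..=key).rev().next()?.1.as_ref()
}

fn main() {
    let mut nodes = BTreeMap::new();
    nodes.insert(0, Some("layer A")); // coverage starts at key 0
    nodes.insert(10, None); // ...and ends at key 10
    assert_eq!(query(&nodes, 5), Some(&"layer A"));
    assert_eq!(query(&nodes, 15), None);
}
```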
@@ -285,17 +285,22 @@ pub async fn create_tenant(
     }).await
 }
 
-pub async fn update_tenant_config(
+pub async fn set_new_tenant_config(
     conf: &'static PageServerConf,
-    tenant_conf: TenantConfOpt,
+    new_tenant_conf: TenantConfOpt,
     tenant_id: TenantId,
 ) -> anyhow::Result<()> {
     info!("configuring tenant {tenant_id}");
     let tenant = get_tenant(tenant_id, true).await?;
 
-    tenant.update_tenant_config(tenant_conf);
     let tenant_config_path = conf.tenant_config_path(tenant_id);
-    Tenant::persist_tenant_config(&tenant.tenant_id(), &tenant_config_path, tenant_conf, false)?;
+    Tenant::persist_tenant_config(
+        &tenant.tenant_id(),
+        &tenant_config_path,
+        new_tenant_conf,
+        false,
+    )?;
+    tenant.set_new_tenant_config(new_tenant_conf);
     Ok(())
 }
 
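Besides the rename, this hunk reorders the steps: the new config is persisted to `tenant_config_path` first and only then swapped into the in-memory `Tenant`, so a failed write leaves the tenant running on its old, still-accurate settings. The shape of that ordering, reduced to a sketch with assumed names:

```rust
// Persist-then-apply: the fallible step happens while nothing has changed yet;
// the in-memory swap afterwards is infallible (like `*lock = new_conf`).
fn set_new_config(
    persist: impl FnOnce() -> anyhow::Result<()>,
    apply_in_memory: impl FnOnce(),
) -> anyhow::Result<()> {
    persist()?;
    apply_in_memory();
    Ok(())
}

fn main() -> anyhow::Result<()> {
    set_new_config(|| Ok(()), || println!("config applied"))
}
```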
@@ -1135,18 +1135,29 @@ mod tests {
         client.init_upload_queue_for_empty_remote(&metadata)?;

         // Create a couple of dummy files, schedule upload for them
-        let content_foo = dummy_contents("foo");
-        let content_bar = dummy_contents("bar");
-        std::fs::write(timeline_path.join("foo"), &content_foo)?;
-        std::fs::write(timeline_path.join("bar"), &content_bar)?;
+        let layer_file_name_1: LayerFileName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap();
+        let layer_file_name_2: LayerFileName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D9-00000000016B5A52".parse().unwrap();
+        let layer_file_name_3: LayerFileName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59DA-00000000016B5A53".parse().unwrap();
+        let content_1 = dummy_contents("foo");
+        let content_2 = dummy_contents("bar");
+        let content_3 = dummy_contents("baz");
+        std::fs::write(
+            timeline_path.join(layer_file_name_1.file_name()),
+            &content_1,
+        )?;
+        std::fs::write(
+            timeline_path.join(layer_file_name_2.file_name()),
+            &content_2,
+        )?;
+        std::fs::write(timeline_path.join(layer_file_name_3.file_name()), content_3)?;

         client.schedule_layer_file_upload(
-            &LayerFileName::Test("foo".to_owned()),
-            &LayerFileMetadata::new(content_foo.len() as u64),
+            &layer_file_name_1,
+            &LayerFileMetadata::new(content_1.len() as u64),
         )?;
         client.schedule_layer_file_upload(
-            &LayerFileName::Test("bar".to_owned()),
-            &LayerFileMetadata::new(content_bar.len() as u64),
+            &layer_file_name_2,
+            &LayerFileMetadata::new(content_2.len() as u64),
         )?;

         // Check that they are started immediately, not queued
@@ -1183,7 +1194,13 @@ mod tests {

         // Download back the index.json, and check that the list of files is correct
         let index_part = runtime.block_on(client.download_index_file())?;
-        assert_file_list(&index_part.timeline_layers, &["foo", "bar"]);
+        assert_file_list(
+            &index_part.timeline_layers,
+            &[
+                &layer_file_name_1.file_name(),
+                &layer_file_name_2.file_name(),
+            ],
+        );
         let downloaded_metadata = index_part.parse_metadata()?;
         assert_eq!(downloaded_metadata, metadata);

@@ -1191,10 +1208,10 @@ mod tests {
         let content_baz = dummy_contents("baz");
         std::fs::write(timeline_path.join("baz"), &content_baz)?;
         client.schedule_layer_file_upload(
-            &LayerFileName::Test("baz".to_owned()),
+            &layer_file_name_3,
             &LayerFileMetadata::new(content_baz.len() as u64),
         )?;
-        client.schedule_layer_file_deletion(&[LayerFileName::Test("foo".to_owned())])?;
+        client.schedule_layer_file_deletion(&[layer_file_name_1.clone()])?;
         {
             let mut guard = client.upload_queue.lock().unwrap();
             let upload_queue = guard.initialized_mut().unwrap();
@@ -1206,12 +1223,26 @@ mod tests {
             assert!(upload_queue.num_inprogress_deletions == 0);
             assert!(upload_queue.latest_files_changes_since_metadata_upload_scheduled == 0);
         }
-        assert_remote_files(&["foo", "bar", "index_part.json"], &remote_timeline_dir);
+        assert_remote_files(
+            &[
+                &layer_file_name_1.file_name(),
+                &layer_file_name_2.file_name(),
+                "index_part.json",
+            ],
+            &remote_timeline_dir,
+        );

         // Finish them
         runtime.block_on(client.wait_completion())?;

-        assert_remote_files(&["bar", "baz", "index_part.json"], &remote_timeline_dir);
+        assert_remote_files(
+            &[
+                &layer_file_name_2.file_name(),
+                &layer_file_name_3.file_name(),
+                "index_part.json",
+            ],
+            &remote_timeline_dir,
+        );

         Ok(())
     }
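The fixtures above replace the synthetic `LayerFileName::Test` variant with real delta-layer names of the form `{key_start}-{key_end}__{lsn_start}-{lsn_end}`, all hex. A small standalone sketch decomposing one of the fixture names (illustrative parsing only, not the crate's parser):

fn main() {
    // Format: {key_start}-{key_end}__{lsn_start}-{lsn_end}, all hex digits.
    let name = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51";
    let (keys, lsns) = name.split_once("__").unwrap();
    let (key_start, _key_end) = keys.split_once('-').unwrap();
    let (lsn_start, lsn_end) = lsns.split_once('-').unwrap();
    // The LSN range is what distinguishes a delta name from an image name,
    // which carries only a single LSN after the "__".
    assert_eq!(u64::from_str_radix(lsn_start, 16).unwrap(), 0x016B59D8);
    assert_eq!(u64::from_str_radix(lsn_end, 16).unwrap(), 0x016B5A51);
    assert_eq!(key_start.len(), 36); // 36 hex digits per key bound
}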
@@ -8,7 +8,8 @@ use serde::{Deserialize, Serialize};
 use serde_with::{serde_as, DisplayFromStr};
 use tracing::warn;

-use crate::tenant::{metadata::TimelineMetadata, storage_layer::LayerFileName};
+use crate::tenant::metadata::TimelineMetadata;
+use crate::tenant::storage_layer::LayerFileName;

 use utils::lsn::Lsn;

@@ -274,7 +275,7 @@ mod tests {
             "timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
             "layer_metadata":{
                 "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
-                "LAYER_FILE_NAME::test/not_a_real_layer_but_adding_coverage": { "file_size": 9007199254741001 }
+                "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 }
             },
             "disk_consistent_lsn":"0/16960E8",
             "metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
@@ -288,7 +289,7 @@ mod tests {
             ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata {
                 file_size: Some(25600000),
             }),
-            (LayerFileName::new_test("not_a_real_layer_but_adding_coverage"), IndexLayerMetadata {
+            ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata {
                 // serde_json should always parse this but this might be a double with jq for
                 // example.
                 file_size: Some(9007199254741001),
@@ -312,7 +313,7 @@ mod tests {
             "missing_layers":["This shouldn't fail deserialization"],
             "layer_metadata":{
                 "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
-                "LAYER_FILE_NAME::test/not_a_real_layer_but_adding_coverage": { "file_size": 9007199254741001 }
+                "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51": { "file_size": 9007199254741001 }
             },
             "disk_consistent_lsn":"0/16960E8",
             "metadata_bytes":[112,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
@@ -326,7 +327,7 @@ mod tests {
             ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".parse().unwrap(), IndexLayerMetadata {
                 file_size: Some(25600000),
             }),
-            (LayerFileName::new_test("not_a_real_layer_but_adding_coverage"), IndexLayerMetadata {
+            ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), IndexLayerMetadata {
                 // serde_json should always parse this but this might be a double with jq for
                 // example.
                 file_size: Some(9007199254741001),
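The `file_size` of 9007199254741001 in these fixtures is deliberately just above 2^53, the point where an f64 round-trip loses integer precision; the fixture's own comment warns that jq-style tooling may mangle it even though serde_json parses it exactly. A hedged sketch of the `layer_metadata` map shape, assuming the serde/serde_json dependencies already used in this diff:

use serde::Deserialize;
use std::collections::HashMap;

// Illustrative: index_part.json keys its layer metadata by layer file name.
#[derive(Deserialize, Debug)]
struct Meta {
    file_size: Option<u64>,
}

fn main() {
    let json = r#"{"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51":{"file_size":9007199254741001}}"#;
    let m: HashMap<String, Meta> = serde_json::from_str(json).unwrap();
    // serde_json parses 9007199254741001 exactly as a u64; tools that route
    // the value through an f64 (e.g. jq) may not.
    assert_eq!(m.values().next().unwrap().file_size, Some(9007199254741001));
}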
@@ -6,14 +6,24 @@ mod image_layer;
 mod inmemory_layer;
 mod remote_layer;

+use crate::config::PageServerConf;
 use crate::context::RequestContext;
 use crate::repository::{Key, Value};
+use crate::task_mgr::TaskKind;
 use crate::walrecord::NeonWalRecord;
 use anyhow::Result;
 use bytes::Bytes;
+use enum_map::EnumMap;
+use enumset::EnumSet;
+use pageserver_api::models::LayerAccessKind;
+use pageserver_api::models::{
+    HistoricLayerInfo, LayerResidenceEvent, LayerResidenceEventReason, LayerResidenceStatus,
+};
 use std::ops::Range;
 use std::path::PathBuf;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
+use std::time::{SystemTime, UNIX_EPOCH};
+use utils::history_buffer::HistoryBufferWithDropCounter;

 use utils::{
     id::{TenantId, TimelineId},
@@ -21,7 +31,7 @@ use utils::{
 };

 pub use delta_layer::{DeltaLayer, DeltaLayerWriter};
-pub use filename::{DeltaFileName, ImageFileName, LayerFileName, PathOrConf};
+pub use filename::{DeltaFileName, ImageFileName, LayerFileName};
 pub use image_layer::{ImageLayer, ImageLayerWriter};
 pub use inmemory_layer::InMemoryLayer;
 pub use remote_layer::RemoteLayer;
@@ -81,9 +91,156 @@ pub enum ValueReconstructResult {
     Missing,
 }

+#[derive(Debug)]
+pub struct LayerAccessStats(Mutex<LayerAccessStatsInner>);
+
+#[derive(Debug, Default, Clone)]
+struct LayerAccessStatsInner {
+    first_access: Option<LayerAccessStatFullDetails>,
+    count_by_access_kind: EnumMap<LayerAccessKind, u64>,
+    task_kind_flag: EnumSet<TaskKind>,
+    last_accesses: HistoryBufferWithDropCounter<LayerAccessStatFullDetails, 16>,
+    last_residence_changes: HistoryBufferWithDropCounter<LayerResidenceEvent, 16>,
+}
+
+#[derive(Debug, Clone)]
+struct LayerAccessStatFullDetails {
+    when: SystemTime,
+    task_kind: TaskKind,
+    access_kind: LayerAccessKind,
+}
+
+#[derive(Clone, Copy, strum_macros::EnumString)]
+pub enum LayerAccessStatsReset {
+    NoReset,
+    JustTaskKindFlags,
+    AllStats,
+}
+
+fn system_time_to_millis_since_epoch(ts: &SystemTime) -> u64 {
+    ts.duration_since(UNIX_EPOCH)
+        .expect("better to die in this unlikely case than report false stats")
+        .as_millis()
+        .try_into()
+        .expect("64 bits is enough for few more years")
+}
+
+impl LayerAccessStatFullDetails {
+    fn to_api_model(&self) -> pageserver_api::models::LayerAccessStatFullDetails {
+        let Self {
+            when,
+            task_kind,
+            access_kind,
+        } = self;
+        pageserver_api::models::LayerAccessStatFullDetails {
+            when_millis_since_epoch: system_time_to_millis_since_epoch(when),
+            task_kind: task_kind.into(), // into static str, powered by strum_macros
+            access_kind: *access_kind,
+        }
+    }
+}
+
+impl LayerAccessStats {
+    pub(crate) fn for_loading_layer(status: LayerResidenceStatus) -> Self {
+        let new = LayerAccessStats(Mutex::new(LayerAccessStatsInner::default()));
+        new.record_residence_event(status, LayerResidenceEventReason::LayerLoad);
+        new
+    }
+
+    pub(crate) fn for_new_layer_file() -> Self {
+        let new = LayerAccessStats(Mutex::new(LayerAccessStatsInner::default()));
+        new.record_residence_event(
+            LayerResidenceStatus::Resident,
+            LayerResidenceEventReason::LayerCreate,
+        );
+        new
+    }
+
+    /// Creates a clone of `self` and records `new_status` in the clone.
+    /// The `new_status` is not recorded in `self`
+    pub(crate) fn clone_for_residence_change(
+        &self,
+        new_status: LayerResidenceStatus,
+    ) -> LayerAccessStats {
+        let clone = {
+            let inner = self.0.lock().unwrap();
+            inner.clone()
+        };
+        let new = LayerAccessStats(Mutex::new(clone));
+        new.record_residence_event(new_status, LayerResidenceEventReason::ResidenceChange);
+        new
+    }
+
+    fn record_residence_event(
+        &self,
+        status: LayerResidenceStatus,
+        reason: LayerResidenceEventReason,
+    ) {
+        let mut inner = self.0.lock().unwrap();
+        inner
+            .last_residence_changes
+            .write(LayerResidenceEvent::new(status, reason));
+    }
+
+    fn record_access(&self, access_kind: LayerAccessKind, task_kind: TaskKind) {
+        let mut inner = self.0.lock().unwrap();
+        let this_access = LayerAccessStatFullDetails {
+            when: SystemTime::now(),
+            task_kind,
+            access_kind,
+        };
+        inner
+            .first_access
+            .get_or_insert_with(|| this_access.clone());
+        inner.count_by_access_kind[access_kind] += 1;
+        inner.task_kind_flag |= task_kind;
+        inner.last_accesses.write(this_access);
+    }
+    fn to_api_model(
+        &self,
+        reset: LayerAccessStatsReset,
+    ) -> pageserver_api::models::LayerAccessStats {
+        let mut inner = self.0.lock().unwrap();
+        let LayerAccessStatsInner {
+            first_access,
+            count_by_access_kind,
+            task_kind_flag,
+            last_accesses,
+            last_residence_changes,
+        } = &*inner;
+        let ret = pageserver_api::models::LayerAccessStats {
+            access_count_by_access_kind: count_by_access_kind
+                .iter()
+                .map(|(kind, count)| (kind, *count))
+                .collect(),
+            task_kind_access_flag: task_kind_flag
+                .iter()
+                .map(|task_kind| task_kind.into()) // into static str, powered by strum_macros
+                .collect(),
+            first: first_access.as_ref().map(|a| a.to_api_model()),
+            accesses_history: last_accesses.map(|m| m.to_api_model()),
+            residence_events_history: last_residence_changes.clone(),
+        };
+        match reset {
+            LayerAccessStatsReset::NoReset => (),
+            LayerAccessStatsReset::JustTaskKindFlags => {
+                inner.task_kind_flag.clear();
+            }
+            LayerAccessStatsReset::AllStats => {
+                *inner = LayerAccessStatsInner::default();
+            }
+        }
+        ret
+    }
+}
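`LayerAccessStats` is the heart of this change: interior mutability behind `&self`, per-kind counters, bounded histories, and a snapshot-with-optional-reset read path. A miniature standalone sketch of that pattern (a `HashMap` standing in for `EnumMap`, a `bool` for `LayerAccessStatsReset`):

use std::collections::HashMap;
use std::sync::Mutex;

// Illustrative miniature of the LayerAccessStats pattern above.
#[derive(Default)]
struct MiniStats(Mutex<HashMap<&'static str, u64>>);

impl MiniStats {
    fn record_access(&self, kind: &'static str) {
        *self.0.lock().unwrap().entry(kind).or_insert(0) += 1;
    }

    /// Snapshot the counters; optionally clear them in the same critical
    /// section so no access is lost between "read" and "reset".
    fn to_api_model(&self, reset: bool) -> HashMap<&'static str, u64> {
        let mut inner = self.0.lock().unwrap();
        let snapshot = inner.clone();
        if reset {
            inner.clear();
        }
        snapshot
    }
}

Doing the snapshot and the reset under one lock acquisition means no concurrently recorded access can fall between them, which is presumably why `to_api_model` takes the reset mode as an argument instead of exposing a separate `reset()`.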

 /// Supertrait of the [`Layer`] trait that captures the bare minimum interface
 /// required by [`LayerMap`].
-pub trait Layer: Send + Sync {
+///
+/// All layers should implement a minimal `std::fmt::Debug` without tenant or
+/// timeline names, because those are known in the context of which the layers
+/// are used in (timeline).
+pub trait Layer: std::fmt::Debug + Send + Sync {
     /// Range of keys that this layer covers
     fn get_key_range(&self) -> Range<Key>;

@@ -146,8 +303,7 @@ pub type LayerKeyIter<'i> = Box<dyn Iterator<Item = (Key, Lsn, u64)> + 'i>;
 /// Furthermore, there are two kinds of on-disk layers: delta and image layers.
 /// A delta layer contains all modifications within a range of LSNs and keys.
 /// An image layer is a snapshot of all the data in a key-range, at a single
-/// LSN
-///
+/// LSN.
 pub trait PersistentLayer: Layer {
     fn get_tenant_id(&self) -> TenantId;

@@ -187,6 +343,10 @@ pub trait PersistentLayer: Layer {
     /// Should not change over the lifetime of the layer object because
     /// current_physical_size is computed as the som of this value.
     fn file_size(&self) -> Option<u64>;
+
+    fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo;
+
+    fn access_stats(&self) -> &LayerAccessStats;
 }

 pub fn downcast_remote_layer(
@@ -199,15 +359,11 @@ pub fn downcast_remote_layer(
     }
 }

-impl std::fmt::Debug for dyn Layer {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("Layer")
-            .field("short_id", &self.short_id())
-            .finish()
-    }
-}
-
 /// Holds metadata about a layer without any content. Used mostly for testing.
+///
+/// To use filenames as fixtures, parse them as [`LayerFileName`] then convert from that to a
+/// LayerDescriptor.
+#[derive(Clone, Debug)]
 pub struct LayerDescriptor {
     pub key: Range<Key>,
     pub lsn: Range<Lsn>,
@@ -246,3 +402,50 @@ impl Layer for LayerDescriptor {
         todo!()
     }
 }
+
+impl From<DeltaFileName> for LayerDescriptor {
+    fn from(value: DeltaFileName) -> Self {
+        let short_id = value.to_string();
+        LayerDescriptor {
+            key: value.key_range,
+            lsn: value.lsn_range,
+            is_incremental: true,
+            short_id,
+        }
+    }
+}
+
+impl From<ImageFileName> for LayerDescriptor {
+    fn from(value: ImageFileName) -> Self {
+        let short_id = value.to_string();
+        let lsn = value.lsn_as_range();
+        LayerDescriptor {
+            key: value.key_range,
+            lsn,
+            is_incremental: false,
+            short_id,
+        }
+    }
+}
+
+impl From<LayerFileName> for LayerDescriptor {
+    fn from(value: LayerFileName) -> Self {
+        match value {
+            LayerFileName::Delta(d) => Self::from(d),
+            LayerFileName::Image(i) => Self::from(i),
+        }
+    }
+}
+
+/// Helper enum to hold a PageServerConf, or a path
+///
+/// This is used by DeltaLayer and ImageLayer. Normally, this holds a reference to the
+/// global config, and paths to layer files are constructed using the tenant/timeline
+/// path from the config. But in the 'pageserver_binutils' binary, we need to construct a Layer
+/// struct for a file on disk, without having a page server running, so that we have no
+/// config. In that case, we use the Path variant to hold the full path to the file on
+/// disk.
+enum PathOrConf {
+    Path(PathBuf),
+    Conf(&'static PageServerConf),
+}
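The new `From` impls let tests build a `LayerDescriptor` fixture straight from a parsed file name. A standalone sketch of the same dispatch chain, with illustrative stand-in types: the enum-level `From` just forwards to the variant-specific conversions, and the image case widens its single LSN into a one-wide range (what `lsn_as_range` does elsewhere in this diff):

use std::ops::Range;

struct DeltaName { lsn_range: Range<u64> }
struct ImageName { lsn: u64 }

enum Name { Delta(DeltaName), Image(ImageName) }

struct Descriptor { lsn: Range<u64>, is_incremental: bool }

impl From<DeltaName> for Descriptor {
    fn from(v: DeltaName) -> Self {
        Descriptor { lsn: v.lsn_range, is_incremental: true }
    }
}

impl From<ImageName> for Descriptor {
    fn from(v: ImageName) -> Self {
        // An image is a snapshot at one LSN; represent it as a 1-wide range.
        Descriptor { lsn: v.lsn..(v.lsn + 1), is_incremental: false }
    }
}

impl From<Name> for Descriptor {
    fn from(v: Name) -> Self {
        match v {
            Name::Delta(d) => Self::from(d),
            Name::Image(i) => Self::from(i),
        }
    }
}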
@@ -37,6 +37,7 @@ use crate::virtual_file::VirtualFile;
 use crate::{walrecord, TEMP_FILE_SUFFIX};
 use crate::{DELTA_FILE_MAGIC, STORAGE_FORMAT_VERSION};
 use anyhow::{bail, ensure, Context, Result};
+use pageserver_api::models::{HistoricLayerInfo, LayerAccessKind};
 use rand::{distributions::Alphanumeric, Rng};
 use serde::{Deserialize, Serialize};
 use std::fs::{self, File};
@@ -54,7 +55,10 @@ use utils::{
     lsn::Lsn,
 };

-use super::{DeltaFileName, Layer, LayerFileName, LayerIter, LayerKeyIter, PathOrConf};
+use super::{
+    DeltaFileName, Layer, LayerAccessStats, LayerAccessStatsReset, LayerFileName, LayerIter,
+    LayerKeyIter, LayerResidenceStatus, PathOrConf,
+};

 ///
 /// Header stored in the beginning of the file
@@ -166,14 +170,13 @@ impl DeltaKey {
     }
 }

+/// DeltaLayer is the in-memory data structure associated with an on-disk delta
+/// file.
 ///
-/// DeltaLayer is the in-memory data structure associated with an
-/// on-disk delta file. We keep a DeltaLayer in memory for each
-/// file, in the LayerMap. If a layer is in "loaded" state, we have a
-/// copy of the index in memory, in 'inner'. Otherwise the struct is
-/// just a placeholder for a file that exists on disk, and it needs to
-/// be loaded before using it in queries.
-///
+/// We keep a DeltaLayer in memory for each file, in the LayerMap. If a layer
+/// is in "loaded" state, we have a copy of the index in memory, in 'inner'.
+/// Otherwise the struct is just a placeholder for a file that exists on disk,
+/// and it needs to be loaded before using it in queries.
 pub struct DeltaLayer {
     path_or_conf: PathOrConf,

@@ -184,9 +187,22 @@ pub struct DeltaLayer {

     pub file_size: u64,

+    access_stats: LayerAccessStats,
+
     inner: RwLock<DeltaLayerInner>,
 }

+impl std::fmt::Debug for DeltaLayer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("DeltaLayer")
+            .field("key_range", &self.key_range)
+            .field("lsn_range", &self.lsn_range)
+            .field("file_size", &self.file_size)
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
 pub struct DeltaLayerInner {
     /// If false, the fields below have not been loaded into memory yet.
     loaded: bool,
@@ -199,6 +215,16 @@ pub struct DeltaLayerInner {
     file: Option<FileBlockReader<VirtualFile>>,
 }

+impl std::fmt::Debug for DeltaLayerInner {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("DeltaLayerInner")
+            .field("loaded", &self.loaded)
+            .field("index_start_blk", &self.index_start_blk)
+            .field("index_root_blk", &self.index_root_blk)
+            .finish()
+    }
+}
+
 impl Layer for DeltaLayer {
     fn get_key_range(&self) -> Range<Key> {
         self.key_range.clone()
@@ -230,7 +256,7 @@ impl Layer for DeltaLayer {
             return Ok(());
         }

-        let inner = self.load(ctx)?;
+        let inner = self.load(LayerAccessKind::Dump, ctx)?;

         println!(
             "index_start_blk: {}, root {}",
@@ -303,7 +329,7 @@ impl Layer for DeltaLayer {

         {
             // Open the file and lock the metadata in memory
-            let inner = self.load(ctx)?;
+            let inner = self.load(LayerAccessKind::GetValueReconstructData, ctx)?;

             // Scan the page versions backwards, starting from `lsn`.
             let file = inner.file.as_ref().unwrap();
@@ -394,7 +420,9 @@ impl PersistentLayer for DeltaLayer {
     }

     fn iter(&self, ctx: &RequestContext) -> Result<LayerIter<'_>> {
-        let inner = self.load(ctx).context("load delta layer")?;
+        let inner = self
+            .load(LayerAccessKind::KeyIter, ctx)
+            .context("load delta layer")?;
         Ok(match DeltaValueIter::new(inner) {
             Ok(iter) => Box::new(iter),
             Err(err) => Box::new(std::iter::once(Err(err))),
@@ -402,7 +430,7 @@ impl PersistentLayer for DeltaLayer {
     }

     fn key_iter(&self, ctx: &RequestContext) -> Result<LayerKeyIter<'_>> {
-        let inner = self.load(ctx)?;
+        let inner = self.load(LayerAccessKind::KeyIter, ctx)?;
         Ok(Box::new(
             DeltaKeyIter::new(inner).context("Layer index is corrupted")?,
         ))
@@ -417,6 +445,26 @@ impl PersistentLayer for DeltaLayer {
     fn file_size(&self) -> Option<u64> {
         Some(self.file_size)
     }
+
+    fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo {
+        let layer_file_name = self.filename().file_name();
+        let lsn_range = self.get_lsn_range();
+
+        let access_stats = self.access_stats.to_api_model(reset);
+
+        HistoricLayerInfo::Delta {
+            layer_file_name,
+            layer_file_size: Some(self.file_size),
+            lsn_start: lsn_range.start,
+            lsn_end: lsn_range.end,
+            remote: false,
+            access_stats,
+        }
+    }
+
+    fn access_stats(&self) -> &LayerAccessStats {
+        &self.access_stats
+    }
 }

 impl DeltaLayer {
@@ -461,7 +509,13 @@ impl DeltaLayer {
     /// Open the underlying file and read the metadata into memory, if it's
     /// not loaded already.
     ///
-    fn load(&self, _ctx: &RequestContext) -> Result<RwLockReadGuard<DeltaLayerInner>> {
+    fn load(
+        &self,
+        access_kind: LayerAccessKind,
+        ctx: &RequestContext,
+    ) -> Result<RwLockReadGuard<DeltaLayerInner>> {
+        self.access_stats
+            .record_access(access_kind, ctx.task_kind());
         loop {
             // Quick exit if already loaded
             let inner = self.inner.read().unwrap();
@@ -542,6 +596,7 @@ impl DeltaLayer {
         tenant_id: TenantId,
         filename: &DeltaFileName,
         file_size: u64,
+        access_stats: LayerAccessStats,
     ) -> DeltaLayer {
         DeltaLayer {
             path_or_conf: PathOrConf::Conf(conf),
@@ -550,6 +605,7 @@ impl DeltaLayer {
             key_range: filename.key_range.clone(),
             lsn_range: filename.lsn_range.clone(),
             file_size,
+            access_stats,
             inner: RwLock::new(DeltaLayerInner {
                 loaded: false,
                 file: None,
@@ -579,6 +635,7 @@ impl DeltaLayer {
             key_range: summary.key_range,
             lsn_range: summary.lsn_range,
             file_size: metadata.len(),
+            access_stats: LayerAccessStats::for_loading_layer(LayerResidenceStatus::Resident),
             inner: RwLock::new(DeltaLayerInner {
                 loaded: false,
                 file: None,
@@ -749,6 +806,7 @@ impl DeltaLayerWriterInner {
             key_range: self.key_start..key_end,
             lsn_range: self.lsn_range.clone(),
             file_size: metadata.len(),
+            access_stats: LayerAccessStats::for_new_layer_file(),
             inner: RwLock::new(DeltaLayerInner {
                 loaded: false,
                 file: None,
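All the hand-written `Debug` impls in this file follow one pattern: print the cheap metadata, skip the loaded index and the open file handle. A standalone sketch of that pattern with an illustrative struct:

use std::fmt;

struct Layerish {
    loaded: bool,
    index_start_blk: u32,
    file: Option<std::fs::File>, // noisy/irrelevant for logs
}

impl fmt::Debug for Layerish {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Layerish")
            .field("loaded", &self.loaded)
            .field("index_start_blk", &self.index_start_blk)
            .finish() // `file` is omitted on purpose
    }
}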
@@ -1,12 +1,10 @@
 //!
 //! Helper functions for dealing with filenames of the image and delta layer files.
 //!
-use crate::config::PageServerConf;
 use crate::repository::Key;
 use std::cmp::Ordering;
 use std::fmt;
 use std::ops::Range;
-use std::path::PathBuf;
 use std::str::FromStr;

 use utils::lsn::Lsn;
@@ -130,6 +128,13 @@ impl Ord for ImageFileName {
     }
 }

+impl ImageFileName {
+    pub fn lsn_as_range(&self) -> Range<Lsn> {
+        // Saves from having to copypaste this all over
+        self.lsn..(self.lsn + 1)
+    }
+}
+
 ///
 /// Represents the filename of an ImageLayer
 ///
@@ -177,49 +182,32 @@ impl fmt::Display for ImageFileName {
 pub enum LayerFileName {
     Image(ImageFileName),
     Delta(DeltaFileName),
-    #[cfg(test)]
-    Test(String),
 }

 impl LayerFileName {
     pub fn file_name(&self) -> String {
         match self {
-            LayerFileName::Image(fname) => format!("{fname}"),
-            LayerFileName::Delta(fname) => format!("{fname}"),
-            #[cfg(test)]
-            LayerFileName::Test(fname) => fname.to_string(),
+            Self::Image(fname) => fname.to_string(),
+            Self::Delta(fname) => fname.to_string(),
         }
     }
-    #[cfg(test)]
-    pub(crate) fn new_test(name: &str) -> LayerFileName {
-        LayerFileName::Test(name.to_owned())
-    }
 }

 impl From<ImageFileName> for LayerFileName {
     fn from(fname: ImageFileName) -> Self {
-        LayerFileName::Image(fname)
+        Self::Image(fname)
     }
 }
 impl From<DeltaFileName> for LayerFileName {
     fn from(fname: DeltaFileName) -> Self {
-        LayerFileName::Delta(fname)
+        Self::Delta(fname)
     }
 }

-// include a `/` in the name as an additional layer of robustness
-// because `/` chars are not allowed in UNIX paths
-#[cfg(test)]
-const LAYER_FILE_NAME_TEST_PREFIX: &str = "LAYER_FILE_NAME::test/";
-
 impl FromStr for LayerFileName {
     type Err = String;

     fn from_str(value: &str) -> Result<Self, Self::Err> {
-        #[cfg(test)]
-        if let Some(value) = value.strip_prefix(LAYER_FILE_NAME_TEST_PREFIX) {
-            return Ok(LayerFileName::Test(value.to_owned()));
-        }
         let delta = DeltaFileName::parse_str(value);
         let image = ImageFileName::parse_str(value);
         let ok = match (delta, image) {
@@ -228,8 +216,8 @@ impl FromStr for LayerFileName {
                     "neither delta nor image layer file name: {value:?}"
                 ))
             }
-            (Some(delta), None) => LayerFileName::Delta(delta),
-            (None, Some(image)) => LayerFileName::Image(image),
+            (Some(delta), None) => Self::Delta(delta),
+            (None, Some(image)) => Self::Image(image),
             (Some(_), Some(_)) => unreachable!(),
         };
         Ok(ok)
@@ -242,12 +230,8 @@ impl serde::Serialize for LayerFileName {
         S: serde::Serializer,
     {
         match self {
-            LayerFileName::Image(fname) => serializer.serialize_str(&format!("{}", fname)),
-            LayerFileName::Delta(fname) => serializer.serialize_str(&format!("{}", fname)),
-            #[cfg(test)]
-            LayerFileName::Test(t) => {
-                serializer.serialize_str(&format!("{LAYER_FILE_NAME_TEST_PREFIX}{t}"))
-            }
+            Self::Image(fname) => serializer.serialize_str(&fname.to_string()),
+            Self::Delta(fname) => serializer.serialize_str(&fname.to_string()),
         }
     }
 }
@@ -270,16 +254,3 @@ impl<'de> serde::de::Visitor<'de> for LayerFileNameVisitor {
         v.parse().map_err(|e| E::custom(e))
     }
 }
-
-/// Helper enum to hold a PageServerConf, or a path
-///
-/// This is used by DeltaLayer and ImageLayer. Normally, this holds a reference to the
-/// global config, and paths to layer files are constructed using the tenant/timeline
-/// path from the config. But in the 'pageserver_binutils' binary, we need to construct a Layer
-/// struct for a file on disk, without having a page server running, so that we have no
-/// config. In that case, we use the Path variant to hold the full path to the file on
-/// disk.
-pub enum PathOrConf {
-    Path(PathBuf),
-    Conf(&'static PageServerConf),
-}
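With the `Test` variant and its `LAYER_FILE_NAME::test/` prefix gone, `from_str` simply tries both real parsers and matches on the pair, so exactly one interpretation must succeed. A standalone sketch of that disambiguation shape:

// Illustrative: mirror of the (delta, image) match above; the two formats
// are disjoint, so both succeeding is unreachable.
fn classify(delta: Option<()>, image: Option<()>) -> Result<&'static str, String> {
    Ok(match (delta, image) {
        (None, None) => return Err("neither delta nor image layer file name".to_owned()),
        (Some(_), None) => "delta",
        (None, Some(_)) => "image",
        (Some(_), Some(_)) => unreachable!("formats are disjoint"),
    })
}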
@@ -27,13 +27,14 @@ use crate::tenant::blob_io::{BlobCursor, BlobWriter, WriteBlobWriter};
 use crate::tenant::block_io::{BlockBuf, BlockReader, FileBlockReader};
 use crate::tenant::disk_btree::{DiskBtreeBuilder, DiskBtreeReader, VisitDirection};
 use crate::tenant::storage_layer::{
-    PersistentLayer, ValueReconstructResult, ValueReconstructState,
+    LayerAccessStats, PersistentLayer, ValueReconstructResult, ValueReconstructState,
 };
 use crate::virtual_file::VirtualFile;
 use crate::{IMAGE_FILE_MAGIC, STORAGE_FORMAT_VERSION, TEMP_FILE_SUFFIX};
 use anyhow::{bail, ensure, Context, Result};
 use bytes::Bytes;
 use hex;
+use pageserver_api::models::{HistoricLayerInfo, LayerAccessKind};
 use rand::{distributions::Alphanumeric, Rng};
 use serde::{Deserialize, Serialize};
 use std::fs::{self, File};
@@ -51,8 +52,8 @@ use utils::{
     lsn::Lsn,
 };

-use super::filename::{ImageFileName, LayerFileName, PathOrConf};
-use super::{Layer, LayerIter};
+use super::filename::{ImageFileName, LayerFileName};
+use super::{Layer, LayerAccessStatsReset, LayerIter, LayerResidenceStatus, PathOrConf};

 ///
 /// Header stored in the beginning of the file
@@ -94,13 +95,13 @@ impl From<&ImageLayer> for Summary {
     }
 }

-///
 /// ImageLayer is the in-memory data structure associated with an on-disk image
-/// file. We keep an ImageLayer in memory for each file, in the LayerMap. If a
-/// layer is in "loaded" state, we have a copy of the index in memory, in 'inner'.
+/// file.
+///
+/// We keep an ImageLayer in memory for each file, in the LayerMap. If a layer
+/// is in "loaded" state, we have a copy of the index in memory, in 'inner'.
 /// Otherwise the struct is just a placeholder for a file that exists on disk,
 /// and it needs to be loaded before using it in queries.
-///
 pub struct ImageLayer {
     path_or_conf: PathOrConf,
     pub tenant_id: TenantId,
@@ -111,9 +112,22 @@ pub struct ImageLayer {
     // This entry contains an image of all pages as of this LSN
     pub lsn: Lsn,

+    access_stats: LayerAccessStats,
+
     inner: RwLock<ImageLayerInner>,
 }

+impl std::fmt::Debug for ImageLayer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("ImageLayer")
+            .field("key_range", &self.key_range)
+            .field("file_size", &self.file_size)
+            .field("lsn", &self.lsn)
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
 pub struct ImageLayerInner {
     /// If false, the 'index' has not been loaded into memory yet.
     loaded: bool,
@@ -126,6 +140,16 @@ pub struct ImageLayerInner {
     file: Option<FileBlockReader<VirtualFile>>,
 }

+impl std::fmt::Debug for ImageLayerInner {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("ImageLayerInner")
+            .field("loaded", &self.loaded)
+            .field("index_start_blk", &self.index_start_blk)
+            .field("index_root_blk", &self.index_root_blk)
+            .finish()
+    }
+}
+
 impl Layer for ImageLayer {
     fn get_key_range(&self) -> Range<Key> {
         self.key_range.clone()
@@ -154,7 +178,7 @@ impl Layer for ImageLayer {
             return Ok(());
         }

-        let inner = self.load(ctx)?;
+        let inner = self.load(LayerAccessKind::Dump, ctx)?;
         let file = inner.file.as_ref().unwrap();
         let tree_reader =
             DiskBtreeReader::<_, KEY_SIZE>::new(inner.index_start_blk, inner.index_root_blk, file);
@@ -181,7 +205,7 @@ impl Layer for ImageLayer {
         assert!(lsn_range.start >= self.lsn);
         assert!(lsn_range.end >= self.lsn);

-        let inner = self.load(ctx)?;
+        let inner = self.load(LayerAccessKind::GetValueReconstructData, ctx)?;

         let file = inner.file.as_ref().unwrap();
         let tree_reader = DiskBtreeReader::new(inner.index_start_blk, inner.index_root_blk, file);
@@ -235,6 +259,23 @@ impl PersistentLayer for ImageLayer {
     fn file_size(&self) -> Option<u64> {
         Some(self.file_size)
     }
+
+    fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo {
+        let layer_file_name = self.filename().file_name();
+        let lsn_range = self.get_lsn_range();
+
+        HistoricLayerInfo::Image {
+            layer_file_name,
+            layer_file_size: Some(self.file_size),
+            lsn_start: lsn_range.start,
+            remote: false,
+            access_stats: self.access_stats.to_api_model(reset),
+        }
+    }
+
+    fn access_stats(&self) -> &LayerAccessStats {
+        &self.access_stats
+    }
 }

 impl ImageLayer {
@@ -272,7 +313,13 @@ impl ImageLayer {
     /// Open the underlying file and read the metadata into memory, if it's
     /// not loaded already.
     ///
-    fn load(&self, _ctx: &RequestContext) -> Result<RwLockReadGuard<ImageLayerInner>> {
+    fn load(
+        &self,
+        access_kind: LayerAccessKind,
+        ctx: &RequestContext,
+    ) -> Result<RwLockReadGuard<ImageLayerInner>> {
+        self.access_stats
+            .record_access(access_kind, ctx.task_kind());
         loop {
             // Quick exit if already loaded
             let inner = self.inner.read().unwrap();
@@ -352,6 +399,7 @@ impl ImageLayer {
         tenant_id: TenantId,
         filename: &ImageFileName,
         file_size: u64,
+        access_stats: LayerAccessStats,
     ) -> ImageLayer {
         ImageLayer {
             path_or_conf: PathOrConf::Conf(conf),
@@ -360,6 +408,7 @@ impl ImageLayer {
             key_range: filename.key_range.clone(),
             lsn: filename.lsn,
             file_size,
+            access_stats,
             inner: RwLock::new(ImageLayerInner {
                 loaded: false,
                 file: None,
@@ -387,6 +436,7 @@ impl ImageLayer {
             key_range: summary.key_range,
             lsn: summary.lsn,
             file_size: metadata.len(),
+            access_stats: LayerAccessStats::for_loading_layer(LayerResidenceStatus::Resident),
             inner: RwLock::new(ImageLayerInner {
                 file: None,
                 loaded: false,
@@ -546,6 +596,7 @@ impl ImageLayerWriterInner {
             key_range: self.key_range.clone(),
             lsn: self.lsn,
             file_size: metadata.len(),
+            access_stats: LayerAccessStats::for_new_layer_file(),
             inner: RwLock::new(ImageLayerInner {
                 loaded: false,
                 file: None,
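Both `info()` implementations feed the same API type; judging from the fields used across this compare, `HistoricLayerInfo` is roughly the following enum. This is a sketch reconstructed from usage, not the authoritative definition in `pageserver_api::models`:

// Sketch only: field set inferred from the info() impls in this compare.
type Lsn = u64; // stand-in for utils::lsn::Lsn
type AccessStats = (); // stand-in for the API-model stats type

enum HistoricLayerInfoSketch {
    Delta {
        layer_file_name: String,
        layer_file_size: Option<u64>,
        lsn_start: Lsn,
        lsn_end: Lsn, // deltas cover an LSN range...
        remote: bool,
        access_stats: AccessStats,
    },
    Image {
        layer_file_name: String,
        layer_file_size: Option<u64>,
        lsn_start: Lsn, // ...while an image is a snapshot at a single LSN
        remote: bool,
        access_stats: AccessStats,
    },
}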
@@ -13,6 +13,7 @@ use crate::tenant::ephemeral_file::EphemeralFile;
 use crate::tenant::storage_layer::{ValueReconstructResult, ValueReconstructState};
 use crate::walrecord;
 use anyhow::{ensure, Result};
+use pageserver_api::models::InMemoryLayerInfo;
 use std::cell::RefCell;
 use std::collections::HashMap;
 use tracing::*;
@@ -52,6 +53,15 @@ pub struct InMemoryLayer {
     inner: RwLock<InMemoryLayerInner>,
 }

+impl std::fmt::Debug for InMemoryLayer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("InMemoryLayer")
+            .field("start_lsn", &self.start_lsn)
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
 pub struct InMemoryLayerInner {
     /// Frozen layers have an exclusive end LSN.
     /// Writes are only allowed when this is None
@@ -70,6 +80,14 @@ pub struct InMemoryLayerInner {
     file: EphemeralFile,
 }

+impl std::fmt::Debug for InMemoryLayerInner {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("InMemoryLayerInner")
+            .field("end_lsn", &self.end_lsn)
+            .finish()
+    }
+}
+
 impl InMemoryLayerInner {
     fn assert_writeable(&self) {
         assert!(self.end_lsn.is_none());
@@ -80,6 +98,16 @@ impl InMemoryLayer {
     pub fn get_timeline_id(&self) -> TimelineId {
         self.timeline_id
     }
+
+    pub fn info(&self) -> InMemoryLayerInfo {
+        let lsn_start = self.start_lsn;
+        let lsn_end = self.inner.read().unwrap().end_lsn;
+
+        match lsn_end {
+            Some(lsn_end) => InMemoryLayerInfo::Frozen { lsn_start, lsn_end },
+            None => InMemoryLayerInfo::Open { lsn_start },
+        }
+    }
 }

 impl Layer for InMemoryLayer {
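`info()` derives open-versus-frozen purely from whether `end_lsn` is set, the same invariant `assert_writeable` enforces (writes only while `end_lsn` is `None`). A minimal standalone sketch of that encoding:

// Illustrative: the Option<end_lsn> field doubles as the open/frozen flag.
enum LayerState {
    Open { lsn_start: u64 },
    Frozen { lsn_start: u64, lsn_end: u64 },
}

fn state(lsn_start: u64, end_lsn: Option<u64>) -> LayerState {
    match end_lsn {
        Some(lsn_end) => LayerState::Frozen { lsn_start, lsn_end },
        None => LayerState::Open { lsn_start },
    }
}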
|||||||
@@ -7,6 +7,7 @@ use crate::repository::Key;
|
|||||||
use crate::tenant::remote_timeline_client::index::LayerFileMetadata;
|
use crate::tenant::remote_timeline_client::index::LayerFileMetadata;
|
||||||
use crate::tenant::storage_layer::{Layer, ValueReconstructResult, ValueReconstructState};
|
use crate::tenant::storage_layer::{Layer, ValueReconstructResult, ValueReconstructState};
|
||||||
use anyhow::{bail, Result};
|
use anyhow::{bail, Result};
|
||||||
|
use pageserver_api::models::HistoricLayerInfo;
|
||||||
use std::ops::Range;
|
use std::ops::Range;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@@ -18,9 +19,19 @@ use utils::{
 
 use super::filename::{DeltaFileName, ImageFileName, LayerFileName};
 use super::image_layer::ImageLayer;
-use super::{DeltaLayer, LayerIter, LayerKeyIter, PersistentLayer};
+use super::{
+    DeltaLayer, LayerAccessStats, LayerAccessStatsReset, LayerIter, LayerKeyIter,
+    LayerResidenceStatus, PersistentLayer,
+};
 
-#[derive(Debug)]
+/// RemoteLayer is a not yet downloaded [`ImageLayer`] or
+/// [`crate::storage_layer::DeltaLayer`].
+///
+/// RemoteLayer might be downloaded on-demand during operations which are
+/// allowed download remote layers and during which, it gets replaced with a
+/// concrete `DeltaLayer` or `ImageLayer`.
+///
+/// See: [`crate::context::RequestContext`] for authorization to download
 pub struct RemoteLayer {
     tenantid: TenantId,
     timelineid: TimelineId,
@@ -35,9 +46,21 @@ pub struct RemoteLayer {
     is_incremental: bool,
 
+    access_stats: LayerAccessStats,
+
     pub(crate) ongoing_download: Arc<tokio::sync::Semaphore>,
 }
 
+impl std::fmt::Debug for RemoteLayer {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RemoteLayer")
+            .field("file_name", &self.file_name)
+            .field("layer_metadata", &self.layer_metadata)
+            .field("is_incremental", &self.is_incremental)
+            .finish()
+    }
+}
+
 impl Layer for RemoteLayer {
     fn get_key_range(&self) -> Range<Key> {
         self.key_range.clone()
@@ -136,6 +159,34 @@ impl PersistentLayer for RemoteLayer {
     fn file_size(&self) -> Option<u64> {
         self.layer_metadata.file_size()
     }
 
+    fn info(&self, reset: LayerAccessStatsReset) -> HistoricLayerInfo {
+        let layer_file_name = self.filename().file_name();
+        let lsn_range = self.get_lsn_range();
+
+        if self.is_delta {
+            HistoricLayerInfo::Delta {
+                layer_file_name,
+                layer_file_size: self.layer_metadata.file_size(),
+                lsn_start: lsn_range.start,
+                lsn_end: lsn_range.end,
+                remote: true,
+                access_stats: self.access_stats.to_api_model(reset),
+            }
+        } else {
+            HistoricLayerInfo::Image {
+                layer_file_name,
+                layer_file_size: self.layer_metadata.file_size(),
+                lsn_start: lsn_range.start,
+                remote: true,
+                access_stats: self.access_stats.to_api_model(reset),
+            }
+        }
+    }
+
+    fn access_stats(&self) -> &LayerAccessStats {
+        &self.access_stats
+    }
 }
 
 impl RemoteLayer {
@@ -144,17 +195,19 @@ impl RemoteLayer {
         timelineid: TimelineId,
         fname: &ImageFileName,
         layer_metadata: &LayerFileMetadata,
+        access_stats: LayerAccessStats,
     ) -> RemoteLayer {
         RemoteLayer {
             tenantid,
             timelineid,
             key_range: fname.key_range.clone(),
-            lsn_range: fname.lsn..(fname.lsn + 1),
+            lsn_range: fname.lsn_as_range(),
             is_delta: false,
             is_incremental: false,
             file_name: fname.to_owned().into(),
             layer_metadata: layer_metadata.clone(),
             ongoing_download: Arc::new(tokio::sync::Semaphore::new(1)),
+            access_stats,
         }
     }
 
@@ -163,6 +216,7 @@ impl RemoteLayer {
         timelineid: TimelineId,
         fname: &DeltaFileName,
         layer_metadata: &LayerFileMetadata,
+        access_stats: LayerAccessStats,
     ) -> RemoteLayer {
         RemoteLayer {
             tenantid,
@@ -174,6 +228,7 @@ impl RemoteLayer {
             file_name: fname.to_owned().into(),
             layer_metadata: layer_metadata.clone(),
             ongoing_download: Arc::new(tokio::sync::Semaphore::new(1)),
+            access_stats,
         }
     }
 
@@ -194,6 +249,8 @@ impl RemoteLayer {
                 self.tenantid,
                 &fname,
                 file_size,
+                self.access_stats
+                    .clone_for_residence_change(LayerResidenceStatus::Resident),
             ))
         } else {
             let fname = ImageFileName {
@@ -206,6 +263,8 @@ impl RemoteLayer {
                 self.tenantid,
                 &fname,
                 file_size,
+                self.access_stats
+                    .clone_for_residence_change(LayerResidenceStatus::Resident),
             ))
         }
     }
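Note: the hunks above thread a LayerAccessStats value through every RemoteLayer constructor, and clone_for_residence_change carries the existing stats across an eviction or download instead of starting from zero. A minimal, self-contained sketch of that idea (toy types, not the pageserver's actual implementation):

#[derive(Clone, Copy, Debug, PartialEq)]
enum LayerResidenceStatus {
    Resident,
    Evicted,
}

#[derive(Clone, Debug)]
struct LayerAccessStats {
    residence_events: Vec<LayerResidenceStatus>,
}

impl LayerAccessStats {
    fn for_loading_layer(status: LayerResidenceStatus) -> Self {
        LayerAccessStats {
            residence_events: vec![status],
        }
    }

    // Access counters would be carried over here as well; only a new
    // residence event is recorded on top of the existing history.
    fn clone_for_residence_change(&self, new_status: LayerResidenceStatus) -> Self {
        let mut stats = self.clone();
        stats.residence_events.push(new_status);
        stats
    }
}

fn main() {
    let on_disk = LayerAccessStats::for_loading_layer(LayerResidenceStatus::Resident);
    let evicted = on_disk.clone_for_residence_change(LayerResidenceStatus::Evicted);
    assert_eq!(
        evicted.residence_events.last(),
        Some(&LayerResidenceStatus::Evicted)
    );
    println!("{evicted:?}");
}

The following hunks are in the timeline code that builds on these constructors.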
@@ -10,7 +10,7 @@ use itertools::Itertools;
 use once_cell::sync::OnceCell;
 use pageserver_api::models::{
     DownloadRemoteLayersTaskInfo, DownloadRemoteLayersTaskSpawnRequest,
-    DownloadRemoteLayersTaskState, TimelineState,
+    DownloadRemoteLayersTaskState, LayerMapInfo, LayerResidenceStatus, TimelineState,
 };
 use tokio::sync::{oneshot, watch, Semaphore, TryAcquireError};
 use tokio_util::sync::CancellationToken;
@@ -30,8 +30,8 @@ use crate::broker_client::is_broker_client_initialized;
 use crate::context::{DownloadBehavior, RequestContext};
 use crate::tenant::remote_timeline_client::{self, index::LayerFileMetadata};
 use crate::tenant::storage_layer::{
-    DeltaFileName, DeltaLayerWriter, ImageFileName, ImageLayerWriter, InMemoryLayer, LayerFileName,
-    RemoteLayer,
+    DeltaFileName, DeltaLayerWriter, ImageFileName, ImageLayerWriter, InMemoryLayer,
+    LayerAccessStats, LayerFileName, RemoteLayer,
 };
 use crate::tenant::{
     ephemeral_file::is_ephemeral_file,
@@ -69,9 +69,10 @@ use crate::ZERO_PAGE;
 use crate::{is_temporary, task_mgr};
 use walreceiver::spawn_connection_manager_task;
 
+use super::layer_map::BatchedUpdates;
 use super::remote_timeline_client::index::IndexPart;
 use super::remote_timeline_client::RemoteTimelineClient;
-use super::storage_layer::{DeltaLayer, ImageLayer, Layer};
+use super::storage_layer::{DeltaLayer, ImageLayer, Layer, LayerAccessStatsReset};
 
 #[derive(Debug, PartialEq, Eq, Clone, Copy)]
 enum FlushLoopState {
@@ -91,7 +92,7 @@ pub struct Timeline {
 
     pub pg_version: u32,
 
-    pub layers: RwLock<LayerMap<dyn PersistentLayer>>,
+    pub(super) layers: RwLock<LayerMap<dyn PersistentLayer>>,
 
     last_freeze_at: AtomicLsn,
     // Atomic would be more appropriate here.
@@ -663,7 +664,7 @@ impl Timeline {
         // Below are functions compact_level0() and create_image_layers()
         // but they are a bit ad hoc and don't quite work like it's explained
        // above. Rewrite it.
-        let _layer_removal_cs = self.layer_removal_cs.lock().await;
+        let layer_removal_cs = self.layer_removal_cs.lock().await;
         // Is the timeline being deleted?
         let state = *self.state.borrow();
         if state == TimelineState::Stopping {
@@ -696,7 +697,8 @@ impl Timeline {
 
         // 3. Compact
         let timer = self.metrics.compact_time_histo.start_timer();
-        self.compact_level0(target_file_size, ctx).await?;
+        self.compact_level0(&layer_removal_cs, target_file_size, ctx)
+            .await?;
         timer.stop_and_record();
 
         // If `create_image_layers' or `compact_level0` scheduled any
@@ -832,6 +834,89 @@ impl Timeline {
     pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TimelineState> {
         self.state.subscribe()
     }
 
+    pub fn layer_map_info(&self, reset: LayerAccessStatsReset) -> LayerMapInfo {
+        let layer_map = self.layers.read().unwrap();
+        let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1);
+        if let Some(open_layer) = &layer_map.open_layer {
+            in_memory_layers.push(open_layer.info());
+        }
+        for frozen_layer in &layer_map.frozen_layers {
+            in_memory_layers.push(frozen_layer.info());
+        }
+
+        let mut historic_layers = Vec::new();
+        for historic_layer in layer_map.iter_historic_layers() {
+            historic_layers.push(historic_layer.info(reset));
+        }
+
+        LayerMapInfo {
+            in_memory_layers,
+            historic_layers,
+        }
+    }
+
+    pub async fn download_layer(&self, layer_file_name: &str) -> anyhow::Result<Option<bool>> {
+        let Some(layer) = self.find_layer(layer_file_name) else { return Ok(None) };
+        let Some(remote_layer) = layer.downcast_remote_layer() else { return Ok(Some(false)) };
+        if self.remote_client.is_none() {
+            return Ok(Some(false));
+        }
+
+        self.download_remote_layer(remote_layer).await?;
+        Ok(Some(true))
+    }
+
+    pub async fn evict_layer(&self, layer_file_name: &str) -> anyhow::Result<Option<bool>> {
+        let Some(local_layer) = self.find_layer(layer_file_name) else { return Ok(None) };
+        if local_layer.is_remote_layer() {
+            return Ok(Some(false));
+        }
+        let Some(remote_client) = &self.remote_client else { return Ok(Some(false)) };
+
+        // ensure the current layer is uploaded for sure
+        remote_client
+            .wait_completion()
+            .await
+            .context("wait for layer upload ops to complete")?;
+
+        let layer_metadata = LayerFileMetadata::new(
+            local_layer
+                .file_size()
+                .expect("Local layer should have a file size"),
+        );
+        let new_remote_layer = Arc::new(match local_layer.filename() {
+            LayerFileName::Image(image_name) => RemoteLayer::new_img(
+                self.tenant_id,
+                self.timeline_id,
+                &image_name,
+                &layer_metadata,
+                local_layer
+                    .access_stats()
+                    .clone_for_residence_change(LayerResidenceStatus::Evicted),
+            ),
+            LayerFileName::Delta(delta_name) => RemoteLayer::new_delta(
+                self.tenant_id,
+                self.timeline_id,
+                &delta_name,
+                &layer_metadata,
+                local_layer
+                    .access_stats()
+                    .clone_for_residence_change(LayerResidenceStatus::Evicted),
+            ),
+        });
+
+        let gc_lock = self.layer_removal_cs.lock().await;
+        let mut layers = self.layers.write().unwrap();
+        let mut updates = layers.batch_update();
+        self.delete_historic_layer(&gc_lock, local_layer, &mut updates)?;
+        updates.insert_historic(new_remote_layer);
+        updates.flush();
+        drop(layers);
+        drop(gc_lock);
+
+        Ok(Some(true))
+    }
 }
 
 // Private functions
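Note: both new methods return anyhow::Result<Option<bool>> so an HTTP handler can distinguish "no such layer" (None) from "found but nothing to do" (Some(false)) and "operation performed" (Some(true)). A stub sketch of a caller honoring that contract (assumes the anyhow crate; TimelineStub is hypothetical and stands in for the real Timeline):

struct TimelineStub;

impl TimelineStub {
    fn evict_layer(&self, layer_file_name: &str) -> anyhow::Result<Option<bool>> {
        // the real method swaps the local layer for a RemoteLayer under the
        // layer_removal_cs lock; the stub just reports the outcome
        if layer_file_name.is_empty() {
            return Ok(None); // no such layer
        }
        Ok(Some(true))
    }
}

fn main() -> anyhow::Result<()> {
    let timeline = TimelineStub;
    match timeline.evict_layer("some_delta_layer_file_name")? {
        None => println!("no layer by that name"),
        Some(false) => println!("already remote, or remote storage is disabled"),
        Some(true) => println!("evicted"),
    }
    Ok(())
}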
@@ -1093,6 +1178,7 @@ impl Timeline {
                     self.tenant_id,
                     &imgfilename,
                     file_size,
+                    LayerAccessStats::for_loading_layer(LayerResidenceStatus::Resident),
                 );
 
                 trace!("found layer {}", layer.path().display());
@@ -1124,6 +1210,7 @@ impl Timeline {
                     self.tenant_id,
                     &deltafilename,
                     file_size,
+                    LayerAccessStats::for_loading_layer(LayerResidenceStatus::Resident),
                 );
 
                 trace!("found layer {}", layer.path().display());
@@ -1261,6 +1348,7 @@ impl Timeline {
                     self.timeline_id,
                     imgfilename,
                     &remote_layer_metadata,
+                    LayerAccessStats::for_loading_layer(LayerResidenceStatus::Evicted),
                 );
                 let remote_layer = Arc::new(remote_layer);
 
@@ -1285,12 +1373,11 @@ impl Timeline {
                     self.timeline_id,
                     deltafilename,
                     &remote_layer_metadata,
+                    LayerAccessStats::for_loading_layer(LayerResidenceStatus::Evicted),
                 );
                 let remote_layer = Arc::new(remote_layer);
                 updates.insert_historic(remote_layer);
             }
-            #[cfg(test)]
-            LayerFileName::Test(_) => unreachable!(),
         }
     }
 
@@ -1621,6 +1708,43 @@ impl Timeline {
             Err(e) => error!("Failed to compute current logical size for metrics update: {e:?}"),
         }
     }
 
+    fn find_layer(&self, layer_file_name: &str) -> Option<Arc<dyn PersistentLayer>> {
+        for historic_layer in self.layers.read().unwrap().iter_historic_layers() {
+            let historic_layer_name = historic_layer.filename().file_name();
+            if layer_file_name == historic_layer_name {
+                return Some(historic_layer);
+            }
+        }
+
+        None
+    }
+
+    /// Removes the layer from local FS (if present) and from memory.
+    /// Remote storage is not affected by this operation.
+    fn delete_historic_layer(
+        &self,
+        // we cannot remove layers otherwise, since gc and compaction will race
+        _layer_removal_cs: &tokio::sync::MutexGuard<'_, ()>,
+        layer: Arc<dyn PersistentLayer>,
+        updates: &mut BatchedUpdates<'_, dyn PersistentLayer>,
+    ) -> anyhow::Result<()> {
+        let layer_size = layer.file_size();
+
+        layer.delete()?;
+        if let Some(layer_size) = layer_size {
+            self.metrics.resident_physical_size_gauge.sub(layer_size);
+        }
+
+        // TODO Removing from the bottom of the layer map is expensive.
+        //      Maybe instead discard all layer map historic versions that
+        //      won't be needed for page reconstruction for this timeline,
+        //      and mark what we can't delete yet as deleted from the layer
+        //      map index without actually rebuilding the index.
+        updates.remove_historic(layer);
+
+        Ok(())
+    }
 }
 
 type TraversalId = String;
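Note: delete_historic_layer takes a &MutexGuard<'_, ()> argument that it never reads; the parameter only proves at compile time that the caller holds layer_removal_cs, so GC and compaction cannot race on layer removal. The same pattern with std's mutex (a sketch; the real code uses tokio::sync::Mutex):

use std::sync::{Mutex, MutexGuard};

struct LayerStore {
    layer_removal_cs: Mutex<()>,
    layers: Mutex<Vec<String>>,
}

impl LayerStore {
    // Callable only by someone who already holds layer_removal_cs, so two
    // removal paths cannot both be deleting layers at the same time.
    fn delete_layer(&self, _layer_removal_cs: &MutexGuard<'_, ()>, name: &str) {
        self.layers.lock().unwrap().retain(|l| l != name);
    }
}

fn main() {
    let store = LayerStore {
        layer_removal_cs: Mutex::new(()),
        layers: Mutex::new(vec!["a".into(), "b".into()]),
    };
    let guard = store.layer_removal_cs.lock().unwrap();
    store.delete_layer(&guard, "a");
    drop(guard);
    assert_eq!(store.layers.lock().unwrap().len(), 1);
}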
@@ -2727,6 +2851,7 @@ impl Timeline {
     ///
     async fn compact_level0(
         &self,
+        layer_removal_cs: &tokio::sync::MutexGuard<'_, ()>,
         target_file_size: u64,
         ctx: &RequestContext,
     ) -> anyhow::Result<()> {
@@ -2780,14 +2905,8 @@ impl Timeline {
         // delete the old ones
         let mut layer_names_to_delete = Vec::with_capacity(deltas_to_compact.len());
         for l in deltas_to_compact {
-            if let Some(path) = l.local_path() {
-                self.metrics
-                    .resident_physical_size_gauge
-                    .sub(path.metadata()?.len());
-            }
             layer_names_to_delete.push(l.filename());
-            l.delete()?;
-            updates.remove_historic(l);
+            self.delete_historic_layer(layer_removal_cs, l, &mut updates)?;
         }
         updates.flush();
         drop(layers);
@@ -2907,7 +3026,7 @@ impl Timeline {
 
         fail_point!("before-timeline-gc");
 
-        let _layer_removal_cs = self.layer_removal_cs.lock().await;
+        let layer_removal_cs = self.layer_removal_cs.lock().await;
         // Is the timeline being deleted?
         let state = *self.state.borrow();
         if state == TimelineState::Stopping {
@@ -2926,7 +3045,13 @@ impl Timeline {
         let new_gc_cutoff = Lsn::min(horizon_cutoff, pitr_cutoff);
 
         let res = self
-            .gc_timeline(horizon_cutoff, pitr_cutoff, retain_lsns, new_gc_cutoff)
+            .gc_timeline(
+                &layer_removal_cs,
+                horizon_cutoff,
+                pitr_cutoff,
+                retain_lsns,
+                new_gc_cutoff,
+            )
             .instrument(
                 info_span!("gc_timeline", timeline = %self.timeline_id, cutoff = %new_gc_cutoff),
             )
@@ -2940,6 +3065,7 @@ impl Timeline {
 
     async fn gc_timeline(
         &self,
+        layer_removal_cs: &tokio::sync::MutexGuard<'_, ()>,
         horizon_cutoff: Lsn,
         pitr_cutoff: Lsn,
         retain_lsns: Vec<Lsn>,
@@ -3095,22 +3221,12 @@ impl Timeline {
         // (couldn't do this in the loop above, because you cannot modify a collection
         // while iterating it. BTreeMap::retain() would be another option)
         let mut layer_names_to_delete = Vec::with_capacity(layers_to_remove.len());
-        for doomed_layer in layers_to_remove {
-            if let Some(path) = doomed_layer.local_path() {
-                self.metrics
-                    .resident_physical_size_gauge
-                    .sub(path.metadata()?.len());
-            }
-            layer_names_to_delete.push(doomed_layer.filename());
-            doomed_layer.delete()?; // FIXME: schedule succeeded deletions before returning?
-
-            // TODO Removing from the bottom of the layer map is expensive.
-            //      Maybe instead discard all layer map historic versions that
-            //      won't be needed for page reconstruction for this timeline,
-            //      and mark what we can't delete yet as deleted from the layer
-            //      map index without actually rebuilding the index.
-            updates.remove_historic(doomed_layer);
-            result.layers_removed += 1;
-        }
+        {
+            for doomed_layer in layers_to_remove {
+                layer_names_to_delete.push(doomed_layer.filename());
+                self.delete_historic_layer(layer_removal_cs, doomed_layer, &mut updates)?; // FIXME: schedule succeeded deletions before returning?
+                result.layers_removed += 1;
+            }
+        }
 
         if result.layers_removed != 0 {
@@ -3278,13 +3394,43 @@ impl Timeline {
                 // Delta- or ImageLayer in the layer map.
                 let new_layer = remote_layer.create_downloaded_layer(self_clone.conf, *size);
                 let mut layers = self_clone.layers.write().unwrap();
-                let mut updates = layers.batch_update();
                 {
+                    use crate::tenant::layer_map::Replacement;
                     let l: Arc<dyn PersistentLayer> = remote_layer.clone();
-                    updates.remove_historic(l);
+                    match layers.replace_historic(&l, new_layer) {
+                        Ok(Replacement::Replaced { .. }) => { /* expected */ }
+                        Ok(Replacement::NotFound) => {
+                            // TODO: the downloaded file should probably be removed, otherwise
+                            // it will be added to the layermap on next load? we should
+                            // probably restart any get_reconstruct_data search as well.
+                            //
+                            // See: https://github.com/neondatabase/neon/issues/3533
+                            error!("replacing downloaded layer into layermap failed because layer was not found");
+                        }
+                        Ok(Replacement::RemovalBuffered) => {
+                            unreachable!("current implementation does not remove anything")
+                        }
+                        Ok(Replacement::Unexpected(other)) => {
+                            // if the other layer would have the same pointer value as
+                            // expected, it means they differ only on vtables.
+                            //
+                            // otherwise there's no known reason for this to happen as
+                            // compacted layers should have different covering rectangle
+                            // leading to produce Replacement::NotFound.
+
+                            error!(
+                                expected.ptr = ?Arc::as_ptr(&l),
+                                other.ptr = ?Arc::as_ptr(&other),
+                                "replacing downloaded layer into layermap failed because another layer was found instead of expected"
+                            );
+                        }
+                        Err(e) => {
+                            // this is a precondition failure, the layer filename derived
+                            // attributes didn't match up, which doesn't seem likely.
+                            error!("replacing downloaded layer into layermap failed: {e:#?}")
+                        }
+                    }
                 }
-                updates.insert_historic(new_layer);
-                updates.flush();
                 drop(layers);
 
                 // Now that we've inserted the download into the layer map,
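Note: the last hunk replaces the remove-then-insert sequence with a single replace_historic call that verifies it swapped out exactly the RemoteLayer it expected, and reports what it found otherwise. A simplified stand-in showing that compare-then-replace shape (a toy map keyed by layer name, not the real LayerMap):

use std::collections::HashMap;
use std::sync::Arc;

enum Replacement<V> {
    Replaced { old: Arc<V> },
    NotFound,
    Unexpected(Arc<V>),
}

fn replace_expected<V>(
    map: &mut HashMap<String, Arc<V>>,
    key: &str,
    expected: &Arc<V>,
    new: Arc<V>,
) -> Replacement<V> {
    match map.get(key) {
        None => Replacement::NotFound,
        // pointer equality: only replace the exact Arc we planned to replace
        Some(current) if Arc::ptr_eq(current, expected) => {
            let old = map.insert(key.to_string(), new).expect("just checked");
            Replacement::Replaced { old }
        }
        Some(current) => Replacement::Unexpected(Arc::clone(current)),
    }
}

fn main() {
    let mut map: HashMap<String, Arc<String>> = HashMap::new();
    let remote = Arc::new("remote layer".to_string());
    map.insert("layer-1".into(), Arc::clone(&remote));
    let downloaded = Arc::new("downloaded layer".to_string());
    match replace_expected(&mut map, "layer-1", &remote, downloaded) {
        Replacement::Replaced { old } => println!("replaced {old}, as expected"),
        Replacement::NotFound => println!("layer vanished concurrently"),
        Replacement::Unexpected(_) => println!("someone swapped the layer first"),
    }
}

The next hunks move from the pageserver to the neon Postgres extension's local file cache.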
@@ -132,7 +132,7 @@ lfc_shmem_request(void)
 	RequestNamedLWLockTranche("lfc_lock", 1);
 }
 
-bool
+static bool
 lfc_check_limit_hook(int *newval, void **extra, GucSource source)
 {
 	if (*newval > lfc_max_size)
@@ -143,7 +143,7 @@ lfc_check_limit_hook(int *newval, void **extra, GucSource source)
 	return true;
 }
 
-void
+static void
 lfc_change_limit_hook(int newval, void *extra)
 {
 	uint32 new_size = SIZE_MB_TO_CHUNKS(newval);
@@ -213,7 +213,7 @@ lfc_init(void)
 							INT_MAX,
 							PGC_SIGHUP,
 							GUC_UNIT_MB,
-							NULL,
+							lfc_check_limit_hook,
 							lfc_change_limit_hook,
 							NULL);
 
@@ -472,7 +472,6 @@ local_cache_pages(PG_FUNCTION_ARGS)
 	HASH_SEQ_STATUS status;
 	FileCacheEntry* entry;
 	uint32 n_pages = 0;
-	uint32 i;
 
 	funcctx = SRF_FIRSTCALL_INIT();
 
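Note: this C diff makes the GUC hooks static and registers lfc_check_limit_hook as the check hook, so a SIGHUP-time change of the file cache size is validated against lfc_max_size before lfc_change_limit_hook applies it. A language-neutral sketch of that validate-then-apply shape (hypothetical names, not the Postgres GUC API):

struct FileCacheSetting {
    max_size_mb: i32,
    size_mb: i32,
}

impl FileCacheSetting {
    // mirrors lfc_check_limit_hook: reject values above the hard maximum
    fn check_limit(&self, newval: i32) -> bool {
        newval <= self.max_size_mb
    }

    // mirrors lfc_change_limit_hook: this is where the cache would be resized
    fn change_limit(&mut self, newval: i32) {
        self.size_mb = newval;
    }

    fn reload(&mut self, newval: i32) -> Result<(), String> {
        if !self.check_limit(newval) {
            return Err(format!("{newval} exceeds max {}", self.max_size_mb));
        }
        self.change_limit(newval);
        Ok(())
    }
}

fn main() {
    let mut lfc = FileCacheSetting { max_size_mb: 1024, size_mb: 128 };
    assert!(lfc.reload(256).is_ok());
    assert!(lfc.reload(4096).is_err());
}

The proxy authentication hunks follow.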
@@ -149,36 +149,32 @@ impl<'l> BackendType<'l, ClientCredentials<'_>> {
         };
 
         // TODO: find a proper way to merge those very similar blocks.
-        let (mut node, payload) = match self {
+        let (mut node, password) = match self {
             Console(api, creds) if creds.project.is_none() => {
                 let payload = fetch_magic_payload(client).await?;
+                creds.project = Some(payload.project.into());
+                let node = api.wake_compute(extra, creds).await?;
 
-                let mut creds = creds.as_ref();
-                creds.project = Some(payload.project.as_str().into());
-                let node = api.wake_compute(extra, &creds).await?;
-
-                (node, payload)
+                (node, payload.password)
             }
             // This is a hack to allow cleartext password in secure connections (wss).
             Console(api, creds) if creds.use_cleartext_password_flow => {
                 let payload = fetch_plaintext_password(client).await?;
                 let node = api.wake_compute(extra, creds).await?;
 
-                (node, payload)
+                (node, payload.password)
             }
             Postgres(api, creds) if creds.project.is_none() => {
                 let payload = fetch_magic_payload(client).await?;
+                creds.project = Some(payload.project.into());
+                let node = api.wake_compute(extra, creds).await?;
 
-                let mut creds = creds.as_ref();
-                creds.project = Some(payload.project.as_str().into());
-                let node = api.wake_compute(extra, &creds).await?;
-
-                (node, payload)
+                (node, payload.password)
             }
             _ => return Ok(None),
         };
 
-        node.config.password(payload.password);
+        node.config.password(password);
         Ok(Some(AuthSuccess {
             reported_auth_ok: false,
             value: node,
@@ -47,18 +47,6 @@ impl ClientCredentials<'_> {
     }
 }
 
-impl<'a> ClientCredentials<'a> {
-    #[inline]
-    pub fn as_ref(&'a self) -> ClientCredentials<'a> {
-        Self {
-            user: self.user,
-            dbname: self.dbname,
-            project: self.project().map(Cow::Borrowed),
-            use_cleartext_password_flow: self.use_cleartext_password_flow,
-        }
-    }
-}
-
 impl<'a> ClientCredentials<'a> {
     pub fn parse(
         params: &'a StartupMessageParams,
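Note: with wake_compute able to take the credentials it already holds, the borrow-cloning as_ref helper removed above became dead code; the backend hunk earlier mutates creds.project in place instead of rebuilding a temporary copy. A minimal sketch of that simplification (toy types, not the proxy's real ones):

#[derive(Debug)]
struct Creds {
    project: Option<String>,
}

fn wake_compute(creds: &Creds) -> String {
    format!("woke compute for {:?}", creds.project)
}

fn handle(creds: &mut Creds, payload_project: &str) -> String {
    // before: let mut creds = creds.as_ref(); creds.project = ...;
    // after: mutate the credentials in place, no clone needed
    creds.project = Some(payload_project.to_string());
    wake_compute(creds)
}

fn main() {
    let mut creds = Creds { project: None };
    println!("{}", handle(&mut creds, "my-project"));
}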
@@ -431,6 +431,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     logging::init(LogFormat::from_config(&args.log_format)?)?;
     info!("version: {GIT_VERSION}");
+    ::metrics::set_build_info_metric(GIT_VERSION);
 
     let registry = Registry {
         shared_state: Arc::new(RwLock::new(SharedState::new(args.all_keys_chan_size))),
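Note: the one-line change above exports a build-info metric at startup, matching the libmetrics_build_info and libmetrics_launch_timestamp names added to the expected-metrics list further down. One common way to expose such a metric with the prometheus crate (a sketch; the metric name comes from the test fixture below, the label set and value are assumptions):

use prometheus::{register_int_gauge_vec, Encoder, TextEncoder};

fn main() {
    let build_info = register_int_gauge_vec!(
        "libmetrics_build_info",
        "Build/version information",
        &["revision"]
    )
    .unwrap();
    // The gauge is always 1; the interesting data lives in the label.
    build_info.with_label_values(&["abc123"]).set(1);

    let mut buf = Vec::new();
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buf)
        .unwrap();
    print!("{}", String::from_utf8(buf).unwrap());
}

The test fixture hunks follow.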
@@ -97,10 +97,17 @@ class NeonCompare(PgCompare):
         self._pg_bin = pg_bin
         self.pageserver_http_client = self.env.pageserver.http_client()
 
-        # We only use one branch and one timeline
-        self.env.neon_cli.create_branch(branch_name, "empty")
-        self._pg = self.env.postgres.create_start(branch_name)
-        self.timeline = self.pg.safe_psql("SHOW neon.timeline_id")[0][0]
+        # Create tenant
+        tenant_conf: Dict[str, str] = {}
+        if False:  # TODO add pytest setting for this
+            tenant_conf["trace_read_requests"] = "true"
+        self.tenant, _ = self.env.neon_cli.create_tenant(conf=tenant_conf)
+
+        # Create timeline
+        self.timeline = self.env.neon_cli.create_timeline(branch_name, tenant_id=self.tenant)
+
+        # Start pg
+        self._pg = self.env.postgres.create_start(branch_name, "main", self.tenant)
 
     @property
     def pg(self) -> PgProtocol:
@@ -115,11 +122,11 @@ class NeonCompare(PgCompare):
         return self._pg_bin
 
     def flush(self):
-        self.pageserver_http_client.timeline_checkpoint(self.env.initial_tenant, self.timeline)
-        self.pageserver_http_client.timeline_gc(self.env.initial_tenant, self.timeline, 0)
+        self.pageserver_http_client.timeline_checkpoint(self.tenant, self.timeline)
+        self.pageserver_http_client.timeline_gc(self.tenant, self.timeline, 0)
 
     def compact(self):
-        self.pageserver_http_client.timeline_compact(self.env.initial_tenant, self.timeline)
+        self.pageserver_http_client.timeline_compact(self.tenant, self.timeline)
 
     def report_peak_memory_use(self):
         self.zenbenchmark.record(
@@ -131,13 +138,13 @@ class NeonCompare(PgCompare):
 
     def report_size(self):
         timeline_size = self.zenbenchmark.get_timeline_size(
-            self.env.repo_dir, self.env.initial_tenant, self.timeline
+            self.env.repo_dir, self.tenant, self.timeline
         )
         self.zenbenchmark.record(
             "size", timeline_size / (1024 * 1024), "MB", report=MetricReport.LOWER_IS_BETTER
         )
 
-        params = f'{{tenant_id="{self.env.initial_tenant}",timeline_id="{self.timeline}"}}'
+        params = f'{{tenant_id="{self.tenant}",timeline_id="{self.timeline}"}}'
         total_files = self.zenbenchmark.get_int_counter_value(
             self.env.pageserver, "pageserver_created_persistent_files_total" + params
         )
@@ -50,6 +50,8 @@ PAGESERVER_GLOBAL_METRICS: Tuple[str, ...] = (
     "pageserver_storage_operations_seconds_global_count",
     "pageserver_storage_operations_seconds_global_sum",
     "pageserver_storage_operations_seconds_global_bucket",
+    "libmetrics_launch_timestamp",
+    "libmetrics_build_info",
 )
 
 PAGESERVER_PER_TENANT_METRICS: Tuple[str, ...] = (
@@ -1205,6 +1205,11 @@ class PageserverHttpClient(requests.Session):
         assert isinstance(res_json, dict)
         return res_json
 
+    def tenant_config(self, tenant_id: TenantId) -> TenantConfig:
+        res = self.get(f"http://localhost:{self.port}/v1/tenant/{tenant_id}/config")
+        self.verbose_error(res)
+        return TenantConfig.from_json(res.json())
+
     def tenant_size(self, tenant_id: TenantId) -> int:
         return self.tenant_size_and_modelinputs(tenant_id)[0]
 
@@ -1472,6 +1477,104 @@ class PageserverHttpClient(requests.Session):
         assert len(relevant) == 1
         return relevant[0].lstrip(name).strip()
 
+    def layer_map_info(
+        self,
+        tenant_id: TenantId,
+        timeline_id: TimelineId,
+    ) -> LayerMapInfo:
+        res = self.get(
+            f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/layer/",
+        )
+        self.verbose_error(res)
+        return LayerMapInfo.from_json(res.json())
+
+    def download_layer(self, tenant_id: TenantId, timeline_id: TimelineId, layer_name: str):
+        res = self.get(
+            f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/layer/{layer_name}",
+        )
+        self.verbose_error(res)
+
+        assert res.status_code == 200
+
+    def evict_layer(self, tenant_id: TenantId, timeline_id: TimelineId, layer_name: str):
+        res = self.delete(
+            f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/layer/{layer_name}",
+        )
+        self.verbose_error(res)
+
+        assert res.status_code == 200
+
+
+@dataclass
+class TenantConfig:
+    tenant_specific_overrides: Dict[str, Any]
+    effective_config: Dict[str, Any]
+
+    @classmethod
+    def from_json(cls, d: Dict[str, Any]) -> TenantConfig:
+        return TenantConfig(
+            tenant_specific_overrides=d["tenant_specific_overrides"],
+            effective_config=d["effective_config"],
+        )
+
+
+@dataclass
+class LayerMapInfo:
+    in_memory_layers: List[InMemoryLayerInfo]
+    historic_layers: List[HistoricLayerInfo]
+
+    @classmethod
+    def from_json(cls, d: Dict[str, Any]) -> LayerMapInfo:
+        info = LayerMapInfo(in_memory_layers=[], historic_layers=[])
+
+        json_in_memory_layers = d["in_memory_layers"]
+        assert isinstance(json_in_memory_layers, List)
+        for json_in_memory_layer in json_in_memory_layers:
+            info.in_memory_layers.append(InMemoryLayerInfo.from_json(json_in_memory_layer))
+
+        json_historic_layers = d["historic_layers"]
+        assert isinstance(json_historic_layers, List)
+        for json_historic_layer in json_historic_layers:
+            info.historic_layers.append(HistoricLayerInfo.from_json(json_historic_layer))
+
+        return info
+
+
+@dataclass
+class InMemoryLayerInfo:
+    kind: str
+    lsn_start: str
+    lsn_end: Optional[str]
+
+    @classmethod
+    def from_json(cls, d: Dict[str, Any]) -> InMemoryLayerInfo:
+        return InMemoryLayerInfo(
+            kind=d["kind"],
+            lsn_start=d["lsn_start"],
+            lsn_end=d.get("lsn_end"),
+        )
+
+
+@dataclass
+class HistoricLayerInfo:
+    kind: str
+    layer_file_name: str
+    layer_file_size: Optional[int]
+    lsn_start: str
+    lsn_end: Optional[str]
+    remote: bool
+
+    @classmethod
+    def from_json(cls, d: Dict[str, Any]) -> HistoricLayerInfo:
+        return HistoricLayerInfo(
+            kind=d["kind"],
+            layer_file_name=d["layer_file_name"],
+            layer_file_size=d.get("layer_file_size"),
+            lsn_start=d["lsn_start"],
+            lsn_end=d.get("lsn_end"),
+            remote=d["remote"],
+        )
+
+
 @dataclass
 class PageserverPort:
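Note: the fixture methods above exercise three new pageserver HTTP endpoints: GET .../layer/ for the layer map dump, GET .../layer/<name> to download an evicted layer back, and DELETE .../layer/<name> to evict a resident one. An equivalent client sketch in Rust using the reqwest crate (blocking and json features; the host, port, and IDs are placeholders, not values from this diff):

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::blocking::Client::new();
    let base = "http://localhost:9898/v1/tenant/<tenant_id>/timeline/<timeline_id>";

    // GET .../layer/ returns the LayerMapInfo JSON parsed by the fixtures above
    let info: serde_json::Value = client.get(format!("{base}/layer/")).send()?.json()?;
    println!(
        "historic layers: {}",
        info["historic_layers"].as_array().map_or(0, |a| a.len())
    );

    // GET .../layer/<name> downloads an evicted layer back to local disk
    client
        .get(format!("{base}/layer/<layer_file_name>"))
        .send()?
        .error_for_status()?;

    // DELETE .../layer/<name> evicts a resident layer to remote storage
    client
        .delete(format!("{base}/layer/<layer_file_name>"))
        .send()?
        .error_for_status()?;
    Ok(())
}

The new Swift client example follows.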
test_runner/pg_clients/swift/PostgresNIOExample/.gitignore (new file, 1 line, vendored)
@@ -0,0 +1 @@
+.build/

test_runner/pg_clients/swift/PostgresNIOExample/Dockerfile (new file, 10 lines)
@@ -0,0 +1,10 @@
+FROM swift:5.7 AS build
+WORKDIR /source
+
+COPY . .
+RUN swift build --configuration release
+
+FROM swift:5.7
+WORKDIR /app
+COPY --from=build /source/.build/release .
+CMD ["/app/PostgresNIOExample"]

PostgresNIOExample package manifest lock (new file, Package.resolved)
@@ -0,0 +1,86 @@
+{
+  "pins" : [
+    {
+      "identity" : "postgres-nio",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/vapor/postgres-nio.git",
+      "state" : {
+        "revision" : "7daf026e145de2c07d6e37f4171b1acb4b5f22b1",
+        "version" : "1.12.1"
+      }
+    },
+    {
+      "identity" : "swift-atomics",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-atomics.git",
+      "state" : {
+        "revision" : "ff3d2212b6b093db7f177d0855adbc4ef9c5f036",
+        "version" : "1.0.3"
+      }
+    },
+    {
+      "identity" : "swift-collections",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-collections.git",
+      "state" : {
+        "revision" : "937e904258d22af6e447a0b72c0bc67583ef64a2",
+        "version" : "1.0.4"
+      }
+    },
+    {
+      "identity" : "swift-crypto",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-crypto.git",
+      "state" : {
+        "revision" : "75ec60b8b4cc0f085c3ac414f3dca5625fa3588e",
+        "version" : "2.2.4"
+      }
+    },
+    {
+      "identity" : "swift-log",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-log.git",
+      "state" : {
+        "revision" : "32e8d724467f8fe623624570367e3d50c5638e46",
+        "version" : "1.5.2"
+      }
+    },
+    {
+      "identity" : "swift-metrics",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-metrics.git",
+      "state" : {
+        "revision" : "9b39d811a83cf18b79d7d5513b06f8b290198b10",
+        "version" : "2.3.3"
+      }
+    },
+    {
+      "identity" : "swift-nio",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-nio.git",
+      "state" : {
+        "revision" : "45167b8006448c79dda4b7bd604e07a034c15c49",
+        "version" : "2.48.0"
+      }
+    },
+    {
+      "identity" : "swift-nio-ssl",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-nio-ssl.git",
+      "state" : {
+        "revision" : "4fb7ead803e38949eb1d6fabb849206a72c580f3",
+        "version" : "2.23.0"
+      }
+    },
+    {
+      "identity" : "swift-nio-transport-services",
+      "kind" : "remoteSourceControl",
+      "location" : "https://github.com/apple/swift-nio-transport-services.git",
+      "state" : {
+        "revision" : "c0d9a144cfaec8d3d596aadde3039286a266c15c",
+        "version" : "1.15.0"
+      }
+    }
+  ],
+  "version" : 2
+}

PostgresNIOExample package manifest (new file, Package.swift)
@@ -0,0 +1,17 @@
+// swift-tools-version:5.7
+import PackageDescription
+
+let package = Package(
+    name: "PostgresNIOExample",
+    dependencies: [
+        .package(url: "https://github.com/vapor/postgres-nio.git", from: "1.8.0")
+    ],
+    targets: [
+        .executableTarget(
+            name: "PostgresNIOExample",
+            dependencies: [
+                .product(name: "PostgresNIO", package: "postgres-nio"),
+            ]
+        )
+    ]
+)

PostgresNIOExample main program (new Swift source file)
@@ -0,0 +1,49 @@
+import Foundation
+
+import PostgresNIO
+import NIOPosix
+import Logging
+
+await Task {
+    do {
+        let eventLoopGroup = MultiThreadedEventLoopGroup(numberOfThreads: 1)
+        let logger = Logger(label: "postgres-logger")
+
+        let env = ProcessInfo.processInfo.environment
+
+        let sslContext = try! NIOSSLContext(configuration: .makeClientConfiguration())
+
+        let config = PostgresConnection.Configuration(
+            connection: .init(
+                host: env["NEON_HOST"] ?? "",
+                port: 5432
+            ),
+            authentication: .init(
+                username: env["NEON_USER"] ?? "",
+                database: env["NEON_DATABASE"] ?? "",
+                password: env["NEON_PASSWORD"] ?? ""
+            ),
+            tls: .require(sslContext)
+        )
+
+        let connection = try await PostgresConnection.connect(
+            on: eventLoopGroup.next(),
+            configuration: config,
+            id: 1,
+            logger: logger
+        )
+
+        let rows = try await connection.query("SELECT 1 as col", logger: logger)
+        for try await (n) in rows.decode((Int).self, context: .default) {
+            print(n)
+        }
+
+        // Close your connection once done
+        try await connection.close()
+
+        // Shutdown the EventLoopGroup, once all connections are closed.
+        try eventLoopGroup.syncShutdownGracefully()
+    } catch {
+        print(error)
+    }
+}.value

@@ -19,6 +19,7 @@ from fixtures.utils import subprocess_capture
         "swift/PostgresClientKitExample",  # See https://github.com/neondatabase/neon/pull/2008#discussion_r911896592
         marks=pytest.mark.xfail(reason="Neither SNI nor parameters is supported"),
     ),
+    "swift/PostgresNIOExample",
     "typescript/postgresql-client",
 ],
 )
test_runner/regress/test_layer_eviction.py (new file, 140 lines)
@@ -0,0 +1,140 @@
+import pytest
+from fixtures.neon_fixtures import (
+    NeonEnvBuilder,
+    RemoteStorageKind,
+    wait_for_last_record_lsn,
+    wait_for_upload,
+)
+from fixtures.types import Lsn, TenantId, TimelineId
+from fixtures.utils import query_scalar
+
+
+# Creates a few layers, ensures that we can evict them (removing locally but keeping track of them anyway)
+# and then download them back.
+@pytest.mark.parametrize("remote_storage_kind", [RemoteStorageKind.LOCAL_FS])
+def test_basic_eviction(
+    neon_env_builder: NeonEnvBuilder,
+    remote_storage_kind: RemoteStorageKind,
+):
+    neon_env_builder.enable_remote_storage(
+        remote_storage_kind=remote_storage_kind,
+        test_name="test_download_remote_layers_api",
+    )
+
+    env = neon_env_builder.init_start()
+    client = env.pageserver.http_client()
+    pg = env.postgres.create_start("main")
+
+    tenant_id = TenantId(pg.safe_psql("show neon.tenant_id")[0][0])
+    timeline_id = TimelineId(pg.safe_psql("show neon.timeline_id")[0][0])
+
+    # Create a number of layers in the tenant
+    with pg.cursor() as cur:
+        cur.execute("CREATE TABLE foo (t text)")
+        cur.execute(
+            """
+            INSERT INTO foo
+                SELECT 'long string to consume some space' || g
+                FROM generate_series(1, 5000000) g
+            """
+        )
+        current_lsn = Lsn(query_scalar(cur, "SELECT pg_current_wal_flush_lsn()"))
+
+    wait_for_last_record_lsn(client, tenant_id, timeline_id, current_lsn)
+    client.timeline_checkpoint(tenant_id, timeline_id)
+    wait_for_upload(client, tenant_id, timeline_id, current_lsn)
+
+    timeline_path = env.repo_dir / "tenants" / str(tenant_id) / "timelines" / str(timeline_id)
+    initial_local_layers = sorted(
+        list(filter(lambda path: path.name != "metadata", timeline_path.glob("*")))
+    )
+    assert (
+        len(initial_local_layers) > 1
+    ), f"Should create multiple layers for timeline, but got {initial_local_layers}"
+
+    # Compare layer map dump with the local layers, ensure everything's present locally and matches
+    initial_layer_map_info = client.layer_map_info(tenant_id=tenant_id, timeline_id=timeline_id)
+    assert (
+        not initial_layer_map_info.in_memory_layers
+    ), "Should have no in memory layers after flushing"
+    assert len(initial_local_layers) == len(
+        initial_layer_map_info.historic_layers
+    ), "Should have the same layers in memory and on disk"
+    for returned_layer in initial_layer_map_info.historic_layers:
+        assert (
+            returned_layer.kind == "Delta"
+        ), f"Did not create and expect image layers, but got {returned_layer}"
+        assert (
+            not returned_layer.remote
+        ), f"All created layers should be present locally, but got {returned_layer}"
+
+        local_layers = list(
+            filter(lambda layer: layer.name == returned_layer.layer_file_name, initial_local_layers)
+        )
+        assert (
+            len(local_layers) == 1
+        ), f"Did not find returned layer {returned_layer} in local layers {initial_local_layers}"
+        local_layer = local_layers[0]
+        assert (
+            returned_layer.layer_file_size == local_layer.stat().st_size
+        ), f"Returned layer {returned_layer} has a different file size than local layer {local_layer}"
+
+    # Evict all layers, ensure they are not in the local FS, but are still dumped as part of the layer map
+    for local_layer in initial_local_layers:
+        client.evict_layer(
+            tenant_id=tenant_id, timeline_id=timeline_id, layer_name=local_layer.name
+        )
+        assert not any(
+            new_local_layer.name == local_layer.name for new_local_layer in timeline_path.glob("*")
+        ), f"Did not expect to find {local_layer} layer after evicting"
+
+    empty_layers = list(filter(lambda path: path.name != "metadata", timeline_path.glob("*")))
+    assert (
+        not empty_layers
+    ), f"After evicting all layers, timeline {tenant_id}/{timeline_id} should have no layers locally, but got: {empty_layers}"
+
+    evicted_layer_map_info = client.layer_map_info(tenant_id=tenant_id, timeline_id=timeline_id)
+    assert (
+        not evicted_layer_map_info.in_memory_layers
+    ), "Should have no in memory layers after flushing and evicting"
+    assert len(initial_local_layers) == len(
+        evicted_layer_map_info.historic_layers
+    ), "Should have the same layers in memory and on disk initially"
+    for returned_layer in evicted_layer_map_info.historic_layers:
+        assert (
+            returned_layer.kind == "Delta"
+        ), f"Did not create and expect image layers, but got {returned_layer}"
+        assert (
+            returned_layer.remote
+        ), f"All layers should be evicted and not present locally, but got {returned_layer}"
+        assert any(
+            local_layer.name == returned_layer.layer_file_name
+            for local_layer in initial_local_layers
+        ), f"Did not find returned layer {returned_layer} in local layers {initial_local_layers}"
+
+    # redownload all evicted layers and ensure the initial state is restored
+    for local_layer in initial_local_layers:
+        client.download_layer(
+            tenant_id=tenant_id, timeline_id=timeline_id, layer_name=local_layer.name
+        )
+    client.timeline_download_remote_layers(
+        tenant_id,
+        timeline_id,
+        # allow some concurrency to unveil potential concurrency bugs
+        max_concurrent_downloads=10,
+        errors_ok=False,
+        at_least_one_download=False,
+    )
+
+    redownloaded_layers = sorted(
+        list(filter(lambda path: path.name != "metadata", timeline_path.glob("*")))
+    )
+    assert (
+        redownloaded_layers == initial_local_layers
+    ), "Should have the same layers locally after redownloading the evicted layers"
+    redownloaded_layer_map_info = client.layer_map_info(
+        tenant_id=tenant_id, timeline_id=timeline_id
+    )
+    assert (
+        redownloaded_layer_map_info == initial_layer_map_info
+    ), "Should have the same layer map after redownloading the evicted layers"
@@ -22,6 +22,7 @@ wait_lsn_timeout='111 s';
|
|||||||
tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
|
tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
|
||||||
|
|
||||||
env = neon_env_builder.init_start()
|
env = neon_env_builder.init_start()
|
||||||
|
http_client = env.pageserver.http_client()
|
||||||
|
|
||||||
# Check that we raise on misspelled configs
|
# Check that we raise on misspelled configs
|
||||||
invalid_conf_key = "some_invalid_setting_name_blah_blah_123"
|
invalid_conf_key = "some_invalid_setting_name_blah_blah_123"
|
||||||
@@ -36,12 +37,11 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
|
|||||||
else:
|
else:
|
||||||
raise AssertionError("Expected validation error")
|
raise AssertionError("Expected validation error")
|
||||||
|
|
||||||
tenant, _ = env.neon_cli.create_tenant(
|
new_conf = {
|
||||||
conf={
|
"checkpoint_distance": "20000",
|
||||||
"checkpoint_distance": "20000",
|
"gc_period": "30sec",
|
||||||
"gc_period": "30sec",
|
}
|
||||||
}
|
tenant, _ = env.neon_cli.create_tenant(conf=new_conf)
|
||||||
)
|
|
||||||
|
|
||||||
env.neon_cli.create_timeline("test_tenant_conf", tenant_id=tenant)
|
env.neon_cli.create_timeline("test_tenant_conf", tenant_id=tenant)
|
||||||
env.postgres.create_start(
|
env.postgres.create_start(
|
||||||
@@ -69,7 +69,20 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
|
|||||||
"image_creation_threshold": 3,
|
"image_creation_threshold": 3,
|
||||||
"pitr_interval": 604800, # 7 days
|
"pitr_interval": 604800, # 7 days
|
||||||
}.items()
|
}.items()
|
||||||
)
|
), f"Unexpected res: {res}"
|
||||||
|
default_tenant_config = http_client.tenant_config(tenant_id=env.initial_tenant)
|
||||||
|
assert (
|
||||||
|
not default_tenant_config.tenant_specific_overrides
|
||||||
|
), "Should have no specific settings yet"
|
||||||
|
effective_config = default_tenant_config.effective_config
|
||||||
|
assert effective_config["checkpoint_distance"] == 10000
|
||||||
|
assert effective_config["compaction_target_size"] == 1048576
|
||||||
|
assert effective_config["compaction_period"] == "20s"
|
||||||
|
assert effective_config["compaction_threshold"] == 10
|
||||||
|
assert effective_config["gc_horizon"] == 67108864
|
||||||
|
assert effective_config["gc_period"] == "1h"
|
||||||
|
assert effective_config["image_creation_threshold"] == 3
|
||||||
|
assert effective_config["pitr_interval"] == "7days"
|
||||||
|
|
||||||
# check the configuration of the new tenant
|
# check the configuration of the new tenant
|
||||||
with closing(env.pageserver.connect()) as psconn:
|
with closing(env.pageserver.connect()) as psconn:
|
||||||
@@ -89,15 +102,37 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
|
|||||||
"image_creation_threshold": 3,
|
"image_creation_threshold": 3,
|
||||||
"pitr_interval": 604800,
|
"pitr_interval": 604800,
|
||||||
}.items()
|
}.items()
|
||||||
)
|
), f"Unexpected res: {res}"
|
||||||
|
new_tenant_config = http_client.tenant_config(tenant_id=tenant)
|
||||||
|
new_specific_config = new_tenant_config.tenant_specific_overrides
|
||||||
|
assert new_specific_config["checkpoint_distance"] == 20000
|
||||||
|
assert new_specific_config["gc_period"] == "30s"
|
||||||
|
assert len(new_specific_config) == len(
|
||||||
|
new_conf
|
||||||
|
), f"No more specific properties were expected, but got: {new_specific_config}"
|
||||||
|
new_effective_config = new_tenant_config.effective_config
|
||||||
|
assert (
|
||||||
|
new_effective_config["checkpoint_distance"] == 20000
|
||||||
|
), "Specific 'checkpoint_distance' config should override the default value"
|
||||||
|
assert (
|
||||||
|
new_effective_config["gc_period"] == "30s"
|
||||||
|
), "Specific 'gc_period' config should override the default value"
|
||||||
|
assert new_effective_config["compaction_target_size"] == 1048576
|
||||||
|
assert new_effective_config["compaction_period"] == "20s"
|
||||||
|
assert new_effective_config["compaction_threshold"] == 10
|
||||||
|
assert new_effective_config["gc_horizon"] == 67108864
|
||||||
|
assert new_effective_config["image_creation_threshold"] == 3
|
||||||
|
assert new_effective_config["pitr_interval"] == "7days"
|
||||||
|
|
||||||
# update the config and ensure that it has changed
|
# update the config and ensure that it has changed
|
||||||
|
conf_update = {
|
||||||
|
"checkpoint_distance": "15000",
|
||||||
|
"gc_period": "80sec",
|
||||||
|
"compaction_period": "80sec",
|
||||||
|
}
|
||||||
env.neon_cli.config_tenant(
|
env.neon_cli.config_tenant(
|
||||||
tenant_id=tenant,
|
tenant_id=tenant,
|
||||||
conf={
|
conf=conf_update,
|
||||||
"checkpoint_distance": "15000",
|
|
||||||
"gc_period": "80sec",
|
|
||||||
},
|
|
||||||
)
|
)
|
||||||
|
|
||||||
with closing(env.pageserver.connect()) as psconn:
|
with closing(env.pageserver.connect()) as psconn:
|
||||||
@@ -110,14 +145,37 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
|
|||||||
for i in {
|
for i in {
|
||||||
"checkpoint_distance": 15000,
|
"checkpoint_distance": 15000,
|
||||||
"compaction_target_size": 1048576,
|
"compaction_target_size": 1048576,
|
||||||
"compaction_period": 20,
|
"compaction_period": 80,
|
||||||
"compaction_threshold": 10,
|
"compaction_threshold": 10,
|
||||||
"gc_horizon": 67108864,
|
"gc_horizon": 67108864,
|
||||||
"gc_period": 80,
|
"gc_period": 80,
|
||||||
"image_creation_threshold": 3,
|
"image_creation_threshold": 3,
|
||||||
"pitr_interval": 604800,
|
"pitr_interval": 604800,
|
||||||
}.items()
|
}.items()
|
||||||
)
|
), f"Unexpected res: {res}"
|
||||||
|
updated_tenant_config = http_client.tenant_config(tenant_id=tenant)
|
||||||
|
updated_specific_config = updated_tenant_config.tenant_specific_overrides
|
||||||
|
assert updated_specific_config["checkpoint_distance"] == 15000
|
||||||
|
assert updated_specific_config["gc_period"] == "1m 20s"
|
||||||
|
assert updated_specific_config["compaction_period"] == "1m 20s"
|
||||||
|
assert len(updated_specific_config) == len(
|
||||||
|
conf_update
|
||||||
|
), f"No more specific properties were expected, but got: {updated_specific_config}"
|
||||||
|
updated_effective_config = updated_tenant_config.effective_config
|
||||||
|
assert (
|
||||||
|
updated_effective_config["checkpoint_distance"] == 15000
|
||||||
|
), "Specific 'checkpoint_distance' config should override the default value"
|
||||||
|
assert (
|
||||||
|
updated_effective_config["gc_period"] == "1m 20s"
|
||||||
|
), "Specific 'gc_period' config should override the default value"
|
||||||
|
assert (
|
||||||
|
updated_effective_config["compaction_period"] == "1m 20s"
|
||||||
|
), "Specific 'compaction_period' config should override the default value"
|
||||||
|
assert updated_effective_config["compaction_target_size"] == 1048576
|
||||||
|
assert updated_effective_config["compaction_threshold"] == 10
|
||||||
|
assert updated_effective_config["gc_horizon"] == 67108864
|
||||||
|
assert updated_effective_config["image_creation_threshold"] == 3
|
||||||
|
assert updated_effective_config["pitr_interval"] == "7days"
|
||||||
|
|
||||||
# restart the pageserver and ensure that the config is still correct
|
# restart the pageserver and ensure that the config is still correct
|
||||||
env.pageserver.stop()
|
env.pageserver.stop()
|
||||||
@@ -133,22 +191,44 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
         for i in {
             "checkpoint_distance": 15000,
             "compaction_target_size": 1048576,
-            "compaction_period": 20,
+            "compaction_period": 80,
             "compaction_threshold": 10,
             "gc_horizon": 67108864,
             "gc_period": 80,
             "image_creation_threshold": 3,
             "pitr_interval": 604800,
         }.items()
-    )
+    ), f"Unexpected res: {res}"
+    restarted_tenant_config = http_client.tenant_config(tenant_id=tenant)
+    assert (
+        restarted_tenant_config == updated_tenant_config
+    ), "Updated config should not change after the restart"
 
     # update the config with very short config and make sure no trailing chars are left from previous config
+    final_conf = {
+        "pitr_interval": "1 min",
+    }
     env.neon_cli.config_tenant(
         tenant_id=tenant,
-        conf={
-            "pitr_interval": "1 min",
-        },
+        conf=final_conf,
     )
+    final_tenant_config = http_client.tenant_config(tenant_id=tenant)
+    final_specific_config = final_tenant_config.tenant_specific_overrides
+    assert final_specific_config["pitr_interval"] == "1m"
+    assert len(final_specific_config) == len(
+        final_conf
+    ), f"No more specific properties were expected, but got: {final_specific_config}"
+    final_effective_config = final_tenant_config.effective_config
+    assert (
+        final_effective_config["pitr_interval"] == "1m"
+    ), "Specific 'pitr_interval' config should override the default value"
+    assert final_effective_config["checkpoint_distance"] == 10000
+    assert final_effective_config["compaction_target_size"] == 1048576
+    assert final_effective_config["compaction_period"] == "20s"
+    assert final_effective_config["compaction_threshold"] == 10
+    assert final_effective_config["gc_horizon"] == 67108864
+    assert final_effective_config["gc_period"] == "1h"
+    assert final_effective_config["image_creation_threshold"] == 3
 
     # restart the pageserver and ensure that the config is still correct
     env.pageserver.stop()
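The final asserts encode the merge rule the test relies on: the effective config is the pageserver's defaults overlaid with the tenant-specific overrides, so once an override is dropped the key falls back to its default (e.g. `compaction_period` reverts to `"20s"`). A sketch of that relationship, with illustrative values taken from the test rather than the pageserver's internal representation:

```python
defaults = {
    "checkpoint_distance": 10000,
    "compaction_period": "20s",
    "gc_period": "1h",
}
tenant_specific_overrides = {"pitr_interval": "1m"}

# Effective config = defaults overlaid with whatever overrides are currently set.
effective_config = {**defaults, **tenant_specific_overrides}

assert effective_config["pitr_interval"] == "1m"       # override wins
assert effective_config["compaction_period"] == "20s"  # default, no override left
```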
@@ -165,7 +245,7 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}"""
             "compaction_period": 20,
             "pitr_interval": 60,
         }.items()
-    )
+    ), f"Unexpected res: {res}"
 
 
 def test_creating_tenant_conf_after_attach(neon_env_builder: NeonEnvBuilder):
@@ -2,6 +2,7 @@ from typing import Any, List, Tuple
 
 import pytest
 from fixtures.log_helper import log
+from fixtures.metrics import parse_metrics
 from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder, wait_for_last_flush_lsn
 from fixtures.types import Lsn
 
@@ -368,6 +369,17 @@ def test_single_branch_get_tenant_size_grows(neon_env_builder: NeonEnvBuilder):
 
     assert size_after == prev, "size after restarting pageserver should not have changed"
 
+    ps_metrics = parse_metrics(http_client.get_metrics(), "pageserver")
+    tenant_metric_filter = {
+        "tenant_id": str(tenant_id),
+    }
+
+    tenant_size_metric = int(
+        ps_metrics.query_one("pageserver_tenant_synthetic_size", filter=tenant_metric_filter).value
+    )
+
+    assert tenant_size_metric == size_after, "API size value should be equal to metric size value"
+
 
 def test_get_tenant_size_with_multiple_branches(neon_env_builder: NeonEnvBuilder):
     """
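The new check cross-validates the synthetic size reported by the HTTP API against the pageserver's Prometheus endpoint: `pageserver_tenant_synthetic_size` is exported per tenant in the standard Prometheus text format, and `query_one` selects the single sample matching the label filter. A rough sketch of what that matching works with (the sample line and value are made up for illustration; the real parsing lives in `fixtures.metrics.parse_metrics`):

```python
# A /metrics line as exposed by the pageserver (value made up for illustration):
raw = 'pageserver_tenant_synthetic_size{tenant_id="11111111111111111111111111111111"} 28672'

name, rest = raw.split("{", 1)
labels_text, value = rest.rsplit("}", 1)
labels = dict(kv.split("=", 1) for kv in labels_text.split(","))

assert name == "pageserver_tenant_synthetic_size"
assert labels["tenant_id"] == '"11111111111111111111111111111111"'
assert int(value) == 28672
```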
trace/Cargo.toml (new file, 15 lines)
@@ -0,0 +1,15 @@
+[package]
+name = "trace"
+version = "0.1.0"
+edition.workspace = true
+license.workspace = true
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+clap.workspace = true
+anyhow.workspace = true
+
+pageserver_api.workspace = true
+utils.workspace = true
+workspace_hack.workspace = true
trace/src/main.rs (new file, 172 lines)
@@ -0,0 +1,172 @@
+//! A tool for working with read traces generated by the pageserver.
+use std::collections::HashMap;
+use std::path::PathBuf;
+use std::str::FromStr;
+use std::{
+    fs::{read_dir, File},
+    io::BufReader,
+};
+
+use pageserver_api::models::{PagestreamFeMessage, PagestreamGetPageRequest};
+use utils::id::{ConnectionId, TenantId, TimelineId};
+
+use clap::{Parser, Subcommand};
+
+/// Utils for working with pageserver read traces. For generating
+/// traces, see the `trace_read_requests` tenant config option.
+#[derive(Parser, Debug)]
+#[command(author, version, about, long_about = None)]
+struct Args {
+    /// Path of trace directory
+    #[arg(short, long)]
+    path: PathBuf,
+
+    #[command(subcommand)]
+    command: Command,
+}
+
+/// What to do with the read trace
+#[derive(Subcommand, Debug)]
+enum Command {
+    /// List traces in the directory
+    List,
+
+    /// Print the traces in text format
+    Dump,
+
+    /// Print stats and anomalies about the traces
+    Analyze,
+
+    /// Draw the traces in svg format
+    Draw,
+
+    /// Send the read requests to a pageserver
+    Replay,
+}
+
+// HACK This function will change and improve as we see what kind of analysis is useful.
+// Currently it collects the difference in blkno of consecutive GetPage requests,
+// and counts the frequency of each value. This information is useful in order to:
+// - see how sequential a workload is by seeing how often the delta is 1
+// - detect any prefetching anomalies by looking for negative deltas during seqscan
+fn analyze_trace<R: std::io::Read>(mut reader: R) {
+    let mut total = 0; // Total requests traced
+    let mut cross_rel = 0; // Requests that ask for different rel than previous request
+    let mut deltas = HashMap::<i32, u32>::new(); // Consecutive blkno differences
+    let mut prev: Option<PagestreamGetPageRequest> = None;
+
+    // Compute stats
+    while let Ok(msg) = PagestreamFeMessage::parse(&mut reader) {
+        match msg {
+            PagestreamFeMessage::Exists(_) => {}
+            PagestreamFeMessage::Nblocks(_) => {}
+            PagestreamFeMessage::GetPage(req) => {
+                total += 1;
+
+                if let Some(prev) = prev {
+                    if prev.rel == req.rel {
+                        let delta = (req.blkno as i32) - (prev.blkno as i32);
+                        deltas.entry(delta).and_modify(|c| *c += 1).or_insert(1);
+                    } else {
+                        cross_rel += 1;
+                    }
+                }
+                prev = Some(req);
+            }
+            PagestreamFeMessage::DbSize(_) => {}
+        };
+    }
+
+    // Print stats.
+    let mut other = deltas.len();
+    deltas.retain(|_, count| *count > 300);
+    other -= deltas.len();
+    dbg!(total);
+    dbg!(cross_rel);
+    dbg!(other);
+    dbg!(deltas);
+}
+
+fn dump_trace<R: std::io::Read>(mut reader: R) {
+    while let Ok(msg) = PagestreamFeMessage::parse(&mut reader) {
+        println!("{msg:?}");
+    }
+}
+
+#[derive(Debug)]
+struct TraceFile {
+    #[allow(dead_code)]
+    pub tenant_id: TenantId,
+
+    #[allow(dead_code)]
+    pub timeline_id: TimelineId,
+
+    #[allow(dead_code)]
+    pub connection_id: ConnectionId,
+
+    pub path: PathBuf,
+}
+
+fn get_trace_files(traces_dir: &PathBuf) -> anyhow::Result<Vec<TraceFile>> {
+    let mut trace_files = Vec::<TraceFile>::new();
+
+    // Trace files are organized as {tenant_id}/{timeline_id}/{connection_id}
+    for tenant_dir in read_dir(traces_dir)? {
+        let entry = tenant_dir?;
+        let path = entry.path();
+        let tenant_id = TenantId::from_str(path.file_name().unwrap().to_str().unwrap())?;
+
+        for timeline_dir in read_dir(path)? {
+            let entry = timeline_dir?;
+            let path = entry.path();
+            let timeline_id = TimelineId::from_str(path.file_name().unwrap().to_str().unwrap())?;
+
+            for trace_dir in read_dir(path)? {
+                let entry = trace_dir?;
+                let path = entry.path();
+                let connection_id =
+                    ConnectionId::from_str(path.file_name().unwrap().to_str().unwrap())?;
+
+                trace_files.push(TraceFile {
+                    tenant_id,
+                    timeline_id,
+                    connection_id,
+                    path,
+                });
+            }
+        }
+    }
+
+    Ok(trace_files)
+}
+
+fn main() -> anyhow::Result<()> {
+    let args = Args::parse();
+
+    match args.command {
+        Command::List => {
+            for trace_file in get_trace_files(&args.path)? {
+                println!("{trace_file:?}");
+            }
+        }
+        Command::Dump => {
+            for trace_file in get_trace_files(&args.path)? {
+                let file = File::open(trace_file.path.clone())?;
+                let reader = BufReader::new(file);
+                dump_trace(reader);
+            }
+        }
+        Command::Analyze => {
+            for trace_file in get_trace_files(&args.path)? {
+                println!("analyzing {trace_file:?}");
+                let file = File::open(trace_file.path.clone())?;
+                let reader = BufReader::new(file);
+                analyze_trace(reader);
+            }
+        }
+        Command::Draw => todo!(),
+        Command::Replay => todo!(),
+    }
+
+    Ok(())
+}
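As the comment in `get_trace_files` notes, traces are laid out on disk as `{tenant_id}/{timeline_id}/{connection_id}`. For reference, the same three-level walk the tool performs, sketched in Python (the directory name is illustrative):

```python
from pathlib import Path

def list_traces(traces_dir: Path):
    # Mirrors get_trace_files(): traces are organized as
    # {tenant_id}/{timeline_id}/{connection_id}.
    for tenant_dir in traces_dir.iterdir():
        for timeline_dir in tenant_dir.iterdir():
            for trace_file in timeline_dir.iterdir():
                yield tenant_dir.name, timeline_dir.name, trace_file

for tenant_id, timeline_id, path in list_traces(Path("traces")):
    print(tenant_id, timeline_id, path)
```

Invoking the tool itself follows the clap definition above, e.g. `trace --path <traces_dir> analyze` or `trace -p <traces_dir> dump`.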
@@ -23,7 +23,6 @@ fail = { version = "0.5", default-features = false, features = ["failpoints"] }
 futures = { version = "0.3" }
 futures-channel = { version = "0.3", features = ["sink"] }
 futures-executor = { version = "0.3" }
-futures-task = { version = "0.3", default-features = false, features = ["std"] }
 futures-util = { version = "0.3", features = ["channel", "io", "sink"] }
 hashbrown = { version = "0.12", features = ["raw"] }
 indexmap = { version = "1", default-features = false, features = ["std"] }