diff --git a/.config/hakari.toml b/.config/hakari.toml index 3b6d9d8822..9991cd92b0 100644 --- a/.config/hakari.toml +++ b/.config/hakari.toml @@ -33,6 +33,7 @@ workspace-members = [ "compute_api", "consumption_metrics", "desim", + "json", "metrics", "pageserver_api", "postgres_backend", diff --git a/.dockerignore b/.dockerignore index 4d9433764e..aa44421fb6 100644 --- a/.dockerignore +++ b/.dockerignore @@ -27,4 +27,4 @@ !storage_controller/ !vendor/postgres-*/ !workspace_hack/ -!build_tools/patches +!build-tools/patches diff --git a/.github/actionlint.yml b/.github/actionlint.yml index b7e0be761a..25b2fc702a 100644 --- a/.github/actionlint.yml +++ b/.github/actionlint.yml @@ -7,6 +7,7 @@ self-hosted-runner: - small-metal - small-arm64 - unit-perf + - unit-perf-aws-arm - us-east-2 config-variables: - AWS_ECR_REGION @@ -30,6 +31,7 @@ config-variables: - NEON_PROD_AWS_ACCOUNT_ID - PGREGRESS_PG16_PROJECT_ID - PGREGRESS_PG17_PROJECT_ID + - PREWARM_PGBENCH_SIZE - REMOTE_STORAGE_AZURE_CONTAINER - REMOTE_STORAGE_AZURE_REGION - SLACK_CICD_CHANNEL_ID diff --git a/.github/actions/run-python-test-set/action.yml b/.github/actions/run-python-test-set/action.yml index 6f2b48444a..1f2012358e 100644 --- a/.github/actions/run-python-test-set/action.yml +++ b/.github/actions/run-python-test-set/action.yml @@ -176,7 +176,13 @@ runs: fi if [[ $BUILD_TYPE == "debug" && $RUNNER_ARCH == 'X64' ]]; then - cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run) + # We don't use code coverage for regression tests (the step is disabled), + # so there's no need to collect it. 
+ # Ref https://github.com/neondatabase/neon/issues/4540 + # cov_prefix=(scripts/coverage "--profraw-prefix=$GITHUB_JOB" --dir=/tmp/coverage run) + cov_prefix=() + # Explicitly set LLVM_PROFILE_FILE to /dev/null to avoid writing *.profraw files + export LLVM_PROFILE_FILE=/dev/null else cov_prefix=() fi diff --git a/.github/workflows/_build-and-test-locally.yml b/.github/workflows/_build-and-test-locally.yml index e2203a38ec..94115572df 100644 --- a/.github/workflows/_build-and-test-locally.yml +++ b/.github/workflows/_build-and-test-locally.yml @@ -150,7 +150,7 @@ jobs: secretKey: ${{ secrets.HETZNER_CACHE_SECRET_KEY }} use-fallback: false path: pg_install/v14 - key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools.Dockerfile') }} + key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v14_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools/Dockerfile') }} - name: Cache postgres v15 build id: cache_pg_15 @@ -162,7 +162,7 @@ jobs: secretKey: ${{ secrets.HETZNER_CACHE_SECRET_KEY }} use-fallback: false path: pg_install/v15 - key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools.Dockerfile') }} + key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v15_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools/Dockerfile') }} - name: Cache postgres v16 build id: cache_pg_16 @@ -174,7 +174,7 @@ jobs: secretKey: ${{ secrets.HETZNER_CACHE_SECRET_KEY }} use-fallback: false path: pg_install/v16 - key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools.Dockerfile') }} + key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v16_rev.outputs.pg_rev 
}}-bookworm-${{ hashFiles('Makefile', 'build-tools/Dockerfile') }} - name: Cache postgres v17 build id: cache_pg_17 @@ -186,7 +186,7 @@ jobs: secretKey: ${{ secrets.HETZNER_CACHE_SECRET_KEY }} use-fallback: false path: pg_install/v17 - key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v17_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools.Dockerfile') }} + key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v17_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools/Dockerfile') }} - name: Build all # Note: the Makefile picks up BUILD_TYPE and CARGO_PROFILE from the env variables diff --git a/.github/workflows/benchmarking.yml b/.github/workflows/benchmarking.yml index 79371ec704..df80bad579 100644 --- a/.github/workflows/benchmarking.yml +++ b/.github/workflows/benchmarking.yml @@ -219,6 +219,7 @@ jobs: --ignore test_runner/performance/test_cumulative_statistics_persistence.py --ignore test_runner/performance/test_perf_many_relations.py --ignore test_runner/performance/test_perf_oltp_large_tenant.py + --ignore test_runner/performance/test_lfc_prewarm.py env: BENCHMARK_CONNSTR: ${{ steps.create-neon-project.outputs.dsn }} VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" @@ -410,6 +411,77 @@ jobs: env: SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + prewarm-test: + if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }} + permissions: + contents: write + statuses: write + id-token: write # aws-actions/configure-aws-credentials + env: + PGBENCH_SIZE: ${{ vars.PREWARM_PGBENCH_SIZE }} + POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install + DEFAULT_PG_VERSION: 17 + TEST_OUTPUT: /tmp/test_output + BUILD_TYPE: remote + SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }} + PLATFORM: "neon-staging" + + runs-on: [ self-hosted, us-east-2, x64 ] + container: + image: 
ghcr.io/neondatabase/build-tools:pinned-bookworm + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + options: --init + + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-region: eu-central-1 + role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + role-duration-seconds: 18000 # 5 hours + + - name: Download Neon artifact + uses: ./.github/actions/download + with: + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact + path: /tmp/neon/ + prefix: latest + aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + + - name: Run prewarm benchmark + uses: ./.github/actions/run-python-test-set + with: + build_type: ${{ env.BUILD_TYPE }} + test_selection: performance/test_lfc_prewarm.py + run_in_parallel: false + save_perf_report: ${{ env.SAVE_PERF_REPORT }} + extra_params: -m remote_cluster --timeout 5400 + pg_version: ${{ env.DEFAULT_PG_VERSION }} + aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + env: + VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" + PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}" + NEON_API_KEY: ${{ secrets.NEON_STAGING_API_KEY }} + + - name: Create Allure report + id: create-allure-report + if: ${{ !cancelled() }} + uses: ./.github/actions/allure-report-generate + with: + store-test-results-into-db: true + aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + env: + REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }} + generate-matrices: if: ${{ github.event.inputs.run_only_pgvector_tests == 'false' || github.event.inputs.run_only_pgvector_tests == null }} # Create matrices for the 
benchmarking jobs, so we run benchmarks on rds only once a week (on Saturday) diff --git a/.github/workflows/build-build-tools-image.yml b/.github/workflows/build-build-tools-image.yml index 133c8635b6..24e4c8fa3d 100644 --- a/.github/workflows/build-build-tools-image.yml +++ b/.github/workflows/build-build-tools-image.yml @@ -72,7 +72,7 @@ jobs: ARCHS: ${{ inputs.archs || '["x64","arm64"]' }} DEBIANS: ${{ inputs.debians || '["bullseye","bookworm"]' }} IMAGE_TAG: | - ${{ hashFiles('build-tools.Dockerfile', + ${{ hashFiles('build-tools/Dockerfile', '.github/workflows/build-build-tools-image.yml') }} run: | echo "archs=${ARCHS}" | tee -a ${GITHUB_OUTPUT} @@ -144,7 +144,7 @@ jobs: - uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6.15.0 with: - file: build-tools.Dockerfile + file: build-tools/Dockerfile context: . provenance: false push: true diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 456c7b8c92..2977f642bc 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -87,6 +87,29 @@ jobs: uses: ./.github/workflows/build-build-tools-image.yml secrets: inherit + lint-yamls: + needs: [ meta, check-permissions, build-build-tools-image ] + # We do need to run this in `.*-rc-pr` because of hotfixes. 
+ if: ${{ contains(fromJSON('["pr", "push-main", "storage-rc-pr", "proxy-rc-pr", "compute-rc-pr"]'), needs.meta.outputs.run-kind) }} + runs-on: [ self-hosted, small ] + container: + image: ${{ needs.build-build-tools-image.outputs.image }} + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + options: --init + + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - run: make -C compute manifest-schema-validation + - run: make lint-openapi-spec + check-codestyle-python: needs: [ meta, check-permissions, build-build-tools-image ] # No need to run on `main` because we this in the merge queue. We do need to run this in `.*-rc-pr` because of hotfixes. @@ -199,28 +222,6 @@ jobs: build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm secrets: inherit - validate-compute-manifest: - runs-on: ubuntu-22.04 - needs: [ meta, check-permissions ] - # We do need to run this in `.*-rc-pr` because of hotfixes. - if: ${{ contains(fromJSON('["pr", "push-main", "storage-rc-pr", "proxy-rc-pr", "compute-rc-pr"]'), needs.meta.outputs.run-kind) }} - steps: - - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 - with: - egress-policy: audit - - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Set up Node.js - uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 - with: - node-version: '24' - - - name: Validate manifest against schema - run: | - make -C compute manifest-schema-validation - build-and-test-locally: needs: [ meta, build-build-tools-image ] # We do need to run this in `.*-rc-pr` because of hotfixes. 
@@ -306,14 +307,14 @@ jobs: statuses: write contents: write pull-requests: write - runs-on: [ self-hosted, unit-perf ] + runs-on: [ self-hosted, unit-perf-aws-arm ] container: image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm credentials: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} # for changed limits, see comments on `options:` earlier in this file - options: --init --shm-size=512mb --ulimit memlock=67108864:67108864 + options: --init --shm-size=512mb --ulimit memlock=67108864:67108864 --ulimit nofile=65536:65536 --security-opt seccomp=unconfined strategy: fail-fast: false matrix: @@ -986,6 +987,7 @@ jobs: - name: Verify docker-compose example and test extensions timeout-minutes: 60 env: + PARALLEL_COMPUTES: 3 TAG: >- ${{ needs.meta.outputs.run-kind == 'compute-rc-pr' diff --git a/.github/workflows/periodic_pagebench.yml b/.github/workflows/periodic_pagebench.yml index 317db94052..728a6d4956 100644 --- a/.github/workflows/periodic_pagebench.yml +++ b/.github/workflows/periodic_pagebench.yml @@ -1,4 +1,4 @@ -name: Periodic pagebench performance test on unit-perf hetzner runner +name: Periodic pagebench performance test on unit-perf-aws-arm runners on: schedule: @@ -40,7 +40,7 @@ jobs: statuses: write contents: write pull-requests: write - runs-on: [ self-hosted, unit-perf ] + runs-on: [ self-hosted, unit-perf-aws-arm ] container: image: ghcr.io/neondatabase/build-tools:pinned-bookworm credentials: diff --git a/.github/workflows/proxy-benchmark.yml b/.github/workflows/proxy-benchmark.yml index 3a98ad4e8e..0ae93ce295 100644 --- a/.github/workflows/proxy-benchmark.yml +++ b/.github/workflows/proxy-benchmark.yml @@ -1,4 +1,4 @@ -name: Periodic proxy performance test on unit-perf hetzner runner +name: Periodic proxy performance test on unit-perf-aws-arm runners on: push: # TODO: remove after testing @@ -32,7 +32,7 @@ jobs: statuses: write contents: write pull-requests: write - runs-on: [self-hosted, unit-perf] + runs-on: 
[self-hosted, unit-perf-aws-arm] timeout-minutes: 60 # 1h timeout container: image: ghcr.io/neondatabase/build-tools:pinned-bookworm diff --git a/.gitignore b/.gitignore index 69573638f2..32e8fcf798 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ neon.iml /integration_tests/.neon compaction-suite-results.* pgxn/neon/communicator/communicator_bindings.h +docker-compose/docker-compose-parallel.yml # Coverage *.profraw @@ -29,3 +30,6 @@ pgxn/neon/communicator/communicator_bindings.h # pgindent typedef lists *.list + +# Node +**/node_modules/ diff --git a/.gitmodules b/.gitmodules index d1330bf28c..e381fb079e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,16 +1,16 @@ [submodule "vendor/postgres-v14"] path = vendor/postgres-v14 - url = https://github.com/neondatabase/postgres.git + url = ../postgres.git branch = REL_14_STABLE_neon [submodule "vendor/postgres-v15"] path = vendor/postgres-v15 - url = https://github.com/neondatabase/postgres.git + url = ../postgres.git branch = REL_15_STABLE_neon [submodule "vendor/postgres-v16"] path = vendor/postgres-v16 - url = https://github.com/neondatabase/postgres.git + url = ../postgres.git branch = REL_16_STABLE_neon [submodule "vendor/postgres-v17"] path = vendor/postgres-v17 - url = https://github.com/neondatabase/postgres.git + url = ../postgres.git branch = REL_17_STABLE_neon diff --git a/Cargo.lock b/Cargo.lock index 77d83c7a71..60ce23153e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1427,6 +1427,7 @@ dependencies = [ "p256 0.13.2", "pageserver_page_api", "postgres", + "postgres-types", "postgres_initdb", "postgres_versioninfo", "regex", @@ -1950,6 +1951,7 @@ dependencies = [ "diesel_derives", "itoa", "serde_json", + "uuid", ] [[package]] @@ -3581,6 +3583,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json" +version = "0.1.0" +dependencies = [ + "futures", + "itoa", + "ryu", +] + [[package]] name = "json-structural-diff" version = "0.2.0" @@ -4403,6 +4414,7 @@ dependencies = [ "pageserver_client", 
"pageserver_client_grpc", "pageserver_page_api", + "pprof", "rand 0.8.5", "reqwest", "serde", @@ -4431,6 +4443,7 @@ dependencies = [ "pageserver_api", "postgres_ffi", "remote_storage", + "serde", "serde_json", "svg_fmt", "thiserror 1.0.69", @@ -4448,6 +4461,7 @@ dependencies = [ "arc-swap", "async-compression", "async-stream", + "base64 0.22.1", "bincode", "bit_field", "byteorder", @@ -4609,30 +4623,18 @@ version = "0.1.0" dependencies = [ "anyhow", "arc-swap", - "async-trait", "bytes", - "chrono", "compute_api", - "dashmap 5.5.0", "futures", - "http 1.1.0", - "hyper 1.6.0", - "hyper-util", - "metrics", "pageserver_api", "pageserver_page_api", - "priority-queue", - "rand 0.8.5", - "scopeguard", - "thiserror 1.0.69", "tokio", "tokio-stream", "tokio-util", "tonic 0.13.1", - "tower 0.4.13", "tracing", "utils", - "uuid", + "workspace_hack", ] [[package]] @@ -5248,17 +5250,6 @@ dependencies = [ "elliptic-curve 0.13.8", ] -[[package]] -name = "priority-queue" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5676d703dda103cbb035b653a9f11448c0a7216c7926bd35fcb5865475d0c970" -dependencies = [ - "autocfg", - "equivalent", - "indexmap 2.9.0", -] - [[package]] name = "proc-macro2" version = "1.0.94" @@ -5428,6 +5419,7 @@ dependencies = [ "async-trait", "atomic-take", "aws-config", + "aws-credential-types", "aws-sdk-iam", "aws-sigv4", "base64 0.22.1", @@ -5467,6 +5459,7 @@ dependencies = [ "itoa", "jose-jwa", "jose-jwk", + "json", "lasso", "measured", "metrics", @@ -5892,6 +5885,8 @@ dependencies = [ "azure_identity", "azure_storage", "azure_storage_blobs", + "base64 0.22.1", + "byteorder", "bytes", "camino", "camino-tempfile", @@ -6383,6 +6378,7 @@ dependencies = [ "itertools 0.10.5", "jsonwebtoken", "metrics", + "nix 0.30.1", "once_cell", "pageserver_api", "parking_lot 0.12.1", @@ -6390,6 +6386,7 @@ dependencies = [ "postgres-protocol", "postgres_backend", "postgres_ffi", + "postgres_ffi_types", "postgres_versioninfo", "pprof", 
"pq_proto", @@ -6434,7 +6431,7 @@ dependencies = [ "anyhow", "const_format", "pageserver_api", - "postgres_ffi", + "postgres_ffi_types", "postgres_versioninfo", "pq_proto", "serde", @@ -7113,6 +7110,7 @@ dependencies = [ "tokio-util", "tracing", "utils", + "uuid", "workspace_hack", ] @@ -7176,6 +7174,7 @@ dependencies = [ "pageserver_api", "pageserver_client", "reqwest", + "safekeeper_api", "serde_json", "storage_controller_client", "tokio", @@ -7755,6 +7754,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util", ] [[package]] @@ -8408,6 +8408,7 @@ dependencies = [ "tracing-error", "tracing-subscriber", "tracing-utils", + "uuid", "walkdir", ] @@ -8955,8 +8956,10 @@ dependencies = [ "fail", "form_urlencoded", "futures-channel", + "futures-core", "futures-executor", "futures-io", + "futures-sink", "futures-util", "generic-array", "getrandom 0.2.11", @@ -9025,7 +9028,6 @@ dependencies = [ "tracing-log", "tracing-subscriber", "url", - "uuid", "zeroize", "zstd", "zstd-safe", diff --git a/Cargo.toml b/Cargo.toml index 8e6327a974..76a1a57aa9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,6 +44,7 @@ members = [ "libs/walproposer", "libs/wal_decoder", "libs/postgres_initdb", + "libs/proxy/json", "libs/proxy/postgres-protocol2", "libs/proxy/postgres-types2", "libs/proxy/tokio-postgres2", @@ -204,7 +205,7 @@ tokio = { version = "1.43.1", features = ["macros"] } tokio-io-timeout = "1.2.0" tokio-postgres-rustls = "0.12.0" tokio-rustls = { version = "0.26.0", default-features = false, features = ["tls12", "ring"]} -tokio-stream = "0.1" +tokio-stream = { version = "0.1", features = ["sync"] } tokio-tar = "0.3" tokio-util = { version = "0.7.10", features = ["io", "io-util", "rt"] } toml = "0.8" diff --git a/Makefile b/Makefile index 4b31e26810..dc8bacc78e 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) # Where to install Postgres, default is ./pg_install, maybe useful for package # 
managers. -POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/ +POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install # Supported PostgreSQL versions POSTGRES_VERSIONS = v17 v16 v15 v14 @@ -14,7 +14,7 @@ POSTGRES_VERSIONS = v17 v16 v15 v14 # it is derived from BUILD_TYPE. # All intermediate build artifacts are stored here. -BUILD_DIR := build +BUILD_DIR := $(ROOT_PROJECT_DIR)/build ICU_PREFIX_DIR := /usr/local/icu @@ -212,7 +212,7 @@ neon-pgindent: postgres-v17-pg-bsd-indent neon-pg-ext-v17 FIND_TYPEDEF=$(ROOT_PROJECT_DIR)/vendor/postgres-v17/src/tools/find_typedef \ INDENT=$(BUILD_DIR)/v17/src/tools/pg_bsd_indent/pg_bsd_indent \ PGINDENT_SCRIPT=$(ROOT_PROJECT_DIR)/vendor/postgres-v17/src/tools/pgindent/pgindent \ - -C $(BUILD_DIR)/neon-v17 \ + -C $(BUILD_DIR)/pgxn-v17/neon \ -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile pgindent @@ -220,6 +220,19 @@ neon-pgindent: postgres-v17-pg-bsd-indent neon-pg-ext-v17 setup-pre-commit-hook: ln -s -f $(ROOT_PROJECT_DIR)/pre-commit.py .git/hooks/pre-commit +build-tools/node_modules: build-tools/package.json + cd build-tools && $(if $(CI),npm ci,npm install) + touch build-tools/node_modules + +.PHONY: lint-openapi-spec +lint-openapi-spec: build-tools/node_modules + # operation-2xx-response: pageserver timeline delete returns 404 on success + find . -iname "openapi_spec.y*ml" -exec\ + npx --prefix=build-tools/ redocly\ + --skip-rule=operation-operationId --skip-rule=operation-summary --extends=minimal\ + --skip-rule=no-server-example.com --skip-rule=operation-2xx-response\ + lint {} \+ + # Targets for building PostgreSQL are defined in postgres.mk. 
# # But if the caller has indicated that PostgreSQL is already diff --git a/build-tools.Dockerfile b/build-tools/Dockerfile similarity index 93% rename from build-tools.Dockerfile rename to build-tools/Dockerfile index 14a52bd736..b5fe642e6f 100644 --- a/build-tools.Dockerfile +++ b/build-tools/Dockerfile @@ -35,7 +35,7 @@ RUN echo 'Acquire::Retries "5";' > /etc/apt/apt.conf.d/80-retries && \ echo -e "retry_connrefused=on\ntimeout=15\ntries=5\nretry-on-host-error=on\n" > /root/.wgetrc && \ echo -e "--retry-connrefused\n--connect-timeout 15\n--retry 5\n--max-time 300\n" > /root/.curlrc -COPY build_tools/patches/pgcopydbv017.patch /pgcopydbv017.patch +COPY build-tools/patches/pgcopydbv017.patch /pgcopydbv017.patch RUN if [ "${DEBIAN_VERSION}" = "bookworm" ]; then \ set -e && \ @@ -188,6 +188,12 @@ RUN curl -fsSL 'https://apt.llvm.org/llvm-snapshot.gpg.key' | apt-key add - \ && bash -c 'for f in /usr/bin/clang*-${LLVM_VERSION} /usr/bin/llvm*-${LLVM_VERSION}; do ln -s "${f}" "${f%-${LLVM_VERSION}}"; done' \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +# Install node +ENV NODE_VERSION=24 +RUN curl -fsSL https://deb.nodesource.com/setup_${NODE_VERSION}.x | bash - \ + && apt install -y nodejs \ + && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + # Install docker RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg \ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian ${DEBIAN_VERSION} stable" > /etc/apt/sources.list.d/docker.list \ @@ -311,14 +317,14 @@ RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux . 
"$HOME/.cargo/env" && \ cargo --version && rustup --version && \ rustup component add llvm-tools rustfmt clippy && \ - cargo install rustfilt --version ${RUSTFILT_VERSION} --locked && \ - cargo install cargo-hakari --version ${CARGO_HAKARI_VERSION} --locked && \ - cargo install cargo-deny --version ${CARGO_DENY_VERSION} --locked && \ - cargo install cargo-hack --version ${CARGO_HACK_VERSION} --locked && \ - cargo install cargo-nextest --version ${CARGO_NEXTEST_VERSION} --locked && \ - cargo install cargo-chef --version ${CARGO_CHEF_VERSION} --locked && \ - cargo install diesel_cli --version ${CARGO_DIESEL_CLI_VERSION} --locked \ - --features postgres-bundled --no-default-features && \ + cargo install rustfilt --locked --version ${RUSTFILT_VERSION} && \ + cargo install cargo-hakari --locked --version ${CARGO_HAKARI_VERSION} && \ + cargo install cargo-deny --locked --version ${CARGO_DENY_VERSION} && \ + cargo install cargo-hack --locked --version ${CARGO_HACK_VERSION} && \ + cargo install cargo-nextest --locked --version ${CARGO_NEXTEST_VERSION} && \ + cargo install cargo-chef --locked --version ${CARGO_CHEF_VERSION} && \ + cargo install diesel_cli --locked --version ${CARGO_DIESEL_CLI_VERSION} \ + --features postgres-bundled --no-default-features && \ rm -rf /home/nonroot/.cargo/registry && \ rm -rf /home/nonroot/.cargo/git diff --git a/build-tools/package-lock.json b/build-tools/package-lock.json new file mode 100644 index 0000000000..b2c44ed9b4 --- /dev/null +++ b/build-tools/package-lock.json @@ -0,0 +1,3189 @@ +{ + "name": "build-tools", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "build-tools", + "devDependencies": { + "@redocly/cli": "1.34.4", + "@sourcemeta/jsonschema": "10.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": 
"sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz", + "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.27.6", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz", + "integrity": "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@emotion/is-prop-valid": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/@emotion/is-prop-valid/-/is-prop-valid-1.2.2.tgz", + "integrity": "sha512-uNsoYd37AFmaCdXlg6EYD1KaPOaRWRByMCYzbKUX4+hhMfrxdVSelShywL4JVaAeM/eHUOSprYBQls+/neX3pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@emotion/memoize": "^0.8.1" + } + }, + "node_modules/@emotion/memoize": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.8.1.tgz", + "integrity": "sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@emotion/unitless": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.8.1.tgz", + "integrity": "sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/@exodus/schemasafe": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@exodus/schemasafe/-/schemasafe-1.3.0.tgz", + "integrity": "sha512-5Aap/GaRupgNx/feGBwLLTVv8OQFfv3pq2lPRzPg9R+IOBnDgghTGW7l7EuVXOvg5cc/xSAlRW8rBrjIC3Nvqw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@faker-js/faker": { + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/@faker-js/faker/-/faker-7.6.0.tgz", + "integrity": "sha512-XK6BTq1NDMo9Xqw/YkYyGjSsg44fbNwYRx7QK2CuoQgyy+f1rrTDHoExVM5PsyXCtfl2vs2vVJ0MN0yN6LppRw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0", + "npm": ">=6.0.0" + } + }, + "node_modules/@humanwhocodes/momoa": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@humanwhocodes/momoa/-/momoa-2.0.4.tgz", + "integrity": "sha512-RE815I4arJFtt+FVeU1Tgp9/Xvecacji8w/V6XtXsWWH/wz/eNkNbhb+ny/+PlVZjV0rxQpRSQKNKE3lcktHEA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jsep-plugin/assignment": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@jsep-plugin/assignment/-/assignment-1.3.0.tgz", + "integrity": "sha512-VVgV+CXrhbMI3aSusQyclHkenWSAm95WaiKrMxRFam3JSUiIaQjoMIw2sEs/OX4XifnqeQUN4DYbJjlA8EfktQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.16.0" + }, + "peerDependencies": { + "jsep": "^0.4.0||^1.0.0" + } + }, + "node_modules/@jsep-plugin/regex": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@jsep-plugin/regex/-/regex-1.0.4.tgz", + "integrity": 
"sha512-q7qL4Mgjs1vByCaTnDFcBnV9HS7GVPJX5vyVoCgZHNSC9rjwIlmbXG5sUuorR5ndfHAIlJ8pVStxvjXHbNvtUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.16.0" + }, + "peerDependencies": { + "jsep": "^0.4.0||^1.0.0" + } + }, + "node_modules/@opentelemetry/api": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", + "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/@opentelemetry/api-logs": { + "version": "0.53.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.53.0.tgz", + "integrity": "sha512-8HArjKx+RaAI8uEIgcORbZIPklyh1YLjPSBus8hjRmvLi6DeFzgOcdZ7KwPabKj8mXF8dX0hyfAyGfycz0DbFw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api": "^1.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/@opentelemetry/context-async-hooks": { + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.26.0.tgz", + "integrity": "sha512-HedpXXYzzbaoutw6DFLWLDket2FwLkLpil4hGCZ1xYEIMTcivdfwEOISgdbLEWyG3HW52gTq2V9mOVJrONgiwg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/core": { + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.26.0.tgz", + "integrity": "sha512-1iKxXXE8415Cdv0yjG3G6hQnB5eVEsJce3QaawX8SjDn0mAS0ZM8fAbZZJD4ajvhC15cePvosSCut404KrIIvQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/semantic-conventions": "1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/exporter-trace-otlp-http": { + 
"version": "0.53.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-http/-/exporter-trace-otlp-http-0.53.0.tgz", + "integrity": "sha512-m7F5ZTq+V9mKGWYpX8EnZ7NjoqAU7VemQ1E2HAG+W/u0wpY1x0OmbxAXfGKFHCspdJk8UKlwPGrpcB8nay3P8A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.26.0", + "@opentelemetry/otlp-exporter-base": "0.53.0", + "@opentelemetry/otlp-transformer": "0.53.0", + "@opentelemetry/resources": "1.26.0", + "@opentelemetry/sdk-trace-base": "1.26.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/otlp-exporter-base": { + "version": "0.53.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.53.0.tgz", + "integrity": "sha512-UCWPreGQEhD6FjBaeDuXhiMf6kkBODF0ZQzrk/tuQcaVDJ+dDQ/xhJp192H9yWnKxVpEjFrSSLnpqmX4VwX+eA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.26.0", + "@opentelemetry/otlp-transformer": "0.53.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.0.0" + } + }, + "node_modules/@opentelemetry/otlp-transformer": { + "version": "0.53.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.53.0.tgz", + "integrity": "sha512-rM0sDA9HD8dluwuBxLetUmoqGJKSAbWenwD65KY9iZhUxdBHRLrIdrABfNDP7aiTjcgK8XFyTn5fhDz7N+W6DA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api-logs": "0.53.0", + "@opentelemetry/core": "1.26.0", + "@opentelemetry/resources": "1.26.0", + "@opentelemetry/sdk-logs": "0.53.0", + "@opentelemetry/sdk-metrics": "1.26.0", + "@opentelemetry/sdk-trace-base": "1.26.0", + "protobufjs": "^7.3.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": "^1.3.0" + } + }, + "node_modules/@opentelemetry/propagator-b3": { + "version": "1.26.0", + 
"resolved": "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-1.26.0.tgz", + "integrity": "sha512-vvVkQLQ/lGGyEy9GT8uFnI047pajSOVnZI2poJqVGD3nJ+B9sFGdlHNnQKophE3lHfnIH0pw2ubrCTjZCgIj+Q==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.26.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/propagator-jaeger": { + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.26.0.tgz", + "integrity": "sha512-DelFGkCdaxA1C/QA0Xilszfr0t4YbGd3DjxiCDPh34lfnFr+VkkrjV9S8ZTJvAzfdKERXhfOxIKBoGPJwoSz7Q==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.26.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/resources": { + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.26.0.tgz", + "integrity": "sha512-CPNYchBE7MBecCSVy0HKpUISEeJOniWqcHaAHpmasZ3j9o6V3AyBzhRc90jdmemq0HOxDr6ylhUbDhBqqPpeNw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.26.0", + "@opentelemetry/semantic-conventions": "1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-logs": { + "version": "0.53.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-logs/-/sdk-logs-0.53.0.tgz", + "integrity": "sha512-dhSisnEgIj/vJZXZV6f6KcTnyLDx/VuQ6l3ejuZpMpPlh9S1qMHiZU9NMmOkVkwwHkMy3G6mEBwdP23vUZVr4g==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/api-logs": "0.53.0", + "@opentelemetry/core": "1.26.0", + "@opentelemetry/resources": "1.26.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + 
"@opentelemetry/api": ">=1.4.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-metrics": { + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-1.26.0.tgz", + "integrity": "sha512-0SvDXmou/JjzSDOjUmetAAvcKQW6ZrvosU0rkbDGpXvvZN+pQF6JbK/Kd4hNdK4q/22yeruqvukXEJyySTzyTQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.26.0", + "@opentelemetry/resources": "1.26.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.3.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-trace-base": { + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.26.0.tgz", + "integrity": "sha512-olWQldtvbK4v22ymrKLbIcBi9L2SpMO84sCPY54IVsJhP9fRsxJT194C/AVaAuJzLE30EdhhM1VmvVYR7az+cw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/core": "1.26.0", + "@opentelemetry/resources": "1.26.0", + "@opentelemetry/semantic-conventions": "1.27.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/sdk-trace-node": { + "version": "1.26.0", + "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.26.0.tgz", + "integrity": "sha512-Fj5IVKrj0yeUwlewCRwzOVcr5avTuNnMHWf7GPc1t6WaT78J6CJyF3saZ/0RkZfdeNO8IcBl/bNcWMVZBMRW8Q==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@opentelemetry/context-async-hooks": "1.26.0", + "@opentelemetry/core": "1.26.0", + "@opentelemetry/propagator-b3": "1.26.0", + "@opentelemetry/propagator-jaeger": "1.26.0", + "@opentelemetry/sdk-trace-base": "1.26.0", + "semver": "^7.5.2" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "@opentelemetry/api": ">=1.0.0 <1.10.0" + } + }, + "node_modules/@opentelemetry/semantic-conventions": { + "version": "1.27.0", + "resolved": 
"https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.27.0.tgz", + "integrity": "sha512-sAay1RrB+ONOem0OZanAR1ZI/k7yDpnOQSQmTMuGImUQb2y8EbSaCJ94FQluM74xoU03vlb2d2U90hZluL6nQg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=14" + } + }, + "node_modules/@protobufjs/aspromise": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/base64": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/codegen": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/eventemitter": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/fetch": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", + "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.1", + "@protobufjs/inquire": "^1.1.0" + } + }, + "node_modules/@protobufjs/float": { + "version": 
"1.0.2", + "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/inquire": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/path": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/pool": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@protobufjs/utf8": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@redocly/ajv": { + "version": "8.11.2", + "resolved": "https://registry.npmjs.org/@redocly/ajv/-/ajv-8.11.2.tgz", + "integrity": "sha512-io1JpnwtIcvojV7QKDUSIuMN/ikdOUd1ReEnUnMKGfDVridQZ31J0MmIuqwuRjWDZfmvr+Q0MqCcfHM2gTivOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js-replace": "^1.0.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + 
"node_modules/@redocly/cli": { + "version": "1.34.4", + "resolved": "https://registry.npmjs.org/@redocly/cli/-/cli-1.34.4.tgz", + "integrity": "sha512-seH/GgrjSB1EeOsgJ/4Ct6Jk2N7sh12POn/7G8UQFARMyUMJpe1oHtBwT2ndfp4EFCpgBAbZ/82Iw6dwczNxEA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@opentelemetry/api": "1.9.0", + "@opentelemetry/exporter-trace-otlp-http": "0.53.0", + "@opentelemetry/resources": "1.26.0", + "@opentelemetry/sdk-trace-node": "1.26.0", + "@opentelemetry/semantic-conventions": "1.27.0", + "@redocly/config": "^0.22.0", + "@redocly/openapi-core": "1.34.4", + "@redocly/respect-core": "1.34.4", + "abort-controller": "^3.0.0", + "chokidar": "^3.5.1", + "colorette": "^1.2.0", + "core-js": "^3.32.1", + "dotenv": "16.4.7", + "form-data": "^4.0.0", + "get-port-please": "^3.0.1", + "glob": "^7.1.6", + "handlebars": "^4.7.6", + "mobx": "^6.0.4", + "pluralize": "^8.0.0", + "react": "^17.0.0 || ^18.2.0 || ^19.0.0", + "react-dom": "^17.0.0 || ^18.2.0 || ^19.0.0", + "redoc": "2.5.0", + "semver": "^7.5.2", + "simple-websocket": "^9.0.0", + "styled-components": "^6.0.7", + "yargs": "17.0.1" + }, + "bin": { + "openapi": "bin/cli.js", + "redocly": "bin/cli.js" + }, + "engines": { + "node": ">=18.17.0", + "npm": ">=9.5.0" + } + }, + "node_modules/@redocly/config": { + "version": "0.22.2", + "resolved": "https://registry.npmjs.org/@redocly/config/-/config-0.22.2.tgz", + "integrity": "sha512-roRDai8/zr2S9YfmzUfNhKjOF0NdcOIqF7bhf4MVC5UxpjIysDjyudvlAiVbpPHp3eDRWbdzUgtkK1a7YiDNyQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@redocly/openapi-core": { + "version": "1.34.4", + "resolved": "https://registry.npmjs.org/@redocly/openapi-core/-/openapi-core-1.34.4.tgz", + "integrity": "sha512-hf53xEgpXIgWl3b275PgZU3OTpYh1RoD2LHdIfQ1JzBNTWsiNKczTEsI/4Tmh2N1oq9YcphhSMyk3lDh85oDjg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@redocly/ajv": "^8.11.2", + "@redocly/config": "^0.22.0", + "colorette": "^1.2.0", + "https-proxy-agent": "^7.0.5", + 
"js-levenshtein": "^1.1.6", + "js-yaml": "^4.1.0", + "minimatch": "^5.0.1", + "pluralize": "^8.0.0", + "yaml-ast-parser": "0.0.43" + }, + "engines": { + "node": ">=18.17.0", + "npm": ">=9.5.0" + } + }, + "node_modules/@redocly/respect-core": { + "version": "1.34.4", + "resolved": "https://registry.npmjs.org/@redocly/respect-core/-/respect-core-1.34.4.tgz", + "integrity": "sha512-MitKyKyQpsizA4qCVv+MjXL4WltfhFQAoiKiAzrVR1Kusro3VhYb6yJuzoXjiJhR0ukLP5QOP19Vcs7qmj9dZg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@faker-js/faker": "^7.6.0", + "@redocly/ajv": "8.11.2", + "@redocly/openapi-core": "1.34.4", + "better-ajv-errors": "^1.2.0", + "colorette": "^2.0.20", + "concat-stream": "^2.0.0", + "cookie": "^0.7.2", + "dotenv": "16.4.7", + "form-data": "4.0.0", + "jest-diff": "^29.3.1", + "jest-matcher-utils": "^29.3.1", + "js-yaml": "4.1.0", + "json-pointer": "^0.6.2", + "jsonpath-plus": "^10.0.6", + "open": "^10.1.0", + "openapi-sampler": "^1.6.1", + "outdent": "^0.8.0", + "set-cookie-parser": "^2.3.5", + "undici": "^6.21.1" + }, + "engines": { + "node": ">=18.17.0", + "npm": ">=9.5.0" + } + }, + "node_modules/@redocly/respect-core/node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz", + "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@redocly/respect-core/node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dev": true, + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": 
"https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sourcemeta/jsonschema": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@sourcemeta/jsonschema/-/jsonschema-10.0.0.tgz", + "integrity": "sha512-NyRjy3JxFrcDU9zci4fTe4dhoUZu61UNONgxJ13hmhaUAYF51gYvVEoWpDtl1ckikdboMuAm/QVeelh/+B8hGQ==", + "cpu": [ + "x64", + "arm64" + ], + "dev": true, + "license": "AGPL-3.0", + "os": [ + "darwin", + "linux", + "win32" + ], + "bin": { + "jsonschema": "cli.js" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sourcemeta" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "24.0.13", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.0.13.tgz", + "integrity": "sha512-Qm9OYVOFHFYg3wJoTSrz80hoec5Lia/dPp84do3X7dZvLikQvM1YpmvTBEdIr/e+U8HTkFjLHLnl78K/qjf+jQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~7.8.0" + } + }, + "node_modules/@types/stylis": { + "version": "4.2.5", + "resolved": "https://registry.npmjs.org/@types/stylis/-/stylis-4.2.5.tgz", + "integrity": "sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/trusted-types": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz", + "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", + "dev": true, + "license": "MIT", + 
"optional": true + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "dev": true, + "license": "MIT", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": 
"https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/better-ajv-errors": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/better-ajv-errors/-/better-ajv-errors-1.2.0.tgz", + "integrity": "sha512-UW+IsFycygIo7bclP9h5ugkNH8EjCSgqyFB/yQ4Hqqa1OEYDtb0uFIkYE0b6+CjkgJYVM5UKI/pJPxjYe9EZlA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@babel/code-frame": "^7.16.0", + "@humanwhocodes/momoa": "^2.0.2", + "chalk": "^4.1.2", + "jsonpointer": "^5.0.0", + "leven": "^3.1.0 < 4" + }, + "engines": { + "node": ">= 12.13.0" + }, + "peerDependencies": { + "ajv": "4.11.8 - 8" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": 
"https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/bundle-name": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz", + "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "run-applescript": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": 
"sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-me-maybe": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz", + "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/camelize": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/camelize/-/camelize-1.0.1.tgz", + "integrity": "sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + 
"node_modules/classnames": { + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", + "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==", + "dev": true, + "license": "MIT" + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/colorette": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/colorette/-/colorette-1.4.0.tgz", + "integrity": "sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": 
"https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz", + "integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==", + "dev": true, + "engines": [ + "node >= 6.0" + ], + "license": "MIT", + "dependencies": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.0.2", + "typedarray": "^0.0.6" + } + }, + "node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/core-js": { + "version": "3.44.0", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.44.0.tgz", + "integrity": "sha512-aFCtd4l6GvAXwVEh3XbbVqJGHDJt0OZRa+5ePGx3LLwi12WfexqQxcsohb2wgsa/92xtl19Hd66G/L+TaAxDMw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/css-color-keywords": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/css-color-keywords/-/css-color-keywords-1.0.0.tgz", + "integrity": 
"sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=4" + } + }, + "node_modules/css-to-react-native": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/css-to-react-native/-/css-to-react-native-3.2.0.tgz", + "integrity": "sha512-e8RKaLXMOFii+02mOlqwjbD00KSEKqblnpO9e++1aXS1fPQOpS1YoqdVHBqPjHNoxeF2mimzVqawm2KCbEdtHQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelize": "^1.0.0", + "css-color-keywords": "^1.0.0", + "postcss-value-parser": "^4.0.2" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "dev": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", + "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decko": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decko/-/decko-1.2.0.tgz", + "integrity": "sha512-m8FnyHXV1QX+S1cl+KPFDIl6NMkxtKsy6+U/aYyjrOqWMuwAwYWu7ePqrsUHtDR5Y8Yk2pi/KIDSgF+vT4cPOQ==", + "dev": true + }, + "node_modules/default-browser": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.2.1.tgz", + "integrity": "sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==", + "dev": true, + "license": "MIT", + "dependencies": { + "bundle-name": "^4.1.0", + "default-browser-id": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser-id": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.0.tgz", + "integrity": "sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-lazy-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", + "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/dompurify": { + "version": "3.2.6", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.6.tgz", + "integrity": "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ==", + "dev": true, + "license": "(MPL-2.0 OR Apache-2.0)", + "optionalDependencies": { + "@types/trusted-types": "^2.0.7" + } + }, + "node_modules/dotenv": { + "version": "16.4.7", + 
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", + "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "dev": true, + "license": 
"MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es6-promise": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-3.3.1.tgz", + "integrity": "sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==", + "dev": true, + "license": "MIT" + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/eventemitter3": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz", + "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-safe-stringify": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.1.1.tgz", + "integrity": "sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-uri": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.0.6.tgz", + "integrity": "sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause", + "peer": true + }, + "node_modules/fast-xml-parser": { + "version": "4.5.3", + "resolved": "https://registry.npmjs.org/fast-xml-parser/-/fast-xml-parser-4.5.3.tgz", + "integrity": "sha512-RKihhV+SHsIUGXObeVy9AXiBbFwkVk7Syp8XgwN5U3JV416+Gwp/GO9i0JYKmikykgz/UHRrrV4ROuZEo/T0ig==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/NaturalIntelligence" + } + ], + "license": "MIT", + "dependencies": { + "strnum": "^1.1.1" + }, + "bin": { + "fxparser": "src/cli/cli.js" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/foreach": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/foreach/-/foreach-2.0.6.tgz", + "integrity": 
"sha512-k6GAGDyqLe9JaebCsFCoudPPWfihKu8pylYXRlqP1J7ms39iPoTtk2fviNglIeQEwdh0bQeKJ01ZPyuyQvKzwg==", + "dev": true, + "license": "MIT" + }, + "node_modules/form-data": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.3.tgz", + "integrity": "sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": 
"6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-port-please": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/get-port-please/-/get-port-please-3.2.0.tgz", + "integrity": "sha512-I9QVvBw5U/hw3RmWpYKRumUeaDgxTPd401x364rLmWBJcOQ753eov1eTgzDqRG9bqFIfDc7gfzcQEWrUri3o1A==", + "dev": true, + "license": "MIT" + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": 
"https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/handlebars": { + "version": "4.7.8", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", + "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "minimist": "^1.2.5", + "neo-async": "^2.6.2", + "source-map": "^0.6.1", + "wordwrap": "^1.0.0" + }, + "bin": { + 
"handlebars": "bin/handlebars" + }, + "engines": { + "node": ">=0.4.7" + }, + "optionalDependencies": { + "uglify-js": "^3.1.4" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/http2-client": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/http2-client/-/http2-client-1.3.5.tgz", + "integrity": "sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA==", + "dev": true, + "license": "MIT" + }, + "node_modules/https-proxy-agent": { + 
"version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "dev": true, + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-wsl": { + "version": 
"3.1.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz", + "integrity": "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/js-levenshtein": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/js-levenshtein/-/js-levenshtein-1.1.6.tgz", + "integrity": 
"sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsep": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/jsep/-/jsep-1.4.0.tgz", + "integrity": "sha512-B7qPcEVE3NVkmSJbaYxvv4cHkVW7DQsZz13pUMrfS8z8Q/BuShN+gcTXrUlPiGqM2/t/EEaI030bpxMqY8gMlw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10.16.0" + } + }, + "node_modules/json-pointer": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/json-pointer/-/json-pointer-0.6.2.tgz", + "integrity": "sha512-vLWcKbOaXlO+jvRy4qNd+TI1QUPZzfJj1tpJ3vAXDych5XJf93ftpUKe5pKCrzyIIwgBJcOcCVRUfqQP25afBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "foreach": "^2.0.4" + } + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsonpath-plus": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-10.3.0.tgz", + "integrity": 
"sha512-8TNmfeTCk2Le33A3vRRwtuworG/L5RrgMvdjhKZxvyShO+mBu2fP50OWUjRLNtvw344DdDarFh9buFAZs5ujeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jsep-plugin/assignment": "^1.3.0", + "@jsep-plugin/regex": "^1.0.4", + "jsep": "^1.4.0" + }, + "bin": { + "jsonpath": "bin/jsonpath-cli.js", + "jsonpath-plus": "bin/jsonpath-cli.js" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/jsonpointer": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz", + "integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/long": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", + "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lunr": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz", + "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==", + "dev": true, + "license": "MIT" + }, + "node_modules/mark.js": { + "version": 
"8.11.1", + "resolved": "https://registry.npmjs.org/mark.js/-/mark.js-8.11.1.tgz", + "integrity": "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/marked": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz", + "integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==", + "dev": true, + "license": "MIT", + "bin": { + "marked": "bin/marked.js" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + 
"node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mobx": { + "version": "6.13.7", + "resolved": "https://registry.npmjs.org/mobx/-/mobx-6.13.7.tgz", + "integrity": "sha512-aChaVU/DO5aRPmk1GX8L+whocagUUpBQqoPtJk+cm7UOXUk87J4PeWCh6nNmTTIfEhiR9DI/+FnA8dln/hTK7g==", + "dev": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mobx" + } + }, + "node_modules/mobx-react": { + "version": "9.2.0", + "resolved": "https://registry.npmjs.org/mobx-react/-/mobx-react-9.2.0.tgz", + "integrity": "sha512-dkGWCx+S0/1mfiuFfHRH8D9cplmwhxOV5CkXMp38u6rQGG2Pv3FWYztS0M7ncR6TyPRQKaTG/pnitInoYE9Vrw==", + "dev": true, + "license": "MIT", + "dependencies": { + "mobx-react-lite": "^4.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mobx" + }, + "peerDependencies": { + "mobx": "^6.9.0", + "react": "^16.8.0 || ^17 || ^18 || ^19" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + } + } + }, + "node_modules/mobx-react-lite": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mobx-react-lite/-/mobx-react-lite-4.1.0.tgz", + "integrity": "sha512-QEP10dpHHBeQNv1pks3WnHRCem2Zp636lq54M2nKO2Sarr13pL4u6diQXf65yzXUn0mkk18SyIDCm9UOJYTi1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "use-sync-external-store": "^1.4.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mobx" + }, + "peerDependencies": { + "mobx": "^6.9.0", + "react": "^16.8.0 || ^17 || ^18 || ^19" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + } + } + }, 
+ "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-fetch-h2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/node-fetch-h2/-/node-fetch-h2-2.3.0.tgz", + "integrity": "sha512-ofRW94Ab0T4AOh5Fk8t0h8OBWrmjb0SSB20xh1H8YnPV9EJ+f5AMoYSUQ2zgJ4Iq2HAK0I2l5/Nequ8YzFS3Hg==", + "dev": true, + "license": "MIT", + "dependencies": { + "http2-client": "^1.2.5" + }, + "engines": { + "node": "4.x || >=6.0.0" + } + }, + "node_modules/node-readfiles": { + "version": "0.2.0", + "resolved": 
"https://registry.npmjs.org/node-readfiles/-/node-readfiles-0.2.0.tgz", + "integrity": "sha512-SU00ZarexNlE4Rjdm83vglt5Y9yiQ+XI1XpflWlb7q7UTN1JUItm69xMeiQCTxtTfnzt+83T8Cx+vI2ED++VDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es6-promise": "^3.2.1" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/oas-kit-common": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/oas-kit-common/-/oas-kit-common-1.0.8.tgz", + "integrity": "sha512-pJTS2+T0oGIwgjGpw7sIRU8RQMcUoKCDWFLdBqKB2BNmGpbBMH2sdqAaOXUg8OzonZHU0L7vfJu1mJFEiYDWOQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "fast-safe-stringify": "^2.0.7" + } + }, + "node_modules/oas-linter": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/oas-linter/-/oas-linter-3.2.2.tgz", + "integrity": "sha512-KEGjPDVoU5K6swgo9hJVA/qYGlwfbFx+Kg2QB/kd7rzV5N8N5Mg6PlsoCMohVnQmo+pzJap/F610qTodKzecGQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@exodus/schemasafe": "^1.0.0-rc.2", + "should": "^13.2.1", + "yaml": "^1.10.0" + }, + "funding": { + "url": "https://github.com/Mermade/oas-kit?sponsor=1" + } + }, + "node_modules/oas-resolver": { + "version": "2.5.6", + "resolved": "https://registry.npmjs.org/oas-resolver/-/oas-resolver-2.5.6.tgz", + "integrity": "sha512-Yx5PWQNZomfEhPPOphFbZKi9W93CocQj18NlD2Pa4GWZzdZpSJvYwoiuurRI7m3SpcChrnO08hkuQDL3FGsVFQ==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "node-fetch-h2": "^2.3.0", + "oas-kit-common": "^1.0.8", + "reftools": "^1.1.9", + "yaml": "^1.10.0", + "yargs": "^17.0.1" + }, + "bin": { + "resolve": "resolve.js" + }, + "funding": { + "url": 
"https://github.com/Mermade/oas-kit?sponsor=1" + } + }, + "node_modules/oas-schema-walker": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/oas-schema-walker/-/oas-schema-walker-1.1.5.tgz", + "integrity": "sha512-2yucenq1a9YPmeNExoUa9Qwrt9RFkjqaMAA1X+U7sbb0AqBeTIdMHky9SQQ6iN94bO5NW0W4TRYXerG+BdAvAQ==", + "dev": true, + "license": "BSD-3-Clause", + "funding": { + "url": "https://github.com/Mermade/oas-kit?sponsor=1" + } + }, + "node_modules/oas-validator": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/oas-validator/-/oas-validator-5.0.8.tgz", + "integrity": "sha512-cu20/HE5N5HKqVygs3dt94eYJfBi0TsZvPVXDhbXQHiEityDN+RROTleefoKRKKJ9dFAF2JBkDHgvWj0sjKGmw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "call-me-maybe": "^1.0.1", + "oas-kit-common": "^1.0.8", + "oas-linter": "^3.2.2", + "oas-resolver": "^2.5.6", + "oas-schema-walker": "^1.1.5", + "reftools": "^1.1.9", + "should": "^13.2.1", + "yaml": "^1.10.0" + }, + "funding": { + "url": "https://github.com/Mermade/oas-kit?sponsor=1" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/open": { + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/open/-/open-10.2.0.tgz", + "integrity": "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"default-browser": "^5.2.1", + "define-lazy-prop": "^3.0.0", + "is-inside-container": "^1.0.0", + "wsl-utils": "^0.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/openapi-sampler": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/openapi-sampler/-/openapi-sampler-1.6.1.tgz", + "integrity": "sha512-s1cIatOqrrhSj2tmJ4abFYZQK6l5v+V4toO5q1Pa0DyN8mtyqy2I+Qrj5W9vOELEtybIMQs/TBZGVO/DtTFK8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/json-schema": "^7.0.7", + "fast-xml-parser": "^4.5.0", + "json-pointer": "0.6.2" + } + }, + "node_modules/outdent": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/outdent/-/outdent-0.8.0.tgz", + "integrity": "sha512-KiOAIsdpUTcAXuykya5fnVVT+/5uS0Q1mrkRHcF89tpieSmY33O/tmc54CqwA+bfhbtEfZUNLHaPUiB9X3jt1A==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-browserify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", + "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/perfect-scrollbar": { + "version": "1.5.6", + "resolved": "https://registry.npmjs.org/perfect-scrollbar/-/perfect-scrollbar-1.5.6.tgz", + "integrity": "sha512-rixgxw3SxyJbCaSpo1n35A/fwI1r2rdwMKOTCg/AcG+xOEyZcE8UHVjpZMFCVImzsFoCZeJTT+M/rdEIQYO2nw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pluralize": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-8.0.0.tgz", + "integrity": "sha512-Nc3IT5yHzflTfbjgqWcCPpo7DaKy4FnpB0l/zCAW0Tc7jxAiuqSxHasntB3D7887LSrA93kDJ9IXovxJYxyLCA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/polished": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz", + "integrity": "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.17.8" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/postcss": { + "version": "8.4.49", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz", + "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + 
"node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dev": true, + "license": "MIT", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + 
"resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/protobufjs": { + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.3.tgz", + "integrity": "sha512-sildjKwVqOI2kmFDiXQ6aEB0fjYTafpEvIBs8tOR8qI4spuL9OPROLVu2qZqi/xgCfsHIwVqlaF8JBjWFHnKbw==", + "dev": true, + "hasInstallScript": true, + "license": "BSD-3-Clause", + "dependencies": { + "@protobufjs/aspromise": "^1.1.2", + "@protobufjs/base64": "^1.1.2", + "@protobufjs/codegen": "^2.0.4", + "@protobufjs/eventemitter": "^1.1.0", + "@protobufjs/fetch": "^1.1.0", + "@protobufjs/float": "^1.0.2", + "@protobufjs/inquire": "^1.1.0", + "@protobufjs/path": "^1.1.2", + "@protobufjs/pool": "^1.1.0", + "@protobufjs/utf8": "^1.1.0", + "@types/node": ">=13.7.0", + "long": "^5.0.0" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/react": { + "version": "19.1.0", + "resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz", + 
"integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.1.0", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.0.tgz", + "integrity": "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "scheduler": "^0.26.0" + }, + "peerDependencies": { + "react": "^19.1.0" + } + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/react-tabs": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/react-tabs/-/react-tabs-6.1.0.tgz", + "integrity": "sha512-6QtbTRDKM+jA/MZTTefvigNxo0zz+gnBTVFw2CFVvq+f2BuH0nF0vDLNClL045nuTAdOoK/IL1vTP0ZLX0DAyQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "clsx": "^2.0.0", + "prop-types": "^15.5.0" + }, + "peerDependencies": { + "react": "^18.0.0 || ^19.0.0" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/redoc": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/redoc/-/redoc-2.5.0.tgz", + "integrity": "sha512-NpYsOZ1PD9qFdjbLVBZJWptqE+4Y6TkUuvEOqPUmoH7AKOmPcE+hYjotLxQNTqVoWL4z0T2uxILmcc8JGDci+Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@redocly/openapi-core": "^1.4.0", + "classnames": "^2.3.2", + "decko": "^1.2.0", + "dompurify": "^3.2.4", + "eventemitter3": "^5.0.1", + "json-pointer": "^0.6.2", + "lunr": "^2.3.9", + "mark.js": "^8.11.1", + "marked": "^4.3.0", + "mobx-react": "^9.1.1", + "openapi-sampler": "^1.5.0", + "path-browserify": "^1.0.1", + "perfect-scrollbar": "^1.5.5", + "polished": "^4.2.2", + "prismjs": "^1.29.0", + "prop-types": "^15.8.1", + "react-tabs": "^6.0.2", + "slugify": "~1.4.7", + "stickyfill": "^1.1.1", + "swagger2openapi": "^7.0.8", + "url-template": "^2.0.8" + }, + "engines": { + "node": ">=6.9", + "npm": ">=3.0.0" + }, + "peerDependencies": { + "core-js": "^3.1.4", + "mobx": "^6.0.4", + "react": "^16.8.4 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.4 || ^17.0.0 || ^18.0.0 || ^19.0.0", + "styled-components": "^4.1.1 || ^5.1.1 || ^6.0.5" + } + }, + "node_modules/reftools": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/reftools/-/reftools-1.1.9.tgz", + "integrity": "sha512-OVede/NQE13xBQ+ob5CKd5KyeJYU2YInb1bmV4nRoOfquZPkAkxuOXicSe1PvqIuZZ4kD13sPKBbR7UFDmli6w==", + "dev": true, + "license": "BSD-3-Clause", + "funding": { + "url": "https://github.com/Mermade/oas-kit?sponsor=1" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + 
"version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/run-applescript": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.0.0.tgz", + "integrity": "sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.26.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz", + "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/set-cookie-parser": { + "version": "2.7.1", + "resolved": 
"https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", + "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/shallowequal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz", + "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/should": { + "version": "13.2.3", + "resolved": "https://registry.npmjs.org/should/-/should-13.2.3.tgz", + "integrity": "sha512-ggLesLtu2xp+ZxI+ysJTmNjh2U0TsC+rQ/pfED9bUZZ4DKefP27D+7YJVVTvKsmjLpIi9jAa7itwDGkDDmt1GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "should-equal": "^2.0.0", + "should-format": "^3.0.3", + "should-type": "^1.4.0", + "should-type-adaptors": "^1.0.1", + "should-util": "^1.0.0" + } + }, + "node_modules/should-equal": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/should-equal/-/should-equal-2.0.0.tgz", + "integrity": "sha512-ZP36TMrK9euEuWQYBig9W55WPC7uo37qzAEmbjHz4gfyuXrEUgF8cUvQVO+w+d3OMfPvSRQJ22lSm8MQJ43LTA==", + "dev": true, + "license": "MIT", + "dependencies": { + "should-type": "^1.4.0" + } + }, + "node_modules/should-format": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/should-format/-/should-format-3.0.3.tgz", + "integrity": "sha512-hZ58adtulAk0gKtua7QxevgUaXTTXxIi8t41L3zo9AHvjXO1/7sdLECuHeIN2SRtYXpNkmhoUP2pdeWgricQ+Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "should-type": "^1.3.0", + "should-type-adaptors": "^1.0.1" + } + }, + "node_modules/should-type": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/should-type/-/should-type-1.4.0.tgz", + "integrity": "sha512-MdAsTu3n25yDbIe1NeN69G4n6mUnJGtSJHygX3+oN0ZbO3DTiATnf7XnYJdGT42JCXurTb1JI0qOBR65shvhPQ==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/should-type-adaptors": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/should-type-adaptors/-/should-type-adaptors-1.1.0.tgz", + "integrity": "sha512-JA4hdoLnN+kebEp2Vs8eBe9g7uy0zbRo+RMcU0EsNy+R+k049Ki+N5tT5Jagst2g7EAja+euFuoXFCa8vIklfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "should-type": "^1.3.0", + "should-util": "^1.0.0" + } + }, + "node_modules/should-util": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/should-util/-/should-util-1.0.1.tgz", + "integrity": "sha512-oXF8tfxx5cDk8r2kYqlkUJzZpDBqVY/II2WhvU0n9Y3XYvAYRmeaf1PvvIvTgPnv4KJ+ES5M0PyDq5Jp+Ygy2g==", + "dev": true, + "license": "MIT" + }, + "node_modules/simple-websocket": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/simple-websocket/-/simple-websocket-9.1.0.tgz", + "integrity": "sha512-8MJPnjRN6A8UCp1I+H/dSFyjwJhp6wta4hsVRhjf8w9qBHRzxYt14RaOcjvQnhD1N4yKOddEjflwMnQM4VtXjQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "debug": "^4.3.1", + "queue-microtask": "^1.2.2", + "randombytes": "^2.1.0", + "readable-stream": "^3.6.0", + "ws": "^7.4.2" + } + }, + "node_modules/slugify": { + "version": "1.4.7", + "resolved": "https://registry.npmjs.org/slugify/-/slugify-1.4.7.tgz", + "integrity": "sha512-tf+h5W1IrjNm/9rKKj0JU2MDMruiopx0jjVA5zCdBtcGjfp0+c5rHw/zADLC3IeKlGHtVbHtpfzvYA0OYT+HKg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": 
">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stickyfill": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/stickyfill/-/stickyfill-1.1.1.tgz", + "integrity": "sha512-GCp7vHAfpao+Qh/3Flh9DXEJ/qSi0KJwJw6zYlZOtRYXWUIpMM6mC2rIep/dK8RQqwW0KxGJIllmjPIBOGN8AA==", + "dev": true + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strnum": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/strnum/-/strnum-1.1.2.tgz", + "integrity": 
"sha512-vrN+B7DBIoTTZjnPNewwhx6cBA/H+IS7rfW68n7XxC1y7uoiGQBxaKzqucGUgavX15dJgiGztLJ8vxuEzwqBdA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/NaturalIntelligence" + } + ], + "license": "MIT" + }, + "node_modules/styled-components": { + "version": "6.1.19", + "resolved": "https://registry.npmjs.org/styled-components/-/styled-components-6.1.19.tgz", + "integrity": "sha512-1v/e3Dl1BknC37cXMhwGomhO8AkYmN41CqyX9xhUDxry1ns3BFQy2lLDRQXJRdVVWB9OHemv/53xaStimvWyuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@emotion/is-prop-valid": "1.2.2", + "@emotion/unitless": "0.8.1", + "@types/stylis": "4.2.5", + "css-to-react-native": "3.2.0", + "csstype": "3.1.3", + "postcss": "8.4.49", + "shallowequal": "1.1.0", + "stylis": "4.3.2", + "tslib": "2.6.2" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/styled-components" + }, + "peerDependencies": { + "react": ">= 16.8.0", + "react-dom": ">= 16.8.0" + } + }, + "node_modules/stylis": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.2.tgz", + "integrity": "sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg==", + "dev": true, + "license": "MIT" + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/swagger2openapi": { + "version": "7.0.8", + "resolved": "https://registry.npmjs.org/swagger2openapi/-/swagger2openapi-7.0.8.tgz", + "integrity": "sha512-upi/0ZGkYgEcLeGieoz8gT74oWHA0E7JivX7aN9mAf+Tc7BQoRBvnIGHoPDw+f9TXTW4s6kGYCZJtauP6OYp7g==", + "dev": true, + "license": "BSD-3-Clause", + 
"dependencies": { + "call-me-maybe": "^1.0.1", + "node-fetch": "^2.6.1", + "node-fetch-h2": "^2.3.0", + "node-readfiles": "^0.2.0", + "oas-kit-common": "^1.0.8", + "oas-resolver": "^2.5.6", + "oas-schema-walker": "^1.1.5", + "oas-validator": "^5.0.8", + "reftools": "^1.1.9", + "yaml": "^1.10.0", + "yargs": "^17.0.1" + }, + "bin": { + "boast": "boast.js", + "oas-validate": "oas-validate.js", + "swagger2openapi": "swagger2openapi.js" + }, + "funding": { + "url": "https://github.com/Mermade/oas-kit?sponsor=1" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==", + "dev": true, + "license": "0BSD" + }, + "node_modules/typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/uglify-js": { + "version": "3.19.3", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", + "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", + "dev": true, + "license": 
"BSD-2-Clause", + "optional": true, + "bin": { + "uglifyjs": "bin/uglifyjs" + }, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/undici": { + "version": "6.21.3", + "resolved": "https://registry.npmjs.org/undici/-/undici-6.21.3.tgz", + "integrity": "sha512-gBLkYIlEnSp8pFbT64yFgGE6UIB9tAkhukC23PmMDCe5Nd+cRqKxSjw5y54MK2AZMgZfJWMaNE4nYUHgi1XEOw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.17" + } + }, + "node_modules/undici-types": { + "version": "7.8.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.8.0.tgz", + "integrity": "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw==", + "dev": true, + "license": "MIT" + }, + "node_modules/uri-js-replace": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/uri-js-replace/-/uri-js-replace-1.0.1.tgz", + "integrity": "sha512-W+C9NWNLFOoBI2QWDp4UT9pv65r2w5Cx+3sTYFvtMdDBxkKt1syCqsUdSFAChbEe1uK5TfS04wt/nGwmaeIQ0g==", + "dev": true, + "license": "MIT" + }, + "node_modules/url-template": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/url-template/-/url-template-2.0.8.tgz", + "integrity": "sha512-XdVKMF4SJ0nP/O7XIPB0JwAEuT9lDIYnNsK8yGVe43y0AWoKeJNdv3ZNWh7ksJ6KqQFjOO6ox/VEitLnaVNufw==", + "dev": true, + "license": "BSD" + }, + "node_modules/use-sync-external-store": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.5.0.tgz", + "integrity": "sha512-Rb46I4cGGVBmjamjphe8L/UnvJD+uPPtTkNvX5mZgqdbavhI4EbgIWJiIHXJ8bc/i9EQGPRh4DwEURJ552Do0A==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + 
"license": "MIT" + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ws": { + "version": "7.5.10", + "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz", + "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=8.3.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": "^5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/wsl-utils": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/wsl-utils/-/wsl-utils-0.1.0.tgz", + "integrity": "sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-wsl": "^3.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yaml-ast-parser": { + "version": "0.0.43", + "resolved": "https://registry.npmjs.org/yaml-ast-parser/-/yaml-ast-parser-0.0.43.tgz", + "integrity": "sha512-2PTINUwsRqSd+s8XxKaJWQlUuEMHJQyEuh2edBbW8KNJz0SJPwUSD2zRWqezFEdN7IzAgeuYHFUCF7o8zRdZ0A==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/yargs": { + "version": "17.0.1", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.0.1.tgz", + "integrity": "sha512-xBBulfCc8Y6gLFcrPvtqKz9hz8SO0l1Ni8GgDekvBX2ro0HRQImDGnikfc33cgzcYUSncapnNcZDjVFIH3f6KQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + 
"require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + } + } +} diff --git a/build-tools/package.json b/build-tools/package.json new file mode 100644 index 0000000000..000969c672 --- /dev/null +++ b/build-tools/package.json @@ -0,0 +1,8 @@ +{ + "name": "build-tools", + "private": true, + "devDependencies": { + "@redocly/cli": "1.34.4", + "@sourcemeta/jsonschema": "10.0.0" + } +} diff --git a/build_tools/patches/pgcopydbv017.patch b/build-tools/patches/pgcopydbv017.patch similarity index 100% rename from build_tools/patches/pgcopydbv017.patch rename to build-tools/patches/pgcopydbv017.patch diff --git a/clippy.toml b/clippy.toml index 408232488c..c03059053a 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,9 +1,12 @@ disallowed-methods = [ "tokio::task::block_in_place", + # Allow this for now, to deny it later once we stop using Handle::block_on completely # "tokio::runtime::Handle::block_on", - # use tokio_epoll_uring_ext instead - "tokio_epoll_uring::thread_local_system", + + # tokio-epoll-uring: + # - allow-invalid because the method doesn't exist on macOS + { path = "tokio_epoll_uring::thread_local_system", replacement = "tokio_epoll_uring_ext module inside pageserver crate", allow-invalid = true } ] disallowed-macros = [ diff --git a/compute/Makefile b/compute/Makefile index ef2e55f7b1..25bbb30d3a 100644 --- a/compute/Makefile +++ b/compute/Makefile @@ -50,9 +50,9 @@ jsonnetfmt-format: jsonnetfmt --in-place $(jsonnet_files) .PHONY: manifest-schema-validation -manifest-schema-validation: node_modules - node_modules/.bin/jsonschema validate -d 
https://json-schema.org/draft/2020-12/schema manifest.schema.json manifest.yaml +manifest-schema-validation: ../build-tools/node_modules + npx --prefix=../build-tools/ jsonschema validate -d https://json-schema.org/draft/2020-12/schema manifest.schema.json manifest.yaml -node_modules: package.json - npm install - touch node_modules +../build-tools/node_modules: ../build-tools/package.json + cd ../build-tools && $(if $(CI),npm ci,npm install) + touch ../build-tools/node_modules diff --git a/compute/compute-node.Dockerfile b/compute/compute-node.Dockerfile index 0dd32011fb..a658738d76 100644 --- a/compute/compute-node.Dockerfile +++ b/compute/compute-node.Dockerfile @@ -9,7 +9,7 @@ # # build-tools: This contains Rust compiler toolchain and other tools needed at compile # time. This is also used for the storage builds. This image is defined in -# build-tools.Dockerfile. +# build-tools/Dockerfile. # # build-deps: Contains C compiler, other build tools, and compile-time dependencies # needed to compile PostgreSQL and most extensions. (Some extensions need @@ -115,7 +115,7 @@ ARG EXTENSIONS=all FROM $BASE_IMAGE_SHA AS build-deps ARG DEBIAN_VERSION -# Keep in sync with build-tools.Dockerfile +# Keep in sync with build-tools/Dockerfile ENV PROTOC_VERSION=25.1 # Use strict mode for bash to catch errors early @@ -170,7 +170,29 @@ RUN case $DEBIAN_VERSION in \ FROM build-deps AS pg-build ARG PG_VERSION COPY vendor/postgres-${PG_VERSION:?} postgres +COPY compute/patches/postgres_fdw.patch . +COPY compute/patches/pg_stat_statements_pg14-16.patch . +COPY compute/patches/pg_stat_statements_pg17.patch . RUN cd postgres && \ + # Apply patches to some contrib extensions + # For example, we need to grant EXECUTE on pg_stat_statements_reset() to {privileged_role_name}. + # In vanilla Postgres this function is limited to Postgres role superuser. + # In Neon we have {privileged_role_name} role that is not a superuser but replaces superuser in some cases. 
+ # We could add the additional grant statements to the Postgres repository but it would be hard to maintain, + # whenever we need to pick up a new Postgres version and we want to limit the changes in our Postgres fork, + # so we do it here. + case "${PG_VERSION}" in \ + "v14" | "v15" | "v16") \ + patch -p1 < /pg_stat_statements_pg14-16.patch; \ + ;; \ + "v17") \ + patch -p1 < /pg_stat_statements_pg17.patch; \ + ;; \ + *) \ + # To do not forget to migrate patches to the next major version + echo "No contrib patches for this PostgreSQL version" && exit 1;; \ + esac && \ + patch -p1 < /postgres_fdw.patch && \ export CONFIGURE_CMD="./configure CFLAGS='-O2 -g3 -fsigned-char' --enable-debug --with-openssl --with-uuid=ossp \ --with-icu --with-libxml --with-libxslt --with-lz4" && \ if [ "${PG_VERSION:?}" != "v14" ]; then \ @@ -184,8 +206,6 @@ RUN cd postgres && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/autoinc.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/dblink.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgres_fdw.control && \ - file=/usr/local/pgsql/share/extension/postgres_fdw--1.0.sql && [ -e $file ] && \ - echo 'GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO neon_superuser;' >> $file && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/insert_username.control && \ @@ -195,34 +215,7 @@ RUN cd postgres && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/refint.control && \ - echo 'trusted = true' >> /usr/local/pgsql/share/extension/xml2.control && \ - # We need to grant EXECUTE on pg_stat_statements_reset() to neon_superuser. 
- # In vanilla postgres this function is limited to Postgres role superuser. - # In neon we have neon_superuser role that is not a superuser but replaces superuser in some cases. - # We could add the additional grant statements to the postgres repository but it would be hard to maintain, - # whenever we need to pick up a new postgres version and we want to limit the changes in our postgres fork, - # so we do it here. - for file in /usr/local/pgsql/share/extension/pg_stat_statements--*.sql; do \ - filename=$(basename "$file"); \ - # Note that there are no downgrade scripts for pg_stat_statements, so we \ - # don't have to modify any downgrade paths or (much) older versions: we only \ - # have to make sure every creation of the pg_stat_statements_reset function \ - # also adds execute permissions to the neon_superuser. - case $filename in \ - pg_stat_statements--1.4.sql) \ - # pg_stat_statements_reset is first created with 1.4 - echo 'GRANT EXECUTE ON FUNCTION pg_stat_statements_reset() TO neon_superuser;' >> $file; \ - ;; \ - pg_stat_statements--1.6--1.7.sql) \ - # Then with the 1.6-1.7 migration it is re-created with a new signature, thus add the permissions back - echo 'GRANT EXECUTE ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint) TO neon_superuser;' >> $file; \ - ;; \ - pg_stat_statements--1.10--1.11.sql) \ - # Then with the 1.10-1.11 migration it is re-created with a new signature again, thus add the permissions back - echo 'GRANT EXECUTE ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint, boolean) TO neon_superuser;' >> $file; \ - ;; \ - esac; \ - done; + echo 'trusted = true' >> /usr/local/pgsql/share/extension/xml2.control # Set PATH for all the subsequent build steps ENV PATH="/usr/local/pgsql/bin:$PATH" @@ -1524,7 +1517,7 @@ WORKDIR /ext-src COPY compute/patches/pg_duckdb_v031.patch . COPY compute/patches/duckdb_v120.patch . 
# pg_duckdb build requires source dir to be a git repo to get submodules -# allow neon_superuser to execute some functions that in pg_duckdb are available to superuser only: +# allow {privileged_role_name} to execute some functions that in pg_duckdb are available to superuser only: # - extension management function duckdb.install_extension() # - access to duckdb.extensions table and its sequence RUN git clone --depth 1 --branch v0.3.1 https://github.com/duckdb/pg_duckdb.git pg_duckdb-src && \ @@ -1790,7 +1783,7 @@ RUN set -e \ ######################################################################################### FROM build-deps AS exporters ARG TARGETARCH -# Keep sql_exporter version same as in build-tools.Dockerfile and +# Keep sql_exporter version same as in build-tools/Dockerfile and # test_runner/regress/test_compute_metrics.py # See comment on the top of the file regading `echo`, `-e` and `\n` RUN if [ "$TARGETARCH" = "amd64" ]; then\ @@ -1915,10 +1908,10 @@ RUN cd /ext-src/pg_repack-src && patch -p1 /etc/ld.so.conf.d/00-neon.conf && /sbin/ldconfig -RUN apt-get update && apt-get install -y libtap-parser-sourcehandler-pgtap-perl jq \ +RUN apt-get update && apt-get install -y libtap-parser-sourcehandler-pgtap-perl jq parallel \ && apt clean && rm -rf /ext-src/*.tar.gz /ext-src/*.patch /var/lib/apt/lists/* ENV PATH=/usr/local/pgsql/bin:$PATH -ENV PGHOST=compute +ENV PGHOST=compute1 ENV PGPORT=55433 ENV PGUSER=cloud_admin ENV PGDATABASE=postgres diff --git a/compute/package.json b/compute/package.json deleted file mode 100644 index 581384dc13..0000000000 --- a/compute/package.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "neon-compute", - "private": true, - "dependencies": { - "@sourcemeta/jsonschema": "9.3.4" - } -} \ No newline at end of file diff --git a/compute/patches/anon_v2.patch b/compute/patches/anon_v2.patch index 4faf927e39..ba9d7a8fe6 100644 --- a/compute/patches/anon_v2.patch +++ b/compute/patches/anon_v2.patch @@ -1,22 +1,26 @@ diff --git 
a/sql/anon.sql b/sql/anon.sql -index 0cdc769..b450327 100644 +index 0cdc769..5eab1d6 100644 --- a/sql/anon.sql +++ b/sql/anon.sql -@@ -1141,3 +1141,15 @@ $$ +@@ -1141,3 +1141,19 @@ $$ -- TODO : https://en.wikipedia.org/wiki/L-diversity -- TODO : https://en.wikipedia.org/wiki/T-closeness + +-- NEON Patches + -+GRANT ALL ON SCHEMA anon to neon_superuser; -+GRANT ALL ON ALL TABLES IN SCHEMA anon TO neon_superuser; -+ +DO $$ ++DECLARE ++ privileged_role_name text; +BEGIN -+ IF current_setting('server_version_num')::int >= 150000 THEN -+ GRANT SET ON PARAMETER anon.transparent_dynamic_masking TO neon_superuser; -+ END IF; ++ privileged_role_name := current_setting('neon.privileged_role_name'); ++ ++ EXECUTE format('GRANT ALL ON SCHEMA anon to %I', privileged_role_name); ++ EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA anon TO %I', privileged_role_name); ++ ++ IF current_setting('server_version_num')::int >= 150000 THEN ++ EXECUTE format('GRANT SET ON PARAMETER anon.transparent_dynamic_masking TO %I', privileged_role_name); ++ END IF; +END $$; diff --git a/sql/init.sql b/sql/init.sql index 7da6553..9b6164b 100644 diff --git a/compute/patches/pg_duckdb_v031.patch b/compute/patches/pg_duckdb_v031.patch index edc7fbf69d..f7aa374116 100644 --- a/compute/patches/pg_duckdb_v031.patch +++ b/compute/patches/pg_duckdb_v031.patch @@ -21,13 +21,21 @@ index 3235cc8..6b892bc 100644 include Makefile.global diff --git a/sql/pg_duckdb--0.2.0--0.3.0.sql b/sql/pg_duckdb--0.2.0--0.3.0.sql -index d777d76..af60106 100644 +index d777d76..3b54396 100644 --- a/sql/pg_duckdb--0.2.0--0.3.0.sql +++ b/sql/pg_duckdb--0.2.0--0.3.0.sql -@@ -1056,3 +1056,6 @@ GRANT ALL ON FUNCTION duckdb.cache(TEXT, TEXT) TO PUBLIC; +@@ -1056,3 +1056,14 @@ GRANT ALL ON FUNCTION duckdb.cache(TEXT, TEXT) TO PUBLIC; GRANT ALL ON FUNCTION duckdb.cache_info() TO PUBLIC; GRANT ALL ON FUNCTION duckdb.cache_delete(TEXT) TO PUBLIC; GRANT ALL ON PROCEDURE duckdb.recycle_ddb() TO PUBLIC; -+GRANT ALL ON FUNCTION 
duckdb.install_extension(TEXT) TO neon_superuser; -+GRANT ALL ON TABLE duckdb.extensions TO neon_superuser; -+GRANT ALL ON SEQUENCE duckdb.extensions_table_seq TO neon_superuser; ++ ++DO $$ ++DECLARE ++ privileged_role_name text; ++BEGIN ++ privileged_role_name := current_setting('neon.privileged_role_name'); ++ ++ EXECUTE format('GRANT ALL ON FUNCTION duckdb.install_extension(TEXT) TO %I', privileged_role_name); ++ EXECUTE format('GRANT ALL ON TABLE duckdb.extensions TO %I', privileged_role_name); ++ EXECUTE format('GRANT ALL ON SEQUENCE duckdb.extensions_table_seq TO %I', privileged_role_name); ++END $$; diff --git a/compute/patches/pg_stat_statements_pg14-16.patch b/compute/patches/pg_stat_statements_pg14-16.patch new file mode 100644 index 0000000000..368c6791c7 --- /dev/null +++ b/compute/patches/pg_stat_statements_pg14-16.patch @@ -0,0 +1,34 @@ +diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.4.sql b/contrib/pg_stat_statements/pg_stat_statements--1.4.sql +index 58cdf600fce..8be57a996f6 100644 +--- a/contrib/pg_stat_statements/pg_stat_statements--1.4.sql ++++ b/contrib/pg_stat_statements/pg_stat_statements--1.4.sql +@@ -46,3 +46,12 @@ GRANT SELECT ON pg_stat_statements TO PUBLIC; + + -- Don't want this to be available to non-superusers. 
+ REVOKE ALL ON FUNCTION pg_stat_statements_reset() FROM PUBLIC; ++ ++DO $$ ++DECLARE ++ privileged_role_name text; ++BEGIN ++ privileged_role_name := current_setting('neon.privileged_role_name'); ++ ++ EXECUTE format('GRANT EXECUTE ON FUNCTION pg_stat_statements_reset() TO %I', privileged_role_name); ++END $$; +diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql b/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql +index 6fc3fed4c93..256345a8f79 100644 +--- a/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql ++++ b/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql +@@ -20,3 +20,12 @@ LANGUAGE C STRICT PARALLEL SAFE; + + -- Don't want this to be available to non-superusers. + REVOKE ALL ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint) FROM PUBLIC; ++ ++DO $$ ++DECLARE ++ privileged_role_name text; ++BEGIN ++ privileged_role_name := current_setting('neon.privileged_role_name'); ++ ++ EXECUTE format('GRANT EXECUTE ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint) TO %I', privileged_role_name); ++END $$; diff --git a/compute/patches/pg_stat_statements_pg17.patch b/compute/patches/pg_stat_statements_pg17.patch new file mode 100644 index 0000000000..ff63b3255c --- /dev/null +++ b/compute/patches/pg_stat_statements_pg17.patch @@ -0,0 +1,52 @@ +diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.10--1.11.sql b/contrib/pg_stat_statements/pg_stat_statements--1.10--1.11.sql +index 0bb2c397711..32764db1d8b 100644 +--- a/contrib/pg_stat_statements/pg_stat_statements--1.10--1.11.sql ++++ b/contrib/pg_stat_statements/pg_stat_statements--1.10--1.11.sql +@@ -80,3 +80,12 @@ LANGUAGE C STRICT PARALLEL SAFE; + + -- Don't want this to be available to non-superusers. 
+ REVOKE ALL ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint, boolean) FROM PUBLIC; ++ ++DO $$ ++DECLARE ++ privileged_role_name text; ++BEGIN ++ privileged_role_name := current_setting('neon.privileged_role_name'); ++ ++ EXECUTE format('GRANT EXECUTE ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint, boolean) TO %I', privileged_role_name); ++END $$; +\ No newline at end of file +diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.4.sql b/contrib/pg_stat_statements/pg_stat_statements--1.4.sql +index 58cdf600fce..8be57a996f6 100644 +--- a/contrib/pg_stat_statements/pg_stat_statements--1.4.sql ++++ b/contrib/pg_stat_statements/pg_stat_statements--1.4.sql +@@ -46,3 +46,12 @@ GRANT SELECT ON pg_stat_statements TO PUBLIC; + + -- Don't want this to be available to non-superusers. + REVOKE ALL ON FUNCTION pg_stat_statements_reset() FROM PUBLIC; ++ ++DO $$ ++DECLARE ++ privileged_role_name text; ++BEGIN ++ privileged_role_name := current_setting('neon.privileged_role_name'); ++ ++ EXECUTE format('GRANT EXECUTE ON FUNCTION pg_stat_statements_reset() TO %I', privileged_role_name); ++END $$; +diff --git a/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql b/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql +index 6fc3fed4c93..256345a8f79 100644 +--- a/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql ++++ b/contrib/pg_stat_statements/pg_stat_statements--1.6--1.7.sql +@@ -20,3 +20,12 @@ LANGUAGE C STRICT PARALLEL SAFE; + + -- Don't want this to be available to non-superusers. 
+ REVOKE ALL ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint) FROM PUBLIC; ++ ++DO $$ ++DECLARE ++ privileged_role_name text; ++BEGIN ++ privileged_role_name := current_setting('neon.privileged_role_name'); ++ ++ EXECUTE format('GRANT EXECUTE ON FUNCTION pg_stat_statements_reset(Oid, Oid, bigint) TO %I', privileged_role_name); ++END $$; diff --git a/compute/patches/postgres_fdw.patch b/compute/patches/postgres_fdw.patch new file mode 100644 index 0000000000..d0007ffea5 --- /dev/null +++ b/compute/patches/postgres_fdw.patch @@ -0,0 +1,17 @@ +diff --git a/contrib/postgres_fdw/postgres_fdw--1.0.sql b/contrib/postgres_fdw/postgres_fdw--1.0.sql +index a0f0fc1bf45..ee077f2eea6 100644 +--- a/contrib/postgres_fdw/postgres_fdw--1.0.sql ++++ b/contrib/postgres_fdw/postgres_fdw--1.0.sql +@@ -16,3 +16,12 @@ LANGUAGE C STRICT; + CREATE FOREIGN DATA WRAPPER postgres_fdw + HANDLER postgres_fdw_handler + VALIDATOR postgres_fdw_validator; ++ ++DO $$ ++DECLARE ++ privileged_role_name text; ++BEGIN ++ privileged_role_name := current_setting('neon.privileged_role_name'); ++ ++ EXECUTE format('GRANT USAGE ON FOREIGN DATA WRAPPER postgres_fdw TO %I', privileged_role_name); ++END $$; diff --git a/compute_tools/Cargo.toml b/compute_tools/Cargo.toml index 1a03022d89..910bae3bda 100644 --- a/compute_tools/Cargo.toml +++ b/compute_tools/Cargo.toml @@ -66,7 +66,7 @@ url.workspace = true uuid.workspace = true walkdir.workspace = true x509-cert.workspace = true - +postgres-types.workspace = true postgres_versioninfo.workspace = true postgres_initdb.workspace = true compute_api.workspace = true diff --git a/compute_tools/README.md b/compute_tools/README.md index 8d84031efc..49f1368f0e 100644 --- a/compute_tools/README.md +++ b/compute_tools/README.md @@ -46,11 +46,14 @@ stateDiagram-v2 Configuration --> Failed : Failed to configure the compute Configuration --> Running : Compute has been configured Empty --> Init : Compute spec is immediately available - Empty --> TerminationPending : 
Requested termination + Empty --> TerminationPendingFast : Requested termination + Empty --> TerminationPendingImmediate : Requested termination Init --> Failed : Failed to start Postgres Init --> Running : Started Postgres - Running --> TerminationPending : Requested termination - TerminationPending --> Terminated : Terminated compute + Running --> TerminationPendingFast : Requested termination + Running --> TerminationPendingImmediate : Requested termination + TerminationPendingFast --> Terminated compute with 30s delay for cplane to inspect status + TerminationPendingImmediate --> Terminated : Terminated compute immediately Failed --> [*] : Compute exited Terminated --> [*] : Compute exited ``` diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs index db7746b8eb..78e2c6308f 100644 --- a/compute_tools/src/bin/compute_ctl.rs +++ b/compute_tools/src/bin/compute_ctl.rs @@ -87,6 +87,14 @@ struct Cli { #[arg(short = 'C', long, value_name = "DATABASE_URL")] pub connstr: String, + #[arg( + long, + default_value = "neon_superuser", + value_name = "PRIVILEGED_ROLE_NAME", + value_parser = Self::parse_privileged_role_name + )] + pub privileged_role_name: String, + #[cfg(target_os = "linux")] #[arg(long, default_value = "neon-postgres")] pub cgroup: String, @@ -149,6 +157,21 @@ impl Cli { Ok(url) } + + /// For simplicity, we do not escape `privileged_role_name` anywhere in the code. + /// Since it's a system role, which we fully control, that's fine. Still, let's + /// validate it to avoid any surprises. 
+ fn parse_privileged_role_name(value: &str) -> Result { + use regex::Regex; + + let pattern = Regex::new(r"^[a-z_]+$").unwrap(); + + if !pattern.is_match(value) { + bail!("--privileged-role-name can only contain lowercase letters and underscores") + } + + Ok(value.to_string()) + } } fn main() -> Result<()> { @@ -178,6 +201,7 @@ fn main() -> Result<()> { ComputeNodeParams { compute_id: cli.compute_id, connstr, + privileged_role_name: cli.privileged_role_name.clone(), pgdata: cli.pgdata.clone(), pgbin: cli.pgbin.clone(), pgversion: get_pg_version_string(&cli.pgbin), @@ -327,4 +351,49 @@ mod test { ]) .expect_err("URL parameters are not allowed"); } + + #[test] + fn verify_privileged_role_name() { + // Valid name + let cli = Cli::parse_from([ + "compute_ctl", + "--pgdata=test", + "--connstr=test", + "--compute-id=test", + "--privileged-role-name", + "my_superuser", + ]); + assert_eq!(cli.privileged_role_name, "my_superuser"); + + // Invalid names + Cli::try_parse_from([ + "compute_ctl", + "--pgdata=test", + "--connstr=test", + "--compute-id=test", + "--privileged-role-name", + "NeonSuperuser", + ]) + .expect_err("uppercase letters are not allowed"); + + Cli::try_parse_from([ + "compute_ctl", + "--pgdata=test", + "--connstr=test", + "--compute-id=test", + "--privileged-role-name", + "$'neon_superuser", + ]) + .expect_err("special characters are not allowed"); + + Cli::try_parse_from([ + "compute_ctl", + "--pgdata=test", + "--connstr=test", + "--compute-id=test", + "--privileged-role-name", + "", + ]) + .expect_err("empty name is not allowed"); + } } diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index f69a0e7c73..d7ec37cc0a 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -1,13 +1,13 @@ -use anyhow::{Context, Result, anyhow}; +use anyhow::{Context, Result}; use chrono::{DateTime, Utc}; use compute_api::privilege::Privilege; use compute_api::responses::{ ComputeConfig, ComputeCtlConfig, ComputeMetrics, 
ComputeStatus, LfcOffloadState, - LfcPrewarmState, TlsConfig, + LfcPrewarmState, PromoteState, TlsConfig, }; use compute_api::spec::{ ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PageserverConnectionInfo, - PageserverShardConnectionInfo, PgIdent, + PageserverProtocol, PageserverShardConnectionInfo, PageserverShardInfo, PgIdent, }; use futures::StreamExt; use futures::future::join_all; @@ -30,8 +30,7 @@ use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Condvar, Mutex, RwLock}; use std::time::{Duration, Instant}; use std::{env, fs}; -use tokio::task::JoinHandle; -use tokio::{spawn, time}; +use tokio::{spawn, sync::watch, task::JoinHandle, time}; use tracing::{Instrument, debug, error, info, instrument, warn}; use url::Url; use utils::id::{TenantId, TimelineId}; @@ -76,12 +75,20 @@ const DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL: u64 = 3600; /// Static configuration params that don't change after startup. These mostly /// come from the CLI args, or are derived from them. +#[derive(Clone, Debug)] pub struct ComputeNodeParams { /// The ID of the compute pub compute_id: String, - // Url type maintains proper escaping + + /// Url type maintains proper escaping pub connstr: url::Url, + /// The name of the 'weak' superuser role, which we give to the users. + /// It follows the allow list approach, i.e., we take a standard role + /// and grant it extra permissions with explicit GRANTs here and there, + /// and core patches. + pub privileged_role_name: String, + pub resize_swap_on_bind: bool, pub set_disk_quota_for_fs: Option, @@ -176,6 +183,7 @@ pub struct ComputeState { /// WAL flush LSN that is set after terminating Postgres and syncing safekeepers if /// mode == ComputeMode::Primary. 
None otherwise pub terminate_flush_lsn: Option, + pub promote_state: Option>, pub metrics: ComputeMetrics, } @@ -193,6 +201,7 @@ impl ComputeState { lfc_prewarm_state: LfcPrewarmState::default(), lfc_offload_state: LfcOffloadState::default(), terminate_flush_lsn: None, + promote_state: None, } } @@ -272,53 +281,114 @@ impl ParsedSpec { } } -fn extract_pageserver_conninfo_from_guc( - pageserver_connstring_guc: &str, -) -> PageserverConnectionInfo { - PageserverConnectionInfo { - shards: pageserver_connstring_guc - .split(',') - .enumerate() - .map(|(i, connstr)| { - ( - i as u32, - PageserverShardConnectionInfo { - libpq_url: Some(connstr.to_string()), - grpc_url: None, - }, - ) +/// Extract PageserverConnectionInfo from a comma-separated list of libpq connection strings. +/// +/// This is used for backwards-compatilibity, to parse the legacye `pageserver_connstr` +/// field in the compute spec, or the 'neon.pageserver_connstring' GUC. Nowadays, the +/// 'pageserver_connection_info' field should be used instead. +fn extract_pageserver_conninfo_from_connstr( + connstr: &str, + stripe_size: Option, +) -> Result { + let shard_infos: Vec<_> = connstr + .split(',') + .map(|connstr| PageserverShardInfo { + pageservers: vec![PageserverShardConnectionInfo { + id: None, + libpq_url: Some(connstr.to_string()), + grpc_url: None, + }], + }) + .collect(); + + match shard_infos.len() { + 0 => anyhow::bail!("empty connection string"), + 1 => { + // We assume that if there's only connection string, it means "unsharded", + // rather than a sharded system with just a single shard. The latter is + // possible in principle, but we never do it. 
+ let shard_count = ShardCount::unsharded(); + let only_shard = shard_infos.first().unwrap().clone(); + let shards = vec![(ShardIndex::unsharded(), only_shard)]; + Ok(PageserverConnectionInfo { + shard_count, + stripe_size: None, + shards: shards.into_iter().collect(), + prefer_protocol: PageserverProtocol::Libpq, }) - .collect(), - prefer_grpc: false, + } + n => { + if stripe_size.is_none() { + anyhow::bail!("{n} shards but no stripe_size"); + } + let shard_count = ShardCount(n.try_into()?); + let shards = shard_infos + .into_iter() + .enumerate() + .map(|(idx, shard_info)| { + ( + ShardIndex { + shard_count, + shard_number: ShardNumber( + idx.try_into().expect("shard number fits in u8"), + ), + }, + shard_info, + ) + }) + .collect(); + Ok(PageserverConnectionInfo { + shard_count, + stripe_size, + shards, + prefer_protocol: PageserverProtocol::Libpq, + }) + } } } impl TryFrom for ParsedSpec { - type Error = String; - fn try_from(spec: ComputeSpec) -> Result { + type Error = anyhow::Error; + fn try_from(spec: ComputeSpec) -> Result { // Extract the options from the spec file that are needed to connect to // the storage system. // - // For backwards-compatibility, the top-level fields in the spec file - // may be empty. In that case, we need to dig them from the GUCs in the - // cluster.settings field. - let pageserver_conninfo = match &spec.pageserver_connection_info { - Some(x) => x.clone(), - None => { - if let Some(guc) = spec.cluster.settings.find("neon.pageserver_connstring") { - extract_pageserver_conninfo_from_guc(&guc) - } else { - return Err("pageserver connstr should be provided".to_string()); - } + // In compute specs generated by old control plane versions, the spec file might + // be missing the `pageserver_connection_info` field. In that case, we need to dig + // the pageserver connection info from the `pageserver_connstr` field instead, or + // if that's missing too, from the GUC in the cluster.settings field. 
+ let mut pageserver_conninfo = spec.pageserver_connection_info.clone(); + if pageserver_conninfo.is_none() { + if let Some(pageserver_connstr_field) = &spec.pageserver_connstring { + pageserver_conninfo = Some(extract_pageserver_conninfo_from_connstr( + pageserver_connstr_field, + spec.shard_stripe_size, + )?); } - }; + } + if pageserver_conninfo.is_none() { + if let Some(guc) = spec.cluster.settings.find("neon.pageserver_connstring") { + let stripe_size = if let Some(guc) = spec.cluster.settings.find("neon.stripe_size") + { + Some(u32::from_str(&guc)?) + } else { + None + }; + pageserver_conninfo = + Some(extract_pageserver_conninfo_from_connstr(&guc, stripe_size)?); + } + } + let pageserver_conninfo = pageserver_conninfo.ok_or(anyhow::anyhow!( + "pageserver connection information should be provided" + ))?; + // Similarly for safekeeper connection strings let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() { if matches!(spec.mode, ComputeMode::Primary) { spec.cluster .settings .find("neon.safekeepers") - .ok_or("safekeeper connstrings should be provided")? + .ok_or(anyhow::anyhow!("safekeeper connstrings should be provided"))? .split(',') .map(|str| str.to_string()) .collect() @@ -333,22 +403,22 @@ impl TryFrom for ParsedSpec { let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id { tenant_id } else { - spec.cluster + let guc = spec + .cluster .settings .find("neon.tenant_id") - .ok_or("tenant id should be provided") - .map(|s| TenantId::from_str(&s))? - .or(Err("invalid tenant id"))? + .ok_or(anyhow::anyhow!("tenant id should be provided"))?; + TenantId::from_str(&guc).context("invalid tenant id")? }; let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id { timeline_id } else { - spec.cluster + let guc = spec + .cluster .settings .find("neon.timeline_id") - .ok_or("timeline id should be provided") - .map(|s| TimelineId::from_str(&s))? - .or(Err("invalid timeline id"))? 
+ .ok_or(anyhow::anyhow!("timeline id should be provided"))?; + TimelineId::from_str(&guc).context(anyhow::anyhow!("invalid timeline id"))? }; let endpoint_storage_addr: Option = spec @@ -372,7 +442,7 @@ impl TryFrom for ParsedSpec { }; // Now check validity of the parsed specification - res.validate()?; + res.validate().map_err(anyhow::Error::msg)?; Ok(res) } } @@ -452,7 +522,7 @@ impl ComputeNode { let mut new_state = ComputeState::new(); if let Some(spec) = config.spec { - let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow!(msg))?; + let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?; new_state.pspec = Some(pspec); } @@ -983,14 +1053,20 @@ impl ComputeNode { None }; - let mut delay_exit = false; let mut state = self.state.lock().unwrap(); state.terminate_flush_lsn = lsn; - if let ComputeStatus::TerminationPending { mode } = state.status { + + let delay_exit = state.status == ComputeStatus::TerminationPendingFast; + if state.status == ComputeStatus::TerminationPendingFast + || state.status == ComputeStatus::TerminationPendingImmediate + { + info!( + "Changing compute status from {} to {}", + state.status, + ComputeStatus::Terminated + ); state.status = ComputeStatus::Terminated; self.state_changed.notify_all(); - // we were asked to terminate gracefully, don't exit to avoid restart - delay_exit = mode == compute_api::responses::TerminateMode::Fast } drop(state); @@ -1054,12 +1130,13 @@ impl ComputeNode { let spec = compute_state.pspec.as_ref().expect("spec must be set"); let started = Instant::now(); - let (connected, size) = if spec.pageserver_conninfo.prefer_grpc { - self.try_get_basebackup_grpc(spec, lsn)? - } else { - self.try_get_basebackup_libpq(spec, lsn)? 
+ let (connected, size) = match spec.pageserver_conninfo.prefer_protocol { + PageserverProtocol::Grpc => self.try_get_basebackup_grpc(spec, lsn)?, + PageserverProtocol::Libpq => self.try_get_basebackup_libpq(spec, lsn)?, }; + self.fix_zenith_signal_neon_signal()?; + let mut state = self.state.lock().unwrap(); state.metrics.pageserver_connect_micros = connected.duration_since(started).as_micros() as u64; @@ -1069,27 +1146,56 @@ impl ComputeNode { Ok(()) } + /// Move the Zenith signal file to Neon signal file location. + /// This makes Compute compatible with older PageServers that don't yet + /// know about the Zenith->Neon rename. + fn fix_zenith_signal_neon_signal(&self) -> Result<()> { + let datadir = Path::new(&self.params.pgdata); + + let neonsig = datadir.join("neon.signal"); + + if neonsig.is_file() { + return Ok(()); + } + + let zenithsig = datadir.join("zenith.signal"); + + if zenithsig.is_file() { + fs::copy(zenithsig, neonsig)?; + } + + Ok(()) + } + /// Fetches a basebackup via gRPC. The connstring must use grpc://. Returns the timestamp when /// the connection was established, and the (compressed) size of the basebackup. 
fn try_get_basebackup_grpc(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> { + let shard0_index = ShardIndex { + shard_number: ShardNumber(0), + shard_count: spec.pageserver_conninfo.shard_count, + }; let shard0 = spec .pageserver_conninfo .shards - .get(&0) - .expect("shard 0 connection info missing"); - let shard0_url = shard0.grpc_url.clone().expect("no grpc_url for shard 0"); - - let shard_index = match spec.pageserver_conninfo.shards.len() as u8 { - 0 | 1 => ShardIndex::unsharded(), - count => ShardIndex::new(ShardNumber(0), ShardCount(count)), - }; + .get(&shard0_index) + .ok_or_else(|| { + anyhow::anyhow!("shard connection info missing for shard {}", shard0_index) + })?; + let pageserver = shard0 + .pageservers + .first() + .expect("must have at least one pageserver"); + let shard0_url = pageserver + .grpc_url + .clone() + .expect("no grpc_url for shard 0"); let (reader, connected) = tokio::runtime::Handle::current().block_on(async move { let mut client = page_api::Client::connect( shard0_url, spec.tenant_id, spec.timeline_id, - shard_index, + shard0_index, spec.storage_auth_token.clone(), None, // NB: base backups use payload compression ) @@ -1121,12 +1227,25 @@ impl ComputeNode { /// Fetches a basebackup via libpq. The connstring must use postgresql://. Returns the timestamp /// when the connection was established, and the (compressed) size of the basebackup. 
fn try_get_basebackup_libpq(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> { + let shard0_index = ShardIndex { + shard_number: ShardNumber(0), + shard_count: spec.pageserver_conninfo.shard_count, + }; let shard0 = spec .pageserver_conninfo .shards - .get(&0) - .expect("shard 0 connection info missing"); - let shard0_connstr = shard0.libpq_url.clone().expect("no libpq_url for shard 0"); + .get(&shard0_index) + .ok_or_else(|| { + anyhow::anyhow!("shard connection info missing for shard {}", shard0_index) + })?; + let pageserver = shard0 + .pageservers + .first() + .expect("must have at least one pageserver"); + let shard0_connstr = pageserver + .libpq_url + .clone() + .expect("no libpq_url for shard 0"); let mut config = postgres::Config::from_str(&shard0_connstr)?; // Use the storage auth token from the config file, if given. @@ -1286,9 +1405,7 @@ impl ComputeNode { // In case of error, log and fail the check, but don't crash. // We're playing it safe because these errors could be transient - // and we don't yet retry. Also being careful here allows us to - // be backwards compatible with safekeepers that don't have the - // TIMELINE_STATUS API yet. + // and we don't yet retry. 
if responses.len() < quorum { error!( "failed sync safekeepers check {:?} {:?} {:?}", @@ -1391,6 +1508,7 @@ impl ComputeNode { self.create_pgdata()?; config::write_postgres_conf( pgdata_path, + &self.params, &pspec.spec, self.params.internal_http_port, tls_config, @@ -1731,6 +1849,7 @@ impl ComputeNode { } // Run migrations separately to not hold up cold starts + let params = self.params.clone(); tokio::spawn(async move { let mut conf = conf.as_ref().clone(); conf.application_name("compute_ctl:migrations"); @@ -1742,7 +1861,7 @@ impl ComputeNode { eprintln!("connection error: {e}"); } }); - if let Err(e) = handle_migrations(&mut client).await { + if let Err(e) = handle_migrations(params, &mut client).await { error!("Failed to run migrations: {}", e); } } @@ -1821,11 +1940,14 @@ impl ComputeNode { let pgdata_path = Path::new(&self.params.pgdata); config::write_postgres_conf( pgdata_path, + &self.params, &spec, self.params.internal_http_port, tls_config, )?; + self.pg_reload_conf()?; + if !spec.skip_pg_catalog_updates { let max_concurrent_connections = spec.reconfigure_concurrency; // Temporarily reset max_cluster_size in config @@ -1845,10 +1967,9 @@ impl ComputeNode { Ok(()) })?; + self.pg_reload_conf()?; } - self.pg_reload_conf()?; - let unknown_op = "unknown".to_string(); let op_id = spec.operation_uuid.as_ref().unwrap_or(&unknown_op); info!( @@ -1921,7 +2042,8 @@ impl ComputeNode { // exit loop ComputeStatus::Failed - | ComputeStatus::TerminationPending { .. 
} + | ComputeStatus::TerminationPendingFast + | ComputeStatus::TerminationPendingImmediate | ComputeStatus::Terminated => break 'cert_update, // wait @@ -2087,7 +2209,7 @@ LIMIT 100", self.params .remote_ext_base_url .as_ref() - .ok_or(DownloadError::BadInput(anyhow!( + .ok_or(DownloadError::BadInput(anyhow::anyhow!( "Remote extensions storage is not configured", )))?; @@ -2283,7 +2405,7 @@ LIMIT 100", let remote_extensions = spec .remote_extensions .as_ref() - .ok_or(anyhow!("Remote extensions are not configured"))?; + .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?; info!("parse shared_preload_libraries from spec.cluster.settings"); let mut libs_vec = Vec::new(); @@ -2431,14 +2553,31 @@ LIMIT 100", pub fn spawn_lfc_offload_task(self: &Arc, interval: Duration) { self.terminate_lfc_offload_task(); let secs = interval.as_secs(); - info!("spawning lfc offload worker with {secs}s interval"); let this = self.clone(); + + info!("spawning LFC offload worker with {secs}s interval"); let handle = spawn(async move { let mut interval = time::interval(interval); interval.tick().await; // returns immediately loop { interval.tick().await; - this.offload_lfc_async().await; + + let prewarm_state = this.state.lock().unwrap().lfc_prewarm_state.clone(); + // Do not offload LFC state if we are currently prewarming or any issue occurred. + // If we'd do that, we might override the LFC state in endpoint storage with some + // incomplete state. Imagine a situation: + // 1. Endpoint started with `autoprewarm: true` + // 2. While prewarming is not completed, we upload the new incomplete state + // 3. Compute gets interrupted and restarts + // 4. We start again and try to prewarm with the state from 2. 
instead of the previous complete state + if matches!( + prewarm_state, + LfcPrewarmState::Completed + | LfcPrewarmState::NotPrewarmed + | LfcPrewarmState::Skipped + ) { + this.offload_lfc_async().await; + } } }); *self.lfc_offload_task.lock().unwrap() = Some(handle); @@ -2455,19 +2594,11 @@ LIMIT 100", // If the value is -1, we never suspend so set the value to default collection. // If the value is 0, it means default, we will just continue to use the default. if spec.suspend_timeout_seconds == -1 || spec.suspend_timeout_seconds == 0 { - info!( - "[NEON_EXT_INT_UPD] Spec Timeout: {}, New Timeout: {}", - spec.suspend_timeout_seconds, DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL - ); self.params.installed_extensions_collection_interval.store( DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL, std::sync::atomic::Ordering::SeqCst, ); } else { - info!( - "[NEON_EXT_INT_UPD] Spec Timeout: {}", - spec.suspend_timeout_seconds - ); self.params.installed_extensions_collection_interval.store( spec.suspend_timeout_seconds as u64, std::sync::atomic::Ordering::SeqCst, @@ -2485,7 +2616,7 @@ pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> { serde_json::to_string(&extensions).expect("failed to serialize extensions list") ); } - Err(err) => error!("could not get installed extensions: {err:?}"), + Err(err) => error!("could not get installed extensions: {err}"), } Ok(()) } @@ -2598,7 +2729,10 @@ mod tests { match ParsedSpec::try_from(spec.clone()) { Ok(_p) => panic!("Failed to detect duplicate entry"), - Err(e) => assert!(e.starts_with("duplicate entry in safekeeper_connstrings:")), + Err(e) => assert!( + e.to_string() + .starts_with("duplicate entry in safekeeper_connstrings:") + ), }; } } diff --git a/compute_tools/src/compute_prewarm.rs b/compute_tools/src/compute_prewarm.rs index 4190580e5e..07b4a596cc 100644 --- a/compute_tools/src/compute_prewarm.rs +++ b/compute_tools/src/compute_prewarm.rs @@ -70,7 +70,7 @@ impl ComputeNode { } }; let row = 
match client - .query_one("select * from get_prewarm_info()", &[]) + .query_one("select * from neon.get_prewarm_info()", &[]) .await { Ok(row) => row, @@ -89,7 +89,7 @@ impl ComputeNode { self.state.lock().unwrap().lfc_offload_state.clone() } - /// If there is a prewarm request ongoing, return false, true otherwise + /// If there is a prewarm request ongoing, return `false`, `true` otherwise. pub fn prewarm_lfc(self: &Arc, from_endpoint: Option) -> bool { { let state = &mut self.state.lock().unwrap().lfc_prewarm_state; @@ -101,14 +101,25 @@ impl ComputeNode { let cloned = self.clone(); spawn(async move { - let Err(err) = cloned.prewarm_impl(from_endpoint).await else { - cloned.state.lock().unwrap().lfc_prewarm_state = LfcPrewarmState::Completed; - return; - }; - error!(%err); - cloned.state.lock().unwrap().lfc_prewarm_state = LfcPrewarmState::Failed { - error: err.to_string(), + let state = match cloned.prewarm_impl(from_endpoint).await { + Ok(true) => LfcPrewarmState::Completed, + Ok(false) => { + info!( + "skipping LFC prewarm because LFC state is not found in endpoint storage" + ); + LfcPrewarmState::Skipped + } + Err(err) => { + crate::metrics::LFC_PREWARM_ERRORS.inc(); + error!(%err, "could not prewarm LFC"); + + LfcPrewarmState::Failed { + error: err.to_string(), + } + } }; + + cloned.state.lock().unwrap().lfc_prewarm_state = state; }); true } @@ -119,15 +130,21 @@ impl ComputeNode { EndpointStoragePair::from_spec_and_endpoint(state.pspec.as_ref().unwrap(), from_endpoint) } - async fn prewarm_impl(&self, from_endpoint: Option) -> Result<()> { + /// Request LFC state from endpoint storage and load corresponding pages into Postgres. + /// Returns a result with `false` if the LFC state is not found in endpoint storage. 
+ async fn prewarm_impl(&self, from_endpoint: Option) -> Result { let EndpointStoragePair { url, token } = self.endpoint_storage_pair(from_endpoint)?; - info!(%url, "requesting LFC state from endpoint storage"); + info!(%url, "requesting LFC state from endpoint storage"); let request = Client::new().get(&url).bearer_auth(token); let res = request.send().await.context("querying endpoint storage")?; let status = res.status(); - if status != StatusCode::OK { - bail!("{status} querying endpoint storage") + match status { + StatusCode::OK => (), + StatusCode::NOT_FOUND => { + return Ok(false); + } + _ => bail!("{status} querying endpoint storage"), } let mut uncompressed = Vec::new(); @@ -140,15 +157,18 @@ impl ComputeNode { .await .context("decoding LFC state")?; let uncompressed_len = uncompressed.len(); - info!(%url, "downloaded LFC state, uncompressed size {uncompressed_len}, loading into postgres"); + + info!(%url, "downloaded LFC state, uncompressed size {uncompressed_len}, loading into Postgres"); ComputeNode::get_maintenance_client(&self.tokio_conn_conf) .await .context("connecting to postgres")? 
- .query_one("select prewarm_local_cache($1)", &[&uncompressed]) + .query_one("select neon.prewarm_local_cache($1)", &[&uncompressed]) .await .context("loading LFC state into postgres") - .map(|_| ()) + .map(|_| ())?; + + Ok(true) } /// If offload request is ongoing, return false, true otherwise @@ -176,11 +196,14 @@ impl ComputeNode { async fn offload_lfc_with_state_update(&self) { crate::metrics::LFC_OFFLOADS.inc(); + let Err(err) = self.offload_lfc_impl().await else { self.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Completed; return; }; - error!(%err); + + crate::metrics::LFC_OFFLOAD_ERRORS.inc(); + error!(%err, "could not offload LFC state to endpoint storage"); self.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Failed { error: err.to_string(), }; @@ -188,13 +211,13 @@ impl ComputeNode { async fn offload_lfc_impl(&self) -> Result<()> { let EndpointStoragePair { url, token } = self.endpoint_storage_pair(None)?; - info!(%url, "requesting LFC state from postgres"); + info!(%url, "requesting LFC state from Postgres"); let mut compressed = Vec::new(); ComputeNode::get_maintenance_client(&self.tokio_conn_conf) .await .context("connecting to postgres")? - .query_one("select get_local_cache_state()", &[]) + .query_one("select neon.get_local_cache_state()", &[]) .await .context("querying LFC state")? 
.try_get::(0) @@ -203,13 +226,17 @@ impl ComputeNode { .read_to_end(&mut compressed) .await .context("compressing LFC state")?; + let compressed_len = compressed.len(); info!(%url, "downloaded LFC state, compressed size {compressed_len}, writing to endpoint storage"); let request = Client::new().put(url).bearer_auth(token).body(compressed); match request.send().await { Ok(res) if res.status() == StatusCode::OK => Ok(()), - Ok(res) => bail!("Error writing to endpoint storage: {}", res.status()), + Ok(res) => bail!( + "Request to endpoint storage failed with status: {}", + res.status() + ), Err(err) => Err(err).context("writing to endpoint storage"), } } diff --git a/compute_tools/src/compute_promote.rs b/compute_tools/src/compute_promote.rs new file mode 100644 index 0000000000..42256faa22 --- /dev/null +++ b/compute_tools/src/compute_promote.rs @@ -0,0 +1,132 @@ +use crate::compute::ComputeNode; +use anyhow::{Context, Result, bail}; +use compute_api::{ + responses::{LfcPrewarmState, PromoteState, SafekeepersLsn}, + spec::ComputeMode, +}; +use std::{sync::Arc, time::Duration}; +use tokio::time::sleep; +use utils::lsn::Lsn; + +impl ComputeNode { + /// Returns only when promote fails or succeeds. If a network error occurs + /// and http client disconnects, this does not stop promotion, and subsequent + /// calls block until promote finishes. 
+ /// Called by control plane on secondary after primary endpoint is terminated + pub async fn promote(self: &Arc, safekeepers_lsn: SafekeepersLsn) -> PromoteState { + let cloned = self.clone(); + let start_promotion = || { + let (tx, rx) = tokio::sync::watch::channel(PromoteState::NotPromoted); + tokio::spawn(async move { + tx.send(match cloned.promote_impl(safekeepers_lsn).await { + Ok(_) => PromoteState::Completed, + Err(err) => { + tracing::error!(%err, "promoting"); + PromoteState::Failed { + error: err.to_string(), + } + } + }) + }); + rx + }; + + let mut task; + // self.state is unlocked after block ends so we lock it in promote_impl + // and task.changed() is reached + { + task = self + .state + .lock() + .unwrap() + .promote_state + .get_or_insert_with(start_promotion) + .clone() + } + task.changed().await.expect("promote sender dropped"); + task.borrow().clone() + } + + // Why do we have to supply safekeepers? + // For secondary we use primary_connection_conninfo so safekeepers field is empty + async fn promote_impl(&self, safekeepers_lsn: SafekeepersLsn) -> Result<()> { + { + let state = self.state.lock().unwrap(); + let mode = &state.pspec.as_ref().unwrap().spec.mode; + if *mode != ComputeMode::Replica { + bail!("{} is not replica", mode.to_type_str()); + } + + // we don't need to query Postgres so not self.lfc_prewarm_state() + match &state.lfc_prewarm_state { + LfcPrewarmState::NotPrewarmed | LfcPrewarmState::Prewarming => { + bail!("prewarm not requested or pending") + } + LfcPrewarmState::Failed { error } => { + tracing::warn!(%error, "replica prewarm failed") + } + _ => {} + } + } + + let client = ComputeNode::get_maintenance_client(&self.tokio_conn_conf) + .await + .context("connecting to postgres")?; + + let primary_lsn = safekeepers_lsn.wal_flush_lsn; + let mut last_wal_replay_lsn: Lsn = Lsn::INVALID; + const RETRIES: i32 = 20; + for i in 0..=RETRIES { + let row = client + .query_one("SELECT pg_last_wal_replay_lsn()", &[]) + .await + 
.context("getting last replay lsn")?; + let lsn: u64 = row.get::(0).into(); + last_wal_replay_lsn = lsn.into(); + if last_wal_replay_lsn >= primary_lsn { + break; + } + tracing::info!("Try {i}, replica lsn {last_wal_replay_lsn}, primary lsn {primary_lsn}"); + sleep(Duration::from_secs(1)).await; + } + if last_wal_replay_lsn < primary_lsn { + bail!("didn't catch up with primary in {RETRIES} retries"); + } + + // using $1 doesn't work with ALTER SYSTEM SET + let safekeepers_sql = format!( + "ALTER SYSTEM SET neon.safekeepers='{}'", + safekeepers_lsn.safekeepers + ); + client + .query(&safekeepers_sql, &[]) + .await + .context("setting safekeepers")?; + client + .query("SELECT pg_reload_conf()", &[]) + .await + .context("reloading postgres config")?; + let row = client + .query_one("SELECT * FROM pg_promote()", &[]) + .await + .context("pg_promote")?; + if !row.get::(0) { + bail!("pg_promote() returned false"); + } + + let client = ComputeNode::get_maintenance_client(&self.tokio_conn_conf) + .await + .context("connecting to postgres")?; + let row = client + .query_one("SHOW transaction_read_only", &[]) + .await + .context("getting transaction_read_only")?; + if row.get::(0) == "on" { + bail!("replica in read only mode after promotion"); + } + + let mut state = self.state.lock().unwrap(); + state.pspec.as_mut().unwrap().spec.mode = ComputeMode::Primary; + Ok(()) + } +} diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs index ceffbf40de..8821611f0c 100644 --- a/compute_tools/src/config.rs +++ b/compute_tools/src/config.rs @@ -9,11 +9,14 @@ use std::path::Path; use compute_api::responses::TlsConfig; use compute_api::spec::{ComputeAudit, ComputeMode, ComputeSpec, GenericOption}; +use crate::compute::ComputeNodeParams; use crate::pg_helpers::{ GenericOptionExt, GenericOptionsSearch, PgOptionsSerialize, escape_conf_value, }; use crate::tls::{self, SERVER_CRT, SERVER_KEY}; +use utils::shard::{ShardIndex, ShardNumber}; + /// Check that `line` is inside a 
text file and put it there if it is not. /// Create file if it doesn't exist. pub fn line_in_file(path: &Path, line: &str) -> Result { @@ -41,6 +44,7 @@ pub fn line_in_file(path: &Path, line: &str) -> Result { /// Create or completely rewrite configuration file specified by `path` pub fn write_postgres_conf( pgdata_path: &Path, + params: &ComputeNodeParams, spec: &ComputeSpec, extension_server_port: u16, tls_config: &Option, @@ -56,24 +60,53 @@ pub fn write_postgres_conf( // Add options for connecting to storage writeln!(file, "# Neon storage settings")?; - + writeln!(file)?; if let Some(conninfo) = &spec.pageserver_connection_info { + // Stripe size GUC should be defined prior to connection string + if let Some(stripe_size) = conninfo.stripe_size { + writeln!( + file, + "# from compute spec's pageserver_conninfo.stripe_size field" + )?; + writeln!(file, "neon.stripe_size={stripe_size}")?; + } + let mut libpq_urls: Option> = Some(Vec::new()); let mut grpc_urls: Option> = Some(Vec::new()); + let num_shards = if conninfo.shard_count.0 == 0 { + 1 // unsharded, treat it as a single shard + } else { + conninfo.shard_count.0 + }; - for shardno in 0..conninfo.shards.len() { - let info = conninfo.shards.get(&(shardno as u32)).ok_or_else(|| { - anyhow::anyhow!("shard {shardno} missing from pageserver_connection_info shard map") + for shard_number in 0..num_shards { + let shard_index = ShardIndex { + shard_number: ShardNumber(shard_number), + shard_count: conninfo.shard_count, + }; + let info = conninfo.shards.get(&shard_index).ok_or_else(|| { + anyhow::anyhow!( + "shard {shard_index} missing from pageserver_connection_info shard map" + ) })?; - if let Some(url) = &info.libpq_url { + let first_pageserver = info + .pageservers + .first() + .expect("must have at least one pageserver"); + + // Add the libpq URL to the array, or if the URL is missing, reset the array + // forgetting any previous entries. All servers must have a libpq URL, or none + // at all. 
+ if let Some(url) = &first_pageserver.libpq_url { if let Some(ref mut urls) = libpq_urls { urls.push(url.clone()); } } else { libpq_urls = None } - if let Some(url) = &info.grpc_url { + // Similarly for gRPC URLs + if let Some(url) = &first_pageserver.grpc_url { if let Some(ref mut urls) = grpc_urls { urls.push(url.clone()); } @@ -82,6 +115,10 @@ pub fn write_postgres_conf( } } if let Some(libpq_urls) = libpq_urls { + writeln!( + file, + "# derived from compute spec's pageserver_conninfo field" + )?; writeln!( file, "neon.pageserver_connstring={}", @@ -91,6 +128,10 @@ pub fn write_postgres_conf( writeln!(file, "# no neon.pageserver_connstring")?; } if let Some(grpc_urls) = grpc_urls { + writeln!( + file, + "# derived from compute spec's pageserver_conninfo field" + )?; writeln!( file, "neon.pageserver_grpc_urls={}", @@ -99,11 +140,19 @@ pub fn write_postgres_conf( } else { writeln!(file, "# no neon.pageserver_grpc_urls")?; } + } else { + // Stripe size GUC should be defined prior to connection string + if let Some(stripe_size) = spec.shard_stripe_size { + writeln!(file, "# from compute spec's shard_stripe_size field")?; + writeln!(file, "neon.stripe_size={stripe_size}")?; + } + + if let Some(s) = &spec.pageserver_connstring { + writeln!(file, "# from compute spec's pageserver_connstring field")?; + writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?; + } } - if let Some(stripe_size) = spec.shard_stripe_size { - writeln!(file, "neon.stripe_size={stripe_size}")?; - } if !spec.safekeeper_connstrings.is_empty() { let mut neon_safekeepers_value = String::new(); tracing::info!( @@ -203,6 +252,12 @@ pub fn write_postgres_conf( } } + writeln!( + file, + "neon.privileged_role_name={}", + escape_conf_value(params.privileged_role_name.as_str()) + )?; + // If there are any extra options in the 'settings' field, append those if spec.cluster.settings.is_some() { writeln!(file, "# Managed by compute_ctl: begin")?; diff --git 
a/compute_tools/src/http/openapi_spec.yaml b/compute_tools/src/http/openapi_spec.yaml index bbdb7d0917..3cf5ea7c51 100644 --- a/compute_tools/src/http/openapi_spec.yaml +++ b/compute_tools/src/http/openapi_spec.yaml @@ -83,6 +83,87 @@ paths: schema: $ref: "#/components/schemas/DbsAndRoles" + /promote: + post: + tags: + - Promotion + summary: Promote secondary replica to primary + description: "" + operationId: promoteReplica + requestBody: + description: Promote requests data + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SafekeepersLsn" + responses: + 200: + description: Promote succeeded or wasn't started + content: + application/json: + schema: + $ref: "#/components/schemas/PromoteState" + 500: + description: Promote failed + content: + application/json: + schema: + $ref: "#/components/schemas/PromoteState" + + /lfc/prewarm: + post: + summary: Request LFC Prewarm + parameters: + - name: from_endpoint + in: query + schema: + type: string + description: "" + operationId: lfcPrewarm + responses: + 202: + description: LFC prewarm started + 429: + description: LFC prewarm ongoing + get: + tags: + - Prewarm + summary: Get LFC prewarm state + description: "" + operationId: getLfcPrewarmState + responses: + 200: + description: Prewarm state + content: + application/json: + schema: + $ref: "#/components/schemas/LfcPrewarmState" + + /lfc/offload: + post: + summary: Request LFC offload + description: "" + operationId: lfcOffload + responses: + 202: + description: LFC offload started + 429: + description: LFC offload ongoing + get: + tags: + - Prewarm + summary: Get LFC offloading state + description: "" + operationId: getLfcOffloadState + responses: + 200: + description: Offload state + content: + application/json: + schema: + $ref: "#/components/schemas/LfcOffloadState" + /database_schema: get: tags: @@ -290,9 +371,28 @@ paths: summary: Terminate Postgres and wait for it to exit description: "" operationId: terminate + parameters: 
+ - name: mode + in: query + description: "Terminate mode: fast (wait 30s before returning) and immediate" + required: false + schema: + type: string + enum: ["fast", "immediate"] + default: fast responses: 200: description: Result + content: + application/json: + schema: + $ref: "#/components/schemas/TerminateResponse" + 201: + description: Result if compute is already terminated + content: + application/json: + schema: + $ref: "#/components/schemas/TerminateResponse" 412: description: "wrong state" content: @@ -335,15 +435,6 @@ components: total_startup_ms: type: integer - Info: - type: object - description: Information about VM/Pod. - required: - - num_cpus - properties: - num_cpus: - type: integer - DbsAndRoles: type: object description: Databases and Roles @@ -458,11 +549,14 @@ components: type: string enum: - empty - - init - - failed - - running - configuration_pending + - init + - running - configuration + - failed + - termination_pending_fast + - termination_pending_immediate + - terminated example: running ExtensionInstallRequest: @@ -497,25 +591,69 @@ components: type: string example: "1.0.0" - InstalledExtensions: + SafekeepersLsn: type: object + required: + - safekeepers + - wal_flush_lsn properties: - extensions: - description: Contains list of installed extensions. 
- type: array - items: - type: object - properties: - extname: - type: string - version: - type: string - items: - type: string - n_databases: - type: integer - owned_by_superuser: - type: integer + safekeepers: + description: Primary replica safekeepers + type: string + wal_flush_lsn: + description: Primary last WAL flush LSN + type: string + + LfcPrewarmState: + type: object + required: + - status + - total + - prewarmed + - skipped + properties: + status: + description: LFC prewarm status + enum: [not_prewarmed, prewarming, completed, failed, skipped] + type: string + error: + description: LFC prewarm error, if any + type: string + total: + description: Total pages processed + type: integer + prewarmed: + description: Total pages prewarmed + type: integer + skipped: + description: Pages processed but not prewarmed + type: integer + + LfcOffloadState: + type: object + required: + - status + properties: + status: + description: LFC offload status + enum: [not_offloaded, offloading, completed, failed] + type: string + error: + description: LFC offload error, if any + type: string + + PromoteState: + type: object + required: + - status + properties: + status: + description: Promote result + enum: [not_promoted, completed, failed] + type: string + error: + description: Promote error, if any + type: string SetRoleGrantsRequest: type: object @@ -544,6 +682,17 @@ components: description: Role name. 
example: "neon" + TerminateResponse: + type: object + required: + - lsn + properties: + lsn: + type: string + nullable: true + description: "last WAL flush LSN" + example: "0/028F10D8" + SetRoleGrantsResponse: type: object required: diff --git a/compute_tools/src/http/routes/mod.rs b/compute_tools/src/http/routes/mod.rs index 432e66a830..dd71f663eb 100644 --- a/compute_tools/src/http/routes/mod.rs +++ b/compute_tools/src/http/routes/mod.rs @@ -14,6 +14,7 @@ pub(in crate::http) mod insights; pub(in crate::http) mod lfc; pub(in crate::http) mod metrics; pub(in crate::http) mod metrics_json; +pub(in crate::http) mod promote; pub(in crate::http) mod status; pub(in crate::http) mod terminate; diff --git a/compute_tools/src/http/routes/promote.rs b/compute_tools/src/http/routes/promote.rs new file mode 100644 index 0000000000..bc5f93b4da --- /dev/null +++ b/compute_tools/src/http/routes/promote.rs @@ -0,0 +1,14 @@ +use crate::http::JsonResponse; +use axum::Form; +use http::StatusCode; + +pub(in crate::http) async fn promote( + compute: axum::extract::State>, + Form(safekeepers_lsn): Form, +) -> axum::response::Response { + let state = compute.promote(safekeepers_lsn).await; + if let compute_api::responses::PromoteState::Failed { error } = state { + return JsonResponse::error(StatusCode::INTERNAL_SERVER_ERROR, error); + } + JsonResponse::success(StatusCode::OK, state) +} diff --git a/compute_tools/src/http/routes/terminate.rs b/compute_tools/src/http/routes/terminate.rs index 32d90a5990..5b30b020c8 100644 --- a/compute_tools/src/http/routes/terminate.rs +++ b/compute_tools/src/http/routes/terminate.rs @@ -3,7 +3,7 @@ use crate::http::JsonResponse; use axum::extract::State; use axum::response::Response; use axum_extra::extract::OptionalQuery; -use compute_api::responses::{ComputeStatus, TerminateResponse}; +use compute_api::responses::{ComputeStatus, TerminateMode, TerminateResponse}; use http::StatusCode; use serde::Deserialize; use std::sync::Arc; @@ -12,7 +12,7 @@ use 
tracing::info; #[derive(Deserialize, Default)] pub struct TerminateQuery { - mode: compute_api::responses::TerminateMode, + mode: TerminateMode, } /// Terminate the compute. @@ -24,16 +24,16 @@ pub(in crate::http) async fn terminate( { let mut state = compute.state.lock().unwrap(); if state.status == ComputeStatus::Terminated { - return JsonResponse::success(StatusCode::CREATED, state.terminate_flush_lsn); + let response = TerminateResponse { + lsn: state.terminate_flush_lsn, + }; + return JsonResponse::success(StatusCode::CREATED, response); } if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) { return JsonResponse::invalid_status(state.status); } - state.set_status( - ComputeStatus::TerminationPending { mode }, - &compute.state_changed, - ); + state.set_status(mode.into(), &compute.state_changed); } forward_termination_signal(false); diff --git a/compute_tools/src/http/server.rs b/compute_tools/src/http/server.rs index d5d2427971..17939e39d4 100644 --- a/compute_tools/src/http/server.rs +++ b/compute_tools/src/http/server.rs @@ -23,7 +23,7 @@ use super::{ middleware::authorize::Authorize, routes::{ check_writability, configure, database_schema, dbs_and_roles, extension_server, extensions, - grants, insights, lfc, metrics, metrics_json, status, terminate, + grants, insights, lfc, metrics, metrics_json, promote, status, terminate, }, }; use crate::compute::ComputeNode; @@ -87,6 +87,7 @@ impl From<&Server> for Router> { let authenticated_router = Router::>::new() .route("/lfc/prewarm", get(lfc::prewarm_state).post(lfc::prewarm)) .route("/lfc/offload", get(lfc::offload_state).post(lfc::offload)) + .route("/promote", post(promote::promote)) .route("/check_writability", post(check_writability::is_writable)) .route("/configure", post(configure::configure)) .route("/database_schema", get(database_schema::get_schema_dump)) diff --git a/compute_tools/src/installed_extensions.rs b/compute_tools/src/installed_extensions.rs index 411e03b7ec..90e1a17be4 
100644 --- a/compute_tools/src/installed_extensions.rs +++ b/compute_tools/src/installed_extensions.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use anyhow::Result; use compute_api::responses::{InstalledExtension, InstalledExtensions}; +use tokio_postgres::error::Error as PostgresError; use tokio_postgres::{Client, Config, NoTls}; use crate::metrics::INSTALLED_EXTENSIONS; @@ -10,7 +11,7 @@ use crate::metrics::INSTALLED_EXTENSIONS; /// and to make database listing query here more explicit. /// /// Limit the number of databases to 500 to avoid excessive load. -async fn list_dbs(client: &mut Client) -> Result> { +async fn list_dbs(client: &mut Client) -> Result, PostgresError> { // `pg_database.datconnlimit = -2` means that the database is in the // invalid state let databases = client @@ -37,7 +38,9 @@ async fn list_dbs(client: &mut Client) -> Result> { /// Same extension can be installed in multiple databases with different versions, /// so we report a separate metric (number of databases where it is installed) /// for each extension version. 
-pub async fn get_installed_extensions(mut conf: Config) -> Result { +pub async fn get_installed_extensions( + mut conf: Config, +) -> Result { conf.application_name("compute_ctl:get_installed_extensions"); let databases: Vec = { let (mut client, connection) = conf.connect(NoTls).await?; diff --git a/compute_tools/src/lib.rs b/compute_tools/src/lib.rs index 3899a1ca76..2d5d4565b7 100644 --- a/compute_tools/src/lib.rs +++ b/compute_tools/src/lib.rs @@ -12,6 +12,7 @@ pub mod logger; pub mod catalog; pub mod compute; pub mod compute_prewarm; +pub mod compute_promote; pub mod disk_quota; pub mod extension_server; pub mod installed_extensions; diff --git a/compute_tools/src/lsn_lease.rs b/compute_tools/src/lsn_lease.rs index 241b4cf467..6abfea82e0 100644 --- a/compute_tools/src/lsn_lease.rs +++ b/compute_tools/src/lsn_lease.rs @@ -4,13 +4,13 @@ use std::thread; use std::time::{Duration, SystemTime}; use anyhow::{Result, bail}; -use compute_api::spec::{ComputeMode, PageserverConnectionInfo}; +use compute_api::spec::{ComputeMode, PageserverConnectionInfo, PageserverProtocol}; use pageserver_page_api as page_api; use postgres::{NoTls, SimpleQueryMessage}; use tracing::{info, warn}; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; -use utils::shard::{ShardCount, ShardNumber, TenantShardId}; +use utils::shard::TenantShardId; use crate::compute::ComputeNode; @@ -116,37 +116,38 @@ fn try_acquire_lsn_lease( timeline_id: TimelineId, lsn: Lsn, ) -> Result> { - let shard_count = conninfo.shards.len(); let mut leases = Vec::new(); - for (shard_number, shard) in conninfo.shards.into_iter() { - let tenant_shard_id = match shard_count { - 0 | 1 => TenantShardId::unsharded(tenant_id), - shard_count => TenantShardId { - tenant_id, - shard_number: ShardNumber(shard_number as u8), - shard_count: ShardCount::new(shard_count as u8), - }, + for (shard_index, shard) in conninfo.shards.into_iter() { + let tenant_shard_id = TenantShardId { + tenant_id, + shard_number: 
shard_index.shard_number, + shard_count: shard_index.shard_count, }; - let lease = if conninfo.prefer_grpc { - acquire_lsn_lease_grpc( - &shard.grpc_url.unwrap(), - auth, - tenant_shard_id, - timeline_id, - lsn, - )? - } else { - acquire_lsn_lease_libpq( - &shard.libpq_url.unwrap(), - auth, - tenant_shard_id, - timeline_id, - lsn, - )? - }; - leases.push(lease); + // XXX: If there are more than pageserver for the one shard, do we need to get a + // leas on all of them? Currently, that's what we assume, but this is hypothetical + // as of this writing, as we never pass the info for more than one pageserver per + // shard. + for pageserver in shard.pageservers { + let lease = match conninfo.prefer_protocol { + PageserverProtocol::Grpc => acquire_lsn_lease_grpc( + &pageserver.grpc_url.unwrap(), + auth, + tenant_shard_id, + timeline_id, + lsn, + )?, + PageserverProtocol::Libpq => acquire_lsn_lease_libpq( + &pageserver.libpq_url.unwrap(), + auth, + tenant_shard_id, + timeline_id, + lsn, + )?, + }; + leases.push(lease); + } } Ok(leases.into_iter().min().flatten()) diff --git a/compute_tools/src/metrics.rs b/compute_tools/src/metrics.rs index 8f81675c49..6e4df73c0f 100644 --- a/compute_tools/src/metrics.rs +++ b/compute_tools/src/metrics.rs @@ -105,6 +105,14 @@ pub(crate) static LFC_PREWARMS: Lazy = Lazy::new(|| { .expect("failed to define a metric") }); +pub(crate) static LFC_PREWARM_ERRORS: Lazy = Lazy::new(|| { + register_int_counter!( + "compute_ctl_lfc_prewarm_errors_total", + "Total number of LFC prewarm errors", + ) + .expect("failed to define a metric") +}); + pub(crate) static LFC_OFFLOADS: Lazy = Lazy::new(|| { register_int_counter!( "compute_ctl_lfc_offloads_total", @@ -113,6 +121,14 @@ pub(crate) static LFC_OFFLOADS: Lazy = Lazy::new(|| { .expect("failed to define a metric") }); +pub(crate) static LFC_OFFLOAD_ERRORS: Lazy = Lazy::new(|| { + register_int_counter!( + "compute_ctl_lfc_offload_errors_total", + "Total number of LFC offload errors", + ) + 
.expect("failed to define a metric") +}); + pub fn collect() -> Vec { let mut metrics = COMPUTE_CTL_UP.collect(); metrics.extend(INSTALLED_EXTENSIONS.collect()); @@ -123,6 +139,8 @@ pub fn collect() -> Vec { metrics.extend(PG_CURR_DOWNTIME_MS.collect()); metrics.extend(PG_TOTAL_DOWNTIME_MS.collect()); metrics.extend(LFC_PREWARMS.collect()); + metrics.extend(LFC_PREWARM_ERRORS.collect()); metrics.extend(LFC_OFFLOADS.collect()); + metrics.extend(LFC_OFFLOAD_ERRORS.collect()); metrics } diff --git a/compute_tools/src/migrations/0001-add_bypass_rls_to_privileged_role.sql b/compute_tools/src/migrations/0001-add_bypass_rls_to_privileged_role.sql new file mode 100644 index 0000000000..6443645336 --- /dev/null +++ b/compute_tools/src/migrations/0001-add_bypass_rls_to_privileged_role.sql @@ -0,0 +1 @@ +ALTER ROLE {privileged_role_name} BYPASSRLS; diff --git a/compute_tools/src/migrations/0001-neon_superuser_bypass_rls.sql b/compute_tools/src/migrations/0001-neon_superuser_bypass_rls.sql deleted file mode 100644 index 73b36a37f6..0000000000 --- a/compute_tools/src/migrations/0001-neon_superuser_bypass_rls.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER ROLE neon_superuser BYPASSRLS; diff --git a/compute_tools/src/migrations/0002-alter_roles.sql b/compute_tools/src/migrations/0002-alter_roles.sql index 6cb49f873f..367356e6eb 100644 --- a/compute_tools/src/migrations/0002-alter_roles.sql +++ b/compute_tools/src/migrations/0002-alter_roles.sql @@ -1,8 +1,21 @@ +-- On December 8th, 2023, an engineering escalation (INC-110) was opened after +-- it was found that BYPASSRLS was being applied to all roles. 
+-- +-- PR that introduced the issue: https://github.com/neondatabase/neon/pull/5657 +-- Subsequent commit on main: https://github.com/neondatabase/neon/commit/ad99fa5f0393e2679e5323df653c508ffa0ac072 +-- +-- NOBYPASSRLS and INHERIT are the defaults for a Postgres role, but because it +-- isn't easy to know if a Postgres cluster is affected by the issue, we need to +-- keep the migration around for a long time, if not indefinitely, so any +-- cluster can be fixed. +-- +-- Branching is the gift that keeps on giving... + DO $$ DECLARE role_name text; BEGIN - FOR role_name IN SELECT rolname FROM pg_roles WHERE pg_has_role(rolname, 'neon_superuser', 'member') + FOR role_name IN SELECT rolname FROM pg_roles WHERE pg_has_role(rolname, '{privileged_role_name}', 'member') LOOP RAISE NOTICE 'EXECUTING ALTER ROLE % INHERIT', quote_ident(role_name); EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' INHERIT'; @@ -10,7 +23,7 @@ BEGIN FOR role_name IN SELECT rolname FROM pg_roles WHERE - NOT pg_has_role(rolname, 'neon_superuser', 'member') AND NOT starts_with(rolname, 'pg_') + NOT pg_has_role(rolname, '{privileged_role_name}', 'member') AND NOT starts_with(rolname, 'pg_') LOOP RAISE NOTICE 'EXECUTING ALTER ROLE % NOBYPASSRLS', quote_ident(role_name); EXECUTE 'ALTER ROLE ' || quote_ident(role_name) || ' NOBYPASSRLS'; diff --git a/compute_tools/src/migrations/0003-grant_pg_create_subscription_to_neon_superuser.sql b/compute_tools/src/migrations/0003-grant_pg_create_subscription_to_privileged_role.sql similarity index 63% rename from compute_tools/src/migrations/0003-grant_pg_create_subscription_to_neon_superuser.sql rename to compute_tools/src/migrations/0003-grant_pg_create_subscription_to_privileged_role.sql index 37f0ce211f..adf159dc06 100644 --- a/compute_tools/src/migrations/0003-grant_pg_create_subscription_to_neon_superuser.sql +++ b/compute_tools/src/migrations/0003-grant_pg_create_subscription_to_privileged_role.sql @@ -1,6 +1,6 @@ DO $$ BEGIN IF (SELECT 
setting::numeric >= 160000 FROM pg_settings WHERE name = 'server_version_num') THEN - EXECUTE 'GRANT pg_create_subscription TO neon_superuser'; + EXECUTE 'GRANT pg_create_subscription TO {privileged_role_name}'; END IF; END $$; diff --git a/compute_tools/src/migrations/0004-grant_pg_monitor_to_neon_superuser.sql b/compute_tools/src/migrations/0004-grant_pg_monitor_to_neon_superuser.sql deleted file mode 100644 index 11afd3b635..0000000000 --- a/compute_tools/src/migrations/0004-grant_pg_monitor_to_neon_superuser.sql +++ /dev/null @@ -1 +0,0 @@ -GRANT pg_monitor TO neon_superuser WITH ADMIN OPTION; diff --git a/compute_tools/src/migrations/0004-grant_pg_monitor_to_privileged_role.sql b/compute_tools/src/migrations/0004-grant_pg_monitor_to_privileged_role.sql new file mode 100644 index 0000000000..6a7ed4007f --- /dev/null +++ b/compute_tools/src/migrations/0004-grant_pg_monitor_to_privileged_role.sql @@ -0,0 +1 @@ +GRANT pg_monitor TO {privileged_role_name} WITH ADMIN OPTION; diff --git a/compute_tools/src/migrations/0005-grant_all_on_tables_to_neon_superuser.sql b/compute_tools/src/migrations/0005-grant_all_on_tables_to_privileged_role.sql similarity index 58% rename from compute_tools/src/migrations/0005-grant_all_on_tables_to_neon_superuser.sql rename to compute_tools/src/migrations/0005-grant_all_on_tables_to_privileged_role.sql index 8abe052494..c31f99f3cb 100644 --- a/compute_tools/src/migrations/0005-grant_all_on_tables_to_neon_superuser.sql +++ b/compute_tools/src/migrations/0005-grant_all_on_tables_to_privileged_role.sql @@ -1,4 +1,4 @@ -- SKIP: Deemed insufficient for allowing relations created by extensions to be --- interacted with by neon_superuser without permission issues. +-- interacted with by {privileged_role_name} without permission issues. 
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO neon_superuser; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO {privileged_role_name}; diff --git a/compute_tools/src/migrations/0006-grant_all_on_sequences_to_neon_superuser.sql b/compute_tools/src/migrations/0006-grant_all_on_sequences_to_privileged_role.sql similarity index 57% rename from compute_tools/src/migrations/0006-grant_all_on_sequences_to_neon_superuser.sql rename to compute_tools/src/migrations/0006-grant_all_on_sequences_to_privileged_role.sql index 5bcb026e0c..fadac9ac3b 100644 --- a/compute_tools/src/migrations/0006-grant_all_on_sequences_to_neon_superuser.sql +++ b/compute_tools/src/migrations/0006-grant_all_on_sequences_to_privileged_role.sql @@ -1,4 +1,4 @@ -- SKIP: Deemed insufficient for allowing relations created by extensions to be --- interacted with by neon_superuser without permission issues. +-- interacted with by {privileged_role_name} without permission issues. -ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO neon_superuser; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO {privileged_role_name}; diff --git a/compute_tools/src/migrations/0007-grant_all_on_tables_to_neon_superuser_with_grant_option.sql b/compute_tools/src/migrations/0007-grant_all_on_tables_with_grant_option_to_privileged_role.sql similarity index 73% rename from compute_tools/src/migrations/0007-grant_all_on_tables_to_neon_superuser_with_grant_option.sql rename to compute_tools/src/migrations/0007-grant_all_on_tables_with_grant_option_to_privileged_role.sql index ce7c96753e..5caa9b7829 100644 --- a/compute_tools/src/migrations/0007-grant_all_on_tables_to_neon_superuser_with_grant_option.sql +++ b/compute_tools/src/migrations/0007-grant_all_on_tables_with_grant_option_to_privileged_role.sql @@ -1,3 +1,3 @@ -- SKIP: Moved inline to the handle_grants() functions. 
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO neon_superuser WITH GRANT OPTION; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO {privileged_role_name} WITH GRANT OPTION; diff --git a/compute_tools/src/migrations/0008-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql b/compute_tools/src/migrations/0008-grant_all_on_sequences_with_grant_option_to_privileged_role.sql similarity index 72% rename from compute_tools/src/migrations/0008-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql rename to compute_tools/src/migrations/0008-grant_all_on_sequences_with_grant_option_to_privileged_role.sql index 72baf920cd..03de0c37ac 100644 --- a/compute_tools/src/migrations/0008-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql +++ b/compute_tools/src/migrations/0008-grant_all_on_sequences_with_grant_option_to_privileged_role.sql @@ -1,3 +1,3 @@ -- SKIP: Moved inline to the handle_grants() functions. -ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO neon_superuser WITH GRANT OPTION; +ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO {privileged_role_name} WITH GRANT OPTION; diff --git a/compute_tools/src/migrations/0010-grant_snapshot_synchronization_funcs_to_neon_superuser.sql b/compute_tools/src/migrations/0010-grant_snapshot_synchronization_funcs_to_privileged_role.sql similarity index 82% rename from compute_tools/src/migrations/0010-grant_snapshot_synchronization_funcs_to_neon_superuser.sql rename to compute_tools/src/migrations/0010-grant_snapshot_synchronization_funcs_to_privileged_role.sql index 28750e00dd..84fcb36391 100644 --- a/compute_tools/src/migrations/0010-grant_snapshot_synchronization_funcs_to_neon_superuser.sql +++ b/compute_tools/src/migrations/0010-grant_snapshot_synchronization_funcs_to_privileged_role.sql @@ -1,7 +1,7 @@ DO $$ BEGIN IF (SELECT setting::numeric >= 160000 FROM pg_settings WHERE name = 'server_version_num') THEN - EXECUTE 'GRANT EXECUTE 
ON FUNCTION pg_export_snapshot TO neon_superuser'; - EXECUTE 'GRANT EXECUTE ON FUNCTION pg_log_standby_snapshot TO neon_superuser'; + EXECUTE 'GRANT EXECUTE ON FUNCTION pg_export_snapshot TO {privileged_role_name}'; + EXECUTE 'GRANT EXECUTE ON FUNCTION pg_log_standby_snapshot TO {privileged_role_name}'; END IF; END $$; diff --git a/compute_tools/src/migrations/0011-grant_pg_show_replication_origin_status_to_neon_superuser.sql b/compute_tools/src/migrations/0011-grant_pg_show_replication_origin_status_to_neon_superuser.sql deleted file mode 100644 index 425ed8cd3d..0000000000 --- a/compute_tools/src/migrations/0011-grant_pg_show_replication_origin_status_to_neon_superuser.sql +++ /dev/null @@ -1 +0,0 @@ -GRANT EXECUTE ON FUNCTION pg_show_replication_origin_status TO neon_superuser; diff --git a/compute_tools/src/migrations/0011-grant_pg_show_replication_origin_status_to_privileged_role.sql b/compute_tools/src/migrations/0011-grant_pg_show_replication_origin_status_to_privileged_role.sql new file mode 100644 index 0000000000..125a9f463f --- /dev/null +++ b/compute_tools/src/migrations/0011-grant_pg_show_replication_origin_status_to_privileged_role.sql @@ -0,0 +1 @@ +GRANT EXECUTE ON FUNCTION pg_show_replication_origin_status TO {privileged_role_name}; diff --git a/compute_tools/src/migrations/0012-grant_pg_signal_backend_to_privileged_role.sql b/compute_tools/src/migrations/0012-grant_pg_signal_backend_to_privileged_role.sql new file mode 100644 index 0000000000..1b54ec8a3b --- /dev/null +++ b/compute_tools/src/migrations/0012-grant_pg_signal_backend_to_privileged_role.sql @@ -0,0 +1 @@ +GRANT pg_signal_backend TO {privileged_role_name} WITH ADMIN OPTION; diff --git a/compute_tools/src/migrations/tests/0001-neon_superuser_bypass_rls.sql b/compute_tools/src/migrations/tests/0001-add_bypass_rls_to_privileged_role.sql similarity index 100% rename from compute_tools/src/migrations/tests/0001-neon_superuser_bypass_rls.sql rename to 
compute_tools/src/migrations/tests/0001-add_bypass_rls_to_privileged_role.sql diff --git a/compute_tools/src/migrations/tests/0003-grant_pg_create_subscription_to_neon_superuser.sql b/compute_tools/src/migrations/tests/0003-grant_pg_create_subscription_to_privileged_role.sql similarity index 100% rename from compute_tools/src/migrations/tests/0003-grant_pg_create_subscription_to_neon_superuser.sql rename to compute_tools/src/migrations/tests/0003-grant_pg_create_subscription_to_privileged_role.sql diff --git a/compute_tools/src/migrations/tests/0004-grant_pg_monitor_to_neon_superuser.sql b/compute_tools/src/migrations/tests/0004-grant_pg_monitor_to_privileged_role.sql similarity index 58% rename from compute_tools/src/migrations/tests/0004-grant_pg_monitor_to_neon_superuser.sql rename to compute_tools/src/migrations/tests/0004-grant_pg_monitor_to_privileged_role.sql index acb8dd417d..3464a2b1cf 100644 --- a/compute_tools/src/migrations/tests/0004-grant_pg_monitor_to_neon_superuser.sql +++ b/compute_tools/src/migrations/tests/0004-grant_pg_monitor_to_privileged_role.sql @@ -7,13 +7,17 @@ BEGIN INTO monitor FROM pg_auth_members WHERE roleid = 'pg_monitor'::regrole - AND member = 'pg_monitor'::regrole; + AND member = 'neon_superuser'::regrole; - IF NOT monitor.member THEN + IF monitor IS NULL THEN + RAISE EXCEPTION 'no entry in pg_auth_members for neon_superuser and pg_monitor'; + END IF; + + IF monitor.admin IS NULL OR NOT monitor.member THEN RAISE EXCEPTION 'neon_superuser is not a member of pg_monitor'; END IF; - IF NOT monitor.admin THEN + IF monitor.admin IS NULL OR NOT monitor.admin THEN RAISE EXCEPTION 'neon_superuser cannot grant pg_monitor'; END IF; END $$; diff --git a/compute_tools/src/migrations/tests/0005-grant_all_on_tables_to_neon_superuser.sql b/compute_tools/src/migrations/tests/0005-grant_all_on_tables_to_privileged_role.sql similarity index 100% rename from compute_tools/src/migrations/tests/0005-grant_all_on_tables_to_neon_superuser.sql rename to 
compute_tools/src/migrations/tests/0005-grant_all_on_tables_to_privileged_role.sql diff --git a/compute_tools/src/migrations/tests/0006-grant_all_on_sequences_to_neon_superuser.sql b/compute_tools/src/migrations/tests/0006-grant_all_on_sequences_to_privileged_role.sql similarity index 100% rename from compute_tools/src/migrations/tests/0006-grant_all_on_sequences_to_neon_superuser.sql rename to compute_tools/src/migrations/tests/0006-grant_all_on_sequences_to_privileged_role.sql diff --git a/compute_tools/src/migrations/tests/0007-grant_all_on_tables_to_neon_superuser_with_grant_option.sql b/compute_tools/src/migrations/tests/0007-grant_all_on_tables_with_grant_option_to_privileged_role.sql similarity index 100% rename from compute_tools/src/migrations/tests/0007-grant_all_on_tables_to_neon_superuser_with_grant_option.sql rename to compute_tools/src/migrations/tests/0007-grant_all_on_tables_with_grant_option_to_privileged_role.sql diff --git a/compute_tools/src/migrations/tests/0008-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql b/compute_tools/src/migrations/tests/0008-grant_all_on_sequences_with_grant_option_to_privileged_role.sql similarity index 100% rename from compute_tools/src/migrations/tests/0008-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql rename to compute_tools/src/migrations/tests/0008-grant_all_on_sequences_with_grant_option_to_privileged_role.sql diff --git a/compute_tools/src/migrations/tests/0010-grant_snapshot_synchronization_funcs_to_neon_superuser.sql b/compute_tools/src/migrations/tests/0010-grant_snapshot_synchronization_funcs_to_privileged_role.sql similarity index 100% rename from compute_tools/src/migrations/tests/0010-grant_snapshot_synchronization_funcs_to_neon_superuser.sql rename to compute_tools/src/migrations/tests/0010-grant_snapshot_synchronization_funcs_to_privileged_role.sql diff --git a/compute_tools/src/migrations/tests/0011-grant_pg_show_replication_origin_status_to_neon_superuser.sql 
b/compute_tools/src/migrations/tests/0011-grant_pg_show_replication_origin_status_to_privileged_role.sql similarity index 100% rename from compute_tools/src/migrations/tests/0011-grant_pg_show_replication_origin_status_to_neon_superuser.sql rename to compute_tools/src/migrations/tests/0011-grant_pg_show_replication_origin_status_to_privileged_role.sql diff --git a/compute_tools/src/migrations/tests/0012-grant_pg_signal_backend_to_privileged_role.sql b/compute_tools/src/migrations/tests/0012-grant_pg_signal_backend_to_privileged_role.sql new file mode 100644 index 0000000000..e62b742d30 --- /dev/null +++ b/compute_tools/src/migrations/tests/0012-grant_pg_signal_backend_to_privileged_role.sql @@ -0,0 +1,23 @@ +DO $$ +DECLARE + signal_backend record; +BEGIN + SELECT pg_has_role('neon_superuser', 'pg_signal_backend', 'member') AS member, + admin_option AS admin + INTO signal_backend + FROM pg_auth_members + WHERE roleid = 'pg_signal_backend'::regrole + AND member = 'neon_superuser'::regrole; + + IF signal_backend IS NULL THEN + RAISE EXCEPTION 'no entry in pg_auth_members for neon_superuser and pg_signal_backend'; + END IF; + + IF signal_backend.member IS NULL OR NOT signal_backend.member THEN + RAISE EXCEPTION 'neon_superuser is not a member of pg_signal_backend'; + END IF; + + IF signal_backend.admin IS NULL OR NOT signal_backend.admin THEN + RAISE EXCEPTION 'neon_superuser cannot grant pg_signal_backend'; + END IF; +END $$; diff --git a/compute_tools/src/monitor.rs b/compute_tools/src/monitor.rs index 8a2f6addad..fa01545856 100644 --- a/compute_tools/src/monitor.rs +++ b/compute_tools/src/monitor.rs @@ -84,7 +84,8 @@ impl ComputeMonitor { if matches!( compute_status, ComputeStatus::Terminated - | ComputeStatus::TerminationPending { .. 
} + | ComputeStatus::TerminationPendingFast + | ComputeStatus::TerminationPendingImmediate | ComputeStatus::Failed ) { info!( diff --git a/compute_tools/src/spec.rs b/compute_tools/src/spec.rs index 43cfbb48f7..4525a0e831 100644 --- a/compute_tools/src/spec.rs +++ b/compute_tools/src/spec.rs @@ -9,6 +9,7 @@ use reqwest::StatusCode; use tokio_postgres::Client; use tracing::{error, info, instrument}; +use crate::compute::ComputeNodeParams; use crate::config; use crate::metrics::{CPLANE_REQUESTS_TOTAL, CPlaneRequestRPC, UNKNOWN_HTTP_STATUS}; use crate::migration::MigrationRunner; @@ -169,7 +170,7 @@ pub async fn handle_neon_extension_upgrade(client: &mut Client) -> Result<()> { } #[instrument(skip_all)] -pub async fn handle_migrations(client: &mut Client) -> Result<()> { +pub async fn handle_migrations(params: ComputeNodeParams, client: &mut Client) -> Result<()> { info!("handle migrations"); // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -178,24 +179,58 @@ pub async fn handle_migrations(client: &mut Client) -> Result<()> { // Add new migrations in numerical order. 
let migrations = [ - include_str!("./migrations/0001-neon_superuser_bypass_rls.sql"), - include_str!("./migrations/0002-alter_roles.sql"), - include_str!("./migrations/0003-grant_pg_create_subscription_to_neon_superuser.sql"), - include_str!("./migrations/0004-grant_pg_monitor_to_neon_superuser.sql"), - include_str!("./migrations/0005-grant_all_on_tables_to_neon_superuser.sql"), - include_str!("./migrations/0006-grant_all_on_sequences_to_neon_superuser.sql"), - include_str!( - "./migrations/0007-grant_all_on_tables_to_neon_superuser_with_grant_option.sql" + &format!( + include_str!("./migrations/0001-add_bypass_rls_to_privileged_role.sql"), + privileged_role_name = params.privileged_role_name ), - include_str!( - "./migrations/0008-grant_all_on_sequences_to_neon_superuser_with_grant_option.sql" + &format!( + include_str!("./migrations/0002-alter_roles.sql"), + privileged_role_name = params.privileged_role_name + ), + &format!( + include_str!("./migrations/0003-grant_pg_create_subscription_to_privileged_role.sql"), + privileged_role_name = params.privileged_role_name + ), + &format!( + include_str!("./migrations/0004-grant_pg_monitor_to_privileged_role.sql"), + privileged_role_name = params.privileged_role_name + ), + &format!( + include_str!("./migrations/0005-grant_all_on_tables_to_privileged_role.sql"), + privileged_role_name = params.privileged_role_name + ), + &format!( + include_str!("./migrations/0006-grant_all_on_sequences_to_privileged_role.sql"), + privileged_role_name = params.privileged_role_name + ), + &format!( + include_str!( + "./migrations/0007-grant_all_on_tables_with_grant_option_to_privileged_role.sql" + ), + privileged_role_name = params.privileged_role_name + ), + &format!( + include_str!( + "./migrations/0008-grant_all_on_sequences_with_grant_option_to_privileged_role.sql" + ), + privileged_role_name = params.privileged_role_name ), include_str!("./migrations/0009-revoke_replication_for_previously_allowed_roles.sql"), - include_str!( - 
"./migrations/0010-grant_snapshot_synchronization_funcs_to_neon_superuser.sql" + &format!( + include_str!( + "./migrations/0010-grant_snapshot_synchronization_funcs_to_privileged_role.sql" + ), + privileged_role_name = params.privileged_role_name ), - include_str!( - "./migrations/0011-grant_pg_show_replication_origin_status_to_neon_superuser.sql" + &format!( + include_str!( + "./migrations/0011-grant_pg_show_replication_origin_status_to_privileged_role.sql" + ), + privileged_role_name = params.privileged_role_name + ), + &format!( + include_str!("./migrations/0012-grant_pg_signal_backend_to_privileged_role.sql"), + privileged_role_name = params.privileged_role_name ), ]; diff --git a/compute_tools/src/spec_apply.rs b/compute_tools/src/spec_apply.rs index fcd072263a..ec7e75922b 100644 --- a/compute_tools/src/spec_apply.rs +++ b/compute_tools/src/spec_apply.rs @@ -13,14 +13,14 @@ use tokio_postgres::Client; use tokio_postgres::error::SqlState; use tracing::{Instrument, debug, error, info, info_span, instrument, warn}; -use crate::compute::{ComputeNode, ComputeState}; +use crate::compute::{ComputeNode, ComputeNodeParams, ComputeState}; use crate::pg_helpers::{ DatabaseExt, Escaping, GenericOptionsSearch, RoleExt, get_existing_dbs_async, get_existing_roles_async, }; use crate::spec_apply::ApplySpecPhase::{ - CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreateNeonSuperuser, - CreatePgauditExtension, CreatePgauditlogtofileExtension, CreateSchemaNeon, + CreateAndAlterDatabases, CreateAndAlterRoles, CreateAvailabilityCheck, CreatePgauditExtension, + CreatePgauditlogtofileExtension, CreatePrivilegedRole, CreateSchemaNeon, DisablePostgresDBPgAudit, DropInvalidDatabases, DropRoles, FinalizeDropLogicalSubscriptions, HandleNeonExtension, HandleOtherExtensions, RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase, @@ -49,6 +49,7 @@ impl ComputeNode { // Proceed with post-startup configuration. Note, that order of operations is important. 
let client = Self::get_maintenance_client(&conf).await?; let spec = spec.clone(); + let params = Arc::new(self.params.clone()); let databases = get_existing_dbs_async(&client).await?; let roles = get_existing_roles_async(&client) @@ -157,6 +158,7 @@ impl ComputeNode { let conf = Arc::new(conf); let fut = Self::apply_spec_sql_db( + params.clone(), spec.clone(), conf, ctx.clone(), @@ -185,7 +187,7 @@ impl ComputeNode { } for phase in [ - CreateNeonSuperuser, + CreatePrivilegedRole, DropInvalidDatabases, RenameRoles, CreateAndAlterRoles, @@ -195,6 +197,7 @@ impl ComputeNode { ] { info!("Applying phase {:?}", &phase); apply_operations( + params.clone(), spec.clone(), ctx.clone(), jwks_roles.clone(), @@ -243,6 +246,7 @@ impl ComputeNode { } let fut = Self::apply_spec_sql_db( + params.clone(), spec.clone(), conf, ctx.clone(), @@ -293,6 +297,7 @@ impl ComputeNode { for phase in phases { debug!("Applying phase {:?}", &phase); apply_operations( + params.clone(), spec.clone(), ctx.clone(), jwks_roles.clone(), @@ -313,7 +318,9 @@ impl ComputeNode { /// May opt to not connect to databases that don't have any scheduled /// operations. The function is concurrency-controlled with the provided /// semaphore. The caller has to make sure the semaphore isn't exhausted. + #[allow(clippy::too_many_arguments)] // TODO: needs bigger refactoring async fn apply_spec_sql_db( + params: Arc, spec: Arc, conf: Arc, ctx: Arc>, @@ -328,6 +335,7 @@ impl ComputeNode { for subphase in subphases { apply_operations( + params.clone(), spec.clone(), ctx.clone(), jwks_roles.clone(), @@ -467,7 +475,7 @@ pub enum PerDatabasePhase { #[derive(Clone, Debug)] pub enum ApplySpecPhase { - CreateNeonSuperuser, + CreatePrivilegedRole, DropInvalidDatabases, RenameRoles, CreateAndAlterRoles, @@ -510,6 +518,7 @@ pub struct MutableApplyContext { /// - No timeouts have (yet) been implemented. /// - The caller is responsible for limiting and/or applying concurrency. 
pub async fn apply_operations<'a, Fut, F>( + params: Arc, spec: Arc, ctx: Arc>, jwks_roles: Arc>, @@ -527,7 +536,7 @@ where debug!("Processing phase {:?}", &apply_spec_phase); let ctx = ctx; - let mut ops = get_operations(&spec, &ctx, &jwks_roles, &apply_spec_phase) + let mut ops = get_operations(¶ms, &spec, &ctx, &jwks_roles, &apply_spec_phase) .await? .peekable(); @@ -588,14 +597,18 @@ where /// sort/merge/batch execution, but for now this is a nice way to improve /// batching behavior of the commands. async fn get_operations<'a>( + params: &'a ComputeNodeParams, spec: &'a ComputeSpec, ctx: &'a RwLock, jwks_roles: &'a HashSet, apply_spec_phase: &'a ApplySpecPhase, ) -> Result + 'a + Send>> { match apply_spec_phase { - ApplySpecPhase::CreateNeonSuperuser => Ok(Box::new(once(Operation { - query: include_str!("sql/create_neon_superuser.sql").to_string(), + ApplySpecPhase::CreatePrivilegedRole => Ok(Box::new(once(Operation { + query: format!( + include_str!("sql/create_privileged_role.sql"), + privileged_role_name = params.privileged_role_name + ), comment: None, }))), ApplySpecPhase::DropInvalidDatabases => { @@ -697,8 +710,9 @@ async fn get_operations<'a>( None => { let query = if !jwks_roles.contains(role.name.as_str()) { format!( - "CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS REPLICATION IN ROLE neon_superuser {}", + "CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS REPLICATION IN ROLE {} {}", role.name.pg_quote(), + params.privileged_role_name, role.to_pg_options(), ) } else { @@ -849,8 +863,9 @@ async fn get_operations<'a>( // ALL PRIVILEGES grants CREATE, CONNECT, and TEMPORARY on the database // (see https://www.postgresql.org/docs/current/ddl-priv.html) query: format!( - "GRANT ALL PRIVILEGES ON DATABASE {} TO neon_superuser", - db.name.pg_quote() + "GRANT ALL PRIVILEGES ON DATABASE {} TO {}", + db.name.pg_quote(), + params.privileged_role_name ), comment: None, }, diff --git a/compute_tools/src/sql/create_neon_superuser.sql 
b/compute_tools/src/sql/create_neon_superuser.sql deleted file mode 100644 index 300645627b..0000000000 --- a/compute_tools/src/sql/create_neon_superuser.sql +++ /dev/null @@ -1,8 +0,0 @@ -DO $$ - BEGIN - IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'neon_superuser') - THEN - CREATE ROLE neon_superuser CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data; - END IF; - END -$$; diff --git a/compute_tools/src/sql/create_privileged_role.sql b/compute_tools/src/sql/create_privileged_role.sql new file mode 100644 index 0000000000..df27ac32fc --- /dev/null +++ b/compute_tools/src/sql/create_privileged_role.sql @@ -0,0 +1,8 @@ +DO $$ + BEGIN + IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{privileged_role_name}') + THEN + CREATE ROLE {privileged_role_name} CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS IN ROLE pg_read_all_data, pg_write_all_data; + END IF; + END +$$; diff --git a/control_plane/README.md b/control_plane/README.md index aa6f935e27..60c6120d82 100644 --- a/control_plane/README.md +++ b/control_plane/README.md @@ -8,10 +8,10 @@ code changes locally, but not suitable for running production systems. ## Example: Start with Postgres 16 -To create and start a local development environment with Postgres 16, you will need to provide `--pg-version` flag to 3 of the start-up commands. +To create and start a local development environment with Postgres 16, you will need to provide `--pg-version` flag to 2 of the start-up commands. 
```shell -cargo neon init --pg-version 16 +cargo neon init cargo neon start cargo neon tenant create --set-default --pg-version 16 cargo neon endpoint create main --pg-version 16 diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index ef8a504cec..6da3223024 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -16,9 +16,14 @@ use std::time::Duration; use anyhow::{Context, Result, anyhow, bail}; use clap::Parser; use compute_api::requests::ComputeClaimsScope; -use compute_api::spec::{ComputeMode, PageserverConnectionInfo, PageserverShardConnectionInfo}; +use compute_api::spec::{ + ComputeMode, PageserverConnectionInfo, PageserverProtocol, PageserverShardInfo, +}; use control_plane::broker::StorageBroker; use control_plane::endpoint::{ComputeControlPlane, EndpointTerminateMode}; +use control_plane::endpoint::{ + pageserver_conf_to_shard_conn_info, tenant_locate_response_to_conn_info, +}; use control_plane::endpoint_storage::{ENDPOINT_STORAGE_DEFAULT_ADDR, EndpointStorage}; use control_plane::local_env; use control_plane::local_env::{ @@ -44,7 +49,6 @@ use pageserver_api::models::{ }; use pageserver_api::shard::{DEFAULT_STRIPE_SIZE, ShardCount, ShardStripeSize, TenantShardId}; use postgres_backend::AuthType; -use postgres_connection::parse_host_port; use safekeeper_api::membership::{SafekeeperGeneration, SafekeeperId}; use safekeeper_api::{ DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT, @@ -52,11 +56,11 @@ use safekeeper_api::{ }; use storage_broker::DEFAULT_LISTEN_ADDR as DEFAULT_BROKER_ADDR; use tokio::task::JoinSet; -use url::Host; use utils::auth::{Claims, Scope}; use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId}; use utils::lsn::Lsn; use utils::project_git_version; +use utils::shard::ShardIndex; // Default id of a safekeeper node, if not specified on the command line. 
const DEFAULT_SAFEKEEPER_ID: NodeId = NodeId(1); @@ -631,6 +635,10 @@ struct EndpointCreateCmdArgs { help = "Allow multiple primary endpoints running on the same branch. Shouldn't be used normally, but useful for tests." )] allow_multiple: bool, + + /// Only allow changing it on creation + #[clap(long, help = "Name of the privileged role for the endpoint")] + privileged_role_name: Option, } #[derive(clap::Args)] @@ -1480,6 +1488,7 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res args.grpc, !args.update_catalog, false, + args.privileged_role_name.clone(), )?; } EndpointCmd::Start(args) => { @@ -1516,74 +1525,56 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res )?; } - let (shards, stripe_size) = if let Some(ps_id) = pageserver_id { - let conf = env.get_pageserver_conf(ps_id).unwrap(); - let libpq_url = Some({ - let (host, port) = parse_host_port(&conf.listen_pg_addr)?; - let port = port.unwrap_or(5432); - format!("postgres://no_user@{host}:{port}") - }); - let grpc_url = if let Some(grpc_addr) = &conf.listen_grpc_addr { - let (host, port) = parse_host_port(grpc_addr)?; - let port = port.unwrap_or(DEFAULT_PAGESERVER_GRPC_PORT); - Some(format!("grpc://no_user@{host}:{port}")) - } else { - None - }; - let pageserver = PageserverShardConnectionInfo { - libpq_url, - grpc_url, - }; + let prefer_protocol = if endpoint.grpc { + PageserverProtocol::Grpc + } else { + PageserverProtocol::Libpq + }; + let mut pageserver_conninfo = if let Some(ps_id) = pageserver_id { + let conf = env.get_pageserver_conf(ps_id).unwrap(); + let ps_conninfo = pageserver_conf_to_shard_conn_info(conf)?; + + let shard_info = PageserverShardInfo { + pageservers: vec![ps_conninfo], + }; // If caller is telling us what pageserver to use, this is not a tenant which is // fully managed by storage controller, therefore not sharded. 
- (vec![(0, pageserver)], DEFAULT_STRIPE_SIZE) + let shards: HashMap<_, _> = vec![(ShardIndex::unsharded(), shard_info)] + .into_iter() + .collect(); + PageserverConnectionInfo { + shard_count: ShardCount(0), + stripe_size: None, + shards, + prefer_protocol, + } } else { // Look up the currently attached location of the tenant, and its striping metadata, // to pass these on to postgres. let storage_controller = StorageController::from_env(env); let locate_result = storage_controller.tenant_locate(endpoint.tenant_id).await?; - let shards = futures::future::try_join_all(locate_result.shards.into_iter().map( - |shard| async move { - if let ComputeMode::Static(lsn) = endpoint.mode { - // Initialize LSN leases for static computes. + assert!(!locate_result.shards.is_empty()); + + // Initialize LSN leases for static computes. + if let ComputeMode::Static(lsn) = endpoint.mode { + futures::future::try_join_all(locate_result.shards.iter().map( + |shard| async move { let conf = env.get_pageserver_conf(shard.node_id).unwrap(); let pageserver = PageServerNode::from_env(env, conf); pageserver .http_client .timeline_init_lsn_lease(shard.shard_id, endpoint.timeline_id, lsn) - .await?; - } + .await + }, + )) + .await?; + } - let libpq_host = Host::parse(&shard.listen_pg_addr)?; - let libpq_port = shard.listen_pg_port; - let libpq_url = - Some(format!("postgres://no_user@{libpq_host}:{libpq_port}")); - - let grpc_url = if let Some(grpc_host) = shard.listen_grpc_addr { - let grpc_port = shard.listen_grpc_port.expect("no gRPC port"); - Some(format!("grpc://no_user@{grpc_host}:{grpc_port}")) - } else { - None - }; - let pageserver = PageserverShardConnectionInfo { - libpq_url, - grpc_url, - }; - anyhow::Ok((shard.shard_id.shard_number.0 as u32, pageserver)) - }, - )) - .await?; - let stripe_size = locate_result.shard_params.stripe_size; - - (shards, stripe_size) - }; - assert!(!shards.is_empty()); - let pageserver_conninfo = PageserverConnectionInfo { - shards: 
shards.into_iter().collect(), - prefer_grpc: endpoint.grpc, + tenant_locate_response_to_conn_info(&locate_result)? }; + pageserver_conninfo.prefer_protocol = prefer_protocol; let ps_conf = env.get_pageserver_conf(DEFAULT_PAGESERVER_ID)?; let auth_token = if matches!(ps_conf.pg_auth_type, AuthType::NeonJWT) { @@ -1615,7 +1606,6 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res safekeepers, pageserver_conninfo, remote_ext_base_url: remote_ext_base_url.clone(), - shard_stripe_size: stripe_size.0 as usize, create_test_user: args.create_test_user, start_timeout: args.start_timeout, autoprewarm: args.autoprewarm, @@ -1632,66 +1622,45 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res .endpoints .get(endpoint_id.as_str()) .with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?; - let shards = if let Some(ps_id) = args.endpoint_pageserver_id { + + let prefer_protocol = if endpoint.grpc { + PageserverProtocol::Grpc + } else { + PageserverProtocol::Libpq + }; + let mut pageserver_conninfo = if let Some(ps_id) = args.endpoint_pageserver_id { let conf = env.get_pageserver_conf(ps_id)?; - let libpq_url = Some({ - let (host, port) = parse_host_port(&conf.listen_pg_addr)?; - let port = port.unwrap_or(5432); - format!("postgres://no_user@{host}:{port}") - }); - let grpc_url = if let Some(grpc_addr) = &conf.listen_grpc_addr { - let (host, port) = parse_host_port(grpc_addr)?; - let port = port.unwrap_or(DEFAULT_PAGESERVER_GRPC_PORT); - Some(format!("grpc://no_user@{host}:{port}")) - } else { - None - }; - let pageserver = PageserverShardConnectionInfo { - libpq_url, - grpc_url, + let ps_conninfo = pageserver_conf_to_shard_conn_info(conf)?; + let shard_info = PageserverShardInfo { + pageservers: vec![ps_conninfo], }; + // If caller is telling us what pageserver to use, this is not a tenant which is // fully managed by storage controller, therefore not sharded. 
- vec![(0, pageserver)] - } else { - let storage_controller = StorageController::from_env(env); - storage_controller - .tenant_locate(endpoint.tenant_id) - .await? - .shards + let shards: HashMap<_, _> = vec![(ShardIndex::unsharded(), shard_info)] .into_iter() - .map(|shard| { - // Use gRPC if requested. - let libpq_host = Host::parse(&shard.listen_pg_addr).expect("bad hostname"); - let libpq_port = shard.listen_pg_port; - let libpq_url = - Some(format!("postgres://no_user@{libpq_host}:{libpq_port}")); + .collect(); + PageserverConnectionInfo { + shard_count: ShardCount::unsharded(), + stripe_size: None, + shards, + prefer_protocol, + } + } else { + // Look up the currently attached location of the tenant, and its striping metadata, + // to pass these on to postgres. + let storage_controller = StorageController::from_env(env); + let locate_result = storage_controller.tenant_locate(endpoint.tenant_id).await?; - let grpc_url = if let Some(grpc_host) = shard.listen_grpc_addr { - let grpc_port = shard.listen_grpc_port.expect("no gRPC port"); - Some(format!("grpc://no_user@{grpc_host}:{grpc_port}")) - } else { - None - }; - ( - shard.shard_id.shard_number.0 as u32, - PageserverShardConnectionInfo { - libpq_url, - grpc_url, - }, - ) - }) - .collect::>() - }; - let pageserver_conninfo = PageserverConnectionInfo { - shards: shards.into_iter().collect(), - prefer_grpc: endpoint.grpc, + tenant_locate_response_to_conn_info(&locate_result)? }; + pageserver_conninfo.prefer_protocol = prefer_protocol; + // If --safekeepers argument is given, use only the listed // safekeeper nodes; otherwise all from the env. 
let safekeepers = parse_safekeepers(&args.safekeepers)?; endpoint - .reconfigure(Some(pageserver_conninfo), None, safekeepers, None) + .reconfigure(Some(&pageserver_conninfo), safekeepers, None) .await?; } EndpointCmd::Stop(args) => { diff --git a/control_plane/src/broker.rs b/control_plane/src/broker.rs index f43f459636..988b08e875 100644 --- a/control_plane/src/broker.rs +++ b/control_plane/src/broker.rs @@ -36,7 +36,7 @@ impl StorageBroker { pub async fn start(&self, retry_timeout: &Duration) -> anyhow::Result<()> { let broker = &self.env.broker; - print!("Starting neon broker at {}", broker.client_url()); + println!("Starting neon broker at {}", broker.client_url()); let mut args = Vec::new(); diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index 29d369d4d8..58a419b965 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -32,11 +32,12 @@ //! config.json - passed to `compute_ctl` //! pgdata/ //! postgresql.conf - copy of postgresql.conf created by `compute_ctl` -//! zenith.signal +//! neon.signal +//! zenith.signal - copy of neon.signal, for backward compatibility //! //! ``` //! 
-use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::fmt::Display; use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream}; use std::path::PathBuf; @@ -56,8 +57,8 @@ use compute_api::responses::{ TlsConfig, }; use compute_api::spec::{ - Cluster, ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, Database, PgIdent, - RemoteExtSpec, Role, + Cluster, ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, Database, PageserverProtocol, + PageserverShardInfo, PgIdent, RemoteExtSpec, Role, }; // re-export these, because they're used in the reconfigure() function @@ -68,7 +69,6 @@ use jsonwebtoken::jwk::{ OctetKeyPairParameters, OctetKeyPairType, PublicKeyUse, }; use nix::sys::signal::{Signal, kill}; -use pageserver_api::shard::ShardStripeSize; use pem::Pem; use reqwest::header::CONTENT_TYPE; use safekeeper_api::PgMajorVersion; @@ -79,6 +79,10 @@ use spki::der::Decode; use spki::{SubjectPublicKeyInfo, SubjectPublicKeyInfoRef}; use tracing::debug; use utils::id::{NodeId, TenantId, TimelineId}; +use utils::shard::{ShardIndex, ShardNumber}; + +use pageserver_api::config::DEFAULT_GRPC_LISTEN_PORT as DEFAULT_PAGESERVER_GRPC_PORT; +use postgres_connection::parse_host_port; use crate::local_env::LocalEnv; use crate::postgresql_conf::PostgresConf; @@ -101,6 +105,7 @@ pub struct EndpointConf { features: Vec, cluster: Option, compute_ctl_config: ComputeCtlConfig, + privileged_role_name: Option, } // @@ -201,6 +206,7 @@ impl ComputeControlPlane { grpc: bool, skip_pg_catalog_updates: bool, drop_subscriptions_before_start: bool, + privileged_role_name: Option, ) -> Result> { let pg_port = pg_port.unwrap_or_else(|| self.get_port()); let external_http_port = external_http_port.unwrap_or_else(|| self.get_port() + 1); @@ -238,6 +244,7 @@ impl ComputeControlPlane { features: vec![], cluster: None, compute_ctl_config: compute_ctl_config.clone(), + privileged_role_name: privileged_role_name.clone(), }); ep.create_endpoint_dir()?; @@ -259,6 +266,7 @@ impl 
ComputeControlPlane { features: vec![], cluster: None, compute_ctl_config, + privileged_role_name, })?, )?; std::fs::write( @@ -334,6 +342,9 @@ pub struct Endpoint { /// The compute_ctl config for the endpoint's compute. compute_ctl_config: ComputeCtlConfig, + + /// The name of the privileged role for the endpoint. + privileged_role_name: Option, } #[derive(PartialEq, Eq)] @@ -384,7 +395,6 @@ pub struct EndpointStartArgs { pub safekeepers: Vec, pub pageserver_conninfo: PageserverConnectionInfo, pub remote_ext_base_url: Option, - pub shard_stripe_size: usize, pub create_test_user: bool, pub start_timeout: Duration, pub autoprewarm: bool, @@ -434,6 +444,7 @@ impl Endpoint { features: conf.features, cluster: conf.cluster, compute_ctl_config: conf.compute_ctl_config, + privileged_role_name: conf.privileged_role_name, }) } @@ -466,7 +477,7 @@ impl Endpoint { conf.append("max_connections", "100"); conf.append("wal_level", "logical"); // wal_sender_timeout is the maximum time to wait for WAL replication. - // It also defines how often the walreciever will send a feedback message to the wal sender. + // It also defines how often the walreceiver will send a feedback message to the wal sender. conf.append("wal_sender_timeout", "5s"); conf.append("listen_addresses", &self.pg_address.ip().to_string()); conf.append("port", &self.pg_address.port().to_string()); @@ -715,6 +726,46 @@ impl Endpoint { remote_extensions = None; }; + // For the sake of backwards-compatibility, also fill in 'pageserver_connstring' + // + // XXX: I believe this is not really needed, except to make + // test_forward_compatibility happy. + // + // Use a closure so that we can conviniently return None in the middle of the + // loop. 
+ let pageserver_connstring = (|| { + let num_shards = if args.pageserver_conninfo.shard_count.is_unsharded() { + 1 + } else { + args.pageserver_conninfo.shard_count.0 + }; + let mut connstrings = Vec::new(); + for shard_no in 0..num_shards { + let shard_index = ShardIndex { + shard_count: args.pageserver_conninfo.shard_count, + shard_number: ShardNumber(shard_no), + }; + let shard = args + .pageserver_conninfo + .shards + .get(&shard_index) + .expect(&format!( + "shard {} not found in pageserver_connection_info", + shard_index + )); + let pageserver = shard + .pageservers + .first() + .expect("must have at least one pageserver"); + if let Some(libpq_url) = &pageserver.libpq_url { + connstrings.push(libpq_url.clone()); + } else { + return None; + } + } + Some(connstrings.join(",")) + })(); + // Create config file let config = { let mut spec = ComputeSpec { @@ -759,13 +810,14 @@ impl Endpoint { branch_id: None, endpoint_id: Some(self.endpoint_id.clone()), mode: self.mode, - pageserver_connection_info: Some(args.pageserver_conninfo), + pageserver_connection_info: Some(args.pageserver_conninfo.clone()), + pageserver_connstring, safekeepers_generation: args.safekeepers_generation.map(|g| g.into_inner()), safekeeper_connstrings, storage_auth_token: args.auth_token.clone(), remote_extensions, pgbouncer_settings: None, - shard_stripe_size: Some(args.shard_stripe_size), + shard_stripe_size: args.pageserver_conninfo.stripe_size, // redundant with pageserver_connection_info.stripe_size local_proxy_config: None, reconfigure_concurrency: self.reconfigure_concurrency, drop_subscriptions_before_start: self.drop_subscriptions_before_start, @@ -861,6 +913,10 @@ impl Endpoint { cmd.arg("--dev"); } + if let Some(privileged_role_name) = self.privileged_role_name.clone() { + cmd.args(["--privileged-role-name", &privileged_role_name]); + } + let child = cmd.spawn()?; // set up a scopeguard to kill & wait for the child in case we panic or bail below let child = scopeguard::guard(child, 
|mut child| { @@ -914,7 +970,8 @@ impl Endpoint { ComputeStatus::Empty | ComputeStatus::ConfigurationPending | ComputeStatus::Configuration - | ComputeStatus::TerminationPending { .. } + | ComputeStatus::TerminationPendingFast + | ComputeStatus::TerminationPendingImmediate | ComputeStatus::Terminated => { bail!("unexpected compute status: {:?}", state.status) } @@ -972,8 +1029,7 @@ impl Endpoint { pub async fn reconfigure( &self, - pageserver_conninfo: Option, - stripe_size: Option, + pageserver_conninfo: Option<&PageserverConnectionInfo>, safekeepers: Option>, safekeeper_generation: Option, ) -> Result<()> { @@ -995,10 +1051,8 @@ impl Endpoint { !pageserver_conninfo.shards.is_empty(), "no pageservers provided" ); - spec.pageserver_connection_info = Some(pageserver_conninfo); - } - if stripe_size.is_some() { - spec.shard_stripe_size = stripe_size.map(|s| s.0 as usize); + spec.pageserver_connection_info = Some(pageserver_conninfo.clone()); + spec.shard_stripe_size = pageserver_conninfo.stripe_size; } // If safekeepers are not specified, don't change them. 
@@ -1047,11 +1101,9 @@ impl Endpoint { pub async fn reconfigure_pageservers( &self, - pageservers: PageserverConnectionInfo, - stripe_size: Option, + pageservers: &PageserverConnectionInfo, ) -> Result<()> { - self.reconfigure(Some(pageservers), stripe_size, None, None) - .await + self.reconfigure(Some(pageservers), None, None).await } pub async fn reconfigure_safekeepers( @@ -1059,7 +1111,7 @@ impl Endpoint { safekeepers: Vec, generation: SafekeeperGeneration, ) -> Result<()> { - self.reconfigure(None, None, Some(safekeepers), Some(generation)) + self.reconfigure(None, Some(safekeepers), Some(generation)) .await } @@ -1115,3 +1167,68 @@ impl Endpoint { ) } } + +pub fn pageserver_conf_to_shard_conn_info( + conf: &crate::local_env::PageServerConf, +) -> Result { + let libpq_url = { + let (host, port) = parse_host_port(&conf.listen_pg_addr)?; + let port = port.unwrap_or(5432); + Some(format!("postgres://no_user@{host}:{port}")) + }; + let grpc_url = if let Some(grpc_addr) = &conf.listen_grpc_addr { + let (host, port) = parse_host_port(grpc_addr)?; + let port = port.unwrap_or(DEFAULT_PAGESERVER_GRPC_PORT); + Some(format!("grpc://no_user@{host}:{port}")) + } else { + None + }; + Ok(PageserverShardConnectionInfo { + id: Some(conf.id.to_string()), + libpq_url, + grpc_url, + }) +} + +pub fn tenant_locate_response_to_conn_info( + response: &pageserver_api::controller_api::TenantLocateResponse, +) -> Result { + let mut shards = HashMap::new(); + for shard in response.shards.iter() { + tracing::info!("parsing {}", shard.listen_pg_addr); + let libpq_url = { + let host = &shard.listen_pg_addr; + let port = shard.listen_pg_port; + Some(format!("postgres://no_user@{host}:{port}")) + }; + let grpc_url = if let Some(grpc_addr) = &shard.listen_grpc_addr { + let host = grpc_addr; + let port = shard.listen_grpc_port.expect("no gRPC port"); + Some(format!("grpc://no_user@{host}:{port}")) + } else { + None + }; + + let shard_info = PageserverShardInfo { + pageservers: 
vec![PageserverShardConnectionInfo { + id: Some(shard.node_id.to_string()), + libpq_url, + grpc_url, + }], + }; + + shards.insert(shard.shard_id.to_index(), shard_info); + } + + let stripe_size = if response.shard_params.count.is_unsharded() { + None + } else { + Some(response.shard_params.stripe_size.0) + }; + Ok(PageserverConnectionInfo { + shard_count: response.shard_params.count, + stripe_size, + shards, + prefer_protocol: PageserverProtocol::default(), + }) +} diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index d0611113e8..d34dd39f61 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -217,6 +217,9 @@ pub struct NeonStorageControllerConf { pub posthog_config: Option, pub kick_secondary_downloads: Option, + + #[serde(with = "humantime_serde")] + pub shard_split_request_timeout: Option, } impl NeonStorageControllerConf { @@ -250,6 +253,7 @@ impl Default for NeonStorageControllerConf { timeline_safekeeper_count: None, posthog_config: None, kick_secondary_downloads: None, + shard_split_request_timeout: None, } } } diff --git a/control_plane/src/pageserver.rs b/control_plane/src/pageserver.rs index 3f66960edd..843ead807d 100644 --- a/control_plane/src/pageserver.rs +++ b/control_plane/src/pageserver.rs @@ -303,7 +303,7 @@ impl PageServerNode { async fn start_node(&self, retry_timeout: &Duration) -> anyhow::Result<()> { // TODO: using a thread here because start_process() is not async but we need to call check_status() let datadir = self.repo_path(); - print!( + println!( "Starting pageserver node {} at '{}' in {:?}, retrying for {:?}", self.conf.id, self.pg_connection_config.raw_address(), @@ -452,6 +452,12 @@ impl PageServerNode { .map(|x| x.parse::()) .transpose() .context("Failed to parse 'image_creation_threshold' as non zero integer")?, + // HADRON + image_layer_force_creation_period: settings + .remove("image_layer_force_creation_period") + .map(humantime::parse_duration) + .transpose() + 
.context("Failed to parse 'image_layer_force_creation_period' as duration")?, image_layer_creation_check_threshold: settings .remove("image_layer_creation_check_threshold") .map(|x| x.parse::()) diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index da9dafd8e9..2ba2f3ebe4 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -127,7 +127,7 @@ impl SafekeeperNode { extra_opts: &[String], retry_timeout: &Duration, ) -> anyhow::Result<()> { - print!( + println!( "Starting safekeeper at '{}' in '{}', retrying for {:?}", self.pg_connection_config.raw_address(), self.datadir_path().display(), diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index bb83a6319c..f996f39967 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -648,6 +648,13 @@ impl StorageController { args.push(format!("--timeline-safekeeper-count={sk_cnt}")); } + if let Some(duration) = self.config.shard_split_request_timeout { + args.push(format!( + "--shard-split-request-timeout={}", + humantime::Duration::from(duration) + )); + } + let mut envs = vec![ ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), @@ -660,7 +667,7 @@ impl StorageController { )); } - println!("Starting storage controller"); + println!("Starting storage controller at {scheme}://{host}:{listen_port}"); background_process::start_process( COMMAND, diff --git a/control_plane/storcon_cli/Cargo.toml b/control_plane/storcon_cli/Cargo.toml index ce89116691..61d48b2469 100644 --- a/control_plane/storcon_cli/Cargo.toml +++ b/control_plane/storcon_cli/Cargo.toml @@ -14,6 +14,7 @@ humantime.workspace = true pageserver_api.workspace = true pageserver_client.workspace = true reqwest.workspace = true +safekeeper_api.workspace=true serde_json = { workspace = true, features = ["raw_value"] } storage_controller_client.workspace = true 
tokio.workspace = true diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 701c4b3b2e..a4d1030488 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -11,7 +11,7 @@ use pageserver_api::controller_api::{ PlacementPolicy, SafekeeperDescribeResponse, SafekeeperSchedulingPolicyRequest, ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse, SkSchedulingPolicy, TenantCreateRequest, TenantDescribeResponse, TenantPolicyRequest, - TenantShardMigrateRequest, TenantShardMigrateResponse, + TenantShardMigrateRequest, TenantShardMigrateResponse, TimelineSafekeeperMigrateRequest, }; use pageserver_api::models::{ EvictionPolicy, EvictionPolicyLayerAccessThreshold, ShardParameters, TenantConfig, @@ -21,6 +21,7 @@ use pageserver_api::models::{ use pageserver_api::shard::{ShardStripeSize, TenantShardId}; use pageserver_client::mgmt_api::{self}; use reqwest::{Certificate, Method, StatusCode, Url}; +use safekeeper_api::models::TimelineLocateResponse; use storage_controller_client::control_api::Client; use utils::id::{NodeId, TenantId, TimelineId}; @@ -75,6 +76,12 @@ enum Command { NodeStartDelete { #[arg(long)] node_id: NodeId, + /// When `force` is true, skip waiting for shards to prewarm during migration. + /// This can significantly speed up node deletion since prewarming all shards + /// can take considerable time, but may result in slower initial access to + /// migrated shards until they warm up naturally. + #[arg(long)] + force: bool, }, /// Cancel deletion of the specified pageserver and wait for `timeout` /// for the operation to be canceled. May be retried. @@ -279,6 +286,23 @@ enum Command { #[arg(long)] concurrency: Option, }, + /// Locate safekeepers for a timeline from the storcon DB. 
+ TimelineLocate { + #[arg(long)] + tenant_id: TenantId, + #[arg(long)] + timeline_id: TimelineId, + }, + /// Migrate a timeline to a new set of safekeepers + TimelineSafekeeperMigrate { + #[arg(long)] + tenant_id: TenantId, + #[arg(long)] + timeline_id: TimelineId, + /// Example: --new-sk-set 1,2,3 + #[arg(long, required = true, value_delimiter = ',')] + new_sk_set: Vec, + }, } #[derive(Parser)] @@ -458,6 +482,7 @@ async fn main() -> anyhow::Result<()> { listen_http_port, listen_https_port, availability_zone_id: AvailabilityZone(availability_zone_id), + node_ip_addr: None, }), ) .await?; @@ -933,13 +958,14 @@ async fn main() -> anyhow::Result<()> { .dispatch::<(), ()>(Method::DELETE, format!("control/v1/node/{node_id}"), None) .await?; } - Command::NodeStartDelete { node_id } => { + Command::NodeStartDelete { node_id, force } => { + let query = if force { + format!("control/v1/node/{node_id}/delete?force=true") + } else { + format!("control/v1/node/{node_id}/delete") + }; storcon_client - .dispatch::<(), ()>( - Method::PUT, - format!("control/v1/node/{node_id}/delete"), - None, - ) + .dispatch::<(), ()>(Method::PUT, query, None) .await?; println!("Delete started for {node_id}"); } @@ -1324,7 +1350,7 @@ async fn main() -> anyhow::Result<()> { concurrency, } => { let mut path = format!( - "/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/download_heatmap_layers", + "v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/download_heatmap_layers", ); if let Some(c) = concurrency { @@ -1335,6 +1361,41 @@ async fn main() -> anyhow::Result<()> { .dispatch::<(), ()>(Method::POST, path, None) .await?; } + Command::TimelineLocate { + tenant_id, + timeline_id, + } => { + let path = format!("debug/v1/tenant/{tenant_id}/timeline/{timeline_id}/locate"); + + let resp = storcon_client + .dispatch::<(), TimelineLocateResponse>(Method::GET, path, None) + .await?; + + let sk_set = resp.sk_set.iter().map(|id| id.0 as i64).collect::>(); + let new_sk_set = resp + .new_sk_set + .as_ref() 
+ .map(|ids| ids.iter().map(|id| id.0 as i64).collect::>()); + + println!("generation = {}", resp.generation); + println!("sk_set = {sk_set:?}"); + println!("new_sk_set = {new_sk_set:?}"); + } + Command::TimelineSafekeeperMigrate { + tenant_id, + timeline_id, + new_sk_set, + } => { + let path = format!("v1/tenant/{tenant_id}/timeline/{timeline_id}/safekeeper_migrate"); + + storcon_client + .dispatch::<_, ()>( + Method::POST, + path, + Some(TimelineSafekeeperMigrateRequest { new_sk_set }), + ) + .await?; + } } Ok(()) diff --git a/docker-compose/compute_wrapper/shell/compute.sh b/docker-compose/compute_wrapper/shell/compute.sh index 1e62e91fd0..6f36b4358e 100755 --- a/docker-compose/compute_wrapper/shell/compute.sh +++ b/docker-compose/compute_wrapper/shell/compute.sh @@ -54,14 +54,16 @@ else printf '%s\n' "${result}" | jq . fi - echo "Check if a timeline present" - PARAMS=( - -X GET - -H "Content-Type: application/json" - "http://pageserver:9898/v1/tenant/${tenant_id}/timeline" - ) - timeline_id=$(curl "${PARAMS[@]}" | jq -r .[0].timeline_id) - if [[ -z "${timeline_id}" || "${timeline_id}" = null ]]; then + if [[ "${RUN_PARALLEL:-false}" != "true" ]]; then + echo "Check if a timeline present" + PARAMS=( + -X GET + -H "Content-Type: application/json" + "http://pageserver:9898/v1/tenant/${tenant_id}/timeline" + ) + timeline_id=$(curl "${PARAMS[@]}" | jq -r .[0].timeline_id) + fi + if [[ -z "${timeline_id:-}" || "${timeline_id:-}" = null ]]; then generate_id timeline_id PARAMS=( -sbf diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml index 2519b75c7f..19c3bc74e2 100644 --- a/docker-compose/docker-compose.yml +++ b/docker-compose/docker-compose.yml @@ -142,7 +142,7 @@ services: - "storage_broker" - "--listen-addr=0.0.0.0:50051" - compute: + compute1: restart: always build: context: ./compute_wrapper/ @@ -152,6 +152,7 @@ services: - TAG=${COMPUTE_TAG:-${TAG:-latest}} - http_proxy=${http_proxy:-} - https_proxy=${https_proxy:-} + image: 
built-compute environment: - PG_VERSION=${PG_VERSION:-16} - TENANT_ID=${TENANT_ID:-} @@ -166,6 +167,11 @@ services: - 3080:3080 # http endpoints entrypoint: - "/shell/compute.sh" + # Ad an alias for compute1 for compatibility + networks: + default: + aliases: + - compute depends_on: - safekeeper1 - safekeeper2 @@ -174,15 +180,20 @@ services: compute_is_ready: image: postgres:latest + environment: + - PARALLEL_COMPUTES=1 entrypoint: - - "/bin/bash" + - "/bin/sh" - "-c" command: - - "until pg_isready -h compute -p 55433 -U cloud_admin ; do - echo 'Waiting to start compute...' && sleep 1; - done" + - "for i in $(seq 1 $${PARALLEL_COMPUTES}); do + until pg_isready -h compute$$i -p 55433 -U cloud_admin ; do + sleep 1; + done; + done; + echo All computes are started" depends_on: - - compute + - compute1 neon-test-extensions: profiles: ["test-extensions"] @@ -196,4 +207,4 @@ services: command: - sleep 3600 depends_on: - - compute + - compute1 diff --git a/docker-compose/docker_compose_test.sh b/docker-compose/docker_compose_test.sh index 6edf90ca8d..063b8dee85 100755 --- a/docker-compose/docker_compose_test.sh +++ b/docker-compose/docker_compose_test.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # A basic test to ensure Docker images are built correctly. # Build a wrapper around the compute, start all services and runs a simple SQL query. 
@@ -13,9 +13,36 @@ # set -eux -o pipefail +cd "$(dirname "${0}")" export COMPOSE_FILE='docker-compose.yml' export COMPOSE_PROFILES=test-extensions -cd "$(dirname "${0}")" +export PARALLEL_COMPUTES=${PARALLEL_COMPUTES:-1} +READY_MESSAGE="All computes are started" +COMPUTES=() +for i in $(seq 1 "${PARALLEL_COMPUTES}"); do + COMPUTES+=("compute${i}") +done +CURRENT_TMPDIR=$(mktemp -d) +trap 'rm -rf ${CURRENT_TMPDIR} docker-compose-parallel.yml' EXIT +if [[ ${PARALLEL_COMPUTES} -gt 1 ]]; then + export COMPOSE_FILE=docker-compose-parallel.yml + cp docker-compose.yml docker-compose-parallel.yml + # Replace the environment variable PARALLEL_COMPUTES with the actual value + yq eval -i ".services.compute_is_ready.environment |= map(select(. | test(\"^PARALLEL_COMPUTES=\") | not)) + [\"PARALLEL_COMPUTES=${PARALLEL_COMPUTES}\"]" ${COMPOSE_FILE} + for i in $(seq 2 "${PARALLEL_COMPUTES}"); do + # Duplicate compute1 as compute${i} for parallel execution + yq eval -i ".services.compute${i} = .services.compute1" ${COMPOSE_FILE} + # We don't need these sections, so delete them + yq eval -i "(del .services.compute${i}.build) | (del .services.compute${i}.ports) | (del .services.compute${i}.networks)" ${COMPOSE_FILE} + # Let the compute 1 be the only dependence + yq eval -i ".services.compute${i}.depends_on = [\"compute1\"]" ${COMPOSE_FILE} + # Set RUN_PARALLEL=true for compute2. They will generate tenant_id and timeline_id to avoid using the same as other computes + yq eval -i ".services.compute${i}.environment += [\"RUN_PARALLEL=true\"]" ${COMPOSE_FILE} + # Remove TENANT_ID and TIMELINE_ID from the environment variables of the generated computes + # They will create new TENANT_ID and TIMELINE_ID anyway. + yq eval -i ".services.compute${i}.environment |= map(select(. 
| (test(\"^TENANT_ID=\") or test(\"^TIMELINE_ID=\")) | not))" ${COMPOSE_FILE} + done +fi PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -d postgres" function cleanup() { @@ -27,11 +54,11 @@ function cleanup() { for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do pg_version=${pg_version/v/} - echo "clean up containers if exists" + echo "clean up containers if exist" cleanup PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version)) - PG_VERSION=${pg_version} PG_TEST_VERSION=${PG_TEST_VERSION} docker compose up --quiet-pull --build -d - + PG_VERSION=${pg_version} PG_TEST_VERSION=${PG_TEST_VERSION} docker compose build compute1 + PG_VERSION=${pg_version} PG_TEST_VERSION=${PG_TEST_VERSION} docker compose up --quiet-pull -d echo "wait until the compute is ready. timeout after 60s. " cnt=0 while sleep 3; do @@ -41,45 +68,50 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do echo "timeout before the compute is ready." exit 1 fi - if docker compose logs "compute_is_ready" | grep -q "accepting connections"; then + if docker compose logs compute_is_ready | grep -q "${READY_MESSAGE}"; then echo "OK. The compute is ready to connect." echo "execute simple queries." 
- docker compose exec compute /bin/bash -c "psql ${PSQL_OPTION} -c 'SELECT 1'" + for compute in "${COMPUTES[@]}"; do + docker compose exec "${compute}" /bin/bash -c "psql ${PSQL_OPTION} -c 'SELECT 1'" + done break fi done if [[ ${pg_version} -ge 16 ]]; then - # This is required for the pg_hint_plan test, to prevent flaky log message causing the test to fail - # It cannot be moved to Dockerfile now because the database directory is created after the start of the container - echo Adding dummy config - docker compose exec compute touch /var/db/postgres/compute/compute_ctl_temp_override.conf - # Prepare for the PostGIS test - docker compose exec compute mkdir -p /tmp/pgis_reg/pgis_reg_tmp - TMPDIR=$(mktemp -d) - docker compose cp neon-test-extensions:/ext-src/postgis-src/raster/test "${TMPDIR}" - docker compose cp neon-test-extensions:/ext-src/postgis-src/regress/00-regress-install "${TMPDIR}" - docker compose exec compute mkdir -p /ext-src/postgis-src/raster /ext-src/postgis-src/regress /ext-src/postgis-src/regress/00-regress-install - docker compose cp "${TMPDIR}/test" compute:/ext-src/postgis-src/raster/test - docker compose cp "${TMPDIR}/00-regress-install" compute:/ext-src/postgis-src/regress - rm -rf "${TMPDIR}" - # The following block copies the files for the pg_hintplan test to the compute node for the extension test in an isolated docker-compose environment - TMPDIR=$(mktemp -d) - docker compose cp neon-test-extensions:/ext-src/pg_hint_plan-src/data "${TMPDIR}/data" - docker compose cp "${TMPDIR}/data" compute:/ext-src/pg_hint_plan-src/ - rm -rf "${TMPDIR}" - # The following block does the same for the contrib/file_fdw test - TMPDIR=$(mktemp -d) - docker compose cp neon-test-extensions:/postgres/contrib/file_fdw/data "${TMPDIR}/data" - docker compose cp "${TMPDIR}/data" compute:/postgres/contrib/file_fdw/data - rm -rf "${TMPDIR}" + mkdir "${CURRENT_TMPDIR}"/{pg_hint_plan-src,file_fdw,postgis-src} + docker compose cp 
neon-test-extensions:/ext-src/postgis-src/raster/test "${CURRENT_TMPDIR}/postgis-src/test" + docker compose cp neon-test-extensions:/ext-src/postgis-src/regress/00-regress-install "${CURRENT_TMPDIR}/postgis-src/00-regress-install" + docker compose cp neon-test-extensions:/ext-src/pg_hint_plan-src/data "${CURRENT_TMPDIR}/pg_hint_plan-src/data" + docker compose cp neon-test-extensions:/postgres/contrib/file_fdw/data "${CURRENT_TMPDIR}/file_fdw/data" + + for compute in "${COMPUTES[@]}"; do + # This is required for the pg_hint_plan test, to prevent flaky log message causing the test to fail + # It cannot be moved to Dockerfile now because the database directory is created after the start of the container + echo Adding dummy config on "${compute}" + docker compose exec "${compute}" touch /var/db/postgres/compute/compute_ctl_temp_override.conf + # Prepare for the PostGIS test + docker compose exec "${compute}" mkdir -p /tmp/pgis_reg/pgis_reg_tmp /ext-src/postgis-src/raster /ext-src/postgis-src/regress /ext-src/postgis-src/regress/00-regress-install + docker compose cp "${CURRENT_TMPDIR}/postgis-src/test" "${compute}":/ext-src/postgis-src/raster/test + docker compose cp "${CURRENT_TMPDIR}/postgis-src/00-regress-install" "${compute}":/ext-src/postgis-src/regress + # The following block copies the files for the pg_hintplan test to the compute node for the extension test in an isolated docker-compose environment + docker compose cp "${CURRENT_TMPDIR}/pg_hint_plan-src/data" "${compute}":/ext-src/pg_hint_plan-src/ + # The following block does the same for the contrib/file_fdw test + docker compose cp "${CURRENT_TMPDIR}/file_fdw/data" "${compute}":/postgres/contrib/file_fdw/data + done # Apply patches docker compose exec -T neon-test-extensions bash -c "(cd /postgres && patch -p1)" <"../compute/patches/contrib_pg${pg_version}.patch" # We are running tests now rm -f testout.txt testout_contrib.txt + # We want to run the longest tests first to better utilize parallelization and 
reduce overall test time. + # Tests listed in the RUN_FIRST variable will be run before others. + # If parallelization is not used, this environment variable will be ignored. + docker compose exec -e USE_PGXS=1 -e SKIP=timescaledb-src,rdkit-src,pg_jsonschema-src,kq_imcx-src,wal2json_2_5-src,rag_jina_reranker_v1_tiny_en-src,rag_bge_small_en_v15-src \ + -e RUN_FIRST=hll-src,postgis-src,pgtap-src -e PARALLEL_COMPUTES="${PARALLEL_COMPUTES}" \ neon-test-extensions /run-tests.sh /ext-src | tee testout.txt && EXT_SUCCESS=1 || EXT_SUCCESS=0 docker compose exec -e SKIP=start-scripts,postgres_fdw,ltree_plpython,jsonb_plpython,jsonb_plperl,hstore_plpython,hstore_plperl,dblink,bool_plperl \ + -e PARALLEL_COMPUTES="${PARALLEL_COMPUTES}" \ neon-test-extensions /run-tests.sh /postgres/contrib | tee testout_contrib.txt && CONTRIB_SUCCESS=1 || CONTRIB_SUCCESS=0 if [[ ${EXT_SUCCESS} -eq 0 || ${CONTRIB_SUCCESS} -eq 0 ]]; then CONTRIB_FAILED= diff --git a/docker-compose/run-tests.sh b/docker-compose/run-tests.sh index 930402ce66..b37b9363fa 100755 --- a/docker-compose/run-tests.sh +++ b/docker-compose/run-tests.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -x if [[ -v BENCHMARK_CONNSTR ]]; then @@ -26,8 +26,9 @@ if [[ -v BENCHMARK_CONNSTR ]]; then fi fi REGULAR_USER=false -while getopts r arg; do - case $arg in +PARALLEL_COMPUTES=${PARALLEL_COMPUTES:-1} +while getopts pr arg; do + case ${arg} in r) REGULAR_USER=true shift $((OPTIND-1)) @@ -41,26 +42,49 @@ extdir=${1} cd "${extdir}" || exit 2 FAILED= -LIST=$( (echo -e "${SKIP//","/"\n"}"; ls) | sort | uniq -u) -for d in ${LIST}; do - [ -d "${d}" ] || continue - if ! 
psql -w -c "select 1" >/dev/null; then - FAILED="${d} ${FAILED}" - break - fi - if [[ ${REGULAR_USER} = true ]] && [ -f "${d}"/regular-test.sh ]; then - "${d}/regular-test.sh" || FAILED="${d} ${FAILED}" - continue - fi +export FAILED_FILE=/tmp/failed +rm -f ${FAILED_FILE} +mapfile -t LIST < <( (echo -e "${SKIP//","/"\n"}"; ls) | sort | uniq -u) +if [[ ${PARALLEL_COMPUTES} -gt 1 ]]; then + # Avoid errors if RUN_FIRST is not defined + RUN_FIRST=${RUN_FIRST:-} + # Move entries listed in the RUN_FIRST variable to the beginning + ORDERED_LIST=$(printf "%s\n" "${LIST[@]}" | grep -x -Ff <(echo -e "${RUN_FIRST//,/$'\n'}"); printf "%s\n" "${LIST[@]}" | grep -vx -Ff <(echo -e "${RUN_FIRST//,/$'\n'}")) + parallel -j"${PARALLEL_COMPUTES}" "[[ -d {} ]] || exit 0 + export PGHOST=compute{%} + if ! psql -c 'select 1'>/dev/null; then + exit 1 + fi + echo Running on \${PGHOST} + if [[ -f ${extdir}/{}/neon-test.sh ]]; then + echo Running from script + ${extdir}/{}/neon-test.sh || echo {} >> ${FAILED_FILE}; + else + echo Running using make; + USE_PGXS=1 make -C {} installcheck || echo {} >> ${FAILED_FILE}; + fi" ::: ${ORDERED_LIST} + [[ ! -f ${FAILED_FILE} ]] && exit 0 +else + for d in "${LIST[@]}"; do + [ -d "${d}" ] || continue + if ! psql -w -c "select 1" >/dev/null; then + FAILED="${d} ${FAILED}" + break + fi + if [[ ${REGULAR_USER} = true ]] && [ -f "${d}"/regular-test.sh ]; then + "${d}/regular-test.sh" || FAILED="${d} ${FAILED}" + continue + fi - if [ -f "${d}/neon-test.sh" ]; then - "${d}/neon-test.sh" || FAILED="${d} ${FAILED}" - else - USE_PGXS=1 make -C "${d}" installcheck || FAILED="${d} ${FAILED}" - fi -done -[ -z "${FAILED}" ] && exit 0 -for d in ${FAILED}; do + if [ -f "${d}/neon-test.sh" ]; then + "${d}/neon-test.sh" || FAILED="${d} ${FAILED}" + else + USE_PGXS=1 make -C "${d}" installcheck || FAILED="${d} ${FAILED}" + fi + done + [[ -z ${FAILED} ]] && exit 0 +fi +for d in ${FAILED} $([[ ! 
-f ${FAILED_FILE} ]] || cat ${FAILED_FILE}); do cat "$(find $d -name regression.diffs)" done for postgis_diff in /tmp/pgis_reg/*_diff; do @@ -68,4 +92,5 @@ for postgis_diff in /tmp/pgis_reg/*_diff; do cat "${postgis_diff}" done echo "${FAILED}" +cat ${FAILED_FILE} exit 1 diff --git a/docker-compose/test_extensions_upgrade.sh b/docker-compose/test_extensions_upgrade.sh index f1cf17f531..1d39fc029e 100755 --- a/docker-compose/test_extensions_upgrade.sh +++ b/docker-compose/test_extensions_upgrade.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -eux -o pipefail cd "$(dirname "${0}")" # Takes a variable name as argument. The result is stored in that variable. @@ -60,8 +60,8 @@ function check_timeline() { # Restarts the compute node with the required compute tag and timeline. # Accepts the tag for the compute node and the timeline as parameters. function restart_compute() { - docker compose down compute compute_is_ready - COMPUTE_TAG=${1} TENANT_ID=${tenant_id} TIMELINE_ID=${2} docker compose up --quiet-pull -d --build compute compute_is_ready + docker compose down compute1 compute_is_ready + COMPUTE_TAG=${1} TENANT_ID=${tenant_id} TIMELINE_ID=${2} docker compose up --quiet-pull -d --build compute1 compute_is_ready wait_for_ready check_timeline ${2} } diff --git a/docs/core_changes.md b/docs/core_changes.md index 1388317728..abfd20af26 100644 --- a/docs/core_changes.md +++ b/docs/core_changes.md @@ -129,9 +129,10 @@ segment to bootstrap the WAL writing, but it doesn't contain the checkpoint reco changes in xlog.c, to allow starting the compute node without reading the last checkpoint record from WAL. -This includes code to read the `zenith.signal` file, which tells the startup code the LSN to start -at. When the `zenith.signal` file is present, the startup uses that LSN instead of the last -checkpoint's LSN. The system is known to be consistent at that LSN, without any WAL redo. 
+This includes code to read the `neon.signal` (also `zenith.signal`) file, which tells the startup +code the LSN to start at. When the `neon.signal` file is present, the startup uses that LSN +instead of the last checkpoint's LSN. The system is known to be consistent at that LSN, without +any WAL redo. ### How to get rid of the patch diff --git a/docs/pageserver-services.md b/docs/pageserver-services.md index 11d984eb08..3c430c6236 100644 --- a/docs/pageserver-services.md +++ b/docs/pageserver-services.md @@ -75,7 +75,7 @@ CLI examples: * AWS S3 : `env AWS_ACCESS_KEY_ID='SOMEKEYAAAAASADSAH*#' AWS_SECRET_ACCESS_KEY='SOMEsEcReTsd292v' ${PAGESERVER_BIN} -c "remote_storage={bucket_name='some-sample-bucket',bucket_region='eu-north-1', prefix_in_bucket='/test_prefix/'}"` For Amazon AWS S3, a key id and secret access key could be located in `~/.aws/credentials` if awscli was ever configured to work with the desired bucket, on the AWS Settings page for a certain user. Also note, that the bucket names does not contain any protocols when used on AWS. -For local S3 installations, refer to the their documentation for name format and credentials. +For local S3 installations, refer to their documentation for name format and credentials. Similar to other pageserver settings, toml config file can be used to configure either of the storages as backup targets. Required sections are: diff --git a/docs/rfcs/035-safekeeper-dynamic-membership-change.md b/docs/rfcs/035-safekeeper-dynamic-membership-change.md index 9b320c7285..8619f83ff5 100644 --- a/docs/rfcs/035-safekeeper-dynamic-membership-change.md +++ b/docs/rfcs/035-safekeeper-dynamic-membership-change.md @@ -20,7 +20,7 @@ In our case consensus leader is compute (walproposer), and we don't want to wake up all computes for the change. Neither we want to fully reimplement the leader logic second time outside compute. 
Because of that the proposed algorithm relies for issuing configurations on the external fault tolerant (distributed) strongly -consisent storage with simple API: CAS (compare-and-swap) on the single key. +consistent storage with simple API: CAS (compare-and-swap) on the single key. Properly configured postgres suits this. In the system consensus is implemented at the timeline level, so algorithm below @@ -34,7 +34,7 @@ A configuration is ``` struct Configuration { - generation: Generation, // a number uniquely identifying configuration + generation: SafekeeperGeneration, // a number uniquely identifying configuration sk_set: Vec, // current safekeeper set new_sk_set: Optional>, } @@ -81,11 +81,11 @@ configuration generation in them is less than its current one. Namely, it refuses to vote, to truncate WAL in `handle_elected` and to accept WAL. In response it sends its current configuration generation to let walproposer know. -Safekeeper gets `PUT /v1/tenants/{tenant_id}/timelines/{timeline_id}/configuration` -accepting `Configuration`. Safekeeper switches to the given conf it is higher than its +Safekeeper gets `PUT /v1/tenants/{tenant_id}/timelines/{timeline_id}/membership` +accepting `Configuration`. Safekeeper switches to the given conf if it is higher than its current one and ignores it otherwise. In any case it replies with ``` -struct ConfigurationSwitchResponse { +struct TimelineMembershipSwitchResponse { conf: Configuration, term: Term, last_log_term: Term, @@ -108,7 +108,7 @@ establishes this configuration as its own and moves to voting. It should stop talking to safekeepers not listed in the configuration at this point, though it is not unsafe to continue doing so. -To be elected it must receive votes from both majorites if `new_sk_set` is present. +To be elected it must receive votes from both majorities if `new_sk_set` is present. Similarly, to commit WAL it must receive flush acknowledge from both majorities. 
If walproposer hears from safekeeper configuration higher than his own (i.e. @@ -130,7 +130,7 @@ storage are reachable. 1) Fetch current timeline configuration from the configuration storage. 2) If it is already joint one and `new_set` is different from `desired_set` refuse to change. However, assign join conf to (in memory) var - `join_conf` and proceed to step 4 to finish the ongoing change. + `joint_conf` and proceed to step 4 to finish the ongoing change. 3) Else, create joint `joint_conf: Configuration`: increment current conf number `n` and put `desired_set` to `new_sk_set`. Persist it in the configuration storage by doing CAS on the current generation: change happens only if @@ -161,11 +161,11 @@ storage are reachable. because `pull_timeline` already includes it and plus additionally would be broadcast by compute. More importantly, we may proceed to the next step only when `` on the majority of the new set reached - `sync_position`. Similarly, on the happy path no waiting is not needed because + `sync_position`. Similarly, on the happy path no waiting is needed because `pull_timeline` already includes it. However, we should double check to be safe. For example, timeline could have been created earlier e.g. manually or after try-to-migrate, abort, try-to-migrate-again sequence. -7) Create `new_conf: Configuration` incrementing `join_conf` generation and having new +7) Create `new_conf: Configuration` incrementing `joint_conf` generation and having new safekeeper set as `sk_set` and None `new_sk_set`. Write it to configuration storage under one more CAS. 8) Call `PUT` `configuration` on safekeepers from the new set, @@ -178,12 +178,12 @@ spec of it. Description above focuses on safety. To make the flow practical and live, here a few more considerations. -1) It makes sense to ping new set to ensure it we are migrating to live node(s) before +1) It makes sense to ping new set to ensure we are migrating to live node(s) before step 3. 2) If e.g. 
accidentally wrong new sk set has been specified, before CAS in step `6` is completed it is safe to rollback to the old conf with one more CAS. 3) On step 4 timeline might be already created on members of the new set for various reasons; - the simplest is the procedure restart. There are more complicated scenarious like mentioned + the simplest is the procedure restart. There are more complicated scenarios like mentioned in step 5. Deleting and re-doing `pull_timeline` is generally unsafe without involving generations, so seems simpler to treat existing timeline as success. However, this also has a disadvantage: you might imagine an surpassingly unlikely schedule where condition in @@ -192,7 +192,7 @@ considerations. 4) In the end timeline should be locally deleted on the safekeeper(s) which are in the old set but not in the new one, unless they are unreachable. To be safe this also should be done under generation number (deletion proceeds only if - current configuration is <= than one in request and safekeeper is not memeber of it). + current configuration is <= than one in request and safekeeper is not member of it). 5) If current conf fetched on step 1 is already not joint and members equal to `desired_set`, jump to step 7, using it as `new_conf`. @@ -261,14 +261,14 @@ Timeline (branch) creation in cplane should call storage_controller POST Response should be augmented with `safekeepers_generation` and `safekeepers` fields like described in `/notify-safekeepers` above. Initially (currently) these fields may be absent; in this case cplane chooses safekeepers on its own -like it currently does. The call should be retried until succeeds. +like it currently does. The call should be retried until it succeeds. Timeline deletion and tenant deletion in cplane should call appropriate storage_controller endpoints like it currently does for sharded tenants. The calls should be retried until they succeed. 
-When compute receives safekeepers list from control plane it needs to know the -generation to checked whether it should be updated (note that compute may get +When compute receives safekeeper list from control plane it needs to know the +generation to check whether it should be updated (note that compute may get safekeeper list from either cplane or safekeepers). Currently `neon.safekeepers` GUC is just a comma separates list of `host:port`. Let's prefix it with `g#:` to this end, so it will look like @@ -305,8 +305,8 @@ enum MigrationRequest { ``` `FinishPending` requests to run the procedure to ensure state is clean: current -configuration is not joint and majority of safekeepers are aware of it, but do -not attempt to migrate anywhere. If current configuration fetched on step 1 is +configuration is not joint and the majority of safekeepers are aware of it, but do +not attempt to migrate anywhere. If the current configuration fetched on step 1 is not joint it jumps to step 7. It should be run at startup for all timelines (but similarly, in the first version it is ok to trigger it manually). @@ -315,7 +315,7 @@ similarly, in the first version it is ok to trigger it manually). `safekeepers` table mirroring current `nodes` should be added, except that for `scheduling_policy`: it is enough to have at least in the beginning only 3 fields: 1) `active` 2) `paused` (initially means only not assign new tlis there -3) `decomissioned` (node is removed). +3) `decommissioned` (node is removed). `timelines` table: ``` @@ -326,9 +326,10 @@ table! { tenant_id -> Varchar, start_lsn -> pg_lsn, generation -> Int4, - sk_set -> Array, // list of safekeeper ids + sk_set -> Array, // list of safekeeper ids new_sk_set -> Nullable>, // list of safekeeper ids, null if not joint conf cplane_notified_generation -> Int4, + sk_set_notified_generation -> Int4, // the generation a quorum of sk_set knows about deleted_at -> Nullable, } } @@ -338,13 +339,23 @@ table! 
{ might also want to add ancestor_timeline_id to preserve the hierarchy, but for this RFC it is not needed. +`cplane_notified_generation` and `sk_set_notified_generation` fields are used to +track the last stage of the algorithm, when we need to notify safekeeper set and cplane +with the final configuration after it's already committed to DB. + +The timeline is up-to-date (no migration in progress) if `new_sk_set` is null and +`*_notified_generation` fields are up to date with `generation`. + +It's possible to replace `*_notified_generation` with one boolean field `migration_completed`, +but for better observability it's nice to have them separately. + #### API Node management is similar to pageserver: -1) POST `/control/v1/safekeepers` inserts safekeeper. -2) GET `/control/v1/safekeepers` lists safekeepers. -3) GET `/control/v1/safekeepers/:node_id` gets safekeeper. -4) PUT `/control/v1/safekepers/:node_id/status` changes status to e.g. +1) POST `/control/v1/safekeeper` inserts safekeeper. +2) GET `/control/v1/safekeeper` lists safekeepers. +3) GET `/control/v1/safekeeper/:node_id` gets safekeeper. +4) PUT `/control/v1/safekeeper/:node_id/scheduling_policy` changes status to e.g. `offline` or `decomissioned`. Initially it is simpler not to schedule any migrations here. @@ -368,8 +379,8 @@ Migration API: the first version is the simplest and the most imperative: all timelines from one safekeeper to another. It accepts json ``` { - "src_sk": u32, - "dst_sk": u32, + "src_sk": NodeId, + "dst_sk": NodeId, "limit": Optional, } ``` @@ -379,12 +390,15 @@ Returns list of scheduled requests. 2) PUT `/control/v1/tenant/:tenant_id/timeline/:timeline_id/safekeeper_migrate` schedules `MigrationRequest` to move single timeline to given set of safekeepers: ``` -{ - "desired_set": Vec, +struct TimelineSafekeeperMigrateRequest { + "new_sk_set": Vec, } ``` -Returns scheduled request. +In the first version the handler migrates the timeline to `new_sk_set` synchronously. 
+Should be retried until success. + +In the future we might change it to asynchronous API and return scheduled request. Similar call should be added for the tenant. @@ -434,6 +448,9 @@ table! { } ``` +We load all pending ops from the table on startup into the memory. +The table is needed only to preserve the state between restarts. + `op_type` can be `include` (seed from peers and ensure generation is up to date), `exclude` (remove locally) and `delete`. Field is actually not strictly needed as it can be computed from current configuration, but gives more explicit @@ -474,7 +491,7 @@ actions must be idempotent. Now, a tricky point here is timeline start LSN. For the initial (tenant creation) call cplane doesn't know it. However, setting start_lsn on safekeepers during creation is a good thing -- it provides a guarantee that walproposer can always find a common point in WAL histories of -safekeeper and its own, and so absense of it would be a clear sign of +safekeeper and its own, and so absence of it would be a clear sign of corruption. The following sequence works: 1) Create timeline (or observe that it exists) on pageserver, figuring out last_record_lsn in response. @@ -497,11 +514,9 @@ corruption. The following sequence works: retries the call until 200 response. There is a small question how request handler (timeline creation in this - case) would interact with per sk reconciler. As always I prefer to do the - simplest possible thing and here it seems to be just waking it up so it - re-reads the db for work to do. Passing work in memory is faster, but - that shouldn't matter, and path to scan db for work will exist anyway, - simpler to reuse it. + case) would interact with per sk reconciler. In the current implementation + we first persist the request in the DB, and then send an in-memory request + to each safekeeper reconciler to process it. 
For pg version / wal segment size: while we may persist them in `timelines` table, it is not necessary as initial creation at step 3 can take them from @@ -509,30 +524,40 @@ pageserver or cplane creation call and later pull_timeline will carry them around. Timeline migration. -1) CAS to the db to create joint conf, and in the same transaction create - `safekeeper_timeline_pending_ops` `include` entries to initialize new members - as well as deliver this conf to current ones; poke per sk reconcilers to work - on it. Also any conf change should also poke cplane notifier task(s). -2) Once it becomes possible per alg description above, get out of joint conf - with another CAS. Task should get wakeups from per sk reconcilers because - conf switch is required for advancement; however retries should be sleep - based as well as LSN advancement might be needed, though in happy path - it isn't. To see whether further transition is possible on wakup migration - executor polls safekeepers per the algorithm. CAS creating new conf with only - new members should again insert entries to `safekeeper_timeline_pending_ops` - to switch them there, as well as `exclude` rows to remove timeline from - old members. +1) CAS to the db to create joint conf. Since this moment the migration is considered to be + "in progress". We can detect all "in-progress" migrations looking into the database. +2) Do steps 4-6 from the algorithm, including `pull_timeline` onto `new_sk_set`, update membership + configuration on all safekeepers, notify cplane, etc. All operations are idempotent, + so we don't need to persist anything in the database at this stage. If any errors occur, + it's safe to retry or abort the migration. +3) Once it becomes possible per alg description above, get out of joint conf + with another CAS. Also should insert `exclude` entries into `safekeeper_timeline_pending_ops` + in the same DB transaction. 
Adding `exclude` entries atomically is necessary because after + CAS we don't have the list of excluded safekeepers in the `timelines` table anymore, but we + need to have them persisted somewhere in case the migration is interrupted right after the CAS. +4) Finish the migration. The final membership configuration is committed to the DB at this stage. + So, the migration can not be aborted anymore. But it can still be retried if the migration fails + past stage 3. To finish the migration we need to send the new membership configuration to + a new quorum of safekeepers, notify cplane with the new safekeeper list and schedule the `exclude` + requests to in-memory queue for safekeeper reconciler. If the algorithm is retried, it's + possible that we have already committed `exclude` requests to DB, but didn't send them to + the in-memory queue. In this case we need to read them from `safekeeper_timeline_pending_ops` + because it's the only place where they are persistent. The fields `sk_set_notified_generation` + and `cplane_notified_generation` are updated after each step. The migration is considered + fully completed when they match the `generation` field. + +In practice, we can report "success" after stage 3 and do the "finish" step in per-timeline +reconciler (if we implement it). But it's wise to at least try to finish them synchronously, +so the timeline is always in a "good state" and doesn't require an old quorum to commit +WAL after the migration reported "success". Timeline deletion: just set `deleted_at` on the timeline row and insert `safekeeper_timeline_pending_ops` entries in the same xact, the rest is done by per sk reconcilers. -When node is removed (set to `decomissioned`), `safekeeper_timeline_pending_ops` +When node is removed (set to `decommissioned`), `safekeeper_timeline_pending_ops` for it must be cleared in the same transaction. 
-One more task pool should infinitely retry notifying control plane about changed -safekeeper sets (trying making `cplane_notified_generation` equal `generation`). - #### Dealing with multiple instances of storage_controller Operations described above executed concurrently might create some errors but do @@ -541,7 +566,7 @@ of storage_controller it is fine to have it temporarily, e.g. during redeploy. To harden against some controller instance creating some work in `safekeeper_timeline_pending_ops` and then disappearing without anyone pickup up -the job per sk reconcilers apart from explicit wakups should scan for work +the job per sk reconcilers apart from explicit wakeups should scan for work periodically. It is possible to remove that though if all db updates are protected with leadership token/term -- then such scans are needed only after leadership is acquired. @@ -563,7 +588,7 @@ There should be following layers of tests: safekeeper communication and pull_timeline need to be mocked and main switch procedure wrapped to as a node (thread) in simulation tests, using these mocks. Test would inject migrations like it currently injects - safekeeper/walproposer restars. Main assert is the same -- committed WAL must + safekeeper/walproposer restarts. Main assert is the same -- committed WAL must not be lost. 3) Since simulation testing injects at relatively high level points (not @@ -613,7 +638,7 @@ Let's have the following implementation bits for gradual rollout: `notify-safekeepers`. Then the rollout for a region would be: -- Current situation: safekeepers are choosen by control_plane. +- Current situation: safekeepers are chosen by control_plane. - We manually migrate some timelines, test moving them around. - Then we enable `--set-safekeepers` so that all new timelines are on storage controller. 
diff --git a/endpoint_storage/src/app.rs b/endpoint_storage/src/app.rs index 42431c0066..a7a18743ef 100644 --- a/endpoint_storage/src/app.rs +++ b/endpoint_storage/src/app.rs @@ -13,6 +13,8 @@ use utils::backoff::retry; pub fn app(state: Arc) -> Router<()> { use axum::routing::{delete as _delete, get as _get}; let delete_prefix = _delete(delete_prefix); + // NB: On any changes do not forget to update the OpenAPI spec + // in /endpoint_storage/src/openapi_spec.yml. Router::new() .route( "/{tenant_id}/{timeline_id}/{endpoint_id}/{*path}", diff --git a/endpoint_storage/src/openapi_spec.yml b/endpoint_storage/src/openapi_spec.yml new file mode 100644 index 0000000000..8d9abf902c --- /dev/null +++ b/endpoint_storage/src/openapi_spec.yml @@ -0,0 +1,146 @@ +openapi: "3.0.2" +info: + title: Endpoint Storage API + description: Endpoint Storage API + version: "1.0" + license: + name: "Apache" + url: https://github.com/neondatabase/neon/blob/main/LICENSE +servers: + - url: "" +paths: + /status: + description: Healthcheck endpoint + get: + description: Healthcheck + security: [] + responses: + "200": + description: OK + + /{tenant_id}/{timeline_id}/{endpoint_id}/{key}: + parameters: + - name: tenant_id + in: path + required: true + schema: + type: string + - name: timeline_id + in: path + required: true + schema: + type: string + - name: endpoint_id + in: path + required: true + schema: + type: string + - name: key + in: path + required: true + schema: + type: string + get: + description: Get file from blob storage + responses: + "200": + description: "File stream from blob storage" + content: + application/octet-stream: + schema: + type: string + format: binary + "400": + description: File was not found + "403": + description: JWT does not authorize request to this route + put: + description: Insert file into blob storage. 
If file exists, override it + requestBody: + content: + application/octet-stream: + schema: + type: string + format: binary + responses: + "200": + description: File was inserted successfully + "403": + description: JWT does not authorize request to this route + delete: + description: Delete file from blob storage + responses: + "200": + description: File was successfully deleted or not found + "403": + description: JWT does not authorize request to this route + + /{tenant_id}/{timeline_id}/{endpoint_id}: + parameters: + - name: tenant_id + in: path + required: true + schema: + type: string + - name: timeline_id + in: path + required: true + schema: + type: string + - name: endpoint_id + in: path + required: true + schema: + type: string + delete: + description: Delete endpoint data from blob storage + responses: + "200": + description: Endpoint data was deleted + "403": + description: JWT does not authorize request to this route + + /{tenant_id}/{timeline_id}: + parameters: + - name: tenant_id + in: path + required: true + schema: + type: string + - name: timeline_id + in: path + required: true + schema: + type: string + delete: + description: Delete timeline data from blob storage + responses: + "200": + description: Timeline data was deleted + "403": + description: JWT does not authorize request to this route + + /{tenant_id}: + parameters: + - name: tenant_id + in: path + required: true + schema: + type: string + delete: + description: Delete tenant data from blob storage + responses: + "200": + description: Tenant data was deleted + "403": + description: JWT does not authorize request to this route + +components: + securitySchemes: + JWT: + type: http + scheme: bearer + bearerFormat: JWT + +security: + - JWT: [] diff --git a/libs/compute_api/src/responses.rs b/libs/compute_api/src/responses.rs index a54411b06a..5b8fc49750 100644 --- a/libs/compute_api/src/responses.rs +++ b/libs/compute_api/src/responses.rs @@ -46,16 +46,45 @@ pub struct 
ExtensionInstallResponse { pub version: ExtVersion, } -#[derive(Serialize, Default, Debug, Clone)] +/// Status of the LFC prewarm process. The same state machine is reused for +/// both autoprewarm (prewarm after compute/Postgres start using the previously +/// stored LFC state) and explicit prewarming via API. +#[derive(Serialize, Default, Debug, Clone, PartialEq)] #[serde(tag = "status", rename_all = "snake_case")] pub enum LfcPrewarmState { + /// Default value when compute boots up. #[default] NotPrewarmed, + /// Prewarming thread is active and loading pages into LFC. Prewarming, + /// We found requested LFC state in the endpoint storage and + /// completed prewarming successfully. Completed, - Failed { - error: String, - }, + /// Unexpected error happened during prewarming. Note, `Not Found 404` + /// response from the endpoint storage is explicitly excluded here + /// because it can normally happen on the first compute start, + /// since LFC state is not available yet. + Failed { error: String }, + /// We tried to fetch the corresponding LFC state from the endpoint storage, + /// but received `Not Found 404`. This should normally happen only during the + /// first endpoint start after creation with `autoprewarm: true`. + /// + /// During the orchestrated prewarm via API, when a caller explicitly + /// provides the LFC state key to prewarm from, it's the caller responsibility + /// to handle this status as an error state in this case. 
+ Skipped, +} + +impl Display for LfcPrewarmState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LfcPrewarmState::NotPrewarmed => f.write_str("NotPrewarmed"), + LfcPrewarmState::Prewarming => f.write_str("Prewarming"), + LfcPrewarmState::Completed => f.write_str("Completed"), + LfcPrewarmState::Skipped => f.write_str("Skipped"), + LfcPrewarmState::Failed { error } => write!(f, "Error({error})"), + } + } } #[derive(Serialize, Default, Debug, Clone, PartialEq)] @@ -70,6 +99,23 @@ pub enum LfcOffloadState { }, } +#[derive(Serialize, Debug, Clone, PartialEq)] +#[serde(tag = "status", rename_all = "snake_case")] +/// Response of /promote +pub enum PromoteState { + NotPromoted, + Completed, + Failed { error: String }, +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "snake_case")] +/// Result of /safekeepers_lsn +pub struct SafekeepersLsn { + pub safekeepers: String, + pub wal_flush_lsn: utils::lsn::Lsn, +} + /// Response of the /status API #[derive(Serialize, Debug, Deserialize)] #[serde(rename_all = "snake_case")] @@ -93,6 +139,15 @@ pub enum TerminateMode { Immediate, } +impl From for ComputeStatus { + fn from(mode: TerminateMode) -> Self { + match mode { + TerminateMode::Fast => ComputeStatus::TerminationPendingFast, + TerminateMode::Immediate => ComputeStatus::TerminationPendingImmediate, + } + } +} + #[derive(Serialize, Clone, Copy, Debug, Deserialize, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub enum ComputeStatus { @@ -113,7 +168,9 @@ pub enum ComputeStatus { // control-plane to terminate it. 
Failed, // Termination requested - TerminationPending { mode: TerminateMode }, + TerminationPendingFast, + // Termination requested, without waiting 30s before returning from /terminate + TerminationPendingImmediate, // Terminated Postgres Terminated, } @@ -132,7 +189,10 @@ impl Display for ComputeStatus { ComputeStatus::Running => f.write_str("running"), ComputeStatus::Configuration => f.write_str("configuration"), ComputeStatus::Failed => f.write_str("failed"), - ComputeStatus::TerminationPending { .. } => f.write_str("termination-pending"), + ComputeStatus::TerminationPendingFast => f.write_str("termination-pending-fast"), + ComputeStatus::TerminationPendingImmediate => { + f.write_str("termination-pending-immediate") + } ComputeStatus::Terminated => f.write_str("terminated"), } } diff --git a/libs/compute_api/src/spec.rs b/libs/compute_api/src/spec.rs index d011f53ee9..f7ffcd6444 100644 --- a/libs/compute_api/src/spec.rs +++ b/libs/compute_api/src/spec.rs @@ -14,6 +14,7 @@ use serde::{Deserialize, Serialize}; use url::Url; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; +use utils::shard::{ShardCount, ShardIndex}; use crate::responses::TlsConfig; @@ -106,11 +107,18 @@ pub struct ComputeSpec { pub tenant_id: Option, pub timeline_id: Option, - // Pageserver information can be passed in two different ways: - // 1. Here - // 2. in cluster.settings. This is legacy, we are switching to method 1. + /// Pageserver information can be passed in three different ways: + /// 1. Here in `pageserver_connection_info` + /// 2. In the `pageserver_connstring` field. + /// 3. in `cluster.settings`. + /// + /// The goal is to use method 1. everywhere. But for backwards-compatibility with old + /// versions of the control plane, `compute_ctl` will check 2. and 3. if the + /// `pageserver_connection_info` field is missing. 
pub pageserver_connection_info: Option, + pub pageserver_connstring: Option, + // More neon ids that we expose to the compute_ctl // and to postgres as neon extension GUCs. pub project_id: Option, @@ -145,7 +153,7 @@ pub struct ComputeSpec { // Stripe size for pageserver sharding, in pages #[serde(default)] - pub shard_stripe_size: Option, + pub shard_stripe_size: Option, /// Local Proxy configuration used for JWT authentication #[serde(default)] @@ -218,16 +226,28 @@ pub enum ComputeFeature { UnknownFeature, } -/// Feature flag to signal `compute_ctl` to enable certain experimental functionality. -#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)] +#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct PageserverConnectionInfo { - pub shards: HashMap, + /// NB: 0 for unsharded tenants, 1 for sharded tenants with 1 shard, following storage + pub shard_count: ShardCount, - pub prefer_grpc: bool, + /// INVARIANT: null if shard_count is 0, otherwise non-null and immutable + pub stripe_size: Option, + + pub shards: HashMap, + + #[serde(default)] + pub prefer_protocol: PageserverProtocol, } -#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)] +#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] +pub struct PageserverShardInfo { + pub pageservers: Vec, +} + +#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq)] pub struct PageserverShardConnectionInfo { + pub id: Option, pub libpq_url: Option, pub grpc_url: Option, } @@ -465,13 +485,15 @@ pub struct JwksSettings { pub jwt_audience: Option, } -/// Protocol used to connect to a Pageserver. Parsed from the connstring scheme. -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +/// Protocol used to connect to a Pageserver. +#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] pub enum PageserverProtocol { /// The original protocol based on libpq and COPY. Uses postgresql:// or postgres:// scheme. 
#[default] + #[serde(rename = "libpq")] Libpq, /// A newer, gRPC-based protocol. Uses grpc:// scheme. + #[serde(rename = "grpc")] Grpc, } diff --git a/libs/http-utils/src/endpoint.rs b/libs/http-utils/src/endpoint.rs index f32ced1180..a61bf8e08a 100644 --- a/libs/http-utils/src/endpoint.rs +++ b/libs/http-utils/src/endpoint.rs @@ -20,6 +20,7 @@ use tokio_stream::wrappers::ReceiverStream; use tokio_util::io::ReaderStream; use tracing::{Instrument, debug, info, info_span, warn}; use utils::auth::{AuthError, Claims, SwappableJwtAuth}; +use utils::metrics_collector::{METRICS_COLLECTOR, METRICS_STALE_MILLIS}; use crate::error::{ApiError, api_error_handler, route_error_handler}; use crate::request::{get_query_param, parse_query_param}; @@ -250,9 +251,28 @@ impl std::io::Write for ChannelWriter { } } -pub async fn prometheus_metrics_handler(_req: Request) -> Result, ApiError> { +pub async fn prometheus_metrics_handler( + req: Request, + force_metric_collection_on_scrape: bool, +) -> Result, ApiError> { SERVE_METRICS_COUNT.inc(); + // HADRON + let requested_use_latest = parse_query_param(&req, "use_latest")?; + + let use_latest = match requested_use_latest { + None => force_metric_collection_on_scrape, + Some(true) => true, + Some(false) => { + if force_metric_collection_on_scrape { + // We don't cache in this case + true + } else { + false + } + } + }; + let started_at = std::time::Instant::now(); let (tx, rx) = mpsc::channel(1); @@ -277,12 +297,18 @@ pub async fn prometheus_metrics_handler(_req: Request) -> Result) -> Result { tracing::info!( @@ -303,6 +333,7 @@ pub async fn prometheus_metrics_handler(_req: Request) -> Result( Some(q) => q, None => return Ok(None), }; - let mut values = url::form_urlencoded::parse(query.as_bytes()) + let values = url::form_urlencoded::parse(query.as_bytes()) .filter_map(|(k, v)| if k == param_name { Some(v) } else { None }) // we call .next() twice below. 
If it's None the first time, .fuse() ensures it's None afterwards .fuse(); - let value1 = values.next(); - if values.next().is_some() { - return Err(ApiError::BadRequest(anyhow!( - "param {param_name} specified more than once" - ))); - } + // Work around an issue with Alloy's pyroscope scrape where the "seconds" + // parameter is added several times. https://github.com/grafana/alloy/issues/3026 + // TODO: revert after Alloy is fixed. + let value1 = values + .map(Ok) + .reduce(|acc, i| { + match acc { + Err(_) => acc, + + // It's okay to have duplicates as long as they have the same value. + Ok(ref a) if a == &i.unwrap() => acc, + + _ => Err(ApiError::BadRequest(anyhow!( + "param {param_name} specified more than once" + ))), + } + }) + .transpose()?; + // if values.next().is_some() { + // return Err(ApiError::BadRequest(anyhow!( + // "param {param_name} specified more than once" + // ))); + // } + Ok(value1) } @@ -92,3 +110,39 @@ pub async fn ensure_no_body(request: &mut Request) -> Result<(), ApiError> None => Ok(()), } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_get_query_param_duplicate() { + let req = Request::builder() + .uri("http://localhost:12345/testuri?testparam=1") + .body(hyper::Body::empty()) + .unwrap(); + let value = get_query_param(&req, "testparam").unwrap(); + assert_eq!(value.unwrap(), "1"); + + let req = Request::builder() + .uri("http://localhost:12345/testuri?testparam=1&testparam=1") + .body(hyper::Body::empty()) + .unwrap(); + let value = get_query_param(&req, "testparam").unwrap(); + assert_eq!(value.unwrap(), "1"); + + let req = Request::builder() + .uri("http://localhost:12345/testuri") + .body(hyper::Body::empty()) + .unwrap(); + let value = get_query_param(&req, "testparam").unwrap(); + assert!(value.is_none()); + + let req = Request::builder() + .uri("http://localhost:12345/testuri?testparam=1&testparam=2&testparam=3") + .body(hyper::Body::empty()) + .unwrap(); + let value = get_query_param(&req, "testparam"); 
+ assert!(value.is_err()); + } +} diff --git a/libs/metrics/src/lib.rs b/libs/metrics/src/lib.rs index 5d028ee041..41873cdcd6 100644 --- a/libs/metrics/src/lib.rs +++ b/libs/metrics/src/lib.rs @@ -4,12 +4,14 @@ //! a default registry. #![deny(clippy::undocumented_unsafe_blocks)] +use std::sync::RwLock; + use measured::label::{LabelGroupSet, LabelGroupVisitor, LabelName, NoLabels}; use measured::metric::counter::CounterState; use measured::metric::gauge::GaugeState; use measured::metric::group::Encoding; use measured::metric::name::{MetricName, MetricNameEncoder}; -use measured::metric::{MetricEncoding, MetricFamilyEncoding}; +use measured::metric::{MetricEncoding, MetricFamilyEncoding, MetricType}; use measured::{FixedCardinalityLabel, LabelGroup, MetricGroup}; use once_cell::sync::Lazy; use prometheus::Registry; @@ -116,12 +118,52 @@ pub fn pow2_buckets(start: usize, end: usize) -> Vec { .collect() } +pub struct InfoMetric { + label: RwLock, + metric: M, +} + +impl InfoMetric { + pub fn new(label: L) -> Self { + Self::with_metric(label, GaugeState::new(1)) + } +} + +impl> InfoMetric { + pub fn with_metric(label: L, metric: M) -> Self { + Self { + label: RwLock::new(label), + metric, + } + } + + pub fn set_label(&self, label: L) { + *self.label.write().unwrap() = label; + } +} + +impl MetricFamilyEncoding for InfoMetric +where + L: LabelGroup, + M: MetricEncoding, + E: Encoding, +{ + fn collect_family_into( + &self, + name: impl measured::metric::name::MetricNameEncoder, + enc: &mut E, + ) -> Result<(), E::Err> { + M::write_type(&name, enc)?; + self.metric + .collect_into(&(), &*self.label.read().unwrap(), name, enc) + } +} + pub struct BuildInfo { pub revision: &'static str, pub build_tag: &'static str, } -// todo: allow label group without the set impl LabelGroup for BuildInfo { fn visit_values(&self, v: &mut impl LabelGroupVisitor) { const REVISION: &LabelName = LabelName::from_str("revision"); @@ -131,24 +173,6 @@ impl LabelGroup for BuildInfo { } } -impl 
MetricFamilyEncoding for BuildInfo -where - GaugeState: MetricEncoding, -{ - fn collect_family_into( - &self, - name: impl measured::metric::name::MetricNameEncoder, - enc: &mut T, - ) -> Result<(), T::Err> { - enc.write_help(&name, "Build/version information")?; - GaugeState::write_type(&name, enc)?; - GaugeState { - count: std::sync::atomic::AtomicI64::new(1), - } - .collect_into(&(), self, name, enc) - } -} - #[derive(MetricGroup)] #[metric(new(build_info: BuildInfo))] pub struct NeonMetrics { @@ -165,8 +189,8 @@ pub struct NeonMetrics { #[derive(MetricGroup)] #[metric(new(build_info: BuildInfo))] pub struct LibMetrics { - #[metric(init = build_info)] - build_info: BuildInfo, + #[metric(init = InfoMetric::new(build_info))] + build_info: InfoMetric, #[metric(flatten)] rusage: Rusage, diff --git a/libs/neon-shmem/src/hash.rs b/libs/neon-shmem/src/hash.rs index a8b60ba64c..3353e39a58 100644 --- a/libs/neon-shmem/src/hash.rs +++ b/libs/neon-shmem/src/hash.rs @@ -16,6 +16,7 @@ //! //! Concurrency is managed very simply: the entire map is guarded by one shared-memory RwLock. +use std::fmt::Debug; use std::hash::{BuildHasher, Hash}; use std::mem::MaybeUninit; @@ -56,6 +57,22 @@ pub struct HashMapInit<'a, K, V, S = rustc_hash::FxBuildHasher> { num_buckets: u32, } +impl<'a, K, V, S> Debug for HashMapInit<'a, K, V, S> +where + K: Debug, + V: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("HashMapInit") + .field("shmem_handle", &self.shmem_handle) + .field("shared_ptr", &self.shared_ptr) + .field("shared_size", &self.shared_size) + // .field("hasher", &self.hasher) + .field("num_buckets", &self.num_buckets) + .finish() + } +} + /// This is a per-process handle to a hash table that (possibly) lives in shared memory. /// If a child process is launched with fork(), the child process should /// get its own HashMapAccess by calling HashMapInit::attach_writer/reader(). 
@@ -71,6 +88,20 @@ pub struct HashMapAccess<'a, K, V, S = rustc_hash::FxBuildHasher> { unsafe impl Sync for HashMapAccess<'_, K, V, S> {} unsafe impl Send for HashMapAccess<'_, K, V, S> {} +impl<'a, K, V, S> Debug for HashMapAccess<'a, K, V, S> +where + K: Debug, + V: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("HashMapAccess") + .field("shmem_handle", &self.shmem_handle) + .field("shared_ptr", &self.shared_ptr) + // .field("hasher", &self.hasher) + .finish() + } +} + impl<'a, K: Clone + Hash + Eq, V, S> HashMapInit<'a, K, V, S> { /// Change the 'hasher' used by the hash table. /// @@ -298,7 +329,7 @@ where /// Get a reference to the entry containing a key. /// - /// NB: THis takes a write lock as there's no way to distinguish whether the intention + /// NB: This takes a write lock as there's no way to distinguish whether the intention /// is to use the entry for reading or for writing in advance. pub fn entry(&self, key: K) -> Entry<'a, '_, K, V> { let hash = self.get_hash_value(&key); diff --git a/libs/neon-shmem/src/hash/core.rs b/libs/neon-shmem/src/hash/core.rs index 4665c36adb..0d40a1877b 100644 --- a/libs/neon-shmem/src/hash/core.rs +++ b/libs/neon-shmem/src/hash/core.rs @@ -1,5 +1,6 @@ //! Simple hash table with chaining. +use std::fmt::Debug; use std::hash::Hash; use std::mem::MaybeUninit; @@ -17,6 +18,19 @@ pub(crate) struct Bucket { pub(crate) inner: Option<(K, V)>, } +impl Debug for Bucket +where + K: Debug, + V: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Bucket") + .field("next", &self.next) + .field("inner", &self.inner) + .finish() + } +} + /// Core hash table implementation. pub(crate) struct CoreHashMap<'a, K, V> { /// Dictionary used to map hashes to bucket indices. 
@@ -31,6 +45,22 @@ pub(crate) struct CoreHashMap<'a, K, V> { pub(crate) buckets_in_use: u32, } +impl<'a, K, V> Debug for CoreHashMap<'a, K, V> +where + K: Debug, + V: Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("CoreHashMap") + .field("dictionary", &self.dictionary) + .field("buckets", &self.buckets) + .field("free_head", &self.free_head) + .field("alloc_limit", &self.alloc_limit) + .field("buckets_in_use", &self.buckets_in_use) + .finish() + } +} + /// Error for when there are no empty buckets left but one is needed. #[derive(Debug, PartialEq)] pub struct FullError; diff --git a/libs/neon-shmem/src/lib.rs b/libs/neon-shmem/src/lib.rs index 61ca168073..226cc0c22d 100644 --- a/libs/neon-shmem/src/lib.rs +++ b/libs/neon-shmem/src/lib.rs @@ -1,5 +1,3 @@ -//! Shared memory utilities for neon communicator - pub mod hash; pub mod shmem; pub mod sync; diff --git a/libs/neon-shmem/src/shmem.rs b/libs/neon-shmem/src/shmem.rs index f19f402859..9c304d6540 100644 --- a/libs/neon-shmem/src/shmem.rs +++ b/libs/neon-shmem/src/shmem.rs @@ -21,6 +21,7 @@ use nix::unistd::ftruncate as nix_ftruncate; /// the underlying file is resized. Do not access the area beyond the current size. Currently, that /// will cause the file to be expanded, but we might use `mprotect()` etc. to enforce that in the /// future. +#[derive(Debug)] pub struct ShmemHandle { /// memfd file descriptor fd: OwnedFd, @@ -35,6 +36,7 @@ pub struct ShmemHandle { } /// This is stored at the beginning in the shared memory area. 
+#[derive(Debug)] struct SharedStruct { max_size: usize, diff --git a/libs/pageserver_api/src/config.rs b/libs/pageserver_api/src/config.rs index 00d6b61399..f01c65d1bd 100644 --- a/libs/pageserver_api/src/config.rs +++ b/libs/pageserver_api/src/config.rs @@ -5,6 +5,7 @@ mod tests; use const_format::formatcp; use posthog_client_lite::PostHogClientConfig; +use utils::serde_percent::Percent; pub const DEFAULT_PG_LISTEN_PORT: u16 = 64000; pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}"); pub const DEFAULT_HTTP_LISTEN_PORT: u16 = 9898; @@ -223,8 +224,9 @@ pub struct ConfigToml { pub metric_collection_bucket: Option, #[serde(with = "humantime_serde")] pub synthetic_size_calculation_interval: Duration, - pub disk_usage_based_eviction: Option, + pub disk_usage_based_eviction: DiskUsageEvictionTaskConfig, pub test_remote_failures: u64, + pub test_remote_failures_probability: u64, pub ondemand_download_behavior_treat_error_as_warn: bool, #[serde(with = "humantime_serde")] pub background_task_maximum_delay: Duration, @@ -270,9 +272,13 @@ pub struct ConfigToml { pub timeline_import_config: TimelineImportConfig, #[serde(skip_serializing_if = "Option::is_none")] pub basebackup_cache_config: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub image_layer_generation_large_timeline_threshold: Option, + pub force_metric_collection_on_scrape: bool, } #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[serde(default)] pub struct DiskUsageEvictionTaskConfig { pub max_usage_pct: utils::serde_percent::Percent, pub min_avail_bytes: u64, @@ -283,6 +289,21 @@ pub struct DiskUsageEvictionTaskConfig { /// Select sorting for evicted layers #[serde(default)] pub eviction_order: EvictionOrder, + pub enabled: bool, +} + +impl Default for DiskUsageEvictionTaskConfig { + fn default() -> Self { + Self { + max_usage_pct: Percent::new(80).unwrap(), + min_avail_bytes: 2_000_000_000, + period: Duration::from_secs(60), + 
#[cfg(feature = "testing")] + mock_statvfs: None, + eviction_order: EvictionOrder::default(), + enabled: true, + } + } } #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] @@ -543,6 +564,11 @@ pub struct TenantConfigToml { pub gc_period: Duration, // Delta layer churn threshold to create L1 image layers. pub image_creation_threshold: usize, + // HADRON + // When the timeout is reached, PageServer will (1) force compact any remaining L0 deltas and + // (2) create image layers if there are any L1 deltas. + #[serde(with = "humantime_serde")] + pub image_layer_force_creation_period: Option, // Determines how much history is retained, to allow // branching and read replicas at an older point in time. // The unit is time. @@ -738,9 +764,10 @@ impl Default for ConfigToml { metric_collection_bucket: (None), - disk_usage_based_eviction: (None), + disk_usage_based_eviction: DiskUsageEvictionTaskConfig::default(), test_remote_failures: (0), + test_remote_failures_probability: (100), ondemand_download_behavior_treat_error_as_warn: (false), @@ -804,6 +831,8 @@ impl Default for ConfigToml { }, basebackup_cache_config: None, posthog_config: None, + image_layer_generation_large_timeline_threshold: Some(2 * 1024 * 1024 * 1024), + force_metric_collection_on_scrape: true, } } } @@ -897,6 +926,7 @@ impl Default for TenantConfigToml { gc_period: humantime::parse_duration(DEFAULT_GC_PERIOD) .expect("cannot parse default gc period"), image_creation_threshold: DEFAULT_IMAGE_CREATION_THRESHOLD, + image_layer_force_creation_period: None, pitr_interval: humantime::parse_duration(DEFAULT_PITR_INTERVAL) .expect("cannot parse default PITR interval"), walreceiver_connect_timeout: humantime::parse_duration( diff --git a/libs/pageserver_api/src/controller_api.rs b/libs/pageserver_api/src/controller_api.rs index a8c7083b17..8f86b03f72 100644 --- a/libs/pageserver_api/src/controller_api.rs +++ b/libs/pageserver_api/src/controller_api.rs @@ -1,5 +1,6 @@ use 
std::collections::{HashMap, HashSet}; use std::fmt::Display; +use std::net::IpAddr; use std::str::FromStr; use std::time::{Duration, Instant}; @@ -10,7 +11,7 @@ use serde::{Deserialize, Serialize}; use utils::id::{NodeId, TenantId, TimelineId}; use utils::lsn::Lsn; -use crate::models::{PageserverUtilization, ShardParameters, TenantConfig}; +use crate::models::{PageserverUtilization, ShardParameters, TenantConfig, TimelineInfo}; use crate::shard::{ShardStripeSize, TenantShardId}; #[derive(Serialize, Deserialize, Debug)] @@ -60,6 +61,11 @@ pub struct NodeRegisterRequest { pub listen_https_port: Option, pub availability_zone_id: AvailabilityZone, + + // Reachable IP address of the PS/SK registering, if known. + // Hadron Cluster Coordiantor will update the DNS record of the registering node + // with this IP address. + pub node_ip_addr: Option, } #[derive(Serialize, Deserialize)] @@ -126,6 +132,13 @@ pub struct TenantDescribeResponse { pub config: TenantConfig, } +#[derive(Serialize, Deserialize, Debug)] +pub struct TenantTimelineDescribeResponse { + pub shards: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + pub image_consistent_lsn: Option, +} + #[derive(Serialize, Deserialize, Debug)] pub struct NodeShardResponse { pub node_id: NodeId, @@ -538,6 +551,39 @@ pub struct SafekeeperDescribeResponse { pub scheduling_policy: SkSchedulingPolicy, } +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct TimelineSafekeeperPeer { + pub node_id: NodeId, + pub listen_http_addr: String, + pub http_port: i32, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct SCSafekeeperTimeline { + // SC does not know the tenant id. 
+ pub timeline_id: TimelineId, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct SCSafekeeperTimelinesResponse { + pub timelines: Vec, + pub safekeeper_peers: Vec, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct SafekeeperTimeline { + pub tenant_id: TenantId, + pub timeline_id: TimelineId, + pub peers: Vec, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct SafekeeperTimelinesResponse { + pub timelines: Vec, + pub safekeeper_peers: Vec, +} + #[derive(Serialize, Deserialize, Clone)] pub struct SafekeeperSchedulingPolicyRequest { pub scheduling_policy: SkSchedulingPolicy, diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 16545364c1..11e02a8550 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -384,7 +384,7 @@ pub struct SafekeepersInfo { pub safekeepers: Vec, } -#[derive(Serialize, Deserialize, Clone)] +#[derive(Serialize, Deserialize, Clone, Debug)] pub struct SafekeeperInfo { pub id: NodeId, pub hostname: String, @@ -597,6 +597,9 @@ pub struct TenantConfigPatch { pub gc_period: FieldPatch, #[serde(skip_serializing_if = "FieldPatch::is_noop")] pub image_creation_threshold: FieldPatch, + // HADRON + #[serde(skip_serializing_if = "FieldPatch::is_noop")] + pub image_layer_force_creation_period: FieldPatch, #[serde(skip_serializing_if = "FieldPatch::is_noop")] pub pitr_interval: FieldPatch, #[serde(skip_serializing_if = "FieldPatch::is_noop")] @@ -700,6 +703,11 @@ pub struct TenantConfig { #[serde(skip_serializing_if = "Option::is_none")] pub image_creation_threshold: Option, + // HADRON + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "humantime_serde")] + pub image_layer_force_creation_period: Option, + #[serde(skip_serializing_if = "Option::is_none")] #[serde(with = "humantime_serde")] pub pitr_interval: Option, @@ -798,6 +806,7 @@ impl TenantConfig { mut gc_horizon, mut gc_period, mut 
image_creation_threshold, + mut image_layer_force_creation_period, mut pitr_interval, mut walreceiver_connect_timeout, mut lagging_wal_timeout, @@ -861,6 +870,11 @@ impl TenantConfig { patch .image_creation_threshold .apply(&mut image_creation_threshold); + // HADRON + patch + .image_layer_force_creation_period + .map(|v| humantime::parse_duration(&v))? + .apply(&mut image_layer_force_creation_period); patch .pitr_interval .map(|v| humantime::parse_duration(&v))? @@ -942,6 +956,7 @@ impl TenantConfig { gc_horizon, gc_period, image_creation_threshold, + image_layer_force_creation_period, pitr_interval, walreceiver_connect_timeout, lagging_wal_timeout, @@ -1016,6 +1031,9 @@ impl TenantConfig { image_creation_threshold: self .image_creation_threshold .unwrap_or(global_conf.image_creation_threshold), + image_layer_force_creation_period: self + .image_layer_force_creation_period + .or(global_conf.image_layer_force_creation_period), pitr_interval: self.pitr_interval.unwrap_or(global_conf.pitr_interval), walreceiver_connect_timeout: self .walreceiver_connect_timeout @@ -1604,6 +1622,9 @@ pub struct TimelineInfo { /// Whether the timeline is invisible in synthetic size calculations. pub is_invisible: Option, + // HADRON: the largest LSN below which all page updates have been included in the image layers. + #[serde(skip_serializing_if = "Option::is_none")] + pub image_consistent_lsn: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/libs/postgres_backend/src/lib.rs b/libs/postgres_backend/src/lib.rs index 851d824291..20afa8bb46 100644 --- a/libs/postgres_backend/src/lib.rs +++ b/libs/postgres_backend/src/lib.rs @@ -749,7 +749,18 @@ impl PostgresBackend { trace!("got query {query_string:?}"); if let Err(e) = handler.process_query(self, query_string).await { match e { - QueryError::Shutdown => return Ok(ProcessMsgResult::Break), + err @ QueryError::Shutdown => { + // Notify postgres of the connection shutdown at the libpq + // protocol level. 
This avoids postgres having to tell apart + // from an idle connection and a stale one, which is bug prone. + let shutdown_error = short_error(&err); + self.write_message_noflush(&BeMessage::ErrorResponse( + &shutdown_error, + Some(err.pg_error_code()), + ))?; + + return Ok(ProcessMsgResult::Break); + } QueryError::SimulatedConnectionError => { return Err(QueryError::SimulatedConnectionError); } diff --git a/libs/postgres_ffi/build.rs b/libs/postgres_ffi/build.rs index cdebd43f6f..190d9a78c4 100644 --- a/libs/postgres_ffi/build.rs +++ b/libs/postgres_ffi/build.rs @@ -110,7 +110,6 @@ fn main() -> anyhow::Result<()> { .allowlist_type("XLogRecPtr") .allowlist_type("XLogSegNo") .allowlist_type("TimeLineID") - .allowlist_type("TimestampTz") .allowlist_type("MultiXactId") .allowlist_type("MultiXactOffset") .allowlist_type("MultiXactStatus") diff --git a/libs/postgres_ffi/src/lib.rs b/libs/postgres_ffi/src/lib.rs index 9297ac46c9..a88b520a41 100644 --- a/libs/postgres_ffi/src/lib.rs +++ b/libs/postgres_ffi/src/lib.rs @@ -227,8 +227,7 @@ pub mod walrecord; // Export some widely used datatypes that are unlikely to change across Postgres versions pub use v14::bindings::{ BlockNumber, CheckPoint, ControlFileData, MultiXactId, OffsetNumber, Oid, PageHeaderData, - RepOriginId, TimeLineID, TimestampTz, TransactionId, XLogRecPtr, XLogRecord, XLogSegNo, uint32, - uint64, + RepOriginId, TimeLineID, TransactionId, XLogRecPtr, XLogRecord, XLogSegNo, uint32, uint64, }; // Likewise for these, although the assumption that these don't change is a little more iffy. pub use v14::bindings::{MultiXactOffset, MultiXactStatus}; diff --git a/libs/postgres_ffi/src/walrecord.rs b/libs/postgres_ffi/src/walrecord.rs index d593123dc0..7ed07b0e77 100644 --- a/libs/postgres_ffi/src/walrecord.rs +++ b/libs/postgres_ffi/src/walrecord.rs @@ -4,13 +4,14 @@ //! 
TODO: Generate separate types for each supported PG version use bytes::{Buf, Bytes}; +use postgres_ffi_types::TimestampTz; use serde::{Deserialize, Serialize}; use utils::bin_ser::DeserializeError; use utils::lsn::Lsn; use crate::{ BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, PgMajorVersion, - RepOriginId, TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants, + RepOriginId, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants, }; #[repr(C)] @@ -863,7 +864,8 @@ pub mod v17 { XlHeapDelete, XlHeapInsert, XlHeapLock, XlHeapMultiInsert, XlHeapUpdate, XlParameterChange, rm_neon, }; - pub use crate::{TimeLineID, TimestampTz}; + pub use crate::TimeLineID; + pub use postgres_ffi_types::TimestampTz; #[repr(C)] #[derive(Debug)] diff --git a/libs/postgres_ffi/src/xlog_utils.rs b/libs/postgres_ffi/src/xlog_utils.rs index f7b6296053..134baf5ff7 100644 --- a/libs/postgres_ffi/src/xlog_utils.rs +++ b/libs/postgres_ffi/src/xlog_utils.rs @@ -9,10 +9,11 @@ use super::super::waldecoder::WalStreamDecoder; use super::bindings::{ - CheckPoint, ControlFileData, DBState_DB_SHUTDOWNED, FullTransactionId, TimeLineID, TimestampTz, + CheckPoint, ControlFileData, DBState_DB_SHUTDOWNED, FullTransactionId, TimeLineID, XLogLongPageHeaderData, XLogPageHeaderData, XLogRecPtr, XLogRecord, XLogSegNo, XLOG_PAGE_MAGIC, MY_PGVERSION }; +use postgres_ffi_types::TimestampTz; use super::wal_generator::LogicalMessageGenerator; use crate::pg_constants; use crate::PG_TLI; diff --git a/libs/postgres_ffi_types/src/lib.rs b/libs/postgres_ffi_types/src/lib.rs index 84ef499b9f..86e8259e8a 100644 --- a/libs/postgres_ffi_types/src/lib.rs +++ b/libs/postgres_ffi_types/src/lib.rs @@ -11,3 +11,4 @@ pub mod forknum; pub type Oid = u32; pub type RepOriginId = u16; +pub type TimestampTz = i64; diff --git a/libs/proxy/json/Cargo.toml b/libs/proxy/json/Cargo.toml new file mode 100644 index 0000000000..2f163c141d --- /dev/null +++ b/libs/proxy/json/Cargo.toml 
@@ -0,0 +1,12 @@ +[package] +name = "json" +version = "0.1.0" +edition.workspace = true +license.workspace = true + +[dependencies] +ryu = "1" +itoa = "1" + +[dev-dependencies] +futures = "0.3" diff --git a/libs/proxy/json/src/lib.rs b/libs/proxy/json/src/lib.rs new file mode 100644 index 0000000000..a8b2e6b509 --- /dev/null +++ b/libs/proxy/json/src/lib.rs @@ -0,0 +1,412 @@ +//! A JSON serialization lib, designed for more flexibility than `serde_json` offers. +//! +//! Features: +//! +//! ## Dynamic construction +//! +//! Sometimes you have dynamic values you want to serialize, that are not already in a serde-aware model like a struct or a Vec etc. +//! To achieve this with serde, you need to implement a lot of different traits on a lot of different new-types. +//! Because of this, it's often easier to give-in and pull all the data into a serde-aware model (`serde_json::Value` or some intermediate struct), +//! but that is often not very efficient. +//! +//! This crate allows full control over the JSON encoding without needing to implement any extra traits. Just call the +//! relevant functions, and it will guarantee a correctly encoded JSON value. +//! +//! ## Async construction +//! +//! Similar to the above, sometimes the values arrive asynchronously. Often collecting those values in memory +//! is more expensive than writing them as JSON, since the overheads of `Vec` and `String` is much higher, however +//! there are exceptions. +//! +//! Serializing to JSON all in one go is also more CPU intensive and can cause lag spikes, +//! whereas serializing values incrementally spreads out the CPU load and reduces lag. +//! +//! ## Examples +//! +//! To represent the following JSON as a compact string +//! +//! ```json +//! { +//! "results": { +//! "rows": [ +//! { +//! "id": 1, +//! "value": null +//! }, +//! { +//! "id": 2, +//! "value": "hello" +//! } +//! ] +//! } +//! } +//! ``` +//! +//! We can use the following code: +//! +//! ``` +//! 
// create the outer object +//! let s = json::value_to_string!(|v| json::value_as_object!(|v| { +//! // create an entry with key "results" and start an object value associated with it. +//! let results = v.key("results"); +//! json::value_as_object!(|results| { +//! // create an entry with key "rows" and start an list value associated with it. +//! let rows = results.key("rows"); +//! json::value_as_list!(|rows| { +//! // create a list entry and start an object value associated with it. +//! let row = rows.entry(); +//! json::value_as_object!(|row| { +//! // add entry "id": 1 +//! row.entry("id", 1); +//! // add entry "value": null +//! row.entry("value", json::Null); +//! }); +//! +//! // create a list entry and start an object value associated with it. +//! let row = rows.entry(); +//! json::value_as_object!(|row| { +//! // add entry "id": 2 +//! row.entry("id", 2); +//! // add entry "value": "hello" +//! row.entry("value", "hello"); +//! }); +//! }); +//! }); +//! })); +//! +//! assert_eq!(s, r#"{"results":{"rows":[{"id":1,"value":null},{"id":2,"value":"hello"}]}}"#); +//! ``` + +mod macros; +mod str; +mod value; + +pub use value::{Null, ValueEncoder}; + +#[must_use] +/// Serialize a single json value. +pub struct ValueSer<'buf> { + buf: &'buf mut Vec, + start: usize, +} + +impl<'buf> ValueSer<'buf> { + /// Create a new json value serializer. + pub fn new(buf: &'buf mut Vec) -> Self { + Self { buf, start: 0 } + } + + /// Borrow the underlying buffer + pub fn as_buffer(&self) -> &[u8] { + self.buf + } + + #[inline] + pub fn value(self, e: impl ValueEncoder) { + e.encode(self); + } + + /// Write raw bytes to the buf. This must be already JSON encoded. + #[inline] + pub fn write_raw_json(self, data: &[u8]) { + self.buf.extend_from_slice(data); + self.finish(); + } + + /// Start a new object serializer. + #[inline] + pub fn object(self) -> ObjectSer<'buf> { + ObjectSer::new(self) + } + + /// Start a new list serializer. 
+ #[inline] + pub fn list(self) -> ListSer<'buf> { + ListSer::new(self) + } + + /// Finish the value ser. + #[inline] + fn finish(self) { + // don't trigger the drop handler which triggers a rollback. + // this won't cause memory leaks because `ValueSet` owns no allocations. + std::mem::forget(self); + } +} + +impl Drop for ValueSer<'_> { + fn drop(&mut self) { + self.buf.truncate(self.start); + } +} + +#[must_use] +/// Serialize a json object. +pub struct ObjectSer<'buf> { + value: ValueSer<'buf>, + start: usize, +} + +impl<'buf> ObjectSer<'buf> { + /// Start a new object serializer. + #[inline] + pub fn new(value: ValueSer<'buf>) -> Self { + value.buf.push(b'{'); + let start = value.buf.len(); + Self { value, start } + } + + /// Borrow the underlying buffer + pub fn as_buffer(&self) -> &[u8] { + self.value.as_buffer() + } + + /// Start a new object entry with the given string key, returning a [`ValueSer`] for the associated value. + #[inline] + pub fn key(&mut self, key: impl KeyEncoder) -> ValueSer<'_> { + key.write_key(self) + } + + /// Write an entry (key-value pair) to the object. + #[inline] + pub fn entry(&mut self, key: impl KeyEncoder, val: impl ValueEncoder) { + self.key(key).value(val); + } + + #[inline] + fn entry_inner(&mut self, f: impl FnOnce(&mut Vec)) -> ValueSer<'_> { + // track before the separator so we the value is rolled back it also removes the separator. + let start = self.value.buf.len(); + + // push separator if necessary + if self.value.buf.len() > self.start { + self.value.buf.push(b','); + } + // push key + f(self.value.buf); + // push value separator + self.value.buf.push(b':'); + + // return value writer. + ValueSer { + buf: self.value.buf, + start, + } + } + + /// Reset the buffer back to before this object was started. + #[inline] + pub fn rollback(self) -> ValueSer<'buf> { + // Do not fully reset the value, only reset it to before the `{`. + // This ensures any `,` before this value are not clobbered. 
+ self.value.buf.truncate(self.start - 1); + self.value + } + + /// Finish the object ser. + #[inline] + pub fn finish(self) { + self.value.buf.push(b'}'); + self.value.finish(); + } +} + +pub trait KeyEncoder { + fn write_key<'a>(self, obj: &'a mut ObjectSer) -> ValueSer<'a>; +} + +#[must_use] +/// Serialize a json object. +pub struct ListSer<'buf> { + value: ValueSer<'buf>, + start: usize, +} + +impl<'buf> ListSer<'buf> { + /// Start a new list serializer. + #[inline] + pub fn new(value: ValueSer<'buf>) -> Self { + value.buf.push(b'['); + let start = value.buf.len(); + Self { value, start } + } + + /// Borrow the underlying buffer + pub fn as_buffer(&self) -> &[u8] { + self.value.as_buffer() + } + + /// Write an value to the list. + #[inline] + pub fn push(&mut self, val: impl ValueEncoder) { + self.entry().value(val); + } + + /// Start a new value entry in this list. + #[inline] + pub fn entry(&mut self) -> ValueSer<'_> { + // track before the separator so we the value is rolled back it also removes the separator. + let start = self.value.buf.len(); + + // push separator if necessary + if self.value.buf.len() > self.start { + self.value.buf.push(b','); + } + + // return value writer. + ValueSer { + buf: self.value.buf, + start, + } + } + + /// Reset the buffer back to before this object was started. + #[inline] + pub fn rollback(self) -> ValueSer<'buf> { + // Do not fully reset the value, only reset it to before the `[`. + // This ensures any `,` before this value are not clobbered. + self.value.buf.truncate(self.start - 1); + self.value + } + + /// Finish the object ser. 
+ #[inline] + pub fn finish(self) { + self.value.buf.push(b']'); + self.value.finish(); + } +} + +#[cfg(test)] +mod tests { + use crate::{Null, ValueSer}; + + #[test] + fn object() { + let mut buf = vec![]; + let mut object = ValueSer::new(&mut buf).object(); + object.entry("foo", "bar"); + object.entry("baz", Null); + object.finish(); + + assert_eq!(buf, br#"{"foo":"bar","baz":null}"#); + } + + #[test] + fn list() { + let mut buf = vec![]; + let mut list = ValueSer::new(&mut buf).list(); + list.entry().value("bar"); + list.entry().value(Null); + list.finish(); + + assert_eq!(buf, br#"["bar",null]"#); + } + + #[test] + fn object_macro() { + let res = crate::value_to_string!(|obj| { + crate::value_as_object!(|obj| { + obj.entry("foo", "bar"); + obj.entry("baz", Null); + }) + }); + + assert_eq!(res, r#"{"foo":"bar","baz":null}"#); + } + + #[test] + fn list_macro() { + let res = crate::value_to_string!(|list| { + crate::value_as_list!(|list| { + list.entry().value("bar"); + list.entry().value(Null); + }) + }); + + assert_eq!(res, r#"["bar",null]"#); + } + + #[test] + fn rollback_on_drop() { + let res = crate::value_to_string!(|list| { + crate::value_as_list!(|list| { + list.entry().value("bar"); + + 'cancel: { + let nested_list = list.entry(); + crate::value_as_list!(|nested_list| { + nested_list.entry().value(1); + + assert_eq!(nested_list.as_buffer(), br#"["bar",[1"#); + if true { + break 'cancel; + } + }) + } + + assert_eq!(list.as_buffer(), br#"["bar""#); + + list.entry().value(Null); + }) + }); + + assert_eq!(res, r#"["bar",null]"#); + } + + #[test] + fn rollback_object() { + let res = crate::value_to_string!(|obj| { + crate::value_as_object!(|obj| { + let entry = obj.key("1"); + entry.value(1_i32); + + let entry = obj.key("2"); + let entry = { + let mut nested_obj = entry.object(); + nested_obj.entry("foo", "bar"); + nested_obj.rollback() + }; + + entry.value(2_i32); + }) + }); + + assert_eq!(res, r#"{"1":1,"2":2}"#); + } + + #[test] + fn rollback_list() { + let 
res = crate::value_to_string!(|list| { + crate::value_as_list!(|list| { + let entry = list.entry(); + entry.value(1_i32); + + let entry = list.entry(); + let entry = { + let mut nested_list = entry.list(); + nested_list.push("foo"); + nested_list.rollback() + }; + + entry.value(2_i32); + }) + }); + + assert_eq!(res, r#"[1,2]"#); + } + + #[test] + fn string_escaping() { + let mut buf = vec![]; + let mut object = ValueSer::new(&mut buf).object(); + + let key = "hello"; + let value = "\n world"; + + object.entry(format_args!("{key:?}"), value); + object.finish(); + + assert_eq!(buf, br#"{"\"hello\"":"\n world"}"#); + } +} diff --git a/libs/proxy/json/src/macros.rs b/libs/proxy/json/src/macros.rs new file mode 100644 index 0000000000..d3b5cfed10 --- /dev/null +++ b/libs/proxy/json/src/macros.rs @@ -0,0 +1,86 @@ +//! # Examples +//! +//! ``` +//! use futures::{StreamExt, TryStream, TryStreamExt}; +//! +//! async fn stream_to_json_list(mut s: S) -> Result +//! where +//! S: TryStream + Unpin, +//! T: json::ValueEncoder +//! { +//! Ok(json::value_to_string!(|val| json::value_as_list!(|val| { +//! // note how we can use `.await` and `?` in here. +//! while let Some(value) = s.try_next().await? { +//! val.push(value); +//! } +//! }))) +//! } +//! +//! let stream = futures::stream::iter([1, 2, 3]).map(Ok::); +//! let json_string = futures::executor::block_on(stream_to_json_list(stream)).unwrap(); +//! assert_eq!(json_string, "[1,2,3]"); +//! ``` + +/// A helper to create a new JSON vec. +/// +/// Implemented as a macro to preserve all control flow. +#[macro_export] +macro_rules! value_to_vec { + (|$val:ident| $body:expr) => {{ + let mut buf = vec![]; + let $val = $crate::ValueSer::new(&mut buf); + let _: () = $body; + buf + }}; +} + +/// A helper to create a new JSON string. +/// +/// Implemented as a macro to preserve all control flow. +#[macro_export] +macro_rules! 
value_to_string { + (|$val:ident| $body:expr) => {{ + ::std::string::String::from_utf8($crate::value_to_vec!(|$val| $body)) + .expect("json should be valid utf8") + }}; +} + +/// A helper that ensures the [`ObjectSer::finish`](crate::ObjectSer::finish) method is called on completion. +/// +/// Consumes `$val` and assigns it as an [`ObjectSer`](crate::ObjectSer) serializer. +/// The serializer is only 'finished' if the body completes. +/// The serializer is rolled back if `break`/`return` escapes the body. +/// +/// Implemented as a macro to preserve all control flow. +#[macro_export] +macro_rules! value_as_object { + (|$val:ident| $body:expr) => {{ + let mut obj = $crate::ObjectSer::new($val); + + let $val = &mut obj; + let res = $body; + + obj.finish(); + res + }}; +} + +/// A helper that ensures the [`ListSer::finish`](crate::ListSer::finish) method is called on completion. +/// +/// Consumes `$val` and assigns it as an [`ListSer`](crate::ListSer) serializer. +/// The serializer is only 'finished' if the body completes. +/// The serializer is rolled back if `break`/`return` escapes the body. +/// +/// Implemented as a macro to preserve all control flow. +#[macro_export] +macro_rules! value_as_list { + (|$val:ident| $body:expr) => {{ + let mut list = $crate::ListSer::new($val); + + let $val = &mut list; + let res = $body; + + list.finish(); + res + }}; +} diff --git a/libs/proxy/json/src/str.rs b/libs/proxy/json/src/str.rs new file mode 100644 index 0000000000..b092fd50ec --- /dev/null +++ b/libs/proxy/json/src/str.rs @@ -0,0 +1,166 @@ +//! Helpers for serializing escaped strings. +//! +//! ## License +//! +//! +//! +//! Licensed by David Tolnay under MIT or Apache-2.0. +//! +//! With modifications by Conrad Ludgate on behalf of Databricks. + +use std::fmt::{self, Write}; + +/// Represents a character escape code in a type-safe manner. 
+pub enum CharEscape { + /// An escaped quote `"` + Quote, + /// An escaped reverse solidus `\` + ReverseSolidus, + // /// An escaped solidus `/` + // Solidus, + /// An escaped backspace character (usually escaped as `\b`) + Backspace, + /// An escaped form feed character (usually escaped as `\f`) + FormFeed, + /// An escaped line feed character (usually escaped as `\n`) + LineFeed, + /// An escaped carriage return character (usually escaped as `\r`) + CarriageReturn, + /// An escaped tab character (usually escaped as `\t`) + Tab, + /// An escaped ASCII plane control character (usually escaped as + /// `\u00XX` where `XX` are two hex characters) + AsciiControl(u8), +} + +impl CharEscape { + #[inline] + fn from_escape_table(escape: u8, byte: u8) -> CharEscape { + match escape { + self::BB => CharEscape::Backspace, + self::TT => CharEscape::Tab, + self::NN => CharEscape::LineFeed, + self::FF => CharEscape::FormFeed, + self::RR => CharEscape::CarriageReturn, + self::QU => CharEscape::Quote, + self::BS => CharEscape::ReverseSolidus, + self::UU => CharEscape::AsciiControl(byte), + _ => unreachable!(), + } + } +} + +pub(crate) fn format_escaped_str(writer: &mut Vec, value: &str) { + writer.reserve(2 + value.len()); + + writer.push(b'"'); + + let rest = format_escaped_str_contents(writer, value); + writer.extend_from_slice(rest); + + writer.push(b'"'); +} + +pub(crate) fn format_escaped_fmt(writer: &mut Vec, args: fmt::Arguments) { + writer.push(b'"'); + + Collect { buf: writer } + .write_fmt(args) + .expect("formatting should not error"); + + writer.push(b'"'); +} + +struct Collect<'buf> { + buf: &'buf mut Vec, +} + +impl fmt::Write for Collect<'_> { + fn write_str(&mut self, s: &str) -> fmt::Result { + let last = format_escaped_str_contents(self.buf, s); + self.buf.extend(last); + Ok(()) + } +} + +// writes any escape sequences, and returns the suffix still needed to be written. 
+fn format_escaped_str_contents<'a>(writer: &mut Vec, value: &'a str) -> &'a [u8] { + let bytes = value.as_bytes(); + + let mut start = 0; + + for (i, &byte) in bytes.iter().enumerate() { + let escape = ESCAPE[byte as usize]; + if escape == 0 { + continue; + } + + writer.extend_from_slice(&bytes[start..i]); + + let char_escape = CharEscape::from_escape_table(escape, byte); + write_char_escape(writer, char_escape); + + start = i + 1; + } + + &bytes[start..] +} + +const BB: u8 = b'b'; // \x08 +const TT: u8 = b't'; // \x09 +const NN: u8 = b'n'; // \x0A +const FF: u8 = b'f'; // \x0C +const RR: u8 = b'r'; // \x0D +const QU: u8 = b'"'; // \x22 +const BS: u8 = b'\\'; // \x5C +const UU: u8 = b'u'; // \x00...\x1F except the ones above +const __: u8 = 0; + +// Lookup table of escape sequences. A value of b'x' at index i means that byte +// i is escaped as "\x" in JSON. A value of 0 means that byte i is not escaped. +static ESCAPE: [u8; 256] = [ + // 1 2 3 4 5 6 7 8 9 A B C D E F + UU, UU, UU, UU, UU, UU, UU, UU, BB, TT, NN, UU, FF, RR, UU, UU, // 0 + UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, UU, // 1 + __, __, QU, __, __, __, __, __, __, __, __, __, __, __, __, __, // 2 + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 3 + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 4 + __, __, __, __, __, __, __, __, __, __, __, __, BS, __, __, __, // 5 + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 6 + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 7 + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 8 + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // 9 + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // A + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // B + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // C + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // D + __, __, __, __, __, 
__, __, __, __, __, __, __, __, __, __, __, // E + __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, __, // F +]; + +fn write_char_escape(writer: &mut Vec, char_escape: CharEscape) { + let s = match char_escape { + CharEscape::Quote => b"\\\"", + CharEscape::ReverseSolidus => b"\\\\", + // CharEscape::Solidus => b"\\/", + CharEscape::Backspace => b"\\b", + CharEscape::FormFeed => b"\\f", + CharEscape::LineFeed => b"\\n", + CharEscape::CarriageReturn => b"\\r", + CharEscape::Tab => b"\\t", + CharEscape::AsciiControl(byte) => { + static HEX_DIGITS: [u8; 16] = *b"0123456789abcdef"; + let bytes = &[ + b'\\', + b'u', + b'0', + b'0', + HEX_DIGITS[(byte >> 4) as usize], + HEX_DIGITS[(byte & 0xF) as usize], + ]; + return writer.extend_from_slice(bytes); + } + }; + + writer.extend_from_slice(s); +} diff --git a/libs/proxy/json/src/value.rs b/libs/proxy/json/src/value.rs new file mode 100644 index 0000000000..705af9603e --- /dev/null +++ b/libs/proxy/json/src/value.rs @@ -0,0 +1,168 @@ +use core::fmt; +use std::collections::{BTreeMap, HashMap}; + +use crate::str::{format_escaped_fmt, format_escaped_str}; +use crate::{KeyEncoder, ObjectSer, ValueSer, value_as_list, value_as_object}; + +/// Write a value to the underlying json representation. 
+pub trait ValueEncoder { + fn encode(self, v: ValueSer<'_>); +} + +pub(crate) fn write_int(x: impl itoa::Integer, b: &mut Vec) { + b.extend_from_slice(itoa::Buffer::new().format(x).as_bytes()); +} + +pub(crate) fn write_float(x: impl ryu::Float, b: &mut Vec) { + b.extend_from_slice(ryu::Buffer::new().format(x).as_bytes()); +} + +impl ValueEncoder for &T { + #[inline] + fn encode(self, v: ValueSer<'_>) { + T::encode(*self, v); + } +} + +impl ValueEncoder for &str { + #[inline] + fn encode(self, v: ValueSer<'_>) { + format_escaped_str(v.buf, self); + v.finish(); + } +} + +impl ValueEncoder for fmt::Arguments<'_> { + #[inline] + fn encode(self, v: ValueSer<'_>) { + if let Some(s) = self.as_str() { + format_escaped_str(v.buf, s); + } else { + format_escaped_fmt(v.buf, self); + } + v.finish(); + } +} + +macro_rules! int { + [$($t:ty),*] => { + $( + impl ValueEncoder for $t { + #[inline] + fn encode(self, v: ValueSer<'_>) { + write_int(self, v.buf); + v.finish(); + } + } + )* + }; +} + +int![u8, u16, u32, u64, usize, u128]; +int![i8, i16, i32, i64, isize, i128]; + +macro_rules! 
float { + [$($t:ty),*] => { + $( + impl ValueEncoder for $t { + #[inline] + fn encode(self, v: ValueSer<'_>) { + write_float(self, v.buf); + v.finish(); + } + } + )* + }; +} + +float![f32, f64]; + +impl ValueEncoder for bool { + #[inline] + fn encode(self, v: ValueSer<'_>) { + v.write_raw_json(if self { b"true" } else { b"false" }); + } +} + +impl ValueEncoder for Option { + #[inline] + fn encode(self, v: ValueSer<'_>) { + match self { + Some(value) => value.encode(v), + None => Null.encode(v), + } + } +} + +impl KeyEncoder for &str { + #[inline] + fn write_key<'a>(self, obj: &'a mut ObjectSer) -> ValueSer<'a> { + let obj = &mut *obj; + obj.entry_inner(|b| format_escaped_str(b, self)) + } +} + +impl KeyEncoder for fmt::Arguments<'_> { + #[inline] + fn write_key<'a>(self, obj: &'a mut ObjectSer) -> ValueSer<'a> { + if let Some(key) = self.as_str() { + obj.entry_inner(|b| format_escaped_str(b, key)) + } else { + obj.entry_inner(|b| format_escaped_fmt(b, self)) + } + } +} + +/// Represents the JSON null value. 
+pub struct Null; + +impl ValueEncoder for Null { + #[inline] + fn encode(self, v: ValueSer<'_>) { + v.write_raw_json(b"null"); + } +} + +impl ValueEncoder for Vec { + #[inline] + fn encode(self, v: ValueSer<'_>) { + value_as_list!(|v| { + for t in self { + v.entry().value(t); + } + }); + } +} + +impl ValueEncoder for &[T] { + #[inline] + fn encode(self, v: ValueSer<'_>) { + value_as_list!(|v| { + for t in self { + v.entry().value(t); + } + }); + } +} + +impl ValueEncoder for HashMap { + #[inline] + fn encode(self, o: ValueSer<'_>) { + value_as_object!(|o| { + for (k, v) in self { + o.entry(k, v); + } + }); + } +} + +impl ValueEncoder for BTreeMap { + #[inline] + fn encode(self, o: ValueSer<'_>) { + value_as_object!(|o| { + for (k, v) in self { + o.entry(k, v); + } + }); + } +} diff --git a/libs/remote_storage/Cargo.toml b/libs/remote_storage/Cargo.toml index bd18d80915..0ae13552b8 100644 --- a/libs/remote_storage/Cargo.toml +++ b/libs/remote_storage/Cargo.toml @@ -13,6 +13,7 @@ aws-smithy-async.workspace = true aws-smithy-types.workspace = true aws-config.workspace = true aws-sdk-s3.workspace = true +base64.workspace = true bytes.workspace = true camino = { workspace = true, features = ["serde1"] } humantime-serde.workspace = true @@ -41,6 +42,9 @@ http-body-util.workspace = true itertools.workspace = true sync_wrapper = { workspace = true, features = ["futures"] } +byteorder = "1.4" +rand = "0.8.5" + [dev-dependencies] camino-tempfile.workspace = true test-context.workspace = true diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index e9c24ac723..db30829216 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -14,17 +14,25 @@ use anyhow::{Context, Result, anyhow}; use azure_core::request_options::{IfMatchCondition, MaxResults, Metadata, Range}; use azure_core::{Continuable, HttpClient, RetryOptions, TransportOptions}; use azure_storage::StorageCredentials; -use 
azure_storage_blobs::blob::operations::GetBlobBuilder; +use azure_storage_blobs::blob::BlobBlockType; +use azure_storage_blobs::blob::BlockList; use azure_storage_blobs::blob::{Blob, CopyStatus}; use azure_storage_blobs::container::operations::ListBlobsBuilder; -use azure_storage_blobs::prelude::{ClientBuilder, ContainerClient}; +use azure_storage_blobs::prelude::ClientBuilder; +use azure_storage_blobs::{blob::operations::GetBlobBuilder, prelude::ContainerClient}; +use base64::{Engine as _, engine::general_purpose::URL_SAFE}; +use byteorder::{BigEndian, ByteOrder}; use bytes::Bytes; +use camino::Utf8Path; use futures::FutureExt; use futures::future::Either; use futures::stream::Stream; use futures_util::{StreamExt, TryStreamExt}; use http_types::{StatusCode, Url}; use scopeguard::ScopeGuard; +use tokio::fs::File; +use tokio::io::AsyncReadExt; +use tokio::io::AsyncSeekExt; use tokio_util::sync::CancellationToken; use tracing::debug; use utils::backoff; @@ -51,6 +59,9 @@ pub struct AzureBlobStorage { // Alternative timeout used for metadata objects which are expected to be small pub small_timeout: Duration, + /* BEGIN_HADRON */ + pub put_block_size_mb: Option, + /* END_HADRON */ } impl AzureBlobStorage { @@ -107,6 +118,9 @@ impl AzureBlobStorage { concurrency_limiter: ConcurrencyLimiter::new(azure_config.concurrency_limit.get()), timeout, small_timeout, + /* BEGIN_HADRON */ + put_block_size_mb: azure_config.put_block_size_mb, + /* END_HADRON */ }) } @@ -583,31 +597,137 @@ impl RemoteStorage for AzureBlobStorage { let started_at = start_measuring_requests(kind); - let op = async { + let mut metadata_map = metadata.unwrap_or([].into()); + let timeline_file_path = metadata_map.0.remove("databricks_azure_put_block"); + + /* BEGIN_HADRON */ + let op = async move { let blob_client = self.client.blob_client(self.relative_path_to_name(to)); + let put_block_size = self.put_block_size_mb.unwrap_or(0) * 1024 * 1024; + if timeline_file_path.is_none() || put_block_size == 0 { + 
// Use put_block_blob directly. + let from: Pin< + Box> + Send + Sync + 'static>, + > = Box::pin(from); + let from = NonSeekableStream::new(from, data_size_bytes); + let body = azure_core::Body::SeekableStream(Box::new(from)); - let from: Pin> + Send + Sync + 'static>> = - Box::pin(from); + let mut builder = blob_client.put_block_blob(body); + if !metadata_map.0.is_empty() { + builder = builder.metadata(to_azure_metadata(metadata_map)); + } + let fut = builder.into_future(); + let fut = tokio::time::timeout(self.timeout, fut); + let result = fut.await; + match result { + Ok(Ok(_response)) => return Ok(()), + Ok(Err(azure)) => return Err(azure.into()), + Err(_timeout) => return Err(TimeoutOrCancel::Timeout.into()), + }; + } + // Upload chunks concurrently using Put Block. + // Each PutBlock uploads put_block_size bytes of the file. + let mut upload_futures: Vec>> = + vec![]; + let mut block_list = BlockList::default(); + let mut start_bytes = 0u64; + let mut remaining_bytes = data_size_bytes; + let mut block_list_count = 0; - let from = NonSeekableStream::new(from, data_size_bytes); + while remaining_bytes > 0 { + let block_size = std::cmp::min(remaining_bytes, put_block_size); + let end_bytes = start_bytes + block_size as u64; + let block_id = block_list_count; + let timeout = self.timeout; + let blob_client = blob_client.clone(); + let timeline_file = timeline_file_path.clone().unwrap().clone(); - let body = azure_core::Body::SeekableStream(Box::new(from)); + let mut encoded_block_id = [0u8; 8]; + BigEndian::write_u64(&mut encoded_block_id, block_id); + URL_SAFE.encode(encoded_block_id); - let mut builder = blob_client.put_block_blob(body); + // Put one block. 
+ let part_fut = async move { + let mut file = File::open(Utf8Path::new(&timeline_file.clone())).await?; + file.seek(io::SeekFrom::Start(start_bytes)).await?; + let limited_reader = file.take(block_size as u64); + let file_chunk_stream = + tokio_util::io::ReaderStream::with_capacity(limited_reader, 1024 * 1024); + let file_chunk_stream_pin: Pin< + Box> + Send + Sync + 'static>, + > = Box::pin(file_chunk_stream); + let stream_wrapper = NonSeekableStream::new(file_chunk_stream_pin, block_size); + let body = azure_core::Body::SeekableStream(Box::new(stream_wrapper)); + // Azure put block takes URL-encoded block ids and all blocks must have the same byte length. + // https://learn.microsoft.com/en-us/rest/api/storageservices/put-block?tabs=microsoft-entra-id#uri-parameters + let builder = blob_client.put_block(encoded_block_id.to_vec(), body); + let fut = builder.into_future(); + let fut = tokio::time::timeout(timeout, fut); + let result = fut.await; + tracing::debug!( + "azure put block id-{} size {} start {} end {} file {} response {:#?}", + block_id, + block_size, + start_bytes, + end_bytes, + timeline_file, + result + ); + match result { + Ok(Ok(_response)) => Ok(()), + Ok(Err(azure)) => Err(azure), + Err(_timeout) => Err(azure_core::Error::new( + azure_core::error::ErrorKind::Io, + std::io::Error::new( + std::io::ErrorKind::TimedOut, + "Operation timed out", + ), + )), + } + }; + upload_futures.push(tokio::spawn(part_fut)); - if let Some(metadata) = metadata { - builder = builder.metadata(to_azure_metadata(metadata)); + block_list_count += 1; + remaining_bytes -= block_size; + start_bytes += block_size as u64; + + block_list + .blocks + .push(BlobBlockType::Uncommitted(encoded_block_id.to_vec().into())); } + tracing::debug!( + "azure put blocks {} total MB: {} chunk size MB: {}", + block_list_count, + data_size_bytes / 1024 / 1024, + put_block_size / 1024 / 1024 + ); + // Wait for all blocks to be uploaded. 
+ let upload_results = futures::future::try_join_all(upload_futures).await; + if upload_results.is_err() { + return Err(anyhow::anyhow!(format!( + "Failed to upload all blocks {:#?}", + upload_results.unwrap_err() + ))); + } + + // Commit the blocks. + let mut builder = blob_client.put_block_list(block_list); + if !metadata_map.0.is_empty() { + builder = builder.metadata(to_azure_metadata(metadata_map)); + } let fut = builder.into_future(); let fut = tokio::time::timeout(self.timeout, fut); + let result = fut.await; + tracing::debug!("azure put block list response {:#?}", result); - match fut.await { + match result { Ok(Ok(_response)) => Ok(()), Ok(Err(azure)) => Err(azure.into()), Err(_timeout) => Err(TimeoutOrCancel::Timeout.into()), } }; + /* END_HADRON */ let res = tokio::select! { res = op => res, @@ -622,7 +742,6 @@ impl RemoteStorage for AzureBlobStorage { crate::metrics::BUCKET_METRICS .req_seconds .observe_elapsed(kind, outcome, started_at); - res } diff --git a/libs/remote_storage/src/config.rs b/libs/remote_storage/src/config.rs index 5bc1f678ae..e13e17d544 100644 --- a/libs/remote_storage/src/config.rs +++ b/libs/remote_storage/src/config.rs @@ -195,8 +195,19 @@ pub struct AzureConfig { pub max_keys_per_list_response: Option, #[serde(default = "default_azure_conn_pool_size")] pub conn_pool_size: usize, + /* BEGIN_HADRON */ + #[serde(default = "default_azure_put_block_size_mb")] + pub put_block_size_mb: Option, + /* END_HADRON */ } +/* BEGIN_HADRON */ +fn default_azure_put_block_size_mb() -> Option { + // Disable parallel upload by default. 
+ Some(0) +} +/* END_HADRON */ + fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize { NonZeroUsize::new(DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT).unwrap() } @@ -213,6 +224,9 @@ impl Debug for AzureConfig { "max_keys_per_list_response", &self.max_keys_per_list_response, ) + /* BEGIN_HADRON */ + .field("put_block_size_mb", &self.put_block_size_mb) + /* END_HADRON */ .finish() } } @@ -352,6 +366,7 @@ timeout = '5s'"; upload_storage_class = 'INTELLIGENT_TIERING' timeout = '7s' conn_pool_size = 8 + put_block_size_mb = 1024 "; let config = parse(toml).unwrap(); @@ -367,6 +382,9 @@ timeout = '5s'"; concurrency_limit: default_remote_storage_azure_concurrency_limit(), max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, conn_pool_size: 8, + /* BEGIN_HADRON */ + put_block_size_mb: Some(1024), + /* END_HADRON */ }), timeout: Duration::from_secs(7), small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index ed416b2811..5885c3e791 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -732,9 +732,15 @@ impl GenericRemoteStorage { }) } - pub fn unreliable_wrapper(s: Self, fail_first: u64) -> Self { - Self::Unreliable(Arc::new(UnreliableWrapper::new(s, fail_first))) + /* BEGIN_HADRON */ + pub fn unreliable_wrapper(s: Self, fail_first: u64, fail_probability: u64) -> Self { + Self::Unreliable(Arc::new(UnreliableWrapper::new( + s, + fail_first, + fail_probability, + ))) } + /* END_HADRON */ /// See [`RemoteStorage::upload`], which this method calls with `None` as metadata. pub async fn upload_storage_object( diff --git a/libs/remote_storage/src/simulate_failures.rs b/libs/remote_storage/src/simulate_failures.rs index f9856a5856..e895380192 100644 --- a/libs/remote_storage/src/simulate_failures.rs +++ b/libs/remote_storage/src/simulate_failures.rs @@ -1,6 +1,8 @@ //! 
This module provides a wrapper around a real RemoteStorage implementation that //! causes the first N attempts at each upload or download operatio to fail. For //! testing purposes. +use rand::Rng; +use std::cmp; use std::collections::HashMap; use std::collections::hash_map::Entry; use std::num::NonZeroU32; @@ -25,6 +27,13 @@ pub struct UnreliableWrapper { // Tracks how many failed attempts of each operation has been made. attempts: Mutex>, + + /* BEGIN_HADRON */ + // This the probability of failure for each operation, ranged from [0, 100]. + // The probability is default to 100, which means that all operations will fail. + // Storage will fail by probability up to attempts_to_fail times. + attempt_failure_probability: u64, + /* END_HADRON */ } /// Used to identify retries of different unique operation. @@ -40,7 +49,11 @@ enum RemoteOp { } impl UnreliableWrapper { - pub fn new(inner: crate::GenericRemoteStorage, attempts_to_fail: u64) -> Self { + pub fn new( + inner: crate::GenericRemoteStorage, + attempts_to_fail: u64, + attempt_failure_probability: u64, + ) -> Self { assert!(attempts_to_fail > 0); let inner = match inner { GenericRemoteStorage::AwsS3(s) => GenericRemoteStorage::AwsS3(s), @@ -51,9 +64,11 @@ impl UnreliableWrapper { panic!("Can't wrap unreliable wrapper unreliably") } }; + let actual_attempt_failure_probability = cmp::min(attempt_failure_probability, 100); UnreliableWrapper { inner, attempts_to_fail, + attempt_failure_probability: actual_attempt_failure_probability, attempts: Mutex::new(HashMap::new()), } } @@ -66,6 +81,7 @@ impl UnreliableWrapper { /// fn attempt(&self, op: RemoteOp) -> anyhow::Result { let mut attempts = self.attempts.lock().unwrap(); + let mut rng = rand::thread_rng(); match attempts.entry(op) { Entry::Occupied(mut e) => { @@ -75,15 +91,19 @@ impl UnreliableWrapper { *p }; - if attempts_before_this >= self.attempts_to_fail { - // let it succeed - e.remove(); - Ok(attempts_before_this) - } else { + /* BEGIN_HADRON */ + // If 
there are more attempts to fail, fail the request by probability. + if (attempts_before_this < self.attempts_to_fail) + && (rng.gen_range(0..=100) < self.attempt_failure_probability) + { let error = anyhow::anyhow!("simulated failure of remote operation {:?}", e.key()); Err(error) + } else { + e.remove(); + Ok(attempts_before_this) } + /* END_HADRON */ } Entry::Vacant(e) => { let error = anyhow::anyhow!("simulated failure of remote operation {:?}", e.key()); diff --git a/libs/remote_storage/tests/common/mod.rs b/libs/remote_storage/tests/common/mod.rs index daab05d91a..fb7d6fd482 100644 --- a/libs/remote_storage/tests/common/mod.rs +++ b/libs/remote_storage/tests/common/mod.rs @@ -165,10 +165,42 @@ pub(crate) async fn upload_remote_data( let (data, data_len) = upload_stream(format!("remote blob data {i}").into_bytes().into()); + + /* BEGIN_HADRON */ + let mut metadata = None; + if matches!(&*task_client, GenericRemoteStorage::AzureBlob(_)) { + let file_path = "/tmp/dbx_upload_tmp_file.txt"; + { + // Open the file in append mode + let mut file = std::fs::OpenOptions::new() + .append(true) + .create(true) // Create the file if it doesn't exist + .open(file_path)?; + // Append some bytes to the file + std::io::Write::write_all( + &mut file, + &format!("remote blob data {i}").into_bytes(), + )?; + file.sync_all()?; + } + metadata = Some(remote_storage::StorageMetadata::from([( + "databricks_azure_put_block", + file_path, + )])); + } + /* END_HADRON */ + task_client - .upload(data, data_len, &blob_path, None, &cancel) + .upload(data, data_len, &blob_path, metadata, &cancel) .await?; + // TODO: Check upload is using the put_block upload. + // We cannot consume data here since data is moved inside the upload. 
+ // let total_bytes = data.fold(0, |acc, chunk| async move { + // acc + chunk.map(|bytes| bytes.len()).unwrap_or(0) + // }).await; + // assert_eq!(total_bytes, data_len); + Ok::<_, anyhow::Error>((blob_prefix, blob_path)) }); } diff --git a/libs/remote_storage/tests/test_real_azure.rs b/libs/remote_storage/tests/test_real_azure.rs index 31c9ca3200..4d7caabd39 100644 --- a/libs/remote_storage/tests/test_real_azure.rs +++ b/libs/remote_storage/tests/test_real_azure.rs @@ -219,6 +219,9 @@ async fn create_azure_client( concurrency_limit: NonZeroUsize::new(100).unwrap(), max_keys_per_list_response, conn_pool_size: 8, + /* BEGIN_HADRON */ + put_block_size_mb: Some(1), + /* END_HADRON */ }), timeout: RemoteStorageConfig::DEFAULT_TIMEOUT, small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT, diff --git a/libs/safekeeper_api/Cargo.toml b/libs/safekeeper_api/Cargo.toml index 928e583b0b..1d09d6fc6d 100644 --- a/libs/safekeeper_api/Cargo.toml +++ b/libs/safekeeper_api/Cargo.toml @@ -9,7 +9,7 @@ anyhow.workspace = true const_format.workspace = true serde.workspace = true serde_json.workspace = true -postgres_ffi.workspace = true +postgres_ffi_types.workspace = true postgres_versioninfo.workspace = true pq_proto.workspace = true tokio.workspace = true diff --git a/libs/safekeeper_api/src/models.rs b/libs/safekeeper_api/src/models.rs index 1774489c1c..a300c8464f 100644 --- a/libs/safekeeper_api/src/models.rs +++ b/libs/safekeeper_api/src/models.rs @@ -3,7 +3,7 @@ use std::net::SocketAddr; use pageserver_api::shard::ShardIdentity; -use postgres_ffi::TimestampTz; +use postgres_ffi_types::TimestampTz; use postgres_versioninfo::PgVersionId; use serde::{Deserialize, Serialize}; use tokio::time::Instant; @@ -11,7 +11,7 @@ use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId}; use utils::lsn::Lsn; use utils::pageserver_feedback::PageserverFeedback; -use crate::membership::Configuration; +use crate::membership::{Configuration, SafekeeperGeneration}; use 
crate::{ServerInfo, Term}; #[derive(Debug, Serialize, Deserialize)] @@ -221,7 +221,7 @@ pub struct TimelineMembershipSwitchRequest { pub struct TimelineMembershipSwitchResponse { pub previous_conf: Configuration, pub current_conf: Configuration, - pub term: Term, + pub last_log_term: Term, pub flush_lsn: Lsn, } @@ -311,3 +311,12 @@ pub struct PullTimelineResponse { pub safekeeper_host: Option, // TODO: add more fields? } + +/// Response to a timeline locate request. +/// Storcon-only API. +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct TimelineLocateResponse { + pub generation: SafekeeperGeneration, + pub sk_set: Vec, + pub new_sk_set: Option>, +} diff --git a/libs/utils/Cargo.toml b/libs/utils/Cargo.toml index 7b1dc56071..4b326949d7 100644 --- a/libs/utils/Cargo.toml +++ b/libs/utils/Cargo.toml @@ -47,6 +47,7 @@ tracing-subscriber = { workspace = true, features = ["json", "registry"] } tracing-utils.workspace = true rand.workspace = true scopeguard.workspace = true +uuid.workspace = true strum.workspace = true strum_macros.workspace = true walkdir.workspace = true diff --git a/libs/utils/src/auth.rs b/libs/utils/src/auth.rs index de3a964d23..b2aade15de 100644 --- a/libs/utils/src/auth.rs +++ b/libs/utils/src/auth.rs @@ -12,7 +12,8 @@ use jsonwebtoken::{ Algorithm, DecodingKey, EncodingKey, Header, TokenData, Validation, decode, encode, }; use pem::Pem; -use serde::{Deserialize, Serialize, de::DeserializeOwned}; +use serde::{Deserialize, Deserializer, Serialize, de::DeserializeOwned}; +use uuid::Uuid; use crate::id::TenantId; @@ -25,6 +26,11 @@ pub enum Scope { /// Provides access to all data for a specific tenant (specified in `struct Claims` below) // TODO: join these two? Tenant, + /// Provides access to all data for a specific tenant, but based on endpoint ID. This token scope + /// is only used by compute to fetch the spec for a specific endpoint. 
The spec contains a Tenant-scoped + /// token authorizing access to all data of a tenant, so the spec-fetch API requires a TenantEndpoint + /// scope token to ensure that untrusted compute nodes can't fetch spec for arbitrary endpoints. + TenantEndpoint, /// Provides blanket access to all tenants on the pageserver plus pageserver-wide APIs. /// Should only be used e.g. for status check/tenant creation/list. PageServerApi, @@ -51,17 +57,43 @@ pub enum Scope { ControllerPeer, } +fn deserialize_empty_string_as_none_uuid<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let opt = Option::::deserialize(deserializer)?; + match opt.as_deref() { + Some("") => Ok(None), + Some(s) => Uuid::parse_str(s) + .map(Some) + .map_err(serde::de::Error::custom), + None => Ok(None), + } +} + /// JWT payload. See docs/authentication.md for the format #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct Claims { #[serde(default)] pub tenant_id: Option, + #[serde( + default, + skip_serializing_if = "Option::is_none", + // Neon control plane includes this field as empty in the claims. + // Consider it None in those cases. 
+ deserialize_with = "deserialize_empty_string_as_none_uuid" + )] + pub endpoint_id: Option, pub scope: Scope, } impl Claims { pub fn new(tenant_id: Option, scope: Scope) -> Self { - Self { tenant_id, scope } + Self { + tenant_id, + scope, + endpoint_id: None, + } } } @@ -212,6 +244,7 @@ MC4CAQAwBQYDK2VwBCIEID/Drmc1AA6U/znNRWpF3zEGegOATQxfkdWxitcOMsIH let expected_claims = Claims { tenant_id: Some(TenantId::from_str("3d1f7595b468230304e0b73cecbcb081").unwrap()), scope: Scope::Tenant, + endpoint_id: None, }; // A test token containing the following payload, signed using TEST_PRIV_KEY_ED25519: @@ -240,6 +273,7 @@ MC4CAQAwBQYDK2VwBCIEID/Drmc1AA6U/znNRWpF3zEGegOATQxfkdWxitcOMsIH let claims = Claims { tenant_id: Some(TenantId::from_str("3d1f7595b468230304e0b73cecbcb081").unwrap()), scope: Scope::Tenant, + endpoint_id: None, }; let pem = pem::parse(TEST_PRIV_KEY_ED25519).unwrap(); diff --git a/libs/utils/src/env.rs b/libs/utils/src/env.rs index 2a85f54a01..0b3b5e6c4f 100644 --- a/libs/utils/src/env.rs +++ b/libs/utils/src/env.rs @@ -44,3 +44,63 @@ where } } } + +/* BEGIN_HADRON */ +pub enum DeploymentMode { + Local, + Dev, + Staging, + Prod, +} + +pub fn get_deployment_mode() -> Option { + match std::env::var("DEPLOYMENT_MODE") { + Ok(env) => match env.as_str() { + "development" => Some(DeploymentMode::Dev), + "staging" => Some(DeploymentMode::Staging), + "production" => Some(DeploymentMode::Prod), + _ => { + tracing::error!("Unexpected DEPLOYMENT_MODE: {}", env); + None + } + }, + Err(_) => { + // tracing::error!("DEPLOYMENT_MODE not set"); + None + } + } +} + +pub fn is_dev_or_staging() -> bool { + matches!( + get_deployment_mode(), + Some(DeploymentMode::Dev) | Some(DeploymentMode::Staging) + ) +} + +pub enum TestingMode { + Chaos, + Stress, +} + +pub fn get_test_mode() -> Option { + match std::env::var("HADRON_TEST_MODE") { + Ok(env) => match env.as_str() { + "chaos" => Some(TestingMode::Chaos), + "stress" => Some(TestingMode::Stress), + _ => { + 
tracing::error!("Unexpected HADRON_TEST_MODE: {}", env); + None + } + }, + Err(_) => { + tracing::error!("HADRON_TEST_MODE not set"); + None + } + } +} + +pub fn is_chaos_testing() -> bool { + matches!(get_test_mode(), Some(TestingMode::Chaos)) +} +/* END_HADRON */ diff --git a/libs/utils/src/ip_address.rs b/libs/utils/src/ip_address.rs new file mode 100644 index 0000000000..d0834d0ba5 --- /dev/null +++ b/libs/utils/src/ip_address.rs @@ -0,0 +1,73 @@ +use std::env::{VarError, var}; +use std::error::Error; +use std::net::IpAddr; +use std::str::FromStr; + +/// Name of the environment variable containing the reachable IP address of the node. If set, the IP address contained in this +/// environment variable is used as the reachable IP address of the pageserver or safekeeper node during node registration. +/// In a Kubernetes environment, this environment variable should be set by Kubernetes to the Pod IP (specified in the Pod +/// template). +pub const HADRON_NODE_IP_ADDRESS: &str = "HADRON_NODE_IP_ADDRESS"; + +/// Read the reachable IP address of this page server from env var HADRON_NODE_IP_ADDRESS. +/// In Kubernetes this environment variable is set to the Pod IP (specified in the Pod template). +pub fn read_node_ip_addr_from_env() -> Result, Box> { + match var(HADRON_NODE_IP_ADDRESS) { + Ok(v) => { + if let Ok(addr) = IpAddr::from_str(&v) { + Ok(Some(addr)) + } else { + Err(format!("Invalid IP address string: {v}. 
Cannot be parsed as either an IPv4 or an IPv6 address.").into()) + } + } + Err(VarError::NotPresent) => Ok(None), + Err(e) => Err(e.into()), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::env; + use std::net::{Ipv4Addr, Ipv6Addr}; + + #[test] + fn test_read_node_ip_addr_from_env() { + // SAFETY: test code + unsafe { + // Test with a valid IPv4 address + env::set_var(HADRON_NODE_IP_ADDRESS, "192.168.1.1"); + let result = read_node_ip_addr_from_env().unwrap(); + assert_eq!(result, Some(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)))); + + // Test with a valid IPv6 address + env::set_var( + HADRON_NODE_IP_ADDRESS, + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", + ); + } + let result = read_node_ip_addr_from_env().unwrap(); + assert_eq!( + result, + Some(IpAddr::V6( + Ipv6Addr::from_str("2001:0db8:85a3:0000:0000:8a2e:0370:7334").unwrap() + )) + ); + + // Test with an invalid IP address + // SAFETY: test code + unsafe { + env::set_var(HADRON_NODE_IP_ADDRESS, "invalid_ip"); + } + let result = read_node_ip_addr_from_env(); + assert!(result.is_err()); + + // Test with no environment variable set + // SAFETY: test code + unsafe { + env::remove_var(HADRON_NODE_IP_ADDRESS); + } + let result = read_node_ip_addr_from_env().unwrap(); + assert_eq!(result, None); + } +} diff --git a/libs/utils/src/lib.rs b/libs/utils/src/lib.rs index 11f787562c..69771be5dc 100644 --- a/libs/utils/src/lib.rs +++ b/libs/utils/src/lib.rs @@ -26,6 +26,9 @@ pub mod auth; // utility functions and helper traits for unified unique id generation/serialization etc. pub mod id; +// utility functions to obtain reachable IP addresses in PS/SK nodes. +pub mod ip_address; + pub mod shard; mod hex; @@ -99,6 +102,8 @@ pub mod elapsed_accum; #[cfg(target_os = "linux")] pub mod linux_socket_ioctl; +pub mod metrics_collector; + // Re-export used in macro. Avoids adding git-version as dep in target crates. 
#[doc(hidden)] pub use git_version; diff --git a/libs/utils/src/logging.rs b/libs/utils/src/logging.rs index 5828a400a0..d67c0f123b 100644 --- a/libs/utils/src/logging.rs +++ b/libs/utils/src/logging.rs @@ -1,4 +1,5 @@ use std::future::Future; +use std::pin::Pin; use std::str::FromStr; use std::time::Duration; @@ -7,7 +8,7 @@ use metrics::{IntCounter, IntCounterVec}; use once_cell::sync::Lazy; use strum_macros::{EnumString, VariantNames}; use tokio::time::Instant; -use tracing::info; +use tracing::{info, warn}; /// Logs a critical error, similarly to `tracing::error!`. This will: /// @@ -377,10 +378,11 @@ impl std::fmt::Debug for SecretString { /// /// TODO: consider upgrading this to a warning, but currently it fires too often. #[inline] -pub async fn log_slow(name: &str, threshold: Duration, f: std::pin::Pin<&mut F>) -> O -where - F: Future, -{ +pub async fn log_slow( + name: &str, + threshold: Duration, + f: Pin<&mut impl Future>, +) -> O { monitor_slow_future( threshold, threshold, // period = threshold @@ -394,16 +396,42 @@ where if !is_slow { return; } + let elapsed = elapsed_total.as_secs_f64(); if ready { - info!( - "slow {name} completed after {:.3}s", - elapsed_total.as_secs_f64() - ); + info!("slow {name} completed after {elapsed:.3}s"); } else { - info!( - "slow {name} still running after {:.3}s", - elapsed_total.as_secs_f64() - ); + info!("slow {name} still running after {elapsed:.3}s"); + } + }, + ) + .await +} + +/// Logs a periodic warning if a future is slow to complete. 
+#[inline] +pub async fn warn_slow( + name: &str, + threshold: Duration, + f: Pin<&mut impl Future>, +) -> O { + monitor_slow_future( + threshold, + threshold, // period = threshold + f, + |MonitorSlowFutureCallback { + ready, + is_slow, + elapsed_total, + elapsed_since_last_callback: _, + }| { + if !is_slow { + return; + } + let elapsed = elapsed_total.as_secs_f64(); + if ready { + warn!("slow {name} completed after {elapsed:.3}s"); + } else { + warn!("slow {name} still running after {elapsed:.3}s"); } }, ) @@ -416,7 +444,7 @@ where pub async fn monitor_slow_future( threshold: Duration, period: Duration, - mut fut: std::pin::Pin<&mut F>, + mut fut: Pin<&mut F>, mut cb: impl FnMut(MonitorSlowFutureCallback), ) -> O where diff --git a/libs/utils/src/metrics_collector.rs b/libs/utils/src/metrics_collector.rs new file mode 100644 index 0000000000..9e57fcd643 --- /dev/null +++ b/libs/utils/src/metrics_collector.rs @@ -0,0 +1,75 @@ +use std::{ + sync::{Arc, RwLock}, + time::{Duration, Instant}, +}; + +use metrics::{IntGauge, proto::MetricFamily, register_int_gauge}; +use once_cell::sync::Lazy; + +pub static METRICS_STALE_MILLIS: Lazy = Lazy::new(|| { + register_int_gauge!( + "metrics_metrics_stale_milliseconds", + "The current metrics stale time in milliseconds" + ) + .expect("failed to define a metric") +}); + +#[derive(Debug)] +pub struct CollectedMetrics { + pub metrics: Vec, + pub collected_at: Instant, +} + +impl CollectedMetrics { + fn new(metrics: Vec) -> Self { + Self { + metrics, + collected_at: Instant::now(), + } + } +} + +#[derive(Debug)] +pub struct MetricsCollector { + last_collected: RwLock>, +} + +impl MetricsCollector { + pub fn new() -> Self { + Self { + last_collected: RwLock::new(Arc::new(CollectedMetrics::new(vec![]))), + } + } + + #[tracing::instrument(name = "metrics_collector", skip_all)] + pub fn run_once(&self, cache_metrics: bool) -> Arc { + let started = Instant::now(); + let metrics = metrics::gather(); + let collected = 
Arc::new(CollectedMetrics::new(metrics)); + if cache_metrics { + let mut guard = self.last_collected.write().unwrap(); + *guard = collected.clone(); + } + tracing::info!( + "Collected {} metric families in {} ms", + collected.metrics.len(), + started.elapsed().as_millis() + ); + collected + } + + pub fn last_collected(&self) -> Arc { + self.last_collected.read().unwrap().clone() + } +} + +impl Default for MetricsCollector { + fn default() -> Self { + Self::new() + } +} + +// Interval for metrics collection. Currently hard-coded to be the same as the metrics scape interval from the obs agent +pub static METRICS_COLLECTION_INTERVAL: Duration = Duration::from_secs(30); + +pub static METRICS_COLLECTOR: Lazy = Lazy::new(MetricsCollector::default); diff --git a/libs/utils/src/shard.rs b/libs/utils/src/shard.rs index 633a57c97f..3345549819 100644 --- a/libs/utils/src/shard.rs +++ b/libs/utils/src/shard.rs @@ -49,16 +49,14 @@ pub struct TenantShardId { pub shard_count: ShardCount, } -impl std::fmt::Display for ShardCount { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.0.fmt(f) - } -} - impl ShardCount { pub const MAX: Self = Self(u8::MAX); pub const MIN: Self = Self(0); + pub fn unsharded() -> Self { + ShardCount(0) + } + /// The internal value of a ShardCount may be zero, which means "1 shard, but use /// legacy format for TenantShardId that excludes the shard suffix", also known /// as [`TenantShardId::unsharded`]. 
@@ -177,6 +175,12 @@ impl std::fmt::Display for ShardNumber { } } +impl std::fmt::Display for ShardCount { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + impl std::fmt::Display for ShardSlug<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( diff --git a/libs/wal_decoder/src/models/record.rs b/libs/wal_decoder/src/models/record.rs index 51659ed904..a37e1473e0 100644 --- a/libs/wal_decoder/src/models/record.rs +++ b/libs/wal_decoder/src/models/record.rs @@ -2,7 +2,8 @@ use bytes::Bytes; use postgres_ffi::walrecord::{MultiXactMember, describe_postgres_wal_record}; -use postgres_ffi::{MultiXactId, MultiXactOffset, TimestampTz, TransactionId}; +use postgres_ffi::{MultiXactId, MultiXactOffset, TransactionId}; +use postgres_ffi_types::TimestampTz; use serde::{Deserialize, Serialize}; use utils::bin_ser::DeserializeError; diff --git a/libs/walproposer/src/api_bindings.rs b/libs/walproposer/src/api_bindings.rs index 7c6abf252e..825a137d0f 100644 --- a/libs/walproposer/src/api_bindings.rs +++ b/libs/walproposer/src/api_bindings.rs @@ -428,6 +428,12 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState { shard_number: 0, }; + let empty_wal_rate_limiter = crate::bindings::WalRateLimiter { + should_limit: crate::bindings::pg_atomic_uint32 { value: 0 }, + sent_bytes: 0, + last_recorded_time_us: crate::bindings::pg_atomic_uint64 { value: 0 }, + }; + crate::bindings::WalproposerShmemState { propEpochStartLsn: crate::bindings::pg_atomic_uint64 { value: 0 }, donor_name: [0; 64], @@ -441,6 +447,7 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState { num_shards: 0, replica_promote: false, min_ps_feedback: empty_feedback, + wal_rate_limiter: empty_wal_rate_limiter, } } diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml index db932d126e..ecc10a7d5f 100644 --- a/pageserver/Cargo.toml +++ b/pageserver/Cargo.toml @@ -114,6 +114,7 @@ twox-hash.workspace = true procfs.workspace 
= true [dev-dependencies] +base64.workspace = true criterion.workspace = true hex-literal.workspace = true tokio = { workspace = true, features = ["process", "sync", "fs", "rt", "io-util", "time", "test-util"] } diff --git a/pageserver/client/src/mgmt_api.rs b/pageserver/client/src/mgmt_api.rs index af4be23b9b..3867e536f4 100644 --- a/pageserver/client/src/mgmt_api.rs +++ b/pageserver/client/src/mgmt_api.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::error::Error as _; use std::time::Duration; @@ -251,6 +251,70 @@ impl Client { Ok(()) } + pub async fn tenant_timeline_compact( + &self, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, + force_image_layer_creation: bool, + must_force_image_layer_creation: bool, + scheduled: bool, + wait_until_done: bool, + ) -> Result<()> { + let mut path = reqwest::Url::parse(&format!( + "{}/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/compact", + self.mgmt_api_endpoint + )) + .expect("Cannot build URL"); + + if force_image_layer_creation { + path.query_pairs_mut() + .append_pair("force_image_layer_creation", "true"); + } + + if must_force_image_layer_creation { + path.query_pairs_mut() + .append_pair("must_force_image_layer_creation", "true"); + } + + if scheduled { + path.query_pairs_mut().append_pair("scheduled", "true"); + } + if wait_until_done { + path.query_pairs_mut() + .append_pair("wait_until_scheduled_compaction_done", "true"); + path.query_pairs_mut() + .append_pair("wait_until_uploaded", "true"); + } + self.request(Method::PUT, path, ()).await?; + Ok(()) + } + + /* BEGIN_HADRON */ + pub async fn tenant_timeline_describe( + &self, + tenant_shard_id: &TenantShardId, + timeline_id: &TimelineId, + ) -> Result { + let mut path = reqwest::Url::parse(&format!( + "{}/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}", + self.mgmt_api_endpoint + )) + .expect("Cannot build URL"); + path.query_pairs_mut() + .append_pair("include-image-consistent-lsn", 
"true"); + + let response: reqwest::Response = self.request(Method::GET, path, ()).await?; + let body = response.json().await.map_err(Error::ReceiveBody)?; + Ok(body) + } + + pub async fn list_tenant_visible_size(&self) -> Result> { + let uri = format!("{}/v1/list_tenant_visible_size", self.mgmt_api_endpoint); + let resp = self.get(&uri).await?; + resp.json().await.map_err(Error::ReceiveBody) + } + /* END_HADRON */ + pub async fn tenant_scan_remote_storage( &self, tenant_id: TenantId, @@ -809,6 +873,22 @@ impl Client { .map_err(Error::ReceiveBody) } + pub async fn reset_alert_gauges(&self) -> Result<()> { + let uri = format!( + "{}/hadron-internal/reset_alert_gauges", + self.mgmt_api_endpoint + ); + self.start_request(Method::POST, uri) + .send() + .await + .map_err(Error::SendRequest)? + .error_from_body() + .await? + .json() + .await + .map_err(Error::ReceiveBody) + } + pub async fn wait_lsn( &self, tenant_shard_id: TenantShardId, diff --git a/pageserver/client_grpc/Cargo.toml b/pageserver/client_grpc/Cargo.toml index 681b3d3bc3..e2741ad839 100644 --- a/pageserver/client_grpc/Cargo.toml +++ b/pageserver/client_grpc/Cargo.toml @@ -1,7 +1,8 @@ [package] name = "pageserver_client_grpc" version = "0.1.0" -edition = "2024" +edition.workspace = true +license.workspace = true [features] testing = ["pageserver_api/testing"] @@ -10,35 +11,14 @@ testing = ["pageserver_api/testing"] anyhow.workspace = true arc-swap.workspace = true bytes.workspace = true +compute_api.workspace = true futures.workspace = true -http.workspace = true -thiserror.workspace = true +pageserver_api.workspace = true +pageserver_page_api.workspace = true +tokio.workspace = true +tokio-stream.workspace = true +tokio-util.workspace = true tonic.workspace = true tracing.workspace = true -tokio = { version = "1.43.1", features = [ - "full", - "macros", - "net", - "io-util", - "rt", - "rt-multi-thread", -] } -uuid = { version = "1", features = ["v4"] } -tower = { version = "0.4", features = ["timeout", 
"util"] } -rand = "0.8" -tokio-util = { version = "0.7", features = ["compat"] } -hyper-util = "0.1.9" -hyper = "1.6.0" -metrics.workspace = true -priority-queue = "2.3.1" -scopeguard.workspace = true -async-trait = { version = "0.1" } -tokio-stream = "0.1" -dashmap = "5" -chrono = { version = "0.4", features = ["serde"] } -compute_api.workspace = true - - -pageserver_page_api.workspace = true -pageserver_api.workspace = true utils.workspace = true +workspace_hack.workspace = true diff --git a/pageserver/client_grpc/src/client.rs b/pageserver/client_grpc/src/client.rs index 4f3f606935..3a9edc7092 100644 --- a/pageserver/client_grpc/src/client.rs +++ b/pageserver/client_grpc/src/client.rs @@ -1,12 +1,16 @@ use std::collections::HashMap; use std::num::NonZero; +use std::pin::pin; use std::sync::Arc; +use std::time::{Duration, Instant}; use anyhow::anyhow; use arc_swap::ArcSwap; use futures::stream::FuturesUnordered; use futures::{FutureExt as _, StreamExt as _}; -use tracing::instrument; +use tonic::codec::CompressionEncoding; +use tracing::{debug, instrument}; +use utils::logging::warn_slow; use crate::pool::{ChannelPool, ClientGuard, ClientPool, StreamGuard, StreamPool}; use crate::retry::Retry; @@ -20,28 +24,40 @@ use utils::shard::{ShardCount, ShardIndex, ShardNumber}; /// Max number of concurrent clients per channel (i.e. TCP connection). New channels will be spun up /// when full. /// +/// Normal requests are small, and we don't pipeline them, so we can afford a large number of +/// streams per connection. +/// /// TODO: tune all of these constants, and consider making them configurable. -/// TODO: consider separate limits for unary and streaming clients, so we don't fill up channels -/// with only streams. -const MAX_CLIENTS_PER_CHANNEL: NonZero = NonZero::new(16).unwrap(); +const MAX_CLIENTS_PER_CHANNEL: NonZero = NonZero::new(64).unwrap(); -/// Max number of concurrent unary request clients per shard. 
-const MAX_UNARY_CLIENTS: NonZero = NonZero::new(64).unwrap(); +/// Max number of concurrent bulk GetPage streams per channel (i.e. TCP connection). These use a +/// dedicated channel pool with a lower client limit, to avoid TCP-level head-of-line blocking and +/// transmission delays. This also concentrates large window sizes on a smaller set of +/// streams/connections, presumably reducing memory use. +const MAX_BULK_CLIENTS_PER_CHANNEL: NonZero = NonZero::new(16).unwrap(); -/// Max number of concurrent GetPage streams per shard. The max number of concurrent GetPage -/// requests is given by `MAX_STREAMS * MAX_STREAM_QUEUE_DEPTH`. -const MAX_STREAMS: NonZero = NonZero::new(64).unwrap(); +/// The batch size threshold at which a GetPage request will use the bulk stream pool. +/// +/// The gRPC initial window size is 64 KB. Each page is 8 KB, so let's avoid increasing the window +/// size for the normal stream pool, and route requests for >= 5 pages (>32 KB) to the bulk pool. +const BULK_THRESHOLD_BATCH_SIZE: usize = 5; -/// Max number of pipelined requests per stream. -const MAX_STREAM_QUEUE_DEPTH: NonZero = NonZero::new(2).unwrap(); +/// The overall request call timeout, including retries and pool acquisition. +/// TODO: should we retry forever? Should the caller decide? +const CALL_TIMEOUT: Duration = Duration::from_secs(60); -/// Max number of concurrent bulk GetPage streams per shard, used e.g. for prefetches. Because these -/// are more throughput-oriented, we have a smaller limit but higher queue depth. -const MAX_BULK_STREAMS: NonZero = NonZero::new(16).unwrap(); +/// The per-request (retry attempt) timeout, including any lazy connection establishment. +const REQUEST_TIMEOUT: Duration = Duration::from_secs(10); -/// Max number of pipelined requests per bulk stream. These are more throughput-oriented and thus -/// get a larger queue depth. -const MAX_BULK_STREAM_QUEUE_DEPTH: NonZero = NonZero::new(4).unwrap(); +/// The initial request retry backoff duration. 
The first retry does not back off. +/// TODO: use a different backoff for ResourceExhausted (rate limiting)? Needs server support. +const BASE_BACKOFF: Duration = Duration::from_millis(5); + +/// The maximum request retry backoff duration. +const MAX_BACKOFF: Duration = Duration::from_secs(5); + +/// Threshold and interval for warning about slow operation. +const SLOW_THRESHOLD: Duration = Duration::from_secs(3); /// A rich Pageserver gRPC client for a single tenant timeline. This client is more capable than the /// basic `page_api::Client` gRPC client, and supports: @@ -49,10 +65,19 @@ const MAX_BULK_STREAM_QUEUE_DEPTH: NonZero = NonZero::new(4).unwrap(); /// * Sharded tenants across multiple Pageservers. /// * Pooling of connections, clients, and streams for efficient resource use. /// * Concurrent use by many callers. -/// * Internal handling of GetPage bidirectional streams, with pipelining and error handling. +/// * Internal handling of GetPage bidirectional streams. /// * Automatic retries. /// * Observability. /// +/// The client has dedicated connection/client/stream pools per shard, for resource reuse. These +/// pools are unbounded: we allow scaling out as many concurrent streams as needed to serve all +/// concurrent callers, which mostly eliminates head-of-line blocking. Idle streams are fairly +/// cheap: the server task currently uses 26 KB of memory, so we can comfortably fit 100,000 +/// concurrent idle streams (2.5 GB memory). The worst case degenerates to the old libpq case with +/// one stream per backend, but without the TCP connection overhead. In the common case we expect +/// significantly lower stream counts due to stream sharing, driven e.g. by idle backends, LFC hits, +/// read coalescing, sharding (backends typically only talk to one shard at a time), etc. +/// /// TODO: this client does not support base backups or LSN leases, as these are only used by /// compute_ctl. 
Consider adding this, but LSN leases need concurrent requests on all shards. pub struct PageserverClient { @@ -62,10 +87,10 @@ pub struct PageserverClient { timeline_id: TimelineId, /// The JWT auth token for this tenant, if any. auth_token: Option, + /// The compression to use, if any. + compression: Option, /// The shards for this tenant. shards: ArcSwap, - /// The retry configuration. - retry: Retry, } impl PageserverClient { @@ -76,14 +101,21 @@ impl PageserverClient { timeline_id: TimelineId, shard_spec: ShardSpec, auth_token: Option, + compression: Option, ) -> anyhow::Result { - let shards = Shards::new(tenant_id, timeline_id, shard_spec, auth_token.clone())?; + let shards = Shards::new( + tenant_id, + timeline_id, + shard_spec, + auth_token.clone(), + compression, + )?; Ok(Self { tenant_id, timeline_id, auth_token, + compression, shards: ArcSwap::new(Arc::new(shards)), - retry: Retry, }) } @@ -93,11 +125,33 @@ impl PageserverClient { /// TODO: verify that in-flight requests are allowed to complete, and that the old pools are /// properly spun down and dropped afterwards. pub fn update_shards(&self, shard_spec: ShardSpec) -> anyhow::Result<()> { + // Validate the shard spec. We should really use `ArcSwap::rcu` for this, to avoid races + // with concurrent updates, but that involves creating a new `Shards` on every attempt, + // which spins up a bunch of Tokio tasks and such. These should already be checked elsewhere + // in the stack, and if they're violated then we already have problems elsewhere, so a + // best-effort but possibly-racy check is okay here. 
+ let old = self.shards.load_full(); + if shard_spec.count < old.count { + return Err(anyhow!( + "can't reduce shard count from {} to {}", + old.count, + shard_spec.count + )); + } + if !old.count.is_unsharded() && shard_spec.stripe_size != old.stripe_size { + return Err(anyhow!( + "can't change stripe size from {} to {}", + old.stripe_size, + shard_spec.stripe_size + )); + } + let shards = Shards::new( self.tenant_id, self.timeline_id, shard_spec, self.auth_token.clone(), + self.compression, )?; self.shards.store(Arc::new(shards)); Ok(()) @@ -109,13 +163,15 @@ impl PageserverClient { &self, req: page_api::CheckRelExistsRequest, ) -> tonic::Result { - self.retry - .with(async || { - // Relation metadata is only available on shard 0. - let mut client = self.shards.load_full().get_zero().client().await?; - client.check_rel_exists(req).await - }) - .await + debug!("sending request: {req:?}"); + let resp = Self::with_retries(CALL_TIMEOUT, async |_| { + // Relation metadata is only available on shard 0. + let mut client = self.shards.load_full().get_zero().client().await?; + Self::with_timeout(REQUEST_TIMEOUT, client.check_rel_exists(req)).await + }) + .await?; + debug!("received response: {resp:?}"); + Ok(resp) } /// Returns the total size of a database, as # of bytes. @@ -124,17 +180,20 @@ impl PageserverClient { &self, req: page_api::GetDbSizeRequest, ) -> tonic::Result { - self.retry - .with(async || { - // Relation metadata is only available on shard 0. - let mut client = self.shards.load_full().get_zero().client().await?; - client.get_db_size(req).await - }) - .await + debug!("sending request: {req:?}"); + let resp = Self::with_retries(CALL_TIMEOUT, async |_| { + // Relation metadata is only available on shard 0. + let mut client = self.shards.load_full().get_zero().client().await?; + Self::with_timeout(REQUEST_TIMEOUT, client.get_db_size(req)).await + }) + .await?; + debug!("received response: {resp:?}"); + Ok(resp) } - /// Fetches pages. 
The `request_id` must be unique across all in-flight requests. Automatically - /// splits requests that straddle shard boundaries, and assembles the responses. + /// Fetches pages. The `request_id` must be unique across all in-flight requests, and the + /// `attempt` must be 0 (incremented on retry). Automatically splits requests that straddle + /// shard boundaries, and assembles the responses. /// /// Unlike `page_api::Client`, this automatically converts `status_code` into `tonic::Status` /// errors. All responses will have `GetPageStatusCode::Ok`. @@ -154,6 +213,12 @@ impl PageserverClient { if req.block_numbers.is_empty() { return Err(tonic::Status::invalid_argument("no block number")); } + // The request attempt must be 0. The client will increment it internally. + if req.request_id.attempt != 0 { + return Err(tonic::Status::invalid_argument("request attempt must be 0")); + } + + debug!("sending request: {req:?}"); // The shards may change while we're fetching pages. We execute the request using a stable // view of the shards (especially important for requests that span shards), but retry the @@ -163,9 +228,16 @@ impl PageserverClient { // // TODO: the gRPC server and client doesn't yet properly support shard splits. Revisit this // once we figure out how to handle these. - self.retry - .with(async || Self::get_page_with_shards(req.clone(), &self.shards.load_full()).await) - .await + let resp = Self::with_retries(CALL_TIMEOUT, async |attempt| { + let mut req = req.clone(); + req.request_id.attempt = attempt as u32; + let shards = self.shards.load_full(); + Self::with_timeout(REQUEST_TIMEOUT, Self::get_page_with_shards(req, &shards)).await + }) + .await?; + + debug!("received response: {resp:?}"); + Ok(resp) } /// Fetches pages using the given shards. This uses a stable view of the shards, regardless of @@ -176,7 +248,7 @@ impl PageserverClient { ) -> tonic::Result { // Fast path: request is for a single shard. 
if let Some(shard_id) = - GetPageSplitter::is_single_shard(&req, shards.count, shards.stripe_size) + GetPageSplitter::for_single_shard(&req, shards.count, shards.stripe_size) { return Self::get_page_with_shard(req, shards.get(shard_id)?).await; } @@ -193,10 +265,10 @@ impl PageserverClient { } while let Some((shard_id, shard_response)) = shard_requests.next().await.transpose()? { - splitter.add_response(shard_id, shard_response); + splitter.add_response(shard_id, shard_response)?; } - splitter.assemble_response() + splitter.get_response() } /// Fetches pages on the given shard. Does not retry internally. @@ -204,9 +276,8 @@ impl PageserverClient { req: page_api::GetPageRequest, shard: &Shard, ) -> tonic::Result { - let expected = req.block_numbers.len(); - let stream = shard.stream(req.request_class.is_bulk()).await; - let resp = stream.send(req).await?; + let mut stream = shard.stream(Self::is_bulk(&req)).await?; + let resp = stream.send(req.clone()).await?; // Convert per-request errors into a tonic::Status. if resp.status_code != page_api::GetPageStatusCode::Ok { @@ -216,11 +287,27 @@ impl PageserverClient { )); } - // Check that we received the expected number of pages. - let actual = resp.page_images.len(); - if expected != actual { - return Err(tonic::Status::data_loss(format!( - "expected {expected} pages, got {actual}", + // Check that we received the expected pages. 
+ if req.rel != resp.rel { + return Err(tonic::Status::internal(format!( + "shard {} returned wrong relation, expected {} got {}", + shard.id, req.rel, resp.rel + ))); + } + if !req + .block_numbers + .iter() + .copied() + .eq(resp.pages.iter().map(|p| p.block_number)) + { + return Err(tonic::Status::internal(format!( + "shard {} returned wrong pages, expected {:?} got {:?}", + shard.id, + req.block_numbers, + resp.pages + .iter() + .map(|page| page.block_number) + .collect::>() ))); } @@ -233,13 +320,15 @@ impl PageserverClient { &self, req: page_api::GetRelSizeRequest, ) -> tonic::Result { - self.retry - .with(async || { - // Relation metadata is only available on shard 0. - let mut client = self.shards.load_full().get_zero().client().await?; - client.get_rel_size(req).await - }) - .await + debug!("sending request: {req:?}"); + let resp = Self::with_retries(CALL_TIMEOUT, async |_| { + // Relation metadata is only available on shard 0. + let mut client = self.shards.load_full().get_zero().client().await?; + Self::with_timeout(REQUEST_TIMEOUT, client.get_rel_size(req)).await + }) + .await?; + debug!("received response: {resp:?}"); + Ok(resp) } /// Fetches an SLRU segment. @@ -248,13 +337,50 @@ impl PageserverClient { &self, req: page_api::GetSlruSegmentRequest, ) -> tonic::Result { - self.retry - .with(async || { - // SLRU segments are only available on shard 0. - let mut client = self.shards.load_full().get_zero().client().await?; - client.get_slru_segment(req).await - }) - .await + debug!("sending request: {req:?}"); + let resp = Self::with_retries(CALL_TIMEOUT, async |_| { + // SLRU segments are only available on shard 0. + let mut client = self.shards.load_full().get_zero().client().await?; + Self::with_timeout(REQUEST_TIMEOUT, client.get_slru_segment(req)).await + }) + .await?; + debug!("received response: {resp:?}"); + Ok(resp) + } + + /// Runs the given async closure with retries up to the given timeout. 
Only certain gRPC status + /// codes are retried, see [`Retry::should_retry`]. Returns `DeadlineExceeded` on timeout. + async fn with_retries(timeout: Duration, f: F) -> tonic::Result + where + F: FnMut(usize) -> O, // pass attempt number, starting at 0 + O: Future>, + { + Retry { + timeout: Some(timeout), + base_backoff: BASE_BACKOFF, + max_backoff: MAX_BACKOFF, + } + .with(f) + .await + } + + /// Runs the given future with a timeout. Returns `DeadlineExceeded` on timeout. + async fn with_timeout( + timeout: Duration, + f: impl Future>, + ) -> tonic::Result { + let started = Instant::now(); + tokio::time::timeout(timeout, f).await.map_err(|_| { + tonic::Status::deadline_exceeded(format!( + "request timed out after {:.3}s", + started.elapsed().as_secs_f64() + )) + })? + } + + /// Returns true if the request is considered a bulk request and should use the bulk pool. + fn is_bulk(req: &page_api::GetPageRequest) -> bool { + req.block_numbers.len() >= BULK_THRESHOLD_BATCH_SIZE } } @@ -343,13 +469,21 @@ impl Shards { timeline_id: TimelineId, shard_spec: ShardSpec, auth_token: Option, + compression: Option, ) -> anyhow::Result { // NB: the shard spec has already been validated when constructed. let mut shards = HashMap::with_capacity(shard_spec.urls.len()); for (shard_id, url) in shard_spec.urls { shards.insert( shard_id, - Shard::new(url, tenant_id, timeline_id, shard_id, auth_token.clone())?, + Shard::new( + url, + tenant_id, + timeline_id, + shard_id, + auth_token.clone(), + compression, + )?, ); } @@ -375,21 +509,31 @@ impl Shards { } } -/// A single shard. Uses dedicated resource pools with the following structure: +/// A single shard. Has dedicated resource pools with the following structure: /// -/// * Channel pool: unbounded. -/// * Unary client pool: MAX_UNARY_CLIENTS. -/// * Stream client pool: unbounded. -/// * Stream pool: MAX_STREAMS and MAX_STREAM_QUEUE_DEPTH. -/// * Bulk channel pool: unbounded. +/// * Channel pool: MAX_CLIENTS_PER_CHANNEL. 
+/// * Client pool: unbounded. +/// * Stream pool: unbounded. +/// * Bulk channel pool: MAX_BULK_CLIENTS_PER_CHANNEL. /// * Bulk client pool: unbounded. -/// * Bulk stream pool: MAX_BULK_STREAMS and MAX_BULK_STREAM_QUEUE_DEPTH. +/// * Bulk stream pool: unbounded. +/// +/// We use a separate bulk channel pool with a lower concurrency limit for large batch requests. +/// This avoids TCP-level head-of-line blocking, and also concentrates large window sizes on a +/// smaller set of streams/connections, which presumably reduces memory use. Neither of these pools +/// are bounded, nor do they pipeline requests, so the latency characteristics should be mostly +/// similar (except for TCP transmission time). +/// +/// TODO: since we never use bounded pools, we could consider removing the pool limiters. However, +/// the code is fairly trivial, so we may as well keep them around for now in case we need them. struct Shard { + /// The shard ID. + id: ShardIndex, /// Unary gRPC client pool. client_pool: Arc, /// GetPage stream pool. stream_pool: Arc, - /// GetPage stream pool for bulk requests, e.g. prefetches. + /// GetPage stream pool for bulk requests. bulk_stream_pool: Arc, } @@ -401,51 +545,36 @@ impl Shard { timeline_id: TimelineId, shard_id: ShardIndex, auth_token: Option, + compression: Option, ) -> anyhow::Result { - // Common channel pool for unary and stream requests. Bounded by client/stream pools. - let channel_pool = ChannelPool::new(url.clone(), MAX_CLIENTS_PER_CHANNEL)?; - - // Client pool for unary requests. + // Shard pools for unary requests and non-bulk GetPage requests. let client_pool = ClientPool::new( - channel_pool.clone(), + ChannelPool::new(url.clone(), MAX_CLIENTS_PER_CHANNEL)?, tenant_id, timeline_id, shard_id, auth_token.clone(), - Some(MAX_UNARY_CLIENTS), + compression, + None, // unbounded ); + let stream_pool = StreamPool::new(client_pool.clone(), None); // unbounded - // GetPage stream pool. 
Uses a dedicated client pool to avoid starving out unary clients, - // but shares a channel pool with it (as it's unbounded). - let stream_pool = StreamPool::new( - ClientPool::new( - channel_pool.clone(), - tenant_id, - timeline_id, - shard_id, - auth_token.clone(), - None, // unbounded, limited by stream pool - ), - Some(MAX_STREAMS), - MAX_STREAM_QUEUE_DEPTH, - ); - - // Bulk GetPage stream pool, e.g. for prefetches. Uses dedicated channel/client/stream pools - // to avoid head-of-line blocking of latency-sensitive requests. + // Bulk GetPage stream pool for large batches (prefetches, sequential scans, vacuum, etc.). let bulk_stream_pool = StreamPool::new( ClientPool::new( - ChannelPool::new(url, MAX_CLIENTS_PER_CHANNEL)?, + ChannelPool::new(url, MAX_BULK_CLIENTS_PER_CHANNEL)?, tenant_id, timeline_id, shard_id, auth_token, - None, // unbounded, limited by stream pool + compression, + None, // unbounded, ), - Some(MAX_BULK_STREAMS), - MAX_BULK_STREAM_QUEUE_DEPTH, + None, // unbounded ); Ok(Self { + id: shard_id, client_pool, stream_pool, bulk_stream_pool, @@ -453,19 +582,23 @@ impl Shard { } /// Returns a pooled client for this shard. + #[instrument(skip_all)] async fn client(&self) -> tonic::Result { - self.client_pool - .get() - .await - .map_err(|err| tonic::Status::internal(format!("failed to get client: {err}"))) + warn_slow( + "client pool acquisition", + SLOW_THRESHOLD, + pin!(self.client_pool.get()), + ) + .await } - /// Returns a pooled stream for this shard. If `bulk` is `true`, uses the dedicated bulk stream - /// pool (e.g. for prefetches). - async fn stream(&self, bulk: bool) -> StreamGuard { - match bulk { - false => self.stream_pool.get().await, - true => self.bulk_stream_pool.get().await, - } + /// Returns a pooled stream for this shard. If `bulk` is `true`, uses the dedicated bulk pool. 
+ #[instrument(skip_all, fields(bulk))] + async fn stream(&self, bulk: bool) -> tonic::Result { + let pool = match bulk { + false => &self.stream_pool, + true => &self.bulk_stream_pool, + }; + warn_slow("stream pool acquisition", SLOW_THRESHOLD, pin!(pool.get())).await } } diff --git a/pageserver/client_grpc/src/lib.rs b/pageserver/client_grpc/src/lib.rs index 14fb3fbd5a..7c9f569d00 100644 --- a/pageserver/client_grpc/src/lib.rs +++ b/pageserver/client_grpc/src/lib.rs @@ -4,3 +4,4 @@ mod retry; mod split; pub use client::{PageserverClient, ShardSpec}; +pub use pageserver_api::shard::ShardStripeSize; // used in ShardSpec diff --git a/pageserver/client_grpc/src/pool.rs b/pageserver/client_grpc/src/pool.rs index 0e4bff2f1b..98a649b4c8 100644 --- a/pageserver/client_grpc/src/pool.rs +++ b/pageserver/client_grpc/src/pool.rs @@ -9,19 +9,36 @@ //! //! * ChannelPool: manages gRPC channels (TCP connections) to a single Pageserver. Multiple clients //! can acquire and use the same channel concurrently (via HTTP/2 stream multiplexing), up to a -//! per-channel client limit. Channels may be closed when they are no longer used by any clients. +//! per-channel client limit. Channels are closed immediately when empty, and indirectly rely on +//! client/stream idle timeouts. //! //! * ClientPool: manages gRPC clients for a single tenant shard. Each client acquires a (shared) //! channel from the ChannelPool for the client's lifetime. A client can only be acquired by a -//! single caller at a time, and is returned to the pool when dropped. Idle clients may be removed -//! from the pool after some time, to free up the channel. +//! single caller at a time, and is returned to the pool when dropped. Idle clients are removed +//! from the pool after a while to free up resources. //! //! * StreamPool: manages bidirectional gRPC GetPage streams. Each stream acquires a client from the -//! ClientPool for the stream's lifetime. Internal streams are not exposed to callers; instead, it -//! 
returns a guard that can be used to send a single request, to properly enforce queue depth and -//! route responses. Internally, the pool will reuse or spin up a suitable stream for the request, -//! possibly pipelining multiple requests from multiple callers on the same stream (up to some -//! queue depth). Idle streams may be removed from the pool after a while to free up the client. +//! ClientPool for the stream's lifetime. A stream can only be acquired by a single caller at a +//! time, and is returned to the pool when dropped. Idle streams are removed from the pool after +//! a while to free up resources. +//! +//! The stream only supports sending a single, synchronous request at a time, and does not support +//! pipelining multiple requests from different callers onto the same stream -- instead, we scale +//! out concurrent streams to improve throughput. There are many reasons for this design choice: +//! +//! * It (mostly) eliminates head-of-line blocking. A single stream is processed sequentially by +//! a single server task, which may block e.g. on layer downloads, LSN waits, etc. +//! +//! * Cancellation becomes trivial, by closing the stream. Otherwise, if a caller goes away +//! (e.g. because of a timeout), the request would still be processed by the server and block +//! requests behind it in the stream. It might even block its own timeout retry. +//! +//! * Stream scheduling becomes significantly simpler and cheaper. +//! +//! * Individual callers can still use client-side batching for pipelining. +//! +//! * Idle streams are cheap. Benchmarks show that an idle GetPage stream takes up about 26 KB +//! per stream (2.5 GB for 100,000 streams), so we can afford to scale out. //! //! Each channel corresponds to one TCP connection. Each client unary request and each stream //! corresponds to one HTTP/2 stream and server task. @@ -29,32 +46,31 @@ //! TODO: error handling (including custom error types). //! TODO: observability. 
-use std::collections::{BTreeMap, HashMap};
+use std::collections::BTreeMap;
 use std::num::NonZero;
 use std::ops::{Deref, DerefMut};
+use std::pin::Pin;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex, Weak};
 use std::time::{Duration, Instant};
 
-use futures::StreamExt as _;
-use tokio::sync::mpsc::{Receiver, Sender};
-use tokio::sync::{OwnedSemaphorePermit, Semaphore, mpsc, oneshot};
+use futures::{Stream, StreamExt as _};
+use tokio::sync::{OwnedSemaphorePermit, Semaphore, watch};
+use tokio_stream::wrappers::WatchStream;
 use tokio_util::sync::CancellationToken;
+use tonic::codec::CompressionEncoding;
 use tonic::transport::{Channel, Endpoint};
-use tracing::{error, warn};
 
 use pageserver_page_api as page_api;
 use utils::id::{TenantId, TimelineId};
 use utils::shard::ShardIndex;
 
-/// Reap channels/clients/streams that have been idle for this long.
+/// Reap clients/streams that have been idle for this long. Channels are reaped immediately when
+/// empty, and indirectly rely on the client/stream idle timeouts.
 ///
-/// TODO: this is per-pool. For nested pools, it can take up to 3x as long for a TCP connection to
-/// be reaped. First, we must wait for an idle stream to be reaped, which marks its client as idle.
-/// Then, we must wait for the idle client to be reaped, which marks its channel as idle. Then, we
-/// must wait for the idle channel to be reaped. Is that a problem? Maybe not, we just have to
-/// account for it when setting the reap threshold. Alternatively, we can immediately reap empty
-/// channels, and/or stream pool clients.
+/// A stream's client will be reaped after 2x the idle threshold (first the stream, then the
+/// client), but that's okay -- if the stream closes abruptly (e.g. due to timeout or
+/// cancellation), we want to keep its client around in the pool for a while.
const REAP_IDLE_THRESHOLD: Duration = match cfg!(any(test, feature = "testing")) { false => Duration::from_secs(180), true => Duration::from_secs(1), // exercise reaping in tests @@ -82,8 +98,6 @@ pub struct ChannelPool { max_clients_per_channel: NonZero, /// Open channels. channels: Mutex>, - /// Reaps idle channels. - idle_reaper: Reaper, /// Channel ID generator. next_channel_id: AtomicUsize, } @@ -95,9 +109,6 @@ struct ChannelEntry { channel: Channel, /// Number of clients using this channel. clients: usize, - /// The channel has been idle (no clients) since this time. None if channel is in use. - /// INVARIANT: Some if clients == 0, otherwise None. - idle_since: Option, } impl ChannelPool { @@ -107,15 +118,12 @@ impl ChannelPool { E: TryInto + Send + Sync + 'static, >::Error: std::error::Error + Send + Sync, { - let pool = Arc::new(Self { + Ok(Arc::new(Self { endpoint: endpoint.try_into()?, max_clients_per_channel, channels: Mutex::default(), - idle_reaper: Reaper::new(REAP_IDLE_THRESHOLD, REAP_IDLE_INTERVAL), next_channel_id: AtomicUsize::default(), - }); - pool.idle_reaper.spawn(&pool); - Ok(pool) + })) } /// Acquires a gRPC channel for a client. Multiple clients may acquire the same channel. @@ -136,22 +144,17 @@ impl ChannelPool { let mut channels = self.channels.lock().unwrap(); // Try to find an existing channel with available capacity. We check entries in BTreeMap - // order, to fill up the lower-ordered channels first. The ClientPool also prefers clients - // with lower-ordered channel IDs first. This will cluster clients in lower-ordered + // order, to fill up the lower-ordered channels first. The client/stream pools also prefer + // clients with lower-ordered channel IDs first. This will cluster clients in lower-ordered // channels, and free up higher-ordered channels such that they can be reaped. 
for (&id, entry) in channels.iter_mut() { assert!( entry.clients <= self.max_clients_per_channel.get(), "channel overflow" ); - assert_eq!( - entry.idle_since.is_some(), - entry.clients == 0, - "incorrect channel idle state" - ); + assert_ne!(entry.clients, 0, "empty channel not reaped"); if entry.clients < self.max_clients_per_channel.get() { entry.clients += 1; - entry.idle_since = None; return ChannelGuard { pool: Arc::downgrade(self), id, @@ -168,7 +171,6 @@ impl ChannelPool { let entry = ChannelEntry { channel: channel.clone(), clients: 1, // account for the guard below - idle_since: None, }; channels.insert(id, entry); @@ -180,20 +182,6 @@ impl ChannelPool { } } -impl Reapable for ChannelPool { - /// Reaps channels that have been idle since before the cutoff. - fn reap_idle(&self, cutoff: Instant) { - self.channels.lock().unwrap().retain(|_, entry| { - let Some(idle_since) = entry.idle_since else { - assert_ne!(entry.clients, 0, "empty channel not marked idle"); - return true; - }; - assert_eq!(entry.clients, 0, "idle channel has clients"); - idle_since >= cutoff - }) - } -} - /// Tracks a channel acquired from the pool. The owned inner channel can be obtained with `take()`, /// since the gRPC client requires an owned `Channel`. pub struct ChannelGuard { @@ -210,7 +198,7 @@ impl ChannelGuard { } } -/// Returns the channel to the pool. +/// Returns the channel to the pool. The channel is closed when empty. impl Drop for ChannelGuard { fn drop(&mut self) { let Some(pool) = self.pool.upgrade() else { @@ -219,11 +207,12 @@ impl Drop for ChannelGuard { let mut channels = pool.channels.lock().unwrap(); let entry = channels.get_mut(&self.id).expect("unknown channel"); - assert!(entry.idle_since.is_none(), "active channel marked idle"); assert!(entry.clients > 0, "channel underflow"); entry.clients -= 1; + + // Reap empty channels immediately. 
if entry.clients == 0 { - entry.idle_since = Some(Instant::now()); // mark channel as idle + channels.remove(&self.id); } } } @@ -242,6 +231,8 @@ pub struct ClientPool { shard_id: ShardIndex, /// Authentication token, if any. auth_token: Option, + /// Compression to use. + compression: Option, /// Channel pool to acquire channels from. channel_pool: Arc, /// Limits the max number of concurrent clients for this pool. None if the pool is unbounded. @@ -250,8 +241,7 @@ pub struct ClientPool { /// /// The first client in the map will be acquired next. The map is sorted by client ID, which in /// turn is sorted by its channel ID, such that we prefer acquiring idle clients from - /// lower-ordered channels. This allows us to free up and reap higher-numbered channels as idle - /// clients are reaped. + /// lower-ordered channels. This allows us to free up and reap higher-ordered channels. idle: Mutex>, /// Reaps idle clients. idle_reaper: Reaper, @@ -281,6 +271,7 @@ impl ClientPool { timeline_id: TimelineId, shard_id: ShardIndex, auth_token: Option, + compression: Option, max_clients: Option>, ) -> Arc { let pool = Arc::new(Self { @@ -288,6 +279,7 @@ impl ClientPool { timeline_id, shard_id, auth_token, + compression, channel_pool, idle: Mutex::default(), idle_reaper: Reaper::new(REAP_IDLE_THRESHOLD, REAP_IDLE_INTERVAL), @@ -305,7 +297,7 @@ impl ClientPool { /// This is moderately performance-sensitive. It is called for every unary request, but these /// establish a new gRPC stream per request so they're already expensive. GetPage requests use /// the `StreamPool` instead. - pub async fn get(self: &Arc) -> anyhow::Result { + pub async fn get(self: &Arc) -> tonic::Result { // Acquire a permit if the pool is bounded. let mut permit = None; if let Some(limiter) = self.limiter.clone() { @@ -323,7 +315,7 @@ impl ClientPool { }); } - // Slow path: construct a new client. + // Construct a new client. 
let mut channel_guard = self.channel_pool.get(); let client = page_api::Client::new( channel_guard.take(), @@ -331,8 +323,9 @@ impl ClientPool { self.timeline_id, self.shard_id, self.auth_token.clone(), - None, - )?; + self.compression, + ) + .map_err(|err| tonic::Status::internal(format!("failed to create client: {err}")))?; Ok(ClientGuard { pool: Arc::downgrade(self), @@ -402,277 +395,187 @@ impl Drop for ClientGuard { /// A pool of bidirectional gRPC streams. Currently only used for GetPage streams. Each stream /// acquires a client from the inner `ClientPool` for the stream's lifetime. /// -/// Individual streams are not exposed to callers -- instead, the returned guard can be used to send -/// a single request and await the response. Internally, requests are multiplexed across streams and -/// channels. This allows proper queue depth enforcement and response routing. +/// Individual streams only send a single request at a time, and do not pipeline multiple callers +/// onto the same stream. Instead, we scale out the number of concurrent streams. This is primarily +/// to eliminate head-of-line blocking. See the module documentation for more details. /// /// TODO: consider making this generic over request and response types; not currently needed. pub struct StreamPool { /// The client pool to acquire clients from. Must be unbounded. client_pool: Arc, - /// All pooled streams. + /// Idle pooled streams. Acquired streams are removed from here and returned on drop. /// - /// Incoming requests will be sent over an existing stream with available capacity. If all - /// streams are full, a new one is spun up and added to the pool (up to `max_streams`). Each - /// stream has an associated Tokio task that processes requests and responses. - streams: Mutex>, - /// The max number of concurrent streams, or None if unbounded. - max_streams: Option>, - /// The max number of concurrent requests per stream. 
- max_queue_depth: NonZero, - /// Limits the max number of concurrent requests, given by `max_streams * max_queue_depth`. - /// None if the pool is unbounded. + /// The first stream in the map will be acquired next. The map is sorted by stream ID, which is + /// equivalent to the client ID and in turn sorted by its channel ID. This way we prefer + /// acquiring idle streams from lower-ordered channels, which allows us to free up and reap + /// higher-ordered channels. + idle: Mutex>, + /// Limits the max number of concurrent streams. None if the pool is unbounded. limiter: Option>, /// Reaps idle streams. idle_reaper: Reaper, - /// Stream ID generator. - next_stream_id: AtomicUsize, } -type StreamID = usize; -type RequestSender = Sender<(page_api::GetPageRequest, ResponseSender)>; -type RequestReceiver = Receiver<(page_api::GetPageRequest, ResponseSender)>; -type ResponseSender = oneshot::Sender>; +/// The stream ID. Reuses the inner client ID. +type StreamID = ClientID; +/// A pooled stream. struct StreamEntry { - /// Sends caller requests to the stream task. The stream task exits when this is dropped. - sender: RequestSender, - /// Number of in-flight requests on this stream. - queue_depth: usize, - /// The time when this stream went idle (queue_depth == 0). - /// INVARIANT: Some if queue_depth == 0, otherwise None. - idle_since: Option, + /// The bidirectional stream. + stream: BiStream, + /// The time when this stream was last used, i.e. when it was put back into `StreamPool::idle`. + idle_since: Instant, +} + +/// A bidirectional GetPage stream and its client. Can send requests and receive responses. +struct BiStream { + /// The owning client. Holds onto the channel slot while the stream is alive. + client: ClientGuard, + /// Stream for sending requests. Uses a watch channel, so it can only send a single request at a + /// time, and the caller must await the response before sending another request. This is + /// enforced by `StreamGuard::send`. 
+ sender: watch::Sender, + /// Stream for receiving responses. + receiver: Pin> + Send>>, } impl StreamPool { - /// Creates a new stream pool, using the given client pool. It will send up to `max_queue_depth` - /// concurrent requests on each stream, and use up to `max_streams` concurrent streams. + /// Creates a new stream pool, using the given client pool. It will use up to `max_streams` + /// concurrent streams. /// /// The client pool must be unbounded. The stream pool will enforce its own limits, and because /// streams are long-lived they can cause persistent starvation if they exhaust the client pool. /// The stream pool should generally have its own dedicated client pool (but it can share a /// channel pool with others since these are always unbounded). - pub fn new( - client_pool: Arc, - max_streams: Option>, - max_queue_depth: NonZero, - ) -> Arc { + pub fn new(client_pool: Arc, max_streams: Option>) -> Arc { assert!(client_pool.limiter.is_none(), "bounded client pool"); let pool = Arc::new(Self { client_pool, - streams: Mutex::default(), - limiter: max_streams.map(|max_streams| { - Arc::new(Semaphore::new(max_streams.get() * max_queue_depth.get())) - }), - max_streams, - max_queue_depth, + idle: Mutex::default(), + limiter: max_streams.map(|max_streams| Arc::new(Semaphore::new(max_streams.get()))), idle_reaper: Reaper::new(REAP_IDLE_THRESHOLD, REAP_IDLE_INTERVAL), - next_stream_id: AtomicUsize::default(), }); pool.idle_reaper.spawn(&pool); pool } - /// Acquires an available stream from the pool, or spins up a new stream async if all streams - /// are full. Returns a guard that can be used to send a single request on the stream and await - /// the response, with queue depth quota already acquired. Blocks if the pool is at capacity - /// (i.e. `CLIENT_LIMIT * STREAM_QUEUE_DEPTH` requests in flight). + /// Acquires an available stream from the pool, or spins up a new stream if all streams are + /// full. 
Returns a guard that can be used to send requests and await the responses. Blocks if + /// the pool is full. /// /// This is very performance-sensitive, as it is on the GetPage hot path. /// - /// TODO: this must do something more sophisticated for performance. We want: - /// - /// * Cheap, concurrent access in the common case where we can use a pooled stream. - /// * Quick acquisition of pooled streams with available capacity. - /// * Prefer streams that belong to lower-numbered channels, to reap idle channels. - /// * Prefer filling up existing streams' queue depth before spinning up new streams. - /// * Don't hold a lock while spinning up new streams. - /// * Allow concurrent clients to join onto streams while they're spun up. - /// * Allow spinning up multiple streams concurrently, but don't overshoot limits. - /// - /// For now, we just do something simple but inefficient (linear scan under mutex). - pub async fn get(self: &Arc) -> StreamGuard { + /// TODO: is a `Mutex` performant enough? Will it become too contended? We can't + /// trivially use e.g. DashMap or sharding, because we want to pop lower-ordered streams first + /// to free up higher-ordered channels. + pub async fn get(self: &Arc) -> tonic::Result { // Acquire a permit if the pool is bounded. let mut permit = None; if let Some(limiter) = self.limiter.clone() { permit = Some(limiter.acquire_owned().await.expect("never closed")); } - let mut streams = self.streams.lock().unwrap(); - // Look for a pooled stream with available capacity. 
- for (&id, entry) in streams.iter_mut() { - assert!( - entry.queue_depth <= self.max_queue_depth.get(), - "stream queue overflow" - ); - assert_eq!( - entry.idle_since.is_some(), - entry.queue_depth == 0, - "incorrect stream idle state" - ); - if entry.queue_depth < self.max_queue_depth.get() { - entry.queue_depth += 1; - entry.idle_since = None; - return StreamGuard { - pool: Arc::downgrade(self), - id, - sender: entry.sender.clone(), - permit, - }; - } + // Fast path: acquire an idle stream from the pool. + if let Some((_, entry)) = self.idle.lock().unwrap().pop_first() { + return Ok(StreamGuard { + pool: Arc::downgrade(self), + stream: Some(entry.stream), + can_reuse: true, + permit, + }); } - // No available stream, spin up a new one. We install the stream entry in the pool first and - // return the guard, while spinning up the stream task async. This allows other callers to - // join onto this stream and also create additional streams concurrently if this fills up. - let id = self.next_stream_id.fetch_add(1, Ordering::Relaxed); - let (req_tx, req_rx) = mpsc::channel(self.max_queue_depth.get()); - let entry = StreamEntry { - sender: req_tx.clone(), - queue_depth: 1, // reserve quota for this caller - idle_since: None, - }; - streams.insert(id, entry); + // Spin up a new stream. Uses a watch channel to send a single request at a time, since + // `StreamGuard::send` enforces this anyway and it avoids unnecessary channel overhead. 
+ let mut client = self.client_pool.get().await?; - if let Some(max_streams) = self.max_streams { - assert!(streams.len() <= max_streams.get(), "stream overflow"); - }; + let (req_tx, req_rx) = watch::channel(page_api::GetPageRequest::default()); + let req_stream = WatchStream::from_changes(req_rx); + let resp_stream = client.get_pages(req_stream).await?; - let client_pool = self.client_pool.clone(); - let pool = Arc::downgrade(self); - - tokio::spawn(async move { - if let Err(err) = Self::run_stream(client_pool, req_rx).await { - error!("stream failed: {err}"); - } - // Remove stream from pool on exit. Weak reference to avoid holding the pool alive. - if let Some(pool) = pool.upgrade() { - let entry = pool.streams.lock().unwrap().remove(&id); - assert!(entry.is_some(), "unknown stream ID: {id}"); - } - }); - - StreamGuard { + Ok(StreamGuard { pool: Arc::downgrade(self), - id, - sender: req_tx, + stream: Some(BiStream { + client, + sender: req_tx, + receiver: Box::pin(resp_stream), + }), + can_reuse: true, permit, - } - } - - /// Runs a stream task. This acquires a client from the `ClientPool` and establishes a - /// bidirectional GetPage stream, then forwards requests and responses between callers and the - /// stream. It does not track or enforce queue depths -- that's done by `get()` since it must be - /// atomic with pool stream acquisition. - /// - /// The task exits when the request channel is closed, or on a stream error. The caller is - /// responsible for removing the stream from the pool on exit. - async fn run_stream( - client_pool: Arc, - mut caller_rx: RequestReceiver, - ) -> anyhow::Result<()> { - // Acquire a client from the pool and create a stream. - let mut client = client_pool.get().await?; - - let (req_tx, req_rx) = mpsc::channel(1); - let req_stream = tokio_stream::wrappers::ReceiverStream::new(req_rx); - let mut resp_stream = client.get_pages(req_stream).await?; - - // Track caller response channels by request ID. 
If the task returns early, these response - // channels will be dropped and the waiting callers will receive an error. - let mut callers = HashMap::new(); - - // Process requests and responses. - loop { - tokio::select! { - // Receive requests from callers and send them to the stream. - req = caller_rx.recv() => { - // Shut down if request channel is closed. - let Some((req, resp_tx)) = req else { - return Ok(()); - }; - - // Store the response channel by request ID. - if callers.contains_key(&req.request_id) { - // Error on request ID duplicates. Ignore callers that went away. - _ = resp_tx.send(Err(tonic::Status::invalid_argument( - format!("duplicate request ID: {}", req.request_id), - ))); - continue; - } - callers.insert(req.request_id, resp_tx); - - // Send the request on the stream. Bail out if the send fails. - req_tx.send(req).await.map_err(|_| { - tonic::Status::unavailable("stream closed") - })?; - } - - // Receive responses from the stream and send them to callers. - resp = resp_stream.next() => { - // Shut down if the stream is closed, and bail out on stream errors. - let Some(resp) = resp.transpose()? else { - return Ok(()) - }; - - // Send the response to the caller. Ignore errors if the caller went away. - let Some(resp_tx) = callers.remove(&resp.request_id) else { - warn!("received response for unknown request ID: {}", resp.request_id); - continue; - }; - _ = resp_tx.send(Ok(resp)); - } - } - } + }) } } impl Reapable for StreamPool { /// Reaps streams that have been idle since before the cutoff. fn reap_idle(&self, cutoff: Instant) { - self.streams.lock().unwrap().retain(|_, entry| { - let Some(idle_since) = entry.idle_since else { - assert_ne!(entry.queue_depth, 0, "empty stream not marked idle"); - return true; - }; - assert_eq!(entry.queue_depth, 0, "idle stream has requests"); - idle_since >= cutoff - }); + self.idle + .lock() + .unwrap() + .retain(|_, entry| entry.idle_since >= cutoff); } } -/// A pooled stream reference. 
Can be used to send a single request, to properly enforce queue -/// depth. Queue depth is already reserved and will be returned on drop. +/// A stream acquired from the pool. Returned to the pool when dropped, unless there are still +/// in-flight requests on the stream, or the stream failed. pub struct StreamGuard { pool: Weak, - id: StreamID, - sender: RequestSender, + stream: Option, // Some until dropped + can_reuse: bool, // returned to pool if true permit: Option, // None if pool is unbounded } impl StreamGuard { - /// Sends a request on the stream and awaits the response. Consumes the guard, since it's only - /// valid for a single request (to enforce queue depth). This also drops the guard on return and - /// returns the queue depth quota to the pool. + /// Sends a request on the stream and awaits the response. If the future is dropped before it + /// resolves (e.g. due to a timeout or cancellation), the stream will be closed to cancel the + /// request and is not returned to the pool. The same is true if the stream errors, in which + /// case the caller can't send further requests on the stream. /// - /// The `GetPageRequest::request_id` must be unique across in-flight requests. + /// We only support sending a single request at a time, to eliminate head-of-line blocking. See + /// module documentation for details. /// /// NB: errors are often returned as `GetPageResponse::status_code` instead of `tonic::Status` /// to avoid tearing down the stream for per-request errors. Callers must check this. pub async fn send( - self, + &mut self, req: page_api::GetPageRequest, ) -> tonic::Result { - let (resp_tx, resp_rx) = oneshot::channel(); + let req_id = req.request_id; + let stream = self.stream.as_mut().expect("not dropped"); - self.sender - .send((req, resp_tx)) - .await + // Mark the stream as not reusable while the request is in flight. 
We can't return the + // stream to the pool until we receive the response, to avoid head-of-line blocking and + // stale responses. Failed streams can't be reused either. + if !self.can_reuse { + return Err(tonic::Status::internal("stream can't be reused")); + } + self.can_reuse = false; + + // Send the request and receive the response. + // + // NB: this uses a watch channel, so it's unsafe to change this code to pipeline requests. + stream + .sender + .send(req) .map_err(|_| tonic::Status::unavailable("stream closed"))?; - resp_rx + let resp = stream + .receiver + .next() .await - .map_err(|_| tonic::Status::unavailable("stream closed"))? + .ok_or_else(|| tonic::Status::unavailable("stream closed"))??; + + if resp.request_id != req_id { + return Err(tonic::Status::internal(format!( + "response ID {} does not match request ID {}", + resp.request_id, req_id + ))); + } + + // Success, mark the stream as reusable. + self.can_reuse = true; + + Ok(resp) } } @@ -682,17 +585,21 @@ impl Drop for StreamGuard { return; // pool was dropped }; - // Release the queue depth reservation on drop. This can prematurely decrement it if dropped - // before the response is received, but that's okay. - let mut streams = pool.streams.lock().unwrap(); - let entry = streams.get_mut(&self.id).expect("unknown stream"); - assert!(entry.idle_since.is_none(), "active stream marked idle"); - assert!(entry.queue_depth > 0, "stream queue underflow"); - entry.queue_depth -= 1; - if entry.queue_depth == 0 { - entry.idle_since = Some(Instant::now()); // mark stream as idle + // If the stream isn't reusable, it can't be returned to the pool. + if !self.can_reuse { + return; } + // Place the idle stream back into the pool. 
+ let entry = StreamEntry { + stream: self.stream.take().expect("dropped once"), + idle_since: Instant::now(), + }; + pool.idle + .lock() + .unwrap() + .insert(entry.stream.client.id, entry); + _ = self.permit; // returned on drop, referenced for visibility } } diff --git a/pageserver/client_grpc/src/retry.rs b/pageserver/client_grpc/src/retry.rs index fe26c99289..8a138711e8 100644 --- a/pageserver/client_grpc/src/retry.rs +++ b/pageserver/client_grpc/src/retry.rs @@ -1,5 +1,6 @@ use std::time::Duration; +use futures::future::pending; use tokio::time::Instant; use tracing::{error, info, warn}; @@ -8,61 +9,54 @@ use utils::backoff::exponential_backoff_duration; /// A retry handler for Pageserver gRPC requests. /// /// This is used instead of backoff::retry for better control and observability. -#[derive(Clone, Copy)] -pub struct Retry; +pub struct Retry { + /// Timeout across all retry attempts. If None, retries forever. + pub timeout: Option, + /// The initial backoff duration. The first retry does not use a backoff. + pub base_backoff: Duration, + /// The maximum backoff duration. + pub max_backoff: Duration, +} impl Retry { - /// The per-request timeout. - // TODO: tune these, and/or make them configurable. Should we retry forever? - const REQUEST_TIMEOUT: Duration = Duration::from_secs(10); - /// The total timeout across all attempts - const TOTAL_TIMEOUT: Duration = Duration::from_secs(60); - /// The initial backoff duration. - const BASE_BACKOFF: Duration = Duration::from_millis(10); - /// The maximum backoff duration. - const MAX_BACKOFF: Duration = Duration::from_secs(10); - /// If true, log successful requests. For debugging. - const LOG_SUCCESS: bool = false; - /// Runs the given async closure with timeouts and retries (exponential backoff). Logs errors, /// using the current tracing span for context. /// - /// Only certain gRPC status codes are retried, see [`Self::should_retry`]. 
For default - /// timeouts, see [`Self::REQUEST_TIMEOUT`] and [`Self::TOTAL_TIMEOUT`]. + /// Only certain gRPC status codes are retried, see [`Self::should_retry`]. pub async fn with(&self, mut f: F) -> tonic::Result where - F: FnMut() -> O, + F: FnMut(usize) -> O, // pass attempt number, starting at 0 O: Future>, { let started = Instant::now(); - let deadline = started + Self::TOTAL_TIMEOUT; + let deadline = self.timeout.map(|timeout| started + timeout); let mut last_error = None; let mut retries = 0; loop { - // Set up a future to wait for the backoff (if any) and run the request with a timeout. + // Set up a future to wait for the backoff, if any, and run the closure. let backoff_and_try = async { // NB: sleep() always sleeps 1ms, even when given a 0 argument. See: // https://github.com/tokio-rs/tokio/issues/6866 - if let Some(backoff) = Self::backoff_duration(retries) { + if let Some(backoff) = self.backoff_duration(retries) { tokio::time::sleep(backoff).await; } - let request_started = Instant::now(); - tokio::time::timeout(Self::REQUEST_TIMEOUT, f()) - .await - .map_err(|_| { - tonic::Status::deadline_exceeded(format!( - "request timed out after {:.3}s", - request_started.elapsed().as_secs_f64() - )) - })? + f(retries).await }; - // Wait for the backoff and request, or bail out if the total timeout is exceeded. + // Set up a future for the timeout, if any. + let timeout = async { + match deadline { + Some(deadline) => tokio::time::sleep_until(deadline).await, + None => pending().await, + } + }; + + // Wait for the backoff and request, or bail out if the timeout is exceeded. let result = tokio::select! { result = backoff_and_try => result, - _ = tokio::time::sleep_until(deadline) => { + _ = timeout => { let last_error = last_error.unwrap_or_else(|| { tonic::Status::deadline_exceeded(format!( "request timed out after {:.3}s", @@ -80,7 +74,7 @@ impl Retry { match result { // Success, return the result. 
Ok(result) => { - if retries > 0 || Self::LOG_SUCCESS { + if retries > 0 { info!( "request succeeded after {retries} retries in {:.3}s", started.elapsed().as_secs_f64(), @@ -113,12 +107,13 @@ impl Retry { } } - /// Returns the backoff duration for the given retry attempt, or None for no backoff. - fn backoff_duration(retry: usize) -> Option { + /// Returns the backoff duration for the given retry attempt, or None for no backoff. The first + /// attempt and first retry never backs off, so this returns None for 0 and 1 retries. + fn backoff_duration(&self, retries: usize) -> Option { let backoff = exponential_backoff_duration( - retry as u32, - Self::BASE_BACKOFF.as_secs_f64(), - Self::MAX_BACKOFF.as_secs_f64(), + (retries as u32).saturating_sub(1), // first retry does not back off + self.base_backoff.as_secs_f64(), + self.max_backoff.as_secs_f64(), ); (!backoff.is_zero()).then_some(backoff) } @@ -132,7 +127,6 @@ impl Retry { tonic::Code::Aborted => true, tonic::Code::Cancelled => true, tonic::Code::DeadlineExceeded => true, // maybe transient slowness - tonic::Code::Internal => true, // maybe transient failure? tonic::Code::ResourceExhausted => true, tonic::Code::Unavailable => true, @@ -140,6 +134,10 @@ impl Retry { tonic::Code::AlreadyExists => false, tonic::Code::DataLoss => false, tonic::Code::FailedPrecondition => false, + // NB: don't retry Internal. It is intended for serious errors such as invariant + // violations, and is also used for client-side invariant checks that would otherwise + // result in retry loops. 
+ tonic::Code::Internal => false, tonic::Code::InvalidArgument => false, tonic::Code::NotFound => false, tonic::Code::OutOfRange => false, diff --git a/pageserver/client_grpc/src/split.rs b/pageserver/client_grpc/src/split.rs index 894aaa992c..b7539b900c 100644 --- a/pageserver/client_grpc/src/split.rs +++ b/pageserver/client_grpc/src/split.rs @@ -5,27 +5,24 @@ use bytes::Bytes; use pageserver_api::key::rel_block_to_key; use pageserver_api::shard::{ShardStripeSize, key_to_shard_number}; use pageserver_page_api as page_api; -use utils::shard::{ShardCount, ShardIndex}; +use utils::shard::{ShardCount, ShardIndex, ShardNumber}; /// Splits GetPageRequests that straddle shard boundaries and assembles the responses. /// TODO: add tests for this. pub struct GetPageSplitter { - /// The original request ID. Used for all shard requests. - request_id: page_api::RequestID, /// Split requests by shard index. requests: HashMap, - /// Maps the offset in `GetPageRequest::block_numbers` to the owning shard. Used to assemble - /// the response pages in the same order as the original request. + /// The response being assembled. Preallocated with empty pages, to be filled in. + response: page_api::GetPageResponse, + /// Maps the offset in `request.block_numbers` and `response.pages` to the owning shard. Used + /// to assemble the response pages in the same order as the original request. block_shards: Vec, - /// Page responses by shard index. Will be assembled into a single response. - responses: HashMap>, } impl GetPageSplitter { /// Checks if the given request only touches a single shard, and returns the shard ID. This is /// the common case, so we check first in order to avoid unnecessary allocations and overhead. - /// The caller must ensure that the request has at least one block number, or this will panic. 
- pub fn is_single_shard( + pub fn for_single_shard( req: &page_api::GetPageRequest, count: ShardCount, stripe_size: ShardStripeSize, @@ -35,8 +32,12 @@ impl GetPageSplitter { return Some(ShardIndex::unsharded()); } - // Find the base shard index for the first page, and compare with the rest. - let key = rel_block_to_key(req.rel, *req.block_numbers.first().expect("no pages")); + // Find the first page's shard, for comparison. If there are no pages, just return the first + // shard (caller likely checked already, otherwise the server will reject it). + let Some(&first_page) = req.block_numbers.first() else { + return Some(ShardIndex::new(ShardNumber(0), count)); + }; + let key = rel_block_to_key(req.rel, first_page); let shard_number = key_to_shard_number(count, stripe_size, &key); req.block_numbers @@ -57,19 +58,19 @@ impl GetPageSplitter { ) -> Self { // The caller should make sure we don't split requests unnecessarily. debug_assert!( - Self::is_single_shard(&req, count, stripe_size).is_none(), + Self::for_single_shard(&req, count, stripe_size).is_none(), "unnecessary request split" ); // Split the requests by shard index. let mut requests = HashMap::with_capacity(2); // common case let mut block_shards = Vec::with_capacity(req.block_numbers.len()); - for blkno in req.block_numbers { + for &blkno in &req.block_numbers { let key = rel_block_to_key(req.rel, blkno); let shard_number = key_to_shard_number(count, stripe_size, &key); let shard_id = ShardIndex::new(shard_number, count); - let shard_req = requests + requests .entry(shard_id) .or_insert_with(|| page_api::GetPageRequest { request_id: req.request_id, @@ -77,20 +78,39 @@ impl GetPageSplitter { rel: req.rel, read_lsn: req.read_lsn, block_numbers: Vec::new(), - }); - shard_req.block_numbers.push(blkno); + }) + .block_numbers + .push(blkno); block_shards.push(shard_id); } - Self { + // Construct a response to be populated by shard responses. Preallocate empty page slots + // with the expected block numbers. 
+ let response = page_api::GetPageResponse { request_id: req.request_id, - responses: HashMap::with_capacity(requests.len()), + status_code: page_api::GetPageStatusCode::Ok, + reason: None, + rel: req.rel, + pages: req + .block_numbers + .into_iter() + .map(|block_number| { + page_api::Page { + block_number, + image: Bytes::new(), // empty page slot to be filled in + } + }) + .collect(), + }; + + Self { requests, + response, block_shards, } } - /// Drains the per-shard requests, moving them out of the hashmap to avoid extra allocations. + /// Drains the per-shard requests, moving them out of the splitter to avoid extra allocations. pub fn drain_requests( &mut self, ) -> impl Iterator { @@ -99,70 +119,91 @@ impl GetPageSplitter { /// Adds a response from the given shard. The response must match the request ID and have an OK /// status code. A response must not already exist for the given shard ID. - pub fn add_response(&mut self, shard_id: ShardIndex, response: page_api::GetPageResponse) { - // NB: this is called below a `Retry::with()`, so unrecoverable errors should not use a - // retryable status code (e.g. `Internal`). - + #[allow(clippy::result_large_err)] + pub fn add_response( + &mut self, + shard_id: ShardIndex, + response: page_api::GetPageResponse, + ) -> tonic::Result<()> { // The caller should already have converted status codes into tonic::Status. - assert_eq!( - response.status_code, - page_api::GetPageStatusCode::Ok, - "non-OK response" - ); + if response.status_code != page_api::GetPageStatusCode::Ok { + return Err(tonic::Status::internal(format!( + "unexpected non-OK response for shard {shard_id}: {} {}", + response.status_code, + response.reason.unwrap_or_default() + ))); + } - // The stream pool ensures the response matches the request ID. 
- assert_eq!(response.request_id, self.request_id, "response ID mismatch"); + if response.request_id != self.response.request_id { + return Err(tonic::Status::internal(format!( + "response ID mismatch for shard {shard_id}: expected {}, got {}", + self.response.request_id, response.request_id + ))); + } - // Add the response data to the map. - let old = self.responses.insert(shard_id, response.page_images); + // Place the shard response pages into the assembled response, in request order. + let mut pages = response.pages.into_iter(); - // We only dispatch one request per shard. - assert!(old.is_none(), "duplicate response for shard {shard_id}"); + for (i, &s) in self.block_shards.iter().enumerate() { + if shard_id != s { + continue; + } + + let Some(slot) = self.response.pages.get_mut(i) else { + return Err(tonic::Status::internal(format!( + "no block_shards slot {i} for shard {shard_id}" + ))); + }; + let Some(page) = pages.next() else { + return Err(tonic::Status::internal(format!( + "missing page {} in shard {shard_id} response", + slot.block_number + ))); + }; + if page.block_number != slot.block_number { + return Err(tonic::Status::internal(format!( + "shard {shard_id} returned wrong page at index {i}, expected {} got {}", + slot.block_number, page.block_number + ))); + } + if !slot.image.is_empty() { + return Err(tonic::Status::internal(format!( + "shard {shard_id} returned duplicate page {} at index {i}", + slot.block_number + ))); + } + + *slot = page; + } + + // Make sure we've consumed all pages from the shard response. + if let Some(extra_page) = pages.next() { + return Err(tonic::Status::internal(format!( + "shard {shard_id} returned extra page: {}", + extra_page.block_number + ))); + } + + Ok(()) } - /// Assembles the shard responses into a single response. Responses must be present for all - /// relevant shards, and the total number of pages must match the original request. + /// Fetches the final, assembled response. 
#[allow(clippy::result_large_err)] - pub fn assemble_response(self) -> tonic::Result { - // NB: this is called below a `Retry::with()`, so unrecoverable errors should not use a - // retryable status code (e.g. `Internal`). - - let mut response = page_api::GetPageResponse { - request_id: self.request_id, - status_code: page_api::GetPageStatusCode::Ok, - reason: None, - page_images: Vec::with_capacity(self.block_shards.len()), - }; - - // Set up per-shard page iterators we can pull from. - let mut shard_responses = HashMap::with_capacity(self.responses.len()); - for (shard_id, responses) in self.responses { - shard_responses.insert(shard_id, responses.into_iter()); - } - - // Reassemble the responses in the same order as the original request. - for shard_id in &self.block_shards { - let page = shard_responses - .get_mut(shard_id) - .ok_or_else(|| { - tonic::Status::data_loss(format!("missing response for shard {shard_id}")) - })? - .next() - .ok_or_else(|| { - tonic::Status::data_loss(format!("missing page from shard {shard_id}")) - })?; - response.page_images.push(page); - } - - // Make sure there are no additional pages. - for (shard_id, mut pages) in shard_responses { - if pages.next().is_some() { - return Err(tonic::Status::out_of_range(format!( - "extra pages returned from shard {shard_id}" + pub fn get_response(self) -> tonic::Result { + // Check that the response is complete. + for (i, page) in self.response.pages.iter().enumerate() { + if page.image.is_empty() { + return Err(tonic::Status::internal(format!( + "missing page {} for shard {}", + page.block_number, + self.block_shards + .get(i) + .map(|s| s.to_string()) + .unwrap_or_else(|| "?".to_string()) ))); } } - Ok(response) + Ok(self.response) } } diff --git a/pageserver/ctl/Cargo.toml b/pageserver/ctl/Cargo.toml index 7b70f0dc87..ba34fa1f69 100644 --- a/pageserver/ctl/Cargo.toml +++ b/pageserver/ctl/Cargo.toml @@ -17,6 +17,7 @@ pageserver = { path = ".." 
} pageserver_api.workspace = true remote_storage = { path = "../../libs/remote_storage" } postgres_ffi.workspace = true +serde.workspace = true thiserror.workspace = true tokio.workspace = true tokio-util.workspace = true diff --git a/pageserver/ctl/src/download_remote_object.rs b/pageserver/ctl/src/download_remote_object.rs new file mode 100644 index 0000000000..aa09774701 --- /dev/null +++ b/pageserver/ctl/src/download_remote_object.rs @@ -0,0 +1,85 @@ +use camino::Utf8PathBuf; +use clap::Parser; +use tokio_util::sync::CancellationToken; + +/// Download a specific object from remote storage to a local file. +/// +/// The remote storage configuration is supplied via the `REMOTE_STORAGE_CONFIG` environment +/// variable, in the same TOML format that the pageserver itself understands. This allows the +/// command to work with any cloud supported by the `remote_storage` crate (currently AWS S3, +/// Azure Blob Storage and local files), as long as the credentials are available via the +/// standard environment variables expected by the underlying SDKs. +/// +/// Examples for setting the environment variable: +/// +/// ```bash +/// # AWS S3 (region can also be provided via AWS_REGION) +/// export REMOTE_STORAGE_CONFIG='remote_storage = { bucket_name = "my-bucket", bucket_region = "us-east-2" }' +/// +/// # Azure Blob Storage (account key picked up from AZURE_STORAGE_ACCOUNT_KEY) +/// export REMOTE_STORAGE_CONFIG='remote_storage = { container = "my-container", account = "my-account" }' +/// ``` +#[derive(Parser)] +pub(crate) struct DownloadRemoteObjectCmd { + /// Key / path of the object to download (relative to the remote storage prefix). + /// + /// Examples: + /// "wal/3aa8f.../00000001000000000000000A" + /// "pageserver/v1/tenants//timelines//layer_12345" + pub remote_path: String, + + /// Path of the local file to create. Existing file will be overwritten. 
+ /// + /// Examples: + /// "./segment" + /// "/tmp/layer_12345.parquet" + pub output_file: Utf8PathBuf, +} + +pub(crate) async fn main(cmd: &DownloadRemoteObjectCmd) -> anyhow::Result<()> { + use remote_storage::{DownloadOpts, GenericRemoteStorage, RemotePath, RemoteStorageConfig}; + + // Fetch remote storage configuration from the environment + let config_str = std::env::var("REMOTE_STORAGE_CONFIG").map_err(|_| { + anyhow::anyhow!( + "'REMOTE_STORAGE_CONFIG' environment variable must be set to a valid remote storage TOML config" + ) + })?; + + let config = RemoteStorageConfig::from_toml_str(&config_str)?; + + // Initialise remote storage client + let storage = GenericRemoteStorage::from_config(&config).await?; + + // RemotePath must be relative – leading slashes confuse the parser. + let remote_path_str = cmd.remote_path.trim_start_matches('/'); + let remote_path = RemotePath::from_string(remote_path_str)?; + + let cancel = CancellationToken::new(); + + println!( + "Downloading '{remote_path}' from remote storage bucket {:?} ...", + config.storage.bucket_name() + ); + + // Start the actual download + let download = storage + .download(&remote_path, &DownloadOpts::default(), &cancel) + .await?; + + // Stream to file + let mut reader = tokio_util::io::StreamReader::new(download.download_stream); + let tmp_path = cmd.output_file.with_extension("tmp"); + let mut file = tokio::fs::File::create(&tmp_path).await?; + tokio::io::copy(&mut reader, &mut file).await?; + file.sync_all().await?; + // Atomically move into place + tokio::fs::rename(&tmp_path, &cmd.output_file).await?; + + println!( + "Downloaded to '{}'. 
Last modified: {:?}, etag: {}", + cmd.output_file, download.last_modified, download.etag + ); + + Ok(()) +} diff --git a/pageserver/ctl/src/index_part.rs b/pageserver/ctl/src/index_part.rs index 6cce2844c7..9801f3c9dc 100644 --- a/pageserver/ctl/src/index_part.rs +++ b/pageserver/ctl/src/index_part.rs @@ -1,10 +1,180 @@ -use anyhow::Context; +use std::str::FromStr; + +use anyhow::{Context, Ok}; use camino::Utf8PathBuf; -use pageserver::tenant::IndexPart; +use pageserver::tenant::{ + IndexPart, + layer_map::{LayerMap, SearchResult}, + remote_timeline_client::{index::LayerFileMetadata, remote_layer_path}, + storage_layer::{LayerName, LayerVisibilityHint, PersistentLayerDesc, ReadableLayerWeak}, +}; +use pageserver_api::key::Key; +use serde::Serialize; +use std::collections::BTreeMap; +use utils::{ + id::{TenantId, TimelineId}, + lsn::Lsn, + shard::TenantShardId, +}; #[derive(clap::Subcommand)] pub(crate) enum IndexPartCmd { - Dump { path: Utf8PathBuf }, + Dump { + path: Utf8PathBuf, + }, + /// Find all layers that need to be searched to construct the given page at the given LSN. + Search { + #[arg(long)] + tenant_id: String, + #[arg(long)] + timeline_id: String, + #[arg(long)] + path: Utf8PathBuf, + #[arg(long)] + key: String, + #[arg(long)] + lsn: String, + }, + /// List all visible delta and image layers at the latest LSN. 
+ ListVisibleLayers { + #[arg(long)] + path: Utf8PathBuf, + }, +} + +fn create_layer_map_from_index_part( + index_part: &IndexPart, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, +) -> LayerMap { + let mut layer_map = LayerMap::default(); + { + let mut updates = layer_map.batch_update(); + for (key, value) in index_part.layer_metadata.iter() { + updates.insert_historic(PersistentLayerDesc::from_filename( + tenant_shard_id, + timeline_id, + key.clone(), + value.file_size, + )); + } + } + layer_map +} + +async fn search_layers( + tenant_id: &str, + timeline_id: &str, + path: &Utf8PathBuf, + key: &str, + lsn: &str, +) -> anyhow::Result<()> { + let tenant_id = TenantId::from_str(tenant_id).unwrap(); + let tenant_shard_id = TenantShardId::unsharded(tenant_id); + let timeline_id = TimelineId::from_str(timeline_id).unwrap(); + let index_json = { + let bytes = tokio::fs::read(path).await?; + IndexPart::from_json_bytes(&bytes).unwrap() + }; + let layer_map = create_layer_map_from_index_part(&index_json, tenant_shard_id, timeline_id); + let key = Key::from_hex(key)?; + + let lsn = Lsn::from_str(lsn).unwrap(); + let mut end_lsn = lsn; + loop { + let result = layer_map.search(key, end_lsn); + match result { + Some(SearchResult { layer, lsn_floor }) => { + let disk_layer = match layer { + ReadableLayerWeak::PersistentLayer(layer) => layer, + ReadableLayerWeak::InMemoryLayer(_) => { + anyhow::bail!("unexpected in-memory layer") + } + }; + + let metadata = index_json + .layer_metadata + .get(&disk_layer.layer_name()) + .unwrap(); + println!( + "{}", + remote_layer_path( + &tenant_id, + &timeline_id, + metadata.shard, + &disk_layer.layer_name(), + metadata.generation + ) + ); + end_lsn = lsn_floor; + } + None => break, + } + } + Ok(()) +} + +#[derive(Debug, Clone, Serialize)] +struct VisibleLayers { + pub total_images: u64, + pub total_image_bytes: u64, + pub total_deltas: u64, + pub total_delta_bytes: u64, + pub layer_metadata: BTreeMap, +} + +impl VisibleLayers { + 
pub fn new() -> Self { + Self { + layer_metadata: BTreeMap::new(), + total_images: 0, + total_image_bytes: 0, + total_deltas: 0, + total_delta_bytes: 0, + } + } + + pub fn add_layer(&mut self, name: LayerName, layer: LayerFileMetadata) { + match name { + LayerName::Image(_) => { + self.total_images += 1; + self.total_image_bytes += layer.file_size; + } + LayerName::Delta(_) => { + self.total_deltas += 1; + self.total_delta_bytes += layer.file_size; + } + } + self.layer_metadata.insert(name, layer); + } +} + +async fn list_visible_layers(path: &Utf8PathBuf) -> anyhow::Result<()> { + let tenant_id = TenantId::generate(); + let tenant_shard_id = TenantShardId::unsharded(tenant_id); + let timeline_id = TimelineId::generate(); + + let bytes = tokio::fs::read(path).await.context("read file")?; + let index_part = IndexPart::from_json_bytes(&bytes).context("deserialize")?; + let layer_map = create_layer_map_from_index_part(&index_part, tenant_shard_id, timeline_id); + let mut visible_layers = VisibleLayers::new(); + let (layers, _key_space) = layer_map.get_visibility(Vec::new()); + for (layer, visibility) in layers { + if visibility == LayerVisibilityHint::Visible { + visible_layers.add_layer( + layer.layer_name(), + index_part + .layer_metadata + .get(&layer.layer_name()) + .unwrap() + .clone(), + ); + } + } + let output = serde_json::to_string_pretty(&visible_layers).context("serialize output")?; + println!("{output}"); + + Ok(()) } pub(crate) async fn main(cmd: &IndexPartCmd) -> anyhow::Result<()> { @@ -16,5 +186,13 @@ pub(crate) async fn main(cmd: &IndexPartCmd) -> anyhow::Result<()> { println!("{output}"); Ok(()) } + IndexPartCmd::Search { + tenant_id, + timeline_id, + path, + key, + lsn, + } => search_layers(tenant_id, timeline_id, path, key, lsn).await, + IndexPartCmd::ListVisibleLayers { path } => list_visible_layers(path).await, } } diff --git a/pageserver/ctl/src/main.rs b/pageserver/ctl/src/main.rs index 3cd4faaf2e..e84ad2c87f 100644 --- 
a/pageserver/ctl/src/main.rs +++ b/pageserver/ctl/src/main.rs @@ -4,6 +4,7 @@ //! //! Separate, `metadata` subcommand allows to print and update pageserver's metadata file. +mod download_remote_object; mod draw_timeline_dir; mod index_part; mod key; @@ -16,6 +17,7 @@ use std::time::{Duration, SystemTime}; use camino::{Utf8Path, Utf8PathBuf}; use clap::{Parser, Subcommand}; +use download_remote_object::DownloadRemoteObjectCmd; use index_part::IndexPartCmd; use layers::LayerCmd; use page_trace::PageTraceCmd; @@ -63,6 +65,7 @@ enum Commands { /// Debug print a hex key found from logs Key(key::DescribeKeyCommand), PageTrace(PageTraceCmd), + DownloadRemoteObject(DownloadRemoteObjectCmd), } /// Read and update pageserver metadata file @@ -185,6 +188,9 @@ async fn main() -> anyhow::Result<()> { } Commands::Key(dkc) => dkc.execute(), Commands::PageTrace(cmd) => page_trace::main(&cmd)?, + Commands::DownloadRemoteObject(cmd) => { + download_remote_object::main(&cmd).await?; + } }; Ok(()) } diff --git a/pageserver/page_api/proto/page_service.proto b/pageserver/page_api/proto/page_service.proto index 1d6c230916..d113a04a42 100644 --- a/pageserver/page_api/proto/page_service.proto +++ b/pageserver/page_api/proto/page_service.proto @@ -153,7 +153,7 @@ message GetDbSizeResponse { message GetPageRequest { // A request ID. Will be included in the response. Should be unique for // in-flight requests on the stream. - uint64 request_id = 1; + RequestID request_id = 1; // The request class. GetPageClass request_class = 2; // The LSN to read at. @@ -177,6 +177,14 @@ message GetPageRequest { repeated uint32 block_number = 5; } +// A Request ID. Should be unique for in-flight requests on a stream. Included in the response. +message RequestID { + // The base request ID. + uint64 id = 1; + // The request attempt. Starts at 0, incremented on each retry. + uint32 attempt = 2; +} + // A GetPageRequest class. 
Primarily intended for observability, but may also be // used for prioritization in the future. enum GetPageClass { @@ -199,13 +207,26 @@ enum GetPageClass { // the entire batch is ready, so no one can make use of the individual pages. message GetPageResponse { // The original request's ID. - uint64 request_id = 1; - // The response status code. + RequestID request_id = 1; + // The response status code. If not OK, the rel and page fields will be empty. GetPageStatusCode status_code = 2; // A string describing the status, if any. string reason = 3; - // The 8KB page images, in the same order as the request. Empty if status_code != OK. - repeated bytes page_image = 4; + // The relation that the pages belong to. + RelTag rel = 4; + // The page(s), in the same order as the request. + repeated Page page = 5; +} + +// A page. +// +// TODO: it would be slightly more efficient (but less convenient) to have separate arrays of block +// numbers and images, but given the 8KB page size it's probably negligible. Benchmark it anyway. +message Page { + // The page number. + uint32 block_number = 1; + // The materialized page image, as an 8KB byte vector. + bytes image = 2; } // A GetPageResponse status code. 
diff --git a/pageserver/page_api/src/client.rs b/pageserver/page_api/src/client.rs index 18d02c4ed8..f70d0e7b28 100644 --- a/pageserver/page_api/src/client.rs +++ b/pageserver/page_api/src/client.rs @@ -1,4 +1,5 @@ -use anyhow::anyhow; +use anyhow::Context as _; +use futures::future::ready; use futures::{Stream, StreamExt as _, TryStreamExt as _}; use tokio::io::AsyncRead; use tokio_util::io::StreamReader; @@ -34,9 +35,7 @@ impl Client { E: TryInto + Send + Sync + 'static, >::Error: std::error::Error + Send + Sync, { - let endpoint: Endpoint = endpoint - .try_into() - .map_err(|err| anyhow!("invalid endpoint: {err}"))?; + let endpoint: Endpoint = endpoint.try_into().context("invalid endpoint")?; let channel = endpoint.connect().await?; Self::new( channel, @@ -112,7 +111,7 @@ impl Client { ) -> tonic::Result> + Send + 'static> { let reqs = reqs.map(proto::GetPageRequest::from); let resps = self.inner.get_pages(reqs).await?.into_inner(); - Ok(resps.map_ok(GetPageResponse::from)) + Ok(resps.and_then(|resp| ready(GetPageResponse::try_from(resp).map_err(|err| err.into())))) } /// Returns the size of a relation, as # of blocks. diff --git a/pageserver/page_api/src/model.rs b/pageserver/page_api/src/model.rs index af78212b68..7df7de6fc6 100644 --- a/pageserver/page_api/src/model.rs +++ b/pageserver/page_api/src/model.rs @@ -51,7 +51,7 @@ impl From for tonic::Status { } /// The LSN a request should read at. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Default)] pub struct ReadLsn { /// The request's read LSN. pub request_lsn: Lsn, @@ -331,7 +331,7 @@ impl From for proto::GetDbSizeResponse { } /// Requests one or more pages. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct GetPageRequest { /// A request ID. Will be included in the response. Should be unique for in-flight requests on /// the stream. 
@@ -358,7 +358,10 @@ impl TryFrom for GetPageRequest { return Err(ProtocolError::Missing("block_number")); } Ok(Self { - request_id: pb.request_id, + request_id: pb + .request_id + .ok_or(ProtocolError::Missing("request_id"))? + .into(), request_class: pb.request_class.into(), read_lsn: pb .read_lsn @@ -373,7 +376,7 @@ impl TryFrom for GetPageRequest { impl From for proto::GetPageRequest { fn from(request: GetPageRequest) -> Self { Self { - request_id: request.request_id, + request_id: Some(request.request_id.into()), request_class: request.request_class.into(), read_lsn: Some(request.read_lsn.into()), rel: Some(request.rel.into()), @@ -382,16 +385,60 @@ impl From for proto::GetPageRequest { } } -/// A GetPage request ID. -pub type RequestID = u64; +/// A GetPage request ID and retry attempt. Should be unique for in-flight requests on a stream. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct RequestID { + /// The base request ID. + pub id: u64, + // The request attempt. Starts at 0, incremented on each retry. + pub attempt: u32, +} + +impl RequestID { + /// Creates a new RequestID with the given ID and an initial attempt of 0. + pub fn new(id: u64) -> Self { + Self { id, attempt: 0 } + } +} + +impl Display for RequestID { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}.{}", self.id, self.attempt) + } +} + +impl From for RequestID { + fn from(pb: proto::RequestId) -> Self { + Self { + id: pb.id, + attempt: pb.attempt, + } + } +} + +impl From for RequestID { + fn from(id: u64) -> Self { + Self::new(id) + } +} + +impl From for proto::RequestId { + fn from(request_id: RequestID) -> Self { + Self { + id: request_id.id, + attempt: request_id.attempt, + } + } +} /// A GetPage request class. -#[derive(Clone, Copy, Debug, strum_macros::Display)] +#[derive(Clone, Copy, Debug, Default, strum_macros::Display)] pub enum GetPageClass { /// Unknown class. 
For backwards compatibility: used when an older client version sends a class /// that a newer server version has removed. Unknown, /// A normal request. This is the default. + #[default] Normal, /// A prefetch request. NB: can only be classified on pg < 18. Prefetch, @@ -399,19 +446,6 @@ pub enum GetPageClass { Background, } -impl GetPageClass { - /// Returns true if this is considered a bulk request (i.e. more throughput-oriented rather than - /// latency-sensitive). - pub fn is_bulk(&self) -> bool { - match self { - Self::Unknown => false, - Self::Normal => false, - Self::Prefetch => true, - Self::Background => true, - } - } -} - impl From for GetPageClass { fn from(pb: proto::GetPageClass) -> Self { match pb { @@ -458,32 +492,41 @@ impl From for i32 { pub struct GetPageResponse { /// The original request's ID. pub request_id: RequestID, - /// The response status code. + /// The response status code. If not OK, the `rel` and `pages` fields will be empty. pub status_code: GetPageStatusCode, /// A string describing the status, if any. pub reason: Option, - /// The 8KB page images, in the same order as the request. Empty if status != OK. - pub page_images: Vec, + /// The relation that the pages belong to. + pub rel: RelTag, + // The page(s), in the same order as the request. + pub pages: Vec, } -impl From for GetPageResponse { - fn from(pb: proto::GetPageResponse) -> Self { - Self { - request_id: pb.request_id, +impl TryFrom for GetPageResponse { + type Error = ProtocolError; + + fn try_from(pb: proto::GetPageResponse) -> Result { + Ok(Self { + request_id: pb + .request_id + .ok_or(ProtocolError::Missing("request_id"))? 
+ .into(), status_code: pb.status_code.into(), reason: Some(pb.reason).filter(|r| !r.is_empty()), - page_images: pb.page_image, - } + rel: pb.rel.ok_or(ProtocolError::Missing("rel"))?.try_into()?, + pages: pb.page.into_iter().map(Page::from).collect(), + }) } } impl From for proto::GetPageResponse { fn from(response: GetPageResponse) -> Self { Self { - request_id: response.request_id, + request_id: Some(response.request_id.into()), status_code: response.status_code.into(), reason: response.reason.unwrap_or_default(), - page_image: response.page_images, + rel: Some(response.rel.into()), + page: response.pages.into_iter().map(proto::Page::from).collect(), } } } @@ -516,11 +559,39 @@ impl GetPageResponse { request_id, status_code, reason: Some(status.message().to_string()), - page_images: Vec::new(), + rel: RelTag::default(), + pages: Vec::new(), }) } } +// A page. +#[derive(Clone, Debug)] +pub struct Page { + /// The page number. + pub block_number: u32, + /// The materialized page image, as an 8KB byte vector. + pub image: Bytes, +} + +impl From for Page { + fn from(pb: proto::Page) -> Self { + Self { + block_number: pb.block_number, + image: pb.image, + } + } +} + +impl From for proto::Page { + fn from(page: Page) -> Self { + Self { + block_number: page.block_number, + image: page.image, + } + } +} + /// A GetPage response status code. /// /// These are effectively equivalent to gRPC statuses. 
However, we use a bidirectional stream diff --git a/pageserver/pagebench/Cargo.toml b/pageserver/pagebench/Cargo.toml index 117ef0167e..50835abdc6 100644 --- a/pageserver/pagebench/Cargo.toml +++ b/pageserver/pagebench/Cargo.toml @@ -16,6 +16,7 @@ futures.workspace = true hdrhistogram.workspace = true humantime.workspace = true humantime-serde.workspace = true +pprof.workspace = true rand.workspace = true reqwest.workspace = true serde.workspace = true @@ -30,9 +31,9 @@ metrics.workspace = true tonic.workspace = true url.workspace = true +pageserver_api.workspace = true pageserver_client.workspace = true pageserver_client_grpc.workspace = true -pageserver_api.workspace = true pageserver_page_api.workspace = true utils = { path = "../../libs/utils/" } workspace_hack = { version = "0.1", path = "../../workspace_hack" } diff --git a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs index 01c6bea2e5..f458f4efe4 100644 --- a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs +++ b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs @@ -10,12 +10,14 @@ use anyhow::Context; use async_trait::async_trait; use bytes::Bytes; use camino::Utf8PathBuf; +use futures::stream::FuturesUnordered; use futures::{Stream, StreamExt as _}; use pageserver_api::key::Key; use pageserver_api::keyspace::KeySpaceAccum; use pageserver_api::pagestream_api::{PagestreamGetPageRequest, PagestreamRequest}; use pageserver_api::reltag::RelTag; use pageserver_api::shard::TenantShardId; +use pageserver_client_grpc::{self as client_grpc, ShardSpec}; use pageserver_page_api as page_api; use rand::prelude::*; use tokio::task::JoinSet; @@ -41,6 +43,10 @@ pub(crate) struct Args { /// Pageserver connection string. Supports postgresql:// and grpc:// protocols. 
#[clap(long, default_value = "postgres://postgres@localhost:64000")] page_service_connstring: String, + /// Use the rich gRPC Pageserver client `client_grpc::PageserverClient`, rather than the basic + /// no-frills `page_api::Client`. Only valid with grpc:// connstrings. + #[clap(long)] + rich_client: bool, #[clap(long)] pageserver_jwt: Option, #[clap(long, default_value = "1")] @@ -360,6 +366,7 @@ async fn main_impl( let client: Box = match scheme.as_str() { "postgresql" | "postgres" => { assert!(!args.compression, "libpq does not support compression"); + assert!(!args.rich_client, "rich client requires grpc://"); Box::new( LibpqClient::new(&args.page_service_connstring, worker_id.timeline) .await @@ -367,6 +374,16 @@ async fn main_impl( ) } + "grpc" if args.rich_client => Box::new( + RichGrpcClient::new( + &args.page_service_connstring, + worker_id.timeline, + args.compression, + ) + .await + .unwrap(), + ), + "grpc" => Box::new( GrpcClient::new( &args.page_service_connstring, @@ -685,7 +702,7 @@ impl Client for GrpcClient { blks: Vec, ) -> anyhow::Result<()> { let req = page_api::GetPageRequest { - request_id: req_id, + request_id: req_id.into(), request_class: page_api::GetPageClass::Normal, read_lsn: page_api::ReadLsn { request_lsn: req_lsn, @@ -705,6 +722,79 @@ impl Client for GrpcClient { "unexpected status code: {}", resp.status_code, ); - Ok((resp.request_id, resp.page_images)) + Ok(( + resp.request_id.id, + resp.pages.into_iter().map(|p| p.image).collect(), + )) + } +} + +/// A rich gRPC Pageserver client. 
+struct RichGrpcClient { + inner: Arc, + requests: FuturesUnordered< + Pin> + Send>>, + >, +} + +impl RichGrpcClient { + async fn new( + connstring: &str, + ttid: TenantTimelineId, + compression: bool, + ) -> anyhow::Result { + let inner = Arc::new(client_grpc::PageserverClient::new( + ttid.tenant_id, + ttid.timeline_id, + ShardSpec::new( + [(ShardIndex::unsharded(), connstring.to_string())].into(), + None, + )?, + None, + compression.then_some(tonic::codec::CompressionEncoding::Zstd), + )?); + Ok(Self { + inner, + requests: FuturesUnordered::new(), + }) + } +} + +#[async_trait] +impl Client for RichGrpcClient { + async fn send_get_page( + &mut self, + req_id: u64, + req_lsn: Lsn, + mod_lsn: Lsn, + rel: RelTag, + blks: Vec, + ) -> anyhow::Result<()> { + let req = page_api::GetPageRequest { + request_id: req_id.into(), + request_class: page_api::GetPageClass::Normal, + read_lsn: page_api::ReadLsn { + request_lsn: req_lsn, + not_modified_since_lsn: Some(mod_lsn), + }, + rel, + block_numbers: blks, + }; + let inner = self.inner.clone(); + self.requests.push(Box::pin(async move { + inner + .get_page(req) + .await + .map_err(|err| anyhow::anyhow!("{err}")) + })); + Ok(()) + } + + async fn recv_get_page(&mut self) -> anyhow::Result<(u64, Vec)> { + let resp = self.requests.next().await.unwrap()?; + Ok(( + resp.request_id.id, + resp.pages.into_iter().map(|p| p.image).collect(), + )) } } diff --git a/pageserver/pagebench/src/cmd/idle_streams.rs b/pageserver/pagebench/src/cmd/idle_streams.rs new file mode 100644 index 0000000000..73bc9f3f46 --- /dev/null +++ b/pageserver/pagebench/src/cmd/idle_streams.rs @@ -0,0 +1,127 @@ +use std::sync::Arc; + +use anyhow::anyhow; +use futures::StreamExt; +use tonic::transport::Endpoint; +use tracing::info; + +use pageserver_page_api::{GetPageClass, GetPageRequest, GetPageStatusCode, ReadLsn, RelTag}; +use utils::id::TenantTimelineId; +use utils::lsn::Lsn; +use utils::shard::ShardIndex; + +/// Starts a large number of idle gRPC GetPage 
streams. +#[derive(clap::Parser)] +pub(crate) struct Args { + /// The Pageserver to connect to. Must use grpc://. + #[clap(long, default_value = "grpc://localhost:51051")] + server: String, + /// The Pageserver HTTP API. + #[clap(long, default_value = "http://localhost:9898")] + http_server: String, + /// The number of streams to open. + #[clap(long, default_value = "100000")] + count: usize, + /// Number of streams per connection. + #[clap(long, default_value = "100")] + per_connection: usize, + /// Send a single GetPage request on each stream. + #[clap(long, default_value_t = false)] + send_request: bool, +} + +pub(crate) fn main(args: Args) -> anyhow::Result<()> { + let rt = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build()?; + + rt.block_on(main_impl(args)) +} + +async fn main_impl(args: Args) -> anyhow::Result<()> { + // Discover a tenant and timeline to use. + let mgmt_api_client = Arc::new(pageserver_client::mgmt_api::Client::new( + reqwest::Client::new(), + args.http_server.clone(), + None, + )); + let timelines: Vec = crate::util::cli::targets::discover( + &mgmt_api_client, + crate::util::cli::targets::Spec { + limit_to_first_n_targets: Some(1), + targets: None, + }, + ) + .await?; + let ttid = timelines + .first() + .ok_or_else(|| anyhow!("no timelines found"))?; + + // Set up the initial client. + let endpoint = Endpoint::from_shared(args.server.clone())?; + + let connect = async || { + pageserver_page_api::Client::new( + endpoint.connect().await?, + ttid.tenant_id, + ttid.timeline_id, + ShardIndex::unsharded(), + None, + None, + ) + }; + + let mut client = connect().await?; + let mut streams = Vec::with_capacity(args.count); + + // Create streams. 
+ for i in 0..args.count { + if i % 100 == 0 { + info!("opened {}/{} streams", i, args.count); + } + if i % args.per_connection == 0 && i > 0 { + client = connect().await?; + } + + let (req_tx, req_rx) = tokio::sync::mpsc::unbounded_channel(); + let req_stream = tokio_stream::wrappers::UnboundedReceiverStream::new(req_rx); + let mut resp_stream = client.get_pages(req_stream).await?; + + // Send request if specified. + if args.send_request { + req_tx.send(GetPageRequest { + request_id: 1.into(), + request_class: GetPageClass::Normal, + read_lsn: ReadLsn { + request_lsn: Lsn::MAX, + not_modified_since_lsn: Some(Lsn(1)), + }, + rel: RelTag { + spcnode: 1664, // pg_global + dbnode: 0, // shared database + relnode: 1262, // pg_authid + forknum: 0, // init + }, + block_numbers: vec![0], + })?; + let resp = resp_stream + .next() + .await + .transpose()? + .ok_or_else(|| anyhow!("no response"))?; + if resp.status_code != GetPageStatusCode::Ok { + return Err(anyhow!("{} response", resp.status_code)); + } + } + + // Hold onto streams to avoid closing them. + streams.push((req_tx, resp_stream)); + } + + info!("opened {} streams, sleeping", args.count); + + // Block forever, to hold the idle streams open for inspection. + futures::future::pending::<()>().await; + + Ok(()) +} diff --git a/pageserver/pagebench/src/main.rs b/pageserver/pagebench/src/main.rs index 5527557450..ceca58e032 100644 --- a/pageserver/pagebench/src/main.rs +++ b/pageserver/pagebench/src/main.rs @@ -1,4 +1,7 @@ +use std::fs::File; + use clap::Parser; +use tracing::info; use utils::logging; /// Re-usable pieces of code that aren't CLI-specific. @@ -17,38 +20,73 @@ mod cmd { pub(super) mod aux_files; pub(super) mod basebackup; pub(super) mod getpage_latest_lsn; + pub(super) mod idle_streams; pub(super) mod ondemand_download_churn; pub(super) mod trigger_initial_size_calculation; } /// Component-level performance test for pageserver. 
#[derive(clap::Parser)] -enum Args { +struct Args { + /// Takes a client CPU profile into profile.svg. The benchmark must exit cleanly before it's + /// written, e.g. via --runtime. + #[arg(long)] + profile: bool, + + #[command(subcommand)] + subcommand: Subcommand, +} + +#[derive(clap::Subcommand)] +enum Subcommand { Basebackup(cmd::basebackup::Args), GetPageLatestLsn(cmd::getpage_latest_lsn::Args), TriggerInitialSizeCalculation(cmd::trigger_initial_size_calculation::Args), OndemandDownloadChurn(cmd::ondemand_download_churn::Args), AuxFiles(cmd::aux_files::Args), + IdleStreams(cmd::idle_streams::Args), } -fn main() { +fn main() -> anyhow::Result<()> { logging::init( logging::LogFormat::Plain, logging::TracingErrorLayerEnablement::Disabled, logging::Output::Stderr, - ) - .unwrap(); + )?; logging::replace_panic_hook_with_tracing_panic_hook().forget(); let args = Args::parse(); - match args { - Args::Basebackup(args) => cmd::basebackup::main(args), - Args::GetPageLatestLsn(args) => cmd::getpage_latest_lsn::main(args), - Args::TriggerInitialSizeCalculation(args) => { + + // Start a CPU profile if requested. + let mut profiler = None; + if args.profile { + profiler = Some( + pprof::ProfilerGuardBuilder::default() + .frequency(1000) + .blocklist(&["libc", "libgcc", "pthread", "vdso"]) + .build()?, + ); + } + + match args.subcommand { + Subcommand::Basebackup(args) => cmd::basebackup::main(args), + Subcommand::GetPageLatestLsn(args) => cmd::getpage_latest_lsn::main(args), + Subcommand::TriggerInitialSizeCalculation(args) => { cmd::trigger_initial_size_calculation::main(args) } - Args::OndemandDownloadChurn(args) => cmd::ondemand_download_churn::main(args), - Args::AuxFiles(args) => cmd::aux_files::main(args), + Subcommand::OndemandDownloadChurn(args) => cmd::ondemand_download_churn::main(args), + Subcommand::AuxFiles(args) => cmd::aux_files::main(args), + Subcommand::IdleStreams(args) => cmd::idle_streams::main(args), + }?; + + // Generate a CPU flamegraph if requested. 
+ if let Some(profiler) = profiler { + let report = profiler.report().build()?; + drop(profiler); // stop profiling + let file = File::create("profile.svg")?; + report.flamegraph(file)?; + info!("wrote CPU profile flamegraph to profile.svg") } - .unwrap() + + Ok(()) } diff --git a/pageserver/src/auth.rs b/pageserver/src/auth.rs index 4075427ab4..9e97fdaba8 100644 --- a/pageserver/src/auth.rs +++ b/pageserver/src/auth.rs @@ -20,7 +20,8 @@ pub fn check_permission(claims: &Claims, tenant_id: Option) -> Result< | Scope::GenerationsApi | Scope::Infra | Scope::Scrubber - | Scope::ControllerPeer, + | Scope::ControllerPeer + | Scope::TenantEndpoint, _, ) => Err(AuthError( format!( diff --git a/pageserver/src/basebackup.rs b/pageserver/src/basebackup.rs index 36dada1e89..1a44c80e2d 100644 --- a/pageserver/src/basebackup.rs +++ b/pageserver/src/basebackup.rs @@ -114,7 +114,7 @@ where // Compute postgres doesn't have any previous WAL files, but the first // record that it's going to write needs to include the LSN of the // previous record (xl_prev). We include prev_record_lsn in the - // "zenith.signal" file, so that postgres can read it during startup. + // "neon.signal" file, so that postgres can read it during startup. // // We don't keep full history of record boundaries in the page server, // however, only the predecessor of the latest record on each @@ -751,34 +751,39 @@ where // // Add generated pg_control file and bootstrap WAL segment. - // Also send zenith.signal file with extra bootstrap data. + // Also send neon.signal and zenith.signal file with extra bootstrap data. 
// async fn add_pgcontrol_file( &mut self, pg_control_bytes: Bytes, system_identifier: u64, ) -> Result<(), BasebackupError> { - // add zenith.signal file - let mut zenith_signal = String::new(); + // add neon.signal file + let mut neon_signal = String::new(); if self.prev_record_lsn == Lsn(0) { if self.timeline.is_ancestor_lsn(self.lsn) { - write!(zenith_signal, "PREV LSN: none") + write!(neon_signal, "PREV LSN: none") .map_err(|e| BasebackupError::Server(e.into()))?; } else { - write!(zenith_signal, "PREV LSN: invalid") + write!(neon_signal, "PREV LSN: invalid") .map_err(|e| BasebackupError::Server(e.into()))?; } } else { - write!(zenith_signal, "PREV LSN: {}", self.prev_record_lsn) + write!(neon_signal, "PREV LSN: {}", self.prev_record_lsn) .map_err(|e| BasebackupError::Server(e.into()))?; } - self.ar - .append( - &new_tar_header("zenith.signal", zenith_signal.len() as u64)?, - zenith_signal.as_bytes(), - ) - .await - .map_err(|e| BasebackupError::Client(e, "add_pgcontrol_file,zenith.signal"))?; + + // TODO: Remove zenith.signal once all historical computes have been replaced + // ... and thus support the neon.signal file. 
+ for signalfilename in ["neon.signal", "zenith.signal"] { + self.ar + .append( + &new_tar_header(signalfilename, neon_signal.len() as u64)?, + neon_signal.as_bytes(), + ) + .await + .map_err(|e| BasebackupError::Client(e, "add_pgcontrol_file,neon.signal"))?; + } //send pg_control let header = new_tar_header("global/pg_control", pg_control_bytes.len() as u64)?; diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index 327384fd82..dfb8b437c3 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -29,8 +29,8 @@ use pageserver::task_mgr::{ }; use pageserver::tenant::{TenantSharedResources, mgr, secondary}; use pageserver::{ - CancellableTask, ConsumptionMetricsTasks, HttpEndpointListener, HttpsEndpointListener, http, - page_cache, page_service, task_mgr, virtual_file, + CancellableTask, ConsumptionMetricsTasks, HttpEndpointListener, HttpsEndpointListener, + MetricsCollectionTask, http, page_cache, page_service, task_mgr, virtual_file, }; use postgres_backend::AuthType; use remote_storage::GenericRemoteStorage; @@ -41,6 +41,7 @@ use tracing_utils::OtelGuard; use utils::auth::{JwtAuth, SwappableJwtAuth}; use utils::crashsafe::syncfs; use utils::logging::TracingErrorLayerEnablement; +use utils::metrics_collector::{METRICS_COLLECTION_INTERVAL, METRICS_COLLECTOR}; use utils::sentry_init::init_sentry; use utils::{failpoint_support, logging, project_build_tag, project_git_version, tcp_listener}; @@ -763,6 +764,41 @@ fn start_pageserver( (http_task, https_task) }; + /* BEGIN_HADRON */ + let metrics_collection_task = { + let cancel = shutdown_pageserver.child_token(); + let task = crate::BACKGROUND_RUNTIME.spawn({ + let cancel = cancel.clone(); + let background_jobs_barrier = background_jobs_barrier.clone(); + async move { + if conf.force_metric_collection_on_scrape { + return; + } + + // first wait until background jobs are cleared to launch. + tokio::select! 
{ + _ = cancel.cancelled() => { return; }, + _ = background_jobs_barrier.wait() => {} + }; + let mut interval = tokio::time::interval(METRICS_COLLECTION_INTERVAL); + loop { + tokio::select! { + _ = cancel.cancelled() => { + tracing::info!("cancelled metrics collection task, exiting..."); + break; + }, + _ = interval.tick() => {} + } + tokio::task::spawn_blocking(|| { + METRICS_COLLECTOR.run_once(true); + }); + } + } + }); + MetricsCollectionTask(CancellableTask { task, cancel }) + }; + /* END_HADRON */ + let consumption_metrics_tasks = { let cancel = shutdown_pageserver.child_token(); let task = crate::BACKGROUND_RUNTIME.spawn({ @@ -844,6 +880,7 @@ fn start_pageserver( https_endpoint_listener, page_service, page_service_grpc, + metrics_collection_task, consumption_metrics_tasks, disk_usage_eviction_task, &tenant_manager, @@ -880,17 +917,15 @@ async fn create_remote_storage_client( // If `test_remote_failures` is non-zero, wrap the client with a // wrapper that simulates failures. if conf.test_remote_failures > 0 { - if !cfg!(feature = "testing") { - anyhow::bail!( - "test_remote_failures option is not available because pageserver was compiled without the 'testing' feature" - ); - } info!( "Simulating remote failures for first {} attempts of each op", conf.test_remote_failures ); - remote_storage = - GenericRemoteStorage::unreliable_wrapper(remote_storage, conf.test_remote_failures); + remote_storage = GenericRemoteStorage::unreliable_wrapper( + remote_storage, + conf.test_remote_failures, + conf.test_remote_failures_probability, + ); } Ok(remote_storage) diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index 75b41b9b60..bb73ae1dd5 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -28,7 +28,6 @@ use reqwest::Url; use storage_broker::Uri; use utils::id::{NodeId, TimelineId}; use utils::logging::{LogFormat, SecretString}; -use utils::serde_percent::Percent; use crate::tenant::storage_layer::inmemory_layer::IndexEntry; use 
crate::tenant::{TENANTS_SEGMENT_NAME, TIMELINES_SEGMENT_NAME}; @@ -146,9 +145,13 @@ pub struct PageServerConf { pub metric_collection_bucket: Option, pub synthetic_size_calculation_interval: Duration, - pub disk_usage_based_eviction: Option, + pub disk_usage_based_eviction: DiskUsageEvictionTaskConfig, + // The number of allowed failures in remote storage operations. pub test_remote_failures: u64, + // The probability of failure in remote storage operations. Only works when test_remote_failures > 1. + // Use 100 for 100% failure, 0 for no failure. + pub test_remote_failures_probability: u64, pub ondemand_download_behavior_treat_error_as_warn: bool, @@ -249,6 +252,14 @@ pub struct PageServerConf { pub timeline_import_config: pageserver_api::config::TimelineImportConfig, pub basebackup_cache_config: Option, + + /// Defines what is a big tenant for the purpose of image layer generation. + /// See Timeline::should_check_if_image_layers_required + pub image_layer_generation_large_timeline_threshold: Option, + + /// Controls whether to collect all metrics on each scrape or to return potentially stale + /// results. 
+ pub force_metric_collection_on_scrape: bool, } /// Token for authentication to safekeepers @@ -393,6 +404,7 @@ impl PageServerConf { synthetic_size_calculation_interval, disk_usage_based_eviction, test_remote_failures, + test_remote_failures_probability, ondemand_download_behavior_treat_error_as_warn, background_task_maximum_delay, control_plane_api, @@ -428,6 +440,8 @@ impl PageServerConf { posthog_config, timeline_import_config, basebackup_cache_config, + image_layer_generation_large_timeline_threshold, + force_metric_collection_on_scrape, } = config_toml; let mut conf = PageServerConf { @@ -460,17 +474,9 @@ impl PageServerConf { metric_collection_endpoint, metric_collection_bucket, synthetic_size_calculation_interval, - disk_usage_based_eviction: Some(disk_usage_based_eviction.unwrap_or( - DiskUsageEvictionTaskConfig { - max_usage_pct: Percent::new(80).unwrap(), - min_avail_bytes: 2_000_000_000, - period: Duration::from_secs(60), - #[cfg(feature = "testing")] - mock_statvfs: None, - eviction_order: Default::default(), - }, - )), + disk_usage_based_eviction, test_remote_failures, + test_remote_failures_probability, ondemand_download_behavior_treat_error_as_warn, background_task_maximum_delay, control_plane_api: control_plane_api @@ -494,6 +500,8 @@ impl PageServerConf { dev_mode, timeline_import_config, basebackup_cache_config, + image_layer_generation_large_timeline_threshold, + force_metric_collection_on_scrape, // ------------------------------------------------------------ // fields that require additional validation or custom handling @@ -635,7 +643,7 @@ impl PageServerConf { pub fn dummy_conf(repo_dir: Utf8PathBuf) -> Self { let pg_distrib_dir = Utf8PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../pg_install"); - let config_toml = pageserver_api::config::ConfigToml { + let mut config_toml = pageserver_api::config::ConfigToml { wait_lsn_timeout: Duration::from_secs(60), wal_redo_timeout: Duration::from_secs(60), pg_distrib_dir: Some(pg_distrib_dir), @@ 
-647,6 +655,15 @@ impl PageServerConf { control_plane_api: Some(Url::parse("http://localhost:6666").unwrap()), ..Default::default() }; + + // Test authors tend to forget about the default 10min initial lease deadline + // when writing tests, which turns their immediate gc requests via mgmt API + // into no-ops. Override the binary default here, such that there is no initial + // lease deadline by default in tests. Tests that care can always override it + // themselves. + // Cf https://databricks.atlassian.net/browse/LKB-92?focusedCommentId=6722329 + config_toml.tenant_config.lsn_lease_length = Duration::from_secs(0); + PageServerConf::parse_and_validate(NodeId(0), config_toml, &repo_dir).unwrap() } } @@ -710,8 +727,9 @@ mod tests { use std::time::Duration; use camino::Utf8PathBuf; + use pageserver_api::config::{DiskUsageEvictionTaskConfig, EvictionOrder}; use rstest::rstest; - use utils::id::NodeId; + use utils::{id::NodeId, serde_percent::Percent}; use super::PageServerConf; @@ -811,19 +829,69 @@ mod tests { .expect("parse_and_validate"); } - #[test] - fn test_config_disk_usage_based_eviction_is_valid() { - let input = r#" + #[rstest] + #[ + case::omit_the_whole_config( + DiskUsageEvictionTaskConfig { + max_usage_pct: Percent::new(80).unwrap(), + min_avail_bytes: 2_000_000_000, + period: Duration::from_secs(60), + eviction_order: Default::default(), + #[cfg(feature = "testing")] + mock_statvfs: None, + enabled: true, + }, + r#" control_plane_api = "http://localhost:6666" - "#; + "#, + )] + #[ + case::omit_enabled_field( + DiskUsageEvictionTaskConfig { + max_usage_pct: Percent::new(80).unwrap(), + min_avail_bytes: 1_000_000_000, + period: Duration::from_secs(60), + eviction_order: EvictionOrder::RelativeAccessed { + highest_layer_count_loses_first: true, + }, + #[cfg(feature = "testing")] + mock_statvfs: None, + enabled: true, + }, + r#" + control_plane_api = "http://localhost:6666" + disk_usage_based_eviction = { max_usage_pct = 80, min_avail_bytes = 1000000000, 
period = "60s" } + "#, + )] + #[case::disabled( + DiskUsageEvictionTaskConfig { + max_usage_pct: Percent::new(80).unwrap(), + min_avail_bytes: 2_000_000_000, + period: Duration::from_secs(60), + eviction_order: EvictionOrder::RelativeAccessed { + highest_layer_count_loses_first: true, + }, + #[cfg(feature = "testing")] + mock_statvfs: None, + enabled: false, + }, + r#" + control_plane_api = "http://localhost:6666" + disk_usage_based_eviction = { enabled = false } + "# + )] + fn test_config_disk_usage_based_eviction_is_valid( + #[case] expected_disk_usage_based_eviction: DiskUsageEvictionTaskConfig, + #[case] input: &str, + ) { let config_toml = toml_edit::de::from_str::(input) .expect("disk_usage_based_eviction is valid"); let workdir = Utf8PathBuf::from("/nonexistent"); let config = PageServerConf::parse_and_validate(NodeId(0), config_toml, &workdir).unwrap(); - let disk_usage_based_eviction = config.disk_usage_based_eviction.unwrap(); - assert_eq!(disk_usage_based_eviction.max_usage_pct.get(), 80); - assert_eq!(disk_usage_based_eviction.min_avail_bytes, 2_000_000_000); - assert_eq!(disk_usage_based_eviction.period, Duration::from_secs(60)); - assert_eq!(disk_usage_based_eviction.eviction_order, Default::default()); + let disk_usage_based_eviction = config.disk_usage_based_eviction; + assert_eq!( + expected_disk_usage_based_eviction, + disk_usage_based_eviction + ); } } diff --git a/pageserver/src/controller_upcall_client.rs b/pageserver/src/controller_upcall_client.rs index f1f9aaf43c..be1de43d18 100644 --- a/pageserver/src/controller_upcall_client.rs +++ b/pageserver/src/controller_upcall_client.rs @@ -194,6 +194,7 @@ impl StorageControllerUpcallApi for StorageControllerUpcallClient { listen_http_port: m.http_port, listen_https_port: m.https_port, availability_zone_id: az_id.expect("Checked above"), + node_ip_addr: None, }) } Err(e) => { diff --git a/pageserver/src/deletion_queue/validator.rs b/pageserver/src/deletion_queue/validator.rs index 
363b1427f5..c9bfbd8adc 100644 --- a/pageserver/src/deletion_queue/validator.rs +++ b/pageserver/src/deletion_queue/validator.rs @@ -1,5 +1,5 @@ //! The validator is responsible for validating DeletionLists for execution, -//! based on whethe the generation in the DeletionList is still the latest +//! based on whether the generation in the DeletionList is still the latest //! generation for a tenant. //! //! The purpose of validation is to ensure split-brain safety in the cluster diff --git a/pageserver/src/disk_usage_eviction_task.rs b/pageserver/src/disk_usage_eviction_task.rs index f13b3709f5..f1d34664a8 100644 --- a/pageserver/src/disk_usage_eviction_task.rs +++ b/pageserver/src/disk_usage_eviction_task.rs @@ -171,7 +171,8 @@ pub fn launch_disk_usage_global_eviction_task( tenant_manager: Arc, background_jobs_barrier: completion::Barrier, ) -> Option { - let Some(task_config) = &conf.disk_usage_based_eviction else { + let task_config = &conf.disk_usage_based_eviction; + if !task_config.enabled { info!("disk usage based eviction task not configured"); return None; }; @@ -458,6 +459,9 @@ pub(crate) async fn disk_usage_eviction_task_iteration_impl( match next { Ok(Ok(file_size)) => { METRICS.layers_evicted.inc(); + /*BEGIN_HADRON */ + METRICS.bytes_evicted.inc_by(file_size); + /*END_HADRON */ usage_assumed.add_available_bytes(file_size); } Ok(Err(( @@ -1265,6 +1269,7 @@ mod filesystem_level_usage { #[cfg(feature = "testing")] mock_statvfs: None, eviction_order: pageserver_api::config::EvictionOrder::default(), + enabled: true, }, total_bytes: 100_000, avail_bytes: 0, diff --git a/pageserver/src/feature_resolver.rs b/pageserver/src/feature_resolver.rs index 6ce4522080..f0178fd9b3 100644 --- a/pageserver/src/feature_resolver.rs +++ b/pageserver/src/feature_resolver.rs @@ -1,4 +1,8 @@ -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{ + collections::HashMap, + sync::{Arc, atomic::AtomicBool}, + time::Duration, +}; use arc_swap::ArcSwap; use 
pageserver_api::config::NodeMetadata; @@ -355,11 +359,17 @@ impl PerTenantProperties { } } -#[derive(Clone)] pub struct TenantFeatureResolver { inner: FeatureResolver, tenant_id: TenantId, - cached_tenant_properties: Arc>>, + cached_tenant_properties: ArcSwap>, + + // Add feature flag on the critical path below. + // + // If a feature flag will be used on the critical path, we will update it in the tenant housekeeping loop insetad of + // resolving directly by calling `evaluate_multivariate` or `evaluate_boolean`. Remember to update the flag in the + // housekeeping loop. The user should directly read this atomic flag instead of using the set of evaluate functions. + pub feature_test_remote_size_flag: AtomicBool, } impl TenantFeatureResolver { @@ -367,7 +377,8 @@ impl TenantFeatureResolver { Self { inner, tenant_id, - cached_tenant_properties: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), + cached_tenant_properties: ArcSwap::new(Arc::new(HashMap::new())), + feature_test_remote_size_flag: AtomicBool::new(false), } } @@ -396,12 +407,14 @@ impl TenantFeatureResolver { self.inner.is_feature_flag_boolean(flag_key) } - pub fn update_cached_tenant_properties(&self, tenant_shard: &TenantShard) { - let mut remote_size_mb = None; + /// Refresh the cached properties and flags on the critical path. + pub fn refresh_properties_and_flags(&self, tenant_shard: &TenantShard) { + let mut remote_size_mb = Some(0.0); for timeline in tenant_shard.list_timelines() { let size = timeline.metrics.resident_physical_size_get(); if size == 0 { remote_size_mb = None; + break; } if let Some(ref mut remote_size_mb) = remote_size_mb { *remote_size_mb += size as f64 / 1024.0 / 1024.0; @@ -410,5 +423,12 @@ impl TenantFeatureResolver { self.cached_tenant_properties.store(Arc::new( PerTenantProperties { remote_size_mb }.into_posthog_properties(), )); + + // BEGIN: Update the feature flag on the critical path. 
+ self.feature_test_remote_size_flag.store( + self.evaluate_boolean("test-remote-size-flag").is_ok(), + std::sync::atomic::Ordering::Relaxed, + ); + // END: Update the feature flag on the critical path. } } diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index e8d1367d6c..3ffc80f19a 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -116,26 +116,6 @@ paths: schema: type: string - /v1/tenant/{tenant_id}/timeline: - parameters: - - name: tenant_id - in: path - required: true - schema: - type: string - get: - description: Get timelines for tenant - responses: - "200": - description: TimelineInfo - content: - application/json: - schema: - type: array - items: - $ref: "#/components/schemas/TimelineInfo" - - /v1/tenant/{tenant_id}/timeline/{timeline_id}: parameters: - name: tenant_id @@ -618,7 +598,7 @@ paths: schema: $ref: "#/components/schemas/SecondaryProgress" - /v1/tenant/{tenant_id}/timeline/: + /v1/tenant/{tenant_id}/timeline: parameters: - name: tenant_id in: path @@ -685,6 +665,17 @@ paths: application/json: schema: $ref: "#/components/schemas/Error" + get: + description: Get timelines for tenant + responses: + "200": + description: TimelineInfo + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/TimelineInfo" /v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/detach_ancestor: parameters: @@ -767,7 +758,7 @@ paths: $ref: "#/components/schemas/ServiceUnavailableError" - /v1/tenant/: + /v1/tenant: get: description: Get tenants list responses: @@ -847,7 +838,7 @@ paths: items: $ref: "#/components/schemas/TenantInfo" - /v1/tenant/{tenant_id}/config/: + /v1/tenant/{tenant_id}/config: parameters: - name: tenant_id in: path diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 02094e6aa9..3a08244d71 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -2,12 +2,15 @@ //! 
Management HTTP API //! use std::cmp::Reverse; -use std::collections::{BinaryHeap, HashMap}; +use std::collections::BTreeMap; +use std::collections::BinaryHeap; +use std::collections::HashMap; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; use anyhow::{Context, Result, anyhow}; +use bytes::Bytes; use enumset::EnumSet; use futures::future::join_all; use futures::{StreamExt, TryFutureExt}; @@ -44,6 +47,7 @@ use pageserver_api::shard::{ShardCount, TenantShardId}; use postgres_ffi::PgMajorVersion; use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError}; use scopeguard::defer; +use serde::{Deserialize, Serialize}; use serde_json::json; use tenant_size_model::svg::SvgBranchKind; use tenant_size_model::{SizeResult, StorageModel}; @@ -55,12 +59,14 @@ use utils::auth::SwappableJwtAuth; use utils::generation::Generation; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; +use wal_decoder::models::record::NeonWalRecord; use crate::config::PageServerConf; use crate::context; use crate::context::{DownloadBehavior, RequestContext, RequestContextBuilder}; use crate::deletion_queue::DeletionQueueClient; use crate::feature_resolver::FeatureResolver; +use crate::metrics::LOCAL_DATA_LOSS_SUSPECTED; use crate::pgdatadir_mapping::LsnForTimestamp; use crate::task_mgr::TaskKind; use crate::tenant::config::LocationConf; @@ -74,12 +80,13 @@ use crate::tenant::remote_timeline_client::{ }; use crate::tenant::secondary::SecondaryController; use crate::tenant::size::ModelInputs; +use crate::tenant::storage_layer::ValuesReconstructState; use crate::tenant::storage_layer::{IoConcurrency, LayerAccessStatsReset, LayerName}; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::timeline::offload::{OffloadError, offload_timeline}; use crate::tenant::timeline::{ - CompactFlags, CompactOptions, CompactRequest, CompactionError, MarkInvisibleRequest, Timeline, - WaitLsnTimeout, WaitLsnWaiter, import_pgdata, + CompactFlags, 
CompactOptions, CompactRequest, MarkInvisibleRequest, Timeline, WaitLsnTimeout, + WaitLsnWaiter, import_pgdata, }; use crate::tenant::{ GetTimelineError, LogicalSizeCalculationCause, OffloadedTimeline, PageReconstructError, @@ -394,6 +401,7 @@ async fn build_timeline_info( timeline: &Arc, include_non_incremental_logical_size: bool, force_await_initial_logical_size: bool, + include_image_consistent_lsn: bool, ctx: &RequestContext, ) -> anyhow::Result { crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id(); @@ -418,6 +426,10 @@ async fn build_timeline_info( .await?, ); } + // HADRON + if include_image_consistent_lsn { + info.image_consistent_lsn = Some(timeline.compute_image_consistent_lsn().await?); + } Ok(info) } @@ -507,6 +519,8 @@ async fn build_timeline_info_common( is_invisible: Some(is_invisible), walreceiver_status, + // HADRON + image_consistent_lsn: None, }; Ok(info) } @@ -709,6 +723,8 @@ async fn timeline_list_handler( parse_query_param(&request, "include-non-incremental-logical-size")?; let force_await_initial_logical_size: Option = parse_query_param(&request, "force-await-initial-logical-size")?; + let include_image_consistent_lsn: Option = + parse_query_param(&request, "include-image-consistent-lsn")?; check_permission(&request, Some(tenant_shard_id.tenant_id))?; let state = get_state(&request); @@ -729,6 +745,7 @@ async fn timeline_list_handler( &timeline, include_non_incremental_logical_size.unwrap_or(false), force_await_initial_logical_size.unwrap_or(false), + include_image_consistent_lsn.unwrap_or(false), &ctx, ) .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id)) @@ -757,6 +774,9 @@ async fn timeline_and_offloaded_list_handler( parse_query_param(&request, "include-non-incremental-logical-size")?; let force_await_initial_logical_size: Option = parse_query_param(&request, "force-await-initial-logical-size")?; + let include_image_consistent_lsn: Option = + parse_query_param(&request, 
"include-image-consistent-lsn")?; + check_permission(&request, Some(tenant_shard_id.tenant_id))?; let state = get_state(&request); @@ -777,6 +797,7 @@ async fn timeline_and_offloaded_list_handler( &timeline, include_non_incremental_logical_size.unwrap_or(false), force_await_initial_logical_size.unwrap_or(false), + include_image_consistent_lsn.unwrap_or(false), &ctx, ) .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id)) @@ -961,6 +982,9 @@ async fn timeline_detail_handler( parse_query_param(&request, "include-non-incremental-logical-size")?; let force_await_initial_logical_size: Option = parse_query_param(&request, "force-await-initial-logical-size")?; + // HADRON + let include_image_consistent_lsn: Option = + parse_query_param(&request, "include-image-consistent-lsn")?; check_permission(&request, Some(tenant_shard_id.tenant_id))?; // Logical size calculation needs downloading. @@ -981,6 +1005,7 @@ async fn timeline_detail_handler( &timeline, include_non_incremental_logical_size.unwrap_or(false), force_await_initial_logical_size.unwrap_or(false), + include_image_consistent_lsn.unwrap_or(false), ctx, ) .await @@ -2332,6 +2357,7 @@ async fn timeline_compact_handler( flags, sub_compaction, sub_compaction_max_job_size_mb, + gc_compaction_do_metadata_compaction: false, }; let scheduled = compact_request @@ -2499,12 +2525,10 @@ async fn timeline_checkpoint_handler( .compact(&cancel, flags, &ctx) .await .map_err(|e| - match e { - CompactionError::ShuttingDown => ApiError::ShuttingDown, - CompactionError::Offload(e) => ApiError::InternalServerError(anyhow::anyhow!(e)), - CompactionError::CollectKeySpaceError(e) => ApiError::InternalServerError(anyhow::anyhow!(e)), - CompactionError::Other(e) => ApiError::InternalServerError(e), - CompactionError::AlreadyRunning(_) => ApiError::InternalServerError(anyhow::anyhow!(e)), + if e.is_cancel() { + ApiError::ShuttingDown + } else { + ApiError::InternalServerError(e.into_anyhow()) } )?; } @@ -2689,6 
+2713,16 @@ async fn deletion_queue_flush( } } +/// Try if `GetPage@Lsn` is successful, useful for manual debugging. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +struct GetPageResponse { + pub page: Bytes, + pub layers_visited: u32, + pub delta_layers_visited: u32, + pub records: Vec<(Lsn, NeonWalRecord)>, + pub img: Option<(Lsn, Bytes)>, +} + async fn getpage_at_lsn_handler( request: Request, cancel: CancellationToken, @@ -2739,21 +2773,24 @@ async fn getpage_at_lsn_handler_inner( // Use last_record_lsn if no lsn is provided let lsn = lsn.unwrap_or_else(|| timeline.get_last_record_lsn()); - let page = timeline.get(key.0, lsn, &ctx).await?; if touch { json_response(StatusCode::OK, ()) } else { - Result::<_, ApiError>::Ok( - Response::builder() - .status(StatusCode::OK) - .header(header::CONTENT_TYPE, "application/octet-stream") - .body(hyper::Body::from(page)) - .unwrap(), - ) + let mut reconstruct_state = ValuesReconstructState::new_with_debug(IoConcurrency::sequential()); + let page = timeline.debug_get(key.0, lsn, &ctx, &mut reconstruct_state).await?; + let response = GetPageResponse { + page, + layers_visited: reconstruct_state.get_layers_visited(), + delta_layers_visited: reconstruct_state.get_delta_layers_visited(), + records: reconstruct_state.debug_state.records.clone(), + img: reconstruct_state.debug_state.img.clone(), + }; + + json_response(StatusCode::OK, response) } } - .instrument(info_span!("timeline_get", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id)) + .instrument(info_span!("timeline_debug_get", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id)) .await } @@ -3215,6 +3252,30 @@ async fn get_utilization( .map_err(ApiError::InternalServerError) } +/// HADRON +async fn list_tenant_visible_size_handler( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&request, None)?; + let state = 
get_state(&request); + + let mut map = BTreeMap::new(); + for (tenant_shard_id, slot) in state.tenant_manager.list() { + match slot { + TenantSlot::Attached(tenant) => { + let visible_size = tenant.get_visible_size(); + map.insert(tenant_shard_id, visible_size); + } + TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => { + continue; + } + } + } + + json_response(StatusCode::OK, map) +} + async fn list_aux_files( mut request: Request, _cancel: CancellationToken, @@ -3618,6 +3679,7 @@ async fn activate_post_import_handler( let timeline_info = build_timeline_info( &timeline, false, // include_non_incremental_logical_size, false, // force_await_initial_logical_size + false, // include_image_consistent_lsn &ctx, ) .await @@ -3630,6 +3692,17 @@ async fn activate_post_import_handler( .await } +// [Hadron] Reset gauge metrics that are used to raised alerts. We need this API as a stop-gap measure to reset alerts +// after we manually rectify situations such as local SSD data loss. We will eventually automate this. +async fn hadron_reset_alert_gauges( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&request, None)?; + LOCAL_DATA_LOSS_SUSPECTED.set(0); + json_response(StatusCode::OK, ()) +} + /// Read the end of a tar archive. /// /// A tar archive normally ends with two consecutive blocks of zeros, 512 bytes each. 
@@ -3682,6 +3755,23 @@ async fn read_tar_eof(mut reader: (impl tokio::io::AsyncRead + Unpin)) -> anyhow Ok(()) } +async fn force_refresh_feature_flag( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?; + check_permission(&request, Some(tenant_shard_id.tenant_id))?; + + let state = get_state(&request); + let tenant = state + .tenant_manager + .get_attached_tenant_shard(tenant_shard_id)?; + tenant + .feature_resolver + .refresh_properties_and_flags(&tenant); + json_response(StatusCode::OK, ()) +} + async fn tenant_evaluate_feature_flag( request: Request, _cancel: CancellationToken, @@ -3698,7 +3788,7 @@ async fn tenant_evaluate_feature_flag( let tenant = state .tenant_manager .get_attached_tenant_shard(tenant_shard_id)?; - // TODO: the properties we get here might be stale right after it is collected. But such races are rare (updated every 10s) + // TODO: the properties we get here might be stale right after it is collected. But such races are rare (updated every 10s) // and we don't need to worry about it for now. 
let properties = tenant.feature_resolver.collect_properties(); if as_type.as_deref() == Some("boolean") { @@ -3911,9 +4001,14 @@ pub fn make_router( .expect("construct launch timestamp header middleware"), ); + let force_metric_collection_on_scrape = state.conf.force_metric_collection_on_scrape; + + let prometheus_metrics_handler_wrapper = + move |req| prometheus_metrics_handler(req, force_metric_collection_on_scrape); + Ok(router .data(state) - .get("/metrics", |r| request_span(r, prometheus_metrics_handler)) + .get("/metrics", move |r| request_span(r, prometheus_metrics_handler_wrapper)) .get("/profile/cpu", |r| request_span(r, profile_cpu_handler)) .get("/profile/heap", |r| request_span(r, profile_heap_handler)) .get("/v1/status", |r| api_handler(r, status_handler)) @@ -4106,7 +4201,7 @@ pub fn make_router( }) .get( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/getpage", - |r| testing_api_handler("getpage@lsn", r, getpage_at_lsn_handler), + |r| testing_api_handler("getpage@lsn", r, getpage_at_lsn_handler), ) .get( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/touchpage", @@ -4119,6 +4214,7 @@ pub fn make_router( .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler)) .put("/v1/io_mode", |r| api_handler(r, put_io_mode_handler)) .get("/v1/utilization", |r| api_handler(r, get_utilization)) + .get("/v1/list_tenant_visible_size", |r| api_handler(r, list_tenant_visible_size_handler)) .post( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/ingest_aux_files", |r| testing_api_handler("ingest_aux_files", r, ingest_aux_files), @@ -4147,6 +4243,9 @@ pub fn make_router( .get("/v1/tenant/:tenant_shard_id/feature_flag/:flag_key", |r| { api_handler(r, tenant_evaluate_feature_flag) }) + .post("/v1/tenant/:tenant_shard_id/force_refresh_feature_flag", |r| { + api_handler(r, force_refresh_feature_flag) + }) .put("/v1/feature_flag/:flag_key", |r| { testing_api_handler("force override feature flag - put", r, force_override_feature_flag_for_testing_put) }) @@ 
-4156,5 +4255,8 @@ pub fn make_router( .post("/v1/feature_flag_spec", |r| { api_handler(r, update_feature_flag_spec) }) + .post("/hadron-internal/reset_alert_gauges", |r| { + api_handler(r, hadron_reset_alert_gauges) + }) .any(handler_404)) } diff --git a/pageserver/src/import_datadir.rs b/pageserver/src/import_datadir.rs index 96fe0c1078..409cc2e3c5 100644 --- a/pageserver/src/import_datadir.rs +++ b/pageserver/src/import_datadir.rs @@ -610,13 +610,13 @@ async fn import_file( debug!("imported twophase file"); } else if file_path.starts_with("pg_wal") { debug!("found wal file in base section. ignore it"); - } else if file_path.starts_with("zenith.signal") { + } else if file_path.starts_with("zenith.signal") || file_path.starts_with("neon.signal") { // Parse zenith signal file to set correct previous LSN let bytes = read_all_bytes(reader).await?; - // zenith.signal format is "PREV LSN: prev_lsn" + // neon.signal format is "PREV LSN: prev_lsn" // TODO write serialization and deserialization in the same place. - let zenith_signal = std::str::from_utf8(&bytes)?.trim(); - let prev_lsn = match zenith_signal { + let neon_signal = std::str::from_utf8(&bytes)?.trim(); + let prev_lsn = match neon_signal { "PREV LSN: none" => Lsn(0), "PREV LSN: invalid" => Lsn(0), other => { @@ -624,17 +624,17 @@ async fn import_file( split[1] .trim() .parse::() - .context("can't parse zenith.signal")? + .context("can't parse neon.signal")? } }; - // zenith.signal is not necessarily the last file, that we handle + // neon.signal is not necessarily the last file, that we handle // but it is ok to call `finish_write()`, because final `modification.commit()` // will update lsn once more to the final one. 
let writer = modification.tline.writer().await; writer.finish_write(prev_lsn); - debug!("imported zenith signal {}", prev_lsn); + debug!("imported neon signal {}", prev_lsn); } else if file_path.starts_with("pg_tblspc") { // TODO Backups exported from neon won't have pg_tblspc, but we will need // this to import arbitrary postgres databases. diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index 0dd3c465e0..0864026f6b 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -73,6 +73,9 @@ pub struct HttpEndpointListener(pub CancellableTask); pub struct HttpsEndpointListener(pub CancellableTask); pub struct ConsumptionMetricsTasks(pub CancellableTask); pub struct DiskUsageEvictionTask(pub CancellableTask); +// HADRON +pub struct MetricsCollectionTask(pub CancellableTask); + impl CancellableTask { pub async fn shutdown(self) { self.cancel.cancel(); @@ -87,6 +90,7 @@ pub async fn shutdown_pageserver( https_listener: Option, page_service: page_service::Listener, grpc_task: Option, + metrics_collection_task: MetricsCollectionTask, consumption_metrics_worker: ConsumptionMetricsTasks, disk_usage_eviction_task: Option, tenant_manager: &TenantManager, @@ -211,6 +215,14 @@ pub async fn shutdown_pageserver( // Best effort to persist any outstanding deletions, to avoid leaking objects deletion_queue.shutdown(Duration::from_secs(5)).await; + // HADRON + timed( + metrics_collection_task.0.shutdown(), + "shutdown metrics collections metrics", + Duration::from_secs(1), + ) + .await; + timed( consumption_metrics_worker.0.shutdown(), "shutdown consumption metrics", diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index 21faceef49..1b783326a0 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -1,3 +1,4 @@ +use std::cell::Cell; use std::collections::HashMap; use std::num::NonZeroUsize; use std::os::fd::RawFd; @@ -102,7 +103,18 @@ pub(crate) static STORAGE_TIME_COUNT_PER_TIMELINE: Lazy = Lazy::n .expect("failed to define a 
metric") }); -// Buckets for background operation duration in seconds, like compaction, GC, size calculation. +/* BEGIN_HADRON */ +pub(crate) static STORAGE_ACTIVE_COUNT_PER_TIMELINE: Lazy = Lazy::new(|| { + register_int_gauge_vec!( + "pageserver_active_storage_operations_count", + "Count of active storage operations with operation, tenant and timeline dimensions", + &["operation", "tenant_id", "shard_id", "timeline_id"], + ) + .expect("failed to define a metric") +}); +/*END_HADRON */ + +// Buckets for background operations like compaction, GC, size calculation const STORAGE_OP_BUCKETS: &[f64] = &[0.010, 0.100, 1.0, 10.0, 100.0, 1000.0]; pub(crate) static STORAGE_TIME_GLOBAL: Lazy = Lazy::new(|| { @@ -2810,6 +2822,49 @@ pub(crate) static WALRECEIVER_CANDIDATES_ADDED: Lazy = pub(crate) static WALRECEIVER_CANDIDATES_REMOVED: Lazy = Lazy::new(|| WALRECEIVER_CANDIDATES_EVENTS.with_label_values(&["remove"])); +pub(crate) static LOCAL_DATA_LOSS_SUSPECTED: Lazy = Lazy::new(|| { + register_int_gauge!( + "pageserver_local_data_loss_suspected", + "Non-zero value indicates that pageserver local data loss is suspected (and highly likely)." + ) + .expect("failed to define a metric") +}); + +// Counter keeping track of misrouted PageStream requests. Spelling out PageStream requests here to distinguish +// it from other types of reqeusts (SK wal replication, http requests, etc.). PageStream requests are used by +// Postgres compute to fetch data from pageservers. +// A misrouted PageStream request is registered if the pageserver cannot find the tenant identified in the +// request, or if the pageserver is not the "primary" serving the tenant shard. These error almost always identify +// issues with compute configuration, caused by either the compute node itself being stuck in the wrong +// configuration or Storage Controller reconciliation bugs. 
Misrouted requests are expected during tenant migration +// and/or during recovery following a pageserver failure, but persistently high rates of misrouted requests +// are indicative of bugs (and unavailability). +pub(crate) static MISROUTED_PAGESTREAM_REQUESTS: Lazy = Lazy::new(|| { + register_int_counter!( + "pageserver_misrouted_pagestream_requests_total", + "Number of pageserver pagestream requests that were routed to the wrong pageserver" + ) + .expect("failed to define a metric") +}); + +// Global counter for PageStream request results by outcome. Outcomes are divided into 3 categories: +// - success +// - internal_error: errors that indicate bugs in the storage cluster (e.g. page reconstruction errors, misrouted requests, LSN timeout errors) +// - other_error: transient error conditions that are expected in normal operation or indicate bugs with other parts of the system (e.g. error due to pageserver shutdown, malformed requests etc.) +pub(crate) static PAGESTREAM_HANDLER_RESULTS_TOTAL: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "pageserver_pagestream_handler_results_total", + "Number of pageserver pagestream handler results by outcome (success, internal_error, other_error)", + &["outcome"] + ) + .expect("failed to define a metric") +}); + +// Constants for pageserver_pagestream_handler_results_total's outcome labels +pub(crate) const PAGESTREAM_HANDLER_OUTCOME_SUCCESS: &str = "success"; +pub(crate) const PAGESTREAM_HANDLER_OUTCOME_INTERNAL_ERROR: &str = "internal_error"; +pub(crate) const PAGESTREAM_HANDLER_OUTCOME_OTHER_ERROR: &str = "other_error"; + // Metrics collected on WAL redo operations // // We collect the time spent in actual WAL redo ('redo'), and time waiting @@ -3048,13 +3103,19 @@ pub(crate) static WAL_REDO_PROCESS_COUNTERS: Lazy = pub(crate) struct StorageTimeMetricsTimer { metrics: StorageTimeMetrics, start: Instant, + stopped: Cell, } impl StorageTimeMetricsTimer { fn new(metrics: StorageTimeMetrics) -> Self { + /*BEGIN_HADRON */ 
+ // record the active operation as the timer starts + metrics.timeline_active_count.inc(); + /*END_HADRON */ Self { metrics, start: Instant::now(), + stopped: Cell::new(false), } } @@ -3070,6 +3131,10 @@ impl StorageTimeMetricsTimer { self.metrics.timeline_sum.inc_by(seconds); self.metrics.timeline_count.inc(); self.metrics.global_histogram.observe(seconds); + /* BEGIN_HADRON*/ + self.stopped.set(true); + self.metrics.timeline_active_count.dec(); + /*END_HADRON */ duration } @@ -3080,6 +3145,16 @@ impl StorageTimeMetricsTimer { } } +/*BEGIN_HADRON */ +impl Drop for StorageTimeMetricsTimer { + fn drop(&mut self) { + if !self.stopped.get() { + self.metrics.timeline_active_count.dec(); + } + } +} +/*END_HADRON */ + pub(crate) struct AlwaysRecordingStorageTimeMetricsTimer(Option); impl Drop for AlwaysRecordingStorageTimeMetricsTimer { @@ -3105,6 +3180,10 @@ pub(crate) struct StorageTimeMetrics { timeline_sum: Counter, /// Number of oeprations, per operation, tenant_id and timeline_id timeline_count: IntCounter, + /*BEGIN_HADRON */ + /// Number of active operations per operation, tenant_id, and timeline_id + timeline_active_count: IntGauge, + /*END_HADRON */ /// Global histogram having only the "operation" label. 
global_histogram: Histogram, } @@ -3124,6 +3203,11 @@ impl StorageTimeMetrics { let timeline_count = STORAGE_TIME_COUNT_PER_TIMELINE .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id]) .unwrap(); + /*BEGIN_HADRON */ + let timeline_active_count = STORAGE_ACTIVE_COUNT_PER_TIMELINE + .get_metric_with_label_values(&[operation, tenant_id, shard_id, timeline_id]) + .unwrap(); + /*END_HADRON */ let global_histogram = STORAGE_TIME_GLOBAL .get_metric_with_label_values(&[operation]) .unwrap(); @@ -3131,6 +3215,7 @@ impl StorageTimeMetrics { StorageTimeMetrics { timeline_sum, timeline_count, + timeline_active_count, global_histogram, } } @@ -3544,6 +3629,14 @@ impl TimelineMetrics { shard_id, timeline_id, ]); + /* BEGIN_HADRON */ + let _ = STORAGE_ACTIVE_COUNT_PER_TIMELINE.remove_label_values(&[ + op, + tenant_id, + shard_id, + timeline_id, + ]); + /*END_HADRON */ } for op in StorageIoSizeOperation::VARIANTS { @@ -4336,6 +4429,9 @@ pub(crate) mod disk_usage_based_eviction { pub(crate) layers_collected: IntCounter, pub(crate) layers_selected: IntCounter, pub(crate) layers_evicted: IntCounter, + /*BEGIN_HADRON */ + pub(crate) bytes_evicted: IntCounter, + /*END_HADRON */ } impl Default for Metrics { @@ -4372,12 +4468,21 @@ pub(crate) mod disk_usage_based_eviction { ) .unwrap(); + /*BEGIN_HADRON */ + let bytes_evicted = register_int_counter!( + "pageserver_disk_usage_based_eviction_evicted_bytes_total", + "Amount of bytes successfully evicted" + ) + .unwrap(); + /*END_HADRON */ + Self { tenant_collection_time, tenant_layer_count, layers_collected, layers_selected, layers_evicted, + bytes_evicted, } } } @@ -4497,6 +4602,7 @@ pub fn preinitialize_metrics( &CIRCUIT_BREAKERS_UNBROKEN, &PAGE_SERVICE_SMGR_FLUSH_INPROGRESS_MICROS_GLOBAL, &WAIT_LSN_IN_PROGRESS_GLOBAL_MICROS, + &MISROUTED_PAGESTREAM_REQUESTS, ] .into_iter() .for_each(|c| { @@ -4534,6 +4640,7 @@ pub fn preinitialize_metrics( // gauges WALRECEIVER_ACTIVE_MANAGERS.get(); + 
LOCAL_DATA_LOSS_SUSPECTED.get(); // histograms [ diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index ad50d32dc8..23146ac40e 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -50,6 +50,7 @@ use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, Bu use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tonic::service::Interceptor as _; +use tonic::transport::server::TcpConnectInfo; use tracing::*; use utils::auth::{Claims, Scope, SwappableJwtAuth}; use utils::id::{TenantId, TenantTimelineId, TimelineId}; @@ -69,7 +70,7 @@ use crate::context::{ }; use crate::metrics::{ self, COMPUTE_COMMANDS_COUNTERS, ComputeCommandKind, GetPageBatchBreakReason, LIVE_CONNECTIONS, - SmgrOpTimer, TimelineMetrics, + MISROUTED_PAGESTREAM_REQUESTS, PAGESTREAM_HANDLER_RESULTS_TOTAL, SmgrOpTimer, TimelineMetrics, }; use crate::pgdatadir_mapping::{LsnRange, Version}; use crate::span::{ @@ -90,7 +91,8 @@ use crate::{CancellableTask, PERF_TRACE_TARGET, timed_after_cancellation}; /// is not yet in state [`TenantState::Active`]. /// /// NB: this is a different value than [`crate::http::routes::ACTIVE_TENANT_TIMEOUT`]. -const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000); +/// HADRON: reduced timeout and we will retry in Cache::get(). +const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(5000); /// Threshold at which to log slow GetPage requests. const LOG_SLOW_GETPAGE_THRESHOLD: Duration = Duration::from_secs(30); @@ -1127,6 +1129,7 @@ impl PageServerHandler { // Closing the connection by returning ``::Reconnect` has the side effect of rate-limiting above message, via // client's reconnect backoff, as well as hopefully prompting the client to load its updated configuration // and talk to a different pageserver. 
+ MISROUTED_PAGESTREAM_REQUESTS.inc(); return respond_error!( span, PageStreamError::Reconnect( @@ -1438,20 +1441,57 @@ impl PageServerHandler { let (response_msg, ctx) = match handler_result { Err(e) => match &e.err { PageStreamError::Shutdown => { + // BEGIN HADRON + PAGESTREAM_HANDLER_RESULTS_TOTAL + .with_label_values(&[metrics::PAGESTREAM_HANDLER_OUTCOME_OTHER_ERROR]) + .inc(); + // END HADRON + // If we fail to fulfil a request during shutdown, which may be _because_ of // shutdown, then do not send the error to the client. Instead just drop the // connection. span.in_scope(|| info!("dropping connection due to shutdown")); return Err(QueryError::Shutdown); } - PageStreamError::Reconnect(reason) => { - span.in_scope(|| info!("handler requested reconnect: {reason}")); + PageStreamError::Reconnect(_reason) => { + span.in_scope(|| { + // BEGIN HADRON + // We can get here because the compute node is pointing at the wrong PS. We + // already have a metric to keep track of this so suppressing this log to + // reduce log spam. The information in this log message is not going to be that + // helpful given the volume of logs that can be generated. 
+ // info!("handler requested reconnect: {reason}") + // END HADRON + }); + // BEGIN HADRON + PAGESTREAM_HANDLER_RESULTS_TOTAL + .with_label_values(&[ + metrics::PAGESTREAM_HANDLER_OUTCOME_INTERNAL_ERROR, + ]) + .inc(); + // END HADRON return Err(QueryError::Reconnect); } PageStreamError::Read(_) | PageStreamError::LsnTimeout(_) | PageStreamError::NotFound(_) | PageStreamError::BadRequest(_) => { + // BEGIN HADRON + if let PageStreamError::Read(_) | PageStreamError::LsnTimeout(_) = &e.err { + PAGESTREAM_HANDLER_RESULTS_TOTAL + .with_label_values(&[ + metrics::PAGESTREAM_HANDLER_OUTCOME_INTERNAL_ERROR, + ]) + .inc(); + } else { + PAGESTREAM_HANDLER_RESULTS_TOTAL + .with_label_values(&[ + metrics::PAGESTREAM_HANDLER_OUTCOME_OTHER_ERROR, + ]) + .inc(); + } + // END HADRON + // print the all details to the log with {:#}, but for the client the // error message is enough. Do not log if shutting down, as the anyhow::Error // here includes cancellation which is not an error. @@ -1469,7 +1509,15 @@ impl PageServerHandler { ) } }, - Ok((response_msg, _op_timer_already_observed, ctx)) => (response_msg, Some(ctx)), + Ok((response_msg, _op_timer_already_observed, ctx)) => { + // BEGIN HADRON + PAGESTREAM_HANDLER_RESULTS_TOTAL + .with_label_values(&[metrics::PAGESTREAM_HANDLER_OUTCOME_SUCCESS]) + .inc(); + // END HADRON + + (response_msg, Some(ctx)) + } }; let ctx = ctx.map(|req_ctx| { @@ -3170,14 +3218,25 @@ where pub struct GrpcPageServiceHandler { tenant_manager: Arc, ctx: RequestContext, + + /// Cancelled to shut down the server. Tonic will shut down in response to this, but wait for + /// in-flight requests to complete. Any tasks we spawn ourselves must respect this token. cancel: CancellationToken, + + /// Any tasks we spawn ourselves should clone this gate guard, so that we can wait for them to + /// complete during shutdown. Request handlers implicitly hold this guard already. gate_guard: GateGuard, + + /// `get_vectored` concurrency setting. 
get_vectored_concurrent_io: GetVectoredConcurrentIo, } impl GrpcPageServiceHandler { /// Spawns a gRPC server for the page service. /// + /// Returns a `CancellableTask` handle that can be used to shut down the server. It waits for + /// any in-flight requests and tasks to complete first. + /// /// TODO: this doesn't support TLS. We need TLS reloading via ReloadingCertificateResolver, so we /// need to reimplement the TCP+TLS accept loop ourselves. pub fn spawn( @@ -3187,12 +3246,15 @@ impl GrpcPageServiceHandler { get_vectored_concurrent_io: GetVectoredConcurrentIo, listener: std::net::TcpListener, ) -> anyhow::Result { + // Set up a cancellation token for shutting down the server, and a gate to wait for all + // requests and spawned tasks to complete. let cancel = CancellationToken::new(); + let gate = Gate::default(); + let ctx = RequestContextBuilder::new(TaskKind::PageRequestHandler) .download_behavior(DownloadBehavior::Download) .perf_span_dispatch(perf_trace_dispatch) .detached_child(); - let gate = Gate::default(); // Set up the TCP socket. We take a preconfigured TcpListener to bind the // port early during startup. @@ -3260,19 +3322,20 @@ impl GrpcPageServiceHandler { .build_v1()?; let server = server.add_service(reflection_service); - // Spawn server task. + // Spawn server task. It runs until the cancellation token fires and in-flight requests and + // tasks complete. The `CancellableTask` will wait for the task's join handle, which + // implicitly waits for the gate to close. let task_cancel = cancel.clone(); let task = COMPUTE_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error( - "grpc listener", + "grpc pageservice listener", async move { - let result = server + server .serve_with_incoming_shutdown(incoming, task_cancel.cancelled()) - .await; - if result.is_ok() { - // TODO: revisit shutdown logic once page service is implemented. - gate.close().await; - } - result + .await?; + // Server exited cleanly. All requests should have completed by now. 
Wait for any + // spawned tasks to complete as well (e.g. IoConcurrency sidecars) via the gate. + gate.close().await; + anyhow::Ok(()) }, )); @@ -3292,9 +3355,12 @@ impl GrpcPageServiceHandler { } /// Generates a PagestreamRequest header from a ReadLsn and request ID. - fn make_hdr(read_lsn: page_api::ReadLsn, req_id: u64) -> PagestreamRequest { + fn make_hdr( + read_lsn: page_api::ReadLsn, + req_id: Option, + ) -> PagestreamRequest { PagestreamRequest { - reqid: req_id, + reqid: req_id.map(|r| r.id).unwrap_or_default(), request_lsn: read_lsn.request_lsn, not_modified_since: read_lsn .not_modified_since_lsn @@ -3352,11 +3418,11 @@ impl GrpcPageServiceHandler { /// NB: errors returned from here are intercepted in get_pages(), and may be converted to a /// GetPageResponse with an appropriate status code to avoid terminating the stream. /// + /// TODO: verify that the requested pages belong to this shard. + /// /// TODO: get_vectored() currently enforces a batch limit of 32. Postgres will typically send /// batches up to effective_io_concurrency = 100. Either we have to accept large batches, or /// split them up in the client or server. - /// - /// TODO: verify that the given keys belong to this shard. 
#[instrument(skip_all, fields(req_id, rel, blkno, blks, req_lsn, mod_lsn))] async fn get_page( ctx: &RequestContext, @@ -3404,7 +3470,7 @@ impl GrpcPageServiceHandler { batch.push(BatchedGetPageRequest { req: PagestreamGetPageRequest { - hdr: Self::make_hdr(req.read_lsn, req.request_id), + hdr: Self::make_hdr(req.read_lsn, Some(req.request_id)), rel: req.rel, blkno, }, @@ -3434,12 +3500,16 @@ impl GrpcPageServiceHandler { request_id: req.request_id, status_code: page_api::GetPageStatusCode::Ok, reason: None, - page_images: Vec::with_capacity(results.len()), + rel: req.rel, + pages: Vec::with_capacity(results.len()), }; for result in results { match result { - Ok((PagestreamBeMessage::GetPage(r), _, _)) => resp.page_images.push(r.page), + Ok((PagestreamBeMessage::GetPage(r), _, _)) => resp.pages.push(page_api::Page { + block_number: r.req.blkno, + image: r.page, + }), Ok((resp, _, _)) => { return Err(tonic::Status::internal(format!( "unexpected response: {resp:?}" @@ -3455,7 +3525,10 @@ impl GrpcPageServiceHandler { /// Implements the gRPC page service. /// -/// TODO: cancellation. +/// Tonic will drop the request handler futures if the client goes away (e.g. due to a timeout or +/// cancellation), so the read path must be cancellation-safe. On shutdown, Tonic will wait for +/// in-flight requests to complete. +/// /// TODO: when the libpq impl is removed, remove the Pagestream types and inline the handler code. #[tonic::async_trait] impl proto::PageService for GrpcPageServiceHandler { @@ -3482,7 +3555,7 @@ impl proto::PageService for GrpcPageServiceHandler { span_record!(rel=%req.rel, lsn=%req.read_lsn); let req = PagestreamExistsRequest { - hdr: Self::make_hdr(req.read_lsn, 0), + hdr: Self::make_hdr(req.read_lsn, None), rel: req.rel, }; @@ -3540,8 +3613,14 @@ impl proto::PageService for GrpcPageServiceHandler { // Spawn a task to run the basebackup. 
let span = Span::current(); + let gate_guard = self + .gate_guard + .try_clone() + .map_err(|_| tonic::Status::unavailable("shutting down"))?; let (mut simplex_read, mut simplex_write) = tokio::io::simplex(CHUNK_SIZE); let jh = tokio::spawn(async move { + let _gate_guard = gate_guard; // keep gate open until task completes + let gzip_level = match req.compression { page_api::BaseBackupCompression::None => None, // NB: using fast compression because it's on the critical path for compute @@ -3632,7 +3711,7 @@ impl proto::PageService for GrpcPageServiceHandler { span_record!(db_oid=%req.db_oid, lsn=%req.read_lsn); let req = PagestreamDbSizeRequest { - hdr: Self::make_hdr(req.read_lsn, 0), + hdr: Self::make_hdr(req.read_lsn, None), dbnode: req.db_oid, }; @@ -3665,13 +3744,14 @@ impl proto::PageService for GrpcPageServiceHandler { .await?; // Spawn an IoConcurrency sidecar, if enabled. - let Ok(gate_guard) = self.gate_guard.try_clone() else { - return Err(tonic::Status::unavailable("shutting down")); - }; + let gate_guard = self + .gate_guard + .try_clone() + .map_err(|_| tonic::Status::unavailable("shutting down"))?; let io_concurrency = IoConcurrency::spawn_from_conf(self.get_vectored_concurrent_io, gate_guard); - // Spawn a task to handle the GetPageRequest stream. + // Construct the GetPageRequest stream handler. let span = Span::current(); let ctx = self.ctx.attached_child(); let cancel = self.cancel.clone(); @@ -3682,29 +3762,33 @@ impl proto::PageService for GrpcPageServiceHandler { .get(ttid.tenant_id, ttid.timeline_id, shard_selector) .await? .downgrade(); - loop { + // NB: Tonic considers the entire stream to be an in-flight request and will wait + // for it to complete before shutting down. React to cancellation between requests. let req = tokio::select! 
{ - req = reqs.message() => req, - _ = cancel.cancelled() => { - tracing::info!("closing getpages stream due to shutdown"); - break; + result = reqs.message() => match result { + Ok(Some(req)) => Ok(req), + Ok(None) => break, // client closed the stream + Err(err) => Err(err), }, - }; - let req = if let Some(req) = req? { - req - } else { - break; - }; - let req_id = req.request_id; + _ = cancel.cancelled() => Err(tonic::Status::unavailable("shutting down")), + }?; + let req_id = req.request_id.map(page_api::RequestID::from).unwrap_or_default(); let result = Self::get_page(&ctx, &timeline, req, io_concurrency.clone()) .instrument(span.clone()) // propagate request span .await; yield match result { Ok(resp) => resp, // Convert per-request errors to GetPageResponses as appropriate, or terminate - // the stream with a tonic::Status. - Err(err) => page_api::GetPageResponse::try_from_status(err, req_id)?.into(), + // the stream with a tonic::Status. Log the error regardless, since + // ObservabilityLayer can't automatically log stream errors. + Err(status) => { + // TODO: it would be nice if we could propagate the get_page() fields here. 
+ span.in_scope(|| { + warn!("request failed with {:?}: {}", status.code(), status.message()); + }); + page_api::GetPageResponse::try_from_status(status, req_id)?.into() + } } } }; @@ -3728,7 +3812,7 @@ impl proto::PageService for GrpcPageServiceHandler { span_record!(rel=%req.rel, lsn=%req.read_lsn); let req = PagestreamNblocksRequest { - hdr: Self::make_hdr(req.read_lsn, 0), + hdr: Self::make_hdr(req.read_lsn, None), rel: req.rel, }; @@ -3761,7 +3845,7 @@ impl proto::PageService for GrpcPageServiceHandler { span_record!(kind=%req.kind, segno=%req.segno, lsn=%req.read_lsn); let req = PagestreamGetSlruSegmentRequest { - hdr: Self::make_hdr(req.read_lsn, 0), + hdr: Self::make_hdr(req.read_lsn, None), kind: req.kind as u8, segno: req.segno, }; @@ -3842,40 +3926,85 @@ impl tonic::server::NamedService for Observabili const NAME: &'static str = S::NAME; // propagate inner service name } -impl tower::Service> for ObservabilityLayerService +impl tower::Service> for ObservabilityLayerService where - S: tower::Service>, + S: tower::Service, Response = http::Response> + Send, S::Future: Send + 'static, { type Response = S::Response; type Error = S::Error; type Future = BoxFuture<'static, Result>; - fn call(&mut self, mut req: http::Request) -> Self::Future { + fn call(&mut self, mut req: http::Request) -> Self::Future { // Record the request start time as a request extension. // // TODO: we should start a timer here instead, but it currently requires a timeline handle // and SmgrQueryType, which we don't have yet. Refactor it to provide it later. req.extensions_mut().insert(ReceivedAt(Instant::now())); - // Create a basic tracing span. Enter the span for the current thread (to use it for inner - // sync code like interceptors), and instrument the future (to use it for inner async code - // like the page service itself). + // Extract the peer address and gRPC method. 
+ let peer = req + .extensions() + .get::() + .and_then(|info| info.remote_addr()) + .map(|addr| addr.to_string()) + .unwrap_or_default(); + + let method = req + .uri() + .path() + .split('/') + .nth(2) + .unwrap_or(req.uri().path()) + .to_string(); + + // Create a basic tracing span. // - // The instrument() call below is not sufficient. It only affects the returned future, and - // only takes effect when the caller polls it. Any sync code executed when we call - // self.inner.call() below (such as interceptors) runs outside of the returned future, and - // is not affected by it. We therefore have to enter the span on the current thread too. + // Enter the span for the current thread and instrument the future. It is not sufficient to + // only instrument the future, since it only takes effect after the future is returned and + // polled, not when the inner service is called below (e.g. during interceptor execution). let span = info_span!( "grpc:pageservice", - // Set by TenantMetadataInterceptor. + // These will be populated by TenantMetadataInterceptor. tenant_id = field::Empty, timeline_id = field::Empty, shard_id = field::Empty, + // NB: empty fields must be listed first above. Otherwise, the field names will be + // clobbered when the empty fields are populated. They will be output last regardless. + %peer, + %method, ); let _guard = span.enter(); - Box::pin(self.inner.call(req).instrument(span.clone())) + // Construct a future for calling the inner service, but don't await it. This avoids having + // to clone the inner service into the future below. + let call = self.inner.call(req); + + async move { + // Await the inner service call. + let result = call.await; + + // Log gRPC error statuses. This won't include request info from handler spans, but it + // will catch all errors (even those emitted before handler spans are constructed). Only + // unary request errors are logged here, not streaming response errors. 
+ if let Ok(ref resp) = result + && let Some(status) = tonic::Status::from_header_map(resp.headers()) + && status.code() != tonic::Code::Ok + { + // TODO: it would be nice if we could propagate the handler span's request fields + // here. This could e.g. be done by attaching the request fields to + // tonic::Status::metadata via a proc macro. + warn!( + "request failed with {:?}: {}", + status.code(), + status.message() + ); + } + + result + } + .instrument(span.clone()) + .boxed() } fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 31f38d485f..8b76d980fc 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -25,9 +25,9 @@ use pageserver_api::keyspace::{KeySpaceRandomAccum, SparseKeySpace}; use pageserver_api::models::RelSizeMigration; use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; -use postgres_ffi::{BLCKSZ, PgMajorVersion, TimestampTz, TransactionId}; +use postgres_ffi::{BLCKSZ, PgMajorVersion, TransactionId}; use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; -use postgres_ffi_types::{Oid, RepOriginId}; +use postgres_ffi_types::{Oid, RepOriginId, TimestampTz}; use serde::{Deserialize, Serialize}; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; @@ -141,6 +141,23 @@ pub(crate) enum CollectKeySpaceError { Cancelled, } +impl CollectKeySpaceError { + pub(crate) fn is_cancel(&self) -> bool { + match self { + CollectKeySpaceError::Decode(_) => false, + CollectKeySpaceError::PageRead(e) => e.is_cancel(), + CollectKeySpaceError::Cancelled => true, + } + } + pub(crate) fn into_anyhow(self) -> anyhow::Error { + match self { + CollectKeySpaceError::Decode(e) => anyhow::Error::new(e), + CollectKeySpaceError::PageRead(e) => anyhow::Error::new(e), + CollectKeySpaceError::Cancelled => anyhow::Error::new(self), + } + } +} + impl From for 
CollectKeySpaceError { fn from(err: PageReconstructError) -> Self { match err { @@ -269,6 +286,10 @@ impl Timeline { /// Like [`Self::get_rel_page_at_lsn`], but returns a batch of pages. /// /// The ordering of the returned vec corresponds to the ordering of `pages`. + /// + /// NB: the read path must be cancellation-safe. The Tonic gRPC service will drop the future + /// if the client goes away (e.g. due to timeout or cancellation). + /// TODO: verify that it actually is cancellation-safe. pub(crate) async fn get_rel_page_at_lsn_batched( &self, pages: impl ExactSizeIterator, @@ -796,6 +817,7 @@ impl Timeline { let gc_cutoff_lsn_guard = self.get_applied_gc_cutoff_lsn(); let gc_cutoff_planned = { let gc_info = self.gc_info.read().unwrap(); + info!(cutoffs=?gc_info.cutoffs, applied_cutoff=%*gc_cutoff_lsn_guard, "starting find_lsn_for_timestamp"); gc_info.min_cutoff() }; // Usually the planned cutoff is newer than the cutoff of the last gc run, diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index f4877fd763..3d66ae4719 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -34,7 +34,7 @@ use once_cell::sync::Lazy; pub use pageserver_api::models::TenantState; use pageserver_api::models::{self, RelSizeMigration}; use pageserver_api::models::{ - CompactInfoResponse, LsnLease, TimelineArchivalState, TimelineState, TopTenantShardItem, + CompactInfoResponse, TimelineArchivalState, TimelineState, TopTenantShardItem, WalRedoManagerStatus, }; use pageserver_api::shard::{ShardIdentity, ShardStripeSize, TenantShardId}; @@ -142,6 +142,9 @@ mod gc_block; mod gc_result; pub(crate) mod throttle; +#[cfg(test)] +pub mod debug; + pub(crate) use timeline::{LogicalSizeCalculationCause, PageReconstructError, Timeline}; pub(crate) use crate::span::debug_assert_current_span_has_tenant_and_timeline_id; @@ -180,6 +183,7 @@ pub(super) struct AttachedTenantConf { impl AttachedTenantConf { fn new( + conf: &'static PageServerConf, tenant_conf: 
pageserver_api::models::TenantConfig, location: AttachedLocationConfig, ) -> Self { @@ -191,9 +195,7 @@ impl AttachedTenantConf { let lsn_lease_deadline = if location.attach_mode == AttachmentMode::Single { Some( tokio::time::Instant::now() - + tenant_conf - .lsn_lease_length - .unwrap_or(LsnLease::DEFAULT_LENGTH), + + TenantShard::get_lsn_lease_length_impl(conf, &tenant_conf), ) } else { // We don't use `lsn_lease_deadline` to delay GC in AttachedMulti and AttachedStale @@ -208,10 +210,13 @@ impl AttachedTenantConf { } } - fn try_from(location_conf: LocationConf) -> anyhow::Result { + fn try_from( + conf: &'static PageServerConf, + location_conf: LocationConf, + ) -> anyhow::Result { match &location_conf.mode { LocationMode::Attached(attach_conf) => { - Ok(Self::new(location_conf.tenant_conf, *attach_conf)) + Ok(Self::new(conf, location_conf.tenant_conf, *attach_conf)) } LocationMode::Secondary(_) => { anyhow::bail!( @@ -386,7 +391,7 @@ pub struct TenantShard { l0_flush_global_state: L0FlushGlobalState, - pub(crate) feature_resolver: TenantFeatureResolver, + pub(crate) feature_resolver: Arc, } impl std::fmt::Debug for TenantShard { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -3286,7 +3291,9 @@ impl TenantShard { // Ignore this, we likely raced with unarchival. OffloadError::NotArchived => Ok(()), OffloadError::AlreadyInProgress => Ok(()), - err => Err(err), + OffloadError::Cancelled => Err(CompactionError::new_cancelled()), + // don't break the anyhow chain + OffloadError::Other(err) => Err(CompactionError::Other(err)), })?; } @@ -3314,27 +3321,13 @@ impl TenantShard { /// Trips the compaction circuit breaker if appropriate. pub(crate) fn maybe_trip_compaction_breaker(&self, err: &CompactionError) { - match err { - err if err.is_cancel() => {} - CompactionError::ShuttingDown => (), - // Offload failures don't trip the circuit breaker, since they're cheap to retry and - // shouldn't block compaction. 
- CompactionError::Offload(_) => {} - CompactionError::CollectKeySpaceError(err) => { - // CollectKeySpaceError::Cancelled and PageRead::Cancelled are handled in `err.is_cancel` branch. - self.compaction_circuit_breaker - .lock() - .unwrap() - .fail(&CIRCUIT_BREAKERS_BROKEN, err); - } - CompactionError::Other(err) => { - self.compaction_circuit_breaker - .lock() - .unwrap() - .fail(&CIRCUIT_BREAKERS_BROKEN, err); - } - CompactionError::AlreadyRunning(_) => {} + if err.is_cancel() { + return; } + self.compaction_circuit_breaker + .lock() + .unwrap() + .fail(&CIRCUIT_BREAKERS_BROKEN, err); } /// Cancel scheduled compaction tasks @@ -3400,7 +3393,13 @@ impl TenantShard { .collect_vec(); for timeline in timelines { - timeline.maybe_freeze_ephemeral_layer().await; + // Include a span with the timeline ID. The parent span already has the tenant ID. + let span = + info_span!("maybe_freeze_ephemeral_layer", timeline_id = %timeline.timeline_id); + timeline + .maybe_freeze_ephemeral_layer() + .instrument(span) + .await; } } @@ -3411,7 +3410,7 @@ impl TenantShard { } // Update the feature resolver with the latest tenant-spcific data. 
- self.feature_resolver.update_cached_tenant_properties(self); + self.feature_resolver.refresh_properties_and_flags(self); } pub fn timeline_has_no_attached_children(&self, timeline_id: TimelineId) -> bool { @@ -4178,6 +4177,15 @@ impl TenantShard { .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold) } + // HADRON + pub fn get_image_creation_timeout(&self) -> Option { + let tenant_conf = self.tenant_conf.load().tenant_conf.clone(); + tenant_conf.image_layer_force_creation_period.or(self + .conf + .default_tenant_conf + .image_layer_force_creation_period) + } + pub fn get_pitr_interval(&self) -> Duration { let tenant_conf = self.tenant_conf.load().tenant_conf.clone(); tenant_conf @@ -4205,10 +4213,16 @@ impl TenantShard { } pub fn get_lsn_lease_length(&self) -> Duration { - let tenant_conf = self.tenant_conf.load().tenant_conf.clone(); + Self::get_lsn_lease_length_impl(self.conf, &self.tenant_conf.load().tenant_conf) + } + + pub fn get_lsn_lease_length_impl( + conf: &'static PageServerConf, + tenant_conf: &pageserver_api::models::TenantConfig, + ) -> Duration { tenant_conf .lsn_lease_length - .unwrap_or(self.conf.default_tenant_conf.lsn_lease_length) + .unwrap_or(conf.default_tenant_conf.lsn_lease_length) } pub fn get_timeline_offloading_enabled(&self) -> bool { @@ -4494,10 +4508,10 @@ impl TenantShard { gc_block: Default::default(), l0_flush_global_state, basebackup_cache, - feature_resolver: TenantFeatureResolver::new( + feature_resolver: Arc::new(TenantFeatureResolver::new( feature_resolver, tenant_shard_id.tenant_id, - ), + )), } } @@ -5711,6 +5725,16 @@ impl TenantShard { .unwrap_or(0) } + /// HADRON + /// Return the visible size of all timelines in this tenant. 
+ pub(crate) fn get_visible_size(&self) -> u64 { + let timelines = self.timelines.lock().unwrap(); + timelines + .values() + .map(|t| t.metrics.visible_physical_size_gauge.get()) + .sum() + } + /// Builds a new tenant manifest, and uploads it if it differs from the last-known tenant /// manifest in `Self::remote_tenant_manifest`. /// @@ -6009,22 +6033,24 @@ pub(crate) mod harness { } #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))] - pub(crate) async fn do_try_load( + pub(crate) async fn do_try_load_with_redo( &self, + walredo_mgr: Arc, ctx: &RequestContext, ) -> anyhow::Result> { - let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager)); - let (basebackup_cache, _) = BasebackupCache::new(Utf8PathBuf::new(), None); let tenant = Arc::new(TenantShard::new( TenantState::Attaching, self.conf, - AttachedTenantConf::try_from(LocationConf::attached_single( - self.tenant_conf.clone(), - self.generation, - ShardParameters::default(), - )) + AttachedTenantConf::try_from( + self.conf, + LocationConf::attached_single( + self.tenant_conf.clone(), + self.generation, + ShardParameters::default(), + ), + ) .unwrap(), self.shard_identity, Some(walredo_mgr), @@ -6049,6 +6075,14 @@ pub(crate) mod harness { Ok(tenant) } + pub(crate) async fn do_try_load( + &self, + ctx: &RequestContext, + ) -> anyhow::Result> { + let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager)); + self.do_try_load_with_redo(walredo_mgr, ctx).await + } + pub fn timeline_path(&self, timeline_id: &TimelineId) -> Utf8PathBuf { self.conf.timeline_path(&self.tenant_shard_id, timeline_id) } @@ -6125,7 +6159,7 @@ mod tests { use pageserver_api::keyspace::KeySpace; #[cfg(feature = "testing")] use pageserver_api::keyspace::KeySpaceRandomAccum; - use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings}; + use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings, LsnLease}; use 
pageserver_compaction::helpers::overlaps_with; #[cfg(feature = "testing")] use rand::SeedableRng; @@ -6675,17 +6709,13 @@ mod tests { tline.freeze_and_flush().await.map_err(|e| e.into()) } - #[tokio::test(start_paused = true)] + #[tokio::test] async fn test_prohibit_branch_creation_on_garbage_collected_data() -> anyhow::Result<()> { let (tenant, ctx) = TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data") .await? .load() .await; - // Advance to the lsn lease deadline so that GC is not blocked by - // initial transition into AttachedSingle. - tokio::time::advance(tenant.get_lsn_lease_length()).await; - tokio::time::resume(); let tline = tenant .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) .await?; @@ -9186,7 +9216,11 @@ mod tests { let cancel = CancellationToken::new(); tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); @@ -9269,7 +9303,11 @@ mod tests { guard.cutoffs.space = Lsn(0x40); } tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); @@ -9384,17 +9422,21 @@ mod tests { Ok(()) } - #[tokio::test(start_paused = true)] + #[tokio::test] async fn test_lsn_lease() -> anyhow::Result<()> { let (tenant, ctx) = TenantHarness::create("test_lsn_lease") .await .unwrap() .load() .await; - // Advance to the lsn lease deadline so that GC is not blocked by - // initial transition into AttachedSingle. 
- tokio::time::advance(tenant.get_lsn_lease_length()).await; - tokio::time::resume(); + // set a non-zero lease length to test the feature + tenant + .update_tenant_config(|mut conf| { + conf.lsn_lease_length = Some(LsnLease::DEFAULT_LENGTH); + Ok(conf) + }) + .unwrap(); + let key = Key::from_hex("010000000033333333444444445500000000").unwrap(); let end_lsn = Lsn(0x100); @@ -9802,7 +9844,11 @@ mod tests { let cancel = CancellationToken::new(); tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); @@ -9837,7 +9883,11 @@ mod tests { guard.cutoffs.space = Lsn(0x40); } tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); @@ -10412,7 +10462,7 @@ mod tests { &cancel, CompactOptions { flags: dryrun_flags, - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -10423,14 +10473,22 @@ mod tests { verify_result().await; tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); verify_result().await; // compact again tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); verify_result().await; @@ -10449,14 +10507,22 @@ mod tests { guard.cutoffs.space = Lsn(0x38); } tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); verify_result().await; // no wals between 0x30 and 0x38, so we should obtain the same result // not increasing the GC horizon and compact again tline - .compact_with_gc(&cancel, 
CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); verify_result().await; @@ -10661,7 +10727,7 @@ mod tests { &cancel, CompactOptions { flags: dryrun_flags, - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -10672,14 +10738,22 @@ mod tests { verify_result().await; tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); verify_result().await; // compact again tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); verify_result().await; @@ -10879,7 +10953,11 @@ mod tests { let cancel = CancellationToken::new(); branch_tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); @@ -10892,7 +10970,7 @@ mod tests { &cancel, CompactOptions { compact_lsn_range: Some(CompactLsnRange::above(Lsn(0x40))), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -11560,7 +11638,7 @@ mod tests { CompactOptions { flags: EnumSet::new(), compact_key_range: Some((get_key(0)..get_key(2)).into()), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -11607,7 +11685,7 @@ mod tests { CompactOptions { flags: EnumSet::new(), compact_key_range: Some((get_key(2)..get_key(4)).into()), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -11659,7 +11737,7 @@ mod tests { CompactOptions { flags: EnumSet::new(), compact_key_range: Some((get_key(4)..get_key(9)).into()), - ..Default::default() + 
..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -11710,7 +11788,7 @@ mod tests { CompactOptions { flags: EnumSet::new(), compact_key_range: Some((get_key(9)..get_key(10)).into()), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -11766,7 +11844,7 @@ mod tests { CompactOptions { flags: EnumSet::new(), compact_key_range: Some((get_key(0)..get_key(10)).into()), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -12037,7 +12115,7 @@ mod tests { &cancel, CompactOptions { compact_lsn_range: Some(CompactLsnRange::above(Lsn(0x28))), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -12072,7 +12150,11 @@ mod tests { // compact again tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), + &ctx, + ) .await .unwrap(); verify_result().await; @@ -12291,7 +12373,7 @@ mod tests { CompactOptions { compact_key_range: Some((get_key(0)..get_key(2)).into()), compact_lsn_range: Some((Lsn(0x20)..Lsn(0x28)).into()), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -12337,7 +12419,7 @@ mod tests { CompactOptions { compact_key_range: Some((get_key(3)..get_key(8)).into()), compact_lsn_range: Some((Lsn(0x28)..Lsn(0x40)).into()), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -12385,7 +12467,7 @@ mod tests { CompactOptions { compact_key_range: Some((get_key(0)..get_key(5)).into()), compact_lsn_range: Some((Lsn(0x20)..Lsn(0x50)).into()), - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -12420,7 +12502,11 @@ mod tests { // final full compaction tline - .compact_with_gc(&cancel, CompactOptions::default(), &ctx) + .compact_with_gc( + &cancel, + CompactOptions::default_for_gc_compaction_unit_tests(), 
+ &ctx, + ) .await .unwrap(); verify_result().await; @@ -12530,7 +12616,7 @@ mod tests { CompactOptions { compact_key_range: None, compact_lsn_range: None, - ..Default::default() + ..CompactOptions::default_for_gc_compaction_unit_tests() }, &ctx, ) @@ -12788,6 +12874,40 @@ mod tests { }, ] ); + + Ok(()) + } + + #[tokio::test] + async fn test_get_force_image_creation_lsn() -> anyhow::Result<()> { + let tenant_conf = pageserver_api::models::TenantConfig { + pitr_interval: Some(Duration::from_secs(7 * 3600)), + image_layer_force_creation_period: Some(Duration::from_secs(3600)), + ..Default::default() + }; + + let tenant_id = TenantId::generate(); + + let harness = TenantHarness::create_custom( + "test_get_force_image_creation_lsn", + tenant_conf, + tenant_id, + ShardIdentity::unsharded(), + Generation::new(1), + ) + .await?; + let (tenant, ctx) = harness.load().await; + let timeline = tenant + .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx) + .await?; + timeline.gc_info.write().unwrap().cutoffs.time = Some(Lsn(100)); + { + let writer = timeline.writer().await; + writer.finish_write(Lsn(5000)); + } + + let image_creation_lsn = timeline.get_force_image_creation_lsn().unwrap(); + assert_eq!(image_creation_lsn, Lsn(4300)); Ok(()) } } diff --git a/pageserver/src/tenant/debug.rs b/pageserver/src/tenant/debug.rs new file mode 100644 index 0000000000..604f7f265e --- /dev/null +++ b/pageserver/src/tenant/debug.rs @@ -0,0 +1,366 @@ +use std::{ops::Range, str::FromStr, sync::Arc}; + +use crate::walredo::RedoAttemptType; +use base64::{Engine as _, engine::general_purpose::STANDARD}; +use bytes::{Bytes, BytesMut}; +use camino::Utf8PathBuf; +use clap::Parser; +use itertools::Itertools; +use pageserver_api::{ + key::Key, + keyspace::KeySpace, + shard::{ShardIdentity, ShardStripeSize}, +}; +use postgres_ffi::PgMajorVersion; +use postgres_ffi::{BLCKSZ, page_is_new, page_set_lsn}; +use tracing::Instrument; +use utils::{ + generation::Generation, + id::{TenantId, 
TimelineId}, + lsn::Lsn, + shard::{ShardCount, ShardIndex, ShardNumber}, +}; +use wal_decoder::models::record::NeonWalRecord; + +use crate::{ + context::{DownloadBehavior, RequestContext}, + task_mgr::TaskKind, + tenant::storage_layer::ValueReconstructState, + walredo::harness::RedoHarness, +}; + +use super::{ + WalRedoManager, WalredoManagerId, + harness::TenantHarness, + remote_timeline_client::LayerFileMetadata, + storage_layer::{AsLayerDesc, IoConcurrency, Layer, LayerName, ValuesReconstructState}, +}; + +fn process_page_image(next_record_lsn: Lsn, is_fpw: bool, img_bytes: Bytes) -> Bytes { + // To match the logic in libs/wal_decoder/src/serialized_batch.rs + let mut new_image: BytesMut = img_bytes.into(); + if is_fpw && !page_is_new(&new_image) { + page_set_lsn(&mut new_image, next_record_lsn); + } + assert_eq!(new_image.len(), BLCKSZ as usize); + new_image.freeze() +} + +async fn redo_wals(input: &str, key: Key) -> anyhow::Result<()> { + let tenant_id = TenantId::generate(); + let timeline_id = TimelineId::generate(); + let redo_harness = RedoHarness::new()?; + let span = redo_harness.span(); + let tenant_conf = pageserver_api::models::TenantConfig { + ..Default::default() + }; + + let ctx = RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error); + let tenant = TenantHarness::create_custom( + "search_key", + tenant_conf, + tenant_id, + ShardIdentity::unsharded(), + Generation::new(1), + ) + .await? 
+ .do_try_load_with_redo( + Arc::new(WalRedoManager::Prod( + WalredoManagerId::next(), + redo_harness.manager, + )), + &ctx, + ) + .await + .unwrap(); + let timeline = tenant + .create_test_timeline(timeline_id, Lsn(0x10), PgMajorVersion::PG16, &ctx) + .await?; + let contents = tokio::fs::read_to_string(input) + .await + .map_err(|e| anyhow::Error::msg(format!("Failed to read input file {input}: {e}"))) + .unwrap(); + let lines = contents.lines(); + let mut last_wal_lsn: Option = None; + let state = { + let mut state = ValueReconstructState::default(); + let mut is_fpw = false; + let mut is_first_line = true; + for line in lines { + if is_first_line { + is_first_line = false; + if line.trim() == "FPW" { + is_fpw = true; + } + continue; // Skip the first line. + } + // Each input line is in the "," format. + let (lsn_str, payload_b64) = line + .split_once(',') + .expect("Invalid input format: expected ','"); + + // Parse the LSN and decode the payload. + let lsn = Lsn::from_str(lsn_str.trim()).expect("Invalid LSN format"); + let bytes = Bytes::from( + STANDARD + .decode(payload_b64.trim()) + .expect("Invalid base64 payload"), + ); + + // The first line is considered the base image, the rest are WAL records. 
+ if state.img.is_none() { + state.img = Some((lsn, process_page_image(lsn, is_fpw, bytes))); + } else { + let wal_record = NeonWalRecord::Postgres { + will_init: false, + rec: bytes, + }; + state.records.push((lsn, wal_record)); + last_wal_lsn.replace(lsn); + } + } + state + }; + + assert!(state.img.is_some(), "No base image found"); + assert!(!state.records.is_empty(), "No WAL records found"); + let result = timeline + .reconstruct_value(key, last_wal_lsn.unwrap(), state, RedoAttemptType::ReadPage) + .instrument(span.clone()) + .await?; + + eprintln!("final image: {:?}", STANDARD.encode(result)); + + Ok(()) +} + +async fn search_key( + tenant_id: TenantId, + timeline_id: TimelineId, + dir: String, + key: Key, + lsn: Lsn, +) -> anyhow::Result<()> { + let shard_index = ShardIndex { + shard_number: ShardNumber(0), + shard_count: ShardCount(4), + }; + + let redo_harness = RedoHarness::new()?; + let span = redo_harness.span(); + let tenant_conf = pageserver_api::models::TenantConfig { + ..Default::default() + }; + let ctx = RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error); + let tenant = TenantHarness::create_custom( + "search_key", + tenant_conf, + tenant_id, + ShardIdentity::new( + shard_index.shard_number, + shard_index.shard_count, + ShardStripeSize(32768), + ) + .unwrap(), + Generation::new(1), + ) + .await? 
+ .do_try_load_with_redo( + Arc::new(WalRedoManager::Prod( + WalredoManagerId::next(), + redo_harness.manager, + )), + &ctx, + ) + .await + .unwrap(); + + let timeline = tenant + .create_test_timeline(timeline_id, Lsn(0x10), PgMajorVersion::PG16, &ctx) + .await?; + + let mut delta_layers: Vec = Vec::new(); + let mut img_layer: Option = Option::None; + let mut dir = tokio::fs::read_dir(dir).await?; + loop { + let entry = dir.next_entry().await?; + if entry.is_none() || !entry.as_ref().unwrap().file_type().await?.is_file() { + break; + } + let path = Utf8PathBuf::from_path_buf(entry.unwrap().path()).unwrap(); + let layer_name = match LayerName::from_str(path.file_name().unwrap()) { + Ok(name) => name, + Err(_) => { + eprintln!("Skipped invalid layer: {path}"); + continue; + } + }; + let layer = Layer::for_resident( + tenant.conf, + &timeline, + path.clone(), + layer_name, + LayerFileMetadata::new( + tokio::fs::metadata(path.clone()).await?.len(), + Generation::new(1), + shard_index, + ), + ); + if layer.layer_desc().is_delta() { + delta_layers.push(layer.into()); + } else if img_layer.is_none() { + img_layer = Some(layer.into()); + } else { + anyhow::bail!("Found multiple image layers"); + } + } + // sort delta layers based on the descending order of LSN + delta_layers.sort_by(|a, b| { + b.layer_desc() + .get_lsn_range() + .start + .cmp(&a.layer_desc().get_lsn_range().start) + }); + + let mut state = ValuesReconstructState::new(IoConcurrency::Sequential); + + let key_space = KeySpace::single(Range { + start: key, + end: key.next(), + }); + let lsn_range = Range { + start: img_layer + .as_ref() + .map_or(Lsn(0x00), |img| img.layer_desc().image_layer_lsn()), + end: lsn, + }; + for delta_layer in delta_layers.iter() { + delta_layer + .get_values_reconstruct_data(key_space.clone(), lsn_range.clone(), &mut state, &ctx) + .await?; + } + + img_layer + .as_ref() + .unwrap() + .get_values_reconstruct_data(key_space.clone(), lsn_range.clone(), &mut state, &ctx) + .await?; + + 
for (_key, result) in std::mem::take(&mut state.keys) { + let state = result.collect_pending_ios().await?; + if state.img.is_some() { + eprintln!( + "image: {}: {:x?}", + state.img.as_ref().unwrap().0, + STANDARD.encode(state.img.as_ref().unwrap().1.clone()) + ); + } + for delta in state.records.iter() { + match &delta.1 { + NeonWalRecord::Postgres { will_init, rec } => { + eprintln!( + "delta: {}: will_init: {}, {:x?}", + delta.0, + will_init, + STANDARD.encode(rec) + ); + } + _ => { + eprintln!("delta: {}: {:x?}", delta.0, delta.1); + } + } + } + + let result = timeline + .reconstruct_value(key, lsn_range.end, state, RedoAttemptType::ReadPage) + .instrument(span.clone()) + .await?; + eprintln!("final image: {lsn} : {result:?}"); + } + + Ok(()) +} + +/// Redo all WALs against the base image in the input file. Return the base64 encoded final image. +/// Each line in the input file must be in the form "," where: +/// * `` is a PostgreSQL LSN in hexadecimal notation, e.g. `0/16ABCDE`. +/// * `` is the base64‐encoded page image (first line) or WAL record (subsequent lines). +/// +/// The first line provides the base image of a page. The LSN is the LSN of "next record" following +/// the record containing the FPI. For example, if the FPI was extracted from a WAL record occuping +/// [0/1, 0/200) in the WAL stream, the LSN appearing along side the page image here should be 0/200. +/// +/// The subsequent lines are WAL records, ordered from the oldest to the newest. The LSN is the +/// record LSN of the WAL record, not the "next record" LSN. For example, if the WAL record here +/// occupies [0/1, 0/200) in the WAL stream, the LSN appearing along side the WAL record here should +/// be 0/1. 
+#[derive(Parser)] +struct RedoWalsCmd { + #[clap(long)] + input: String, + #[clap(long)] + key: String, +} + +#[tokio::test] +async fn test_redo_wals() -> anyhow::Result<()> { + let args = std::env::args().collect_vec(); + let pos = args + .iter() + .position(|arg| arg == "--") + .unwrap_or(args.len()); + let slice = &args[pos..args.len()]; + let cmd = match RedoWalsCmd::try_parse_from(slice) { + Ok(cmd) => cmd, + Err(err) => { + eprintln!("{err}"); + return Ok(()); + } + }; + + let key = Key::from_hex(&cmd.key).unwrap(); + redo_wals(&cmd.input, key).await?; + + Ok(()) +} + +/// Search for a page at the given LSN in all layers of the data_dir. +/// Return the base64-encoded image and all WAL records, as well as the final reconstructed image. +#[derive(Parser)] +struct SearchKeyCmd { + #[clap(long)] + tenant_id: String, + #[clap(long)] + timeline_id: String, + #[clap(long)] + data_dir: String, + #[clap(long)] + key: String, + #[clap(long)] + lsn: String, +} + +#[tokio::test] +async fn test_search_key() -> anyhow::Result<()> { + let args = std::env::args().collect_vec(); + let pos = args + .iter() + .position(|arg| arg == "--") + .unwrap_or(args.len()); + let slice = &args[pos..args.len()]; + let cmd = match SearchKeyCmd::try_parse_from(slice) { + Ok(cmd) => cmd, + Err(err) => { + eprintln!("{err}"); + return Ok(()); + } + }; + + let tenant_id = TenantId::from_str(&cmd.tenant_id).unwrap(); + let timeline_id = TimelineId::from_str(&cmd.timeline_id).unwrap(); + let key = Key::from_hex(&cmd.key).unwrap(); + let lsn = Lsn::from_str(&cmd.lsn).unwrap(); + search_key(tenant_id, timeline_id, cmd.data_dir, key, lsn).await?; + + Ok(()) +} diff --git a/pageserver/src/tenant/layer_map.rs b/pageserver/src/tenant/layer_map.rs index 23052ccee7..ba02602cfe 100644 --- a/pageserver/src/tenant/layer_map.rs +++ b/pageserver/src/tenant/layer_map.rs @@ -46,10 +46,11 @@ mod historic_layer_coverage; mod layer_coverage; -use std::collections::{HashMap, VecDeque}; +use 
std::collections::{BTreeMap, HashMap, VecDeque}; use std::iter::Peekable; use std::ops::Range; use std::sync::Arc; +use std::time::Instant; use anyhow::Result; use historic_layer_coverage::BufferedHistoricLayerCoverage; @@ -904,6 +905,103 @@ impl LayerMap { max_stacked_deltas } + /* BEGIN_HADRON */ + /** + * Compute the image consistent LSN, the largest LSN below which all pages have been redone successfully. + * It works by first finding the latest image layers and store them into a map. Then for each delta layer, + * find all overlapping image layers in order to potentially increase the image LSN in case there are gaps + * (e.g., if an image is created at LSN 100 but the delta layer spans LSN [150, 200], then we can increase + * image LSN to 150 because there is no WAL record in between). + * Finally, the image consistent LSN is computed by taking the minimum of all image layers. + */ + pub fn compute_image_consistent_lsn(&self, disk_consistent_lsn: Lsn) -> Lsn { + struct ImageLayerInfo { + // creation LSN of the image layer + image_lsn: Lsn, + // the current minimum LSN of newer delta layers with overlapping key ranges + min_delta_lsn: Lsn, + } + let started_at = Instant::now(); + + let min_l0_deltas_lsn = { + let l0_deltas = self.level0_deltas(); + l0_deltas + .iter() + .map(|layer| layer.get_lsn_range().start) + .min() + .unwrap_or(disk_consistent_lsn) + }; + let global_key_range = Key::MIN..Key::MAX; + + // step 1: collect all most recent image layers into a map + // map: end key to image_layer_info + let mut image_map: BTreeMap = BTreeMap::new(); + for (img_range, img) in self.image_coverage(&global_key_range, disk_consistent_lsn) { + let img_lsn = img.map(|layer| layer.get_lsn_range().end).unwrap_or(Lsn(0)); + image_map.insert( + img_range.end, + ImageLayerInfo { + image_lsn: img_lsn, + min_delta_lsn: min_l0_deltas_lsn, + }, + ); + } + + // step 2: go through all delta layers, and update the image layer info with overlapping + // key ranges + for layer in 
self.historic.iter() { + if !layer.is_delta { + continue; + } + let delta_key_range = layer.get_key_range(); + let delta_lsn_range = layer.get_lsn_range(); + for (img_end_key, img_info) in image_map.range_mut(delta_key_range.start..Key::MAX) { + debug_assert!(img_end_key >= &delta_key_range.start); + if delta_lsn_range.end > img_info.image_lsn { + // the delta layer includes WAL records after the image + // it's possibel that the delta layer's start LSN < image LSN, which will be simply ignored by step 3 + img_info.min_delta_lsn = + std::cmp::min(img_info.min_delta_lsn, delta_lsn_range.start); + } + if img_end_key >= &delta_key_range.end { + // we have fully processed all overlapping image layers + break; + } + } + } + + // step 3, go through all image layers and find the image consistent LSN + let mut img_consistent_lsn = min_l0_deltas_lsn.checked_sub(Lsn(1)).unwrap(); + let mut prev_key = Key::MIN; + for (img_key, img_info) in image_map { + tracing::debug!( + "Image layer {:?}:{} has min delta lsn {}", + Range { + start: prev_key, + end: img_key, + }, + img_info.image_lsn, + img_info.min_delta_lsn, + ); + let image_lsn = std::cmp::max( + img_info.image_lsn, + img_info.min_delta_lsn.checked_sub(Lsn(1)).unwrap_or(Lsn(0)), + ); + img_consistent_lsn = std::cmp::min(img_consistent_lsn, image_lsn); + prev_key = img_key; + } + tracing::info!( + "computed image_consistent_lsn {} for disk_consistent_lsn {} in {}ms. 
Processed {} layrs in total.", + img_consistent_lsn, + disk_consistent_lsn, + started_at.elapsed().as_millis(), + self.historic.len() + ); + img_consistent_lsn + } + + /* END_HADRON */ + /// Return all L0 delta layers pub fn level0_deltas(&self) -> &Vec> { &self.l0_delta_layers @@ -1579,6 +1677,138 @@ mod tests { LayerVisibilityHint::Visible )); } + + /* BEGIN_HADRON */ + #[test] + fn test_compute_image_consistent_lsn() { + let mut layer_map = LayerMap::default(); + + let disk_consistent_lsn = Lsn(1000); + // case 1: empty layer map + let image_consistent_lsn = layer_map.compute_image_consistent_lsn(disk_consistent_lsn); + assert_eq!( + disk_consistent_lsn.checked_sub(Lsn(1)).unwrap(), + image_consistent_lsn + ); + + // case 2: only L0 delta layer + { + let mut updates = layer_map.batch_update(); + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(0)..Key::from_i128(100), + Lsn(900)..Lsn(990), + true, + )); + + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(0)..Key::from_i128(100), + Lsn(850)..Lsn(899), + true, + )); + } + + // should use min L0 delta LSN - 1 as image consistent LSN + let image_consistent_lsn = layer_map.compute_image_consistent_lsn(disk_consistent_lsn); + assert_eq!(Lsn(849), image_consistent_lsn); + + // case 3: 3 images, no L1 delta + { + let mut updates = layer_map.batch_update(); + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(0)..Key::from_i128(40), + Lsn(100)..Lsn(100), + false, + )); + + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(40)..Key::from_i128(70), + Lsn(200)..Lsn(200), + false, + )); + + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(70)..Key::from_i128(100), + Lsn(150)..Lsn(150), + false, + )); + } + // should use min L0 delta LSN - 1 as image consistent LSN + let image_consistent_lsn = layer_map.compute_image_consistent_lsn(disk_consistent_lsn); + assert_eq!(Lsn(849), image_consistent_lsn); + + // case 4: 3 
images with 1 L1 delta + { + let mut updates = layer_map.batch_update(); + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(0)..Key::from_i128(50), + Lsn(300)..Lsn(350), + true, + )); + } + let image_consistent_lsn = layer_map.compute_image_consistent_lsn(disk_consistent_lsn); + assert_eq!(Lsn(299), image_consistent_lsn); + + // case 5: 3 images with 1 more L1 delta with smaller LSN + { + let mut updates = layer_map.batch_update(); + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(50)..Key::from_i128(72), + Lsn(200)..Lsn(300), + true, + )); + } + let image_consistent_lsn = layer_map.compute_image_consistent_lsn(disk_consistent_lsn); + assert_eq!(Lsn(199), image_consistent_lsn); + + // case 6: 3 images with more newer L1 deltas (no impact on final results) + { + let mut updates = layer_map.batch_update(); + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(0)..Key::from_i128(30), + Lsn(400)..Lsn(500), + true, + )); + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(35)..Key::from_i128(100), + Lsn(450)..Lsn(600), + true, + )); + } + let image_consistent_lsn = layer_map.compute_image_consistent_lsn(disk_consistent_lsn); + assert_eq!(Lsn(199), image_consistent_lsn); + + // case 7: 3 images with more older L1 deltas (no impact on final results) + { + let mut updates = layer_map.batch_update(); + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(0)..Key::from_i128(40), + Lsn(0)..Lsn(50), + true, + )); + + updates.insert_historic(PersistentLayerDesc::new_test( + Key::from_i128(50)..Key::from_i128(100), + Lsn(10)..Lsn(60), + true, + )); + } + let image_consistent_lsn = layer_map.compute_image_consistent_lsn(disk_consistent_lsn); + assert_eq!(Lsn(199), image_consistent_lsn); + + // case 8: 3 images with one more L1 delta with overlapping LSN range + { + let mut updates = layer_map.batch_update(); + updates.insert_historic(PersistentLayerDesc::new_test( + 
Key::from_i128(0)..Key::from_i128(50), + Lsn(50)..Lsn(250), + true, + )); + } + let image_consistent_lsn = layer_map.compute_image_consistent_lsn(disk_consistent_lsn); + assert_eq!(Lsn(100), image_consistent_lsn); + } + + /* END_HADRON */ } #[cfg(test)] diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 95f5c60170..52f67abde5 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -43,7 +43,7 @@ use crate::controller_upcall_client::{ }; use crate::deletion_queue::DeletionQueueClient; use crate::http::routes::ACTIVE_TENANT_TIMEOUT; -use crate::metrics::{TENANT, TENANT_MANAGER as METRICS}; +use crate::metrics::{LOCAL_DATA_LOSS_SUSPECTED, TENANT, TENANT_MANAGER as METRICS}; use crate::task_mgr::{BACKGROUND_RUNTIME, TaskKind}; use crate::tenant::config::{ AttachedLocationConfig, AttachmentMode, LocationConf, LocationMode, SecondaryLocationConfig, @@ -538,6 +538,21 @@ pub async fn init_tenant_mgr( // Determine which tenants are to be secondary or attached, and in which generation let tenant_modes = init_load_generations(conf, &tenant_configs, resources, cancel).await?; + // Hadron local SSD check: Raise an alert if our local filesystem does not contain any tenants but the re-attach request returned tenants. + // This can happen if the PS suffered a Kubernetes node failure resulting in loss of all local data, but recovered quickly on another node + // so the Storage Controller has not had the time to move tenants out. 
+ let data_loss_suspected = if let Some(tenant_modes) = &tenant_modes { + tenant_configs.is_empty() && !tenant_modes.is_empty() + } else { + false + }; + if data_loss_suspected { + tracing::error!( + "Local data loss suspected: no tenants found on local filesystem, but re-attach request returned tenants" + ); + } + LOCAL_DATA_LOSS_SUSPECTED.set(if data_loss_suspected { 1 } else { 0 }); + tracing::info!( "Attaching {} tenants at startup, warming up {} at a time", tenant_configs.len(), @@ -664,7 +679,7 @@ pub async fn init_tenant_mgr( tenant_shard_id, &tenant_dir_path, resources.clone(), - AttachedTenantConf::new(location_conf.tenant_conf, attached_conf), + AttachedTenantConf::new(conf, location_conf.tenant_conf, attached_conf), shard_identity, Some(init_order.clone()), SpawnMode::Lazy, @@ -842,8 +857,11 @@ impl TenantManager { // take our fast path and just provide the updated configuration // to the tenant. tenant.set_new_location_config( - AttachedTenantConf::try_from(new_location_config.clone()) - .map_err(UpsertLocationError::BadRequest)?, + AttachedTenantConf::try_from( + self.conf, + new_location_config.clone(), + ) + .map_err(UpsertLocationError::BadRequest)?, ); Some(FastPathModified::Attached(tenant.clone())) @@ -1046,7 +1064,7 @@ impl TenantManager { // Testing hack: if we are configured with no control plane, then drop the generation // from upserts. This enables creating generation-less tenants even though neon_local // always uses generations when calling the location conf API. 
- let attached_conf = AttachedTenantConf::try_from(new_location_config) + let attached_conf = AttachedTenantConf::try_from(self.conf, new_location_config) .map_err(UpsertLocationError::BadRequest)?; let tenant = tenant_spawn( @@ -1250,7 +1268,7 @@ impl TenantManager { tenant_shard_id, &tenant_path, self.resources.clone(), - AttachedTenantConf::try_from(config)?, + AttachedTenantConf::try_from(self.conf, config)?, shard_identity, None, SpawnMode::Eager, @@ -1660,6 +1678,8 @@ impl TenantManager { // Phase 6: Release the InProgress on the parent shard drop(parent_slot_guard); + utils::pausable_failpoint!("shard-split-post-finish-pause"); + Ok(child_shards) } @@ -2131,7 +2151,7 @@ impl TenantManager { tenant_shard_id, &tenant_path, self.resources.clone(), - AttachedTenantConf::try_from(config).map_err(Error::DetachReparent)?, + AttachedTenantConf::try_from(self.conf, config).map_err(Error::DetachReparent)?, shard_identity, None, SpawnMode::Eager, diff --git a/pageserver/src/tenant/remote_timeline_client/upload.rs b/pageserver/src/tenant/remote_timeline_client/upload.rs index ffb4717d9f..f2fbf656a6 100644 --- a/pageserver/src/tenant/remote_timeline_client/upload.rs +++ b/pageserver/src/tenant/remote_timeline_client/upload.rs @@ -141,11 +141,29 @@ pub(super) async fn upload_timeline_layer<'a>( let fs_size = usize::try_from(fs_size) .with_context(|| format!("convert {local_path:?} size {fs_size} usize"))?; - + /* BEGIN_HADRON */ + let mut metadata = None; + match storage { + // Pass the file path as a storage metadata to minimize changes to neon. + // Otherwise, we need to change the upload interface. 
+ GenericRemoteStorage::AzureBlob(s) => { + let block_size_mb = s.put_block_size_mb.unwrap_or(0); + if block_size_mb > 0 && fs_size > block_size_mb * 1024 * 1024 { + metadata = Some(remote_storage::StorageMetadata::from([( + "databricks_azure_put_block", + local_path.as_str(), + )])); + } + } + GenericRemoteStorage::LocalFs(_) => {} + GenericRemoteStorage::AwsS3(_) => {} + GenericRemoteStorage::Unreliable(_) => {} + }; + /* END_HADRON */ let reader = tokio_util::io::ReaderStream::with_capacity(source_file, super::BUFFER_SIZE); storage - .upload(reader, fs_size, remote_path, None, cancel) + .upload(reader, fs_size, remote_path, metadata, cancel) .await .with_context(|| format!("upload layer from local path '{local_path}'")) } diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 9fbb9d2438..43ea8fffa3 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -75,7 +75,7 @@ where /// the same ValueReconstructState struct in the next 'get_value_reconstruct_data' /// call, to collect more records. 
/// -#[derive(Debug, Default)] +#[derive(Debug, Default, Clone)] pub(crate) struct ValueReconstructState { pub(crate) records: Vec<(Lsn, NeonWalRecord)>, pub(crate) img: Option<(Lsn, Bytes)>, @@ -308,6 +308,9 @@ pub struct ValuesReconstructState { layers_visited: u32, delta_layers_visited: u32, + pub(crate) enable_debug: bool, + pub(crate) debug_state: ValueReconstructState, + pub(crate) io_concurrency: IoConcurrency, num_active_ios: Arc, @@ -657,6 +660,23 @@ impl ValuesReconstructState { layers_visited: 0, delta_layers_visited: 0, io_concurrency, + enable_debug: false, + debug_state: ValueReconstructState::default(), + num_active_ios: Arc::new(AtomicUsize::new(0)), + read_path: None, + } + } + + pub(crate) fn new_with_debug(io_concurrency: IoConcurrency) -> Self { + Self { + keys: HashMap::new(), + keys_done: KeySpaceRandomAccum::new(), + keys_with_image_coverage: None, + layers_visited: 0, + delta_layers_visited: 0, + io_concurrency, + enable_debug: true, + debug_state: ValueReconstructState::default(), num_active_ios: Arc::new(AtomicUsize::new(0)), read_path: None, } @@ -670,6 +690,12 @@ impl ValuesReconstructState { self.io_concurrency.spawn_io(fut).await; } + pub(crate) fn set_debug_state(&mut self, debug_state: &ValueReconstructState) { + if self.enable_debug { + self.debug_state = debug_state.clone(); + } + } + pub(crate) fn on_layer_visited(&mut self, layer: &ReadableLayer) { self.layers_visited += 1; if let ReadableLayer::PersistentLayer(layer) = layer { diff --git a/pageserver/src/tenant/storage_layer/layer_name.rs b/pageserver/src/tenant/storage_layer/layer_name.rs index 0f7995f87b..973852defc 100644 --- a/pageserver/src/tenant/storage_layer/layer_name.rs +++ b/pageserver/src/tenant/storage_layer/layer_name.rs @@ -225,7 +225,7 @@ impl fmt::Display for ImageLayerName { /// storage and object names in remote storage consist of the LayerName plus some extra qualifiers /// that uniquely identify the physical incarnation of a layer (see 
[crate::tenant::remote_timeline_client::remote_layer_path]) /// and [`crate::tenant::storage_layer::layer::local_layer_path`]) -#[derive(Debug, PartialEq, Eq, Hash, Clone)] +#[derive(Debug, PartialEq, Eq, Hash, Clone, Ord, PartialOrd)] pub enum LayerName { Image(ImageLayerName), Delta(DeltaLayerName), diff --git a/pageserver/src/tenant/tasks.rs b/pageserver/src/tenant/tasks.rs index 954dd38bb4..08fc7d61a5 100644 --- a/pageserver/src/tenant/tasks.rs +++ b/pageserver/src/tenant/tasks.rs @@ -17,23 +17,35 @@ use tracing::*; use utils::backoff::exponential_backoff_duration; use utils::completion::Barrier; use utils::pausable_failpoint; -use utils::sync::gate::GateError; use crate::context::{DownloadBehavior, RequestContext}; use crate::metrics::{self, BackgroundLoopSemaphoreMetricsRecorder, TENANT_TASK_EVENTS}; use crate::task_mgr::{self, BACKGROUND_RUNTIME, TOKIO_WORKER_THREADS, TaskKind}; -use crate::tenant::blob_io::WriteBlobError; use crate::tenant::throttle::Stats; use crate::tenant::timeline::CompactionError; use crate::tenant::timeline::compaction::CompactionOutcome; use crate::tenant::{TenantShard, TenantState}; -use crate::virtual_file::owned_buffers_io::write::FlushTaskError; /// Semaphore limiting concurrent background tasks (across all tenants). /// /// We use 3/4 Tokio threads, to avoid blocking all threads in case we do any CPU-heavy work. static CONCURRENT_BACKGROUND_TASKS: Lazy = Lazy::new(|| { let total_threads = TOKIO_WORKER_THREADS.get(); + + /*BEGIN_HADRON*/ + // ideally we should run at least one compaction task per tenant in order to (1) maximize + // compaction throughput (2) avoid head-of-line blocking of large compactions. However doing + // that may create too many compaction tasks with lots of memory overheads. So we limit the + // number of compaction tasks based on the available CPU core count. + // Need to revisit. 
+ // let tasks_per_thread = std::env::var("BG_TASKS_PER_THREAD") + // .ok() + // .and_then(|s| s.parse().ok()) + // .unwrap_or(4); + // let permits = usize::max(1, total_threads * tasks_per_thread); + // // assert!(permits < total_threads, "need threads for other work"); + /*END_HADRON*/ + let permits = max(1, (total_threads * 3).checked_div(4).unwrap_or(0)); assert_ne!(permits, 0, "we will not be adding in permits later"); assert!(permits < total_threads, "need threads for other work"); @@ -295,48 +307,12 @@ pub(crate) fn log_compaction_error( task_cancelled: bool, degrade_to_warning: bool, ) { - use CompactionError::*; + let is_cancel = err.is_cancel(); - use crate::tenant::PageReconstructError; - use crate::tenant::upload_queue::NotInitialized; - - let level = match err { - e if e.is_cancel() => return, - ShuttingDown => return, - Offload(_) => Level::ERROR, - AlreadyRunning(_) => Level::ERROR, - CollectKeySpaceError(_) => Level::ERROR, - _ if task_cancelled => Level::INFO, - Other(err) => { - let root_cause = err.root_cause(); - - let upload_queue = root_cause - .downcast_ref::() - .is_some_and(|e| e.is_stopping()); - let timeline = root_cause - .downcast_ref::() - .is_some_and(|e| e.is_stopping()); - let buffered_writer_flush_task_canelled = root_cause - .downcast_ref::() - .is_some_and(|e| e.is_cancel()); - let write_blob_cancelled = root_cause - .downcast_ref::() - .is_some_and(|e| e.is_cancel()); - let gate_closed = root_cause - .downcast_ref::() - .is_some_and(|e| e.is_cancel()); - let is_stopping = upload_queue - || timeline - || buffered_writer_flush_task_canelled - || write_blob_cancelled - || gate_closed; - - if is_stopping { - Level::INFO - } else { - Level::ERROR - } - } + let level = if is_cancel || task_cancelled { + Level::INFO + } else { + Level::ERROR }; if let Some((error_count, sleep_duration)) = retry_info { diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index aeced98859..06e02a7386 100644 --- 
a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -40,7 +40,6 @@ use layer_manager::{ Shutdown, }; -use offload::OffloadError; use once_cell::sync::Lazy; use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL; use pageserver_api::key::{ @@ -119,7 +118,6 @@ use crate::pgdatadir_mapping::{ MAX_AUX_FILE_V2_DELTAS, MetricsUpdate, }; use crate::task_mgr::TaskKind; -use crate::tenant::config::AttachmentMode; use crate::tenant::gc_result::GcResult; use crate::tenant::layer_map::LayerMap; use crate::tenant::metadata::TimelineMetadata; @@ -202,7 +200,7 @@ pub struct TimelineResources { pub l0_compaction_trigger: Arc, pub l0_flush_global_state: l0_flush::L0FlushGlobalState, pub basebackup_cache: Arc, - pub feature_resolver: TenantFeatureResolver, + pub feature_resolver: Arc, } pub struct Timeline { @@ -450,7 +448,7 @@ pub struct Timeline { /// A channel to send async requests to prepare a basebackup for the basebackup cache. basebackup_cache: Arc, - feature_resolver: TenantFeatureResolver, + feature_resolver: Arc, } pub(crate) enum PreviousHeatmap { @@ -587,6 +585,28 @@ pub(crate) enum PageReconstructError { MissingKey(Box), } +impl PageReconstructError { + pub(crate) fn is_cancel(&self) -> bool { + match self { + PageReconstructError::Other(_) => false, + PageReconstructError::AncestorLsnTimeout(e) => e.is_cancel(), + PageReconstructError::Cancelled => true, + PageReconstructError::WalRedo(_) => false, + PageReconstructError::MissingKey(_) => false, + } + } + #[allow(dead_code)] // we use the is_cancel + into_anyhow pattern in quite a few places, this one will follow soon enough + pub(crate) fn into_anyhow(self) -> anyhow::Error { + match self { + PageReconstructError::Other(e) => e, + PageReconstructError::AncestorLsnTimeout(e) => e.into_anyhow(), + PageReconstructError::Cancelled => anyhow::Error::new(self), + PageReconstructError::WalRedo(e) => e, + PageReconstructError::MissingKey(_) => anyhow::Error::new(self), + } + } +} + 
impl From for PageReconstructError { fn from(value: anyhow::Error) -> Self { // with walingest.rs many PageReconstructError are wrapped in as anyhow::Error @@ -740,17 +760,6 @@ impl std::fmt::Display for MissingKeyError { } } -impl PageReconstructError { - /// Returns true if this error indicates a tenant/timeline shutdown alike situation - pub(crate) fn is_stopping(&self) -> bool { - use PageReconstructError::*; - match self { - Cancelled => true, - Other(_) | AncestorLsnTimeout(_) | WalRedo(_) | MissingKey(_) => false, - } - } -} - #[derive(thiserror::Error, Debug)] pub(crate) enum CreateImageLayersError { #[error("timeline shutting down")] @@ -930,6 +939,20 @@ pub(crate) struct CompactOptions { /// Set job size for the GC compaction. /// This option is only used by GC compaction. pub sub_compaction_max_job_size_mb: Option, + /// Only for GC compaction. + /// If set, the compaction will compact the metadata layers. Should be only set to true in unit tests + /// because metadata compaction is not fully supported yet. + pub gc_compaction_do_metadata_compaction: bool, +} + +impl CompactOptions { + #[cfg(test)] + pub fn default_for_gc_compaction_unit_tests() -> Self { + Self { + gc_compaction_do_metadata_compaction: true, + ..Default::default() + } + } } impl std::fmt::Debug for Timeline { @@ -953,13 +976,35 @@ pub enum WaitLsnError { Timeout(String), } +impl WaitLsnError { + pub(crate) fn is_cancel(&self) -> bool { + match self { + WaitLsnError::Shutdown => true, + WaitLsnError::BadState(timeline_state) => match timeline_state { + TimelineState::Loading => false, + TimelineState::Active => false, + TimelineState::Stopping => true, + TimelineState::Broken { .. 
} => false, + }, + WaitLsnError::Timeout(_) => false, + } + } + pub(crate) fn into_anyhow(self) -> anyhow::Error { + match self { + WaitLsnError::Shutdown => anyhow::Error::new(self), + WaitLsnError::BadState(_) => anyhow::Error::new(self), + WaitLsnError::Timeout(_) => anyhow::Error::new(self), + } + } +} + impl From for tonic::Status { fn from(err: WaitLsnError) -> Self { use tonic::Code; - let code = match &err { - WaitLsnError::Timeout(_) => Code::Internal, - WaitLsnError::BadState(_) => Code::Internal, - WaitLsnError::Shutdown => Code::Unavailable, + let code = if err.is_cancel() { + Code::Unavailable + } else { + Code::Internal }; tonic::Status::new(code, err.to_string()) } @@ -971,7 +1016,7 @@ impl From for tonic::Status { impl From for CompactionError { fn from(e: CreateImageLayersError) -> Self { match e { - CreateImageLayersError::Cancelled => CompactionError::ShuttingDown, + CreateImageLayersError::Cancelled => CompactionError::new_cancelled(), CreateImageLayersError::Other(e) => { CompactionError::Other(e.context("create image layers")) } @@ -1086,6 +1131,26 @@ enum ImageLayerCreationOutcome { Skip, } +enum RepartitionError { + Other(anyhow::Error), + CollectKeyspace(CollectKeySpaceError), +} + +impl RepartitionError { + fn is_cancel(&self) -> bool { + match self { + RepartitionError::Other(_) => false, + RepartitionError::CollectKeyspace(e) => e.is_cancel(), + } + } + fn into_anyhow(self) -> anyhow::Error { + match self { + RepartitionError::Other(e) => e, + RepartitionError::CollectKeyspace(e) => e.into_anyhow(), + } + } +} + /// Public interface functions impl Timeline { /// Get the LSN where this branch was created @@ -1202,12 +1267,66 @@ impl Timeline { } } + #[inline(always)] + pub(crate) async fn debug_get( + &self, + key: Key, + lsn: Lsn, + ctx: &RequestContext, + reconstruct_state: &mut ValuesReconstructState, + ) -> Result { + if !lsn.is_valid() { + return Err(PageReconstructError::Other(anyhow::anyhow!("Invalid LSN"))); + } + + // This check 
is debug-only because of the cost of hashing, and because it's a double-check: we + // already checked the key against the shard_identity when looking up the Timeline from + // page_service. + debug_assert!(!self.shard_identity.is_key_disposable(&key)); + + let query = VersionedKeySpaceQuery::uniform(KeySpace::single(key..key.next()), lsn); + let vectored_res = self + .debug_get_vectored_impl(query, reconstruct_state, ctx) + .await; + + let key_value = vectored_res?.pop_first(); + match key_value { + Some((got_key, value)) => { + if got_key != key { + error!( + "Expected {}, but singular vectored get returned {}", + key, got_key + ); + Err(PageReconstructError::Other(anyhow!( + "Singular vectored get returned wrong key" + ))) + } else { + value + } + } + None => Err(PageReconstructError::MissingKey(Box::new( + MissingKeyError { + keyspace: KeySpace::single(key..key.next()), + shard: self.shard_identity.get_shard_number(&key), + original_hwm_lsn: lsn, + ancestor_lsn: None, + backtrace: None, + read_path: None, + query: None, + }, + ))), + } + } + pub(crate) const LAYERS_VISITED_WARN_THRESHOLD: u32 = 100; /// Look up multiple page versions at a given LSN /// /// This naive implementation will be replaced with a more efficient one /// which actually vectorizes the read path. + /// + /// NB: the read path must be cancellation-safe. The Tonic gRPC service will drop the future + /// if the client goes away (e.g. due to timeout or cancellation). pub(crate) async fn get_vectored( &self, query: VersionedKeySpaceQuery, @@ -1496,6 +1615,98 @@ impl Timeline { Ok(results) } + // A copy of the get_vectored_impl method except that we store the image and wal records into `reconstruct_state`. + // This is only used in the http getpage call for debugging purpose. 
+ pub(super) async fn debug_get_vectored_impl( + &self, + query: VersionedKeySpaceQuery, + reconstruct_state: &mut ValuesReconstructState, + ctx: &RequestContext, + ) -> Result>, GetVectoredError> { + if query.is_empty() { + return Ok(BTreeMap::default()); + } + + let read_path = if self.conf.enable_read_path_debugging || ctx.read_path_debug() { + Some(ReadPath::new( + query.total_keyspace(), + query.high_watermark_lsn()?, + )) + } else { + None + }; + + reconstruct_state.read_path = read_path; + + let traversal_res: Result<(), _> = self + .get_vectored_reconstruct_data(query.clone(), reconstruct_state, ctx) + .await; + + if let Err(err) = traversal_res { + // Wait for all the spawned IOs to complete. + // See comments on `spawn_io` inside `storage_layer` for more details. + let mut collect_futs = std::mem::take(&mut reconstruct_state.keys) + .into_values() + .map(|state| state.collect_pending_ios()) + .collect::>(); + while collect_futs.next().await.is_some() {} + return Err(err); + }; + + let reconstruct_state = Arc::new(Mutex::new(reconstruct_state)); + let futs = FuturesUnordered::new(); + + for (key, state) in std::mem::take(&mut reconstruct_state.lock().unwrap().keys) { + let req_lsn_for_key = query.map_key_to_lsn(&key); + futs.push({ + let walredo_self = self.myself.upgrade().expect("&self method holds the arc"); + let rc_clone = Arc::clone(&reconstruct_state); + + async move { + assert_eq!(state.situation, ValueReconstructSituation::Complete); + + let converted = match state.collect_pending_ios().await { + Ok(ok) => ok, + Err(err) => { + return (key, Err(err)); + } + }; + DELTAS_PER_READ_GLOBAL.observe(converted.num_deltas() as f64); + + // The walredo module expects the records to be descending in terms of Lsn. + // And we submit the IOs in that order, so, there shuold be no need to sort here. 
+ debug_assert!( + converted + .records + .is_sorted_by_key(|(lsn, _)| std::cmp::Reverse(*lsn)), + "{converted:?}" + ); + { + let mut guard = rc_clone.lock().unwrap(); + guard.set_debug_state(&converted); + } + ( + key, + walredo_self + .reconstruct_value( + key, + req_lsn_for_key, + converted, + RedoAttemptType::ReadPage, + ) + .await, + ) + } + }); + } + + let results = futs + .collect::>>() + .await; + + Ok(results) + } + /// Get last or prev record separately. Same as get_last_record_rlsn().last/prev. pub(crate) fn get_last_record_lsn(&self) -> Lsn { self.last_record_lsn.load().last @@ -1772,30 +1983,31 @@ impl Timeline { existing_lease.clone() } Entry::Vacant(vacant) => { - // Reject already GC-ed LSN if we are in AttachedSingle and - // not blocked by the lsn lease deadline. + // Never allow a lease to be requested for an LSN below the applied GC cutoff. The data could have been deleted. + let latest_gc_cutoff_lsn = self.get_applied_gc_cutoff_lsn(); + if lsn < *latest_gc_cutoff_lsn { + bail!( + "tried to request an lsn lease for an lsn below the latest gc cutoff. requested at {} gc cutoff {}", + lsn, + *latest_gc_cutoff_lsn + ); + } + + // We allow create lease for those below the planned gc cutoff if we are still within the grace period + // of GC blocking. let validate = { let conf = self.tenant_conf.load(); - conf.location.attach_mode == AttachmentMode::Single - && !conf.is_gc_blocked_by_lsn_lease_deadline() + !conf.is_gc_blocked_by_lsn_lease_deadline() }; - if init || validate { - let latest_gc_cutoff_lsn = self.get_applied_gc_cutoff_lsn(); - if lsn < *latest_gc_cutoff_lsn { - bail!( - "tried to request an lsn lease for an lsn below the latest gc cutoff. requested at {} gc cutoff {}", - lsn, - *latest_gc_cutoff_lsn - ); - } - if lsn < planned_cutoff { - bail!( - "tried to request an lsn lease for an lsn below the planned gc cutoff. 
requested at {} planned gc cutoff {}", - lsn, - planned_cutoff - ); - } + // Do not allow initial lease creation to be below the planned gc cutoff. The client (compute_ctl) determines + // whether it is a initial lease creation or a renewal. + if (init || validate) && lsn < planned_cutoff { + bail!( + "tried to request an lsn lease for an lsn below the planned gc cutoff. requested at {} planned gc cutoff {}", + lsn, + planned_cutoff + ); } let dt: DateTime = valid_until.into(); @@ -1841,6 +2053,8 @@ impl Timeline { // an ephemeral layer open forever when idle. It also freezes layers if the global limit on // ephemeral layer bytes has been breached. pub(super) async fn maybe_freeze_ephemeral_layer(&self) { + debug_assert_current_span_has_tenant_and_timeline_id(); + let Ok(mut write_guard) = self.write_lock.try_lock() else { // If the write lock is held, there is an active wal receiver: rolling open layers // is their responsibility while they hold this lock. @@ -1988,6 +2202,7 @@ impl Timeline { compact_lsn_range: None, sub_compaction: false, sub_compaction_max_job_size_mb: None, + gc_compaction_do_metadata_compaction: false, }, ctx, ) @@ -2065,22 +2280,7 @@ impl Timeline { match &result { Ok(_) => self.compaction_failed.store(false, AtomicOrdering::Relaxed), Err(e) if e.is_cancel() => {} - Err(CompactionError::ShuttingDown) => { - // Covered by the `Err(e) if e.is_cancel()` branch. - } - Err(CompactionError::AlreadyRunning(_)) => { - // Covered by the `Err(e) if e.is_cancel()` branch. - } - Err(CompactionError::Other(_)) => { - self.compaction_failed.store(true, AtomicOrdering::Relaxed) - } - Err(CompactionError::CollectKeySpaceError(_)) => { - // Cancelled errors are covered by the `Err(e) if e.is_cancel()` branch. - self.compaction_failed.store(true, AtomicOrdering::Relaxed) - } - // Don't change the current value on offload failure or shutdown. We don't want to - // abruptly stall nor resume L0 flushes in these cases. 
- Err(CompactionError::Offload(_)) => {} + Err(_) => self.compaction_failed.store(true, AtomicOrdering::Relaxed), }; result @@ -2809,6 +3009,18 @@ impl Timeline { .unwrap_or(self.conf.default_tenant_conf.image_creation_threshold) } + // HADRON + fn get_image_layer_force_creation_period(&self) -> Option { + let tenant_conf = self.tenant_conf.load(); + tenant_conf + .tenant_conf + .image_layer_force_creation_period + .or(self + .conf + .default_tenant_conf + .image_layer_force_creation_period) + } + fn get_compaction_algorithm_settings(&self) -> CompactionAlgorithmSettings { let tenant_conf = &self.tenant_conf.load(); tenant_conf @@ -3078,7 +3290,6 @@ impl Timeline { repartition_threshold: 0, last_image_layer_creation_check_at: AtomicLsn::new(0), last_image_layer_creation_check_instant: Mutex::new(None), - last_received_wal: Mutex::new(None), rel_size_latest_cache: RwLock::new(HashMap::new()), rel_size_snapshot_cache: Mutex::new(LruCache::new(relsize_snapshot_cache_capacity)), @@ -3129,7 +3340,7 @@ impl Timeline { basebackup_cache: resources.basebackup_cache, - feature_resolver: resources.feature_resolver, + feature_resolver: resources.feature_resolver.clone(), }; result.repartition_threshold = @@ -4970,7 +5181,7 @@ impl Timeline { ctx, ) .await - .map_err(|e| FlushLayerError::from_anyhow(self, e.into()))?; + .map_err(|e| FlushLayerError::from_anyhow(self, e.into_anyhow()))?; if self.cancel.is_cancelled() { return Err(FlushLayerError::Cancelled); @@ -4999,6 +5210,7 @@ impl Timeline { .create_image_layers( &partitions, self.initdb_lsn, + None, ImageLayerCreationMode::Initial, ctx, LastImageLayerCreationStatus::Initial, @@ -5220,18 +5432,18 @@ impl Timeline { partition_size: u64, flags: EnumSet, ctx: &RequestContext, - ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), CompactionError> { + ) -> Result<((KeyPartitioning, SparseKeyPartitioning), Lsn), RepartitionError> { let Ok(mut guard) = self.partitioning.try_write_guard() else { // NB: there are two 
callers, one is the compaction task, of which there is only one per struct Tenant and hence Timeline. // The other is the initdb optimization in flush_frozen_layer, used by `boostrap_timeline`, which runs before `.activate()` // and hence before the compaction task starts. - return Err(CompactionError::Other(anyhow!( + return Err(RepartitionError::Other(anyhow!( "repartition() called concurrently" ))); }; let ((dense_partition, sparse_partition), partition_lsn) = &*guard.read(); if lsn < *partition_lsn { - return Err(CompactionError::Other(anyhow!( + return Err(RepartitionError::Other(anyhow!( "repartition() called with LSN going backwards, this should not happen" ))); } @@ -5252,7 +5464,10 @@ impl Timeline { )); } - let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?; + let (dense_ks, sparse_ks) = self + .collect_keyspace(lsn, ctx) + .await + .map_err(RepartitionError::CollectKeyspace)?; let dense_partitioning = dense_ks.partition( &self.shard_identity, partition_size, @@ -5267,14 +5482,19 @@ impl Timeline { } // Is it time to create a new image layer for the given partition? True if we want to generate. 
- async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool { + async fn time_for_new_image_layer( + &self, + partition: &KeySpace, + lsn: Lsn, + force_image_creation_lsn: Option, + ) -> bool { let threshold = self.get_image_creation_threshold(); let guard = self.layers.read(LayerManagerLockHolder::Compaction).await; let Ok(layers) = guard.layer_map() else { return false; }; - + let mut min_image_lsn: Lsn = Lsn::MAX; let mut max_deltas = 0; for part_range in &partition.ranges { let image_coverage = layers.image_coverage(part_range, lsn); @@ -5309,9 +5529,25 @@ impl Timeline { return true; } } + min_image_lsn = min(min_image_lsn, img_lsn); } } + // HADRON + // for child timelines, we consider all pages up to ancestor_LSN are redone successfully by the parent timeline + min_image_lsn = min_image_lsn.max(self.get_ancestor_lsn()); + if min_image_lsn < force_image_creation_lsn.unwrap_or(Lsn(0)) && max_deltas > 0 { + info!( + "forcing image creation for partitioned range {}-{}. Min image LSN: {}, force image creation LSN: {}, num deltas: {}", + partition.ranges[0].start, + partition.ranges[0].end, + min_image_lsn, + force_image_creation_lsn.unwrap(), + max_deltas + ); + return true; + } + debug!( max_deltas, "none of the partitioned ranges had >= {threshold} deltas" @@ -5531,13 +5767,14 @@ impl Timeline { /// Predicate function which indicates whether we should check if new image layers /// are required. Since checking if new image layers are required is expensive in /// terms of CPU, we only do it in the following cases: - /// 1. If the timeline has ingested sufficient WAL to justify the cost + /// 1. If the timeline has ingested sufficient WAL to justify the cost or ... /// 2. If enough time has passed since the last check: /// 1. For large tenants, we wish to perform the check more often since they - /// suffer from the lack of image layers + /// suffer from the lack of image layers. 
Note that we assume sharded tenants + /// to be large since non-zero shards do not track the logical size. /// 2. For small tenants (that can mostly fit in RAM), we use a much longer interval fn should_check_if_image_layers_required(self: &Arc, lsn: Lsn) -> bool { - const LARGE_TENANT_THRESHOLD: u64 = 2 * 1024 * 1024 * 1024; + let large_timeline_threshold = self.conf.image_layer_generation_large_timeline_threshold; let last_checks_at = self.last_image_layer_creation_check_at.load(); let distance = lsn @@ -5548,30 +5785,39 @@ impl Timeline { let distance_based_decision = distance.0 >= min_distance; - let mut time_based_decision = false; let mut last_check_instant = self.last_image_layer_creation_check_instant.lock().unwrap(); - if let CurrentLogicalSize::Exact(logical_size) = self.current_logical_size.current_size() { - let check_required_after = if Into::::into(&logical_size) >= LARGE_TENANT_THRESHOLD - { - self.get_checkpoint_timeout() - } else { - Duration::from_secs(3600 * 48) - }; - - time_based_decision = match *last_check_instant { - Some(last_check) => { - let elapsed = last_check.elapsed(); - elapsed >= check_required_after + let check_required_after = (|| { + if self.shard_identity.is_unsharded() { + if let CurrentLogicalSize::Exact(logical_size) = + self.current_logical_size.current_size() + { + if Some(Into::::into(&logical_size)) < large_timeline_threshold { + return Duration::from_secs(3600 * 48); + } } - None => true, - }; - } + } + + self.get_checkpoint_timeout() + })(); + + let time_based_decision = match *last_check_instant { + Some(last_check) => { + let elapsed = last_check.elapsed(); + elapsed >= check_required_after + } + None => true, + }; // Do the expensive delta layer counting only if this timeline has ingested sufficient // WAL since the last check or a checkpoint timeout interval has elapsed since the last // check. let decision = distance_based_decision || time_based_decision; - + tracing::info!( + "Decided to check image layers: {}. 
Distance-based decision: {}, time-based decision: {}", + decision, + distance_based_decision, + time_based_decision + ); if decision { self.last_image_layer_creation_check_at.store(lsn); *last_check_instant = Some(Instant::now()); @@ -5584,10 +5830,12 @@ impl Timeline { /// true = we have generate all image layers, false = we preempt the process for L0 compaction. /// /// `partition_mode` is only for logging purpose and is not used anywhere in this function. + #[allow(clippy::too_many_arguments)] async fn create_image_layers( self: &Arc, partitioning: &KeyPartitioning, lsn: Lsn, + force_image_creation_lsn: Option, mode: ImageLayerCreationMode, ctx: &RequestContext, last_status: LastImageLayerCreationStatus, @@ -5691,7 +5939,11 @@ impl Timeline { } else if let ImageLayerCreationMode::Try = mode { // check_for_image_layers = false -> skip // check_for_image_layers = true -> check time_for_new_image_layer -> skip/generate - if !check_for_image_layers || !self.time_for_new_image_layer(partition, lsn).await { + if !check_for_image_layers + || !self + .time_for_new_image_layer(partition, lsn, force_image_creation_lsn) + .await + { start = img_range.end; continue; } @@ -6012,57 +6264,88 @@ impl Drop for Timeline { } } -/// Top-level failure to compact. -#[derive(Debug, thiserror::Error)] -pub(crate) enum CompactionError { - #[error("The timeline or pageserver is shutting down")] - ShuttingDown, - /// Compaction tried to offload a timeline and failed - #[error("Failed to offload timeline: {0}")] - Offload(OffloadError), - /// Compaction cannot be done right now; page reconstruction and so on. 
- #[error("Failed to collect keyspace: {0}")] - CollectKeySpaceError(#[from] CollectKeySpaceError), - #[error(transparent)] - Other(anyhow::Error), - #[error("Compaction already running: {0}")] - AlreadyRunning(&'static str), -} +pub(crate) use compaction_error::CompactionError; +/// In a private mod to enforce that [`CompactionError::is_cancel`] is used +/// instead of `match`ing on [`CompactionError::ShuttingDown`]. +mod compaction_error { + use utils::sync::gate::GateError; -impl CompactionError { - /// Errors that can be ignored, i.e., cancel and shutdown. - pub fn is_cancel(&self) -> bool { - matches!( - self, - Self::ShuttingDown - | Self::AlreadyRunning(_) - | Self::CollectKeySpaceError(CollectKeySpaceError::Cancelled) - | Self::CollectKeySpaceError(CollectKeySpaceError::PageRead( - PageReconstructError::Cancelled - )) - | Self::Offload(OffloadError::Cancelled) - ) + use crate::{ + pgdatadir_mapping::CollectKeySpaceError, + tenant::{PageReconstructError, blob_io::WriteBlobError, upload_queue::NotInitialized}, + virtual_file::owned_buffers_io::write::FlushTaskError, + }; + + /// Top-level failure to compact. Use [`Self::is_cancel`]. + #[derive(Debug, thiserror::Error)] + pub(crate) enum CompactionError { + /// Use [`Self::is_cancel`] instead of checking for this variant. + #[error("The timeline or pageserver is shutting down")] + #[allow(private_interfaces)] + ShuttingDown(ForbidMatching), // private ForbidMatching enforces use of [`Self::is_cancel`]. + #[error(transparent)] + Other(anyhow::Error), } - /// Critical errors that indicate data corruption. 
- pub fn is_critical(&self) -> bool { - matches!( - self, - Self::CollectKeySpaceError( - CollectKeySpaceError::Decode(_) - | CollectKeySpaceError::PageRead( - PageReconstructError::MissingKey(_) | PageReconstructError::WalRedo(_), - ) - ) - ) - } -} + #[derive(Debug)] + struct ForbidMatching; -impl From for CompactionError { - fn from(e: OffloadError) -> Self { - match e { - OffloadError::Cancelled => Self::ShuttingDown, - _ => Self::Offload(e), + impl CompactionError { + pub fn new_cancelled() -> Self { + Self::ShuttingDown(ForbidMatching) + } + /// Errors that can be ignored, i.e., cancel and shutdown. + pub fn is_cancel(&self) -> bool { + let other = match self { + CompactionError::ShuttingDown(_) => return true, + CompactionError::Other(other) => other, + }; + + // The write path of compaction in particular often lacks differentiated + // handling errors stemming from cancellation from other errors. + // So, if requested, we also check the ::Other variant by downcasting. + // The list below has been found empirically from flaky tests and production logs. + // The process is simple: on ::Other(), compaction will print the enclosed + // anyhow::Error in debug mode, i.e., with backtrace. That backtrace contains the + // line where the write path / compaction code does undifferentiated error handling + // from a non-anyhow type to an anyhow type. Add the type to the list of downcasts + // below, following the same is_cancel() pattern. 
+ + let root_cause = other.root_cause(); + + let upload_queue = root_cause + .downcast_ref::() + .is_some_and(|e| e.is_stopping()); + let timeline = root_cause + .downcast_ref::() + .is_some_and(|e| e.is_cancel()); + let buffered_writer_flush_task_canelled = root_cause + .downcast_ref::() + .is_some_and(|e| e.is_cancel()); + let write_blob_cancelled = root_cause + .downcast_ref::() + .is_some_and(|e| e.is_cancel()); + let gate_closed = root_cause + .downcast_ref::() + .is_some_and(|e| e.is_cancel()); + upload_queue + || timeline + || buffered_writer_flush_task_canelled + || write_blob_cancelled + || gate_closed + } + pub fn into_anyhow(self) -> anyhow::Error { + match self { + CompactionError::ShuttingDown(ForbidMatching) => anyhow::Error::new(self), + CompactionError::Other(e) => e, + } + } + pub fn from_collect_keyspace(err: CollectKeySpaceError) -> Self { + if err.is_cancel() { + Self::new_cancelled() + } else { + Self::Other(err.into_anyhow()) + } } } } @@ -6074,7 +6357,7 @@ impl From for CompactionError { CompactionError::Other(anyhow::anyhow!(value)) } super::upload_queue::NotInitialized::ShuttingDown - | super::upload_queue::NotInitialized::Stopped => CompactionError::ShuttingDown, + | super::upload_queue::NotInitialized::Stopped => CompactionError::new_cancelled(), } } } @@ -6084,7 +6367,7 @@ impl From for CompactionError { match e { super::storage_layer::layer::DownloadError::TimelineShutdown | super::storage_layer::layer::DownloadError::DownloadCancelled => { - CompactionError::ShuttingDown + CompactionError::new_cancelled() } super::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads | super::storage_layer::layer::DownloadError::DownloadRequired @@ -6103,14 +6386,14 @@ impl From for CompactionError { impl From for CompactionError { fn from(_: layer_manager::Shutdown) -> Self { - CompactionError::ShuttingDown + CompactionError::new_cancelled() } } impl From for CompactionError { fn from(e: super::storage_layer::errors::PutError) -> 
Self { if e.is_cancel() { - CompactionError::ShuttingDown + CompactionError::new_cancelled() } else { CompactionError::Other(e.into_anyhow()) } @@ -6209,7 +6492,7 @@ impl Timeline { let mut guard = tokio::select! { guard = self.layers.write(LayerManagerLockHolder::Compaction) => guard, _ = self.cancel.cancelled() => { - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } }; @@ -6765,7 +7048,7 @@ impl Timeline { } /// Reconstruct a value, using the given base image and WAL records in 'data'. - async fn reconstruct_value( + pub(crate) async fn reconstruct_value( &self, key: Key, request_lsn: Lsn, @@ -7036,6 +7319,19 @@ impl Timeline { .unwrap() .clone() } + + /* BEGIN_HADRON */ + pub(crate) async fn compute_image_consistent_lsn(&self) -> anyhow::Result { + let guard = self + .layers + .read(LayerManagerLockHolder::ComputeImageConsistentLsn) + .await; + let layer_map = guard.layer_map()?; + let disk_consistent_lsn = self.get_disk_consistent_lsn(); + + Ok(layer_map.compute_image_consistent_lsn(disk_consistent_lsn)) + } + /* END_HADRON */ } impl Timeline { diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index ac3930fb71..f76ef502dc 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -4,6 +4,7 @@ //! //! The old legacy algorithm is implemented directly in `timeline.rs`. 
+use std::cmp::min; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::ops::{Deref, Range}; use std::sync::Arc; @@ -16,7 +17,8 @@ use super::{ Timeline, }; -use crate::tenant::timeline::DeltaEntry; +use crate::pgdatadir_mapping::CollectKeySpaceError; +use crate::tenant::timeline::{DeltaEntry, RepartitionError}; use crate::walredo::RedoAttemptType; use anyhow::{Context, anyhow}; use bytes::Bytes; @@ -64,7 +66,7 @@ use crate::tenant::timeline::{ DeltaLayerWriter, ImageLayerCreationOutcome, ImageLayerWriter, IoConcurrency, Layer, ResidentLayer, drop_layer_manager_rlock, }; -use crate::tenant::{DeltaLayer, MaybeOffloaded}; +use crate::tenant::{DeltaLayer, MaybeOffloaded, PageReconstructError}; use crate::virtual_file::{MaybeFatalIo, VirtualFile}; /// Maximum number of deltas before generating an image layer in bottom-most compaction. @@ -394,6 +396,7 @@ impl GcCompactionQueue { }), compact_lsn_range: None, sub_compaction_max_job_size_mb: None, + gc_compaction_do_metadata_compaction: false, }, permit, ); @@ -510,6 +513,7 @@ impl GcCompactionQueue { compact_key_range: Some(job.compact_key_range.into()), compact_lsn_range: Some(job.compact_lsn_range.into()), sub_compaction_max_job_size_mb: None, + gc_compaction_do_metadata_compaction: false, }; pending_tasks.push(GcCompactionQueueItem::SubCompactionJob { options, @@ -571,7 +575,7 @@ impl GcCompactionQueue { } match res { Ok(res) => Ok(res), - Err(CompactionError::ShuttingDown) => Err(CompactionError::ShuttingDown), + Err(e) if e.is_cancel() => Err(e), Err(_) => { // There are some cases where traditional gc might collect some layer // files causing gc-compaction cannot read the full history of the key. @@ -591,9 +595,9 @@ impl GcCompactionQueue { timeline: &Arc, ) -> Result { let Ok(_one_op_at_a_time_guard) = self.consumer_lock.try_lock() else { - return Err(CompactionError::AlreadyRunning( - "cannot run gc-compaction because another gc-compaction is running. 
This should not happen because we only call this function from the gc-compaction queue.", - )); + return Err(CompactionError::Other(anyhow::anyhow!( + "cannot run gc-compaction because another gc-compaction is running. This should not happen because we only call this function from the gc-compaction queue." + ))); }; let has_pending_tasks; let mut yield_for_l0 = false; @@ -783,6 +787,8 @@ pub(crate) struct GcCompactJob { /// as specified here. The true range being compacted is `min_lsn/max_lsn` in [`GcCompactionJobDescription`]. /// min_lsn will always <= the lower bound specified here, and max_lsn will always >= the upper bound specified here. pub compact_lsn_range: Range, + /// See [`CompactOptions::gc_compaction_do_metadata_compaction`]. + pub do_metadata_compaction: bool, } impl GcCompactJob { @@ -797,6 +803,7 @@ impl GcCompactJob { .compact_lsn_range .map(|x| x.into()) .unwrap_or(Lsn::INVALID..Lsn::MAX), + do_metadata_compaction: options.gc_compaction_do_metadata_compaction, } } } @@ -1259,13 +1266,16 @@ impl Timeline { // Is the timeline being deleted? if self.is_stopping() { trace!("Dropping out of compaction on timeline shutdown"); - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } let target_file_size = self.get_checkpoint_distance(); // Define partitioning schema if needed + // HADRON + let force_image_creation_lsn = self.get_force_image_creation_lsn(); + // 1. L0 Compact let l0_outcome = { let timer = self.metrics.compact_time_histo.start_timer(); @@ -1273,6 +1283,7 @@ impl Timeline { .compact_level0( target_file_size, options.flags.contains(CompactFlags::ForceL0Compaction), + force_image_creation_lsn, ctx, ) .await?; @@ -1375,6 +1386,7 @@ impl Timeline { .create_image_layers( &partitioning, lsn, + force_image_creation_lsn, mode, &image_ctx, self.last_image_layer_creation_status @@ -1417,22 +1429,33 @@ impl Timeline { } // Suppress errors when cancelled. 
- Err(_) if self.cancel.is_cancelled() => {} + // + // Log other errors but continue. Failure to repartition is normal, if the timeline was just created + // as an empty timeline. Also in unit tests, when we use the timeline as a simple + // key-value store, ignoring the datadir layout. Log the error but continue. + // + // TODO: + // 1. shouldn't we return early here if we observe cancellation + // 2. Experiment: can we stop checking self.cancel here? + Err(_) if self.cancel.is_cancelled() => {} // TODO: try how we fare removing this branch Err(err) if err.is_cancel() => {} - - // Alert on critical errors that indicate data corruption. - Err(err) if err.is_critical() => { + Err(RepartitionError::CollectKeyspace( + e @ CollectKeySpaceError::Decode(_) + | e @ CollectKeySpaceError::PageRead( + PageReconstructError::MissingKey(_) | PageReconstructError::WalRedo(_), + ), + )) => { + // Alert on critical errors that indicate data corruption. critical_timeline!( self.tenant_shard_id, self.timeline_id, - "could not compact, repartitioning keyspace failed: {err:?}" + "could not compact, repartitioning keyspace failed: {e:?}" ); } - - // Log other errors. No partitioning? This is normal, if the timeline was just created - // as an empty timeline. Also in unit tests, when we use the timeline as a simple - // key-value store, ignoring the datadir layout. Log the error but continue. - Err(err) => error!("could not compact, repartitioning keyspace failed: {err:?}"), + Err(e) => error!( + "could not compact, repartitioning keyspace failed: {:?}", + e.into_anyhow() + ), }; let partition_count = self.partitioning.read().0.0.parts.len(); @@ -1460,6 +1483,41 @@ impl Timeline { Ok(CompactionOutcome::Done) } + /* BEGIN_HADRON */ + // Get the force image creation LSN based on gc_cutoff_lsn. + // Note that this is an estimation and the workload rate may suddenly change. 
When that happens, + // the force image creation may be too early or too late, but eventually it should be able to catch up. + pub(crate) fn get_force_image_creation_lsn(self: &Arc) -> Option { + let image_creation_period = self.get_image_layer_force_creation_period()?; + let current_lsn = self.get_last_record_lsn(); + let pitr_lsn = self.gc_info.read().unwrap().cutoffs.time?; + let pitr_interval = self.get_pitr_interval(); + if pitr_lsn == Lsn::INVALID || pitr_interval.is_zero() { + tracing::warn!( + "pitr LSN/interval not found, skipping force image creation LSN calculation" + ); + return None; + } + + let delta_lsn = current_lsn.checked_sub(pitr_lsn).unwrap().0 + * image_creation_period.as_secs() + / pitr_interval.as_secs(); + let force_image_creation_lsn = current_lsn.checked_sub(delta_lsn).unwrap_or(Lsn(0)); + + tracing::info!( + "Tenant shard {} computed force_image_creation_lsn: {}. Current lsn: {}, image_layer_force_creation_period: {:?}, GC cutoff: {}, PITR interval: {:?}", + self.tenant_shard_id, + force_image_creation_lsn, + current_lsn, + image_creation_period, + pitr_lsn, + pitr_interval + ); + + Some(force_image_creation_lsn) + } + /* END_HADRON */ + /// Check for layers that are elegible to be rewritten: /// - Shard splitting: After a shard split, ancestor layers beyond pitr_interval, so that /// we don't indefinitely retain keys in this shard that aren't needed. 
@@ -1612,7 +1670,7 @@ impl Timeline { for (i, layer) in layers_to_rewrite.into_iter().enumerate() { if self.cancel.is_cancelled() { - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } info!(layer=%layer, "rewriting layer after shard split: {}/{}", i, total); @@ -1710,7 +1768,7 @@ impl Timeline { Ok(()) => {}, Err(WaitCompletionError::NotInitialized(ni)) => return Err(CompactionError::from(ni)), Err(WaitCompletionError::UploadQueueShutDownOrStopped) => { - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } }, // Don't wait if there's L0 compaction to do. We don't need to update the outcome @@ -1789,6 +1847,7 @@ impl Timeline { self: &Arc, target_file_size: u64, force_compaction_ignore_threshold: bool, + force_compaction_lsn: Option, ctx: &RequestContext, ) -> Result { let CompactLevel0Phase1Result { @@ -1809,6 +1868,7 @@ impl Timeline { stats, target_file_size, force_compaction_ignore_threshold, + force_compaction_lsn, &ctx, ) .instrument(phase1_span) @@ -1831,6 +1891,7 @@ impl Timeline { mut stats: CompactLevel0Phase1StatsBuilder, target_file_size: u64, force_compaction_ignore_threshold: bool, + force_compaction_lsn: Option, ctx: &RequestContext, ) -> Result { let begin = tokio::time::Instant::now(); @@ -1860,11 +1921,28 @@ impl Timeline { return Ok(CompactLevel0Phase1Result::default()); } } else { - debug!( - level0_deltas = level0_deltas.len(), - threshold, "too few deltas to compact" - ); - return Ok(CompactLevel0Phase1Result::default()); + // HADRON + let min_lsn = level0_deltas + .iter() + .map(|a| a.get_lsn_range().start) + .reduce(min); + if force_compaction_lsn.is_some() + && min_lsn.is_some() + && min_lsn.unwrap() < force_compaction_lsn.unwrap() + { + info!( + "forcing L0 compaction of {} L0 deltas. 
Min lsn: {}, force compaction lsn: {}", + level0_deltas.len(), + min_lsn.unwrap(), + force_compaction_lsn.unwrap() + ); + } else { + debug!( + level0_deltas = level0_deltas.len(), + threshold, "too few deltas to compact" + ); + return Ok(CompactLevel0Phase1Result::default()); + } } } @@ -1973,7 +2051,7 @@ impl Timeline { let mut all_keys = Vec::new(); for l in deltas_to_compact.iter() { if self.cancel.is_cancelled() { - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } let delta = l.get_as_delta(ctx).await.map_err(CompactionError::Other)?; let keys = delta @@ -2066,7 +2144,7 @@ impl Timeline { stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now(); if self.cancel.is_cancelled() { - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } stats.read_lock_drop_micros = stats.read_lock_held_compute_holes_micros.till_now(); @@ -2174,7 +2252,7 @@ impl Timeline { // avoid hitting the cancellation token on every key. in benches, we end up // shuffling an order of million keys per layer, this means we'll check it // around tens of times per layer. - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } let same_key = prev_key == Some(key); @@ -2259,7 +2337,7 @@ impl Timeline { if writer.is_none() { if self.cancel.is_cancelled() { // to be somewhat responsive to cancellation, check for each new layer - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } // Create writer if not initiaized yet writer = Some( @@ -2515,10 +2593,13 @@ impl Timeline { // Is the timeline being deleted? 
if self.is_stopping() { trace!("Dropping out of compaction on timeline shutdown"); - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } - let (dense_ks, _sparse_ks) = self.collect_keyspace(end_lsn, ctx).await?; + let (dense_ks, _sparse_ks) = self + .collect_keyspace(end_lsn, ctx) + .await + .map_err(CompactionError::from_collect_keyspace)?; // TODO(chi): ignore sparse_keyspace for now, compact it in the future. let mut adaptor = TimelineAdaptor::new(self, (end_lsn, dense_ks)); @@ -3098,6 +3179,7 @@ impl Timeline { dry_run: job.dry_run, compact_key_range: start..end, compact_lsn_range: job.compact_lsn_range.start..compact_below_lsn, + do_metadata_compaction: false, }); current_start = Some(end); } @@ -3160,7 +3242,7 @@ impl Timeline { async fn compact_with_gc_inner( self: &Arc, cancel: &CancellationToken, - job: GcCompactJob, + mut job: GcCompactJob, ctx: &RequestContext, yield_for_l0: bool, ) -> Result { @@ -3168,13 +3250,35 @@ impl Timeline { // with legacy compaction tasks in the future. Always ensure the lock order is compaction -> gc. // Note that we already acquired the compaction lock when the outer `compact` function gets called. + // If the job is not configured to compact the metadata key range, shrink the key range + // to exclude the metadata key range. The check is done by checking if the end of the key range + // is larger than the start of the metadata key range. Note that metadata keys cover the entire + // second half of the keyspace, so it's enough to only check the end of the key range. + if !job.do_metadata_compaction + && job.compact_key_range.end > Key::metadata_key_range().start + { + tracing::info!( + "compaction for metadata key range is not supported yet, overriding compact_key_range from {} to {}", + job.compact_key_range.end, + Key::metadata_key_range().start + ); + // Shrink the key range to exclude the metadata key range. 
+ job.compact_key_range.end = Key::metadata_key_range().start; + + // Skip the job if the key range completely lies within the metadata key range. + if job.compact_key_range.start >= job.compact_key_range.end { + tracing::info!("compact_key_range is empty, skipping compaction"); + return Ok(CompactionOutcome::Done); + } + } + let timer = Instant::now(); let begin_timer = timer; let gc_lock = async { tokio::select! { guard = self.gc_lock.lock() => Ok(guard), - _ = cancel.cancelled() => Err(CompactionError::ShuttingDown), + _ = cancel.cancelled() => Err(CompactionError::new_cancelled()), } }; @@ -3447,7 +3551,7 @@ impl Timeline { } total_layer_size += layer.layer_desc().file_size; if cancel.is_cancelled() { - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } let should_yield = yield_for_l0 && self @@ -3594,7 +3698,7 @@ impl Timeline { } if cancel.is_cancelled() { - return Err(CompactionError::ShuttingDown); + return Err(CompactionError::new_cancelled()); } let should_yield = yield_for_l0 diff --git a/pageserver/src/tenant/timeline/handle.rs b/pageserver/src/tenant/timeline/handle.rs index 2dbff20ab2..7bca66190f 100644 --- a/pageserver/src/tenant/timeline/handle.rs +++ b/pageserver/src/tenant/timeline/handle.rs @@ -212,8 +212,12 @@ //! to the parent shard during a shard split. Eventually, the shard split task will //! shut down the parent => case (1). -use std::collections::{HashMap, hash_map}; -use std::sync::{Arc, Mutex, Weak}; +use std::collections::HashMap; +use std::collections::hash_map; +use std::sync::Arc; +use std::sync::Mutex; +use std::sync::Weak; +use std::time::Duration; use pageserver_api::shard::ShardIdentity; use tracing::{instrument, trace}; @@ -333,6 +337,44 @@ enum RoutingResult { } impl Cache { + /* BEGIN_HADRON */ + /// A wrapper of do_get to resolve the tenant shard for a get page request. 
+ #[instrument(level = "trace", skip_all)] + pub(crate) async fn get( + &mut self, + timeline_id: TimelineId, + shard_selector: ShardSelector, + tenant_manager: &T::TenantManager, + ) -> Result, GetError> { + const GET_MAX_RETRIES: usize = 10; + const RETRY_BACKOFF: Duration = Duration::from_millis(100); + let mut attempt = 0; + loop { + attempt += 1; + match self + .do_get(timeline_id, shard_selector, tenant_manager) + .await + { + Ok(handle) => return Ok(handle), + Err(e) => { + // Retry on tenant manager error to handle tenant split more gracefully + if attempt < GET_MAX_RETRIES { + tokio::time::sleep(RETRY_BACKOFF).await; + continue; + } else { + tracing::warn!( + "Failed to resolve tenant shard after {} attempts: {:?}", + GET_MAX_RETRIES, + e + ); + return Err(e); + } + } + } + } + } + /* END_HADRON */ + /// See module-level comment for details. /// /// Does NOT check for the shutdown state of [`Types::Timeline`]. @@ -341,7 +383,7 @@ impl Cache { /// and if so, return an error that causes the page service to /// close the connection. 
#[instrument(level = "trace", skip_all)] - pub(crate) async fn get( + async fn do_get( &mut self, timeline_id: TimelineId, shard_selector: ShardSelector, @@ -879,6 +921,7 @@ mod tests { .await .err() .expect("documented behavior: can't get new handle after shutdown"); + assert_eq!(cache.map.len(), 1, "next access cleans up the cache"); cache diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index 2eccf48579..d8d81a6c91 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -47,6 +47,7 @@ pub(crate) enum LayerManagerLockHolder { ImportPgData, DetachAncestor, Eviction, + ComputeImageConsistentLsn, #[cfg(test)] Testing, } diff --git a/pageserver/src/tenant/timeline/offload.rs b/pageserver/src/tenant/timeline/offload.rs index 9464f034c7..e9cf2e9aa7 100644 --- a/pageserver/src/tenant/timeline/offload.rs +++ b/pageserver/src/tenant/timeline/offload.rs @@ -17,8 +17,6 @@ pub(crate) enum OffloadError { Cancelled, #[error("Timeline is not archived")] NotArchived, - #[error(transparent)] - RemoteStorage(anyhow::Error), #[error("Offload or deletion already in progress")] AlreadyInProgress, #[error("Unexpected offload error: {0}")] @@ -29,7 +27,7 @@ impl From for OffloadError { fn from(e: TenantManifestError) -> Self { match e { TenantManifestError::Cancelled => Self::Cancelled, - TenantManifestError::RemoteStorage(e) => Self::RemoteStorage(e), + TenantManifestError::RemoteStorage(e) => Self::Other(e), } } } diff --git a/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs b/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs index 9b151d2449..f33f47a956 100644 --- a/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs +++ b/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs @@ -182,12 +182,19 @@ pub(super) async fn connection_manager_loop_step( } }, + // If we've not received any updates from the broker 
from a while, are waiting for WAL + // and have no safekeeper connection or connection candidates, then it might be that + // the broker subscription is wedged. Drop the current subscription and re-subscribe + // with the goal of unblocking it. _ = broker_reset_interval.tick() => { - if wait_lsn_status.borrow().is_some() { - tracing::warn!("No broker updates received for a while, but waiting for WAL. Re-setting stream ...") - } + let awaiting_lsn = wait_lsn_status.borrow().is_some(); + let no_candidates = connection_manager_state.wal_stream_candidates.is_empty(); + let no_connection = connection_manager_state.wal_connection.is_none(); - broker_subscription = subscribe_for_timeline_updates(broker_client, id, cancel).await?; + if awaiting_lsn && no_candidates && no_connection { + tracing::info!("No broker updates received for a while, but waiting for WAL. Re-setting stream ..."); + broker_subscription = subscribe_for_timeline_updates(broker_client, id, cancel).await?; + } }, new_event = async { diff --git a/pageserver/src/utilization.rs b/pageserver/src/utilization.rs index 29d1a31aaf..0dafa5c4bb 100644 --- a/pageserver/src/utilization.rs +++ b/pageserver/src/utilization.rs @@ -1,6 +1,6 @@ //! An utilization metric which is used to decide on which pageserver to put next tenant. //! -//! The metric is exposed via `GET /v1/utilization`. Refer and maintain it's openapi spec as the +//! The metric is exposed via `GET /v1/utilization`. Refer and maintain its openapi spec as the //! truth. 
use std::path::Path; @@ -45,9 +45,10 @@ pub(crate) fn regenerate( let (disk_wanted_bytes, shard_count) = tenant_manager.calculate_utilization()?; // Fetch the fraction of disk space which may be used - let disk_usable_pct = match conf.disk_usage_based_eviction.clone() { - Some(e) => e.max_usage_pct, - None => Percent::new(100).unwrap(), + let disk_usable_pct = if conf.disk_usage_based_eviction.enabled { + conf.disk_usage_based_eviction.max_usage_pct + } else { + Percent::new(100).unwrap() }; // Express a static value for how many shards we may schedule on one node diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index f852051178..3acf98b020 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -32,9 +32,10 @@ use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; use postgres_ffi::walrecord::*; use postgres_ffi::{ - PgMajorVersion, TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion, - enum_pgversion_dispatch, fsm_logical_to_physical, pg_constants, + PgMajorVersion, TransactionId, dispatch_pgversion, enum_pgversion, enum_pgversion_dispatch, + fsm_logical_to_physical, pg_constants, }; +use postgres_ffi_types::TimestampTz; use postgres_ffi_types::forknum::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM}; use tracing::*; use utils::bin_ser::{DeserializeError, SerializeError}; @@ -1069,7 +1070,7 @@ impl WalIngest { // NB: In PostgreSQL, the next-multi-xid stored in the control file is allowed to // go to 0, and it's fixed up by skipping to FirstMultiXactId in functions that // read it, like GetNewMultiXactId(). This is different from how nextXid is - // incremented! nextXid skips over < FirstNormalTransactionId when the the value + // incremented! nextXid skips over < FirstNormalTransactionId when the value // is stored, so it's never 0 in a checkpoint. 
// // I don't know why it's done that way, it seems less error-prone to skip over 0 diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index b17b5a15f9..f053c9ed37 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -147,6 +147,16 @@ pub enum RedoAttemptType { GcCompaction, } +impl std::fmt::Display for RedoAttemptType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RedoAttemptType::ReadPage => write!(f, "read page"), + RedoAttemptType::LegacyCompaction => write!(f, "legacy compaction"), + RedoAttemptType::GcCompaction => write!(f, "gc compaction"), + } + } +} + /// /// Public interface of WAL redo manager /// @@ -199,6 +209,7 @@ impl PostgresRedoManager { self.conf.wal_redo_timeout, pg_version, max_retry_attempts, + redo_attempt_type, ) .await }; @@ -221,6 +232,7 @@ impl PostgresRedoManager { self.conf.wal_redo_timeout, pg_version, max_retry_attempts, + redo_attempt_type, ) .await } @@ -445,6 +457,7 @@ impl PostgresRedoManager { wal_redo_timeout: Duration, pg_version: PgMajorVersion, max_retry_attempts: u32, + redo_attempt_type: RedoAttemptType, ) -> Result { *(self.last_redo_at.lock().unwrap()) = Some(Instant::now()); @@ -485,17 +498,28 @@ impl PostgresRedoManager { ); if let Err(e) = result.as_ref() { - error!( - "error applying {} WAL records {}..{} ({} bytes) to key {key}, from base image with LSN {} to reconstruct page image at LSN {} n_attempts={}: {:?}", - records.len(), - records.first().map(|p| p.0).unwrap_or(Lsn(0)), - records.last().map(|p| p.0).unwrap_or(Lsn(0)), - nbytes, - base_img_lsn, - lsn, - n_attempts, - e, - ); + macro_rules! 
message { + ($level:tt) => { + $level!( + "error applying {} WAL records {}..{} ({} bytes) to key {} during {}, from base image with LSN {} to reconstruct page image at LSN {} n_attempts={}: {:?}", + records.len(), + records.first().map(|p| p.0).unwrap_or(Lsn(0)), + records.last().map(|p| p.0).unwrap_or(Lsn(0)), + nbytes, + key, + redo_attempt_type, + base_img_lsn, + lsn, + n_attempts, + e, + ) + } + } + match redo_attempt_type { + RedoAttemptType::ReadPage => message!(error), + RedoAttemptType::LegacyCompaction => message!(error), + RedoAttemptType::GcCompaction => message!(warn), + } } result.map_err(Error::Other) @@ -566,22 +590,55 @@ impl PostgresRedoManager { } } +#[cfg(test)] +pub(crate) mod harness { + use super::PostgresRedoManager; + use crate::config::PageServerConf; + use utils::{id::TenantId, shard::TenantShardId}; + + pub struct RedoHarness { + // underscored because unused, except for removal at drop + _repo_dir: camino_tempfile::Utf8TempDir, + pub manager: PostgresRedoManager, + tenant_shard_id: TenantShardId, + } + + impl RedoHarness { + pub fn new() -> anyhow::Result { + crate::tenant::harness::setup_logging(); + + let repo_dir = camino_tempfile::tempdir()?; + let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf()); + let conf = Box::leak(Box::new(conf)); + let tenant_shard_id = TenantShardId::unsharded(TenantId::generate()); + + let manager = PostgresRedoManager::new(conf, tenant_shard_id); + + Ok(RedoHarness { + _repo_dir: repo_dir, + manager, + tenant_shard_id, + }) + } + pub fn span(&self) -> tracing::Span { + tracing::info_span!("RedoHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()) + } + } +} + #[cfg(test)] mod tests { use std::str::FromStr; use bytes::Bytes; use pageserver_api::key::Key; - use pageserver_api::shard::TenantShardId; use postgres_ffi::PgMajorVersion; use tracing::Instrument; - use utils::id::TenantId; use utils::lsn::Lsn; use 
wal_decoder::models::record::NeonWalRecord; - use super::PostgresRedoManager; - use crate::config::PageServerConf; use crate::walredo::RedoAttemptType; + use crate::walredo::harness::RedoHarness; #[tokio::test] async fn test_ping() { @@ -692,33 +749,4 @@ mod tests { ) ] } - - struct RedoHarness { - // underscored because unused, except for removal at drop - _repo_dir: camino_tempfile::Utf8TempDir, - manager: PostgresRedoManager, - tenant_shard_id: TenantShardId, - } - - impl RedoHarness { - fn new() -> anyhow::Result { - crate::tenant::harness::setup_logging(); - - let repo_dir = camino_tempfile::tempdir()?; - let conf = PageServerConf::dummy_conf(repo_dir.path().to_path_buf()); - let conf = Box::leak(Box::new(conf)); - let tenant_shard_id = TenantShardId::unsharded(TenantId::generate()); - - let manager = PostgresRedoManager::new(conf, tenant_shard_id); - - Ok(RedoHarness { - _repo_dir: repo_dir, - manager, - tenant_shard_id, - }) - } - fn span(&self) -> tracing::Span { - tracing::info_span!("RedoHarness", tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()) - } - } } diff --git a/pgxn/neon/Makefile b/pgxn/neon/Makefile index 322ab039f5..3ea7a946cf 100644 --- a/pgxn/neon/Makefile +++ b/pgxn/neon/Makefile @@ -9,6 +9,7 @@ OBJS = \ extension_server.o \ file_cache.o \ hll.o \ + lfc_prewarm.o \ libpagestore.o \ logical_replication_monitor.o \ neon.o \ diff --git a/pgxn/neon/communicator.c b/pgxn/neon/communicator.c index 7c84be7d15..158b8940a3 100644 --- a/pgxn/neon/communicator.c +++ b/pgxn/neon/communicator.c @@ -65,6 +65,7 @@ #include "port/pg_iovec.h" #include "postmaster/interrupt.h" #include "replication/walsender.h" +#include "storage/ipc.h" #include "utils/timeout.h" #include "bitmap.h" @@ -412,6 +413,47 @@ compact_prefetch_buffers(void) return false; } +/* + * Check that prefetch response matches the slot + */ +static void +check_getpage_response(PrefetchRequest* slot, NeonResponse* resp) +{ + if (resp->tag != 
T_NeonGetPageResponse && resp->tag != T_NeonErrorResponse) + { + neon_shard_log(slot->shard_no, PANIC, "Unexpected prefetch response %d, ring_receive=" UINT64_FORMAT ", ring_flush=" UINT64_FORMAT ", ring_unused=" UINT64_FORMAT "", + resp->tag, MyPState->ring_receive, MyPState->ring_flush, MyPState->ring_unused); + } + if (neon_protocol_version >= 3) + { + NRelFileInfo rinfo = BufTagGetNRelFileInfo(slot->buftag); + if (resp->tag == T_NeonGetPageResponse) + { + NeonGetPageResponse * getpage_resp = (NeonGetPageResponse *)resp; + if (resp->reqid != slot->reqid || + resp->lsn != slot->request_lsns.request_lsn || + resp->not_modified_since != slot->request_lsns.not_modified_since || + !RelFileInfoEquals(getpage_resp->req.rinfo, rinfo) || + getpage_resp->req.forknum != slot->buftag.forkNum || + getpage_resp->req.blkno != slot->buftag.blockNum) + { + NEON_PANIC_CONNECTION_STATE(slot->shard_no, PANIC, + "Receive unexpected getpage response {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u, block=%u} to get page request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u, block=%u}", + resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), RelFileInfoFmt(getpage_resp->req.rinfo), getpage_resp->req.forknum, getpage_resp->req.blkno, + slot->reqid, LSN_FORMAT_ARGS(slot->request_lsns.request_lsn), LSN_FORMAT_ARGS(slot->request_lsns.not_modified_since), RelFileInfoFmt(rinfo), slot->buftag.forkNum, slot->buftag.blockNum); + } + } + else if (resp->reqid != slot->reqid || + resp->lsn != slot->request_lsns.request_lsn || + resp->not_modified_since != slot->request_lsns.not_modified_since) + { + elog(WARNING, NEON_TAG "Error message {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X} doesn't match exists request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X}", + resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), + slot->reqid, 
LSN_FORMAT_ARGS(slot->request_lsns.request_lsn), LSN_FORMAT_ARGS(slot->request_lsns.not_modified_since)); + } + } +} + /* * If there might be responses still in the TCP buffer, then we should try to * use those, to reduce any TCP backpressure on the OS/PS side. @@ -446,15 +488,18 @@ communicator_prefetch_pump_state(void) if (response == NULL) break; + check_getpage_response(slot, response); + /* The slot should still be valid */ if (slot->status != PRFS_REQUESTED || slot->response != NULL || slot->my_ring_index != MyPState->ring_receive) - neon_shard_log(slot->shard_no, ERROR, - "Incorrect prefetch slot state after receive: status=%d response=%p my=%lu receive=%lu", + { + neon_shard_log(slot->shard_no, PANIC, + "Incorrect prefetch slot state after receive: status=%d response=%p my=" UINT64_FORMAT " receive=" UINT64_FORMAT "", slot->status, slot->response, - (long) slot->my_ring_index, (long) MyPState->ring_receive); - + slot->my_ring_index, MyPState->ring_receive); + } /* update prefetch state */ MyPState->n_responses_buffered += 1; MyPState->n_requests_inflight -= 1; @@ -593,6 +638,21 @@ readahead_buffer_resize(int newsize, void *extra) } +/* + * Callback to be called on backend exit to ensure correct state of compute-PS communication + * in case of backend cancel + */ +static void +prefetch_on_exit(int code, Datum arg) +{ + if (code != 0) /* do disconnect only on abnormal backend termination */ + { + shardno_t shard_no = DatumGetInt32(arg); + prefetch_on_ps_disconnect(); + page_server->disconnect(shard_no); + } +} + /* * Make sure that there are no responses still in the buffer. @@ -605,6 +665,11 @@ consume_prefetch_responses(void) { if (MyPState->ring_receive < MyPState->ring_unused) prefetch_wait_for(MyPState->ring_unused - 1); + /* + * We know for sure we're not working on any prefetch pages after + * this. 
+ */ + END_PREFETCH_RECEIVE_WORK(); } static void @@ -722,10 +787,12 @@ prefetch_read(PrefetchRequest *slot) if (slot->status != PRFS_REQUESTED || slot->response != NULL || slot->my_ring_index != MyPState->ring_receive) - neon_shard_log(slot->shard_no, ERROR, - "Incorrect prefetch read: status=%d response=%p my=%lu receive=%lu", + { + neon_shard_log(slot->shard_no, PANIC, + "Incorrect prefetch read: status=%d response=%p my=" UINT64_FORMAT " receive=" UINT64_FORMAT "", slot->status, slot->response, - (long)slot->my_ring_index, (long)MyPState->ring_receive); + slot->my_ring_index, MyPState->ring_receive); + } /* * Copy the request info so that if an error happens and the prefetch @@ -741,14 +808,18 @@ prefetch_read(PrefetchRequest *slot) MemoryContextSwitchTo(old); if (response) { + check_getpage_response(slot, response); + /* The slot should still be valid */ if (slot->status != PRFS_REQUESTED || slot->response != NULL || slot->my_ring_index != MyPState->ring_receive) - neon_shard_log(shard_no, ERROR, - "Incorrect prefetch slot state after receive: status=%d response=%p my=%lu receive=%lu", + { + neon_shard_log(shard_no, PANIC, + "Incorrect prefetch slot state after receive: status=%d response=%p my=" UINT64_FORMAT " receive=" UINT64_FORMAT "", slot->status, slot->response, - (long) slot->my_ring_index, (long) MyPState->ring_receive); + slot->my_ring_index, MyPState->ring_receive); + } /* update prefetch state */ MyPState->n_responses_buffered += 1; @@ -781,8 +852,8 @@ prefetch_read(PrefetchRequest *slot) * and the prefetch queue was flushed during the receive call */ neon_shard_log(shard_no, LOG, - "No response from reading prefetch entry %lu: %u/%u/%u.%u block %u. This can be caused by a concurrent disconnect", - (long) my_ring_index, + "No response from reading prefetch entry " UINT64_FORMAT ": %u/%u/%u.%u block %u. 
This can be caused by a concurrent disconnect", + my_ring_index, RelFileInfoFmt(BufTagGetNRelFileInfo(buftag)), buftag.forkNum, buftag.blockNum); return false; @@ -820,11 +891,10 @@ communicator_prefetch_receive(BufferTag tag) void prefetch_on_ps_disconnect(void) { - bool save_readpage_reentrant_guard = readpage_reentrant_guard; MyPState->ring_flush = MyPState->ring_unused; - /* Prohibit callig of prefetch_pump_state */ - START_PREFETCH_RECEIVE_WORK(); + /* Nothing should cancel disconnect: we should not leave connection in opaque state */ + HOLD_INTERRUPTS(); while (MyPState->ring_receive < MyPState->ring_unused) { @@ -854,9 +924,6 @@ prefetch_on_ps_disconnect(void) MyNeonCounters->getpage_prefetch_discards_total += 1; } - /* Restore guard */ - readpage_reentrant_guard = save_readpage_reentrant_guard; - /* * We can have gone into retry due to network error, so update stats with * the latest available @@ -865,6 +932,8 @@ prefetch_on_ps_disconnect(void) MyPState->n_requests_inflight; MyNeonCounters->getpage_prefetches_buffered = MyPState->n_responses_buffered; + + RESUME_INTERRUPTS(); } /* @@ -1027,16 +1096,11 @@ communicator_prefetch_lookupv(NRelFileInfo rinfo, ForkNumber forknum, BlockNumbe /* * Ignore errors */ - if (slot->response->tag != T_NeonGetPageResponse) + if (slot->response->tag == T_NeonErrorResponse) { - if (slot->response->tag != T_NeonErrorResponse) - { - NEON_PANIC_CONNECTION_STATE(slot->shard_no, PANIC, - "Expected GetPage (0x%02x) or Error (0x%02x) response to GetPageRequest, but got 0x%02x", - T_NeonGetPageResponse, T_NeonErrorResponse, slot->response->tag); - } continue; } + Assert(slot->response->tag == T_NeonGetPageResponse); /* checked by check_getpage_response when response was assigned to the slot */ memcpy(buffers[i], ((NeonGetPageResponse*)slot->response)->page, BLCKSZ); @@ -1351,7 +1415,7 @@ equal_requests(NeonRequest* a, NeonRequest* b) static NeonResponse * page_server_request(void const *req) { - NeonResponse *resp; + NeonResponse 
*resp = NULL; BufferTag tag = {0}; shardno_t shard_no; @@ -1371,7 +1435,7 @@ page_server_request(void const *req) tag.blockNum = ((NeonGetPageRequest *) req)->blkno; break; default: - neon_log(ERROR, "Unexpected request tag: %d", messageTag(req)); + neon_log(PANIC, "Unexpected request tag: %d", messageTag(req)); } shard_no = get_shard_number(&tag); @@ -1384,9 +1448,12 @@ page_server_request(void const *req) shard_no = 0; } - do + consume_prefetch_responses(); + + PG_TRY(); { - PG_TRY(); + before_shmem_exit(prefetch_on_exit, Int32GetDatum(shard_no)); + do { while (!page_server->send(shard_no, (NeonRequest *) req) || !page_server->flush(shard_no)) @@ -1394,30 +1461,24 @@ page_server_request(void const *req) /* do nothing */ } MyNeonCounters->pageserver_open_requests++; - consume_prefetch_responses(); resp = page_server->receive(shard_no); MyNeonCounters->pageserver_open_requests--; - } - PG_CATCH(); - { - /* - * Cancellation in this code needs to be handled better at some - * point, but this currently seems fine for now. - */ - page_server->disconnect(shard_no); - MyNeonCounters->pageserver_open_requests = 0; + } while (resp == NULL); + cancel_before_shmem_exit(prefetch_on_exit, Int32GetDatum(shard_no)); + } + PG_CATCH(); + { + cancel_before_shmem_exit(prefetch_on_exit, Int32GetDatum(shard_no)); + /* Nothing should cancel disconnect: we should not leave connection in opaque state */ + HOLD_INTERRUPTS(); + page_server->disconnect(shard_no); + MyNeonCounters->pageserver_open_requests = 0; + RESUME_INTERRUPTS(); - /* - * We know for sure we're not working on any prefetch pages after - * this. 
- */ - END_PREFETCH_RECEIVE_WORK(); + PG_RE_THROW(); + } + PG_END_TRY(); - PG_RE_THROW(); - } - PG_END_TRY(); - - } while (resp == NULL); return resp; } @@ -1502,7 +1563,7 @@ nm_pack_request(NeonRequest *msg) case T_NeonDbSizeResponse: case T_NeonGetSlruSegmentResponse: default: - neon_log(ERROR, "unexpected neon message tag 0x%02x", msg->tag); + neon_log(PANIC, "unexpected neon message tag 0x%02x", msg->tag); break; } return s; @@ -1654,7 +1715,7 @@ nm_unpack_response(StringInfo s) case T_NeonDbSizeRequest: case T_NeonGetSlruSegmentRequest: default: - neon_log(ERROR, "unexpected neon message tag 0x%02x", tag); + neon_log(PANIC, "unexpected neon message tag 0x%02x", tag); break; } @@ -1783,7 +1844,7 @@ nm_to_string(NeonMessage *msg) NeonDbSizeResponse *msg_resp = (NeonDbSizeResponse *) msg; appendStringInfoString(&s, "{\"type\": \"NeonDbSizeResponse\""); - appendStringInfo(&s, ", \"db_size\": %ld}", + appendStringInfo(&s, ", \"db_size\": " INT64_FORMAT "}", msg_resp->db_size); appendStringInfoChar(&s, '}'); @@ -1983,8 +2044,8 @@ communicator_exists(NRelFileInfo rinfo, ForkNumber forkNum, neon_request_lsns *r !RelFileInfoEquals(exists_resp->req.rinfo, request.rinfo) || exists_resp->req.forknum != request.forknum) { - NEON_PANIC_CONNECTION_STATE(-1, PANIC, - "Unexpect response {reqid=%lx,lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u} to exits request {reqid=%lx,lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u}", + NEON_PANIC_CONNECTION_STATE(0, PANIC, + "Unexpect response {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u} to exits request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u}", resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), RelFileInfoFmt(exists_resp->req.rinfo), exists_resp->req.forknum, request.hdr.reqid, LSN_FORMAT_ARGS(request.hdr.lsn), LSN_FORMAT_ARGS(request.hdr.not_modified_since), RelFileInfoFmt(request.rinfo), request.forknum); } @@ -1997,14 +2058,14 @@ 
communicator_exists(NRelFileInfo rinfo, ForkNumber forkNum, neon_request_lsns *r { if (!equal_requests(resp, &request.hdr)) { - elog(WARNING, NEON_TAG "Error message {reqid=%lx,lsn=%X/%08X, since=%X/%08X} doesn't match exists request {reqid=%lx,lsn=%X/%08X, since=%X/%08X}", + elog(WARNING, NEON_TAG "Error message {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X} doesn't match exists request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X}", resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), request.hdr.reqid, LSN_FORMAT_ARGS(request.hdr.lsn), LSN_FORMAT_ARGS(request.hdr.not_modified_since)); } } ereport(ERROR, (errcode(ERRCODE_IO_ERROR), - errmsg(NEON_TAG "[reqid %lx] could not read relation existence of rel %u/%u/%u.%u from page server at lsn %X/%08X", + errmsg(NEON_TAG "[reqid " UINT64_HEX_FORMAT "] could not read relation existence of rel %u/%u/%u.%u from page server at lsn %X/%08X", resp->reqid, RelFileInfoFmt(rinfo), forkNum, @@ -2014,7 +2075,7 @@ communicator_exists(NRelFileInfo rinfo, ForkNumber forkNum, neon_request_lsns *r break; default: - NEON_PANIC_CONNECTION_STATE(-1, PANIC, + NEON_PANIC_CONNECTION_STATE(0, PANIC, "Expected Exists (0x%02x) or Error (0x%02x) response to ExistsRequest, but got 0x%02x", T_NeonExistsResponse, T_NeonErrorResponse, resp->tag); } @@ -2158,6 +2219,7 @@ Retry: Assert(memcmp(&hashkey.buftag, &slot->buftag, sizeof(BufferTag)) == 0); Assert(hashkey.buftag.blockNum == base_blockno + i); + /* We already checked that response match request when storing it in slot */ resp = slot->response; switch (resp->tag) @@ -2165,21 +2227,6 @@ Retry: case T_NeonGetPageResponse: { NeonGetPageResponse* getpage_resp = (NeonGetPageResponse *) resp; - if (neon_protocol_version >= 3) - { - if (resp->reqid != slot->reqid || - resp->lsn != slot->request_lsns.request_lsn || - resp->not_modified_since != slot->request_lsns.not_modified_since || - !RelFileInfoEquals(getpage_resp->req.rinfo, rinfo) || - 
getpage_resp->req.forknum != forkNum || - getpage_resp->req.blkno != base_blockno + i) - { - NEON_PANIC_CONNECTION_STATE(-1, PANIC, - "Unexpect response {reqid=%lx,lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u, block=%u} to get page request {reqid=%lx,lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u, block=%u}", - resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), RelFileInfoFmt(getpage_resp->req.rinfo), getpage_resp->req.forknum, getpage_resp->req.blkno, - slot->reqid, LSN_FORMAT_ARGS(slot->request_lsns.request_lsn), LSN_FORMAT_ARGS(slot->request_lsns.not_modified_since), RelFileInfoFmt(rinfo), forkNum, base_blockno + i); - } - } memcpy(buffer, getpage_resp->page, BLCKSZ); /* @@ -2192,20 +2239,9 @@ Retry: break; } case T_NeonErrorResponse: - if (neon_protocol_version >= 3) - { - if (resp->reqid != slot->reqid || - resp->lsn != slot->request_lsns.request_lsn || - resp->not_modified_since != slot->request_lsns.not_modified_since) - { - elog(WARNING, NEON_TAG "Error message {reqid=%lx,lsn=%X/%08X, since=%X/%08X} doesn't match get relsize request {reqid=%lx,lsn=%X/%08X, since=%X/%08X}", - resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), - slot->reqid, LSN_FORMAT_ARGS(slot->request_lsns.request_lsn), LSN_FORMAT_ARGS(slot->request_lsns.not_modified_since)); - } - } ereport(ERROR, (errcode(ERRCODE_IO_ERROR), - errmsg(NEON_TAG "[shard %d, reqid %lx] could not read block %u in rel %u/%u/%u.%u from page server at lsn %X/%08X", + errmsg(NEON_TAG "[shard %d, reqid " UINT64_HEX_FORMAT "] could not read block %u in rel %u/%u/%u.%u from page server at lsn %X/%08X", slot->shard_no, resp->reqid, blockno, RelFileInfoFmt(rinfo), forkNum, LSN_FORMAT_ARGS(reqlsns->effective_request_lsn)), errdetail("page server returned error: %s", @@ -2257,8 +2293,8 @@ communicator_nblocks(NRelFileInfo rinfo, ForkNumber forknum, neon_request_lsns * !RelFileInfoEquals(relsize_resp->req.rinfo, request.rinfo) || relsize_resp->req.forknum != 
forknum) { - NEON_PANIC_CONNECTION_STATE(-1, PANIC, - "Unexpect response {reqid=%lx,lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u} to get relsize request {reqid=%lx,lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u}", + NEON_PANIC_CONNECTION_STATE(0, PANIC, + "Unexpect response {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u} to get relsize request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, rel=%u/%u/%u.%u}", resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), RelFileInfoFmt(relsize_resp->req.rinfo), relsize_resp->req.forknum, request.hdr.reqid, LSN_FORMAT_ARGS(request.hdr.lsn), LSN_FORMAT_ARGS(request.hdr.not_modified_since), RelFileInfoFmt(request.rinfo), forknum); } @@ -2271,14 +2307,14 @@ communicator_nblocks(NRelFileInfo rinfo, ForkNumber forknum, neon_request_lsns * { if (!equal_requests(resp, &request.hdr)) { - elog(WARNING, NEON_TAG "Error message {reqid=%lx,lsn=%X/%08X, since=%X/%08X} doesn't match get relsize request {reqid=%lx,lsn=%X/%08X, since=%X/%08X}", + elog(WARNING, NEON_TAG "Error message {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X} doesn't match get relsize request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X}", resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), request.hdr.reqid, LSN_FORMAT_ARGS(request.hdr.lsn), LSN_FORMAT_ARGS(request.hdr.not_modified_since)); } } ereport(ERROR, (errcode(ERRCODE_IO_ERROR), - errmsg(NEON_TAG "[reqid %lx] could not read relation size of rel %u/%u/%u.%u from page server at lsn %X/%08X", + errmsg(NEON_TAG "[reqid " UINT64_HEX_FORMAT "] could not read relation size of rel %u/%u/%u.%u from page server at lsn %X/%08X", resp->reqid, RelFileInfoFmt(rinfo), forknum, @@ -2288,7 +2324,7 @@ communicator_nblocks(NRelFileInfo rinfo, ForkNumber forknum, neon_request_lsns * break; default: - NEON_PANIC_CONNECTION_STATE(-1, PANIC, + NEON_PANIC_CONNECTION_STATE(0, PANIC, "Expected Nblocks (0x%02x) or Error 
(0x%02x) response to NblocksRequest, but got 0x%02x", T_NeonNblocksResponse, T_NeonErrorResponse, resp->tag); } @@ -2327,8 +2363,8 @@ communicator_dbsize(Oid dbNode, neon_request_lsns *request_lsns) if (!equal_requests(resp, &request.hdr) || dbsize_resp->req.dbNode != dbNode) { - NEON_PANIC_CONNECTION_STATE(-1, PANIC, - "Unexpect response {reqid=%lx,lsn=%X/%08X, since=%X/%08X, dbNode=%u} to get DB size request {reqid=%lx,lsn=%X/%08X, since=%X/%08X, dbNode=%u}", + NEON_PANIC_CONNECTION_STATE(0, PANIC, + "Unexpect response {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, dbNode=%u} to get DB size request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, dbNode=%u}", resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), dbsize_resp->req.dbNode, request.hdr.reqid, LSN_FORMAT_ARGS(request.hdr.lsn), LSN_FORMAT_ARGS(request.hdr.not_modified_since), dbNode); } @@ -2341,14 +2377,14 @@ communicator_dbsize(Oid dbNode, neon_request_lsns *request_lsns) { if (!equal_requests(resp, &request.hdr)) { - elog(WARNING, NEON_TAG "Error message {reqid=%lx,lsn=%X/%08X, since=%X/%08X} doesn't match get DB size request {reqid=%lx,lsn=%X/%08X, since=%X/%08X}", + elog(WARNING, NEON_TAG "Error message {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X} doesn't match get DB size request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X}", resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), request.hdr.reqid, LSN_FORMAT_ARGS(request.hdr.lsn), LSN_FORMAT_ARGS(request.hdr.not_modified_since)); } } ereport(ERROR, (errcode(ERRCODE_IO_ERROR), - errmsg(NEON_TAG "[reqid %lx] could not read db size of db %u from page server at lsn %X/%08X", + errmsg(NEON_TAG "[reqid " UINT64_HEX_FORMAT "] could not read db size of db %u from page server at lsn %X/%08X", resp->reqid, dbNode, LSN_FORMAT_ARGS(request_lsns->effective_request_lsn)), errdetail("page server returned error: %s", @@ -2356,7 +2392,7 @@ communicator_dbsize(Oid 
dbNode, neon_request_lsns *request_lsns) break; default: - NEON_PANIC_CONNECTION_STATE(-1, PANIC, + NEON_PANIC_CONNECTION_STATE(0, PANIC, "Expected DbSize (0x%02x) or Error (0x%02x) response to DbSizeRequest, but got 0x%02x", T_NeonDbSizeResponse, T_NeonErrorResponse, resp->tag); } @@ -2372,7 +2408,7 @@ communicator_read_slru_segment(SlruKind kind, int64 segno, neon_request_lsns *re { int n_blocks; shardno_t shard_no = 0; /* All SLRUs are at shard 0 */ - NeonResponse *resp; + NeonResponse *resp = NULL; NeonGetSlruSegmentRequest request; request = (NeonGetSlruSegmentRequest) { @@ -2383,14 +2419,29 @@ communicator_read_slru_segment(SlruKind kind, int64 segno, neon_request_lsns *re .segno = segno }; - do + consume_prefetch_responses(); + + PG_TRY(); { - while (!page_server->send(shard_no, &request.hdr) || !page_server->flush(shard_no)); + before_shmem_exit(prefetch_on_exit, Int32GetDatum(shard_no)); + do + { + while (!page_server->send(shard_no, &request.hdr) || !page_server->flush(shard_no)); + resp = page_server->receive(shard_no); + } while (resp == NULL); + cancel_before_shmem_exit(prefetch_on_exit, Int32GetDatum(shard_no)); + } + PG_CATCH(); + { + cancel_before_shmem_exit(prefetch_on_exit, Int32GetDatum(shard_no)); + /* Nothing should cancel disconnect: we should not leave connection in opaque state */ + HOLD_INTERRUPTS(); + page_server->disconnect(shard_no); + RESUME_INTERRUPTS(); - consume_prefetch_responses(); - - resp = page_server->receive(shard_no); - } while (resp == NULL); + PG_RE_THROW(); + } + PG_END_TRY(); switch (resp->tag) { @@ -2403,8 +2454,8 @@ communicator_read_slru_segment(SlruKind kind, int64 segno, neon_request_lsns *re slru_resp->req.kind != kind || slru_resp->req.segno != segno) { - NEON_PANIC_CONNECTION_STATE(-1, PANIC, - "Unexpect response {reqid=%lx,lsn=%X/%08X, since=%X/%08X, kind=%u, segno=%u} to get SLRU segment request {reqid=%lx,lsn=%X/%08X, since=%X/%08X, kind=%u, segno=%lluu}", + NEON_PANIC_CONNECTION_STATE(0, PANIC, + "Unexpect 
response {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, kind=%u, segno=%u} to get SLRU segment request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X, kind=%u, segno=%lluu}", resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), slru_resp->req.kind, slru_resp->req.segno, request.hdr.reqid, LSN_FORMAT_ARGS(request.hdr.lsn), LSN_FORMAT_ARGS(request.hdr.not_modified_since), kind, (unsigned long long) segno); } @@ -2418,14 +2469,14 @@ communicator_read_slru_segment(SlruKind kind, int64 segno, neon_request_lsns *re { if (!equal_requests(resp, &request.hdr)) { - elog(WARNING, NEON_TAG "Error message {reqid=%lx,lsn=%X/%08X, since=%X/%08X} doesn't match get SLRU segment request {reqid=%lx,lsn=%X/%08X, since=%X/%08X}", + elog(WARNING, NEON_TAG "Error message {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X} doesn't match get SLRU segment request {reqid=" UINT64_HEX_FORMAT ",lsn=%X/%08X, since=%X/%08X}", resp->reqid, LSN_FORMAT_ARGS(resp->lsn), LSN_FORMAT_ARGS(resp->not_modified_since), request.hdr.reqid, LSN_FORMAT_ARGS(request.hdr.lsn), LSN_FORMAT_ARGS(request.hdr.not_modified_since)); } } ereport(ERROR, (errcode(ERRCODE_IO_ERROR), - errmsg(NEON_TAG "[reqid %lx] could not read SLRU %d segment %llu at lsn %X/%08X", + errmsg(NEON_TAG "[reqid " UINT64_HEX_FORMAT "] could not read SLRU %d segment %llu at lsn %X/%08X", resp->reqid, kind, (unsigned long long) segno, @@ -2435,7 +2486,7 @@ communicator_read_slru_segment(SlruKind kind, int64 segno, neon_request_lsns *re break; default: - NEON_PANIC_CONNECTION_STATE(-1, PANIC, + NEON_PANIC_CONNECTION_STATE(0, PANIC, "Expected GetSlruSegment (0x%02x) or Error (0x%02x) response to GetSlruSegmentRequest, but got 0x%02x", T_NeonGetSlruSegmentResponse, T_NeonErrorResponse, resp->tag); } diff --git a/pgxn/neon/communicator/README.md b/pgxn/neon/communicator/README.md index 8887a01cbc..a18f64c9f6 100644 --- a/pgxn/neon/communicator/README.md +++ b/pgxn/neon/communicator/README.md @@ 
-49,21 +49,36 @@ slots are statically allocated for each backend, and must not be accessed by other backends. The worker process reads requests from the shared memory slots, and writes responses back to the slots. -To submit an IO request, first pick one of your backend's free slots, -and write the details of the IO request in the slot. Finally, update -the 'state' field of the slot to Submitted. That informs the worker -process that it can start processing the request. Once the state has -been set to Submitted, the backend *must not* access the slot anymore, -until the worker process sets its state to 'Completed'. In other -words, each slot is owned by either the backend or the worker process -at all times, and the 'state' field indicates who has ownership at the -moment. +Here's an example snapshot of the system, when two requests from two +different backends are in progress: + +``` +Backends Request slots Communicator process +--------- ------------- -------------------- + +Backend 1 1: Idle + 2: Idle + 3: Processing tokio task handling request 3 + +Backend 2 4: Completed + 5: Processing tokio task handling request 5 + 6: Idle + +... ... +``` + +To submit an IO request, the backend first picks one of its Idle +slots, writes the IO request in the slot, and updates it to +'Submitted' state. That transfers the ownership of the slot to the +worker process, until the worker process marks the request as +Completed. The worker process spawns a separate Tokio task for each +request. To inform the worker process that a request slot has a pending IO request, there's a pipe shared by the worker process and all backend -processes. After you have changed the slot's state to Submitted, write -the index of the request slot to the pipe. This wakes up the worker -process. +processes. The backend writes the index of the request slot to the +pipe after changing the slot's state to Submitted. This wakes up the +worker process. 
(Note that the pipe is just used for wakeups, but the worker process is free to pick up Submitted IO requests even without receiving the diff --git a/pgxn/neon/communicator/src/backend_comms.rs b/pgxn/neon/communicator/src/backend_comms.rs index 998e0daf71..704b7269b1 100644 --- a/pgxn/neon/communicator/src/backend_comms.rs +++ b/pgxn/neon/communicator/src/backend_comms.rs @@ -1,18 +1,19 @@ -//! This module implements a request/response "slot" for submitting requests from backends -//! to the communicator process. +//! This module implements a request/response "slot" for submitting +//! requests from backends to the communicator process. //! //! NB: The "backend" side of this code runs in Postgres backend processes, //! which means that it is not safe to use the 'tracing' crate for logging, nor -//! to launch threads or use tokio tasks. +//! to launch threads or use tokio tasks! + use std::cell::UnsafeCell; -use std::sync::atomic::fence; use std::sync::atomic::{AtomicI32, Ordering}; use crate::neon_request::{NeonIORequest, NeonIOResult}; use atomic_enum::atomic_enum; -/// One request/response slot. Each backend has its own set of slots that it uses. +/// One request/response slot. Each backend has its own set of slots that it +/// uses. /// /// This is the moral equivalent of PgAioHandle for Postgres AIO requests /// Like PgAioHandle, try to keep this small. @@ -21,7 +22,7 @@ use atomic_enum::atomic_enum; /// /// ## Lifecycle of a request /// -/// The slot is always owned by either the backend process or the communicator +/// A slot is always owned by either the backend process or the communicator /// process, depending on the 'state'. Only the owning process is allowed to /// read or modify the slot, except for reading the 'state' itself to check who /// owns it. @@ -39,64 +40,66 @@ use atomic_enum::atomic_enum; /// slot for a new request. 
/// /// For correctness of the above protocol, we really only need two states: -/// "owned by backend" and "owned by communicator process. But to help with -/// debugging, there are a few more states. When the backend starts to fill in -/// the request details in the slot, it first sets the state from Idle to -/// Filling, and when it's done with that, from Filling to Submitted. In the -/// Filling state, the slot is still owned by the backend. Similarly, when the -/// communicator process starts to process a request, it sets it to Processing -/// state first, but the slot is still owned by the communicator process. +/// "owned by backend" and "owned by communicator process". But to help with +/// debugging and better assertions, there are a few more states. When the +/// backend starts to fill in the request details in the slot, it first sets the +/// state from Idle to Filling, and when it's done with that, from Filling to +/// Submitted. In the Filling state, the slot is still owned by the +/// backend. Similarly, when the communicator process starts to process a +/// request, it sets it to Processing state first, but the slot is still owned +/// by the communicator process. /// /// This struct doesn't handle waking up the communicator process when a request -/// has been submitted or when a response is ready. We only store the 'owner_procno' -/// which can be used for waking up the backend on completion, but the wakeups are -/// performed elsewhere. -pub struct NeonIOHandle { +/// has been submitted or when a response is ready. The 'owner_procno' is used +/// for waking up the backend on completion, but that happens elsewhere. +pub struct NeonIORequestSlot { /// similar to PgAioHandleState - state: AtomicNeonIOHandleState, + state: AtomicNeonIORequestSlotState, - /// The owning process's ProcNumber. The worker process uses this to set the process's - /// latch on completion. + /// The owning process's ProcNumber. 
The worker process uses this to set the + /// process's latch on completion. /// - /// (This could be calculated from num_neon_request_slots_per_backend and the index of - /// this slot in the overall 'neon_requst_slots array') + /// (This could be calculated from num_neon_request_slots_per_backend and + /// the index of this slot in the overall 'neon_requst_slots array'. But we + /// prefer the communicator process to not know how the request slots are + /// divided between the backends.) owner_procno: AtomicI32, - /// SAFETY: This is modified by fill_request(), after it has established ownership - /// of the slot by setting state from Idle to Filling + /// SAFETY: This is modified by submit_request(), after it has established + /// ownership of the slot by setting state from Idle to Filling request: UnsafeCell, - /// valid when state is Completed + /// Valid when state is Completed /// - /// SAFETY: This is modified by RequestProcessingGuard::complete(). There can be - /// only one RequestProcessingGuard outstanding for a slot at a time, because - /// it is returned by start_processing_request() which checks the state, so - /// RequestProcessingGuard has exclusive access to the slot. + /// SAFETY: This is modified by RequestProcessingGuard::complete(). There + /// can be only one RequestProcessingGuard outstanding for a slot at a time, + /// because it is returned by start_processing_request() which checks the + /// state, so RequestProcessingGuard has exclusive access to the slot. 
result: UnsafeCell, } // The protocol described in the "Lifecycle of a request" section above ensures // the safe access to the fields -unsafe impl Send for NeonIOHandle {} -unsafe impl Sync for NeonIOHandle {} +unsafe impl Send for NeonIORequestSlot {} +unsafe impl Sync for NeonIORequestSlot {} -impl Default for NeonIOHandle { - fn default() -> NeonIOHandle { - NeonIOHandle { +impl Default for NeonIORequestSlot { + fn default() -> NeonIORequestSlot { + NeonIORequestSlot { owner_procno: AtomicI32::new(-1), request: UnsafeCell::new(NeonIORequest::Empty), result: UnsafeCell::new(NeonIOResult::Empty), - state: AtomicNeonIOHandleState::new(NeonIOHandleState::Idle), + state: AtomicNeonIORequestSlotState::new(NeonIORequestSlotState::Idle), } } } #[atomic_enum] #[derive(Eq, PartialEq)] -pub enum NeonIOHandleState { +pub enum NeonIORequestSlotState { Idle, - /// backend is filling in the request + /// Backend is filling in the request Filling, /// Backend has submitted the request to the communicator, but the @@ -111,7 +114,86 @@ pub enum NeonIOHandleState { Completed, } -pub struct RequestProcessingGuard<'a>(&'a NeonIOHandle); +impl NeonIORequestSlot { + /// Write a request to the slot, and mark it as Submitted. + /// + /// Note: This does not wake up the worker process to actually process + /// the request. It's the caller's responsibility to do that. + pub fn submit_request(&self, request: &NeonIORequest, proc_number: i32) { + // Verify that the slot is in Idle state previously, and put it in + // Filling state. + // + // XXX: This step isn't strictly necessary. Assuming the caller didn't + // screw up and try to use a slot that's already in use, we could fill + // the slot and switch it directly from Idle to Submitted state. 
+ if let Err(s) = self.state.compare_exchange( + NeonIORequestSlotState::Idle, + NeonIORequestSlotState::Filling, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + panic!("unexpected state in request slot: {s:?}"); + } + + // Fill in the request details + self.owner_procno.store(proc_number, Ordering::Relaxed); + unsafe { *self.request.get() = *request } + + // This synchronizes-with store/swap in [`start_processing_request`]. + // Note that this ensures that the previous non-atomic writes visible + // to other threads too. + self.state + .store(NeonIORequestSlotState::Submitted, Ordering::Release); + } + + pub fn get_state(&self) -> NeonIORequestSlotState { + self.state.load(Ordering::Relaxed) + } + + pub fn try_get_result(&self) -> Option { + // This synchronizes-with the store/swap in [`RequestProcessingGuard::completed`] + let state = self.state.load(Ordering::Acquire); + if state == NeonIORequestSlotState::Completed { + let result = unsafe { *self.result.get() }; + self.state + .store(NeonIORequestSlotState::Idle, Ordering::Relaxed); + Some(result) + } else { + None + } + } + + /// Read the IO request from the slot indicated in the wakeup + pub fn start_processing_request<'a>(&'a self) -> Option> { + // XXX: using atomic load rather than compare_exchange would be + // sufficient here, as long as the communicator process has _some_ means + // of tracking which requests it's already processing. That could be a + // flag somewhere in communicator's private memory, for example. + // + // This synchronizes-with the store in [`submit_request`]. + if let Err(s) = self.state.compare_exchange( + NeonIORequestSlotState::Submitted, + NeonIORequestSlotState::Processing, + Ordering::Acquire, + Ordering::Relaxed, + ) { + // FIXME surprising state. 
This is unexpected at the moment, but if we + // started to process requests more aggressively, without waiting for the + // read from the pipe, then this could happen + panic!("unexpected state in request slot: {s:?}"); + } + + Some(RequestProcessingGuard(self)) + } +} + +/// [`NeonIORequestSlot::start_processing_request`] returns this guard object to +/// indicate that the the caller now "owns" the slot, until it calls +/// [`RequestProcessingGuard::completed`]. +/// +/// TODO: implement Drop on this, to mark the request as Aborted or Errored +/// if [`RequestProcessingGuard::completed`] is not called. +pub struct RequestProcessingGuard<'a>(&'a NeonIORequestSlot); unsafe impl<'a> Send for RequestProcessingGuard<'a> {} unsafe impl<'a> Sync for RequestProcessingGuard<'a> {} @@ -126,82 +208,17 @@ impl<'a> RequestProcessingGuard<'a> { } pub fn completed(self, result: NeonIOResult) { + // Store the result to the slot. unsafe { *self.0.result.get() = result; }; - // Ok, we have completed the IO. Mark the request as completed. After that, - // we no longer have ownership of the slot, and must not modify it. + // Mark the request as completed. After that, we no longer have + // ownership of the slot, and must not modify it. let old_state = self .0 .state - .swap(NeonIOHandleState::Completed, Ordering::Release); - assert!(old_state == NeonIOHandleState::Processing); - } -} - -impl NeonIOHandle { - pub fn fill_request(&self, request: &NeonIORequest, proc_number: i32) { - // Verify that the slot is in Idle state previously, and start filling it. - // - // XXX: This step isn't strictly necessary. Assuming the caller didn't screw up - // and try to use a slot that's already in use, we could fill the slot and - // switch it directly from Idle to Submitted state. 
- if let Err(s) = self.state.compare_exchange( - NeonIOHandleState::Idle, - NeonIOHandleState::Filling, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - panic!("unexpected state in request slot: {s:?}"); - } - - // This fence synchronizes-with store/swap in `communicator_process_main_loop`. - fence(Ordering::Acquire); - - self.owner_procno.store(proc_number, Ordering::Relaxed); - unsafe { *self.request.get() = *request } - self.state - .store(NeonIOHandleState::Submitted, Ordering::Release); - } - - pub fn get_state(&self) -> NeonIOHandleState { - self.state.load(Ordering::Relaxed) - } - - pub fn try_get_result(&self) -> Option { - // FIXME: ordering? - let state = self.state.load(Ordering::Relaxed); - if state == NeonIOHandleState::Completed { - // This fence synchronizes-with store/swap in `communicator_process_main_loop`. - fence(Ordering::Acquire); - let result = unsafe { *self.result.get() }; - self.state.store(NeonIOHandleState::Idle, Ordering::Relaxed); - Some(result) - } else { - None - } - } - - /// Read the IO request from the slot indicated in the wakeup - pub fn start_processing_request<'a>(&'a self) -> Option> { - // XXX: using compare_exchange for this is not strictly necessary, as long as - // the communicator process has _some_ means of tracking which requests it's - // already processing. That could be a flag somewhere in communicator's private - // memory, for example. - if let Err(s) = self.state.compare_exchange( - NeonIOHandleState::Submitted, - NeonIOHandleState::Processing, - Ordering::Relaxed, - Ordering::Relaxed, - ) { - // FIXME surprising state. 
This is unexpected at the moment, but if we - // started to process requests more aggressively, without waiting for the - // read from the pipe, then this could happen - panic!("unexpected state in request slot: {s:?}"); - } - fence(Ordering::Acquire); - - Some(RequestProcessingGuard(self)) + .swap(NeonIORequestSlotState::Completed, Ordering::Release); + assert!(old_state == NeonIORequestSlotState::Processing); } } diff --git a/pgxn/neon/communicator/src/backend_interface.rs b/pgxn/neon/communicator/src/backend_interface.rs index 9ed9028b96..abc982193e 100644 --- a/pgxn/neon/communicator/src/backend_interface.rs +++ b/pgxn/neon/communicator/src/backend_interface.rs @@ -3,7 +3,7 @@ use std::os::fd::OwnedFd; -use crate::backend_comms::NeonIOHandle; +use crate::backend_comms::NeonIORequestSlot; use crate::init::CommunicatorInitStruct; use crate::integrated_cache::{BackendCacheReadOp, IntegratedCacheReadAccess}; use crate::neon_request::{CCachedGetPageVResult, COid}; @@ -12,7 +12,7 @@ use crate::neon_request::{NeonIORequest, NeonIOResult}; pub struct CommunicatorBackendStruct<'t> { my_proc_number: i32, - neon_request_slots: &'t [NeonIOHandle], + neon_request_slots: &'t [NeonIORequestSlot], submission_pipe_write_fd: OwnedFd, @@ -76,9 +76,6 @@ pub extern "C" fn bcomm_start_io_request( // Create neon request and submit it bs.start_neon_io_request(slot_idx, request); - // Tell the communicator about it - bs.submit_request(slot_idx); - slot_idx } @@ -118,9 +115,6 @@ pub extern "C" fn bcomm_start_get_page_v_request( // Create neon request and submit it bs.start_neon_io_request(slot_idx, request); - // Tell the communicator about it - bs.submit_request(slot_idx); - slot_idx } @@ -152,20 +146,23 @@ pub extern "C" fn bcomm_get_request_slot_status( bs: &mut CommunicatorBackendStruct, request_slot_idx: u32, ) -> bool { - use crate::backend_comms::NeonIOHandleState; + use crate::backend_comms::NeonIORequestSlotState; match bs.neon_request_slots[request_slot_idx as 
usize].get_state() { - NeonIOHandleState::Idle => false, - NeonIOHandleState::Filling => { + NeonIORequestSlotState::Idle => false, + NeonIORequestSlotState::Filling => { // 'false' would be the right result here. However, this // is a very transient state. The C code should never // leave a slot in this state, so if it sees that, // something's gone wrong and it's not clear what to do // with it. - panic!("unexpected Filling state in request slot {}", request_slot_idx); - }, - NeonIOHandleState::Submitted => true, - NeonIOHandleState::Processing => true, - NeonIOHandleState::Completed => true, + panic!( + "unexpected Filling state in request slot {}", + request_slot_idx + ); + } + NeonIORequestSlotState::Submitted => true, + NeonIORequestSlotState::Processing => true, + NeonIORequestSlotState::Completed => true, } } @@ -204,11 +201,70 @@ pub extern "C" fn bcomm_cache_contains( ) } +#[repr(C)] +#[derive(Clone, Debug)] +pub struct FileCacheIterator { + next_bucket: u64, + + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub block_number: u32, +} + +/// Iterate over LFC contents +#[unsafe(no_mangle)] +pub extern "C" fn bcomm_cache_iterate_begin(_bs: &mut CommunicatorBackendStruct, iter: *mut FileCacheIterator) { + unsafe { (*iter).next_bucket = 0 }; +} +#[unsafe(no_mangle)] +pub extern "C" fn bcomm_cache_iterate_next(bs: &mut CommunicatorBackendStruct, iter: *mut FileCacheIterator) -> bool { + use crate::integrated_cache::GetBucketResult; + loop { + let next_bucket = unsafe { (*iter).next_bucket } as usize; + match bs.integrated_cache.get_bucket(next_bucket) { + GetBucketResult::Occupied(rel, blk) => { + unsafe { + (*iter).spc_oid = rel.spcnode; + (*iter).db_oid = rel.dbnode; + (*iter).rel_number = rel.relnode; + (*iter).fork_number = rel.forknum; + (*iter).block_number = blk; + + (*iter).next_bucket += 1; + } + break true; + }, + GetBucketResult::Vacant => { + unsafe { + (*iter).next_bucket += 1; + } + continue; + } + 
GetBucketResult::OutOfBounds => { + break false; + } + } + } +} + impl<'t> CommunicatorBackendStruct<'t> { + /// The slot must be free, or this panics. + pub(crate) fn start_neon_io_request(&mut self, request_slot_idx: i32, request: &NeonIORequest) { + let my_proc_number = self.my_proc_number; + + self.neon_request_slots[request_slot_idx as usize].submit_request(request, my_proc_number); + + // Tell the communicator about it + self.notify_about_request(request_slot_idx); + } + /// Send a wakeup to the communicator process - fn submit_request(self: &CommunicatorBackendStruct<'t>, request_slot_idx: i32) { + fn notify_about_request(self: &CommunicatorBackendStruct<'t>, request_slot_idx: i32) { // wake up communicator by writing the idx to the submission pipe // + // This can block, if the pipe is full. That should be very rare, // because the communicator tries hard to drain the pipe to prevent // that. Also, there's a natural upper bound on how many wakeups can be @@ -221,14 +277,4 @@ impl<'t> CommunicatorBackendStruct<'t> { let _res = nix::unistd::write(&self.submission_pipe_write_fd, &idxbuf); // FIXME: check result, return any errors } - - /// Note: there's no guarantee on when the communicator might pick it up. You should ring - /// the doorbell. But it might pick it up immediately. - /// - /// The slot must be free, or this panics. 
- pub(crate) fn start_neon_io_request(&mut self, request_slot_idx: i32, request: &NeonIORequest) { - let my_proc_number = self.my_proc_number; - - self.neon_request_slots[request_slot_idx as usize].fill_request(request, my_proc_number); - } } diff --git a/pgxn/neon/communicator/src/file_cache.rs b/pgxn/neon/communicator/src/file_cache.rs index 1f60c97f2c..f153174c6b 100644 --- a/pgxn/neon/communicator/src/file_cache.rs +++ b/pgxn/neon/communicator/src/file_cache.rs @@ -22,6 +22,7 @@ pub type CacheBlock = u64; pub const INVALID_CACHE_BLOCK: CacheBlock = u64::MAX; +#[derive(Debug)] pub struct FileCache { file: Arc, @@ -35,6 +36,7 @@ pub struct FileCache { // TODO: We keep track of all free blocks in this vec. That doesn't really scale. // Idea: when free_blocks fills up with more than 1024 entries, write them all to // one block on disk. +#[derive(Debug)] struct FreeList { next_free_block: CacheBlock, max_blocks: u64, diff --git a/pgxn/neon/communicator/src/init.rs b/pgxn/neon/communicator/src/init.rs index 5f7d593c35..f5af93cc97 100644 --- a/pgxn/neon/communicator/src/init.rs +++ b/pgxn/neon/communicator/src/init.rs @@ -23,7 +23,7 @@ use std::mem; use std::mem::MaybeUninit; use std::os::fd::OwnedFd; -use crate::backend_comms::NeonIOHandle; +use crate::backend_comms::NeonIORequestSlot; use crate::integrated_cache::IntegratedCacheInitStruct; /// This struct is created in the postmaster process, and inherited to @@ -36,7 +36,7 @@ pub struct CommunicatorInitStruct { // Shared memory data structures pub num_neon_request_slots: u32, - pub neon_request_slots: &'static [NeonIOHandle], + pub neon_request_slots: &'static [NeonIORequestSlot], pub integrated_cache_init_struct: IntegratedCacheInitStruct<'static>, } @@ -46,10 +46,7 @@ impl std::fmt::Debug for CommunicatorInitStruct { fmt.debug_struct("CommunicatorInitStruct") .field("submission_pipe_read_fd", &self.submission_pipe_read_fd) .field("submission_pipe_write_fd", &self.submission_pipe_write_fd) - .field( - 
"num_neon_request_slots", - &self.num_neon_request_slots, - ) + .field("num_neon_request_slots", &self.num_neon_request_slots) .field("neon_request_slots length", &self.neon_request_slots.len()) .finish() } @@ -59,7 +56,7 @@ impl std::fmt::Debug for CommunicatorInitStruct { pub extern "C" fn rcommunicator_shmem_size(num_neon_request_slots: u32) -> u64 { let mut size = 0; - size += mem::size_of::() * num_neon_request_slots as usize; + size += mem::size_of::() * num_neon_request_slots as usize; // For integrated_cache's Allocator. TODO: make this adjustable size += IntegratedCacheInitStruct::shmem_size(); @@ -83,16 +80,16 @@ pub extern "C" fn rcommunicator_shmem_init( unsafe { std::slice::from_raw_parts_mut(shmem_area_ptr, shmem_area_len as usize) }; let (neon_request_slots, remaining_area) = - alloc_array_from_slice::(shmem_area, num_neon_request_slots as usize); + alloc_array_from_slice::(shmem_area, num_neon_request_slots as usize); for slot in neon_request_slots.iter_mut() { - slot.write(NeonIOHandle::default()); + slot.write(NeonIORequestSlot::default()); } // 'neon_request_slots' is initialized now. (MaybeUninit::slice_assume_init_mut() is nightly-only // as of this writing.) let neon_request_slots = unsafe { - std::mem::transmute::<&mut [MaybeUninit], &mut [NeonIOHandle]>( + std::mem::transmute::<&mut [MaybeUninit], &mut [NeonIORequestSlot]>( neon_request_slots, ) }; diff --git a/pgxn/neon/communicator/src/integrated_cache.rs b/pgxn/neon/communicator/src/integrated_cache.rs index 63c771b94c..71b2ee1645 100644 --- a/pgxn/neon/communicator/src/integrated_cache.rs +++ b/pgxn/neon/communicator/src/integrated_cache.rs @@ -46,6 +46,7 @@ pub struct IntegratedCacheInitStruct<'t> { } /// Represents write-access to the integrated cache. This is used by the communicator process. 
+#[derive(Debug)] pub struct IntegratedCacheWriteAccess<'t> { relsize_cache: neon_shmem::hash::HashMapAccess<'t, RelKey, RelEntry>, block_map: neon_shmem::hash::HashMapAccess<'t, BlockKey, BlockEntry>, @@ -192,6 +193,10 @@ struct RelEntry { /// cached size of the relation /// u32::MAX means 'not known' (that's InvalidBlockNumber in Postgres) nblocks: AtomicU32, + + /// This is the last time the "metadata" of this relation changed, not + /// the contents of the blocks. That is, the size of the relation. + lw_lsn: AtomicLsn, } impl std::fmt::Debug for RelEntry { @@ -338,7 +343,7 @@ impl<'t> IntegratedCacheWriteAccess<'t> { CacheResult::NotFound(lsn) } - pub fn remember_rel_size(&'t self, rel: &RelTag, nblocks: u32) { + pub fn remember_rel_size(&'t self, rel: &RelTag, nblocks: u32, lsn: Lsn) { match self.relsize_cache.entry(RelKey::from(rel)) { Entry::Vacant(e) => { tracing::info!("inserting rel entry for {rel:?}, {nblocks} blocks"); @@ -346,12 +351,14 @@ impl<'t> IntegratedCacheWriteAccess<'t> { _ = e .insert(RelEntry { nblocks: AtomicU32::new(nblocks), + lw_lsn: AtomicLsn::new(lsn.0), }) .expect("out of memory"); } Entry::Occupied(e) => { tracing::info!("updating rel entry for {rel:?}, {nblocks} blocks"); e.get().nblocks.store(nblocks, Ordering::Relaxed); + e.get().lw_lsn.store(lsn); } }; } @@ -515,10 +522,13 @@ impl<'t> IntegratedCacheWriteAccess<'t> { } /// Forget information about given relation in the cache. 
(For DROP TABLE and such) - pub fn forget_rel(&'t self, rel: &RelTag) { + pub fn forget_rel(&'t self, rel: &RelTag, _nblocks: Option, flush_lsn: Lsn) { tracing::info!("forgetting rel entry for {rel:?}"); self.relsize_cache.remove(&RelKey::from(rel)); + // update with flush LSN + let _ = self.global_lw_lsn.fetch_max(flush_lsn.0, Ordering::Relaxed); + // also forget all cached blocks for the relation // FIXME /* @@ -749,6 +759,12 @@ fn get_rel_size( } } +pub enum GetBucketResult { + Occupied(RelTag, u32), + Vacant, + OutOfBounds, +} + /// Accessor for other backends /// /// This allows backends to read pages from the cache directly, on their own, without making a @@ -771,6 +787,21 @@ impl<'t> IntegratedCacheReadAccess<'t> { .get(&BlockKey::from((rel, block_number))) .is_some() } + + pub fn get_bucket(&self, bucket_no: usize) -> GetBucketResult { + match self.block_map.get_at_bucket(bucket_no).as_deref() { + None => { + // free bucket, or out of bounds + if bucket_no >= self.block_map.get_num_buckets() { + GetBucketResult::OutOfBounds + } else { + GetBucketResult::Vacant + } + } + Some((key, _)) => GetBucketResult::Occupied(key.rel, key.block_number), + } + } + } pub struct BackendCacheReadOp<'t> { diff --git a/pgxn/neon/communicator/src/lib.rs b/pgxn/neon/communicator/src/lib.rs index fbe582df78..d0c5b758da 100644 --- a/pgxn/neon/communicator/src/lib.rs +++ b/pgxn/neon/communicator/src/lib.rs @@ -1,9 +1,7 @@ -//! //! Three main parts: //! - async tokio communicator core, which receives requests and processes them. //! - Main loop and requests queues, which routes requests from backends to the core //! - the per-backend glue code, which submits requests -//! 
mod backend_comms; @@ -23,5 +21,5 @@ mod worker_process; mod global_allocator; -// FIXME get this from postgres headers somehow +// FIXME: get this from postgres headers somehow pub const BLCKSZ: usize = 8192; diff --git a/pgxn/neon/communicator/src/neon_request.rs b/pgxn/neon/communicator/src/neon_request.rs index f54dcd9222..f777256a5f 100644 --- a/pgxn/neon/communicator/src/neon_request.rs +++ b/pgxn/neon/communicator/src/neon_request.rs @@ -1,11 +1,24 @@ -pub type CLsn = u64; +// Definitions of some core PostgreSQL datatypes. + +/// XLogRecPtr is defined in "access/xlogdefs.h" as: +/// +/// ``` +/// typedef uint64 XLogRecPtr; +/// ``` +/// cbindgen:no-export +pub type XLogRecPtr = u64; + +pub type CLsn = XLogRecPtr; pub type COid = u32; // This conveniently matches PG_IOV_MAX pub const MAX_GETPAGEV_PAGES: usize = 32; -use pageserver_page_api as page_api; +use std::ffi::CStr; +use pageserver_page_api::{self as page_api, SlruKind}; + +/// Request from a Postgres backend to the communicator process #[allow(clippy::large_enum_variant)] #[repr(C)] #[derive(Copy, Clone, Debug)] @@ -17,6 +30,7 @@ pub enum NeonIORequest { RelExists(CRelExistsRequest), RelSize(CRelSizeRequest), GetPageV(CGetPageVRequest), + ReadSlruSegment(CReadSlruSegmentRequest), PrefetchV(CPrefetchVRequest), DbSize(CDbSizeRequest), @@ -28,6 +42,9 @@ pub enum NeonIORequest { RelCreate(CRelCreateRequest), RelTruncate(CRelTruncateRequest), RelUnlink(CRelUnlinkRequest), + + // Other requests + UpdateCachedRelSize(CUpdateCachedRelSizeRequest), } #[repr(C)] @@ -39,6 +56,9 @@ pub enum NeonIOResult { /// the result pages are written to the shared memory addresses given in the request GetPageV, + /// The result is written to the file, path to which is provided + /// in the request. The [`u64`] value here is the number of blocks. + ReadSlruSegment(u64), /// A prefetch request returns as soon as the request has been received by the communicator. /// It is processed in the background. 
@@ -57,6 +77,10 @@ pub enum NeonIOResult { } impl NeonIORequest { + /// All requests include a unique request ID, which can be used to trace the execution + /// of a request all the way to the pageservers. The request ID needs to be unique + /// within the lifetime of the Postgres instance (but not across servers or across + /// restarts of the same server). pub fn request_id(&self) -> u64 { use NeonIORequest::*; match self { @@ -64,6 +88,7 @@ impl NeonIORequest { RelExists(req) => req.request_id, RelSize(req) => req.request_id, GetPageV(req) => req.request_id, + ReadSlruSegment(req) => req.request_id, PrefetchV(req) => req.request_id, DbSize(req) => req.request_id, WritePage(req) => req.request_id, @@ -72,10 +97,14 @@ impl NeonIORequest { RelCreate(req) => req.request_id, RelTruncate(req) => req.request_id, RelUnlink(req) => req.request_id, + UpdateCachedRelSize(req) => req.request_id, } } } +/// Special quick result to a CGetPageVRequest request, indicating that the +/// the requested pages are present in the local file cache. The backend can +/// read the blocks directly from the given LFC blocks. #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct CCachedGetPageVResult { @@ -92,7 +121,7 @@ pub struct CCachedGetPageVResult { #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct ShmemBuf { - // These fields define where the result is written. Must point into a buffer in shared memory! + // Pointer to where the result is written or where to read from. Must point into a buffer in shared memory! pub ptr: *mut u8, } @@ -170,6 +199,28 @@ pub struct CGetPageVRequest { pub dest: [ShmemBuf; MAX_GETPAGEV_PAGES], } +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CReadSlruSegmentRequest { + pub request_id: u64, + pub slru_kind: SlruKind, + pub segment_number: u32, + pub request_lsn: CLsn, + /// Must be a null-terminated C string containing the file path + /// where the communicator will write the SLRU segment. 
+ pub destination_file_path: ShmemBuf, +} + +impl CReadSlruSegmentRequest { + /// Returns the file path where the communicator will write the + /// SLRU segment. + pub(crate) fn destination_file_path(&self) -> String { + unsafe { CStr::from_ptr(self.destination_file_path.as_mut_ptr() as *const _) } + .to_string_lossy() + .into_owned() + } +} + #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct CPrefetchVRequest { @@ -187,7 +238,6 @@ pub struct CPrefetchVRequest { pub struct CDbSizeRequest { pub request_id: u64, pub db_oid: COid, - pub request_lsn: CLsn, } #[repr(C)] @@ -201,7 +251,7 @@ pub struct CWritePageRequest { pub block_number: u32, pub lsn: CLsn, - // These fields define where the result is written. Must point into a buffer in shared memory! + // `src` defines the new page contents. Must point into a buffer in shared memory! pub src: ShmemBuf, } @@ -216,7 +266,7 @@ pub struct CRelExtendRequest { pub block_number: u32, pub lsn: CLsn, - // These fields define page contents. Must point into a buffer in shared memory! + // `src` defines the new page contents. Must point into a buffer in shared memory! 
pub src: ShmemBuf, } @@ -241,6 +291,7 @@ pub struct CRelCreateRequest { pub db_oid: COid, pub rel_number: u32, pub fork_number: u8, + pub lsn: CLsn, } #[repr(C)] @@ -252,6 +303,7 @@ pub struct CRelTruncateRequest { pub rel_number: u32, pub fork_number: u8, pub nblocks: u32, + pub lsn: CLsn, } #[repr(C)] @@ -262,8 +314,7 @@ pub struct CRelUnlinkRequest { pub db_oid: COid, pub rel_number: u32, pub fork_number: u8, - pub block_number: u32, - pub nblocks: u32, + pub lsn: CLsn, } impl CRelExistsRequest { @@ -375,3 +426,26 @@ impl CRelUnlinkRequest { } } } + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CUpdateCachedRelSizeRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub nblocks: u32, + pub lsn: CLsn, +} + +impl CUpdateCachedRelSizeRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} diff --git a/pgxn/neon/communicator/src/worker_process/logging.rs b/pgxn/neon/communicator/src/worker_process/logging.rs index 3b652e203f..43f51cd332 100644 --- a/pgxn/neon/communicator/src/worker_process/logging.rs +++ b/pgxn/neon/communicator/src/worker_process/logging.rs @@ -1,10 +1,13 @@ //! Glue code to hook up Rust logging with the `tracing` crate to the PostgreSQL log //! //! In the Rust threads, the log messages are written to a mpsc Channel, and the Postgres -//! process latch is raised. That wakes up the loop in the main thread. It reads the -//! message from the channel and ereport()s it. This ensures that only one thread, the main -//! thread, calls the PostgreSQL logging routines at any time. +//! process latch is raised. That wakes up the loop in the main thread, see +//! `communicator_new_bgworker_main()`. It reads the message from the channel and +//! ereport()s it. This ensures that only one thread, the main thread, calls the +//! 
PostgreSQL logging routines at any time. +use std::ffi::c_char; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::mpsc::sync_channel; use std::sync::mpsc::{Receiver, SyncSender}; use std::sync::mpsc::{TryRecvError, TrySendError}; @@ -12,27 +15,32 @@ use std::sync::mpsc::{TryRecvError, TrySendError}; use tracing::info; use tracing::{Event, Level, Metadata, Subscriber}; use tracing_subscriber::filter::LevelFilter; -use tracing_subscriber::fmt::FmtContext; -use tracing_subscriber::fmt::FormatEvent; -use tracing_subscriber::fmt::FormatFields; -use tracing_subscriber::fmt::FormattedFields; -use tracing_subscriber::fmt::MakeWriter; use tracing_subscriber::fmt::format::Writer; +use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields, FormattedFields, MakeWriter}; use tracing_subscriber::registry::LookupSpan; use crate::worker_process::callbacks::callback_set_my_latch; -pub struct LoggingState { +/// This handle is passed to the C code, and used by [`communicator_worker_poll_logging`] +pub struct LoggingReceiver { receiver: Receiver, } +/// This is passed to `tracing` +struct LoggingSender { + sender: SyncSender, +} + +static DROPPED_EVENT_COUNT: AtomicU64 = AtomicU64::new(0); + /// Called once, at worker process startup. The returned LoggingState is passed back /// in the subsequent calls to `pump_logging`. It is opaque to the C code. #[unsafe(no_mangle)] -pub extern "C" fn configure_logging() -> Box { +pub extern "C" fn communicator_worker_configure_logging() -> Box { let (sender, receiver) = sync_channel(1000); - let maker = Maker { channel: sender }; + let receiver = LoggingReceiver { receiver }; + let sender = LoggingSender { sender }; use tracing_subscriber::prelude::*; let r = tracing_subscriber::registry(); @@ -41,32 +49,45 @@ pub extern "C" fn configure_logging() -> Box { tracing_subscriber::fmt::layer() .with_ansi(false) .event_format(SimpleFormatter::new()) - .with_writer(maker) - // TODO: derive this from log_min_messages? 
+ .with_writer(sender) + // TODO: derive this from log_min_messages? Currently the code in + // communicator_process.c forces log_min_messages='INFO'. .with_filter(LevelFilter::from_level(Level::INFO)), ); r.init(); info!("communicator process logging started"); - let state = LoggingState { receiver }; - - Box::new(state) + Box::new(receiver) } /// Read one message from the logging queue. This is essentially a wrapper to Receiver, /// with a C-friendly signature. /// -/// The message is copied into *errbuf, which is a caller-supplied buffer of size `errbuf_len`. -/// If the message doesn't fit in the buffer, it is truncated. It is always NULL-terminated. +/// The message is copied into *errbuf, which is a caller-supplied buffer of size +/// `errbuf_len`. If the message doesn't fit in the buffer, it is truncated. It is always +/// NULL-terminated. /// -/// The error level is returned *elevel_p. It's one of the PostgreSQL error levels, see elog.h +/// The error level is returned *elevel_p. It's one of the PostgreSQL error levels, see +/// elog.h +/// +/// If there was a message, *dropped_event_count_p is also updated with a counter of how +/// many log messages in total has been dropped. By comparing that with the value from +/// previous call, you can tell how many were dropped since last call. +/// +/// Returns: +/// +/// 0 if there were no messages +/// 1 if there was a message. The message and its level are returned in +/// *errbuf and *elevel_p. *dropped_event_count_p is also updated. 
+/// -1 on error, i.e the other end of the queue was disconnected #[unsafe(no_mangle)] -pub extern "C" fn pump_logging( - state: &mut LoggingState, - errbuf: *mut u8, +pub extern "C" fn communicator_worker_poll_logging( + state: &mut LoggingReceiver, + errbuf: *mut c_char, errbuf_len: u32, elevel_p: &mut i32, + dropped_event_count_p: &mut u64, ) -> i32 { let msg = match state.receiver.try_recv() { Err(TryRecvError::Empty) => return 0, @@ -75,15 +96,17 @@ pub extern "C" fn pump_logging( }; let src: &[u8] = &msg.message; - let dst = errbuf; + let dst: *mut u8 = errbuf.cast(); let len = std::cmp::min(src.len(), errbuf_len as usize - 1); unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dst, len); - *(errbuf.add(len)) = b'\0'; // NULL terminator + *(dst.add(len)) = b'\0'; // NULL terminator } - // XXX: these levels are copied from PostgreSQL's elog.h. Introduce another enum - // to hide these? + // Map the tracing Level to PostgreSQL elevel. + // + // XXX: These levels are copied from PostgreSQL's elog.h. Introduce another enum to + // hide these? 
*elevel_p = match msg.level { Level::TRACE => 10, // DEBUG5 Level::DEBUG => 14, // DEBUG1 @@ -92,6 +115,8 @@ pub extern "C" fn pump_logging( Level::ERROR => 21, // ERROR }; + *dropped_event_count_p = DROPPED_EVENT_COUNT.load(Ordering::Relaxed); + 1 } @@ -115,7 +140,7 @@ impl Default for FormattedEventWithMeta { struct EventBuilder<'a> { event: FormattedEventWithMeta, - maker: &'a Maker, + sender: &'a LoggingSender, } impl std::io::Write for EventBuilder<'_> { @@ -123,25 +148,21 @@ self.event.message.write(buf) } fn flush(&mut self) -> std::io::Result<()> { - self.maker.send_event(self.event.clone()); + self.sender.send_event(self.event.clone()); Ok(()) } } impl Drop for EventBuilder<'_> { fn drop(&mut self) { - let maker = self.maker; + let sender = self.sender; let event = std::mem::take(&mut self.event); - maker.send_event(event); + sender.send_event(event); } } -struct Maker { - channel: SyncSender, -} - -impl<'a> MakeWriter<'a> for Maker { +impl<'a> MakeWriter<'a> for LoggingSender { type Writer = EventBuilder<'a>; fn make_writer(&'a self) -> Self::Writer { @@ -154,33 +175,38 @@ message: Vec::new(), level: *meta.level(), }, - maker: self, + sender: self, } } } -impl Maker { +impl LoggingSender { fn send_event(&self, e: FormattedEventWithMeta) { - match self.channel.try_send(e) { + match self.sender.try_send(e) { Ok(()) => { // notify the main thread callback_set_my_latch(); } Err(TrySendError::Disconnected(_)) => {} Err(TrySendError::Full(_)) => { - // TODO: record that some messages were lost + // The queue is full, cannot send any more. To avoid blocking the tokio + // thread, simply drop the message. Better to lose some logs than get + // stuck if there's a problem with the logging. + // + // Record the fact that a message was dropped by incrementing the + // counter. 
+ DROPPED_EVENT_COUNT.fetch_add(1, Ordering::Relaxed); } } } } -/// Simple formatter implementation for tracing_subscriber, which prints the log -/// spans and message part like the default formatter, but no timestamp or error -/// level. The error level is captured separately by `FormattedEventWithMeta', -/// and when the error is printed by the main thread, with PostgreSQL ereport(), -/// it gets a timestamp at that point. (The timestamp printed will therefore lag -/// behind the timestamp on the event here, if the main thread doesn't process -/// the log message promptly) +/// Simple formatter implementation for tracing_subscriber, which prints the log spans and +/// message part like the default formatter, but no timestamp or error level. The error +/// level is captured separately by `FormattedEventWithMeta', and when the error is +/// printed by the main thread, with PostgreSQL ereport(), it gets a timestamp at that +/// point. (The timestamp printed will therefore lag behind the timestamp on the event +/// here, if the main thread doesn't process the log message promptly) struct SimpleFormatter; impl FormatEvent for SimpleFormatter @@ -199,11 +225,10 @@ where for span in scope.from_root() { write!(writer, "{}", span.name())?; - // `FormattedFields` is a formatted representation of the span's - // fields, which is stored in its extensions by the `fmt` layer's - // `new_span` method. The fields will have been formatted - // by the same field formatter that's provided to the event - // formatter in the `FmtContext`. + // `FormattedFields` is a formatted representation of the span's fields, + // which is stored in its extensions by the `fmt` layer's `new_span` + // method. The fields will have been formatted by the same field formatter + // that's provided to the event formatter in the `FmtContext`. 
let ext = span.extensions(); let fields = &ext .get::>() @@ -220,7 +245,7 @@ where // Write fields on the event ctx.field_format().format_fields(writer.by_ref(), event)?; - writeln!(writer) + Ok(()) } } diff --git a/pgxn/neon/communicator/src/worker_process/main_loop.rs b/pgxn/neon/communicator/src/worker_process/main_loop.rs index fe6acbf049..0b2f9da366 100644 --- a/pgxn/neon/communicator/src/worker_process/main_loop.rs +++ b/pgxn/neon/communicator/src/worker_process/main_loop.rs @@ -4,7 +4,7 @@ use std::os::fd::OwnedFd; use std::path::PathBuf; use std::str::FromStr as _; -use crate::backend_comms::NeonIOHandle; +use crate::backend_comms::NeonIORequestSlot; use crate::file_cache::FileCache; use crate::global_allocator::MyAllocatorCollector; use crate::init::CommunicatorInitStruct; @@ -12,7 +12,7 @@ use crate::integrated_cache::{CacheResult, IntegratedCacheWriteAccess}; use crate::neon_request::{CGetPageVRequest, CPrefetchVRequest}; use crate::neon_request::{NeonIORequest, NeonIOResult}; use crate::worker_process::in_progress_ios::{RequestInProgressKey, RequestInProgressTable}; -use pageserver_client_grpc::{PageserverClient, ShardSpec}; +use pageserver_client_grpc::{PageserverClient, ShardSpec, ShardStripeSize}; use pageserver_page_api as page_api; use metrics::{IntCounter, IntCounterVec}; @@ -24,26 +24,41 @@ use utils::id::{TenantId, TimelineId}; use super::callbacks::{get_request_lsn, notify_proc}; -use tracing::{error, info, info_span, trace}; +use tracing::{debug, error, info, info_span, trace}; use utils::lsn::Lsn; pub struct CommunicatorWorkerProcessStruct<'a> { - neon_request_slots: &'a [NeonIOHandle], + /// Tokio runtime that the main loop and any other related tasks runs in. + runtime: tokio::runtime::Handle, + /// Client to communicate with the pageserver client: PageserverClient, - pub(crate) cache: IntegratedCacheWriteAccess<'a>, + /// Request slots that backends use to send IO requests to the communicator. 
+ neon_request_slots: &'a [NeonIORequestSlot], + /// Notification pipe. Backends use this to notify the communicator that a request is waiting to + /// be processed in one of the request slots. submission_pipe_read_fd: OwnedFd, + /// Locking table for all in-progress IO requests. in_progress_table: RequestInProgressTable, - // Metrics + /// Local File Cache, relation size tracking, last-written LSN tracking + pub(crate) cache: IntegratedCacheWriteAccess<'a>, + + /*** Static configuration ***/ + /// Stripe size doesn't change after startup. (The shard map is not stored here, it's passed + /// directly to the client) + stripe_size: Option, + + /*** Metrics ***/ request_counters: IntCounterVec, request_rel_exists_counter: IntCounter, request_rel_size_counter: IntCounter, request_get_pagev_counter: IntCounter, + request_read_slru_segment_counter: IntCounter, request_prefetchv_counter: IntCounter, request_db_size_counter: IntCounter, request_write_page_counter: IntCounter, @@ -70,6 +85,7 @@ pub(super) async fn init( timeline_id: String, auth_token: Option, shard_map: HashMap, + stripe_size: Option, initial_file_cache_size: u64, file_cache_path: Option, ) -> CommunicatorWorkerProcessStruct<'static> { @@ -91,11 +107,12 @@ pub(super) async fn init( .integrated_cache_init_struct .worker_process_init(last_lsn, file_cache); - // TODO: plumb through the stripe size. 
+ debug!("Initialised integrated cache: {cache:?}"); + let tenant_id = TenantId::from_str(&tenant_id).expect("invalid tenant ID"); let timeline_id = TimelineId::from_str(&timeline_id).expect("invalid timeline ID"); - let shard_spec = ShardSpec::new(shard_map, None).expect("invalid shard spec"); - let client = PageserverClient::new(tenant_id, timeline_id, shard_spec, auth_token) + let shard_spec = ShardSpec::new(shard_map, stripe_size).expect("invalid shard spec"); + let client = PageserverClient::new(tenant_id, timeline_id, shard_spec, auth_token, None) .expect("could not create client"); let request_counters = IntCounterVec::new( @@ -109,6 +126,8 @@ pub(super) async fn init( let request_rel_exists_counter = request_counters.with_label_values(&["rel_exists"]); let request_rel_size_counter = request_counters.with_label_values(&["rel_size"]); let request_get_pagev_counter = request_counters.with_label_values(&["get_pagev"]); + let request_read_slru_segment_counter = + request_counters.with_label_values(&["read_slru_segment"]); let request_prefetchv_counter = request_counters.with_label_values(&["prefetchv"]); let request_db_size_counter = request_counters.with_label_values(&["db_size"]); let request_write_page_counter = request_counters.with_label_values(&["write_page"]); @@ -146,6 +165,8 @@ pub(super) async fn init( request_nblocks_counters.with_label_values(&["rel_zero_extend"]); CommunicatorWorkerProcessStruct { + runtime: tokio::runtime::Handle::current(), + stripe_size, neon_request_slots: cis.neon_request_slots, client, cache, @@ -157,6 +178,7 @@ pub(super) async fn init( request_rel_exists_counter, request_rel_size_counter, request_get_pagev_counter, + request_read_slru_segment_counter, request_prefetchv_counter, request_db_size_counter, request_write_page_counter, @@ -179,6 +201,22 @@ pub(super) async fn init( } impl<'t> CommunicatorWorkerProcessStruct<'t> { + /// Update the configuration + pub(super) fn update_shard_map( + &self, + new_shard_map: HashMap, + 
) { + let shard_spec = + ShardSpec::new(new_shard_map, self.stripe_size.clone()).expect("invalid shard spec"); + + { + let _in_runtime = self.runtime.enter(); + if let Err(err) = self.client.update_shards(shard_spec) { + tracing::error!("could not update shard map: {err:?}"); + } + } + } + /// Main loop of the worker process. Receive requests from the backends and process them. pub(super) async fn run(&'static self) { let mut idxbuf: [u8; 4] = [0; 4]; @@ -259,9 +297,10 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { // Is it possible that the last-written LSN is ahead of last flush LSN? Generally not, we // shouldn't evict a page from the buffer cache before all its modifications have been - // safely flushed. That's the "WAL before data" rule. However, such case does exist at index - // building: _bt_blwritepage logs the full page without flushing WAL before smgrextend - // (files are fsynced before build ends). + // safely flushed. That's the "WAL before data" rule. However, there are a few exceptions: + // + // - when creating an index: _bt_blwritepage logs the full page without flushing WAL before + // smgrextend (files are fsynced before build ends). 
// // XXX: If we make a request LSN greater than the current WAL flush LSN, the pageserver would // block waiting for the WAL arrive, until we flush it and it propagates through the @@ -359,8 +398,14 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { { Ok(nblocks) => { // update the cache - tracing::info!("updated relsize for {:?} in cache: {}", rel, nblocks); - self.cache.remember_rel_size(&rel, nblocks); + tracing::info!( + "updated relsize for {:?} in cache: {}, lsn {}", + rel, + nblocks, + read_lsn + ); + self.cache + .remember_rel_size(&rel, nblocks, not_modified_since); NeonIOResult::RelSize(nblocks) } @@ -379,6 +424,36 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { Err(errno) => NeonIOResult::Error(errno), } } + NeonIORequest::ReadSlruSegment(req) => { + self.request_read_slru_segment_counter.inc(); + let lsn = Lsn(req.request_lsn); + let file_path = req.destination_file_path(); + + match self + .client + .get_slru_segment(page_api::GetSlruSegmentRequest { + read_lsn: self.request_lsns(lsn), + kind: req.slru_kind, + segno: req.segment_number, + }) + .await + { + Ok(slru_bytes) => { + if let Err(e) = tokio::fs::write(&file_path, &slru_bytes).await { + info!("could not write slru segment to file {file_path}: {e}"); + return NeonIOResult::Error(e.raw_os_error().unwrap_or(libc::EIO)); + } + + let blocks_count = slru_bytes.len() / crate::BLCKSZ; + + NeonIOResult::ReadSlruSegment(blocks_count as _) + } + Err(err) => { + info!("tonic error: {err:?}"); + NeonIOResult::Error(0) + } + } + } NeonIORequest::PrefetchV(req) => { self.request_prefetchv_counter.inc(); self.request_prefetchv_nblocks_counter @@ -457,7 +532,7 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { .remember_page(&rel, req.block_number, req.src, Lsn(req.lsn), true) .await; self.cache - .remember_rel_size(&req.reltag(), req.block_number + 1); + .remember_rel_size(&req.reltag(), req.block_number + 1, Lsn(req.lsn)); NeonIOResult::WriteOK } NeonIORequest::RelZeroExtend(req) => { @@ -466,31 +541,42 @@ 
impl<'t> CommunicatorWorkerProcessStruct<'t> { .inc_by(req.nblocks as u64); // TODO: need to grab an io-in-progress lock for this? I guess not - // TODO: I think we should put the empty pages to the cache, or at least - // update the last-written LSN. - self.cache - .remember_rel_size(&req.reltag(), req.block_number + req.nblocks); + // TODO: We could put the empty pages to the cache. Maybe have + // a marker on the block entries for all-zero pages, instead of + // actually storing the empty pages. + self.cache.remember_rel_size( + &req.reltag(), + req.block_number + req.nblocks, + Lsn(req.lsn), + ); NeonIOResult::WriteOK } NeonIORequest::RelCreate(req) => { self.request_rel_create_counter.inc(); // TODO: need to grab an io-in-progress lock for this? I guess not - self.cache.remember_rel_size(&req.reltag(), 0); + self.cache.remember_rel_size(&req.reltag(), 0, Lsn(req.lsn)); NeonIOResult::WriteOK } NeonIORequest::RelTruncate(req) => { self.request_rel_truncate_counter.inc(); // TODO: need to grab an io-in-progress lock for this? I guess not - self.cache.remember_rel_size(&req.reltag(), req.nblocks); + self.cache + .remember_rel_size(&req.reltag(), req.nblocks, Lsn(req.lsn)); NeonIOResult::WriteOK } NeonIORequest::RelUnlink(req) => { self.request_rel_unlink_counter.inc(); // TODO: need to grab an io-in-progress lock for this? I guess not - self.cache.forget_rel(&req.reltag()); + self.cache.forget_rel(&req.reltag(), None, Lsn(req.lsn)); + NeonIOResult::WriteOK + } + NeonIORequest::UpdateCachedRelSize(req) => { + // TODO: need to grab an io-in-progress lock for this? 
I guess not + self.cache + .remember_rel_size(&req.reltag(), req.nblocks, Lsn(req.lsn)); NeonIOResult::WriteOK } } @@ -560,7 +646,7 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { match self .client .get_page(page_api::GetPageRequest { - request_id: req.request_id, + request_id: req.request_id.into(), request_class: page_api::GetPageClass::Normal, read_lsn, rel, @@ -571,18 +657,23 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { Ok(resp) => { // Write the received page images directly to the shared memory location // that the backend requested. - if resp.page_images.len() != block_numbers.len() { + if resp.pages.len() != block_numbers.len() { error!( "received unexpected response with {} page images from pageserver for a request for {} pages", - resp.page_images.len(), + resp.pages.len(), block_numbers.len(), ); return Err(-1); } - for (page_image, (blkno, _lsn, dest, _guard)) in - resp.page_images.into_iter().zip(cache_misses) + + info!( + "received getpage response for blocks {:?} in rel {:?} lsns {}", + block_numbers, rel, read_lsn + ); + + for (page, (blkno, _lsn, dest, _guard)) in resp.pages.into_iter().zip(cache_misses) { - let src: &[u8] = page_image.as_ref(); + let src: &[u8] = page.image.as_ref(); let len = std::cmp::min(src.len(), dest.bytes_total()); unsafe { std::ptr::copy_nonoverlapping(src.as_ptr(), dest.as_mut_ptr(), len); @@ -593,7 +684,7 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { .remember_page( &rel, blkno, - page_image, + page.image, read_lsn.not_modified_since_lsn.unwrap(), false, ) @@ -656,7 +747,7 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { match self .client .get_page(page_api::GetPageRequest { - request_id: req.request_id, + request_id: req.request_id.into(), request_class: page_api::GetPageClass::Prefetch, read_lsn: self.request_lsns(not_modified_since), rel, @@ -669,20 +760,18 @@ impl<'t> CommunicatorWorkerProcessStruct<'t> { "prefetch completed, remembering blocks {:?} in rel {:?} in LFC", block_numbers, rel ); - if 
resp.page_images.len() != block_numbers.len() { + if resp.pages.len() != block_numbers.len() { error!( "received unexpected response with {} page images from pageserver for a request for {} pages", - resp.page_images.len(), + resp.pages.len(), block_numbers.len(), ); return Err(-1); } - for (page_image, (blkno, _lsn, _guard)) in - resp.page_images.into_iter().zip(cache_misses) - { + for (page, (blkno, _lsn, _guard)) in resp.pages.into_iter().zip(cache_misses) { self.cache - .remember_page(&rel, blkno, page_image, not_modified_since, false) + .remember_page(&rel, blkno, page.image, not_modified_since, false) .await; } } diff --git a/pgxn/neon/communicator/src/worker_process/worker_interface.rs b/pgxn/neon/communicator/src/worker_process/worker_interface.rs index 9aaa483c9e..ff9b1ba699 100644 --- a/pgxn/neon/communicator/src/worker_process/worker_interface.rs +++ b/pgxn/neon/communicator/src/worker_process/worker_interface.rs @@ -10,6 +10,8 @@ use crate::init::CommunicatorInitStruct; use crate::worker_process::main_loop; use crate::worker_process::main_loop::CommunicatorWorkerProcessStruct; +use pageserver_client_grpc::ShardStripeSize; + /// Launch the communicator's tokio tasks, which do most of the work. 
/// /// The caller has initialized the process as a regular PostgreSQL @@ -24,9 +26,11 @@ pub extern "C" fn communicator_worker_process_launch( auth_token: *const c_char, shard_map: *mut *mut c_char, nshards: u32, + stripe_size: u32, file_cache_path: *const c_char, initial_file_cache_size: u64, ) -> &'static CommunicatorWorkerProcessStruct<'static> { + tracing::warn!("starting threads in rust code"); // Convert the arguments into more convenient Rust types let tenant_id = unsafe { CStr::from_ptr(tenant_id) }.to_str().unwrap(); let timeline_id = unsafe { CStr::from_ptr(timeline_id) }.to_str().unwrap(); @@ -48,7 +52,7 @@ pub extern "C" fn communicator_worker_process_launch( Some(PathBuf::from(c_str.to_str().unwrap())) } }; - let shard_map = parse_shard_map(nshards, shard_map); + let shard_map = shard_map_to_hash(nshards, shard_map); // start main loop let runtime = tokio::runtime::Builder::new_multi_thread() @@ -63,6 +67,11 @@ pub extern "C" fn communicator_worker_process_launch( timeline_id.to_string(), auth_token, shard_map, + if stripe_size > 0 { + Some(ShardStripeSize(stripe_size)) + } else { + None + }, initial_file_cache_size, file_cache_path, )); @@ -84,7 +93,7 @@ pub extern "C" fn communicator_worker_process_launch( } /// Convert the "shard map" from an array of C strings, indexed by shard no to a rust HashMap -fn parse_shard_map( +fn shard_map_to_hash( nshards: u32, shard_map: *mut *mut c_char, ) -> HashMap { @@ -116,6 +125,11 @@ fn parse_shard_map( pub extern "C" fn communicator_worker_config_reload( proc_handle: &'static CommunicatorWorkerProcessStruct<'static>, file_cache_size: u64, + shard_map: *mut *mut c_char, + nshards: u32, ) { proc_handle.cache.resize_file_cache(file_cache_size as u32); + + let shard_map = shard_map_to_hash(nshards, shard_map); + proc_handle.update_shard_map(shard_map); } diff --git a/pgxn/neon/communicator_new.c b/pgxn/neon/communicator_new.c index b809358c45..cb0bbc5ee0 100644 --- a/pgxn/neon/communicator_new.c +++ 
b/pgxn/neon/communicator_new.c @@ -22,6 +22,7 @@ #endif #include "access/xlog_internal.h" #include "access/xlogutils.h" +#include "common/hashfn.h" #include "executor/instrument.h" #include "miscadmin.h" #include "postmaster/bgworker.h" @@ -39,7 +40,9 @@ #include "storage/spin.h" #include "tcop/tcopprot.h" +#include "bitmap.h" #include "communicator_new.h" +#include "hll.h" #include "neon.h" #include "neon_perf_counters.h" #include "pagestore_client.h" @@ -98,7 +101,19 @@ typedef struct CommunicatorShmemPerBackendData typedef struct CommunicatorShmemData { - int dummy; + /* + * Estimation of working set size. + * + * Note that this is not protected by any locks. That's sloppy, but works + * fine in practice. To "add" a value to the HLL state, we just overwrite + * one of the timestamps. Calculating the estimate reads all the values, but + * it also doesn't depend on seeing a consistent snapshot of the values. We + * could get bogus results if accessing the TimestampTz was not atomic, but + * it on any 64-bit platforms we care about it is, and even if we observed a + * torn read every now and then, it wouldn't affect the overall estimate + * much. + */ + HyperLogLogState wss_estimation; CommunicatorShmemPerBackendData backends[]; /* MaxProcs */ @@ -128,9 +143,12 @@ static bool bounce_needed(void *buffer); static void *bounce_buf(void); static void *bounce_write_if_needed(void *buffer); +static void pump_logging(struct LoggingReceiver *logging); PGDLLEXPORT void communicator_new_bgworker_main(Datum main_arg); static void communicator_new_backend_exit(int code, Datum arg); +static char *print_neon_io_request(NeonIORequest *request); + /* * Request ID assignment. 
* @@ -168,6 +186,9 @@ pg_init_communicator_new(void) { BackgroundWorker bgw; + if (!neon_use_communicator_worker) + return; + if (pageserver_connstring[0] == '\0' && pageserver_grpc_urls[0] == '\0') { /* running with local storage */ @@ -195,6 +216,9 @@ communicator_new_shmem_size(void) size_t size = 0; int num_request_slots; + if (!neon_use_communicator_worker) + return 0; + size += MAXALIGN( offsetof(CommunicatorShmemData, backends) + MaxProcs * sizeof(CommunicatorShmemPerBackendData) @@ -209,13 +233,16 @@ communicator_new_shmem_size(void) } void -communicator_new_shmem_request(void) +CommunicatorNewShmemRequest(void) { + if (!neon_use_communicator_worker) + return; + RequestAddinShmemSpace(communicator_new_shmem_size()); } void -communicator_new_shmem_startup(void) +CommunicatorNewShmemInit(void) { bool found; int pipefd[2]; @@ -226,6 +253,9 @@ communicator_new_shmem_startup(void) uint64 initial_file_cache_size; uint64 max_file_cache_size; + if (!neon_use_communicator_worker) + return; + rc = pipe(pipefd); if (rc != 0) ereport(ERROR, @@ -248,6 +278,9 @@ communicator_new_shmem_startup(void) shmem_ptr = (char *) shmem_ptr + communicator_size; shmem_size -= communicator_size; + /* Initialize hyper-log-log structure for estimating working set size */ + initSHLL(&communicator_shmem_ptr->wss_estimation); + for (int i = 0; i < MaxProcs; i++) { InitSharedLatch(&communicator_shmem_ptr->backends[i].io_completion_latch); @@ -273,12 +306,10 @@ communicator_new_shmem_startup(void) void communicator_new_bgworker_main(Datum main_arg) { - char **connstrs; - shardno_t num_shards; - struct LoggingState *logging; - char errbuf[1000]; - int elevel; + char **connstrings; + ShardMap shard_map; uint64 file_cache_size; + struct LoggingReceiver *logging; const struct CommunicatorWorkerProcessStruct *proc_handle; /* @@ -306,25 +337,63 @@ communicator_new_bgworker_main(Datum main_arg) BackgroundWorkerUnblockSignals(); - get_shard_map(&connstrs, &num_shards); + if 
(!parse_shard_map(pageserver_grpc_urls, &shard_map)) + { + /* shouldn't happen, as the GUC was verified already */ + elog(FATAL, "could not parse neon.pageserver_grpc_urls"); + } + connstrings = palloc(shard_map.num_shards * sizeof(char *)); + for (int i = 0; i < shard_map.num_shards; i++) + connstrings[i] = shard_map.connstring[i]; - logging = configure_logging(); + /* + * By default, INFO messages are not printed to the log. We want + * `tracing::info!` messages emitted from the communicator to be printed, + * however, so increase the log level. + * + * XXX: This overrides any user-set value from the config file. That's not + * great, but on the other hand, there should be little reason for user to + * control the verbosity of the communicator. It's not too verbose by + * default. + */ + SetConfigOption("log_min_messages", "INFO", PGC_SUSET, PGC_S_OVERRIDE); + logging = communicator_worker_configure_logging(); + + elog(LOG, "launching worker process threads"); proc_handle = communicator_worker_process_launch( cis, neon_tenant, neon_timeline, neon_auth_token, - connstrs, - num_shards, + connstrings, + shard_map.num_shards, + neon_stripe_size, lfc_path, file_cache_size); + pfree(connstrings); cis = NULL; + if (proc_handle == NULL) + { + /* + * Something went wrong. Before exiting, forward any log messages that + * might've been generated during the failed launch. + */ + pump_logging(logging); + + elog(PANIC, "failure launching threads"); + } elog(LOG, "communicator threads started"); for (;;) { - int32 rc; + ResetLatch(MyLatch); + + /* + * Forward any log messages from the Rust threads into the normal + * Postgres logging facility. 
+ */ + pump_logging(logging); CHECK_FOR_INTERRUPTS(); @@ -337,40 +406,99 @@ communicator_new_bgworker_main(Datum main_arg) file_cache_size = lfc_size_limit * (1024 * 1024 / BLCKSZ); if (file_cache_size < 100) file_cache_size = 100; - communicator_worker_config_reload(proc_handle, file_cache_size); - } - for (;;) - { - rc = pump_logging(logging, (uint8 *) errbuf, sizeof(errbuf), &elevel); - if (rc == 0) + /* Reload pageserver URLs */ + if (!parse_shard_map(pageserver_grpc_urls, &shard_map)) { - /* nothing to do */ - break; - } - else if (rc == 1) - { - /* Because we don't want to exit on error */ - if (elevel == ERROR) - elevel = LOG; - if (elevel == INFO) - elevel = LOG; - elog(elevel, "[COMMUNICATOR] %s", errbuf); - } - else if (rc == -1) - { - elog(ERROR, "logging channel was closed unexpectedly"); + /* shouldn't happen, as the GUC was verified already */ + elog(FATAL, "could not parse neon.pageserver_grpc_urls"); } + connstrings = palloc(shard_map.num_shards * sizeof(char *)); + for (int i = 0; i < shard_map.num_shards; i++) + connstrings[i] = shard_map.connstring[i]; + + communicator_worker_config_reload(proc_handle, + file_cache_size, + connstrings, + shard_map.num_shards); + pfree(connstrings); } (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0, PG_WAIT_EXTENSION); - ResetLatch(MyLatch); } } +static void +pump_logging(struct LoggingReceiver *logging) +{ + char errbuf[1000]; + int elevel; + int32 rc; + static uint64_t last_dropped_event_count = 0; + uint64_t dropped_event_count; + uint64_t dropped_now; + + for (;;) + { + rc = communicator_worker_poll_logging(logging, + errbuf, + sizeof(errbuf), + &elevel, + &dropped_event_count); + if (rc == 0) + { + /* nothing to do */ + break; + } + else if (rc == 1) + { + /* Because we don't want to exit on error */ + + if (message_level_is_interesting(elevel)) + { + /* + * Prevent interrupts while cleaning up. 
+ * + * (Not sure if this is required, but all the error handlers + * in Postgres that are installed as sigsetjmp() targets do + * this, so let's follow the example) + */ + HOLD_INTERRUPTS(); + + errstart(elevel, TEXTDOMAIN); + errmsg_internal("[COMMUNICATOR] %s", errbuf); + EmitErrorReport(); + FlushErrorState(); + + /* Now we can allow interrupts again */ + RESUME_INTERRUPTS(); + } + } + else if (rc == -1) + { + elog(ERROR, "logging channel was closed unexpectedly"); + } + } + + /* + * If the queue was full at any time since the last time we reported it, + * report how many messages were lost. We do this outside the loop, so + * that if the logging system is clogged, we don't exacerbate it by + * printing lots of warnings about dropped messages. + */ + dropped_now = dropped_event_count - last_dropped_event_count; + if (dropped_now != 0) + { + elog(WARNING, "%lu communicator log messages were dropped because the log buffer was full", + (unsigned long) dropped_now); + last_dropped_event_count = dropped_event_count; + } +} + + /* * Callbacks from the rust code, in the communicator process. 
* @@ -543,6 +671,45 @@ communicator_new_cache_contains(NRelFileInfo rinfo, ForkNumber forkNum, blockno); } +/* Dump a list of blocks in the LFC, for use in prewarming later */ +FileCacheState * +communicator_new_get_lfc_state(size_t max_entries) +{ + struct FileCacheIterator iter; + FileCacheState* fcs; + uint8 *bitmap; + /* TODO: Max(max_entries, ) */ + size_t n_entries = max_entries; + size_t state_size = FILE_CACHE_STATE_SIZE_FOR_CHUNKS(n_entries, 1); + size_t n_pages = 0; + + fcs = (FileCacheState *) palloc0(state_size); + SET_VARSIZE(fcs, state_size); + fcs->magic = FILE_CACHE_STATE_MAGIC; + fcs->chunk_size_log = 0; + fcs->n_chunks = n_entries; + bitmap = FILE_CACHE_STATE_BITMAP(fcs); + + bcomm_cache_iterate_begin(my_bs, &iter); + while (n_pages < max_entries && bcomm_cache_iterate_next(my_bs, &iter)) + { + BufferTag tag; + + BufTagInit(tag, iter.rel_number, iter.fork_number, iter.block_number, iter.spc_oid, iter.db_oid); + fcs->chunks[n_pages] = tag; + n_pages++; + } + + /* fill bitmap. TODO: memset would be more efficient, but this is a silly format anyway */ + for (size_t i = 0; i < n_pages; i++) + { + BITMAP_SET(bitmap, i); + } + fcs->n_pages = n_pages; + + return fcs; +} + /* * Drain all in-flight requests from the queue. * @@ -605,7 +772,7 @@ start_request(NeonIORequest *request, struct NeonIOResult *immediate_result_p) if (request_idx == -1) { /* -1 means the request was satisfied immediately. 
*/ - elog(DEBUG4, "communicator request %lu was satisfied immediately", request->rel_exists.request_id); + elog(DEBUG4, "communicator request %s was satisfied immediately", print_neon_io_request(request)); return -1; } Assert(request_idx == my_next_slot_idx); @@ -615,7 +782,8 @@ start_request(NeonIORequest *request, struct NeonIOResult *immediate_result_p) inflight_requests[num_inflight_requests] = request_idx; num_inflight_requests++; - elog(LOG, "started communicator request %lu at slot %d", request->rel_exists.request_id, request_idx); + elog(LOG, "started communicator request %s at slot %d", print_neon_io_request(request), request_idx); + return request_idx; } @@ -740,6 +908,19 @@ communicator_new_read_at_lsnv(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumbe } }; + { + BufferTag tag; + + CopyNRelFileInfoToBufTag(tag, rinfo); + tag.forkNum = forkNum; + for (int i = 0; i < nblocks; i++) + { + tag.blockNum = blockno; + addSHLL(&communicator_shmem_ptr->wss_estimation, + hash_bytes((uint8_t *) &tag, sizeof(tag))); + } + } + elog(DEBUG5, "getpagev called for rel %u/%u/%u.%u block %u (%u blocks)", RelFileInfoFmt(rinfo), forkNum, blockno, nblocks); @@ -832,6 +1013,9 @@ retry: elog(DEBUG1, "read from local cache file was superseded by concurrent update"); goto retry; } + + pgBufferUsage.file_cache.hits += nblocks; + return; } Assert(request_idx == my_next_slot_idx); @@ -841,6 +1025,12 @@ retry: inflight_requests[num_inflight_requests] = request_idx; num_inflight_requests++; + /* + * XXX: If some blocks were in cache but not others, we count all blocks + * as a cache miss. 
+ */ + pgBufferUsage.file_cache.misses += nblocks; + wait_request_completion(request_idx, &result); Assert(num_inflight_requests == 1); Assert(inflight_requests[0] == request_idx); @@ -931,10 +1121,58 @@ communicator_new_dbsize(Oid dbNode) } int -communicator_new_read_slru_segment(SlruKind kind, int64 segno, void *buffer) +communicator_new_read_slru_segment( + SlruKind kind, + uint32_t segno, + neon_request_lsns *request_lsns, + const char* path) { - /* TODO */ - elog(ERROR, "not implemented"); + NeonIOResult result = {}; + NeonIORequest request = { + .tag = NeonIORequest_ReadSlruSegment, + .read_slru_segment = { + .request_id = assign_request_id(), + .slru_kind = kind, + .segment_number = segno, + .request_lsn = request_lsns->request_lsn, + } + }; + int nblocks = -1; + char *temp_path = bounce_buf(); + + if (path == NULL) { + elog(ERROR, "read_slru_segment called with NULL path"); + return -1; + } + + strlcpy(temp_path, path, BLCKSZ); + request.read_slru_segment.destination_file_path.ptr = (uint8_t *) temp_path; + + elog(DEBUG5, "readslrusegment called for kind=%u, segno=%u, file_path=\"%s\"", + kind, segno, request.read_slru_segment.destination_file_path.ptr); + + /* FIXME: see `request_lsns` in main_loop.rs for why this is needed */ + XLogSetAsyncXactLSN(request_lsns->request_lsn); + + perform_request(&request, &result); + + switch (result.tag) + { + case NeonIOResult_ReadSlruSegment: + nblocks = result.read_slru_segment; + break; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read slru segment, kind=%u, segno=%u: %s", + kind, segno, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for read SLRU operation: %d", result.tag); + break; + } + + return nblocks; } /* Write requests */ @@ -1058,7 +1296,7 @@ communicator_new_rel_zeroextend(NRelFileInfo rinfo, ForkNumber forkNum, BlockNum } void -communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum) 
+communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum, XLogRecPtr lsn) { NeonIORequest request = { .tag = NeonIORequest_RelCreate, @@ -1068,10 +1306,14 @@ communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum) .db_oid = NInfoGetDbOid(rinfo), .rel_number = NInfoGetRelNumber(rinfo), .fork_number = forkNum, + .lsn = lsn, } }; NeonIOResult result; + /* FIXME: see `request_lsns` in main_loop.rs for why this is needed */ + XLogSetAsyncXactLSN(lsn); + perform_request(&request, &result); switch (result.tag) { @@ -1090,7 +1332,7 @@ communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum) } void -communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks) +communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks, XLogRecPtr lsn) { NeonIORequest request = { .tag = NeonIORequest_RelTruncate, @@ -1101,10 +1343,14 @@ communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumbe .rel_number = NInfoGetRelNumber(rinfo), .fork_number = forkNum, .nblocks = nblocks, + .lsn = lsn, } }; NeonIOResult result; + /* FIXME: see `request_lsns` in main_loop.rs for why this is needed */ + XLogSetAsyncXactLSN(lsn); + perform_request(&request, &result); switch (result.tag) { @@ -1123,7 +1369,7 @@ communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumbe } void -communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum) +communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum, XLogRecPtr lsn) { NeonIORequest request = { .tag = NeonIORequest_RelUnlink, @@ -1133,10 +1379,14 @@ communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum) .db_oid = NInfoGetDbOid(rinfo), .rel_number = NInfoGetRelNumber(rinfo), .fork_number = forkNum, + .lsn = lsn, } }; NeonIOResult result; + /* FIXME: see `request_lsns` in main_loop.rs for why this is needed */ + XLogSetAsyncXactLSN(lsn); + perform_request(&request, &result); switch (result.tag) { @@ 
-1154,6 +1404,181 @@ communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum) } } +void +communicator_new_update_cached_rel_size(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks, XLogRecPtr lsn) +{ + NeonIORequest request = { + .tag = NeonIORequest_UpdateCachedRelSize, + .update_cached_rel_size = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + .nblocks = nblocks, + .lsn = lsn, + } + }; + NeonIOResult result; + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_WriteOK: + return; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not update cached size for rel %u/%u/%u.%u: %s", + RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for UpdateCachedRelSize operation: %d", result.tag); + break; + } +} + +/* Debugging functions */ + +static char * +print_neon_io_request(NeonIORequest *request) +{ + static char buf[100]; + + switch (request->tag) + { + case NeonIORequest_Empty: + snprintf(buf, sizeof(buf), "Empty"); + return buf; + case NeonIORequest_RelExists: + { + CRelExistsRequest *r = &request->rel_exists; + + snprintf(buf, sizeof(buf), "RelExists: req " UINT64_FORMAT " rel %u/%u/%u.%u", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number); + return buf; + } + case NeonIORequest_RelSize: + { + CRelSizeRequest *r = &request->rel_size; + + snprintf(buf, sizeof(buf), "RelSize: req " UINT64_FORMAT " rel %u/%u/%u.%u", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number); + return buf; + } + case NeonIORequest_GetPageV: + { + CGetPageVRequest *r = &request->get_page_v; + + snprintf(buf, sizeof(buf), "GetPageV: req " UINT64_FORMAT " rel %u/%u/%u.%u blks %d-%d", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number, r->block_number, 
r->block_number + r->nblocks); + return buf; + } + case NeonIORequest_ReadSlruSegment: + { + CReadSlruSegmentRequest *r = &request->read_slru_segment; + + snprintf(buf, sizeof(buf), "ReadSlruSegment: req " UINT64_FORMAT " slrukind=%u, segno=%u, lsn=%X/%X, file_path=\"%s\"", + r->request_id, + r->slru_kind, + r->segment_number, + LSN_FORMAT_ARGS(r->request_lsn), + r->destination_file_path.ptr); + return buf; + } + case NeonIORequest_PrefetchV: + { + CPrefetchVRequest *r = &request->prefetch_v; + + snprintf(buf, sizeof(buf), "PrefetchV: req " UINT64_FORMAT " rel %u/%u/%u.%u blks %d-%d", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number, r->block_number, r->block_number + r->nblocks); + return buf; + } + case NeonIORequest_DbSize: + { + CDbSizeRequest *r = &request->db_size; + + snprintf(buf, sizeof(buf), "PrefetchV: req " UINT64_FORMAT " db %u", + r->request_id, r->db_oid); + return buf; + } + case NeonIORequest_WritePage: + { + CWritePageRequest *r = &request->write_page; + + snprintf(buf, sizeof(buf), "WritePage: req " UINT64_FORMAT " rel %u/%u/%u.%u blk %u lsn %X/%X", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number, r->block_number, + LSN_FORMAT_ARGS(r->lsn)); + return buf; + } + case NeonIORequest_RelExtend: + { + CRelExtendRequest *r = &request->rel_extend; + + snprintf(buf, sizeof(buf), "RelExtend: req " UINT64_FORMAT " rel %u/%u/%u.%u blk %u lsn %X/%X", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number, r->block_number, + LSN_FORMAT_ARGS(r->lsn)); + return buf; + } + case NeonIORequest_RelZeroExtend: + { + CRelZeroExtendRequest *r = &request->rel_zero_extend; + + snprintf(buf, sizeof(buf), "RelZeroExtend: req " UINT64_FORMAT " rel %u/%u/%u.%u blks %u-%u lsn %X/%X", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number, r->block_number, r->block_number + r->nblocks, + LSN_FORMAT_ARGS(r->lsn)); + return buf; + } + case NeonIORequest_RelCreate: + { + CRelCreateRequest *r = 
&request->rel_create; + + snprintf(buf, sizeof(buf), "RelCreate: req " UINT64_FORMAT " rel %u/%u/%u.%u", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number); + return buf; + } + case NeonIORequest_RelTruncate: + { + CRelTruncateRequest *r = &request->rel_truncate; + + snprintf(buf, sizeof(buf), "RelTruncate: req " UINT64_FORMAT " rel %u/%u/%u.%u blks %u", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number, r->nblocks); + return buf; + } + case NeonIORequest_RelUnlink: + { + CRelUnlinkRequest *r = &request->rel_unlink; + + snprintf(buf, sizeof(buf), "RelUnlink: req " UINT64_FORMAT " rel %u/%u/%u.%u", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number); + return buf; + } + case NeonIORequest_UpdateCachedRelSize: + { + CUpdateCachedRelSizeRequest *r = &request->update_cached_rel_size; + + snprintf(buf, sizeof(buf), "UpdateCachedRelSize: req " UINT64_FORMAT " rel %u/%u/%u.%u blocks: %u", + r->request_id, + r->spc_oid, r->db_oid, r->rel_number, r->fork_number, + r->nblocks); + return buf; + } + } + snprintf(buf, sizeof(buf), "Unknown request type %d", (int) request->tag); + return buf; +} + + /* * The worker process can read / write shared buffers directly. 
But if smgrread() or * smgrwrite() is called with a private temporary buffer, we need to copy it to the @@ -1188,3 +1613,14 @@ bounce_write_if_needed(void *buffer) memcpy(p, buffer, BLCKSZ); return p; } + +int32 +communicator_new_approximate_working_set_size_seconds(time_t duration, bool reset) +{ + int32 dc; + + dc = (int32) estimateSHLL(&communicator_shmem_ptr->wss_estimation, duration); + if (reset) + memset(communicator_shmem_ptr->wss_estimation.regs, 0, sizeof(communicator_shmem_ptr->wss_estimation.regs)); + return dc; +} diff --git a/pgxn/neon/communicator_new.h b/pgxn/neon/communicator_new.h index bbab3f8f5a..8de2fab57a 100644 --- a/pgxn/neon/communicator_new.h +++ b/pgxn/neon/communicator_new.h @@ -12,6 +12,7 @@ #ifndef COMMUNICATOR_NEW_H #define COMMUNICATOR_NEW_H +#include "lfc_prewarm.h" #include "neon_pgversioncompat.h" #include "storage/buf_internals.h" @@ -20,8 +21,8 @@ /* initialization at postmaster startup */ extern void pg_init_communicator_new(void); -extern void communicator_new_shmem_request(void); -extern void communicator_new_shmem_startup(void); +extern void CommunicatorNewShmemRequest(void); +extern void CommunicatorNewShmemInit(void); /* initialization at backend startup */ extern void communicator_new_init(void); @@ -38,8 +39,12 @@ extern void communicator_new_prefetch_register_bufferv(NRelFileInfo rinfo, ForkN BlockNumber nblocks); extern bool communicator_new_cache_contains(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno); -extern int communicator_new_read_slru_segment(SlruKind kind, int64 segno, - void *buffer); +extern int communicator_new_read_slru_segment( + SlruKind kind, + uint32_t segno, + neon_request_lsns *request_lsns, + const char *path +); /* Write requests, to keep the caches up-to-date */ extern void communicator_new_write_page(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno, @@ -49,8 +54,14 @@ extern void communicator_new_rel_extend(NRelFileInfo rinfo, ForkNumber forkNum, extern void 
communicator_new_rel_zeroextend(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno, BlockNumber nblocks, XLogRecPtr lsn); -extern void communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum); -extern void communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks); -extern void communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum); +extern void communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum, XLogRecPtr lsn); +extern void communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks, XLogRecPtr lsn); +extern void communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum, XLogRecPtr lsn); +extern void communicator_new_update_cached_rel_size(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks, XLogRecPtr lsn); + +/* other functions */ +extern int32 communicator_new_approximate_working_set_size_seconds(time_t duration, bool reset); + +extern FileCacheState *communicator_new_get_lfc_state(size_t max_entries); #endif /* COMMUNICATOR_NEW_H */ diff --git a/pgxn/neon/file_cache.c b/pgxn/neon/file_cache.c index e5e2bb9183..7c408c82da 100644 --- a/pgxn/neon/file_cache.c +++ b/pgxn/neon/file_cache.c @@ -134,15 +134,6 @@ typedef struct FileCacheEntry #define N_COND_VARS 64 #define CV_WAIT_TIMEOUT 10 -#define MAX_PREWARM_WORKERS 8 - -typedef struct PrewarmWorkerState -{ - uint32 prewarmed_pages; - uint32 skipped_pages; - TimestampTz completed; -} PrewarmWorkerState; - typedef struct FileCacheControl { uint64 generation; /* generation is needed to handle correct hash @@ -162,46 +153,50 @@ typedef struct FileCacheControl dlist_head lru; /* double linked list for LRU replacement * algorithm */ dlist_head holes; /* double linked list of punched holes */ - HyperLogLogState wss_estimation; /* estimation of working set size */ + ConditionVariable cv[N_COND_VARS]; /* turnstile of condition variables */ - PrewarmWorkerState 
prewarm_workers[MAX_PREWARM_WORKERS]; - size_t n_prewarm_workers; - size_t n_prewarm_entries; - size_t total_prewarm_pages; - size_t prewarm_batch; - bool prewarm_active; - bool prewarm_canceled; - dsm_handle prewarm_lfc_state_handle; + + /* + * Estimation of working set size. + * + * This is not guarded by the lock. No locking is needed because all the + * writes to the "registers" are simple 64-bit stores, to update a + * timestamp. We assume that: + * + * - 64-bit stores are atomic. We could enforce that by using + * pg_atomic_uint64 instead of TimestampTz as the datatype in hll.h, but + * for now we just rely on it implicitly. + * + * - Even if they're not, and there is a race between two stores, it + * doesn't matter much which one wins because they're both updating the + * register with the current timestamp. Or you have a race between + * resetting the register and updating it, in which case it also doesn't + * matter much which one wins. + * + * - If they're not atomic, you might get an occasional "torn write" if + * you're really unlucky, but we tolerate that too. It just means that + * the estimate will be a little off, until the register is updated + * again. 
+ */ + HyperLogLogState wss_estimation; } FileCacheControl; -#define FILE_CACHE_STATE_MAGIC 0xfcfcfcfc - -#define FILE_CACHE_STATE_BITMAP(fcs) ((uint8*)&(fcs)->chunks[(fcs)->n_chunks]) -#define FILE_CACHE_STATE_SIZE_FOR_CHUNKS(n_chunks) (sizeof(FileCacheState) + (n_chunks)*sizeof(BufferTag) + (((n_chunks) * lfc_blocks_per_chunk)+7)/8) -#define FILE_CACHE_STATE_SIZE(fcs) (sizeof(FileCacheState) + (fcs->n_chunks)*sizeof(BufferTag) + (((fcs->n_chunks) << fcs->chunk_size_log)+7)/8) - static HTAB *lfc_hash; static int lfc_desc = -1; static LWLockId lfc_lock; int lfc_max_size; int lfc_size_limit; -static int lfc_prewarm_limit; -static int lfc_prewarm_batch; static int lfc_chunk_size_log = MAX_BLOCKS_PER_CHUNK_LOG; static int lfc_blocks_per_chunk = MAX_BLOCKS_PER_CHUNK; char *lfc_path; static uint64 lfc_generation; static FileCacheControl *lfc_ctl; -static bool lfc_do_prewarm; -static shmem_startup_hook_type prev_shmem_startup_hook; -#if PG_VERSION_NUM>=150000 -static shmem_request_hook_type prev_shmem_request_hook; -#endif bool lfc_store_prefetch_result; bool lfc_prewarm_update_ws_estimation; -bool AmPrewarmWorker; +bool lfc_do_prewarm; +bool lfc_prewarm_cancel; #define LFC_ENABLED() (lfc_ctl->limit != 0) @@ -230,7 +225,7 @@ lfc_switch_off(void) { int fd; - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (LFC_ENABLED()) { @@ -297,7 +292,7 @@ lfc_maybe_disabled(void) static bool lfc_ensure_opened(void) { - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (lfc_generation != lfc_ctl->generation) { @@ -318,20 +313,17 @@ lfc_ensure_opened(void) return true; } -static void -lfc_shmem_startup(void) +void +LfcShmemInit(void) { bool found; static HASHCTL info; - Assert(!neon_enable_new_communicator); + if (neon_use_communicator_worker) + return; - if (prev_shmem_startup_hook) - { - prev_shmem_startup_hook(); - } - - LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); + if (lfc_max_size <= 0) + return; lfc_ctl = 
(FileCacheControl *) ShmemInitStruct("lfc", sizeof(FileCacheControl), &found); if (!found) @@ -376,19 +368,16 @@ lfc_shmem_startup(void) ConditionVariableInit(&lfc_ctl->cv[i]); } - LWLockRelease(AddinShmemInitLock); } -static void -lfc_shmem_request(void) +void +LfcShmemRequest(void) { -#if PG_VERSION_NUM>=150000 - if (prev_shmem_request_hook) - prev_shmem_request_hook(); -#endif - - RequestAddinShmemSpace(sizeof(FileCacheControl) + hash_estimate_size(SIZE_MB_TO_CHUNKS(lfc_max_size) + 1, FILE_CACHE_ENRTY_SIZE)); - RequestNamedLWLockTranche("lfc_lock", 1); + if (lfc_max_size > 0) + { + RequestAddinShmemSpace(sizeof(FileCacheControl) + hash_estimate_size(SIZE_MB_TO_CHUNKS(lfc_max_size) + 1, FILE_CACHE_ENRTY_SIZE)); + RequestNamedLWLockTranche("lfc_lock", 1); + } } static bool @@ -522,7 +511,6 @@ lfc_init(void) if (!process_shared_preload_libraries_in_progress) neon_log(ERROR, "Neon module should be loaded via shared_preload_libraries"); - DefineCustomBoolVariable("neon.store_prefetch_result_in_lfc", "Immediately store received prefetch result in LFC", NULL, @@ -594,49 +582,13 @@ lfc_init(void) lfc_check_chunk_size, lfc_change_chunk_size, NULL); - - DefineCustomIntVariable("neon.file_cache_prewarm_limit", - "Maximal number of prewarmed chunks", - NULL, - &lfc_prewarm_limit, - INT_MAX, /* no limit by default */ - 0, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - DefineCustomIntVariable("neon.file_cache_prewarm_batch", - "Number of pages retrivied by prewarm from page server", - NULL, - &lfc_prewarm_batch, - 64, - 1, - INT_MAX, - PGC_SIGHUP, - 0, - NULL, - NULL, - NULL); - - if (lfc_max_size == 0) - return; - - if (neon_enable_new_communicator) - return; - - prev_shmem_startup_hook = shmem_startup_hook; - shmem_startup_hook = lfc_shmem_startup; -#if PG_VERSION_NUM>=150000 - prev_shmem_request_hook = shmem_request_hook; - shmem_request_hook = lfc_shmem_request; -#else - lfc_shmem_request(); -#endif } +/* + * Dump a list of pages that are currently in the LFC 
+ * + * This is used to get a snapshot that can be used to prewarm the LFC later. + */ FileCacheState* lfc_get_state(size_t max_entries) { @@ -654,7 +606,7 @@ lfc_get_state(size_t max_entries) uint8* bitmap; size_t n_pages = 0; size_t n_entries = Min(max_entries, lfc_ctl->used - lfc_ctl->pinned); - size_t state_size = FILE_CACHE_STATE_SIZE_FOR_CHUNKS(n_entries); + size_t state_size = FILE_CACHE_STATE_SIZE_FOR_CHUNKS(n_entries, lfc_blocks_per_chunk); fcs = (FileCacheState*)palloc0(state_size); SET_VARSIZE(fcs, state_size); fcs->magic = FILE_CACHE_STATE_MAGIC; @@ -688,270 +640,6 @@ lfc_get_state(size_t max_entries) return fcs; } -/* - * Prewarm LFC cache to the specified state. It uses lfc_prefetch function to load prewarmed page without hoilding shared buffer lock - * and avoid race conditions with other backends. - */ -void -lfc_prewarm(FileCacheState* fcs, uint32 n_workers) -{ - size_t fcs_chunk_size_log; - size_t n_entries; - size_t prewarm_batch = Min(lfc_prewarm_batch, readahead_buffer_size); - size_t fcs_size; - dsm_segment *seg; - BackgroundWorkerHandle* bgw_handle[MAX_PREWARM_WORKERS]; - - Assert(!neon_enable_new_communicator); - - if (!lfc_ensure_opened()) - return; - - if (prewarm_batch == 0 || lfc_prewarm_limit == 0 || n_workers == 0) - { - elog(LOG, "LFC: prewarm is disabled"); - return; - } - - if (n_workers > MAX_PREWARM_WORKERS) - { - elog(ERROR, "LFC: Too much prewarm workers, maximum is %d", MAX_PREWARM_WORKERS); - } - - if (fcs == NULL || fcs->n_chunks == 0) - { - elog(LOG, "LFC: nothing to prewarm"); - return; - } - - if (fcs->magic != FILE_CACHE_STATE_MAGIC) - { - elog(ERROR, "LFC: Invalid file cache state magic: %X", fcs->magic); - } - - fcs_size = VARSIZE(fcs); - if (FILE_CACHE_STATE_SIZE(fcs) != fcs_size) - { - elog(ERROR, "LFC: Invalid file cache state size: %u vs. 
%u", (unsigned)FILE_CACHE_STATE_SIZE(fcs), VARSIZE(fcs)); - } - - fcs_chunk_size_log = fcs->chunk_size_log; - if (fcs_chunk_size_log > MAX_BLOCKS_PER_CHUNK_LOG) - { - elog(ERROR, "LFC: Invalid chunk size log: %u", fcs->chunk_size_log); - } - - n_entries = Min(fcs->n_chunks, lfc_prewarm_limit); - Assert(n_entries != 0); - - LWLockAcquire(lfc_lock, LW_EXCLUSIVE); - - /* Do not prewarm more entries than LFC limit */ - if (lfc_ctl->limit <= lfc_ctl->size) - { - elog(LOG, "LFC: skip prewarm because LFC is already filled"); - LWLockRelease(lfc_lock); - return; - } - - if (lfc_ctl->prewarm_active) - { - LWLockRelease(lfc_lock); - elog(ERROR, "LFC: skip prewarm because another prewarm is still active"); - } - lfc_ctl->n_prewarm_entries = n_entries; - lfc_ctl->n_prewarm_workers = n_workers; - lfc_ctl->prewarm_active = true; - lfc_ctl->prewarm_canceled = false; - lfc_ctl->prewarm_batch = prewarm_batch; - memset(lfc_ctl->prewarm_workers, 0, n_workers*sizeof(PrewarmWorkerState)); - - LWLockRelease(lfc_lock); - - /* Calculate total number of pages to be prewarmed */ - lfc_ctl->total_prewarm_pages = fcs->n_pages; - - seg = dsm_create(fcs_size, 0); - memcpy(dsm_segment_address(seg), fcs, fcs_size); - lfc_ctl->prewarm_lfc_state_handle = dsm_segment_handle(seg); - - /* Spawn background workers */ - for (uint32 i = 0; i < n_workers; i++) - { - BackgroundWorker worker = {0}; - - worker.bgw_flags = BGWORKER_SHMEM_ACCESS; - worker.bgw_start_time = BgWorkerStart_ConsistentState; - worker.bgw_restart_time = BGW_NEVER_RESTART; - strcpy(worker.bgw_library_name, "neon"); - strcpy(worker.bgw_function_name, "lfc_prewarm_main"); - snprintf(worker.bgw_name, BGW_MAXLEN, "LFC prewarm worker %d", i+1); - strcpy(worker.bgw_type, "LFC prewarm worker"); - worker.bgw_main_arg = Int32GetDatum(i); - /* must set notify PID to wait for shutdown */ - worker.bgw_notify_pid = MyProcPid; - - if (!RegisterDynamicBackgroundWorker(&worker, &bgw_handle[i])) - { - ereport(LOG, - 
(errcode(ERRCODE_INSUFFICIENT_RESOURCES), - errmsg("LFC: registering dynamic bgworker prewarm failed"), - errhint("Consider increasing the configuration parameter \"%s\".", "max_worker_processes"))); - n_workers = i; - lfc_ctl->prewarm_canceled = true; - break; - } - } - - for (uint32 i = 0; i < n_workers; i++) - { - bool interrupted; - do - { - interrupted = false; - PG_TRY(); - { - BgwHandleStatus status = WaitForBackgroundWorkerShutdown(bgw_handle[i]); - if (status != BGWH_STOPPED && status != BGWH_POSTMASTER_DIED) - { - elog(LOG, "LFC: Unexpected status of prewarm worker termination: %d", status); - } - } - PG_CATCH(); - { - elog(LOG, "LFC: cancel prewarm"); - lfc_ctl->prewarm_canceled = true; - interrupted = true; - } - PG_END_TRY(); - } while (interrupted); - - if (!lfc_ctl->prewarm_workers[i].completed) - { - /* Background worker doesn't set completion time: it means that it was abnormally terminated */ - elog(LOG, "LFC: prewarm worker %d failed", i+1); - /* Set completion time to prevent get_prewarm_info from considering this worker as active */ - lfc_ctl->prewarm_workers[i].completed = GetCurrentTimestamp(); - } - } - dsm_detach(seg); - - LWLockAcquire(lfc_lock, LW_EXCLUSIVE); - lfc_ctl->prewarm_active = false; - LWLockRelease(lfc_lock); -} - -void -lfc_prewarm_main(Datum main_arg) -{ - size_t snd_idx = 0, rcv_idx = 0; - size_t n_sent = 0, n_received = 0; - size_t fcs_chunk_size_log; - size_t max_prefetch_pages; - size_t prewarm_batch; - size_t n_workers; - dsm_segment *seg; - FileCacheState* fcs; - uint8* bitmap; - BufferTag tag; - PrewarmWorkerState* ws; - uint32 worker_id = DatumGetInt32(main_arg); - - Assert(!neon_enable_new_communicator); - - AmPrewarmWorker = true; - - pqsignal(SIGTERM, die); - BackgroundWorkerUnblockSignals(); - - seg = dsm_attach(lfc_ctl->prewarm_lfc_state_handle); - if (seg == NULL) - ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not map dynamic shared memory segment"))); - - fcs = 
(FileCacheState*) dsm_segment_address(seg); - prewarm_batch = lfc_ctl->prewarm_batch; - fcs_chunk_size_log = fcs->chunk_size_log; - n_workers = lfc_ctl->n_prewarm_workers; - max_prefetch_pages = lfc_ctl->n_prewarm_entries << fcs_chunk_size_log; - ws = &lfc_ctl->prewarm_workers[worker_id]; - bitmap = FILE_CACHE_STATE_BITMAP(fcs); - - /* enable prefetch in LFC */ - lfc_store_prefetch_result = true; - lfc_do_prewarm = true; /* Flag for lfc_prefetch preventing replacement of existed entries if LFC cache is full */ - - elog(LOG, "LFC: worker %d start prewarming", worker_id); - while (!lfc_ctl->prewarm_canceled) - { - if (snd_idx < max_prefetch_pages) - { - if ((snd_idx >> fcs_chunk_size_log) % n_workers != worker_id) - { - /* If there are multiple workers, split chunks between them */ - snd_idx += 1 << fcs_chunk_size_log; - } - else - { - if (BITMAP_ISSET(bitmap, snd_idx)) - { - tag = fcs->chunks[snd_idx >> fcs_chunk_size_log]; - tag.blockNum += snd_idx & ((1 << fcs_chunk_size_log) - 1); - if (!lfc_cache_contains(BufTagGetNRelFileInfo(tag), tag.forkNum, tag.blockNum)) - { - (void)communicator_prefetch_register_bufferv(tag, NULL, 1, NULL); - n_sent += 1; - } - else - { - ws->skipped_pages += 1; - BITMAP_CLR(bitmap, snd_idx); - } - } - snd_idx += 1; - } - } - if (n_sent >= n_received + prewarm_batch || snd_idx == max_prefetch_pages) - { - if (n_received == n_sent && snd_idx == max_prefetch_pages) - { - break; - } - if ((rcv_idx >> fcs_chunk_size_log) % n_workers != worker_id) - { - /* Skip chunks processed by other workers */ - rcv_idx += 1 << fcs_chunk_size_log; - continue; - } - - /* Locate next block to prefetch */ - while (!BITMAP_ISSET(bitmap, rcv_idx)) - { - rcv_idx += 1; - } - tag = fcs->chunks[rcv_idx >> fcs_chunk_size_log]; - tag.blockNum += rcv_idx & ((1 << fcs_chunk_size_log) - 1); - if (communicator_prefetch_receive(tag)) - { - ws->prewarmed_pages += 1; - } - else - { - ws->skipped_pages += 1; - } - rcv_idx += 1; - n_received += 1; - } - } - /* No need to 
perform prefetch cleanup here because prewarm worker will be terminated and - * connection to PS dropped just after return from this function. - */ - Assert(n_sent == n_received || lfc_ctl->prewarm_canceled); - elog(LOG, "LFC: worker %d complete prewarming: loaded %ld pages", worker_id, (long)n_received); - lfc_ctl->prewarm_workers[worker_id].completed = GetCurrentTimestamp(); -} - void lfc_invalidate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks) { @@ -959,7 +647,7 @@ lfc_invalidate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks) FileCacheEntry *entry; uint32 hash; - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return; @@ -1006,7 +694,7 @@ lfc_cache_contains(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno) bool found = false; uint32 hash; - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return false; @@ -1043,7 +731,7 @@ lfc_cache_containsv(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, uint32 hash; int i = 0; - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return 0; @@ -1152,7 +840,7 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, int blocks_read = 0; int buf_offset = 0; - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return -1; @@ -1162,6 +850,13 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, CriticalAssert(BufTagGetRelNumber(&tag) != InvalidRelFileNumber); + /* Update working set size estimate for the blocks */ + for (int i = 0; i < nblocks; i++) + { + tag.blockNum = blkno + i; + addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, 
sizeof(tag))); + } + /* * For every chunk that has blocks we're interested in, we * 1. get the chunk header @@ -1240,14 +935,6 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, } entry = hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_FIND, NULL); - - /* Approximate working set for the blocks assumed in this entry */ - for (int i = 0; i < blocks_in_chunk; i++) - { - tag.blockNum = blkno + i; - addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, sizeof(tag))); - } - if (entry == NULL) { /* Pages are not cached */ @@ -1465,7 +1152,7 @@ lfc_init_new_entry(FileCacheEntry* entry, uint32 hash) /* Can't add this chunk - we don't have the space for it */ hash_search_with_hash_value(lfc_hash, &entry->key, hash, HASH_REMOVE, NULL); - lfc_ctl->prewarm_canceled = true; /* cancel prewarm if LFC limit is reached */ + lfc_prewarm_cancel = true; /* cancel prewarm if LFC limit is reached */ return false; } @@ -1520,15 +1207,21 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, int chunk_offs = BLOCK_TO_CHUNK_OFF(blkno); - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return false; CopyNRelFileInfoToBufTag(tag, rinfo); + CriticalAssert(BufTagGetRelNumber(&tag) != InvalidRelFileNumber); tag.forkNum = forknum; - CriticalAssert(BufTagGetRelNumber(&tag) != InvalidRelFileNumber); + /* Update working set size estimate for the blocks */ + if (lfc_prewarm_update_ws_estimation) + { + tag.blockNum = blkno; + addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, sizeof(tag))); + } tag.blockNum = blkno - chunk_offs; hash = get_hash_value(lfc_hash, &tag); @@ -1546,19 +1239,13 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, if (lwlsn > lsn) { - elog(DEBUG1, "Skip LFC write for %d because LwLSN=%X/%X is greater than not_nodified_since LSN %X/%X", + elog(DEBUG1, "Skip LFC write for %u 
because LwLSN=%X/%X is greater than not_nodified_since LSN %X/%X", blkno, LSN_FORMAT_ARGS(lwlsn), LSN_FORMAT_ARGS(lsn)); LWLockRelease(lfc_lock); return false; } entry = hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_ENTER, &found); - - if (lfc_prewarm_update_ws_estimation) - { - tag.blockNum = blkno; - addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, sizeof(tag))); - } if (found) { state = GET_STATE(entry, chunk_offs); @@ -1667,15 +1354,21 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, uint32 entry_offset; int buf_offset = 0; - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return; CopyNRelFileInfoToBufTag(tag, rinfo); + CriticalAssert(BufTagGetRelNumber(&tag) != InvalidRelFileNumber); tag.forkNum = forkNum; - CriticalAssert(BufTagGetRelNumber(&tag) != InvalidRelFileNumber); + /* Update working set size estimate for the blocks */ + for (int i = 0; i < nblocks; i++) + { + tag.blockNum = blkno + i; + addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, sizeof(tag))); + } LWLockAcquire(lfc_lock, LW_EXCLUSIVE); @@ -1716,14 +1409,6 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, cv = &lfc_ctl->cv[hash % N_COND_VARS]; entry = hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_ENTER, &found); - - /* Approximate working set for the blocks assumed in this entry */ - for (int i = 0; i < blocks_in_chunk; i++) - { - tag.blockNum = blkno + i; - addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, sizeof(tag))); - } - if (found) { /* @@ -2159,136 +1844,21 @@ local_cache_pages(PG_FUNCTION_ARGS) SRF_RETURN_DONE(funcctx); } -PG_FUNCTION_INFO_V1(approximate_working_set_size_seconds); -Datum -approximate_working_set_size_seconds(PG_FUNCTION_ARGS) +/* + * Internal implementation of the approximate_working_set_size_seconds() + * function. 
+ */ +int32 +lfc_approximate_working_set_size_seconds(time_t duration, bool reset) { - if (neon_enable_new_communicator) - elog(ERROR, "TODO: not implemented"); - - if (lfc_size_limit != 0) - { - int32 dc; - time_t duration = PG_ARGISNULL(0) ? (time_t)-1 : PG_GETARG_INT32(0); - LWLockAcquire(lfc_lock, LW_SHARED); - dc = (int32) estimateSHLL(&lfc_ctl->wss_estimation, duration); - LWLockRelease(lfc_lock); - PG_RETURN_INT32(dc); - } - PG_RETURN_NULL(); -} - -PG_FUNCTION_INFO_V1(approximate_working_set_size); - -Datum -approximate_working_set_size(PG_FUNCTION_ARGS) -{ - if (neon_enable_new_communicator) - elog(ERROR, "TODO: not implemented"); - - if (lfc_size_limit != 0) - { - int32 dc; - bool reset = PG_GETARG_BOOL(0); - LWLockAcquire(lfc_lock, reset ? LW_EXCLUSIVE : LW_SHARED); - dc = (int32) estimateSHLL(&lfc_ctl->wss_estimation, (time_t)-1); - if (reset) - memset(lfc_ctl->wss_estimation.regs, 0, sizeof lfc_ctl->wss_estimation.regs); - LWLockRelease(lfc_lock); - PG_RETURN_INT32(dc); - } - PG_RETURN_NULL(); -} - -PG_FUNCTION_INFO_V1(get_local_cache_state); - -Datum -get_local_cache_state(PG_FUNCTION_ARGS) -{ - size_t max_entries = PG_ARGISNULL(0) ? 
lfc_prewarm_limit : PG_GETARG_INT32(0); - FileCacheState* fcs; - - if (neon_enable_new_communicator) - elog(ERROR, "TODO: not implemented"); - - fcs = lfc_get_state(max_entries); - - if (fcs != NULL) - PG_RETURN_BYTEA_P((bytea*)fcs); - else - PG_RETURN_NULL(); -} - -PG_FUNCTION_INFO_V1(prewarm_local_cache); - -Datum -prewarm_local_cache(PG_FUNCTION_ARGS) -{ - bytea* state = PG_GETARG_BYTEA_PP(0); - uint32 n_workers = PG_GETARG_INT32(1); - FileCacheState* fcs; - - if (neon_enable_new_communicator) - elog(ERROR, "TODO: not implemented"); - - fcs = (FileCacheState*)state; - lfc_prewarm(fcs, n_workers); - - PG_RETURN_NULL(); -} - -PG_FUNCTION_INFO_V1(get_prewarm_info); - -Datum -get_prewarm_info(PG_FUNCTION_ARGS) -{ - Datum values[4]; - bool nulls[4]; - TupleDesc tupdesc; - uint32 prewarmed_pages = 0; - uint32 skipped_pages = 0; - uint32 active_workers = 0; - uint32 total_pages; - size_t n_workers; - - if (neon_enable_new_communicator) - elog(ERROR, "TODO: not implemented"); + int32 dc; if (lfc_size_limit == 0) - PG_RETURN_NULL(); + return -1; - LWLockAcquire(lfc_lock, LW_SHARED); - if (!lfc_ctl || lfc_ctl->n_prewarm_workers == 0) - { - LWLockRelease(lfc_lock); - PG_RETURN_NULL(); - } - n_workers = lfc_ctl->n_prewarm_workers; - total_pages = lfc_ctl->total_prewarm_pages; - for (size_t i = 0; i < n_workers; i++) - { - PrewarmWorkerState* ws = &lfc_ctl->prewarm_workers[i]; - prewarmed_pages += ws->prewarmed_pages; - skipped_pages += ws->skipped_pages; - active_workers += ws->completed != 0; - } - LWLockRelease(lfc_lock); - - tupdesc = CreateTemplateTupleDesc(4); - TupleDescInitEntry(tupdesc, (AttrNumber) 1, "total_pages", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 2, "prewarmed_pages", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 3, "skipped_pages", INT4OID, -1, 0); - TupleDescInitEntry(tupdesc, (AttrNumber) 4, "active_workers", INT4OID, -1, 0); - tupdesc = BlessTupleDesc(tupdesc); - - MemSet(nulls, 0, sizeof(nulls)); - - values[0] = 
Int32GetDatum(total_pages); - values[1] = Int32GetDatum(prewarmed_pages); - values[2] = Int32GetDatum(skipped_pages); - values[3] = Int32GetDatum(active_workers); - - PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); + dc = (int32) estimateSHLL(&lfc_ctl->wss_estimation, duration); + if (reset) + memset(lfc_ctl->wss_estimation.regs, 0, sizeof lfc_ctl->wss_estimation.regs); + return dc; } - diff --git a/pgxn/neon/file_cache.h b/pgxn/neon/file_cache.h index 1b6ff36164..fd79eee532 100644 --- a/pgxn/neon/file_cache.h +++ b/pgxn/neon/file_cache.h @@ -11,18 +11,9 @@ #ifndef FILE_CACHE_h #define FILE_CACHE_h -#include "neon_pgversioncompat.h" +#include "lfc_prewarm.h" -typedef struct FileCacheState -{ - int32 vl_len_; /* varlena header (do not touch directly!) */ - uint32 magic; - uint32 n_chunks; - uint32 n_pages; - uint16 chunk_size_log; - BufferTag chunks[FLEXIBLE_ARRAY_MEMBER]; - /* followed by bitmap */ -} FileCacheState; +#include "neon_pgversioncompat.h" /* GUCs */ extern bool lfc_store_prefetch_result; @@ -30,6 +21,9 @@ extern int lfc_max_size; extern int lfc_size_limit; extern char *lfc_path; +extern bool lfc_do_prewarm; +extern bool lfc_prewarm_cancel; + /* functions for local file cache */ extern void lfc_invalidate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks); extern void lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, @@ -48,9 +42,12 @@ extern void lfc_init(void); extern bool lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, const void* buffer, XLogRecPtr lsn); extern FileCacheState* lfc_get_state(size_t max_entries); -extern void lfc_prewarm(FileCacheState* fcs, uint32 n_workers); -PGDLLEXPORT void lfc_prewarm_main(Datum main_arg); +extern int32 lfc_approximate_working_set_size_seconds(time_t duration, bool reset); + + +extern int32 lfc_approximate_working_set_size_seconds(time_t duration, bool reset); + static inline bool lfc_read(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, 
diff --git a/pgxn/neon/lfc_prewarm.c b/pgxn/neon/lfc_prewarm.c new file mode 100644 index 0000000000..2acb805f9d --- /dev/null +++ b/pgxn/neon/lfc_prewarm.c @@ -0,0 +1,654 @@ +/*------------------------------------------------------------------------- + * + * lfc_prewarm.c + * Functions related to LFC prewarming + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "bitmap.h" +#include "communicator.h" +#include "communicator_new.h" +#include "file_cache.h" +#include "lfc_prewarm.h" +#include "neon.h" +#include "pagestore_client.h" + +#include "funcapi.h" +#include "miscadmin.h" +#include "postmaster/bgworker.h" +#include "storage/dsm.h" +#include "tcop/tcopprot.h" +#include "utils/timestamp.h" + +#define MAX_PREWARM_WORKERS 8 + +typedef struct PrewarmWorkerState +{ + uint32 prewarmed_pages; + uint32 skipped_pages; + TimestampTz completed; +} PrewarmWorkerState; + +typedef struct PrewarmControl +{ + /* -1 when not using workers, 0 when no prewarm has been performed */ + size_t n_prewarm_workers; + size_t total_prewarm_pages; + bool prewarm_active; + bool prewarm_canceled; + + /* These are used in the non-worker mode */ + uint32 prewarmed_pages; + uint32 skipped_pages; + TimestampTz completed; + + /* These are used with workers */ + PrewarmWorkerState prewarm_workers[MAX_PREWARM_WORKERS]; + dsm_handle prewarm_lfc_state_handle; + size_t prewarm_batch; + size_t n_prewarm_entries; +} PrewarmControl; + +static PrewarmControl *prewarm_ctl; + +static int lfc_prewarm_limit; +static int lfc_prewarm_batch; + +static LWLockId prewarm_lock; + +bool AmPrewarmWorker; + +static void lfc_prewarm_with_workers(FileCacheState *fcs, uint32 n_workers); +static void lfc_prewarm_with_async_requests(FileCacheState *fcs); +PGDLLEXPORT void lfc_prewarm_main(Datum main_arg); 
+ +void +pg_init_prewarm(void) +{ + DefineCustomIntVariable("neon.file_cache_prewarm_limit", + "Maximal number of prewarmed chunks", + NULL, + &lfc_prewarm_limit, + INT_MAX, /* no limit by default */ + 0, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); + + DefineCustomIntVariable("neon.file_cache_prewarm_batch", + "Number of pages retrivied by prewarm from page server", + NULL, + &lfc_prewarm_batch, + 64, + 1, + INT_MAX, + PGC_SIGHUP, + 0, + NULL, + NULL, + NULL); +} + +static size_t +PrewarmShmemSize(void) +{ + return sizeof(PrewarmControl); +} + +void +PrewarmShmemRequest(void) +{ + RequestAddinShmemSpace(PrewarmShmemSize()); + RequestNamedLWLockTranche("prewarm_lock", 1); +} + +void +PrewarmShmemInit(void) +{ + bool found; + + prewarm_ctl = (PrewarmControl *) ShmemInitStruct("Prewarmer shmem state", + PrewarmShmemSize(), + &found); + if (!found) + { + /* it's zeroed already */ + + prewarm_lock = (LWLockId) GetNamedLWLockTranche("prewarm_lock"); + } +} + +static void +validate_fcs(FileCacheState *fcs) +{ + size_t fcs_size; +#if 0 + size_t fcs_chunk_size_log; +#endif + + if (fcs->magic != FILE_CACHE_STATE_MAGIC) + { + elog(ERROR, "LFC: Invalid file cache state magic: %X", fcs->magic); + } + + fcs_size = VARSIZE(fcs); + if (FILE_CACHE_STATE_SIZE(fcs) != fcs_size) + { + elog(ERROR, "LFC: Invalid file cache state size: %u vs. %u", (unsigned)FILE_CACHE_STATE_SIZE(fcs), VARSIZE(fcs)); + } + + /* FIXME */ +#if 0 + fcs_chunk_size_log = fcs->chunk_size_log; + if (fcs_chunk_size_log > MAX_BLOCKS_PER_CHUNK_LOG) + { + elog(ERROR, "LFC: Invalid chunk size log: %u", fcs->chunk_size_log); + } +#endif +} + +/* + * Prewarm LFC cache to the specified state. It uses lfc_prefetch function to + * load prewarmed page without holding shared buffer lock and avoid race + * conditions with other backends. 
+ */ +void +lfc_prewarm_with_workers(FileCacheState *fcs, uint32 n_workers) +{ + size_t n_entries; + size_t prewarm_batch = Min(lfc_prewarm_batch, readahead_buffer_size); + size_t fcs_size = VARSIZE(fcs); + dsm_segment *seg; + BackgroundWorkerHandle* bgw_handle[MAX_PREWARM_WORKERS]; + + Assert(!neon_use_communicator_worker); + + if (prewarm_batch == 0 || lfc_prewarm_limit == 0 || n_workers == 0) + { + elog(LOG, "LFC: prewarm is disabled"); + return; + } + + if (n_workers > MAX_PREWARM_WORKERS) + { + elog(ERROR, "LFC: too many prewarm workers, maximum is %d", MAX_PREWARM_WORKERS); + } + + if (fcs == NULL || fcs->n_chunks == 0) + { + elog(LOG, "LFC: nothing to prewarm"); + return; + } + + n_entries = Min(fcs->n_chunks, lfc_prewarm_limit); + Assert(n_entries != 0); + + LWLockAcquire(prewarm_lock, LW_EXCLUSIVE); + + /* Do not prewarm more entries than LFC limit */ + /* FIXME */ +#if 0 + if (prewarm_ctl->limit <= prewarm_ctl->size) + { + elog(LOG, "LFC: skip prewarm because LFC is already filled"); + LWLockRelease(prewarm_lock); + return; + } +#endif + + if (prewarm_ctl->prewarm_active) + { + LWLockRelease(prewarm_lock); + elog(ERROR, "LFC: skip prewarm because another prewarm is still active"); + } + prewarm_ctl->n_prewarm_entries = n_entries; + prewarm_ctl->n_prewarm_workers = n_workers; + prewarm_ctl->prewarm_active = true; + prewarm_ctl->prewarm_canceled = false; + prewarm_ctl->prewarm_batch = prewarm_batch; + memset(prewarm_ctl->prewarm_workers, 0, n_workers*sizeof(PrewarmWorkerState)); + + /* Calculate total number of pages to be prewarmed */ + prewarm_ctl->total_prewarm_pages = fcs->n_pages; + + LWLockRelease(prewarm_lock); + + seg = dsm_create(fcs_size, 0); + memcpy(dsm_segment_address(seg), fcs, fcs_size); + prewarm_ctl->prewarm_lfc_state_handle = dsm_segment_handle(seg); + + /* Spawn background workers */ + for (uint32 i = 0; i < n_workers; i++) + { + BackgroundWorker worker = {0}; + + worker.bgw_flags = BGWORKER_SHMEM_ACCESS; + worker.bgw_start_time = 
BgWorkerStart_ConsistentState; + worker.bgw_restart_time = BGW_NEVER_RESTART; + strcpy(worker.bgw_library_name, "neon"); + strcpy(worker.bgw_function_name, "lfc_prewarm_main"); + snprintf(worker.bgw_name, BGW_MAXLEN, "LFC prewarm worker %d", i+1); + strcpy(worker.bgw_type, "LFC prewarm worker"); + worker.bgw_main_arg = Int32GetDatum(i); + /* must set notify PID to wait for shutdown */ + worker.bgw_notify_pid = MyProcPid; + + if (!RegisterDynamicBackgroundWorker(&worker, &bgw_handle[i])) + { + ereport(LOG, + (errcode(ERRCODE_INSUFFICIENT_RESOURCES), + errmsg("LFC: registering dynamic bgworker prewarm failed"), + errhint("Consider increasing the configuration parameter \"%s\".", "max_worker_processes"))); + n_workers = i; + prewarm_ctl->prewarm_canceled = true; + break; + } + } + + for (uint32 i = 0; i < n_workers; i++) + { + bool interrupted; + do + { + interrupted = false; + PG_TRY(); + { + BgwHandleStatus status = WaitForBackgroundWorkerShutdown(bgw_handle[i]); + if (status != BGWH_STOPPED && status != BGWH_POSTMASTER_DIED) + { + elog(LOG, "LFC: Unexpected status of prewarm worker termination: %d", status); + } + } + PG_CATCH(); + { + elog(LOG, "LFC: cancel prewarm"); + prewarm_ctl->prewarm_canceled = true; + interrupted = true; + } + PG_END_TRY(); + } while (interrupted); + + if (!prewarm_ctl->prewarm_workers[i].completed) + { + /* Background worker doesn't set completion time: it means that it was abnormally terminated */ + elog(LOG, "LFC: prewarm worker %d failed", i+1); + /* Set completion time to prevent get_prewarm_info from considering this worker as active */ + prewarm_ctl->prewarm_workers[i].completed = GetCurrentTimestamp(); + } + } + dsm_detach(seg); + + LWLockAcquire(prewarm_lock, LW_EXCLUSIVE); + prewarm_ctl->prewarm_active = false; + LWLockRelease(prewarm_lock); +} + + +void +lfc_prewarm_main(Datum main_arg) +{ + size_t snd_idx = 0, rcv_idx = 0; + size_t n_sent = 0, n_received = 0; + size_t fcs_chunk_size_log; + size_t max_prefetch_pages; + size_t 
prewarm_batch; + size_t n_workers; + dsm_segment *seg; + FileCacheState* fcs; + uint8* bitmap; + BufferTag tag; + PrewarmWorkerState* ws; + uint32 worker_id = DatumGetInt32(main_arg); + + Assert(!neon_use_communicator_worker); + + AmPrewarmWorker = true; + + pqsignal(SIGTERM, die); + BackgroundWorkerUnblockSignals(); + + seg = dsm_attach(prewarm_ctl->prewarm_lfc_state_handle); + if (seg == NULL) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not map dynamic shared memory segment"))); + + fcs = (FileCacheState*) dsm_segment_address(seg); + prewarm_batch = prewarm_ctl->prewarm_batch; + fcs_chunk_size_log = fcs->chunk_size_log; + n_workers = prewarm_ctl->n_prewarm_workers; + max_prefetch_pages = prewarm_ctl->n_prewarm_entries << fcs_chunk_size_log; + ws = &prewarm_ctl->prewarm_workers[worker_id]; + bitmap = FILE_CACHE_STATE_BITMAP(fcs); + + /* enable prefetch in LFC */ + lfc_store_prefetch_result = true; + lfc_do_prewarm = true; /* Flag for lfc_prefetch preventing replacement of existed entries if LFC cache is full */ + + elog(LOG, "LFC: worker %d start prewarming", worker_id); + while (!prewarm_ctl->prewarm_canceled) + { + if (snd_idx < max_prefetch_pages) + { + if ((snd_idx >> fcs_chunk_size_log) % n_workers != worker_id) + { + /* If there are multiple workers, split chunks between them */ + snd_idx += 1 << fcs_chunk_size_log; + } + else + { + if (BITMAP_ISSET(bitmap, snd_idx)) + { + tag = fcs->chunks[snd_idx >> fcs_chunk_size_log]; + tag.blockNum += snd_idx & ((1 << fcs_chunk_size_log) - 1); + if (!lfc_cache_contains(BufTagGetNRelFileInfo(tag), tag.forkNum, tag.blockNum)) + { + (void) communicator_prefetch_register_bufferv(tag, NULL, 1, NULL); + n_sent += 1; + } + else + { + ws->skipped_pages += 1; + BITMAP_CLR(bitmap, snd_idx); + } + } + snd_idx += 1; + } + } + if (n_sent >= n_received + prewarm_batch || snd_idx == max_prefetch_pages) + { + if (n_received == n_sent && snd_idx == max_prefetch_pages) + { + break; + } + if 
((rcv_idx >> fcs_chunk_size_log) % n_workers != worker_id) + { + /* Skip chunks processed by other workers */ + rcv_idx += 1 << fcs_chunk_size_log; + continue; + } + + /* Locate next block to prefetch */ + while (!BITMAP_ISSET(bitmap, rcv_idx)) + { + rcv_idx += 1; + } + tag = fcs->chunks[rcv_idx >> fcs_chunk_size_log]; + tag.blockNum += rcv_idx & ((1 << fcs_chunk_size_log) - 1); + if (communicator_prefetch_receive(tag)) + { + ws->prewarmed_pages += 1; + } + else + { + ws->skipped_pages += 1; + } + rcv_idx += 1; + n_received += 1; + } + } + /* No need to perform prefetch cleanup here because prewarm worker will be terminated and + * connection to PS dropped just after return from this function. + */ + Assert(n_sent == n_received || prewarm_ctl->prewarm_canceled); + elog(LOG, "LFC: worker %d complete prewarming: loaded %ld pages", worker_id, (long)n_received); + prewarm_ctl->prewarm_workers[worker_id].completed = GetCurrentTimestamp(); +} + +/* + * Prewarm LFC cache to the specified state. Uses the new communicator + * + * FIXME: Is there a race condition because we're not holding Postgres + * buffer manager locks? 
+ */ +static void +lfc_prewarm_with_async_requests(FileCacheState *fcs) +{ + size_t n_entries; + uint8 *bitmap; + uint64 bitno; + int blocks_per_chunk; + + Assert(neon_use_communicator_worker); + + if (lfc_prewarm_limit == 0) + { + elog(LOG, "LFC: prewarm is disabled"); + return; + } + + if (fcs == NULL || fcs->n_chunks == 0) + { + elog(LOG, "LFC: nothing to prewarm"); + return; + } + + n_entries = Min(fcs->n_chunks, lfc_prewarm_limit); + Assert(n_entries != 0); + + LWLockAcquire(prewarm_lock, LW_EXCLUSIVE); + + /* Do not prewarm more entries than LFC limit */ + /* FIXME */ +#if 0 + if (prewarm_ctl->limit <= prewarm_ctl->size) + { + elog(LOG, "LFC: skip prewarm because LFC is already filled"); + LWLockRelease(prewarm_lock); + return; + } +#endif + + if (prewarm_ctl->prewarm_active) + { + LWLockRelease(prewarm_lock); + elog(ERROR, "LFC: skip prewarm because another prewarm is still active"); + } + prewarm_ctl->n_prewarm_entries = n_entries; + prewarm_ctl->n_prewarm_workers = -1; + prewarm_ctl->prewarm_active = true; + prewarm_ctl->prewarm_canceled = false; + + /* Calculate total number of pages to be prewarmed */ + prewarm_ctl->total_prewarm_pages = fcs->n_pages; + + LWLockRelease(prewarm_lock); + + elog(LOG, "LFC: start prewarming"); + lfc_do_prewarm = true; + lfc_prewarm_cancel = false; + + bitmap = FILE_CACHE_STATE_BITMAP(fcs); + + blocks_per_chunk = 1 << fcs->chunk_size_log; + + bitno = 0; + for (uint32 chunkno = 0; chunkno < fcs->n_chunks; chunkno++) + { + BufferTag *chunk_tag = &fcs->chunks[chunkno]; + BlockNumber request_startblkno = InvalidBlockNumber; + BlockNumber request_endblkno; + + if (lfc_prewarm_cancel) + { + prewarm_ctl->prewarm_canceled = true; + break; + } + + /* take next chunk */ + for (int j = 0; j < blocks_per_chunk; j++) + { + BlockNumber blkno = chunk_tag->blockNum + j; + + if (BITMAP_ISSET(bitmap, bitno)) + { + if (request_startblkno != InvalidBlockNumber) + { + if (request_endblkno == blkno) + { + /* append this block to the request */ + 
request_endblkno++; + } + else + { + /* flush this request, and start new one */ + communicator_new_prefetch_register_bufferv( + BufTagGetNRelFileInfo(*chunk_tag), + chunk_tag->forkNum, + request_startblkno, + request_endblkno - request_startblkno + ); + request_startblkno = blkno; + request_endblkno = blkno + 1; + } + } + else + { + /* flush this request, if any, and start new one */ + if (request_startblkno != InvalidBlockNumber) + { + communicator_new_prefetch_register_bufferv( + BufTagGetNRelFileInfo(*chunk_tag), + chunk_tag->forkNum, + request_startblkno, + request_endblkno - request_startblkno + ); + } + request_startblkno = blkno; + request_endblkno = blkno + 1; + } + prewarm_ctl->prewarmed_pages += 1; + } + bitno++; + } + + /* flush this request */ + communicator_new_prefetch_register_bufferv( + BufTagGetNRelFileInfo(*chunk_tag), + chunk_tag->forkNum, + request_startblkno, + request_endblkno - request_startblkno + ); + request_startblkno = request_endblkno = InvalidBlockNumber; + } + + Assert(n_sent == n_received || prewarm_ctl->prewarm_canceled); + elog(LOG, "LFC: complete prewarming: loaded %lu pages", (unsigned long) prewarm_ctl->prewarmed_pages); + prewarm_ctl->completed = GetCurrentTimestamp(); + + LWLockAcquire(prewarm_lock, LW_EXCLUSIVE); + prewarm_ctl->prewarm_active = false; + LWLockRelease(prewarm_lock); +} + +PG_FUNCTION_INFO_V1(get_local_cache_state); + +Datum +get_local_cache_state(PG_FUNCTION_ARGS) +{ + size_t max_entries = PG_ARGISNULL(0) ? 
lfc_prewarm_limit : PG_GETARG_INT32(0); + FileCacheState* fcs; + + if (neon_use_communicator_worker) + fcs = communicator_new_get_lfc_state(max_entries); + else + fcs = lfc_get_state(max_entries); + + if (fcs != NULL) + PG_RETURN_BYTEA_P((bytea*)fcs); + else + PG_RETURN_NULL(); +} + +PG_FUNCTION_INFO_V1(prewarm_local_cache); + +Datum +prewarm_local_cache(PG_FUNCTION_ARGS) +{ + bytea* state = PG_GETARG_BYTEA_PP(0); + uint32 n_workers = PG_GETARG_INT32(1); + FileCacheState* fcs; + + fcs = (FileCacheState *)state; + validate_fcs(fcs); + + if (neon_use_communicator_worker) + lfc_prewarm_with_async_requests(fcs); + else + lfc_prewarm_with_workers(fcs, n_workers); + + PG_RETURN_NULL(); +} + +PG_FUNCTION_INFO_V1(get_prewarm_info); + +Datum +get_prewarm_info(PG_FUNCTION_ARGS) +{ + Datum values[4]; + bool nulls[4]; + TupleDesc tupdesc; + uint32 prewarmed_pages = 0; + uint32 skipped_pages = 0; + uint32 active_workers = 0; + uint32 total_pages; + + if (lfc_size_limit == 0) + PG_RETURN_NULL(); + + LWLockAcquire(prewarm_lock, LW_SHARED); + if (!prewarm_ctl || prewarm_ctl->n_prewarm_workers == 0) + { + LWLockRelease(prewarm_lock); + PG_RETURN_NULL(); + } + + if (prewarm_ctl->n_prewarm_workers == -1) + { + total_pages = prewarm_ctl->total_prewarm_pages; + prewarmed_pages = prewarm_ctl->prewarmed_pages; + skipped_pages = prewarm_ctl->prewarmed_pages; + active_workers = 1; + } + else + { + size_t n_workers; + + n_workers = prewarm_ctl->n_prewarm_workers; + total_pages = prewarm_ctl->total_prewarm_pages; + for (size_t i = 0; i < n_workers; i++) + { + PrewarmWorkerState *ws = &prewarm_ctl->prewarm_workers[i]; + + prewarmed_pages += ws->prewarmed_pages; + skipped_pages += ws->skipped_pages; + active_workers += ws->completed != 0; + } + } + LWLockRelease(prewarm_lock); + + tupdesc = CreateTemplateTupleDesc(4); + TupleDescInitEntry(tupdesc, (AttrNumber) 1, "total_pages", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 2, "prewarmed_pages", INT4OID, -1, 0); + 
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "skipped_pages", INT4OID, -1, 0); + TupleDescInitEntry(tupdesc, (AttrNumber) 4, "active_workers", INT4OID, -1, 0); + tupdesc = BlessTupleDesc(tupdesc); + + MemSet(nulls, 0, sizeof(nulls)); + + values[0] = Int32GetDatum(total_pages); + values[1] = Int32GetDatum(prewarmed_pages); + values[2] = Int32GetDatum(skipped_pages); + values[3] = Int32GetDatum(active_workers); + + PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls))); +} diff --git a/pgxn/neon/lfc_prewarm.h b/pgxn/neon/lfc_prewarm.h new file mode 100644 index 0000000000..09d224b1fc --- /dev/null +++ b/pgxn/neon/lfc_prewarm.h @@ -0,0 +1,39 @@ +/*------------------------------------------------------------------------- + * + * lfc_prewarm.h + * Local File Cache prewarmer + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ +#ifndef LFC_PREWARM_H +#define LFC_PREWARM_H + +#include "storage/buf_internals.h" + +typedef struct FileCacheState +{ + int32 vl_len_; /* varlena header (do not touch directly!) 
*/ + uint32 magic; + uint32 n_chunks; + uint32 n_pages; + uint16 chunk_size_log; + BufferTag chunks[FLEXIBLE_ARRAY_MEMBER]; + /* followed by bitmap */ +} FileCacheState; + +#define FILE_CACHE_STATE_MAGIC 0xfcfcfcfc + +#define FILE_CACHE_STATE_BITMAP(fcs) ((uint8*)&(fcs)->chunks[(fcs)->n_chunks]) +#define FILE_CACHE_STATE_SIZE_FOR_CHUNKS(n_chunks, blocks_per_chunk) (sizeof(FileCacheState) + (n_chunks)*sizeof(BufferTag) + (((n_chunks) * blocks_per_chunk)+7)/8) +#define FILE_CACHE_STATE_SIZE(fcs) (sizeof(FileCacheState) + (fcs->n_chunks)*sizeof(BufferTag) + (((fcs->n_chunks) << fcs->chunk_size_log)+7)/8) + +extern void pg_init_prewarm(void); +extern void PrewarmShmemRequest(void); +extern void PrewarmShmemInit(void); + +#endif /* LFC_PREWARM_H */ + + diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index ee17b5d33b..690dfd8635 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -80,19 +80,13 @@ int neon_protocol_version = 3; static int neon_compute_mode = 0; static int max_reconnect_attempts = 60; -static int stripe_size; +int neon_stripe_size; static int max_sockets; static int pageserver_response_log_timeout = 10000; /* 2.5 minutes. A bit higher than highest default TCP retransmission timeout */ static int pageserver_response_disconnect_timeout = 150000; -typedef struct -{ - char connstring[MAX_SHARDS][MAX_PAGESERVER_CONNSTRING_SIZE]; - size_t num_shards; -} ShardMap; - /* * PagestoreShmemState is kept in shared memory. It contains the connection * strings for each shard. @@ -111,6 +105,11 @@ typedef struct * has changed since last access, and to detect and retry copying the value if * the postmaster changes the value concurrently. (Postmaster doesn't have a * PGPROC entry and therefore cannot use LWLocks.) + * + * stripe_size is now also part of ShardMap, although it is defined by separate GUC. 
+ * Postgres doesn't provide any mechanism to enforce dependencies between GUCs, + * that is why we have to rely on order of GUC definition in config file. + * "neon.stripe_size" should be defined prior to "neon.pageserver_connstring" */ typedef struct { @@ -119,17 +118,13 @@ typedef struct ShardMap shard_map; } PagestoreShmemState; -#if PG_VERSION_NUM >= 150000 -static shmem_request_hook_type prev_shmem_request_hook = NULL; -#endif -static shmem_startup_hook_type prev_shmem_startup_hook; static PagestoreShmemState *pagestore_shared; static uint64 pagestore_local_counter = 0; typedef enum PSConnectionState { PS_Disconnected, /* no connection yet */ PS_Connecting_Startup, /* connection starting up */ - PS_Connecting_PageStream, /* negotiating pagestream */ + PS_Connecting_PageStream, /* negotiating pagestream */ PS_Connected, /* connected, pagestream established */ } PSConnectionState; @@ -193,8 +188,8 @@ PagestoreShmemIsValid(void) * not valid, returns false. The contents of *result are undefined in * that case, and must not be relied on. */ -static bool -ParseShardMap(const char *connstr, ShardMap *result) +bool +parse_shard_map(const char *connstr, ShardMap *result) { const char *p; int nshards = 0; @@ -237,7 +232,10 @@ ParseShardMap(const char *connstr, ShardMap *result) p = sep + 1; } if (result) + { result->num_shards = nshards; + result->stripe_size = neon_stripe_size; + } return true; } @@ -248,7 +246,7 @@ CheckPageserverConnstring(char **newval, void **extra, GucSource source) { char *p = *newval; - return ParseShardMap(p, NULL); + return parse_shard_map(p, NULL); } static void @@ -257,9 +255,15 @@ AssignPageserverConnstring(const char *newval, void *extra) { /* * 'neon.pageserver_connstring' is ignored if the new communicator is used. * In that case, the shard map is loaded from 'neon.pageserver_grpc_urls' - * instead. + * instead, and that happens in the communicator process only. 
*/ - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) + return; + + /* + * Only postmaster updates the copy in shared memory. + */ + if (!PagestoreShmemIsValid() || IsUnderPostmaster) return; AssignShardMap(newval); @@ -272,36 +276,15 @@ CheckPageserverGrpcUrls(char **newval, void **extra, GucSource source) { char *p = *newval; - return ParseShardMap(p, NULL); + return parse_shard_map(p, NULL); } -static void -AssignPageserverGrpcUrls(const char *newval, void *extra) -{ - /* - * 'neon.pageserver_grpc-urls' is ignored if the new communicator is not - * used. In that case, the shard map is loaded from 'neon.pageserver_connstring' - instead. - */ - if (!neon_enable_new_communicator) - return; - - AssignShardMap(newval); -} - - static void AssignShardMap(const char *newval) { ShardMap shard_map; - /* - * Only postmaster updates the copy in shared memory. - */ - if (!PagestoreShmemIsValid() || IsUnderPostmaster) - return; - - if (!ParseShardMap(newval, &shard_map)) + if (!parse_shard_map(newval, &shard_map)) { /* * shouldn't happen, because we already checked the value in @@ -324,54 +307,6 @@ AssignShardMap(const char *newval) } } -/* Return a copy of the whole shard map from shared memory */ -void -get_shard_map(char ***connstrs_p, shardno_t *num_shards_p) -{ - uint64 begin_update_counter; - uint64 end_update_counter; - ShardMap *shard_map = &pagestore_shared->shard_map; - shardno_t num_shards; - char *buf; - char **connstrs; - - buf = palloc(MAX_SHARDS*MAX_PAGESERVER_CONNSTRING_SIZE); - connstrs = palloc(sizeof(char *) * MAX_SHARDS); - - /* - * Postmaster can update the shared memory values concurrently, in which - * case we would copy a garbled mix of the old and new values. We will - * detect it because the counter's won't match, and retry. But it's - * important that we don't do anything within the retry-loop that would - * depend on the string having valid contents. 
- */ - do - { - char *p; - - begin_update_counter = pg_atomic_read_u64(&pagestore_shared->begin_update_counter); - end_update_counter = pg_atomic_read_u64(&pagestore_shared->end_update_counter); - - num_shards = shard_map->num_shards; - - p = buf; - for (int i = 0; i < Min(num_shards, MAX_SHARDS); i++) - { - strlcpy(p, shard_map->connstring[i], MAX_PAGESERVER_CONNSTRING_SIZE); - connstrs[i] = p; - p += MAX_PAGESERVER_CONNSTRING_SIZE; - } - - pg_memory_barrier(); - } - while (begin_update_counter != end_update_counter - || begin_update_counter != pg_atomic_read_u64(&pagestore_shared->begin_update_counter) - || end_update_counter != pg_atomic_read_u64(&pagestore_shared->end_update_counter)); - - *connstrs_p = connstrs; - *num_shards_p = num_shards; -} - /* * Get the current number of shards, and/or the connection string for a * particular shard from the shard map in shared memory. @@ -386,12 +321,13 @@ get_shard_map(char ***connstrs_p, shardno_t *num_shards_p) * last call, terminates all existing connections to all pageservers. 
*/ static void -load_shard_map(shardno_t shard_no, char *connstr_p, shardno_t *num_shards_p) +load_shard_map(shardno_t shard_no, char *connstr_p, shardno_t *num_shards_p, size_t* stripe_size_p) { uint64 begin_update_counter; uint64 end_update_counter; ShardMap *shard_map = &pagestore_shared->shard_map; shardno_t num_shards; + size_t stripe_size; /* * Postmaster can update the shared memory values concurrently, in which @@ -406,6 +342,7 @@ load_shard_map(shardno_t shard_no, char *connstr_p, shardno_t *num_shards_p) end_update_counter = pg_atomic_read_u64(&pagestore_shared->end_update_counter); num_shards = shard_map->num_shards; + stripe_size = shard_map->stripe_size; if (connstr_p && shard_no < MAX_SHARDS) strlcpy(connstr_p, shard_map->connstring[shard_no], MAX_PAGESERVER_CONNSTRING_SIZE); pg_memory_barrier(); @@ -440,6 +377,8 @@ load_shard_map(shardno_t shard_no, char *connstr_p, shardno_t *num_shards_p) if (num_shards_p) *num_shards_p = num_shards; + if (stripe_size_p) + *stripe_size_p = stripe_size; } #define MB (1024*1024) @@ -448,23 +387,24 @@ shardno_t get_shard_number(BufferTag *tag) { shardno_t n_shards; + size_t stripe_size; uint32 hash; - load_shard_map(0, NULL, &n_shards); + load_shard_map(0, NULL, &n_shards, &stripe_size); #if PG_MAJORVERSION_NUM < 16 hash = murmurhash32(tag->rnode.relNode); - hash = hash_combine(hash, murmurhash32(tag->blockNum / stripe_size)); + hash = hash_combine(hash, murmurhash32(tag->blockNum / neon_stripe_size)); #else hash = murmurhash32(tag->relNumber); - hash = hash_combine(hash, murmurhash32(tag->blockNum / stripe_size)); + hash = hash_combine(hash, murmurhash32(tag->blockNum / neon_stripe_size)); #endif return hash % n_shards; } static inline void -CLEANUP_AND_DISCONNECT(PageServer *shard) +CLEANUP_AND_DISCONNECT(PageServer *shard) { if (shard->wes_read) { @@ -486,7 +426,7 @@ CLEANUP_AND_DISCONNECT(PageServer *shard) * complete the connection (e.g. due to receiving an earlier cancellation * during connection start). 
* Returns true if successfully connected; false if the connection failed. - * + * * Throws errors in unrecoverable situations, or when this backend's query * is canceled. */ @@ -503,7 +443,7 @@ pageserver_connect(shardno_t shard_no, int elevel) * Note that connstr is used both during connection start, and when we * log the successful connection. */ - load_shard_map(shard_no, connstr, NULL); + load_shard_map(shard_no, connstr, NULL, NULL); switch (shard->state) { @@ -1375,18 +1315,12 @@ check_neon_id(char **newval, void **extra, GucSource source) return **newval == '\0' || HexDecodeString(id, *newval, 16); } -static Size -PagestoreShmemSize(void) -{ - return add_size(sizeof(PagestoreShmemState), NeonPerfCountersShmemSize()); -} -static bool +void PagestoreShmemInit(void) { bool found; - LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); pagestore_shared = ShmemInitStruct("libpagestore shared state", sizeof(PagestoreShmemState), &found); @@ -1396,46 +1330,13 @@ PagestoreShmemInit(void) pg_atomic_init_u64(&pagestore_shared->end_update_counter, 0); memset(&pagestore_shared->shard_map, 0, sizeof(ShardMap)); AssignPageserverConnstring(pageserver_connstring, NULL); - AssignPageserverGrpcUrls(pageserver_grpc_urls, NULL); } - - NeonPerfCountersShmemInit(); - - LWLockRelease(AddinShmemInitLock); - return found; } -static void -pagestore_shmem_startup_hook(void) +void +PagestoreShmemRequest(void) { - if (prev_shmem_startup_hook) - prev_shmem_startup_hook(); - - PagestoreShmemInit(); -} - -static void -pagestore_shmem_request(void) -{ -#if PG_VERSION_NUM >= 150000 - if (prev_shmem_request_hook) - prev_shmem_request_hook(); -#endif - - RequestAddinShmemSpace(PagestoreShmemSize()); -} - -static void -pagestore_prepare_shmem(void) -{ -#if PG_VERSION_NUM >= 150000 - prev_shmem_request_hook = shmem_request_hook; - shmem_request_hook = pagestore_shmem_request; -#else - pagestore_shmem_request(); -#endif - prev_shmem_startup_hook = shmem_startup_hook; - shmem_startup_hook = 
pagestore_shmem_startup_hook; + RequestAddinShmemSpace(sizeof(PagestoreShmemState)); } /* @@ -1444,8 +1345,6 @@ pagestore_prepare_shmem(void) void pg_init_libpagestore(void) { - pagestore_prepare_shmem(); - DefineCustomStringVariable("neon.pageserver_connstring", "connection string to the page server", NULL, @@ -1462,7 +1361,7 @@ pg_init_libpagestore(void) "", PGC_SIGHUP, 0, /* no flags required */ - CheckPageserverGrpcUrls, AssignPageserverGrpcUrls, NULL); + CheckPageserverGrpcUrls, NULL, NULL); DefineCustomStringVariable("neon.timeline_id", "Neon timeline_id the server is running on", @@ -1510,8 +1409,8 @@ pg_init_libpagestore(void) DefineCustomIntVariable("neon.stripe_size", "sharding stripe size", NULL, - &stripe_size, - 32768, 1, INT_MAX, + &neon_stripe_size, + 2048, 1, INT_MAX, PGC_SIGHUP, GUC_UNIT_BLOCKS, NULL, NULL, NULL); @@ -1605,8 +1504,6 @@ pg_init_libpagestore(void) 0, NULL, NULL, NULL); - relsize_hash_init(); - if (page_server != NULL) neon_log(ERROR, "libpagestore already loaded"); diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index ab51abc1de..59ecd9ab1c 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -23,6 +23,7 @@ #include "replication/walsender.h" #include "storage/ipc.h" #include "storage/proc.h" +#include "storage/ipc.h" #include "funcapi.h" #include "access/htup_details.h" #include "utils/builtins.h" @@ -52,7 +53,6 @@ PG_MODULE_MAGIC; void _PG_init(void); -bool neon_enable_new_communicator; static int running_xacts_overflow_policy; static bool monitor_query_exec_time = false; @@ -63,12 +63,13 @@ static void neon_ExecutorStart(QueryDesc *queryDesc, int eflags); static void neon_ExecutorEnd(QueryDesc *queryDesc); static shmem_startup_hook_type prev_shmem_startup_hook; -#if PG_VERSION_NUM>=150000 -static shmem_request_hook_type prev_shmem_request_hook; +static void neon_shmem_startup_hook(void); +static void neon_shmem_request_hook(void); + +#if PG_MAJORVERSION_NUM >= 15 +static shmem_request_hook_type prev_shmem_request_hook = NULL; 
#endif -static void neon_shmem_request(void); -static void neon_shmem_startup_hook(void); #if PG_MAJORVERSION_NUM >= 17 uint32 WAIT_EVENT_NEON_LFC_MAINTENANCE; @@ -458,33 +459,56 @@ _PG_init(void) load_file("$libdir/neon_rmgr", false); #endif - prev_shmem_startup_hook = shmem_startup_hook; - shmem_startup_hook = neon_shmem_startup_hook; -#if PG_VERSION_NUM>=150000 - prev_shmem_request_hook = shmem_request_hook; - shmem_request_hook = neon_shmem_request; -#else - neon_shmem_request(); -#endif - DefineCustomBoolVariable( - "neon.enable_new_communicator", - "Enables new communicator implementation", + "neon.use_communicator_worker", + "Uses the communicator worker implementation", NULL, - &neon_enable_new_communicator, + &neon_use_communicator_worker, true, PGC_POSTMASTER, 0, NULL, NULL, NULL); + /* + * Initializing a pre-loaded Postgres extension happens in three stages: + * + * 1. _PG_init() is called early at postmaster startup. In this stage, no + * shared memory has been allocated yet. Core Postgres GUCs have been + * initialized from the config files, but notably, MaxBackends has not + * calculated yet. In this stage, we must register any extension GUCs + * and can do other early initialization that doesn't depend on shared + * memory. In this stage we must also register "shmem request" and + * "shmem starutup" hooks, to be called in stages 2 and 3. + * + * 2. After MaxBackends have been calculated, the "shmem request" hooks + * are called. The hooks can reserve shared memory by calling + * RequestAddinShmemSpace and RequestNamedLWLockTranche(). The "shmem + * request hooks" are a new mechanism in Postgres v15. In v14 and + * below, you had to make those Requests in stage 1 already, which + * means they could not depend on MaxBackends. (See hack in + * NeonPerfCountersShmemRequest()) + * + * 3. After some more runtime-computed GUCs that affect the amount of + * shared memory needed have been calculated, the "shmem startup" hooks + * are called. 
In this stage, we allocate any shared memory, LWLocks + * and other shared resources. + * + * Here, in the 'neon' extension, we register just one shmem request hook + * and one startup hook, which call into functions in all the subsystems + * that are part of the extension. On v14, the ShmemRequest functions are + * called in stage 1, and on v15 onwards they are called in stage 2. + */ + + /* Stage 1: Define GUCs, and other early intialization */ pg_init_libpagestore(); + relsize_hash_init(); lfc_init(); + pg_init_prewarm(); pg_init_walproposer(); init_lwlsncache(); pg_init_communicator(); - if (neon_enable_new_communicator) - pg_init_communicator_new(); + pg_init_communicator_new(); Custom_XLogReaderRoutines = NeonOnDemandXLogReaderRoutines; @@ -565,6 +589,15 @@ _PG_init(void) PGC_POSTMASTER, 0, NULL, NULL, NULL); + + DefineCustomStringVariable( + "neon.privileged_role_name", + "Name of the 'weak' superuser role, which we give to the users", + NULL, + &privileged_role_name, + "neon_superuser", + PGC_POSTMASTER, 0, NULL, NULL, NULL); + /* * Important: This must happen after other parts of the extension are * loaded, otherwise any settings to GUCs that were set before the @@ -574,6 +607,22 @@ _PG_init(void) ReportSearchPath(); + /* + * Register initialization hooks for stage 2. (On v14, there's no "shmem + * request" hooks, so call the ShmemRequest functions immediately.) 
+ */ +#if PG_VERSION_NUM >= 150000 + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = neon_shmem_request_hook; +#else + neon_shmem_request_hook(); +#endif + + /* Register hooks for stage 3 */ + prev_shmem_startup_hook = shmem_startup_hook; + shmem_startup_hook = neon_shmem_startup_hook; + + /* Other misc initialization */ prev_ExecutorStart = ExecutorStart_hook; ExecutorStart_hook = neon_ExecutorStart; prev_ExecutorEnd = ExecutorEnd_hook; @@ -583,6 +632,8 @@ _PG_init(void) PG_FUNCTION_INFO_V1(pg_cluster_size); PG_FUNCTION_INFO_V1(backpressure_lsns); PG_FUNCTION_INFO_V1(backpressure_throttling_time); +PG_FUNCTION_INFO_V1(approximate_working_set_size_seconds); +PG_FUNCTION_INFO_V1(approximate_working_set_size); Datum pg_cluster_size(PG_FUNCTION_ARGS) @@ -629,17 +680,70 @@ backpressure_throttling_time(PG_FUNCTION_ARGS) PG_RETURN_UINT64(BackpressureThrottlingTime()); } -static void -neon_shmem_request(void) +Datum +approximate_working_set_size_seconds(PG_FUNCTION_ARGS) { -#if PG_VERSION_NUM>=150000 + time_t duration; + int32 dc; + + duration = PG_ARGISNULL(0) ? (time_t) -1 : PG_GETARG_INT32(0); + + if (neon_use_communicator_worker) + dc = communicator_new_approximate_working_set_size_seconds(duration, false); + else + dc = lfc_approximate_working_set_size_seconds(duration, false); + if (dc < 0) + PG_RETURN_NULL(); + else + PG_RETURN_INT32(dc); +} + +Datum +approximate_working_set_size(PG_FUNCTION_ARGS) +{ + bool reset = PG_GETARG_BOOL(0); + int32 dc; + + if (neon_use_communicator_worker) + dc = communicator_new_approximate_working_set_size_seconds(-1, reset); + else + dc = lfc_approximate_working_set_size_seconds(-1, reset); + if (dc < 0) + PG_RETURN_NULL(); + else + PG_RETURN_INT32(dc); +} + +/* + * Initialization stage 2: make requests for the amount of shared memory we + * will need. + * + * For a high-level explanation of the initialization process, see _PG_init(). 
+ */ +static void +neon_shmem_request_hook(void) +{ +#if PG_VERSION_NUM >= 150000 if (prev_shmem_request_hook) prev_shmem_request_hook(); #endif - communicator_new_shmem_request(); + LfcShmemRequest(); + PrewarmShmemRequest(); + NeonPerfCountersShmemRequest(); + PagestoreShmemRequest(); + RelsizeCacheShmemRequest(); + WalproposerShmemRequest(); + LwLsnCacheShmemRequest(); + CommunicatorNewShmemRequest(); } + +/* + * Initialization stage 3: Initialize shared memory. + * + * For a high-level explanation of the initialization process, see _PG_init(). + */ static void neon_shmem_startup_hook(void) { @@ -647,6 +751,17 @@ neon_shmem_startup_hook(void) if (prev_shmem_startup_hook) prev_shmem_startup_hook(); + LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); + + LfcShmemInit(); + PrewarmShmemInit(); + NeonPerfCountersShmemInit(); + PagestoreShmemInit(); + RelsizeCacheShmemInit(); + WalproposerShmemInit(); + LwLsnCacheShmemInit(); + CommunicatorNewShmemInit(); + #if PG_MAJORVERSION_NUM >= 17 WAIT_EVENT_NEON_LFC_MAINTENANCE = WaitEventExtensionNew("Neon/FileCache_Maintenance"); WAIT_EVENT_NEON_LFC_READ = WaitEventExtensionNew("Neon/FileCache_Read"); @@ -660,7 +775,7 @@ neon_shmem_startup_hook(void) WAIT_EVENT_NEON_WAL_DL = WaitEventExtensionNew("Neon/WAL_Download"); #endif - communicator_new_shmem_startup(); + LWLockRelease(AddinShmemInitLock); } /* diff --git a/pgxn/neon/neon.h b/pgxn/neon/neon.h index 149ed5ebed..20c850864a 100644 --- a/pgxn/neon/neon.h +++ b/pgxn/neon/neon.h @@ -13,11 +13,9 @@ #include "utils/wait_event.h" /* GUCs */ -extern bool neon_enable_new_communicator; extern char *neon_auth_token; extern char *neon_timeline; extern char *neon_tenant; - extern char *wal_acceptors_list; extern int wal_acceptor_reconnect_timeout; extern int wal_acceptor_connection_timeout; @@ -72,4 +70,19 @@ extern PGDLLEXPORT void WalProposerSync(int argc, char *argv[]); extern PGDLLEXPORT void WalProposerMain(Datum main_arg); extern PGDLLEXPORT void LogicalSlotsMonitorMain(Datum 
main_arg); +extern void LfcShmemRequest(void); +extern void PagestoreShmemRequest(void); +extern void RelsizeCacheShmemRequest(void); +extern void WalproposerShmemRequest(void); +extern void LwLsnCacheShmemRequest(void); +extern void NeonPerfCountersShmemRequest(void); + +extern void LfcShmemInit(void); +extern void PagestoreShmemInit(void); +extern void RelsizeCacheShmemInit(void); +extern void WalproposerShmemInit(void); +extern void LwLsnCacheShmemInit(void); +extern void NeonPerfCountersShmemInit(void); + + #endif /* NEON_H */ diff --git a/pgxn/neon/neon_ddl_handler.c b/pgxn/neon/neon_ddl_handler.c index 2ce7b0086b..74a90ea4d4 100644 --- a/pgxn/neon/neon_ddl_handler.c +++ b/pgxn/neon/neon_ddl_handler.c @@ -13,7 +13,7 @@ * accumulate changes. On subtransaction commit, the top of the stack * is merged with the table below it. * - * Support event triggers for neon_superuser + * Support event triggers for {privileged_role_name} * * IDENTIFICATION * contrib/neon/neon_dll_handler.c @@ -49,6 +49,7 @@ #include "neon_ddl_handler.h" #include "neon_utils.h" +#include "neon.h" static ProcessUtility_hook_type PreviousProcessUtilityHook = NULL; static fmgr_hook_type next_fmgr_hook = NULL; @@ -541,11 +542,11 @@ NeonXactCallback(XactEvent event, void *arg) } static bool -RoleIsNeonSuperuser(const char *role_name) +IsPrivilegedRole(const char *role_name) { Assert(role_name); - return strcmp(role_name, "neon_superuser") == 0; + return strcmp(role_name, privileged_role_name) == 0; } static void @@ -578,8 +579,9 @@ HandleCreateDb(CreatedbStmt *stmt) { const char *owner_name = defGetString(downer); - if (RoleIsNeonSuperuser(owner_name)) - elog(ERROR, "can't create a database with owner neon_superuser"); + if (IsPrivilegedRole(owner_name)) + elog(ERROR, "could not create a database with owner %s", privileged_role_name); + entry->owner = get_role_oid(owner_name, false); } else @@ -609,8 +611,9 @@ HandleAlterOwner(AlterOwnerStmt *stmt) memset(entry->old_name, 0, 
sizeof(entry->old_name)); new_owner = get_rolespec_name(stmt->newowner); - if (RoleIsNeonSuperuser(new_owner)) - elog(ERROR, "can't alter owner to neon_superuser"); + if (IsPrivilegedRole(new_owner)) + elog(ERROR, "could not alter owner to %s", privileged_role_name); + entry->owner = get_role_oid(new_owner, false); entry->type = Op_Set; } @@ -716,8 +719,8 @@ HandleAlterRole(AlterRoleStmt *stmt) InitRoleTableIfNeeded(); role_name = get_rolespec_name(stmt->role); - if (RoleIsNeonSuperuser(role_name) && !superuser()) - elog(ERROR, "can't ALTER neon_superuser"); + if (IsPrivilegedRole(role_name) && !superuser()) + elog(ERROR, "could not ALTER %s", privileged_role_name); dpass = NULL; foreach(option, stmt->options) @@ -831,7 +834,7 @@ HandleRename(RenameStmt *stmt) * * In vanilla only superuser can create Event Triggers. * - * We allow it for neon_superuser by temporary switching to superuser. But as + * We allow it for {privileged_role_name} by temporary switching to superuser. But as * far as event trigger can fire in superuser context we should protect * superuser from execution of arbitrary user's code. * @@ -891,7 +894,7 @@ force_noop(FmgrInfo *finfo) * Also skip executing Event Triggers when GUC neon.event_triggers has been * set to false. This might be necessary to be able to connect again after a * LOGIN Event Trigger has been installed that would prevent connections as - * neon_superuser. + * {privileged_role_name}. */ static void neon_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private) @@ -910,24 +913,24 @@ neon_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private) } /* - * The neon_superuser role can use the GUC neon.event_triggers to disable + * The {privileged_role_name} role can use the GUC neon.event_triggers to disable * firing Event Trigger. 
* * SET neon.event_triggers TO false; * - * This only applies to the neon_superuser role though, and only allows - * skipping Event Triggers owned by neon_superuser, which we check by - * proxy of the Event Trigger function being owned by neon_superuser. + * This only applies to the {privileged_role_name} role though, and only allows + * skipping Event Triggers owned by {privileged_role_name}, which we check by + * proxy of the Event Trigger function being owned by {privileged_role_name}. * - * A role that is created in role neon_superuser should be allowed to also + * A role that is created in role {privileged_role_name} should be allowed to also * benefit from the neon_event_triggers GUC, and will be considered the - * same as the neon_superuser role. + * same as the {privileged_role_name} role. */ if (event == FHET_START && !neon_event_triggers - && is_neon_superuser()) + && is_privileged_role()) { - Oid neon_superuser_oid = get_role_oid("neon_superuser", false); + Oid weak_superuser_oid = get_role_oid(privileged_role_name, false); /* Find the Function Attributes (owner Oid, security definer) */ const char *fun_owner_name = NULL; @@ -937,8 +940,8 @@ neon_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private) LookupFuncOwnerSecDef(flinfo->fn_oid, &fun_owner, &fun_is_secdef); fun_owner_name = GetUserNameFromId(fun_owner, false); - if (RoleIsNeonSuperuser(fun_owner_name) - || has_privs_of_role(fun_owner, neon_superuser_oid)) + if (IsPrivilegedRole(fun_owner_name) + || has_privs_of_role(fun_owner, weak_superuser_oid)) { elog(WARNING, "Skipping Event Trigger: neon.event_triggers is false"); @@ -953,7 +956,9 @@ neon_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private) /* * Fire Event Trigger if both function owner and current user are - * superuser, or none of them are. + * superuser. Allow executing Event Trigger function that belongs to a + * superuser when connected as a non-superuser, even when the function is + * SECURITY DEFINER. 
*/ else if (event == FHET_START /* still enable it to pass pg_regress tests */ @@ -976,32 +981,7 @@ neon_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private) function_is_owned_by_super = superuser_arg(function_owner); /* - * 1. Refuse to run SECURITY DEFINER function that belongs to a - * superuser when the current user is not a superuser itself. - */ - if (!role_is_super - && function_is_owned_by_super - && function_is_secdef) - { - char *func_name = get_func_name(flinfo->fn_oid); - - ereport(WARNING, - (errmsg("Skipping Event Trigger"), - errdetail("Event Trigger function \"%s\" is owned by \"%s\" " - "and is SECURITY DEFINER", - func_name, - GetUserNameFromId(function_owner, false)))); - - /* - * we can't skip execution directly inside the fmgr_hook so - * instead we change the event trigger function to a noop - * function. - */ - force_noop(flinfo); - } - - /* - * 2. Refuse to run functions that belongs to a non-superuser when the + * Refuse to run functions that belongs to a non-superuser when the * current user is a superuser. * * We could run a SECURITY DEFINER user-function here and be safe with @@ -1009,7 +989,7 @@ neon_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private) * infrastructure maintenance operations, where we prefer to skip * running user-defined code. */ - else if (role_is_super && !function_is_owned_by_super) + if (role_is_super && !function_is_owned_by_super) { char *func_name = get_func_name(flinfo->fn_oid); @@ -1172,13 +1152,13 @@ ProcessCreateEventTrigger( } /* - * Allow neon_superuser to create Event Trigger, while keeping the + * Allow {privileged_role_name} to create Event Trigger, while keeping the * ownership of the object. * * For that we give superuser membership to the role for the execution of * the command. 
*/ - if (IsTransactionState() && is_neon_superuser()) + if (IsTransactionState() && is_privileged_role()) { /* Find the Event Trigger function Oid */ Oid func_oid = LookupFuncName(stmt->funcname, 0, NULL, false); @@ -1255,7 +1235,7 @@ ProcessCreateEventTrigger( * * That way [ ALTER | DROP ] EVENT TRIGGER commands just work. */ - if (IsTransactionState() && is_neon_superuser()) + if (IsTransactionState() && is_privileged_role()) { if (!current_user_is_super) { @@ -1375,19 +1355,17 @@ NeonProcessUtility( } /* - * Only neon_superuser is granted privilege to edit neon.event_triggers GUC. + * Only {privileged_role_name} is granted privilege to edit neon.event_triggers GUC. */ static void neon_event_triggers_assign_hook(bool newval, void *extra) { - /* MyDatabaseId == InvalidOid || !OidIsValid(GetUserId()) */ - - if (IsTransactionState() && !is_neon_superuser()) + if (IsTransactionState() && !is_privileged_role()) { ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied to set neon.event_triggers"), - errdetail("Only \"neon_superuser\" is allowed to set the GUC"))); + errdetail("Only \"%s\" is allowed to set the GUC", privileged_role_name))); } } diff --git a/pgxn/neon/neon_lwlsncache.c b/pgxn/neon/neon_lwlsncache.c index a8cfa0f825..5887c02c36 100644 --- a/pgxn/neon/neon_lwlsncache.c +++ b/pgxn/neon/neon_lwlsncache.c @@ -1,5 +1,6 @@ #include "postgres.h" +#include "neon.h" #include "neon_lwlsncache.h" #include "miscadmin.h" @@ -81,14 +82,6 @@ static set_max_lwlsn_hook_type prev_set_max_lwlsn_hook = NULL; static set_lwlsn_relation_hook_type prev_set_lwlsn_relation_hook = NULL; static set_lwlsn_db_hook_type prev_set_lwlsn_db_hook = NULL; -static shmem_startup_hook_type prev_shmem_startup_hook; - -#if PG_VERSION_NUM >= 150000 -static shmem_request_hook_type prev_shmem_request_hook; -#endif - -static void shmemrequest(void); -static void shmeminit(void); static void neon_set_max_lwlsn(XLogRecPtr lsn); void @@ -99,16 +92,6 @@ init_lwlsncache(void) 
lwlc_register_gucs(); - prev_shmem_startup_hook = shmem_startup_hook; - shmem_startup_hook = shmeminit; - - #if PG_VERSION_NUM >= 150000 - prev_shmem_request_hook = shmem_request_hook; - shmem_request_hook = shmemrequest; - #else - shmemrequest(); - #endif - prev_set_lwlsn_block_range_hook = set_lwlsn_block_range_hook; set_lwlsn_block_range_hook = neon_set_lwlsn_block_range; prev_set_lwlsn_block_v_hook = set_lwlsn_block_v_hook; @@ -124,20 +107,19 @@ init_lwlsncache(void) } -static void shmemrequest(void) { +void +LwLsnCacheShmemRequest(void) +{ Size requested_size = sizeof(LwLsnCacheCtl); - + requested_size += hash_estimate_size(lwlsn_cache_size, sizeof(LastWrittenLsnCacheEntry)); RequestAddinShmemSpace(requested_size); - - #if PG_VERSION_NUM >= 150000 - if (prev_shmem_request_hook) - prev_shmem_request_hook(); - #endif } -static void shmeminit(void) { +void +LwLsnCacheShmemInit(void) +{ static HASHCTL info; bool found; if (lwlsn_cache_size > 0) @@ -157,9 +139,6 @@ static void shmeminit(void) { } dlist_init(&LwLsnCache->lastWrittenLsnLRU); LwLsnCache->maxLastWrittenLsn = GetRedoRecPtr(); - if (prev_shmem_startup_hook) { - prev_shmem_startup_hook(); - } } /* diff --git a/pgxn/neon/neon_perf_counters.c b/pgxn/neon/neon_perf_counters.c index d0a3d15108..dd576e4e73 100644 --- a/pgxn/neon/neon_perf_counters.c +++ b/pgxn/neon/neon_perf_counters.c @@ -17,22 +17,32 @@ #include "storage/shmem.h" #include "utils/builtins.h" +#include "neon.h" #include "neon_perf_counters.h" #include "neon_pgversioncompat.h" neon_per_backend_counters *neon_per_backend_counters_shared; -Size -NeonPerfCountersShmemSize(void) +void +NeonPerfCountersShmemRequest(void) { - Size size = 0; - - size = add_size(size, mul_size(NUM_NEON_PERF_COUNTER_SLOTS, - sizeof(neon_per_backend_counters))); - - return size; + Size size; +#if PG_MAJORVERSION_NUM < 15 + /* Hack: in PG14 MaxBackends is not initialized at the time of calling NeonPerfCountersShmemRequest function. 
+ * Do it ourselves and then undo to prevent assertion failure + */ + Assert(MaxBackends == 0); /* not initialized yet */ + InitializeMaxBackends(); + size = mul_size(NUM_NEON_PERF_COUNTER_SLOTS, sizeof(neon_per_backend_counters)); + MaxBackends = 0; +#else + size = mul_size(NUM_NEON_PERF_COUNTER_SLOTS, sizeof(neon_per_backend_counters)); +#endif + RequestAddinShmemSpace(size); } + + void NeonPerfCountersShmemInit(void) { diff --git a/pgxn/neon/neon_pgversioncompat.h b/pgxn/neon/neon_pgversioncompat.h index 7093a790d0..85646a6dc5 100644 --- a/pgxn/neon/neon_pgversioncompat.h +++ b/pgxn/neon/neon_pgversioncompat.h @@ -76,16 +76,16 @@ InitBufferTag(BufferTag *tag, const RelFileNode *rnode, (tag).rnode = (rinfo); \ } while (false) -#define BufTagGetNRelFileInfo(tag) tag.rnode +#define BufTagGetNRelFileInfo(tag) (tag).rnode #define BufTagGetRelNumber(tagp) ((tagp)->rnode.relNode) -#define BufTagInit(tag, relNumber, forknum, blkno, spcOid, dbOid) \ +#define BufTagInit(tag, rel_number, fork_number, block_number, spc_oid, db_oid) \ do { \ - RelFileNode rnode = { .spcNode = spcOid, .dbNode = dbOid, .relNode = relNumber}; \ - (tag).forkNum = forknum; \ - (tag).blockNum = blkno; \ - (tag).rnode = rnode; \ + RelFileNode rnode = { .spcNode = (spc_oid), .dbNode = (db_oid), .relNode = (rel_number)}; \ + (tag).forkNum = (fork_number); \ + (tag).blockNum = (block_number); \ + (tag).rnode = rnode; \ } while (false) #define InvalidRelFileNumber InvalidOid @@ -137,13 +137,13 @@ InitBufferTag(BufferTag *tag, const RelFileNode *rnode, .relNumber = (tag).relNumber, \ }) -#define BufTagInit(tag, relNumber, forknum, blkno, spcOid, dbOid) \ +#define BufTagInit(tag, rel_number, fork_number, block_number, spc_oid, db_oid) \ do { \ - (tag).forkNum = forknum; \ - (tag).blockNum = blkno; \ - (tag).spcOid = spcOid; \ - (tag).dbOid = dbOid; \ - (tag).relNumber = relNumber; \ + (tag).forkNum = (fork_number); \ + (tag).blockNum = (block_number); \ + (tag).spcOid = (spc_oid); \ + (tag).dbOid = 
(db_oid); \ + (tag).relNumber = (rel_number); \ } while (false) #define SMgrRelGetRelInfo(reln) \ @@ -173,4 +173,8 @@ extern void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags); extern TimeLineID GetWALInsertionTimeLine(void); #endif +/* format codes not present in PG17-; but available in PG18+ */ +#define INT64_HEX_FORMAT "%" INT64_MODIFIER "x" +#define UINT64_HEX_FORMAT "%" INT64_MODIFIER "x" + #endif /* NEON_PGVERSIONCOMPAT_H */ diff --git a/pgxn/neon/pagestore_client.h b/pgxn/neon/pagestore_client.h index c2727e232b..47417a7bd5 100644 --- a/pgxn/neon/pagestore_client.h +++ b/pgxn/neon/pagestore_client.h @@ -244,8 +244,16 @@ extern char *neon_timeline; extern char *neon_tenant; extern int32 max_cluster_size; extern int neon_protocol_version; +extern int neon_stripe_size; -extern void get_shard_map(char ***connstrs_p, shardno_t *num_shards_p); +typedef struct +{ + char connstring[MAX_SHARDS][MAX_PAGESERVER_CONNSTRING_SIZE]; + size_t num_shards; + size_t stripe_size; +} ShardMap; + +extern bool parse_shard_map(const char *connstr, ShardMap *result); extern shardno_t get_shard_number(BufferTag* tag); extern const f_smgr *smgr_neon(ProcNumber backend, NRelFileInfo rinfo); @@ -292,6 +300,7 @@ extern int64 neon_dbsize(Oid dbNode); extern void neon_get_request_lsns(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, neon_request_lsns *output, BlockNumber nblocks); +extern XLogRecPtr neon_get_write_lsn(void); /* utils for neon relsize cache */ extern void relsize_hash_init(void); diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 4189af4d32..06ce61d2e5 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -502,6 +502,60 @@ nm_adjust_lsn(XLogRecPtr lsn) return lsn; } +/* + * Get a LSN to use to stamp an operation like relation create or truncate. + * On operations on individual pages we use the LSN of the page, but when + * e.g. smgrcreate() is called, we have to do something else. 
+ */ +XLogRecPtr +neon_get_write_lsn(void) +{ + XLogRecPtr lsn; + + if (RecoveryInProgress()) + { + /* + * FIXME: v14 doesn't have GetCurrentReplayRecPtr(). Options: + * - add it in our fork + * - store a magic value that means that you must use + * current latest possible LSN at the time that the request + * on this thing is made again (or some other recent enough + * lsn). + */ +#if PG_VERSION_NUM >= 150000 + lsn = GetCurrentReplayRecPtr(NULL); +#else + lsn = GetXLogReplayRecPtr(NULL); /* FIXME: this is wrong, see above */ +#endif + } + else + lsn = GetXLogInsertRecPtr(); + + /* + * If the insert LSN points to just after page header, round it down to + * the beginning of the page, because the page header might not have been + * inserted to the WAL yet, and if we tried to flush it, the WAL flushing + * code gets upset. + */ + { + int segoff; + + segoff = XLogSegmentOffset(lsn, wal_segment_size); + if (segoff == SizeOfXLogLongPHD) + { + lsn = lsn - segoff; + } + else + { + int offset = lsn % XLOG_BLCKSZ; + + if (offset == SizeOfXLogShortPHD) + lsn = lsn - offset; + } + } + + return lsn; +} /* * Return LSN for requesting pages and number of blocks from page server @@ -768,7 +822,7 @@ neon_exists(SMgrRelation reln, ForkNumber forkNum) return false; } - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) return communicator_new_rel_exists(InfoFromSMgrRel(reln), forkNum); else { @@ -822,37 +876,44 @@ neon_create(SMgrRelation reln, ForkNumber forkNum, bool isRedo) RelFileInfoFmt(InfoFromSMgrRel(reln)), forkNum); - if (neon_enable_new_communicator) + /* + * Newly created relation is empty, remember that in the relsize cache. + * + * Note that in REDO, this is called to make sure the relation fork + * exists, but it does not truncate the relation. So, we can only update + * the relsize if it didn't exist before. 
+ * + * Also, in redo, we must make sure to update the cached size of the + * relation, as that is the primary source of truth for REDO's file length + * considerations, and as file extension isn't (perfectly) logged, we need + * to take care of that before we hit file size checks. + * + * FIXME: This is currently not just an optimization, but required for + * correctness. Postgres can call smgrnblocks() on the newly-created + * relation. Currently, we don't call SetLastWrittenLSN() when a new + * relation created, so if we didn't remember the size in the relsize + * cache, we might call smgrnblocks() on the newly-created relation before + * the creation WAL record has been received by the page server. + * + * XXX: with the new communicator, similar considerations apply. However, + * during replay, neon_get_write_lsn() returns the (end-)LSN of the record + * that's being replayed, so we should not have the correctness issue + * mentioned in previous paragraph. + */ + if (neon_use_communicator_worker) { + XLogRecPtr lsn = neon_get_write_lsn(); + if (isRedo) { if (!communicator_new_rel_exists(InfoFromSMgrRel(reln), forkNum)) - communicator_new_rel_create(InfoFromSMgrRel(reln), forkNum); + communicator_new_rel_create(InfoFromSMgrRel(reln), forkNum, lsn); } else - communicator_new_rel_create(InfoFromSMgrRel(reln), forkNum); + communicator_new_rel_create(InfoFromSMgrRel(reln), forkNum, lsn); } else { - /* - * Newly created relation is empty, remember that in the relsize cache. - * - * Note that in REDO, this is called to make sure the relation fork - * exists, but it does not truncate the relation. So, we can only update - * the relsize if it didn't exist before. - * - * Also, in redo, we must make sure to update the cached size of the - * relation, as that is the primary source of truth for REDO's file length - * considerations, and as file extension isn't (perfectly) logged, we need - * to take care of that before we hit file size checks. 
- * - * FIXME: This is currently not just an optimization, but required for - * correctness. Postgres can call smgrnblocks() on the newly-created - * relation. Currently, we don't call SetLastWrittenLSN() when a new - * relation created, so if we didn't remember the size in the relsize - * cache, we might call smgrnblocks() on the newly-created relation before - * the creation WAL record hass been received by the page server. - */ if (isRedo) { update_cached_relsize(InfoFromSMgrRel(reln), forkNum, 0); @@ -900,9 +961,11 @@ neon_unlink(NRelFileInfoBackend rinfo, ForkNumber forkNum, bool isRedo) if (!NRelFileInfoBackendIsTemp(rinfo)) { - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { - communicator_new_rel_unlink(InfoFromNInfoB(rinfo), forkNum); + XLogRecPtr lsn = neon_get_write_lsn(); + + communicator_new_rel_unlink(InfoFromNInfoB(rinfo), forkNum, lsn); } else forget_cached_relsize(InfoFromNInfoB(rinfo), forkNum); @@ -992,7 +1055,7 @@ neon_extend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, forkNum, blkno, (uint32) (lsn >> 32), (uint32) lsn); - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { // FIXME: this can pass lsn == invalid. Is that ok? 
communicator_new_rel_extend(InfoFromSMgrRel(reln), forkNum, blkno, (const void *) buffer, lsn); @@ -1119,7 +1182,7 @@ neon_zeroextend(SMgrRelation reln, ForkNumber forkNum, BlockNumber start_block, lsn = XLogInsert(RM_XLOG_ID, XLOG_FPI); - if (!neon_enable_new_communicator) + if (!neon_use_communicator_worker) { for (int i = 0; i < count; i++) { @@ -1135,7 +1198,7 @@ neon_zeroextend(SMgrRelation reln, ForkNumber forkNum, BlockNumber start_block, Assert(lsn != 0); - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { communicator_new_rel_zeroextend(InfoFromSMgrRel(reln), forkNum, start_block, nblocks, lsn); } @@ -1203,7 +1266,7 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { communicator_new_prefetch_register_bufferv(InfoFromSMgrRel(reln), forknum, blocknum, nblocks); return false; @@ -1213,7 +1276,7 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, tag.dbOid = reln->smgr_rlocator.locator.dbOid; tag.relNumber = reln->smgr_rlocator.locator.relNumber; tag.forkNum = forknum; - + while (nblocks > 0) { int iterblocks = Min(nblocks, PG_IOV_MAX); @@ -1235,7 +1298,7 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, blocknum += iterblocks; } - if (!neon_enable_new_communicator) + if (!neon_use_communicator_worker) communicator_prefetch_pump_state(); return false; @@ -1263,7 +1326,7 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum) neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { communicator_new_prefetch_register_bufferv(InfoFromSMgrRel(reln), forknum, blocknum, 1); } @@ -1325,7 +1388,7 @@ neon_writeback(SMgrRelation reln, ForkNumber forknum, */ neon_log(SmgrTrace, "writeback noop"); - if 
(!neon_enable_new_communicator) + if (!neon_use_communicator_worker) communicator_prefetch_pump_state(); if (debug_compare_local) @@ -1343,7 +1406,7 @@ void neon_read_at_lsn(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, neon_request_lsns request_lsns, void *buffer) { - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { // FIXME: request_lsns is ignored. That affects the neon_test_utils callers. // Add the capability to specify the LSNs explicitly, for the sake of neon_test_utils ? @@ -1476,7 +1539,7 @@ neon_read(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, void *buffer neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { communicator_new_read_at_lsnv(InfoFromSMgrRel(reln), forkNum, blkno, (void *) &buffer, 1); @@ -1587,12 +1650,12 @@ neon_readv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, nblocks, PG_IOV_MAX); /* Try to read PS results if they are available */ - if (!neon_enable_new_communicator) + if (!neon_use_communicator_worker) communicator_prefetch_pump_state(); memset(read_pages, 0, sizeof(read_pages)); - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { communicator_new_read_at_lsnv(InfoFromSMgrRel(reln), forknum, blocknum, buffers, nblocks); @@ -1601,7 +1664,7 @@ neon_readv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, { neon_get_request_lsns(InfoFromSMgrRel(reln), forknum, blocknum, request_lsns, nblocks); - + prefetch_result = communicator_prefetch_lookupv(InfoFromSMgrRel(reln), forknum, blocknum, request_lsns, nblocks, buffers, read_pages); @@ -1748,7 +1811,7 @@ neon_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const vo forknum, blocknum, (uint32) (lsn >> 32), (uint32) lsn); - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { communicator_new_write_page(InfoFromSMgrRel(reln), forknum, blocknum, buffer, lsn); } @@ 
-1818,7 +1881,7 @@ neon_writev(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno, neon_wallog_pagev(reln, forknum, blkno, nblocks, (const char **) buffers, false); - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { for (int i = 0; i < nblocks; i++) { @@ -1873,7 +1936,7 @@ neon_nblocks(SMgrRelation reln, ForkNumber forknum) neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { n_blocks = communicator_new_rel_nblocks(InfoFromSMgrRel(reln), forknum); } @@ -1913,7 +1976,7 @@ neon_dbsize(Oid dbNode) neon_request_lsns request_lsns; NRelFileInfo dummy_node = {0}; - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { db_size = communicator_new_dbsize(dbNode); } @@ -1960,9 +2023,11 @@ neon_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber old_blocks, Blo neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { - communicator_new_rel_truncate(InfoFromSMgrRel(reln), forknum, nblocks); + XLogRecPtr lsn = neon_get_write_lsn(); + + communicator_new_rel_truncate(InfoFromSMgrRel(reln), forknum, nblocks, lsn); } else { @@ -2039,7 +2104,7 @@ neon_immedsync(SMgrRelation reln, ForkNumber forknum) neon_log(SmgrTrace, "[NEON_SMGR] immedsync noop"); - if (!neon_enable_new_communicator) + if (!neon_use_communicator_worker) communicator_prefetch_pump_state(); if (debug_compare_local) @@ -2226,12 +2291,15 @@ neon_end_unlogged_build(SMgrRelation reln) nblocks = mdnblocks(reln, MAIN_FORKNUM); recptr = GetXLogInsertRecPtr(); - neon_set_lwlsn_block_range(recptr, - InfoFromNInfoB(rinfob), - MAIN_FORKNUM, 0, nblocks); - neon_set_lwlsn_relation(recptr, - InfoFromNInfoB(rinfob), - MAIN_FORKNUM); + if (!neon_use_communicator_worker) + { + neon_set_lwlsn_block_range(recptr, + InfoFromNInfoB(rinfob), + MAIN_FORKNUM, 0, nblocks); + 
neon_set_lwlsn_relation(recptr, + InfoFromNInfoB(rinfob), + MAIN_FORKNUM); + } /* Remove local copy */ for (int forknum = 0; forknum <= MAX_FORKNUM; forknum++) @@ -2240,8 +2308,11 @@ neon_end_unlogged_build(SMgrRelation reln) RelFileInfoFmt(InfoFromNInfoB(rinfob)), forknum); - // FIXME: also do this with the new communicator - if (!neon_enable_new_communicator) + if (neon_use_communicator_worker) + { + communicator_new_update_cached_rel_size(InfoFromSMgrRel(reln), forknum, nblocks, recptr); + } + else { forget_cached_relsize(InfoFromNInfoB(rinfob), forknum); lfc_invalidate(InfoFromNInfoB(rinfob), forknum, nblocks); @@ -2313,8 +2384,8 @@ neon_read_slru_segment(SMgrRelation reln, const char* path, int segno, void* buf request_lsns.not_modified_since = not_modified_since; request_lsns.effective_request_lsn = request_lsn; - if (neon_enable_new_communicator) - n_blocks = communicator_new_read_slru_segment(kind, segno, buffer); + if (neon_use_communicator_worker) + n_blocks = communicator_new_read_slru_segment(kind, (uint32_t)segno, &request_lsns, path); else n_blocks = communicator_read_slru_segment(kind, segno, &request_lsns, buffer); @@ -2353,7 +2424,7 @@ AtEOXact_neon(XactEvent event, void *arg) } break; } - if (!neon_enable_new_communicator) + if (!neon_use_communicator_worker) communicator_reconfigure_timeout_if_needed(); } @@ -2412,7 +2483,7 @@ smgr_init_neon(void) smgr_init_standard(); neon_init(); - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) communicator_new_init(); else communicator_init(); @@ -2427,7 +2498,7 @@ neon_extend_rel_size(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, /* This is only used in WAL replay */ Assert(RecoveryInProgress()); - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) { relsize = communicator_new_rel_nblocks(rinfo, forknum); @@ -2606,7 +2677,7 @@ neon_redo_read_buffer_filter(XLogReaderState *record, uint8 block_id) * We should perform this check after assigning LwLSN to 
prevent * prefetching of some older version of the page by some other backend. */ - if (neon_enable_new_communicator) + if (neon_use_communicator_worker) no_redo_needed = communicator_new_cache_contains(rinfo, forknum, blkno); else no_redo_needed = !lfc_cache_contains(rinfo, forknum, blkno); diff --git a/pgxn/neon/relsize_cache.c b/pgxn/neon/relsize_cache.c index 4ea303f996..613e98f0d4 100644 --- a/pgxn/neon/relsize_cache.c +++ b/pgxn/neon/relsize_cache.c @@ -23,9 +23,7 @@ #include "utils/dynahash.h" #include "utils/guc.h" -#if PG_VERSION_NUM >= 150000 #include "miscadmin.h" -#endif typedef struct { @@ -50,32 +48,23 @@ typedef struct * algorithm */ } RelSizeHashControl; -static HTAB *relsize_hash; -static LWLockId relsize_lock; -static int relsize_hash_size; -static RelSizeHashControl* relsize_ctl; -static shmem_startup_hook_type prev_shmem_startup_hook = NULL; -#if PG_VERSION_NUM >= 150000 -static shmem_request_hook_type prev_shmem_request_hook = NULL; -static void relsize_shmem_request(void); -#endif - /* * Size of a cache entry is 36 bytes. So this default will take about 2.3 MB, * which seems reasonable. 
*/ #define DEFAULT_RELSIZE_HASH_SIZE (64 * 1024) -static void -neon_smgr_shmem_startup(void) +static HTAB *relsize_hash; +static LWLockId relsize_lock; +static int relsize_hash_size = DEFAULT_RELSIZE_HASH_SIZE; +static RelSizeHashControl* relsize_ctl; + +void +RelsizeCacheShmemInit(void) { static HASHCTL info; bool found; - if (prev_shmem_startup_hook) - prev_shmem_startup_hook(); - - LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); relsize_ctl = (RelSizeHashControl *) ShmemInitStruct("relsize_hash", sizeof(RelSizeHashControl), &found); if (!found) { @@ -86,7 +75,6 @@ neon_smgr_shmem_startup(void) relsize_hash_size, relsize_hash_size, &info, HASH_ELEM | HASH_BLOBS); - LWLockRelease(AddinShmemInitLock); relsize_ctl->size = 0; relsize_ctl->hits = 0; relsize_ctl->misses = 0; @@ -100,7 +88,7 @@ get_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber *size) { bool found = false; - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (relsize_hash_size > 0) { @@ -133,7 +121,7 @@ get_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber *size) void set_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber size) { - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (relsize_hash_size > 0) { @@ -183,7 +171,7 @@ set_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber size) void update_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber size) { - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (relsize_hash_size > 0) { @@ -219,7 +207,7 @@ update_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber size) void forget_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum) { - Assert(!neon_enable_new_communicator); + Assert(!neon_use_communicator_worker); if (relsize_hash_size > 0) { @@ -251,34 +239,15 @@ relsize_hash_init(void) PGC_POSTMASTER, 0, NULL, NULL, NULL); - - if (relsize_hash_size > 0) - { 
-#if PG_VERSION_NUM >= 150000 - prev_shmem_request_hook = shmem_request_hook; - shmem_request_hook = relsize_shmem_request; -#else - RequestAddinShmemSpace(hash_estimate_size(relsize_hash_size, sizeof(RelSizeEntry))); - RequestNamedLWLockTranche("neon_relsize", 1); -#endif - - prev_shmem_startup_hook = shmem_startup_hook; - shmem_startup_hook = neon_smgr_shmem_startup; - } } -#if PG_VERSION_NUM >= 150000 /* * shmem_request hook: request additional shared resources. We'll allocate or * attach to the shared resources in neon_smgr_shmem_startup(). */ -static void -relsize_shmem_request(void) +void +RelsizeCacheShmemRequest(void) { - if (prev_shmem_request_hook) - prev_shmem_request_hook(); - RequestAddinShmemSpace(sizeof(RelSizeHashControl) + hash_estimate_size(relsize_hash_size, sizeof(RelSizeEntry))); RequestNamedLWLockTranche("neon_relsize", 1); } -#endif diff --git a/pgxn/neon/walproposer.h b/pgxn/neon/walproposer.h index 4b223b6b18..19d23925a5 100644 --- a/pgxn/neon/walproposer.h +++ b/pgxn/neon/walproposer.h @@ -376,6 +376,28 @@ typedef struct PageserverFeedback uint32 shard_number; } PageserverFeedback; +/* BEGIN_HADRON */ +/** + * WAL proposer is the only backend that will update `sent_bytes` and `last_recorded_time_us`. + * Once the `sent_bytes` reaches the limit, it puts backpressure on PG backends. + * + * A PG backend checks `should_limit` to see if it should hit backpressure. + * - If yes, it also checks the `last_recorded_time_us` to see + * if it's time to push more WALs. This is because the WAL proposer + * only resets `should_limit` to 0 after it is notified about new WALs + * which might take a while. + */ +typedef struct WalRateLimiter +{ + /* If the value is 1, PG backends will hit backpressure. */ + pg_atomic_uint32 should_limit; + /* The number of bytes sent in the current second. */ + uint64 sent_bytes; + /* The last recorded time in microsecond. 
*/ + pg_atomic_uint64 last_recorded_time_us; +} WalRateLimiter; +/* END_HADRON */ + typedef struct WalproposerShmemState { pg_atomic_uint64 propEpochStartLsn; @@ -395,6 +417,11 @@ typedef struct WalproposerShmemState /* aggregated feedback with min LSNs across shards */ PageserverFeedback min_ps_feedback; + + /* BEGIN_HADRON */ + /* The WAL rate limiter */ + WalRateLimiter wal_rate_limiter; + /* END_HADRON */ } WalproposerShmemState; /* diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index 185fc83ace..9ed8d0d2d2 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -66,6 +66,9 @@ int wal_acceptor_reconnect_timeout = 1000; int wal_acceptor_connection_timeout = 10000; int safekeeper_proto_version = 3; char *safekeeper_conninfo_options = ""; +/* BEGIN_HADRON */ +int databricks_max_wal_mb_per_second = -1; +/* END_HADRON */ /* Set to true in the walproposer bgw. */ static bool am_walproposer; @@ -80,10 +83,8 @@ static XLogRecPtr standby_flush_lsn = InvalidXLogRecPtr; static XLogRecPtr standby_apply_lsn = InvalidXLogRecPtr; static HotStandbyFeedback agg_hs_feedback; -static void nwp_shmem_startup_hook(void); static void nwp_register_gucs(void); static void assign_neon_safekeepers(const char *newval, void *extra); -static void nwp_prepare_shmem(void); static uint64 backpressure_lag_impl(void); static uint64 startup_backpressure_wrap(void); static bool backpressure_throttling_impl(void); @@ -96,11 +97,6 @@ static TimestampTz walprop_pg_get_current_timestamp(WalProposer *wp); static void walprop_pg_load_libpqwalreceiver(void); static process_interrupts_callback_t PrevProcessInterruptsCallback = NULL; -static shmem_startup_hook_type prev_shmem_startup_hook_type; -#if PG_VERSION_NUM >= 150000 -static shmem_request_hook_type prev_shmem_request_hook = NULL; -static void walproposer_shmem_request(void); -#endif static void WalproposerShmemInit_SyncSafekeeper(void); @@ -190,8 +186,6 @@ pg_init_walproposer(void) nwp_register_gucs(); - 
nwp_prepare_shmem(); - delay_backend_us = &startup_backpressure_wrap; PrevProcessInterruptsCallback = ProcessInterruptsCallback; ProcessInterruptsCallback = backpressure_throttling_impl; @@ -252,6 +246,18 @@ nwp_register_gucs(void) PGC_POSTMASTER, 0, NULL, NULL, NULL); + + /* BEGIN_HADRON */ + DefineCustomIntVariable( + "databricks.max_wal_mb_per_second", + "The maximum WAL MB per second allowed. If breached, sending WAL hit the backpressure. Setting to -1 disables the limit.", + NULL, + &databricks_max_wal_mb_per_second, + -1, -1, INT_MAX, + PGC_SUSET, + GUC_UNIT_MB, + NULL, NULL, NULL); + /* END_HADRON */ } @@ -393,6 +399,7 @@ assign_neon_safekeepers(const char *newval, void *extra) static uint64 backpressure_lag_impl(void) { + struct WalproposerShmemState* state = NULL; if (max_replication_apply_lag > 0 || max_replication_flush_lag > 0 || max_replication_write_lag > 0) { XLogRecPtr writePtr; @@ -426,6 +433,30 @@ backpressure_lag_impl(void) return (myFlushLsn - applyPtr - max_replication_apply_lag * MB); } } + + /* BEGIN_HADRON */ + if (databricks_max_wal_mb_per_second == -1) { + return 0; + } + + state = GetWalpropShmemState(); + if (state != NULL && !!pg_atomic_read_u32(&state->wal_rate_limiter.should_limit)) + { + TimestampTz now = GetCurrentTimestamp(); + struct WalRateLimiter *limiter = &state->wal_rate_limiter; + uint64 last_recorded_time = pg_atomic_read_u64(&limiter->last_recorded_time_us); + if (now - last_recorded_time > USECS_PER_SEC) + { + /* + * The backend has past 1 second since the last recorded time and it's time to push more WALs. + * If the backends are pushing WALs too fast, the wal proposer will rate limit them again. 
+ */ + uint32 expected = true; + pg_atomic_compare_exchange_u32(&state->wal_rate_limiter.should_limit, &expected, false); + } + return 1; + } + /* END_HADRON */ return 0; } @@ -454,12 +485,11 @@ WalproposerShmemSize(void) return sizeof(WalproposerShmemState); } -static bool +void WalproposerShmemInit(void) { bool found; - LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); walprop_shared = ShmemInitStruct("Walproposer shared state", sizeof(WalproposerShmemState), &found); @@ -472,10 +502,11 @@ WalproposerShmemInit(void) pg_atomic_init_u64(&walprop_shared->mineLastElectedTerm, 0); pg_atomic_init_u64(&walprop_shared->backpressureThrottlingTime, 0); pg_atomic_init_u64(&walprop_shared->currentClusterSize, 0); + /* BEGIN_HADRON */ + pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.should_limit, 0); + pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.last_recorded_time_us, 0); + /* END_HADRON */ } - LWLockRelease(AddinShmemInitLock); - - return found; } static void @@ -487,6 +518,10 @@ WalproposerShmemInit_SyncSafekeeper(void) pg_atomic_init_u64(&walprop_shared->propEpochStartLsn, 0); pg_atomic_init_u64(&walprop_shared->mineLastElectedTerm, 0); pg_atomic_init_u64(&walprop_shared->backpressureThrottlingTime, 0); + /* BEGIN_HADRON */ + pg_atomic_init_u32(&walprop_shared->wal_rate_limiter.should_limit, 0); + pg_atomic_init_u64(&walprop_shared->wal_rate_limiter.last_recorded_time_us, 0); + /* END_HADRON */ } #define BACK_PRESSURE_DELAY 10000L // 0.01 sec @@ -521,7 +556,6 @@ backpressure_throttling_impl(void) if (lag == 0) return retry; - old_status = get_ps_display(&len); new_status = (char *) palloc(len + 64 + 1); memcpy(new_status, old_status, len); @@ -576,42 +610,15 @@ walprop_register_bgworker(void) /* shmem handling */ -static void -nwp_prepare_shmem(void) -{ -#if PG_VERSION_NUM >= 150000 - prev_shmem_request_hook = shmem_request_hook; - shmem_request_hook = walproposer_shmem_request; -#else - RequestAddinShmemSpace(WalproposerShmemSize()); -#endif - 
prev_shmem_startup_hook_type = shmem_startup_hook; - shmem_startup_hook = nwp_shmem_startup_hook; -} - -#if PG_VERSION_NUM >= 150000 /* * shmem_request hook: request additional shared resources. We'll allocate or - * attach to the shared resources in nwp_shmem_startup_hook(). + * attach to the shared resources in WalproposerShmemInit(). */ -static void -walproposer_shmem_request(void) +void +WalproposerShmemRequest(void) { - if (prev_shmem_request_hook) - prev_shmem_request_hook(); - RequestAddinShmemSpace(WalproposerShmemSize()); } -#endif - -static void -nwp_shmem_startup_hook(void) -{ - if (prev_shmem_startup_hook_type) - prev_shmem_startup_hook_type(); - - WalproposerShmemInit(); -} WalproposerShmemState * GetWalpropShmemState(void) @@ -1458,6 +1465,8 @@ XLogBroadcastWalProposer(WalProposer *wp) { XLogRecPtr startptr; XLogRecPtr endptr; + struct WalproposerShmemState *state = NULL; + TimestampTz now = 0; /* Start from the last sent position */ startptr = sentPtr; @@ -1502,13 +1511,36 @@ XLogBroadcastWalProposer(WalProposer *wp) * that arbitrary LSN is eventually reported as written, flushed and * applied, so that it can measure the elapsed time. */ - LagTrackerWrite(endptr, GetCurrentTimestamp()); + now = GetCurrentTimestamp(); + LagTrackerWrite(endptr, now); /* Do we have any work to do? 
*/ Assert(startptr <= endptr); if (endptr <= startptr) return; + /* BEGIN_HADRON */ + state = GetWalpropShmemState(); + if (databricks_max_wal_mb_per_second != -1 && state != NULL) + { + uint64 max_wal_bytes = (uint64) databricks_max_wal_mb_per_second * 1024 * 1024; + struct WalRateLimiter *limiter = &state->wal_rate_limiter; + uint64 last_recorded_time = pg_atomic_read_u64(&limiter->last_recorded_time_us); + if (now - last_recorded_time > USECS_PER_SEC) + { + /* Reset the rate limiter */ + limiter->sent_bytes = 0; + pg_atomic_write_u64(&limiter->last_recorded_time_us, now); + pg_atomic_write_u32(&limiter->should_limit, false); + } + limiter->sent_bytes += (endptr - startptr); + if (limiter->sent_bytes > max_wal_bytes) + { + pg_atomic_write_u32(&limiter->should_limit, true); + } + } + /* END_HADRON */ + WalProposerBroadcast(wp, startptr, endptr); sentPtr = endptr; diff --git a/pgxn/neon_test_utils/neontest.c b/pgxn/neon_test_utils/neontest.c index d37412f674..5f880dfd23 100644 --- a/pgxn/neon_test_utils/neontest.c +++ b/pgxn/neon_test_utils/neontest.c @@ -236,13 +236,13 @@ clear_buffer_cache(PG_FUNCTION_ARGS) bool save_neon_test_evict; /* - * Temporarily set the zenith_test_evict GUC, so that when we pin and + * Temporarily set the neon_test_evict GUC, so that when we pin and * unpin a buffer, the buffer is evicted. We use that hack to evict all * buffers, as there is no explicit "evict this buffer" function in the * buffer manager. */ - save_neon_test_evict = zenith_test_evict; - zenith_test_evict = true; + save_neon_test_evict = neon_test_evict; + neon_test_evict = true; PG_TRY(); { /* Scan through all the buffers */ @@ -273,7 +273,7 @@ clear_buffer_cache(PG_FUNCTION_ARGS) /* * Pin the buffer, and release it again. Because we have - * zenith_test_evict==true, this will evict the page from the + * neon_test_evict==true, this will evict the page from the * buffer cache if no one else is holding a pin on it. 
*/ if (isvalid) @@ -286,7 +286,7 @@ clear_buffer_cache(PG_FUNCTION_ARGS) PG_FINALLY(); { /* restore the GUC */ - zenith_test_evict = save_neon_test_evict; + neon_test_evict = save_neon_test_evict; } PG_END_TRY(); diff --git a/pgxn/typedefs.list b/pgxn/typedefs.list index 760f384212..3ea8b3b091 100644 --- a/pgxn/typedefs.list +++ b/pgxn/typedefs.list @@ -2953,17 +2953,17 @@ XmlTableBuilderData YYLTYPE YYSTYPE YY_BUFFER_STATE -ZenithErrorResponse -ZenithExistsRequest -ZenithExistsResponse -ZenithGetPageRequest -ZenithGetPageResponse -ZenithMessage -ZenithMessageTag -ZenithNblocksRequest -ZenithNblocksResponse -ZenithRequest -ZenithResponse +NeonErrorResponse +NeonExistsRequest +NeonExistsResponse +NeonGetPageRequest +NeonGetPageResponse +NeonMessage +NeonMessageTag +NeonNblocksRequest +NeonNblocksResponse +NeonRequest +NeonResponse _SPI_connection _SPI_plan __AssignProcessToJobObject diff --git a/poetry.lock b/poetry.lock index 1bc5077eb7..b2072bf1bc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,127 +2,123 @@ [[package]] name = "aiohappyeyeballs" -version = "2.3.5" +version = "2.6.1" description = "Happy Eyeballs for asyncio" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "aiohappyeyeballs-2.3.5-py3-none-any.whl", hash = "sha256:4d6dea59215537dbc746e93e779caea8178c866856a721c9c660d7a5a7b8be03"}, - {file = "aiohappyeyeballs-2.3.5.tar.gz", hash = "sha256:6fa48b9f1317254f122a07a131a86b71ca6946ca989ce6326fff54a99a920105"}, + {file = "aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8"}, + {file = "aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558"}, ] [[package]] name = "aiohttp" -version = "3.10.11" +version = "3.12.14" description = "Async http client/server framework (asyncio)" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = 
[ - {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5077b1a5f40ffa3ba1f40d537d3bec4383988ee51fbba6b74aa8fb1bc466599e"}, - {file = "aiohttp-3.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d6a14a4d93b5b3c2891fca94fa9d41b2322a68194422bef0dd5ec1e57d7d298"}, - {file = "aiohttp-3.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffbfde2443696345e23a3c597049b1dd43049bb65337837574205e7368472177"}, - {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20b3d9e416774d41813bc02fdc0663379c01817b0874b932b81c7f777f67b217"}, - {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b943011b45ee6bf74b22245c6faab736363678e910504dd7531a58c76c9015a"}, - {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48bc1d924490f0d0b3658fe5c4b081a4d56ebb58af80a6729d4bd13ea569797a"}, - {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e12eb3f4b1f72aaaf6acd27d045753b18101524f72ae071ae1c91c1cd44ef115"}, - {file = "aiohttp-3.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f14ebc419a568c2eff3c1ed35f634435c24ead2fe19c07426af41e7adb68713a"}, - {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:72b191cdf35a518bfc7ca87d770d30941decc5aaf897ec8b484eb5cc8c7706f3"}, - {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5ab2328a61fdc86424ee540d0aeb8b73bbcad7351fb7cf7a6546fc0bcffa0038"}, - {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:aa93063d4af05c49276cf14e419550a3f45258b6b9d1f16403e777f1addf4519"}, - {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:30283f9d0ce420363c24c5c2421e71a738a2155f10adbb1a11a4d4d6d2715cfc"}, - {file = "aiohttp-3.10.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash 
= "sha256:e5358addc8044ee49143c546d2182c15b4ac3a60be01c3209374ace05af5733d"}, - {file = "aiohttp-3.10.11-cp310-cp310-win32.whl", hash = "sha256:e1ffa713d3ea7cdcd4aea9cddccab41edf6882fa9552940344c44e59652e1120"}, - {file = "aiohttp-3.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:778cbd01f18ff78b5dd23c77eb82987ee4ba23408cbed233009fd570dda7e674"}, - {file = "aiohttp-3.10.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:80ff08556c7f59a7972b1e8919f62e9c069c33566a6d28586771711e0eea4f07"}, - {file = "aiohttp-3.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c8f96e9ee19f04c4914e4e7a42a60861066d3e1abf05c726f38d9d0a466e695"}, - {file = "aiohttp-3.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fb8601394d537da9221947b5d6e62b064c9a43e88a1ecd7414d21a1a6fba9c24"}, - {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ea224cf7bc2d8856d6971cea73b1d50c9c51d36971faf1abc169a0d5f85a382"}, - {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db9503f79e12d5d80b3efd4d01312853565c05367493379df76d2674af881caa"}, - {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0f449a50cc33f0384f633894d8d3cd020e3ccef81879c6e6245c3c375c448625"}, - {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82052be3e6d9e0c123499127782a01a2b224b8af8c62ab46b3f6197035ad94e9"}, - {file = "aiohttp-3.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20063c7acf1eec550c8eb098deb5ed9e1bb0521613b03bb93644b810986027ac"}, - {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:489cced07a4c11488f47aab1f00d0c572506883f877af100a38f1fedaa884c3a"}, - {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ea9b3bab329aeaa603ed3bf605f1e2a6f36496ad7e0e1aa42025f368ee2dc07b"}, - {file = 
"aiohttp-3.10.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ca117819d8ad113413016cb29774b3f6d99ad23c220069789fc050267b786c16"}, - {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2dfb612dcbe70fb7cdcf3499e8d483079b89749c857a8f6e80263b021745c730"}, - {file = "aiohttp-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9b615d3da0d60e7d53c62e22b4fd1c70f4ae5993a44687b011ea3a2e49051b8"}, - {file = "aiohttp-3.10.11-cp311-cp311-win32.whl", hash = "sha256:29103f9099b6068bbdf44d6a3d090e0a0b2be6d3c9f16a070dd9d0d910ec08f9"}, - {file = "aiohttp-3.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:236b28ceb79532da85d59aa9b9bf873b364e27a0acb2ceaba475dc61cffb6f3f"}, - {file = "aiohttp-3.10.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7480519f70e32bfb101d71fb9a1f330fbd291655a4c1c922232a48c458c52710"}, - {file = "aiohttp-3.10.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f65267266c9aeb2287a6622ee2bb39490292552f9fbf851baabc04c9f84e048d"}, - {file = "aiohttp-3.10.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7400a93d629a0608dc1d6c55f1e3d6e07f7375745aaa8bd7f085571e4d1cee97"}, - {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f34b97e4b11b8d4eb2c3a4f975be626cc8af99ff479da7de49ac2c6d02d35725"}, - {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e7b825da878464a252ccff2958838f9caa82f32a8dbc334eb9b34a026e2c636"}, - {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9f92a344c50b9667827da308473005f34767b6a2a60d9acff56ae94f895f385"}, - {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f1ab987a27b83c5268a17218463c2ec08dbb754195113867a27b166cd6087"}, - {file = "aiohttp-3.10.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:1dc0f4ca54842173d03322793ebcf2c8cc2d34ae91cc762478e295d8e361e03f"}, - {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7ce6a51469bfaacff146e59e7fb61c9c23006495d11cc24c514a455032bcfa03"}, - {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:aad3cd91d484d065ede16f3cf15408254e2469e3f613b241a1db552c5eb7ab7d"}, - {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f4df4b8ca97f658c880fb4b90b1d1ec528315d4030af1ec763247ebfd33d8b9a"}, - {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2e4e18a0a2d03531edbc06c366954e40a3f8d2a88d2b936bbe78a0c75a3aab3e"}, - {file = "aiohttp-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6ce66780fa1a20e45bc753cda2a149daa6dbf1561fc1289fa0c308391c7bc0a4"}, - {file = "aiohttp-3.10.11-cp312-cp312-win32.whl", hash = "sha256:a919c8957695ea4c0e7a3e8d16494e3477b86f33067478f43106921c2fef15bb"}, - {file = "aiohttp-3.10.11-cp312-cp312-win_amd64.whl", hash = "sha256:b5e29706e6389a2283a91611c91bf24f218962717c8f3b4e528ef529d112ee27"}, - {file = "aiohttp-3.10.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:703938e22434d7d14ec22f9f310559331f455018389222eed132808cd8f44127"}, - {file = "aiohttp-3.10.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9bc50b63648840854e00084c2b43035a62e033cb9b06d8c22b409d56eb098413"}, - {file = "aiohttp-3.10.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5f0463bf8b0754bc744e1feb61590706823795041e63edf30118a6f0bf577461"}, - {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6c6dec398ac5a87cb3a407b068e1106b20ef001c344e34154616183fe684288"}, - {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcaf2d79104d53d4dcf934f7ce76d3d155302d07dae24dff6c9fffd217568067"}, - {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:25fd5470922091b5a9aeeb7e75be609e16b4fba81cdeaf12981393fb240dd10e"}, - {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbde2ca67230923a42161b1f408c3992ae6e0be782dca0c44cb3206bf330dee1"}, - {file = "aiohttp-3.10.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:249c8ff8d26a8b41a0f12f9df804e7c685ca35a207e2410adbd3e924217b9006"}, - {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:878ca6a931ee8c486a8f7b432b65431d095c522cbeb34892bee5be97b3481d0f"}, - {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:8663f7777ce775f0413324be0d96d9730959b2ca73d9b7e2c2c90539139cbdd6"}, - {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:6cd3f10b01f0c31481fba8d302b61603a2acb37b9d30e1d14e0f5a58b7b18a31"}, - {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e8d8aad9402d3aa02fdc5ca2fe68bcb9fdfe1f77b40b10410a94c7f408b664d"}, - {file = "aiohttp-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:38e3c4f80196b4f6c3a85d134a534a56f52da9cb8d8e7af1b79a32eefee73a00"}, - {file = "aiohttp-3.10.11-cp313-cp313-win32.whl", hash = "sha256:fc31820cfc3b2863c6e95e14fcf815dc7afe52480b4dc03393c4873bb5599f71"}, - {file = "aiohttp-3.10.11-cp313-cp313-win_amd64.whl", hash = "sha256:4996ff1345704ffdd6d75fb06ed175938c133425af616142e7187f28dc75f14e"}, - {file = "aiohttp-3.10.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:74baf1a7d948b3d640badeac333af581a367ab916b37e44cf90a0334157cdfd2"}, - {file = "aiohttp-3.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:473aebc3b871646e1940c05268d451f2543a1d209f47035b594b9d4e91ce8339"}, - {file = "aiohttp-3.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c2f746a6968c54ab2186574e15c3f14f3e7f67aef12b761e043b33b89c5b5f95"}, - {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d110cabad8360ffa0dec8f6ec60e43286e9d251e77db4763a87dcfe55b4adb92"}, - {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0099c7d5d7afff4202a0c670e5b723f7718810000b4abcbc96b064129e64bc7"}, - {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0316e624b754dbbf8c872b62fe6dcb395ef20c70e59890dfa0de9eafccd2849d"}, - {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a5f7ab8baf13314e6b2485965cbacb94afff1e93466ac4d06a47a81c50f9cca"}, - {file = "aiohttp-3.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c891011e76041e6508cbfc469dd1a8ea09bc24e87e4c204e05f150c4c455a5fa"}, - {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9208299251370ee815473270c52cd3f7069ee9ed348d941d574d1457d2c73e8b"}, - {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:459f0f32c8356e8125f45eeff0ecf2b1cb6db1551304972702f34cd9e6c44658"}, - {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:14cdc8c1810bbd4b4b9f142eeee23cda528ae4e57ea0923551a9af4820980e39"}, - {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:971aa438a29701d4b34e4943e91b5e984c3ae6ccbf80dd9efaffb01bd0b243a9"}, - {file = "aiohttp-3.10.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:9a309c5de392dfe0f32ee57fa43ed8fc6ddf9985425e84bd51ed66bb16bce3a7"}, - {file = "aiohttp-3.10.11-cp38-cp38-win32.whl", hash = "sha256:9ec1628180241d906a0840b38f162a3215114b14541f1a8711c368a8739a9be4"}, - {file = "aiohttp-3.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:9c6e0ffd52c929f985c7258f83185d17c76d4275ad22e90aa29f38e211aacbec"}, - {file = "aiohttp-3.10.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cdc493a2e5d8dc79b2df5bec9558425bcd39aff59fc949810cbd0832e294b106"}, - {file = "aiohttp-3.10.11-cp39-cp39-macosx_10_9_x86_64.whl", 
hash = "sha256:b3e70f24e7d0405be2348da9d5a7836936bf3a9b4fd210f8c37e8d48bc32eca6"}, - {file = "aiohttp-3.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968b8fb2a5eee2770eda9c7b5581587ef9b96fbdf8dcabc6b446d35ccc69df01"}, - {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deef4362af9493d1382ef86732ee2e4cbc0d7c005947bd54ad1a9a16dd59298e"}, - {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:686b03196976e327412a1b094f4120778c7c4b9cff9bce8d2fdfeca386b89829"}, - {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3bf6d027d9d1d34e1c2e1645f18a6498c98d634f8e373395221121f1c258ace8"}, - {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:099fd126bf960f96d34a760e747a629c27fb3634da5d05c7ef4d35ef4ea519fc"}, - {file = "aiohttp-3.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c73c4d3dae0b4644bc21e3de546530531d6cdc88659cdeb6579cd627d3c206aa"}, - {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0c5580f3c51eea91559db3facd45d72e7ec970b04528b4709b1f9c2555bd6d0b"}, - {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fdf6429f0caabfd8a30c4e2eaecb547b3c340e4730ebfe25139779b9815ba138"}, - {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d97187de3c276263db3564bb9d9fad9e15b51ea10a371ffa5947a5ba93ad6777"}, - {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:0acafb350cfb2eba70eb5d271f55e08bd4502ec35e964e18ad3e7d34d71f7261"}, - {file = "aiohttp-3.10.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c13ed0c779911c7998a58e7848954bd4d63df3e3575f591e321b19a2aec8df9f"}, - {file = "aiohttp-3.10.11-cp39-cp39-win32.whl", hash = "sha256:22b7c540c55909140f63ab4f54ec2c20d2635c0289cdd8006da46f3327f971b9"}, - {file = 
"aiohttp-3.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:7b26b1551e481012575dab8e3727b16fe7dd27eb2711d2e63ced7368756268fb"}, - {file = "aiohttp-3.10.11.tar.gz", hash = "sha256:9dc2b8f3dcab2e39e0fa309c8da50c3b55e6f34ab25f1a71d3288f24924d33a7"}, + {file = "aiohttp-3.12.14-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:906d5075b5ba0dd1c66fcaaf60eb09926a9fef3ca92d912d2a0bbdbecf8b1248"}, + {file = "aiohttp-3.12.14-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c875bf6fc2fd1a572aba0e02ef4e7a63694778c5646cdbda346ee24e630d30fb"}, + {file = "aiohttp-3.12.14-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fbb284d15c6a45fab030740049d03c0ecd60edad9cd23b211d7e11d3be8d56fd"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38e360381e02e1a05d36b223ecab7bc4a6e7b5ab15760022dc92589ee1d4238c"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:aaf90137b5e5d84a53632ad95ebee5c9e3e7468f0aab92ba3f608adcb914fa95"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e532a25e4a0a2685fa295a31acf65e027fbe2bea7a4b02cdfbbba8a064577663"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eab9762c4d1b08ae04a6c77474e6136da722e34fdc0e6d6eab5ee93ac29f35d1"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abe53c3812b2899889a7fca763cdfaeee725f5be68ea89905e4275476ffd7e61"}, + {file = "aiohttp-3.12.14-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5760909b7080aa2ec1d320baee90d03b21745573780a072b66ce633eb77a8656"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:02fcd3f69051467bbaa7f84d7ec3267478c7df18d68b2e28279116e29d18d4f3"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_armv7l.whl", hash = 
"sha256:4dcd1172cd6794884c33e504d3da3c35648b8be9bfa946942d353b939d5f1288"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:224d0da41355b942b43ad08101b1b41ce633a654128ee07e36d75133443adcda"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e387668724f4d734e865c1776d841ed75b300ee61059aca0b05bce67061dcacc"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:dec9cde5b5a24171e0b0a4ca064b1414950904053fb77c707efd876a2da525d8"}, + {file = "aiohttp-3.12.14-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bbad68a2af4877cc103cd94af9160e45676fc6f0c14abb88e6e092b945c2c8e3"}, + {file = "aiohttp-3.12.14-cp310-cp310-win32.whl", hash = "sha256:ee580cb7c00bd857b3039ebca03c4448e84700dc1322f860cf7a500a6f62630c"}, + {file = "aiohttp-3.12.14-cp310-cp310-win_amd64.whl", hash = "sha256:cf4f05b8cea571e2ccc3ca744e35ead24992d90a72ca2cf7ab7a2efbac6716db"}, + {file = "aiohttp-3.12.14-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f4552ff7b18bcec18b60a90c6982049cdb9dac1dba48cf00b97934a06ce2e597"}, + {file = "aiohttp-3.12.14-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8283f42181ff6ccbcf25acaae4e8ab2ff7e92b3ca4a4ced73b2c12d8cd971393"}, + {file = "aiohttp-3.12.14-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:040afa180ea514495aaff7ad34ec3d27826eaa5d19812730fe9e529b04bb2179"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b413c12f14c1149f0ffd890f4141a7471ba4b41234fe4fd4a0ff82b1dc299dbb"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:1d6f607ce2e1a93315414e3d448b831238f1874b9968e1195b06efaa5c87e245"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:565e70d03e924333004ed101599902bba09ebb14843c8ea39d657f037115201b"}, + {file = 
"aiohttp-3.12.14-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4699979560728b168d5ab63c668a093c9570af2c7a78ea24ca5212c6cdc2b641"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad5fdf6af93ec6c99bf800eba3af9a43d8bfd66dce920ac905c817ef4a712afe"}, + {file = "aiohttp-3.12.14-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ac76627c0b7ee0e80e871bde0d376a057916cb008a8f3ffc889570a838f5cc7"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:798204af1180885651b77bf03adc903743a86a39c7392c472891649610844635"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:4f1205f97de92c37dd71cf2d5bcfb65fdaed3c255d246172cce729a8d849b4da"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:76ae6f1dd041f85065d9df77c6bc9c9703da9b5c018479d20262acc3df97d419"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a194ace7bc43ce765338ca2dfb5661489317db216ea7ea700b0332878b392cab"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:16260e8e03744a6fe3fcb05259eeab8e08342c4c33decf96a9dad9f1187275d0"}, + {file = "aiohttp-3.12.14-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8c779e5ebbf0e2e15334ea404fcce54009dc069210164a244d2eac8352a44b28"}, + {file = "aiohttp-3.12.14-cp311-cp311-win32.whl", hash = "sha256:a289f50bf1bd5be227376c067927f78079a7bdeccf8daa6a9e65c38bae14324b"}, + {file = "aiohttp-3.12.14-cp311-cp311-win_amd64.whl", hash = "sha256:0b8a69acaf06b17e9c54151a6c956339cf46db4ff72b3ac28516d0f7068f4ced"}, + {file = "aiohttp-3.12.14-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a0ecbb32fc3e69bc25efcda7d28d38e987d007096cbbeed04f14a6662d0eee22"}, + {file = "aiohttp-3.12.14-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0400f0ca9bb3e0b02f6466421f253797f6384e9845820c8b05e976398ac1d81a"}, + 
{file = "aiohttp-3.12.14-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a56809fed4c8a830b5cae18454b7464e1529dbf66f71c4772e3cfa9cbec0a1ff"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27f2e373276e4755691a963e5d11756d093e346119f0627c2d6518208483fb6d"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ca39e433630e9a16281125ef57ece6817afd1d54c9f1bf32e901f38f16035869"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c748b3f8b14c77720132b2510a7d9907a03c20ba80f469e58d5dfd90c079a1c"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0a568abe1b15ce69d4cc37e23020720423f0728e3cb1f9bcd3f53420ec3bfe7"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9888e60c2c54eaf56704b17feb558c7ed6b7439bca1e07d4818ab878f2083660"}, + {file = "aiohttp-3.12.14-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3006a1dc579b9156de01e7916d38c63dc1ea0679b14627a37edf6151bc530088"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aa8ec5c15ab80e5501a26719eb48a55f3c567da45c6ea5bb78c52c036b2655c7"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:39b94e50959aa07844c7fe2206b9f75d63cc3ad1c648aaa755aa257f6f2498a9"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:04c11907492f416dad9885d503fbfc5dcb6768d90cad8639a771922d584609d3"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:88167bd9ab69bb46cee91bd9761db6dfd45b6e76a0438c7e884c3f8160ff21eb"}, + {file = "aiohttp-3.12.14-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:791504763f25e8f9f251e4688195e8b455f8820274320204f7eafc467e609425"}, + {file = 
"aiohttp-3.12.14-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2785b112346e435dd3a1a67f67713a3fe692d288542f1347ad255683f066d8e0"}, + {file = "aiohttp-3.12.14-cp312-cp312-win32.whl", hash = "sha256:15f5f4792c9c999a31d8decf444e79fcfd98497bf98e94284bf390a7bb8c1729"}, + {file = "aiohttp-3.12.14-cp312-cp312-win_amd64.whl", hash = "sha256:3b66e1a182879f579b105a80d5c4bd448b91a57e8933564bf41665064796a338"}, + {file = "aiohttp-3.12.14-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:3143a7893d94dc82bc409f7308bc10d60285a3cd831a68faf1aa0836c5c3c767"}, + {file = "aiohttp-3.12.14-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3d62ac3d506cef54b355bd34c2a7c230eb693880001dfcda0bf88b38f5d7af7e"}, + {file = "aiohttp-3.12.14-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:48e43e075c6a438937c4de48ec30fa8ad8e6dfef122a038847456bfe7b947b63"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:077b4488411a9724cecc436cbc8c133e0d61e694995b8de51aaf351c7578949d"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d8c35632575653f297dcbc9546305b2c1133391089ab925a6a3706dfa775ccab"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b8ce87963f0035c6834b28f061df90cf525ff7c9b6283a8ac23acee6502afd4"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0a2cf66e32a2563bb0766eb24eae7e9a269ac0dc48db0aae90b575dc9583026"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdea089caf6d5cde975084a884c72d901e36ef9c2fd972c9f51efbbc64e96fbd"}, + {file = "aiohttp-3.12.14-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a7865f27db67d49e81d463da64a59365ebd6b826e0e4847aa111056dcb9dc88"}, + {file = 
"aiohttp-3.12.14-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0ab5b38a6a39781d77713ad930cb5e7feea6f253de656a5f9f281a8f5931b086"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9b3b15acee5c17e8848d90a4ebc27853f37077ba6aec4d8cb4dbbea56d156933"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e4c972b0bdaac167c1e53e16a16101b17c6d0ed7eac178e653a07b9f7fad7151"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7442488b0039257a3bdbc55f7209587911f143fca11df9869578db6c26feeeb8"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f68d3067eecb64c5e9bab4a26aa11bd676f4c70eea9ef6536b0a4e490639add3"}, + {file = "aiohttp-3.12.14-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f88d3704c8b3d598a08ad17d06006cb1ca52a1182291f04979e305c8be6c9758"}, + {file = "aiohttp-3.12.14-cp313-cp313-win32.whl", hash = "sha256:a3c99ab19c7bf375c4ae3debd91ca5d394b98b6089a03231d4c580ef3c2ae4c5"}, + {file = "aiohttp-3.12.14-cp313-cp313-win_amd64.whl", hash = "sha256:3f8aad695e12edc9d571f878c62bedc91adf30c760c8632f09663e5f564f4baa"}, + {file = "aiohttp-3.12.14-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b8cc6b05e94d837bcd71c6531e2344e1ff0fb87abe4ad78a9261d67ef5d83eae"}, + {file = "aiohttp-3.12.14-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d1dcb015ac6a3b8facd3677597edd5ff39d11d937456702f0bb2b762e390a21b"}, + {file = "aiohttp-3.12.14-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3779ed96105cd70ee5e85ca4f457adbce3d9ff33ec3d0ebcdf6c5727f26b21b3"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:717a0680729b4ebd7569c1dcd718c46b09b360745fd8eb12317abc74b14d14d0"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b5dd3a2ef7c7e968dbbac8f5574ebeac4d2b813b247e8cec28174a2ba3627170"}, + {file = 
"aiohttp-3.12.14-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4710f77598c0092239bc12c1fcc278a444e16c7032d91babf5abbf7166463f7b"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f3e9f75ae842a6c22a195d4a127263dbf87cbab729829e0bd7857fb1672400b2"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f9c8d55d6802086edd188e3a7d85a77787e50d56ce3eb4757a3205fa4657922"}, + {file = "aiohttp-3.12.14-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79b29053ff3ad307880d94562cca80693c62062a098a5776ea8ef5ef4b28d140"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:23e1332fff36bebd3183db0c7a547a1da9d3b4091509f6d818e098855f2f27d3"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:a564188ce831fd110ea76bcc97085dd6c625b427db3f1dbb14ca4baa1447dcbc"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:a7a1b4302f70bb3ec40ca86de82def532c97a80db49cac6a6700af0de41af5ee"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:1b07ccef62950a2519f9bfc1e5b294de5dd84329f444ca0b329605ea787a3de5"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:938bd3ca6259e7e48b38d84f753d548bd863e0c222ed6ee6ace3fd6752768a84"}, + {file = "aiohttp-3.12.14-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8bc784302b6b9f163b54c4e93d7a6f09563bd01ff2b841b29ed3ac126e5040bf"}, + {file = "aiohttp-3.12.14-cp39-cp39-win32.whl", hash = "sha256:a3416f95961dd7d5393ecff99e3f41dc990fb72eda86c11f2a60308ac6dcd7a0"}, + {file = "aiohttp-3.12.14-cp39-cp39-win_amd64.whl", hash = "sha256:196858b8820d7f60578f8b47e5669b3195c21d8ab261e39b1d705346458f445f"}, + {file = "aiohttp-3.12.14.tar.gz", hash = "sha256:6e06e120e34d93100de448fd941522e11dafa78ef1a893c179901b7d66aa29f2"}, ] [package.dependencies] 
-aiohappyeyeballs = ">=2.3.0" -aiosignal = ">=1.1.2" +aiohappyeyeballs = ">=2.5.0" +aiosignal = ">=1.4.0" attrs = ">=17.3.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" -yarl = ">=1.12.0,<2.0" +propcache = ">=0.2.0" +yarl = ">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.2.0) ; sys_platform == \"linux\" or sys_platform == \"darwin\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.3.0)", "brotlicffi ; platform_python_implementation != \"CPython\""] [[package]] name = "aiopg" @@ -145,18 +141,19 @@ sa = ["sqlalchemy[postgresql-psycopg2binary] (>=1.3,<1.5)"] [[package]] name = "aiosignal" -version = "1.3.1" +version = "1.4.0" description = "aiosignal: a list of registered asynchronous callbacks" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, + {file = "aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e"}, + {file = "aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7"}, ] [package.dependencies] frozenlist = ">=1.1.0" +typing-extensions = {version = ">=4.2", markers = "python_version < \"3.13\""} [[package]] name = "allure-pytest" @@ -3847,4 +3844,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "bd93313f110110aa53b24a3ed47ba2d7f60e2c658a79cdff7320fed1bb1b57b5" +content-hash = "6a1e8ba06b8194bf28d87fd5e184e2ddc2b4a19dffcbe3953b26da3d55c9212f" diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index ce8610be24..82fe6818e3 100644 --- 
a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -16,6 +16,7 @@ async-compression.workspace = true async-trait.workspace = true atomic-take.workspace = true aws-config.workspace = true +aws-credential-types.workspace = true aws-sdk-iam.workspace = true aws-sigv4.workspace = true base64.workspace = true @@ -48,6 +49,7 @@ indexmap = { workspace = true, features = ["serde"] } ipnet.workspace = true itertools.workspace = true itoa.workspace = true +json = { path = "../libs/proxy/json" } lasso = { workspace = true, features = ["multi-threaded"] } measured = { workspace = true, features = ["lasso"] } metrics.workspace = true @@ -127,4 +129,4 @@ rstest.workspace = true walkdir.workspace = true rand_distr = "0.4" tokio-postgres.workspace = true -tracing-test = "0.2" \ No newline at end of file +tracing-test = "0.2" diff --git a/proxy/README.md b/proxy/README.md index e10ff3d710..ff48f9f323 100644 --- a/proxy/README.md +++ b/proxy/README.md @@ -123,6 +123,11 @@ docker exec -it proxy-postgres psql -U postgres -c "CREATE TABLE neon_control_pl docker exec -it proxy-postgres psql -U postgres -c "CREATE ROLE proxy WITH SUPERUSER LOGIN PASSWORD 'password';" ``` +If you want to test query cancellation, redis is also required: +```sh +docker run --detach --name proxy-redis --publish 6379:6379 redis:7.0 +``` + Let's create self-signed certificate by running: ```sh openssl req -new -x509 -days 365 -nodes -text -out server.crt -keyout server.key -subj "/CN=*.local.neon.build" @@ -130,7 +135,10 @@ openssl req -new -x509 -days 365 -nodes -text -out server.crt -keyout server.key Then we need to build proxy with 'testing' feature and run, e.g.: ```sh -RUST_LOG=proxy LOGFMT=text cargo run -p proxy --bin proxy --features testing -- --auth-backend postgres --auth-endpoint 'postgresql://postgres:proxy-postgres@127.0.0.1:5432/postgres' -c server.crt -k server.key +RUST_LOG=proxy LOGFMT=text cargo run -p proxy --bin proxy --features testing -- \ + --auth-backend postgres --auth-endpoint 
'postgresql://postgres:proxy-postgres@127.0.0.1:5432/postgres' \ + --redis-auth-type="plain" --redis-plain="redis://127.0.0.1:6379" \ + -c server.crt -k server.key ``` Now from client you can start a new session: diff --git a/proxy/src/batch.rs b/proxy/src/batch.rs index 33e08797f2..cf866ab9a3 100644 --- a/proxy/src/batch.rs +++ b/proxy/src/batch.rs @@ -7,13 +7,17 @@ use std::pin::pin; use std::sync::Mutex; use scopeguard::ScopeGuard; +use tokio::sync::oneshot; use tokio::sync::oneshot::error::TryRecvError; use crate::ext::LockExt; +type ProcResult

= Result<

::Res,

::Err>; + pub trait QueueProcessing: Send + 'static { type Req: Send + 'static; type Res: Send; + type Err: Send + Clone; /// Get the desired batch size. fn batch_size(&self, queue_size: usize) -> usize; @@ -24,7 +28,18 @@ pub trait QueueProcessing: Send + 'static { /// If this apply can error, it's expected that errors be forwarded to each Self::Res. /// /// Batching does not need to happen atomically. - fn apply(&mut self, req: Vec) -> impl Future> + Send; + fn apply( + &mut self, + req: Vec, + ) -> impl Future, Self::Err>> + Send; +} + +#[derive(thiserror::Error)] +pub enum BatchQueueError { + #[error(transparent)] + Result(E), + #[error(transparent)] + Cancelled(C), } pub struct BatchQueue { @@ -34,7 +49,7 @@ pub struct BatchQueue { struct BatchJob { req: P::Req, - res: tokio::sync::oneshot::Sender, + res: tokio::sync::oneshot::Sender>, } impl BatchQueue

{ @@ -55,11 +70,11 @@ impl BatchQueue

{ &self, req: P::Req, cancelled: impl Future, - ) -> Result { + ) -> Result> { let (id, mut rx) = self.inner.lock_propagate_poison().register_job(req); let mut cancelled = pin!(cancelled); - let resp = loop { + let resp: Option> = loop { // try become the leader, or try wait for success. let mut processor = tokio::select! { // try become leader. @@ -72,7 +87,7 @@ impl BatchQueue

{ if inner.queue.remove(&id).is_some() { tracing::warn!("batched task cancelled before completion"); } - return Err(cancel); + return Err(BatchQueueError::Cancelled(cancel)); }, }; @@ -96,18 +111,30 @@ impl BatchQueue

{ // good: we didn't get cancelled. ScopeGuard::into_inner(cancel_safety); - if values.len() != resps.len() { - tracing::error!( - "batch: invalid response size, expected={}, got={}", - resps.len(), - values.len() - ); - } + match values { + Ok(values) => { + if values.len() != resps.len() { + tracing::error!( + "batch: invalid response size, expected={}, got={}", + resps.len(), + values.len() + ); + } - // send response values. - for (tx, value) in std::iter::zip(resps, values) { - if tx.send(value).is_err() { - // receiver hung up but that's fine. + // send response values. + for (tx, value) in std::iter::zip(resps, values) { + if tx.send(Ok(value)).is_err() { + // receiver hung up but that's fine. + } + } + } + + Err(err) => { + for tx in resps { + if tx.send(Err(err.clone())).is_err() { + // receiver hung up but that's fine. + } + } } } @@ -129,7 +156,8 @@ impl BatchQueue

{ tracing::debug!(id, "batch: job completed"); - Ok(resp.expect("no response found. batch processer should not panic")) + resp.expect("no response found. batch processer should not panic") + .map_err(BatchQueueError::Result) } } @@ -139,8 +167,8 @@ struct BatchQueueInner { } impl BatchQueueInner

{ - fn register_job(&mut self, req: P::Req) -> (u64, tokio::sync::oneshot::Receiver) { - let (tx, rx) = tokio::sync::oneshot::channel(); + fn register_job(&mut self, req: P::Req) -> (u64, oneshot::Receiver>) { + let (tx, rx) = oneshot::channel(); let id = self.version; @@ -158,7 +186,7 @@ impl BatchQueueInner

{ (id, rx) } - fn get_batch(&mut self, p: &P) -> (Vec, Vec>) { + fn get_batch(&mut self, p: &P) -> (Vec, Vec>>) { let batch_size = p.batch_size(self.queue.len()); let mut reqs = Vec::with_capacity(batch_size); let mut resps = Vec::with_capacity(batch_size); diff --git a/proxy/src/binary/proxy.rs b/proxy/src/binary/proxy.rs index c10678dc68..16a7dc7b67 100644 --- a/proxy/src/binary/proxy.rs +++ b/proxy/src/binary/proxy.rs @@ -21,7 +21,7 @@ use tokio::net::TcpListener; use tokio::sync::Notify; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; -use tracing::{Instrument, error, info, warn}; +use tracing::{error, info, warn}; use utils::sentry_init::init_sentry; use utils::{project_build_tag, project_git_version}; @@ -195,7 +195,9 @@ struct ProxyCliArgs { #[clap(long, default_value = config::ProjectInfoCacheOptions::CACHE_DEFAULT_OPTIONS)] project_info_cache: String, /// cache for all valid endpoints - #[clap(long, default_value = config::EndpointCacheConfig::CACHE_DEFAULT_OPTIONS)] + // TODO: remove after a couple of releases. + #[clap(long, default_value_t = String::new())] + #[deprecated] endpoint_cache_config: String, #[clap(flatten)] parquet_upload: ParquetUploadArgs, @@ -520,15 +522,7 @@ pub async fn run() -> anyhow::Result<()> { maintenance_tasks.spawn(usage_metrics::task_main(metrics_config)); } - if let Either::Left(auth::Backend::ControlPlane(api, ())) = &auth_backend - && let crate::control_plane::client::ControlPlaneClient::ProxyV1(api) = &**api - && let Some(client) = redis_client - { - // project info cache and invalidation of that cache. - let cache = api.caches.project_info.clone(); - maintenance_tasks.spawn(notifications::task_main(client.clone(), cache.clone())); - maintenance_tasks.spawn(async move { cache.clone().gc_worker().await }); - + if let Some(client) = redis_client { // Try to connect to Redis 3 times with 1 + (0..0.1) second interval. 
// This prevents immediate exit and pod restart, // which can cause hammering of the redis in case of connection issues. @@ -559,12 +553,15 @@ pub async fn run() -> anyhow::Result<()> { } } - // listen for notifications of new projects/endpoints/branches - let cache = api.caches.endpoints_cache.clone(); - let span = tracing::info_span!("endpoints_cache"); - maintenance_tasks.spawn( - async move { cache.do_read(client, cancellation_token.clone()).await }.instrument(span), - ); + #[allow(irrefutable_let_patterns)] + if let Either::Left(auth::Backend::ControlPlane(api, ())) = &auth_backend + && let crate::control_plane::client::ControlPlaneClient::ProxyV1(api) = &**api + { + // project info cache and invalidation of that cache. + let cache = api.caches.project_info.clone(); + maintenance_tasks.spawn(notifications::task_main(client, cache.clone())); + maintenance_tasks.spawn(async move { cache.gc_worker().await }); + } } let maintenance = loop { @@ -712,18 +709,15 @@ fn build_auth_backend( let wake_compute_cache_config: CacheOptions = args.wake_compute_cache.parse()?; let project_info_cache_config: ProjectInfoCacheOptions = args.project_info_cache.parse()?; - let endpoint_cache_config: config::EndpointCacheConfig = - args.endpoint_cache_config.parse()?; info!("Using NodeInfoCache (wake_compute) with options={wake_compute_cache_config:?}"); info!( "Using AllowedIpsCache (wake_compute) with options={project_info_cache_config:?}" ); - info!("Using EndpointCacheConfig with options={endpoint_cache_config:?}"); + let caches = Box::leak(Box::new(control_plane::caches::ApiCaches::new( wake_compute_cache_config, project_info_cache_config, - endpoint_cache_config, ))); let config::ConcurrencyLockOptions { @@ -793,18 +787,15 @@ fn build_auth_backend( let wake_compute_cache_config: CacheOptions = args.wake_compute_cache.parse()?; let project_info_cache_config: ProjectInfoCacheOptions = args.project_info_cache.parse()?; - let endpoint_cache_config: config::EndpointCacheConfig = - 
args.endpoint_cache_config.parse()?; info!("Using NodeInfoCache (wake_compute) with options={wake_compute_cache_config:?}"); info!( "Using AllowedIpsCache (wake_compute) with options={project_info_cache_config:?}" ); - info!("Using EndpointCacheConfig with options={endpoint_cache_config:?}"); + let caches = Box::leak(Box::new(control_plane::caches::ApiCaches::new( wake_compute_cache_config, project_info_cache_config, - endpoint_cache_config, ))); let config::ConcurrencyLockOptions { diff --git a/proxy/src/cache/endpoints.rs b/proxy/src/cache/endpoints.rs deleted file mode 100644 index 3c88e07484..0000000000 --- a/proxy/src/cache/endpoints.rs +++ /dev/null @@ -1,283 +0,0 @@ -use std::convert::Infallible; -use std::future::pending; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Mutex}; - -use clashmap::ClashSet; -use redis::streams::{StreamReadOptions, StreamReadReply}; -use redis::{AsyncCommands, FromRedisValue, Value}; -use serde::Deserialize; -use tokio_util::sync::CancellationToken; -use tracing::info; - -use crate::config::EndpointCacheConfig; -use crate::context::RequestContext; -use crate::ext::LockExt; -use crate::intern::{BranchIdInt, EndpointIdInt, ProjectIdInt}; -use crate::metrics::{Metrics, RedisErrors, RedisEventsCount}; -use crate::rate_limiter::GlobalRateLimiter; -use crate::redis::connection_with_credentials_provider::ConnectionWithCredentialsProvider; -use crate::types::EndpointId; - -// TODO: this could be an enum, but events in Redis need to be fixed first. -// ProjectCreated was sent with type:branch_created. So we ignore type. 
-#[derive(Deserialize, Debug, Clone, PartialEq)] -struct ControlPlaneEvent { - endpoint_created: Option, - branch_created: Option, - project_created: Option, - #[serde(rename = "type")] - _type: Option, -} - -#[derive(Deserialize, Debug, Clone, PartialEq)] -struct EndpointCreated { - endpoint_id: EndpointIdInt, -} - -#[derive(Deserialize, Debug, Clone, PartialEq)] -struct BranchCreated { - branch_id: BranchIdInt, -} - -#[derive(Deserialize, Debug, Clone, PartialEq)] -struct ProjectCreated { - project_id: ProjectIdInt, -} - -impl TryFrom<&Value> for ControlPlaneEvent { - type Error = anyhow::Error; - fn try_from(value: &Value) -> Result { - let json = String::from_redis_value(value)?; - Ok(serde_json::from_str(&json)?) - } -} - -pub struct EndpointsCache { - config: EndpointCacheConfig, - endpoints: ClashSet, - branches: ClashSet, - projects: ClashSet, - ready: AtomicBool, - limiter: Arc>, -} - -impl EndpointsCache { - pub(crate) fn new(config: EndpointCacheConfig) -> Self { - Self { - limiter: Arc::new(Mutex::new(GlobalRateLimiter::new( - config.limiter_info.clone(), - ))), - config, - endpoints: ClashSet::new(), - branches: ClashSet::new(), - projects: ClashSet::new(), - ready: AtomicBool::new(false), - } - } - - pub(crate) fn is_valid(&self, ctx: &RequestContext, endpoint: &EndpointId) -> bool { - if !self.ready.load(Ordering::Acquire) { - // the endpoint cache is not yet fully initialised. - return true; - } - - if !self.should_reject(endpoint) { - ctx.set_rejected(false); - return true; - } - - // report that we might want to reject this endpoint - ctx.set_rejected(true); - - // If cache is disabled, just collect the metrics and return. - if self.config.disable_cache { - return true; - } - - // If the limiter allows, we can pretend like it's valid - // (incase it is, due to redis channel lag). - if self.limiter.lock_propagate_poison().check() { - return true; - } - - // endpoint not found, and there's too much load. 
- false - } - - fn should_reject(&self, endpoint: &EndpointId) -> bool { - if endpoint.is_endpoint() { - let Some(endpoint) = EndpointIdInt::get(endpoint) else { - // if we haven't interned this endpoint, it's not in the cache. - return true; - }; - !self.endpoints.contains(&endpoint) - } else if endpoint.is_branch() { - let Some(branch) = BranchIdInt::get(endpoint) else { - // if we haven't interned this branch, it's not in the cache. - return true; - }; - !self.branches.contains(&branch) - } else { - let Some(project) = ProjectIdInt::get(endpoint) else { - // if we haven't interned this project, it's not in the cache. - return true; - }; - !self.projects.contains(&project) - } - } - - fn insert_event(&self, event: ControlPlaneEvent) { - if let Some(endpoint_created) = event.endpoint_created { - self.endpoints.insert(endpoint_created.endpoint_id); - Metrics::get() - .proxy - .redis_events_count - .inc(RedisEventsCount::EndpointCreated); - } else if let Some(branch_created) = event.branch_created { - self.branches.insert(branch_created.branch_id); - Metrics::get() - .proxy - .redis_events_count - .inc(RedisEventsCount::BranchCreated); - } else if let Some(project_created) = event.project_created { - self.projects.insert(project_created.project_id); - Metrics::get() - .proxy - .redis_events_count - .inc(RedisEventsCount::ProjectCreated); - } - } - - pub async fn do_read( - &self, - mut con: ConnectionWithCredentialsProvider, - cancellation_token: CancellationToken, - ) -> anyhow::Result { - let mut last_id = "0-0".to_string(); - loop { - if let Err(e) = con.connect().await { - tracing::error!("error connecting to redis: {:?}", e); - self.ready.store(false, Ordering::Release); - } - if let Err(e) = self.read_from_stream(&mut con, &mut last_id).await { - tracing::error!("error reading from redis: {:?}", e); - self.ready.store(false, Ordering::Release); - } - if cancellation_token.is_cancelled() { - info!("cancellation token is cancelled, exiting"); - // Maintenance 
tasks run forever. Sleep forever when canceled. - pending::<()>().await; - } - tokio::time::sleep(self.config.retry_interval).await; - } - } - - async fn read_from_stream( - &self, - con: &mut ConnectionWithCredentialsProvider, - last_id: &mut String, - ) -> anyhow::Result<()> { - tracing::info!("reading endpoints/branches/projects from redis"); - self.batch_read( - con, - StreamReadOptions::default().count(self.config.initial_batch_size), - last_id, - true, - ) - .await?; - tracing::info!("ready to filter user requests"); - self.ready.store(true, Ordering::Release); - self.batch_read( - con, - StreamReadOptions::default() - .count(self.config.default_batch_size) - .block(self.config.xread_timeout.as_millis() as usize), - last_id, - false, - ) - .await - } - - async fn batch_read( - &self, - conn: &mut ConnectionWithCredentialsProvider, - opts: StreamReadOptions, - last_id: &mut String, - return_when_finish: bool, - ) -> anyhow::Result<()> { - let mut total: usize = 0; - loop { - let mut res: StreamReadReply = conn - .xread_options(&[&self.config.stream_name], &[last_id.as_str()], &opts) - .await?; - - if res.keys.is_empty() { - if return_when_finish { - if total != 0 { - break; - } - anyhow::bail!( - "Redis stream {} is empty, cannot be used to filter endpoints", - self.config.stream_name - ); - } - // If we are not returning when finish, we should wait for more data. 
- continue; - } - if res.keys.len() != 1 { - anyhow::bail!("Cannot read from redis stream {}", self.config.stream_name); - } - - let key = res.keys.pop().expect("Checked length above"); - let len = key.ids.len(); - for stream_id in key.ids { - total += 1; - for value in stream_id.map.values() { - match value.try_into() { - Ok(event) => self.insert_event(event), - Err(err) => { - Metrics::get().proxy.redis_errors_total.inc(RedisErrors { - channel: &self.config.stream_name, - }); - tracing::error!("error parsing value {value:?}: {err:?}"); - } - } - } - if total.is_power_of_two() { - tracing::debug!("endpoints read {}", total); - } - *last_id = stream_id.id; - } - if return_when_finish && len <= self.config.default_batch_size { - break; - } - } - tracing::info!("read {} endpoints/branches/projects from redis", total); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_parse_control_plane_event() { - let s = r#"{"branch_created":null,"endpoint_created":{"endpoint_id":"ep-rapid-thunder-w0qqw2q9"},"project_created":null,"type":"endpoint_created"}"#; - - let endpoint_id: EndpointId = "ep-rapid-thunder-w0qqw2q9".into(); - - assert_eq!( - serde_json::from_str::(s).unwrap(), - ControlPlaneEvent { - endpoint_created: Some(EndpointCreated { - endpoint_id: endpoint_id.into(), - }), - branch_created: None, - project_created: None, - _type: Some("endpoint_created".into()), - } - ); - } -} diff --git a/proxy/src/cache/mod.rs b/proxy/src/cache/mod.rs index 6c168144a7..ce7f781213 100644 --- a/proxy/src/cache/mod.rs +++ b/proxy/src/cache/mod.rs @@ -1,5 +1,4 @@ pub(crate) mod common; -pub(crate) mod endpoints; pub(crate) mod project_info; mod timed_lru; diff --git a/proxy/src/cache/project_info.rs b/proxy/src/cache/project_info.rs index d37c107323..0ef09a8a9a 100644 --- a/proxy/src/cache/project_info.rs +++ b/proxy/src/cache/project_info.rs @@ -1,17 +1,16 @@ use std::collections::{HashMap, HashSet, hash_map}; use std::convert::Infallible; -use 
std::sync::atomic::AtomicU64; use std::time::Duration; use async_trait::async_trait; use clashmap::ClashMap; use clashmap::mapref::one::Ref; use rand::{Rng, thread_rng}; -use tokio::sync::Mutex; use tokio::time::Instant; use tracing::{debug, info}; use crate::config::ProjectInfoCacheOptions; +use crate::control_plane::messages::{ControlPlaneErrorMessage, Reason}; use crate::control_plane::{EndpointAccessControl, RoleAccessControl}; use crate::intern::{AccountIdInt, EndpointIdInt, ProjectIdInt, RoleNameInt}; use crate::types::{EndpointId, RoleName}; @@ -22,52 +21,53 @@ pub(crate) trait ProjectInfoCache { fn invalidate_endpoint_access_for_project(&self, project_id: ProjectIdInt); fn invalidate_endpoint_access_for_org(&self, account_id: AccountIdInt); fn invalidate_role_secret_for_project(&self, project_id: ProjectIdInt, role_name: RoleNameInt); - async fn decrement_active_listeners(&self); - async fn increment_active_listeners(&self); } struct Entry { - created_at: Instant, + expires_at: Instant, value: T, } impl Entry { - pub(crate) fn new(value: T) -> Self { + pub(crate) fn new(value: T, ttl: Duration) -> Self { Self { - created_at: Instant::now(), + expires_at: Instant::now() + ttl, value, } } - pub(crate) fn get(&self, valid_since: Instant) -> Option<&T> { - (valid_since < self.created_at).then_some(&self.value) + pub(crate) fn get(&self) -> Option<&T> { + (!self.is_expired()).then_some(&self.value) } -} -impl From for Entry { - fn from(value: T) -> Self { - Self::new(value) + fn is_expired(&self) -> bool { + self.expires_at <= Instant::now() } } struct EndpointInfo { - role_controls: HashMap>, - controls: Option>, + role_controls: HashMap>>, + controls: Option>>, } +type ControlPlaneResult = Result>; + impl EndpointInfo { - pub(crate) fn get_role_secret( + pub(crate) fn get_role_secret_with_ttl( &self, role_name: RoleNameInt, - valid_since: Instant, - ) -> Option { - let controls = self.role_controls.get(&role_name)?; - controls.get(valid_since).cloned() + ) -> 
Option<(ControlPlaneResult, Duration)> { + let entry = self.role_controls.get(&role_name)?; + let ttl = entry.expires_at - Instant::now(); + Some((entry.get()?.clone(), ttl)) } - pub(crate) fn get_controls(&self, valid_since: Instant) -> Option { - let controls = self.controls.as_ref()?; - controls.get(valid_since).cloned() + pub(crate) fn get_controls_with_ttl( + &self, + ) -> Option<(ControlPlaneResult, Duration)> { + let entry = self.controls.as_ref()?; + let ttl = entry.expires_at - Instant::now(); + Some((entry.get()?.clone(), ttl)) } pub(crate) fn invalidate_endpoint(&mut self) { @@ -92,11 +92,8 @@ pub struct ProjectInfoCacheImpl { project2ep: ClashMap>, // FIXME(stefan): we need a way to GC the account2ep map. account2ep: ClashMap>, - config: ProjectInfoCacheOptions, - start_time: Instant, - ttl_disabled_since_us: AtomicU64, - active_listeners_lock: Mutex, + config: ProjectInfoCacheOptions, } #[async_trait] @@ -152,29 +149,6 @@ impl ProjectInfoCache for ProjectInfoCacheImpl { } } } - - async fn decrement_active_listeners(&self) { - let mut listeners_guard = self.active_listeners_lock.lock().await; - if *listeners_guard == 0 { - tracing::error!("active_listeners count is already 0, something is broken"); - return; - } - *listeners_guard -= 1; - if *listeners_guard == 0 { - self.ttl_disabled_since_us - .store(u64::MAX, std::sync::atomic::Ordering::SeqCst); - } - } - - async fn increment_active_listeners(&self) { - let mut listeners_guard = self.active_listeners_lock.lock().await; - *listeners_guard += 1; - if *listeners_guard == 1 { - let new_ttl = (self.start_time.elapsed() + self.config.ttl).as_micros() as u64; - self.ttl_disabled_since_us - .store(new_ttl, std::sync::atomic::Ordering::SeqCst); - } - } } impl ProjectInfoCacheImpl { @@ -184,9 +158,6 @@ impl ProjectInfoCacheImpl { project2ep: ClashMap::new(), account2ep: ClashMap::new(), config, - ttl_disabled_since_us: AtomicU64::new(u64::MAX), - start_time: Instant::now(), - active_listeners_lock: 
Mutex::new(0), } } @@ -198,30 +169,28 @@ impl ProjectInfoCacheImpl { self.cache.get(&endpoint_id) } - pub(crate) fn get_role_secret( + pub(crate) fn get_role_secret_with_ttl( &self, endpoint_id: &EndpointId, role_name: &RoleName, - ) -> Option { - let valid_since = self.get_cache_times(); + ) -> Option<(ControlPlaneResult, Duration)> { let role_name = RoleNameInt::get(role_name)?; let endpoint_info = self.get_endpoint_cache(endpoint_id)?; - endpoint_info.get_role_secret(role_name, valid_since) + endpoint_info.get_role_secret_with_ttl(role_name) } - pub(crate) fn get_endpoint_access( + pub(crate) fn get_endpoint_access_with_ttl( &self, endpoint_id: &EndpointId, - ) -> Option { - let valid_since = self.get_cache_times(); + ) -> Option<(ControlPlaneResult, Duration)> { let endpoint_info = self.get_endpoint_cache(endpoint_id)?; - endpoint_info.get_controls(valid_since) + endpoint_info.get_controls_with_ttl() } pub(crate) fn insert_endpoint_access( &self, account_id: Option, - project_id: ProjectIdInt, + project_id: Option, endpoint_id: EndpointIdInt, role_name: RoleNameInt, controls: EndpointAccessControl, @@ -230,26 +199,89 @@ impl ProjectInfoCacheImpl { if let Some(account_id) = account_id { self.insert_account2endpoint(account_id, endpoint_id); } - self.insert_project2endpoint(project_id, endpoint_id); + if let Some(project_id) = project_id { + self.insert_project2endpoint(project_id, endpoint_id); + } if self.cache.len() >= self.config.size { // If there are too many entries, wait until the next gc cycle. 
return; } - let controls = Entry::from(controls); - let role_controls = Entry::from(role_controls); + debug!( + key = &*endpoint_id, + "created a cache entry for endpoint access" + ); + + let controls = Some(Entry::new(Ok(controls), self.config.ttl)); + let role_controls = Entry::new(Ok(role_controls), self.config.ttl); match self.cache.entry(endpoint_id) { clashmap::Entry::Vacant(e) => { e.insert(EndpointInfo { role_controls: HashMap::from_iter([(role_name, role_controls)]), - controls: Some(controls), + controls, }); } clashmap::Entry::Occupied(mut e) => { let ep = e.get_mut(); - ep.controls = Some(controls); + ep.controls = controls; + if ep.role_controls.len() < self.config.max_roles { + ep.role_controls.insert(role_name, role_controls); + } + } + } + } + + pub(crate) fn insert_endpoint_access_err( + &self, + endpoint_id: EndpointIdInt, + role_name: RoleNameInt, + msg: Box, + ttl: Option, + ) { + if self.cache.len() >= self.config.size { + // If there are too many entries, wait until the next gc cycle. + return; + } + + debug!( + key = &*endpoint_id, + "created a cache entry for an endpoint access error" + ); + + let ttl = ttl.unwrap_or(self.config.ttl); + + let controls = if msg.get_reason() == Reason::RoleProtected { + // RoleProtected is the only role-specific error that control plane can give us. + // If a given role name does not exist, it still returns a successful response, + // just with an empty secret. + None + } else { + // We can cache all the other errors in EndpointInfo.controls, + // because they don't depend on what role name we pass to control plane. 
+ Some(Entry::new(Err(msg.clone()), ttl)) + }; + + let role_controls = Entry::new(Err(msg), ttl); + + match self.cache.entry(endpoint_id) { + clashmap::Entry::Vacant(e) => { + e.insert(EndpointInfo { + role_controls: HashMap::from_iter([(role_name, role_controls)]), + controls, + }); + } + clashmap::Entry::Occupied(mut e) => { + let ep = e.get_mut(); + if let Some(entry) = &ep.controls + && !entry.is_expired() + && entry.value.is_ok() + { + // If we have cached non-expired, non-error controls, keep them. + } else { + ep.controls = controls; + } if ep.role_controls.len() < self.config.max_roles { ep.role_controls.insert(role_name, role_controls); } @@ -275,27 +307,6 @@ impl ProjectInfoCacheImpl { } } - fn ignore_ttl_since(&self) -> Option { - let ttl_disabled_since_us = self - .ttl_disabled_since_us - .load(std::sync::atomic::Ordering::Relaxed); - - if ttl_disabled_since_us == u64::MAX { - return None; - } - - Some(self.start_time + Duration::from_micros(ttl_disabled_since_us)) - } - - fn get_cache_times(&self) -> Instant { - let mut valid_since = Instant::now() - self.config.ttl; - if let Some(ignore_ttl_since) = self.ignore_ttl_since() { - // We are fine if entry is not older than ttl or was added before we are getting notifications. - valid_since = valid_since.min(ignore_ttl_since); - } - valid_since - } - pub fn maybe_invalidate_role_secret(&self, endpoint_id: &EndpointId, role_name: &RoleName) { let Some(endpoint_id) = EndpointIdInt::get(endpoint_id) else { return; @@ -313,16 +324,7 @@ impl ProjectInfoCacheImpl { return; }; - let created_at = role_controls.get().created_at; - let expire = match self.ignore_ttl_since() { - // if ignoring TTL, we should still try and roll the password if it's old - // and we the client gave an incorrect password. There could be some lag on the redis channel. - Some(_) => created_at + self.config.ttl < Instant::now(), - // edge case: redis is down, let's be generous and invalidate the cache immediately. 
- None => true, - }; - - if expire { + if role_controls.get().is_expired() { role_controls.remove(); } } @@ -361,13 +363,11 @@ impl ProjectInfoCacheImpl { #[cfg(test)] mod tests { - use std::sync::Arc; - use super::*; - use crate::control_plane::messages::EndpointRateLimitConfig; + use crate::control_plane::messages::{Details, EndpointRateLimitConfig, ErrorInfo, Status}; use crate::control_plane::{AccessBlockerFlags, AuthSecret}; use crate::scram::ServerSecret; - use crate::types::ProjectId; + use std::sync::Arc; #[tokio::test] async fn test_project_info_cache_settings() { @@ -378,9 +378,9 @@ mod tests { ttl: Duration::from_secs(1), gc_interval: Duration::from_secs(600), }); - let project_id: ProjectId = "project".into(); + let project_id: Option = Some(ProjectIdInt::from(&"project".into())); let endpoint_id: EndpointId = "endpoint".into(); - let account_id: Option = None; + let account_id = None; let user1: RoleName = "user1".into(); let user2: RoleName = "user2".into(); @@ -393,7 +393,7 @@ mod tests { cache.insert_endpoint_access( account_id, - (&project_id).into(), + project_id, (&endpoint_id).into(), (&user1).into(), EndpointAccessControl { @@ -409,7 +409,7 @@ mod tests { cache.insert_endpoint_access( account_id, - (&project_id).into(), + project_id, (&endpoint_id).into(), (&user2).into(), EndpointAccessControl { @@ -423,11 +423,17 @@ mod tests { }, ); - let cached = cache.get_role_secret(&endpoint_id, &user1).unwrap(); - assert_eq!(cached.secret, secret1); + let (cached, ttl) = cache + .get_role_secret_with_ttl(&endpoint_id, &user1) + .unwrap(); + assert_eq!(cached.unwrap().secret, secret1); + assert_eq!(ttl, cache.config.ttl); - let cached = cache.get_role_secret(&endpoint_id, &user2).unwrap(); - assert_eq!(cached.secret, secret2); + let (cached, ttl) = cache + .get_role_secret_with_ttl(&endpoint_id, &user2) + .unwrap(); + assert_eq!(cached.unwrap().secret, secret2); + assert_eq!(ttl, cache.config.ttl); // Shouldn't add more than 2 roles. 
let user3: RoleName = "user3".into(); @@ -435,7 +441,7 @@ mod tests { cache.insert_endpoint_access( account_id, - (&project_id).into(), + project_id, (&endpoint_id).into(), (&user3).into(), EndpointAccessControl { @@ -449,17 +455,144 @@ mod tests { }, ); - assert!(cache.get_role_secret(&endpoint_id, &user3).is_none()); + assert!( + cache + .get_role_secret_with_ttl(&endpoint_id, &user3) + .is_none() + ); - let cached = cache.get_endpoint_access(&endpoint_id).unwrap(); + let cached = cache + .get_endpoint_access_with_ttl(&endpoint_id) + .unwrap() + .0 + .unwrap(); assert_eq!(cached.allowed_ips, allowed_ips); tokio::time::advance(Duration::from_secs(2)).await; - let cached = cache.get_role_secret(&endpoint_id, &user1); + let cached = cache.get_role_secret_with_ttl(&endpoint_id, &user1); assert!(cached.is_none()); - let cached = cache.get_role_secret(&endpoint_id, &user2); + let cached = cache.get_role_secret_with_ttl(&endpoint_id, &user2); assert!(cached.is_none()); - let cached = cache.get_endpoint_access(&endpoint_id); + let cached = cache.get_endpoint_access_with_ttl(&endpoint_id); assert!(cached.is_none()); } + + #[tokio::test] + async fn test_caching_project_info_errors() { + let cache = ProjectInfoCacheImpl::new(ProjectInfoCacheOptions { + size: 10, + max_roles: 10, + ttl: Duration::from_secs(1), + gc_interval: Duration::from_secs(600), + }); + let project_id = Some(ProjectIdInt::from(&"project".into())); + let endpoint_id: EndpointId = "endpoint".into(); + let account_id = None; + + let user1: RoleName = "user1".into(); + let user2: RoleName = "user2".into(); + let secret = Some(AuthSecret::Scram(ServerSecret::mock([1; 32]))); + + let role_msg = Box::new(ControlPlaneErrorMessage { + error: "role is protected and cannot be used for password-based authentication" + .to_owned() + .into_boxed_str(), + http_status_code: http::StatusCode::NOT_FOUND, + status: Some(Status { + code: "PERMISSION_DENIED".to_owned().into_boxed_str(), + message: "role is protected and 
cannot be used for password-based authentication" + .to_owned() + .into_boxed_str(), + details: Details { + error_info: Some(ErrorInfo { + reason: Reason::RoleProtected, + }), + retry_info: None, + user_facing_message: None, + }, + }), + }); + + let generic_msg = Box::new(ControlPlaneErrorMessage { + error: "oh noes".to_owned().into_boxed_str(), + http_status_code: http::StatusCode::NOT_FOUND, + status: None, + }); + + let get_role_secret = |endpoint_id, role_name| { + cache + .get_role_secret_with_ttl(endpoint_id, role_name) + .unwrap() + .0 + }; + let get_endpoint_access = + |endpoint_id| cache.get_endpoint_access_with_ttl(endpoint_id).unwrap().0; + + // stores role-specific errors only for get_role_secret + cache.insert_endpoint_access_err( + (&endpoint_id).into(), + (&user1).into(), + role_msg.clone(), + None, + ); + assert_eq!( + get_role_secret(&endpoint_id, &user1).unwrap_err().error, + role_msg.error + ); + assert!(cache.get_endpoint_access_with_ttl(&endpoint_id).is_none()); + + // stores non-role specific errors for both get_role_secret and get_endpoint_access + cache.insert_endpoint_access_err( + (&endpoint_id).into(), + (&user1).into(), + generic_msg.clone(), + None, + ); + assert_eq!( + get_role_secret(&endpoint_id, &user1).unwrap_err().error, + generic_msg.error + ); + assert_eq!( + get_endpoint_access(&endpoint_id).unwrap_err().error, + generic_msg.error + ); + + // error isn't returned for other roles in the same endpoint + assert!( + cache + .get_role_secret_with_ttl(&endpoint_id, &user2) + .is_none() + ); + + // success for a role does not overwrite errors for other roles + cache.insert_endpoint_access( + account_id, + project_id, + (&endpoint_id).into(), + (&user2).into(), + EndpointAccessControl { + allowed_ips: Arc::new(vec![]), + allowed_vpce: Arc::new(vec![]), + flags: AccessBlockerFlags::default(), + rate_limits: EndpointRateLimitConfig::default(), + }, + RoleAccessControl { + secret: secret.clone(), + }, + ); + 
assert!(get_role_secret(&endpoint_id, &user1).is_err()); + assert!(get_role_secret(&endpoint_id, &user2).is_ok()); + // ...but does clear the access control error + assert!(get_endpoint_access(&endpoint_id).is_ok()); + + // storing an error does not overwrite successful access control response + cache.insert_endpoint_access_err( + (&endpoint_id).into(), + (&user2).into(), + generic_msg.clone(), + None, + ); + assert!(get_role_secret(&endpoint_id, &user2).is_err()); + assert!(get_endpoint_access(&endpoint_id).is_ok()); + } } diff --git a/proxy/src/cache/timed_lru.rs b/proxy/src/cache/timed_lru.rs index 183e1ea449..e87cf53ab9 100644 --- a/proxy/src/cache/timed_lru.rs +++ b/proxy/src/cache/timed_lru.rs @@ -14,8 +14,8 @@ use std::time::{Duration, Instant}; use hashlink::{LruCache, linked_hash_map::RawEntryMut}; use tracing::debug; +use super::Cache; use super::common::Cached; -use super::{Cache, timed_lru}; /// An implementation of timed LRU cache with fixed capacity. /// Key properties: @@ -30,7 +30,7 @@ use super::{Cache, timed_lru}; /// /// * There's an API for immediate invalidation (removal) of a cache entry; /// It's useful in case we know for sure that the entry is no longer correct. -/// See [`timed_lru::Cached`] for more information. +/// See [`Cached`] for more information. /// /// * Expired entries are kept in the cache, until they are evicted by the LRU policy, /// or by a successful lookup (i.e. the entry hasn't expired yet). @@ -217,15 +217,18 @@ impl TimedLru { } impl TimedLru { - /// Retrieve a cached entry in convenient wrapper. - pub(crate) fn get(&self, key: &Q) -> Option> + /// Retrieve a cached entry in convenient wrapper, alongside timing information. 
+ pub(crate) fn get_with_created_at( + &self, + key: &Q, + ) -> Option::Value, Instant)>> where K: Borrow + Clone, Q: Hash + Eq + ?Sized, { self.get_raw(key, |key, entry| Cached { token: Some((self, key.clone())), - value: entry.value.clone(), + value: (entry.value.clone(), entry.created_at), }) } } diff --git a/proxy/src/cancellation.rs b/proxy/src/cancellation.rs index 74413f1a7d..f25121331f 100644 --- a/proxy/src/cancellation.rs +++ b/proxy/src/cancellation.rs @@ -4,12 +4,11 @@ use std::pin::pin; use std::sync::{Arc, OnceLock}; use std::time::Duration; -use anyhow::anyhow; use futures::FutureExt; use ipnet::{IpNet, Ipv4Net, Ipv6Net}; use postgres_client::RawCancelToken; use postgres_client::tls::MakeTlsConnect; -use redis::{Cmd, FromRedisValue, Value}; +use redis::{Cmd, FromRedisValue, SetExpiry, SetOptions, Value}; use serde::{Deserialize, Serialize}; use thiserror::Error; use tokio::net::TcpStream; @@ -18,7 +17,7 @@ use tracing::{debug, error, info}; use crate::auth::AuthError; use crate::auth::backend::ComputeUserInfo; -use crate::batch::{BatchQueue, QueueProcessing}; +use crate::batch::{BatchQueue, BatchQueueError, QueueProcessing}; use crate::config::ComputeConfig; use crate::context::RequestContext; use crate::control_plane::ControlPlaneApi; @@ -28,23 +27,60 @@ use crate::metrics::{CancelChannelSizeGuard, CancellationRequest, Metrics, Redis use crate::pqproto::CancelKeyData; use crate::rate_limiter::LeakyBucketRateLimiter; use crate::redis::keys::KeyPrefix; -use crate::redis::kv_ops::RedisKVClient; +use crate::redis::kv_ops::{RedisKVClient, RedisKVClientError}; +use crate::util::run_until; type IpSubnetKey = IpNet; -const CANCEL_KEY_TTL: std::time::Duration = std::time::Duration::from_secs(600); -const CANCEL_KEY_REFRESH: std::time::Duration = std::time::Duration::from_secs(570); +/// Initial period and TTL is shorter to clear keys of short-lived connections faster. 
+const CANCEL_KEY_INITIAL_PERIOD: Duration = Duration::from_secs(60); +const CANCEL_KEY_REFRESH_PERIOD: Duration = Duration::from_secs(10 * 60); +/// `CANCEL_KEY_TTL_SLACK` is added to the periods to determine the actual TTL. +const CANCEL_KEY_TTL_SLACK: Duration = Duration::from_secs(30); // Message types for sending through mpsc channel pub enum CancelKeyOp { - StoreCancelKey { + Store { key: CancelKeyData, value: Box, - expire: std::time::Duration, + expire: Duration, }, - GetCancelData { + Refresh { + key: CancelKeyData, + expire: Duration, + }, + Get { key: CancelKeyData, }, + GetOld { + key: CancelKeyData, + }, +} + +impl CancelKeyOp { + const fn redis_msg_kind(&self) -> RedisMsgKind { + match self { + CancelKeyOp::Store { .. } => RedisMsgKind::Set, + CancelKeyOp::Refresh { .. } => RedisMsgKind::Expire, + CancelKeyOp::Get { .. } => RedisMsgKind::Get, + CancelKeyOp::GetOld { .. } => RedisMsgKind::HGet, + } + } + + fn cancel_channel_metric_guard(&self) -> CancelChannelSizeGuard<'static> { + Metrics::get() + .proxy + .cancel_channel_size + .guard(self.redis_msg_kind()) + } +} + +#[derive(thiserror::Error, Debug, Clone)] +pub enum PipelineError { + #[error("could not send cmd to redis: {0}")] + RedisKVClient(Arc), + #[error("incorrect number of responses from redis")] + IncorrectNumberOfResponses, } pub struct Pipeline { @@ -60,7 +96,7 @@ impl Pipeline { } } - async fn execute(self, client: &mut RedisKVClient) -> Vec> { + async fn execute(self, client: &mut RedisKVClient) -> Result, PipelineError> { let responses = self.replies; let batch_size = self.inner.len(); @@ -78,43 +114,44 @@ impl Pipeline { batch_size, responses, "successfully completed cancellation jobs", ); - values.into_iter().map(Ok).collect() + Ok(values.into_iter().collect()) } Ok(value) => { error!(batch_size, ?value, "unexpected redis return value"); - std::iter::repeat_with(|| Err(anyhow!("incorrect response type from redis"))) - .take(responses) - .collect() - } - Err(err) => { - 
std::iter::repeat_with(|| Err(anyhow!("could not send cmd to redis: {err}"))) - .take(responses) - .collect() + Err(PipelineError::IncorrectNumberOfResponses) } + Err(err) => Err(PipelineError::RedisKVClient(Arc::new(err))), } } - fn add_command_with_reply(&mut self, cmd: Cmd) { + fn add_command(&mut self, cmd: Cmd) { self.inner.add_command(cmd); self.replies += 1; } - - fn add_command_no_reply(&mut self, cmd: Cmd) { - self.inner.add_command(cmd).ignore(); - } } impl CancelKeyOp { fn register(&self, pipe: &mut Pipeline) { match self { - CancelKeyOp::StoreCancelKey { key, value, expire } => { + CancelKeyOp::Store { key, value, expire } => { let key = KeyPrefix::Cancel(*key).build_redis_key(); - pipe.add_command_with_reply(Cmd::hset(&key, "data", &**value)); - pipe.add_command_no_reply(Cmd::expire(&key, expire.as_secs() as i64)); + pipe.add_command(Cmd::set_options( + &key, + &**value, + SetOptions::default().with_expiration(SetExpiry::EX(expire.as_secs())), + )); } - CancelKeyOp::GetCancelData { key } => { + CancelKeyOp::Refresh { key, expire } => { let key = KeyPrefix::Cancel(*key).build_redis_key(); - pipe.add_command_with_reply(Cmd::hget(key, "data")); + pipe.add_command(Cmd::expire(&key, expire.as_secs() as i64)); + } + CancelKeyOp::GetOld { key } => { + let key = KeyPrefix::Cancel(*key).build_redis_key(); + pipe.add_command(Cmd::hget(key, "data")); + } + CancelKeyOp::Get { key } => { + let key = KeyPrefix::Cancel(*key).build_redis_key(); + pipe.add_command(Cmd::get(key)); } } } @@ -127,13 +164,14 @@ pub struct CancellationProcessor { impl QueueProcessing for CancellationProcessor { type Req = (CancelChannelSizeGuard<'static>, CancelKeyOp); - type Res = anyhow::Result; + type Res = redis::Value; + type Err = PipelineError; fn batch_size(&self, _queue_size: usize) -> usize { self.batch_size } - async fn apply(&mut self, batch: Vec) -> Vec { + async fn apply(&mut self, batch: Vec) -> Result, Self::Err> { if !self.client.credentials_refreshed() { // this will cause 
a timeout for cancellation operations tracing::debug!( @@ -244,18 +282,18 @@ impl CancellationHandler { &self, key: CancelKeyData, ) -> Result, CancelError> { - let guard = Metrics::get() - .proxy - .cancel_channel_size - .guard(RedisMsgKind::HGet); - let op = CancelKeyOp::GetCancelData { key }; + const TIMEOUT: Duration = Duration::from_secs(5); let Some(tx) = self.tx.get() else { tracing::warn!("cancellation handler is not available"); return Err(CancelError::InternalError); }; - const TIMEOUT: Duration = Duration::from_secs(5); + let guard = Metrics::get() + .proxy + .cancel_channel_size + .guard(RedisMsgKind::Get); + let op = CancelKeyOp::Get { key }; let result = timeout( TIMEOUT, tx.call((guard, op), std::future::pending::()), @@ -264,10 +302,37 @@ impl CancellationHandler { .map_err(|_| { tracing::warn!("timed out waiting to receive GetCancelData response"); CancelError::RateLimit - })? - // cannot be cancelled - .unwrap_or_else(|x| match x {}) - .map_err(|e| { + })?; + + // We may still have cancel keys set with HSET "data". + // Check error type and retry with HGET. + // TODO: remove code after HSET is not used anymore. + let result = if let Err(err) = result.as_ref() + && let BatchQueueError::Result(err) = err + && let PipelineError::RedisKVClient(err) = err + && let RedisKVClientError::Redis(err) = &**err + && let Some(errcode) = err.code() + && errcode == "WRONGTYPE" + { + let guard = Metrics::get() + .proxy + .cancel_channel_size + .guard(RedisMsgKind::HGet); + let op = CancelKeyOp::GetOld { key }; + timeout( + TIMEOUT, + tx.call((guard, op), std::future::pending::()), + ) + .await + .map_err(|_| { + tracing::warn!("timed out waiting to receive GetCancelData response"); + CancelError::RateLimit + })? 
+ } else { + result + }; + + let result = result.map_err(|e| { tracing::warn!("failed to receive GetCancelData response: {e}"); CancelError::InternalError })?; @@ -438,39 +503,93 @@ impl Session { let mut cancel = pin!(cancel); + enum State { + Init, + Refresh, + } + + let mut state = State::Init; loop { - let guard = Metrics::get() - .proxy - .cancel_channel_size - .guard(RedisMsgKind::HSet); - let op = CancelKeyOp::StoreCancelKey { - key: self.key, - value: closure_json.clone(), - expire: CANCEL_KEY_TTL, + let (op, mut wait_interval) = match state { + State::Init => { + tracing::debug!( + src=%self.key, + dest=?cancel_closure.cancel_token, + "registering cancellation key" + ); + ( + CancelKeyOp::Store { + key: self.key, + value: closure_json.clone(), + expire: CANCEL_KEY_INITIAL_PERIOD + CANCEL_KEY_TTL_SLACK, + }, + CANCEL_KEY_INITIAL_PERIOD, + ) + } + + State::Refresh => { + tracing::debug!( + src=%self.key, + dest=?cancel_closure.cancel_token, + "refreshing cancellation key" + ); + ( + CancelKeyOp::Refresh { + key: self.key, + expire: CANCEL_KEY_REFRESH_PERIOD + CANCEL_KEY_TTL_SLACK, + }, + CANCEL_KEY_REFRESH_PERIOD, + ) + } }; - tracing::debug!( - src=%self.key, - dest=?cancel_closure.cancel_token, - "registering cancellation key" - ); - - match tx.call((guard, op), cancel.as_mut()).await { - Ok(Ok(_)) => { + match tx + .call((op.cancel_channel_metric_guard(), op), cancel.as_mut()) + .await + { + // SET returns OK + Ok(Value::Okay) => { tracing::debug!( src=%self.key, dest=?cancel_closure.cancel_token, "registered cancellation key" ); + state = State::Refresh; + } - // wait before continuing. - tokio::time::sleep(CANCEL_KEY_REFRESH).await; + // EXPIRE returns 1 + Ok(Value::Int(1)) => { + tracing::debug!( + src=%self.key, + dest=?cancel_closure.cancel_token, + "refreshed cancellation key" + ); } + + Ok(_) => { + // Any other response likely means the key expired. 
+ tracing::warn!(src=%self.key, "refreshing cancellation key failed"); + // Re-enter the SET loop quickly to repush full data. + state = State::Init; + wait_interval = Duration::ZERO; + } + // retry immediately. - Ok(Err(error)) => { - tracing::warn!(?error, "error registering cancellation key"); + Err(BatchQueueError::Result(error)) => { + tracing::warn!(?error, "error refreshing cancellation key"); + // Small delay to prevent busy loop with high cpu and logging. + wait_interval = Duration::from_millis(10); } - Err(Err(_cancelled)) => break, + + Err(BatchQueueError::Cancelled(Err(_cancelled))) => break, + } + + // wait before continuing. break immediately if cancelled. + if run_until(tokio::time::sleep(wait_interval), cancel.as_mut()) + .await + .is_err() + { + break; } } diff --git a/proxy/src/config.rs b/proxy/src/config.rs index f97006e206..6157dc8a6a 100644 --- a/proxy/src/config.rs +++ b/proxy/src/config.rs @@ -18,7 +18,7 @@ use crate::control_plane::locks::ApiLocks; use crate::control_plane::messages::{EndpointJwksResponse, JwksSettings}; use crate::ext::TaskExt; use crate::intern::RoleNameInt; -use crate::rate_limiter::{RateBucketInfo, RateLimitAlgorithm, RateLimiterConfig}; +use crate::rate_limiter::{RateLimitAlgorithm, RateLimiterConfig}; use crate::scram::threadpool::ThreadPool; use crate::serverless::GlobalConnPoolOptions; use crate::serverless::cancel_set::CancelSet; @@ -80,79 +80,6 @@ pub struct AuthenticationConfig { pub console_redirect_confirmation_timeout: tokio::time::Duration, } -#[derive(Debug)] -pub struct EndpointCacheConfig { - /// Batch size to receive all endpoints on the startup. - pub initial_batch_size: usize, - /// Batch size to receive endpoints. - pub default_batch_size: usize, - /// Timeouts for the stream read operation. - pub xread_timeout: Duration, - /// Stream name to read from. - pub stream_name: String, - /// Limiter info (to distinguish when to enable cache). - pub limiter_info: Vec, - /// Disable cache. 
- /// If true, cache is ignored, but reports all statistics. - pub disable_cache: bool, - /// Retry interval for the stream read operation. - pub retry_interval: Duration, -} - -impl EndpointCacheConfig { - /// Default options for [`crate::control_plane::NodeInfoCache`]. - /// Notice that by default the limiter is empty, which means that cache is disabled. - pub const CACHE_DEFAULT_OPTIONS: &'static str = "initial_batch_size=1000,default_batch_size=10,xread_timeout=5m,stream_name=controlPlane,disable_cache=true,limiter_info=1000@1s,retry_interval=1s"; - - /// Parse cache options passed via cmdline. - /// Example: [`Self::CACHE_DEFAULT_OPTIONS`]. - fn parse(options: &str) -> anyhow::Result { - let mut initial_batch_size = None; - let mut default_batch_size = None; - let mut xread_timeout = None; - let mut stream_name = None; - let mut limiter_info = vec![]; - let mut disable_cache = false; - let mut retry_interval = None; - - for option in options.split(',') { - let (key, value) = option - .split_once('=') - .with_context(|| format!("bad key-value pair: {option}"))?; - - match key { - "initial_batch_size" => initial_batch_size = Some(value.parse()?), - "default_batch_size" => default_batch_size = Some(value.parse()?), - "xread_timeout" => xread_timeout = Some(humantime::parse_duration(value)?), - "stream_name" => stream_name = Some(value.to_string()), - "limiter_info" => limiter_info.push(RateBucketInfo::from_str(value)?), - "disable_cache" => disable_cache = value.parse()?, - "retry_interval" => retry_interval = Some(humantime::parse_duration(value)?), - unknown => bail!("unknown key: {unknown}"), - } - } - RateBucketInfo::validate(&mut limiter_info)?; - - Ok(Self { - initial_batch_size: initial_batch_size.context("missing `initial_batch_size`")?, - default_batch_size: default_batch_size.context("missing `default_batch_size`")?, - xread_timeout: xread_timeout.context("missing `xread_timeout`")?, - stream_name: stream_name.context("missing `stream_name`")?, - 
disable_cache, - limiter_info, - retry_interval: retry_interval.context("missing `retry_interval`")?, - }) - } -} - -impl FromStr for EndpointCacheConfig { - type Err = anyhow::Error; - - fn from_str(options: &str) -> Result { - let error = || format!("failed to parse endpoint cache options '{options}'"); - Self::parse(options).with_context(error) - } -} #[derive(Debug)] pub struct MetricBackupCollectionConfig { pub remote_storage_config: Option, diff --git a/proxy/src/context/mod.rs b/proxy/src/context/mod.rs index 7b0549e76f..3a8828e70c 100644 --- a/proxy/src/context/mod.rs +++ b/proxy/src/context/mod.rs @@ -7,7 +7,7 @@ use once_cell::sync::OnceCell; use smol_str::SmolStr; use tokio::sync::mpsc; use tracing::field::display; -use tracing::{Span, debug, error, info_span}; +use tracing::{Span, error, info_span}; use try_lock::TryLock; use uuid::Uuid; @@ -15,10 +15,7 @@ use self::parquet::RequestData; use crate::control_plane::messages::{ColdStartInfo, MetricsAuxInfo}; use crate::error::ErrorKind; use crate::intern::{BranchIdInt, ProjectIdInt}; -use crate::metrics::{ - ConnectOutcome, InvalidEndpointsGroup, LatencyAccumulated, LatencyTimer, Metrics, Protocol, - Waiting, -}; +use crate::metrics::{LatencyAccumulated, LatencyTimer, Metrics, Protocol, Waiting}; use crate::pqproto::StartupMessageParams; use crate::protocol2::{ConnectionInfo, ConnectionInfoExtra}; use crate::types::{DbName, EndpointId, RoleName}; @@ -70,8 +67,6 @@ struct RequestContextInner { // This sender is only used to log the length of session in case of success. disconnect_sender: Option>, pub(crate) latency_timer: LatencyTimer, - // Whether proxy decided that it's not a valid endpoint end rejected it before going to cplane. 
- rejected: Option, disconnect_timestamp: Option>, } @@ -106,7 +101,6 @@ impl Clone for RequestContext { auth_method: inner.auth_method.clone(), jwt_issuer: inner.jwt_issuer.clone(), success: inner.success, - rejected: inner.rejected, cold_start_info: inner.cold_start_info, pg_options: inner.pg_options.clone(), testodrome_query_id: inner.testodrome_query_id.clone(), @@ -151,7 +145,6 @@ impl RequestContext { auth_method: None, jwt_issuer: None, success: false, - rejected: None, cold_start_info: ColdStartInfo::Unknown, pg_options: None, testodrome_query_id: None, @@ -183,11 +176,6 @@ impl RequestContext { ) } - pub(crate) fn set_rejected(&self, rejected: bool) { - let mut this = self.0.try_lock().expect("should not deadlock"); - this.rejected = Some(rejected); - } - pub(crate) fn set_cold_start_info(&self, info: ColdStartInfo) { self.0 .try_lock() @@ -461,38 +449,6 @@ impl RequestContextInner { } fn log_connect(&mut self) { - let outcome = if self.success { - ConnectOutcome::Success - } else { - ConnectOutcome::Failed - }; - - // TODO: get rid of entirely/refactor - // check for false positives - // AND false negatives - if let Some(rejected) = self.rejected { - let ep = self - .endpoint_id - .as_ref() - .map(|x| x.as_str()) - .unwrap_or_default(); - // This makes sense only if cache is disabled - debug!( - ?outcome, - ?rejected, - ?ep, - "check endpoint is valid with outcome" - ); - Metrics::get() - .proxy - .invalid_endpoints_total - .inc(InvalidEndpointsGroup { - protocol: self.protocol, - rejected: rejected.into(), - outcome, - }); - } - if let Some(tx) = self.sender.take() { // If type changes, this error handling needs to be updated. 
let tx: mpsc::UnboundedSender = tx; diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index b55cc14532..4d8df19476 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -267,7 +267,7 @@ async fn worker_inner( ) -> anyhow::Result<()> { #[cfg(any(test, feature = "testing"))] let storage = if config.test_remote_failures > 0 { - GenericRemoteStorage::unreliable_wrapper(storage, config.test_remote_failures) + GenericRemoteStorage::unreliable_wrapper(storage, config.test_remote_failures, 100) } else { storage }; diff --git a/proxy/src/control_plane/client/cplane_proxy_v1.rs b/proxy/src/control_plane/client/cplane_proxy_v1.rs index fbacc97661..8a0403c0b0 100644 --- a/proxy/src/control_plane/client/cplane_proxy_v1.rs +++ b/proxy/src/control_plane/client/cplane_proxy_v1.rs @@ -23,12 +23,13 @@ use crate::control_plane::errors::{ ControlPlaneError, GetAuthInfoError, GetEndpointJwksError, WakeComputeError, }; use crate::control_plane::locks::ApiLocks; -use crate::control_plane::messages::{ColdStartInfo, EndpointJwksResponse, Reason}; +use crate::control_plane::messages::{ColdStartInfo, EndpointJwksResponse}; use crate::control_plane::{ AccessBlockerFlags, AuthInfo, AuthSecret, CachedNodeInfo, EndpointAccessControl, NodeInfo, RoleAccessControl, }; use crate::metrics::Metrics; +use crate::proxy::retry::CouldRetry; use crate::rate_limiter::WakeComputeRateLimiter; use crate::types::{EndpointCacheKey, EndpointId, RoleName}; use crate::{compute, http, scram}; @@ -67,6 +68,66 @@ impl NeonControlPlaneClient { self.endpoint.url().as_str() } + async fn get_and_cache_auth_info( + &self, + ctx: &RequestContext, + endpoint: &EndpointId, + role: &RoleName, + cache_key: &EndpointId, + extract: impl FnOnce(&EndpointAccessControl, &RoleAccessControl) -> T, + ) -> Result { + match self.do_get_auth_req(ctx, endpoint, role).await { + Ok(auth_info) => { + let control = EndpointAccessControl { + allowed_ips: Arc::new(auth_info.allowed_ips), + 
allowed_vpce: Arc::new(auth_info.allowed_vpc_endpoint_ids), + flags: auth_info.access_blocker_flags, + rate_limits: auth_info.rate_limits, + }; + let role_control = RoleAccessControl { + secret: auth_info.secret, + }; + let res = extract(&control, &role_control); + + self.caches.project_info.insert_endpoint_access( + auth_info.account_id, + auth_info.project_id, + cache_key.into(), + role.into(), + control, + role_control, + ); + + if let Some(project_id) = auth_info.project_id { + ctx.set_project_id(project_id); + } + + Ok(res) + } + Err(err) => match err { + GetAuthInfoError::ApiError(ControlPlaneError::Message(ref msg)) => { + let retry_info = msg.status.as_ref().and_then(|s| s.details.retry_info); + + // If we can retry this error, do not cache it, + // unless we were given a retry delay. + if msg.could_retry() && retry_info.is_none() { + return Err(err); + } + + self.caches.project_info.insert_endpoint_access_err( + cache_key.into(), + role.into(), + msg.clone(), + retry_info.map(|r| Duration::from_millis(r.retry_delay_ms)), + ); + + Err(err) + } + err => Err(err), + }, + } + } + async fn do_get_auth_req( &self, ctx: &RequestContext, @@ -159,13 +220,6 @@ impl NeonControlPlaneClient { ctx: &RequestContext, endpoint: &EndpointId, ) -> Result, GetEndpointJwksError> { - if !self - .caches - .endpoints_cache - .is_valid(ctx, &endpoint.normalize()) - { - return Err(GetEndpointJwksError::EndpointNotFound); - } let request_id = ctx.session_id().to_string(); async { let request = self @@ -290,48 +344,34 @@ impl super::ControlPlaneApi for NeonControlPlaneClient { ctx: &RequestContext, endpoint: &EndpointId, role: &RoleName, - ) -> Result { - let normalized_ep = &endpoint.normalize(); - if let Some(secret) = self + ) -> Result { + let key = endpoint.normalize(); + + if let Some((role_control, ttl)) = self .caches .project_info - .get_role_secret(normalized_ep, role) + .get_role_secret_with_ttl(&key, role) { - return Ok(secret); + return match role_control { + Err(mut 
msg) => { + info!(key = &*key, "found cached get_role_access_control error"); + + // if retry_delay_ms is set change it to the remaining TTL + replace_retry_delay_ms(&mut msg, |_| ttl.as_millis() as u64); + + Err(GetAuthInfoError::ApiError(ControlPlaneError::Message(msg))) + } + Ok(role_control) => { + debug!(key = &*key, "found cached role access control"); + Ok(role_control) + } + }; } - if !self.caches.endpoints_cache.is_valid(ctx, normalized_ep) { - info!("endpoint is not valid, skipping the request"); - return Err(GetAuthInfoError::UnknownEndpoint); - } - - let auth_info = self.do_get_auth_req(ctx, endpoint, role).await?; - - let control = EndpointAccessControl { - allowed_ips: Arc::new(auth_info.allowed_ips), - allowed_vpce: Arc::new(auth_info.allowed_vpc_endpoint_ids), - flags: auth_info.access_blocker_flags, - rate_limits: auth_info.rate_limits, - }; - let role_control = RoleAccessControl { - secret: auth_info.secret, - }; - - if let Some(project_id) = auth_info.project_id { - let normalized_ep_int = normalized_ep.into(); - - self.caches.project_info.insert_endpoint_access( - auth_info.account_id, - project_id, - normalized_ep_int, - role.into(), - control, - role_control.clone(), - ); - ctx.set_project_id(project_id); - } - - Ok(role_control) + self.get_and_cache_auth_info(ctx, endpoint, role, &key, |_, role_control| { + role_control.clone() + }) + .await } #[tracing::instrument(skip_all)] @@ -341,43 +381,30 @@ impl super::ControlPlaneApi for NeonControlPlaneClient { endpoint: &EndpointId, role: &RoleName, ) -> Result { - let normalized_ep = &endpoint.normalize(); - if let Some(control) = self.caches.project_info.get_endpoint_access(normalized_ep) { - return Ok(control); + let key = endpoint.normalize(); + + if let Some((control, ttl)) = self.caches.project_info.get_endpoint_access_with_ttl(&key) { + return match control { + Err(mut msg) => { + info!( + key = &*key, + "found cached get_endpoint_access_control error" + ); + + // if retry_delay_ms is set 
change it to the remaining TTL + replace_retry_delay_ms(&mut msg, |_| ttl.as_millis() as u64); + + Err(GetAuthInfoError::ApiError(ControlPlaneError::Message(msg))) + } + Ok(control) => { + debug!(key = &*key, "found cached endpoint access control"); + Ok(control) + } + }; } - if !self.caches.endpoints_cache.is_valid(ctx, normalized_ep) { - info!("endpoint is not valid, skipping the request"); - return Err(GetAuthInfoError::UnknownEndpoint); - } - - let auth_info = self.do_get_auth_req(ctx, endpoint, role).await?; - - let control = EndpointAccessControl { - allowed_ips: Arc::new(auth_info.allowed_ips), - allowed_vpce: Arc::new(auth_info.allowed_vpc_endpoint_ids), - flags: auth_info.access_blocker_flags, - rate_limits: auth_info.rate_limits, - }; - let role_control = RoleAccessControl { - secret: auth_info.secret, - }; - - if let Some(project_id) = auth_info.project_id { - let normalized_ep_int = normalized_ep.into(); - - self.caches.project_info.insert_endpoint_access( - auth_info.account_id, - project_id, - normalized_ep_int, - role.into(), - control.clone(), - role_control, - ); - ctx.set_project_id(project_id); - } - - Ok(control) + self.get_and_cache_auth_info(ctx, endpoint, role, &key, |control, _| control.clone()) + .await } #[tracing::instrument(skip_all)] @@ -399,16 +426,27 @@ impl super::ControlPlaneApi for NeonControlPlaneClient { macro_rules! 
check_cache { () => { - if let Some(cached) = self.caches.node_info.get(&key) { - let (cached, info) = cached.take_value(); - let info = info.map_err(|c| { - info!(key = &*key, "found cached wake_compute error"); - WakeComputeError::ControlPlane(ControlPlaneError::Message(Box::new(*c))) - })?; + if let Some(cached) = self.caches.node_info.get_with_created_at(&key) { + let (cached, (info, created_at)) = cached.take_value(); + return match info { + Err(mut msg) => { + info!(key = &*key, "found cached wake_compute error"); - debug!(key = &*key, "found cached compute node info"); - ctx.set_project(info.aux.clone()); - return Ok(cached.map(|()| info)); + // if retry_delay_ms is set, reduce it by the amount of time it spent in cache + replace_retry_delay_ms(&mut msg, |delay| { + delay.saturating_sub(created_at.elapsed().as_millis() as u64) + }); + + Err(WakeComputeError::ControlPlane(ControlPlaneError::Message( + msg, + ))) + } + Ok(info) => { + debug!(key = &*key, "found cached compute node info"); + ctx.set_project(info.aux.clone()); + Ok(cached.map(|()| info)) + } + }; } }; } @@ -451,47 +489,42 @@ impl super::ControlPlaneApi for NeonControlPlaneClient { Ok(cached.map(|()| node)) } Err(err) => match err { - WakeComputeError::ControlPlane(ControlPlaneError::Message(err)) => { - let Some(status) = &err.status else { - return Err(WakeComputeError::ControlPlane(ControlPlaneError::Message( - err, - ))); - }; + WakeComputeError::ControlPlane(ControlPlaneError::Message(ref msg)) => { + let retry_info = msg.status.as_ref().and_then(|s| s.details.retry_info); - let reason = status - .details - .error_info - .map_or(Reason::Unknown, |x| x.reason); - - // if we can retry this error, do not cache it. - if reason.can_retry() { - return Err(WakeComputeError::ControlPlane(ControlPlaneError::Message( - err, - ))); + // If we can retry this error, do not cache it, + // unless we were given a retry delay. 
+ if msg.could_retry() && retry_info.is_none() { + return Err(err); } - // at this point, we should only have quota errors. debug!( key = &*key, "created a cache entry for the wake compute error" ); - self.caches.node_info.insert_ttl( - key, - Err(err.clone()), - Duration::from_secs(30), - ); + let ttl = retry_info.map_or(Duration::from_secs(30), |r| { + Duration::from_millis(r.retry_delay_ms) + }); - Err(WakeComputeError::ControlPlane(ControlPlaneError::Message( - err, - ))) + self.caches.node_info.insert_ttl(key, Err(msg.clone()), ttl); + + Err(err) } - err => return Err(err), + err => Err(err), }, } } } +fn replace_retry_delay_ms(msg: &mut ControlPlaneErrorMessage, f: impl FnOnce(u64) -> u64) { + if let Some(status) = &mut msg.status + && let Some(retry_info) = &mut status.details.retry_info + { + retry_info.retry_delay_ms = f(retry_info.retry_delay_ms); + } +} + /// Parse http response body, taking status code into account. fn parse_body serde::Deserialize<'a>>( status: StatusCode, diff --git a/proxy/src/control_plane/client/mod.rs b/proxy/src/control_plane/client/mod.rs index 2ffc589df6..ecd4db29b2 100644 --- a/proxy/src/control_plane/client/mod.rs +++ b/proxy/src/control_plane/client/mod.rs @@ -13,9 +13,8 @@ use tracing::{debug, info}; use super::{EndpointAccessControl, RoleAccessControl}; use crate::auth::backend::ComputeUserInfo; use crate::auth::backend::jwt::{AuthRule, FetchAuthRules, FetchAuthRulesError}; -use crate::cache::endpoints::EndpointsCache; use crate::cache::project_info::ProjectInfoCacheImpl; -use crate::config::{CacheOptions, EndpointCacheConfig, ProjectInfoCacheOptions}; +use crate::config::{CacheOptions, ProjectInfoCacheOptions}; use crate::context::RequestContext; use crate::control_plane::{CachedNodeInfo, ControlPlaneApi, NodeInfoCache, errors}; use crate::error::ReportableError; @@ -121,15 +120,12 @@ pub struct ApiCaches { pub(crate) node_info: NodeInfoCache, /// Cache which stores project_id -> endpoint_ids mapping. 
pub project_info: Arc, - /// List of all valid endpoints. - pub endpoints_cache: Arc, } impl ApiCaches { pub fn new( wake_compute_cache_config: CacheOptions, project_info_cache_config: ProjectInfoCacheOptions, - endpoint_cache_config: EndpointCacheConfig, ) -> Self { Self { node_info: NodeInfoCache::new( @@ -139,7 +135,6 @@ impl ApiCaches { true, ), project_info: Arc::new(ProjectInfoCacheImpl::new(project_info_cache_config)), - endpoints_cache: Arc::new(EndpointsCache::new(endpoint_cache_config)), } } } diff --git a/proxy/src/control_plane/errors.rs b/proxy/src/control_plane/errors.rs index 77312c89c5..1e43010957 100644 --- a/proxy/src/control_plane/errors.rs +++ b/proxy/src/control_plane/errors.rs @@ -43,28 +43,35 @@ impl UserFacingError for ControlPlaneError { } impl ReportableError for ControlPlaneError { - fn get_error_kind(&self) -> crate::error::ErrorKind { + fn get_error_kind(&self) -> ErrorKind { match self { ControlPlaneError::Message(e) => match e.get_reason() { - Reason::RoleProtected => ErrorKind::User, - Reason::ResourceNotFound => ErrorKind::User, - Reason::ProjectNotFound => ErrorKind::User, - Reason::EndpointNotFound => ErrorKind::User, - Reason::BranchNotFound => ErrorKind::User, + Reason::RoleProtected + | Reason::ResourceNotFound + | Reason::ProjectNotFound + | Reason::EndpointNotFound + | Reason::EndpointDisabled + | Reason::BranchNotFound + | Reason::WrongLsnOrTimestamp => ErrorKind::User, + Reason::RateLimitExceeded => ErrorKind::ServiceRateLimit, - Reason::NonDefaultBranchComputeTimeExceeded => ErrorKind::Quota, - Reason::ActiveTimeQuotaExceeded => ErrorKind::Quota, - Reason::ComputeTimeQuotaExceeded => ErrorKind::Quota, - Reason::WrittenDataQuotaExceeded => ErrorKind::Quota, - Reason::DataTransferQuotaExceeded => ErrorKind::Quota, - Reason::LogicalSizeQuotaExceeded => ErrorKind::Quota, - Reason::ConcurrencyLimitReached => ErrorKind::ControlPlane, - Reason::LockAlreadyTaken => ErrorKind::ControlPlane, - Reason::RunningOperations => 
ErrorKind::ControlPlane, - Reason::ActiveEndpointsLimitExceeded => ErrorKind::ControlPlane, - Reason::Unknown => ErrorKind::ControlPlane, + + Reason::NonDefaultBranchComputeTimeExceeded + | Reason::ActiveTimeQuotaExceeded + | Reason::ComputeTimeQuotaExceeded + | Reason::WrittenDataQuotaExceeded + | Reason::DataTransferQuotaExceeded + | Reason::LogicalSizeQuotaExceeded + | Reason::ActiveEndpointsLimitExceeded => ErrorKind::Quota, + + Reason::ConcurrencyLimitReached + | Reason::LockAlreadyTaken + | Reason::RunningOperations + | Reason::EndpointIdle + | Reason::ProjectUnderMaintenance + | Reason::Unknown => ErrorKind::ControlPlane, }, - ControlPlaneError::Transport(_) => crate::error::ErrorKind::ControlPlane, + ControlPlaneError::Transport(_) => ErrorKind::ControlPlane, } } } @@ -99,10 +106,6 @@ pub(crate) enum GetAuthInfoError { #[error(transparent)] ApiError(ControlPlaneError), - - /// Proxy does not know about the endpoint in advanced - #[error("endpoint not found in endpoint cache")] - UnknownEndpoint, } // This allows more useful interactions than `#[from]`. @@ -119,19 +122,15 @@ impl UserFacingError for GetAuthInfoError { Self::BadSecret => REQUEST_FAILED.to_owned(), // However, API might return a meaningful error. Self::ApiError(e) => e.to_string_client(), - // pretend like control plane returned an error. - Self::UnknownEndpoint => REQUEST_FAILED.to_owned(), } } } impl ReportableError for GetAuthInfoError { - fn get_error_kind(&self) -> crate::error::ErrorKind { + fn get_error_kind(&self) -> ErrorKind { match self { - Self::BadSecret => crate::error::ErrorKind::ControlPlane, - Self::ApiError(_) => crate::error::ErrorKind::ControlPlane, - // we only apply endpoint filtering if control plane is under high load. 
- Self::UnknownEndpoint => crate::error::ErrorKind::ServiceRateLimit, + Self::BadSecret => ErrorKind::ControlPlane, + Self::ApiError(_) => ErrorKind::ControlPlane, } } } @@ -200,9 +199,6 @@ impl CouldRetry for WakeComputeError { #[derive(Debug, Error)] pub enum GetEndpointJwksError { - #[error("endpoint not found")] - EndpointNotFound, - #[error("failed to build control plane request: {0}")] RequestBuild(#[source] reqwest::Error), diff --git a/proxy/src/control_plane/messages.rs b/proxy/src/control_plane/messages.rs index f0314f91f0..d44d7efcc3 100644 --- a/proxy/src/control_plane/messages.rs +++ b/proxy/src/control_plane/messages.rs @@ -107,7 +107,7 @@ pub(crate) struct ErrorInfo { // Schema could also have `metadata` field, but it's not structured. Skip it for now. } -#[derive(Clone, Copy, Debug, Deserialize, Default)] +#[derive(Clone, Copy, Debug, Deserialize, Default, PartialEq, Eq)] pub(crate) enum Reason { /// RoleProtected indicates that the role is protected and the attempted operation is not permitted on protected roles. #[serde(rename = "ROLE_PROTECTED")] @@ -126,10 +126,16 @@ pub(crate) enum Reason { /// or that the subject doesn't have enough permissions to access the requested endpoint. #[serde(rename = "ENDPOINT_NOT_FOUND")] EndpointNotFound, + /// EndpointDisabled indicates that the endpoint has been disabled and does not accept connections. + #[serde(rename = "ENDPOINT_DISABLED")] + EndpointDisabled, /// BranchNotFound indicates that the branch wasn't found, usually due to the provided ID not being correct, /// or that the subject doesn't have enough permissions to access the requested branch. #[serde(rename = "BRANCH_NOT_FOUND")] BranchNotFound, + /// WrongLsnOrTimestamp indicates that the specified LSN or timestamp are wrong. + #[serde(rename = "WRONG_LSN_OR_TIMESTAMP")] + WrongLsnOrTimestamp, /// RateLimitExceeded indicates that the rate limit for the operation has been exceeded. 
#[serde(rename = "RATE_LIMIT_EXCEEDED")] RateLimitExceeded, @@ -152,6 +158,9 @@ pub(crate) enum Reason { /// LogicalSizeQuotaExceeded indicates that the logical size quota was exceeded. #[serde(rename = "LOGICAL_SIZE_QUOTA_EXCEEDED")] LogicalSizeQuotaExceeded, + /// ActiveEndpointsLimitExceeded indicates that the limit of concurrently active endpoints was exceeded. + #[serde(rename = "ACTIVE_ENDPOINTS_LIMIT_EXCEEDED")] + ActiveEndpointsLimitExceeded, /// RunningOperations indicates that the project already has some running operations /// and scheduling of new ones is prohibited. #[serde(rename = "RUNNING_OPERATIONS")] @@ -162,9 +171,13 @@ pub(crate) enum Reason { /// LockAlreadyTaken indicates that the we attempted to take a lock that was already taken. #[serde(rename = "LOCK_ALREADY_TAKEN")] LockAlreadyTaken, - /// ActiveEndpointsLimitExceeded indicates that the limit of concurrently active endpoints was exceeded. - #[serde(rename = "ACTIVE_ENDPOINTS_LIMIT_EXCEEDED")] - ActiveEndpointsLimitExceeded, + /// EndpointIdle indicates that the endpoint cannot become active, because it's idle. + #[serde(rename = "ENDPOINT_IDLE")] + EndpointIdle, + /// ProjectUnderMaintenance indicates that the project is currently ongoing maintenance, + /// and thus cannot accept connections. 
+ #[serde(rename = "PROJECT_UNDER_MAINTENANCE")] + ProjectUnderMaintenance, #[default] #[serde(other)] Unknown, @@ -184,13 +197,15 @@ impl Reason { pub(crate) fn can_retry(self) -> bool { match self { // do not retry role protected errors - // not a transitive error + // not a transient error Reason::RoleProtected => false, - // on retry, it will still not be found + // on retry, it will still not be found or valid Reason::ResourceNotFound | Reason::ProjectNotFound | Reason::EndpointNotFound - | Reason::BranchNotFound => false, + | Reason::EndpointDisabled + | Reason::BranchNotFound + | Reason::WrongLsnOrTimestamp => false, // we were asked to go away Reason::RateLimitExceeded | Reason::NonDefaultBranchComputeTimeExceeded @@ -200,11 +215,13 @@ impl Reason { | Reason::DataTransferQuotaExceeded | Reason::LogicalSizeQuotaExceeded | Reason::ActiveEndpointsLimitExceeded => false, - // transitive error. control plane is currently busy + // transient error. control plane is currently busy // but might be ready soon Reason::RunningOperations | Reason::ConcurrencyLimitReached - | Reason::LockAlreadyTaken => true, + | Reason::LockAlreadyTaken + | Reason::EndpointIdle + | Reason::ProjectUnderMaintenance => true, // unknown error. better not retry it. 
Reason::Unknown => false, } @@ -240,19 +257,19 @@ pub(crate) struct GetEndpointAccessControl { pub(crate) rate_limits: EndpointRateLimitConfig, } -#[derive(Copy, Clone, Deserialize, Default)] +#[derive(Copy, Clone, Deserialize, Default, Debug)] pub struct EndpointRateLimitConfig { pub connection_attempts: ConnectionAttemptsLimit, } -#[derive(Copy, Clone, Deserialize, Default)] +#[derive(Copy, Clone, Deserialize, Default, Debug)] pub struct ConnectionAttemptsLimit { pub tcp: Option, pub ws: Option, pub http: Option, } -#[derive(Copy, Clone, Deserialize)] +#[derive(Copy, Clone, Deserialize, Debug)] pub struct LeakyBucketSetting { pub rps: f64, pub burst: f64, diff --git a/proxy/src/control_plane/mod.rs b/proxy/src/control_plane/mod.rs index a8c59dad0c..9bbd3f4fb7 100644 --- a/proxy/src/control_plane/mod.rs +++ b/proxy/src/control_plane/mod.rs @@ -82,7 +82,7 @@ impl NodeInfo { } } -#[derive(Copy, Clone, Default)] +#[derive(Copy, Clone, Default, Debug)] pub(crate) struct AccessBlockerFlags { pub public_access_blocked: bool, pub vpc_access_blocked: bool, @@ -92,12 +92,12 @@ pub(crate) type NodeInfoCache = TimedLru>>; pub(crate) type CachedNodeInfo = Cached<&'static NodeInfoCache, NodeInfo>; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct RoleAccessControl { pub secret: Option, } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct EndpointAccessControl { pub allowed_ips: Arc>, pub allowed_vpce: Arc>, diff --git a/proxy/src/logging.rs b/proxy/src/logging.rs index e608300bd2..d4fd826c13 100644 --- a/proxy/src/logging.rs +++ b/proxy/src/logging.rs @@ -1,12 +1,10 @@ use std::cell::RefCell; use std::collections::HashMap; use std::sync::Arc; -use std::sync::atomic::{AtomicU32, Ordering}; use std::{env, io}; use chrono::{DateTime, Utc}; use opentelemetry::trace::TraceContextExt; -use serde::ser::{SerializeMap, Serializer}; use tracing::subscriber::Interest; use tracing::{Event, Metadata, Span, Subscriber, callsite, span}; use tracing_opentelemetry::OpenTelemetrySpanExt; 
@@ -16,7 +14,9 @@ use tracing_subscriber::fmt::time::SystemTime; use tracing_subscriber::fmt::{FormatEvent, FormatFields}; use tracing_subscriber::layer::{Context, Layer}; use tracing_subscriber::prelude::*; -use tracing_subscriber::registry::{LookupSpan, SpanRef}; +use tracing_subscriber::registry::LookupSpan; + +use crate::metrics::Metrics; /// Initialize logging and OpenTelemetry tracing and exporter. /// @@ -210,6 +210,9 @@ struct JsonLoggingLayer { /// tracks which fields of each **event** are duplicates skipped_field_indices: CallsiteMap, + /// tracks callsite names to an ID. + callsite_name_ids: papaya::HashMap<&'static str, u32, ahash::RandomState>, + span_info: CallsiteMap, /// Fields we want to keep track of in a separate json object. @@ -222,6 +225,7 @@ impl JsonLoggingLayer { clock, skipped_field_indices: CallsiteMap::default(), span_info: CallsiteMap::default(), + callsite_name_ids: papaya::HashMap::default(), writer, extract_fields, } @@ -232,7 +236,7 @@ impl JsonLoggingLayer { self.span_info .pin() .get_or_insert_with(metadata.callsite(), || { - CallsiteSpanInfo::new(metadata, self.extract_fields) + CallsiteSpanInfo::new(&self.callsite_name_ids, metadata, self.extract_fields) }) .clone() } @@ -249,7 +253,7 @@ where // early, before OTel machinery, and add as event extension. let now = self.clock.now(); - let res: io::Result<()> = EVENT_FORMATTER.with(|f| { + EVENT_FORMATTER.with(|f| { let mut borrow = f.try_borrow_mut(); let formatter = match borrow.as_deref_mut() { Ok(formatter) => formatter, @@ -259,31 +263,19 @@ where Err(_) => &mut EventFormatter::new(), }; - formatter.reset(); formatter.format( now, event, &ctx, &self.skipped_field_indices, self.extract_fields, - )?; - self.writer.make_writer().write_all(formatter.buffer()) - }); + ); - // In case logging fails we generate a simpler JSON object. 
- if let Err(err) = res - && let Ok(mut line) = serde_json::to_vec(&serde_json::json!( { - "timestamp": now.to_rfc3339_opts(chrono::SecondsFormat::Micros, true), - "level": "ERROR", - "message": format_args!("cannot log event: {err:?}"), - "fields": { - "event": format_args!("{event:?}"), - }, - })) - { - line.push(b'\n'); - self.writer.make_writer().write_all(&line).ok(); - } + let mut writer = self.writer.make_writer(); + if writer.write_all(formatter.buffer()).is_err() { + Metrics::get().proxy.logging_errors_count.inc(); + } + }); } /// Registers a SpanFields instance as span extension. @@ -356,10 +348,11 @@ struct CallsiteSpanInfo { } impl CallsiteSpanInfo { - fn new(metadata: &'static Metadata<'static>, extract_fields: &[&'static str]) -> Self { - // Start at 1 to reserve 0 for default. - static COUNTER: AtomicU32 = AtomicU32::new(1); - + fn new( + callsite_name_ids: &papaya::HashMap<&'static str, u32, ahash::RandomState>, + metadata: &'static Metadata<'static>, + extract_fields: &[&'static str], + ) -> Self { let names: Vec<&'static str> = metadata.fields().iter().map(|f| f.name()).collect(); // get all the indices of span fields we want to focus @@ -372,8 +365,18 @@ impl CallsiteSpanInfo { // normalized_name is unique for each callsite, but it is not // unified across separate proxy instances. // todo: can we do better here? - let cid = COUNTER.fetch_add(1, Ordering::Relaxed); - let normalized_name = format!("{}#{cid}", metadata.name()).into(); + let cid = *callsite_name_ids + .pin() + .update_or_insert(metadata.name(), |&cid| cid + 1, 0); + + // we hope that most span names are unique, in which case this will always be 0 + let normalized_name = if cid == 0 { + metadata.name().into() + } else { + // if the span name is not unique, add the numeric ID to span name to distinguish it. + // sadly this is non-determinstic, across restarts but we should fix it by disambiguating re-used span names instead. 
+ format!("{}#{cid}", metadata.name()).into() + }; Self { extract, @@ -382,9 +385,24 @@ impl CallsiteSpanInfo { } } +#[derive(Clone)] +struct RawValue(Box<[u8]>); + +impl RawValue { + fn new(v: impl json::ValueEncoder) -> Self { + Self(json::value_to_vec!(|val| v.encode(val)).into_boxed_slice()) + } +} + +impl json::ValueEncoder for &RawValue { + fn encode(self, v: json::ValueSer<'_>) { + v.write_raw_json(&self.0); + } +} + /// Stores span field values recorded during the spans lifetime. struct SpanFields { - values: [serde_json::Value; MAX_TRACING_FIELDS], + values: [Option; MAX_TRACING_FIELDS], /// cached span info so we can avoid extra hashmap lookups in the hot path. span_info: CallsiteSpanInfo, @@ -394,7 +412,7 @@ impl SpanFields { fn new(span_info: CallsiteSpanInfo) -> Self { Self { span_info, - values: [const { serde_json::Value::Null }; MAX_TRACING_FIELDS], + values: [const { None }; MAX_TRACING_FIELDS], } } } @@ -402,55 +420,55 @@ impl SpanFields { impl tracing::field::Visit for SpanFields { #[inline] fn record_f64(&mut self, field: &tracing::field::Field, value: f64) { - self.values[field.index()] = serde_json::Value::from(value); + self.values[field.index()] = Some(RawValue::new(value)); } #[inline] fn record_i64(&mut self, field: &tracing::field::Field, value: i64) { - self.values[field.index()] = serde_json::Value::from(value); + self.values[field.index()] = Some(RawValue::new(value)); } #[inline] fn record_u64(&mut self, field: &tracing::field::Field, value: u64) { - self.values[field.index()] = serde_json::Value::from(value); + self.values[field.index()] = Some(RawValue::new(value)); } #[inline] fn record_i128(&mut self, field: &tracing::field::Field, value: i128) { if let Ok(value) = i64::try_from(value) { - self.values[field.index()] = serde_json::Value::from(value); + self.values[field.index()] = Some(RawValue::new(value)); } else { - self.values[field.index()] = serde_json::Value::from(format!("{value}")); + self.values[field.index()] = 
Some(RawValue::new(format_args!("{value}"))); } } #[inline] fn record_u128(&mut self, field: &tracing::field::Field, value: u128) { if let Ok(value) = u64::try_from(value) { - self.values[field.index()] = serde_json::Value::from(value); + self.values[field.index()] = Some(RawValue::new(value)); } else { - self.values[field.index()] = serde_json::Value::from(format!("{value}")); + self.values[field.index()] = Some(RawValue::new(format_args!("{value}"))); } } #[inline] fn record_bool(&mut self, field: &tracing::field::Field, value: bool) { - self.values[field.index()] = serde_json::Value::from(value); + self.values[field.index()] = Some(RawValue::new(value)); } #[inline] fn record_bytes(&mut self, field: &tracing::field::Field, value: &[u8]) { - self.values[field.index()] = serde_json::Value::from(value); + self.values[field.index()] = Some(RawValue::new(value)); } #[inline] fn record_str(&mut self, field: &tracing::field::Field, value: &str) { - self.values[field.index()] = serde_json::Value::from(value); + self.values[field.index()] = Some(RawValue::new(value)); } #[inline] fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { - self.values[field.index()] = serde_json::Value::from(format!("{value:?}")); + self.values[field.index()] = Some(RawValue::new(format_args!("{value:?}"))); } #[inline] @@ -459,7 +477,7 @@ impl tracing::field::Visit for SpanFields { field: &tracing::field::Field, value: &(dyn std::error::Error + 'static), ) { - self.values[field.index()] = serde_json::Value::from(format!("{value}")); + self.values[field.index()] = Some(RawValue::new(format_args!("{value}"))); } } @@ -508,11 +526,6 @@ impl EventFormatter { &self.logline_buffer } - #[inline] - fn reset(&mut self) { - self.logline_buffer.clear(); - } - fn format( &mut self, now: DateTime, @@ -520,8 +533,7 @@ impl EventFormatter { ctx: &Context<'_, S>, skipped_field_indices: &CallsiteMap, extract_fields: &'static [&'static str], - ) -> io::Result<()> - where + ) 
where S: Subscriber + for<'a> LookupSpan<'a>, { let timestamp = now.to_rfc3339_opts(chrono::SecondsFormat::Micros, true); @@ -536,78 +548,99 @@ impl EventFormatter { .copied() .unwrap_or_default(); - let mut serialize = || { - let mut serializer = serde_json::Serializer::new(&mut self.logline_buffer); - - let mut serializer = serializer.serialize_map(None)?; - + self.logline_buffer.clear(); + let serializer = json::ValueSer::new(&mut self.logline_buffer); + json::value_as_object!(|serializer| { // Timestamp comes first, so raw lines can be sorted by timestamp. - serializer.serialize_entry("timestamp", ×tamp)?; + serializer.entry("timestamp", &*timestamp); // Level next. - serializer.serialize_entry("level", &meta.level().as_str())?; + serializer.entry("level", meta.level().as_str()); // Message next. - serializer.serialize_key("message")?; let mut message_extractor = - MessageFieldExtractor::new(serializer, skipped_field_indices); + MessageFieldExtractor::new(serializer.key("message"), skipped_field_indices); event.record(&mut message_extractor); - let mut serializer = message_extractor.into_serializer()?; + message_extractor.finish(); // Direct message fields. - let mut fields_present = FieldsPresent(false, skipped_field_indices); - event.record(&mut fields_present); - if fields_present.0 { - serializer.serialize_entry( - "fields", - &SerializableEventFields(event, skipped_field_indices), - )?; + { + let mut message_skipper = MessageFieldSkipper::new( + serializer.key("fields").object(), + skipped_field_indices, + ); + event.record(&mut message_skipper); + + // rollback if no fields are present. + if message_skipper.present { + message_skipper.serializer.finish(); + } } - let spans = SerializableSpans { - // collect all spans from parent to root. 
- spans: ctx + let mut extracted = ExtractedSpanFields::new(extract_fields); + + let spans = serializer.key("spans"); + json::value_as_object!(|spans| { + let parent_spans = ctx .event_span(event) - .map_or(vec![], |parent| parent.scope().collect()), - extracted: ExtractedSpanFields::new(extract_fields), - }; - serializer.serialize_entry("spans", &spans)?; + .map_or(vec![], |parent| parent.scope().collect()); + + for span in parent_spans.iter().rev() { + let ext = span.extensions(); + + // all spans should have this extension. + let Some(fields) = ext.get() else { continue }; + + extracted.layer_span(fields); + + let SpanFields { values, span_info } = fields; + + let span_fields = spans.key(&*span_info.normalized_name); + json::value_as_object!(|span_fields| { + for (field, value) in std::iter::zip(span.metadata().fields(), values) { + if let Some(value) = value { + span_fields.entry(field.name(), value); + } + } + }); + } + }); // TODO: thread-local cache? let pid = std::process::id(); // Skip adding pid 1 to reduce noise for services running in containers. if pid != 1 { - serializer.serialize_entry("process_id", &pid)?; + serializer.entry("process_id", pid); } - THREAD_ID.with(|tid| serializer.serialize_entry("thread_id", tid))?; + THREAD_ID.with(|tid| serializer.entry("thread_id", tid)); // TODO: tls cache? name could change if let Some(thread_name) = std::thread::current().name() && !thread_name.is_empty() && thread_name != "tokio-runtime-worker" { - serializer.serialize_entry("thread_name", thread_name)?; + serializer.entry("thread_name", thread_name); } if let Some(task_id) = tokio::task::try_id() { - serializer.serialize_entry("task_id", &format_args!("{task_id}"))?; + serializer.entry("task_id", format_args!("{task_id}")); } - serializer.serialize_entry("target", meta.target())?; + serializer.entry("target", meta.target()); // Skip adding module if it's the same as target. 
if let Some(module) = meta.module_path() && module != meta.target() { - serializer.serialize_entry("module", module)?; + serializer.entry("module", module); } if let Some(file) = meta.file() { if let Some(line) = meta.line() { - serializer.serialize_entry("src", &format_args!("{file}:{line}"))?; + serializer.entry("src", format_args!("{file}:{line}")); } else { - serializer.serialize_entry("src", file)?; + serializer.entry("src", file); } } @@ -616,124 +649,104 @@ impl EventFormatter { let otel_spanref = otel_context.span(); let span_context = otel_spanref.span_context(); if span_context.is_valid() { - serializer.serialize_entry( - "trace_id", - &format_args!("{}", span_context.trace_id()), - )?; + serializer.entry("trace_id", format_args!("{}", span_context.trace_id())); } } - if spans.extracted.has_values() { + if extracted.has_values() { // TODO: add fields from event, too? - serializer.serialize_entry("extract", &spans.extracted)?; + let extract = serializer.key("extract"); + json::value_as_object!(|extract| { + for (key, value) in std::iter::zip(extracted.names, extracted.values) { + if let Some(value) = value { + extract.entry(*key, &value); + } + } + }); } + }); - serializer.end() - }; - - serialize().map_err(io::Error::other)?; self.logline_buffer.push(b'\n'); - Ok(()) } } /// Extracts the message field that's mixed will other fields. 
-struct MessageFieldExtractor { - serializer: S, +struct MessageFieldExtractor<'buf> { + serializer: Option>, skipped_field_indices: SkippedFieldIndices, - state: Option>, } -impl MessageFieldExtractor { +impl<'buf> MessageFieldExtractor<'buf> { #[inline] - fn new(serializer: S, skipped_field_indices: SkippedFieldIndices) -> Self { + fn new(serializer: json::ValueSer<'buf>, skipped_field_indices: SkippedFieldIndices) -> Self { Self { - serializer, + serializer: Some(serializer), skipped_field_indices, - state: None, } } #[inline] - fn into_serializer(mut self) -> Result { - match self.state { - Some(Ok(())) => {} - Some(Err(err)) => return Err(err), - None => self.serializer.serialize_value("")?, + fn finish(self) { + if let Some(ser) = self.serializer { + ser.value(""); } - Ok(self.serializer) } #[inline] - fn accept_field(&self, field: &tracing::field::Field) -> bool { - self.state.is_none() - && field.name() == MESSAGE_FIELD + fn record_field(&mut self, field: &tracing::field::Field, v: impl json::ValueEncoder) { + if field.name() == MESSAGE_FIELD && !self.skipped_field_indices.contains(field.index()) + && let Some(ser) = self.serializer.take() + { + ser.value(v); + } } } -impl tracing::field::Visit for MessageFieldExtractor { +impl tracing::field::Visit for MessageFieldExtractor<'_> { #[inline] fn record_f64(&mut self, field: &tracing::field::Field, value: f64) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&value)); - } + self.record_field(field, value); } #[inline] fn record_i64(&mut self, field: &tracing::field::Field, value: i64) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&value)); - } + self.record_field(field, value); } #[inline] fn record_u64(&mut self, field: &tracing::field::Field, value: u64) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&value)); - } + self.record_field(field, value); } #[inline] fn record_i128(&mut self, field: 
&tracing::field::Field, value: i128) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&value)); - } + self.record_field(field, value); } #[inline] fn record_u128(&mut self, field: &tracing::field::Field, value: u128) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&value)); - } + self.record_field(field, value); } #[inline] fn record_bool(&mut self, field: &tracing::field::Field, value: bool) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&value)); - } + self.record_field(field, value); } #[inline] fn record_bytes(&mut self, field: &tracing::field::Field, value: &[u8]) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&format_args!("{value:x?}"))); - } + self.record_field(field, format_args!("{value:x?}")); } #[inline] fn record_str(&mut self, field: &tracing::field::Field, value: &str) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&value)); - } + self.record_field(field, value); } #[inline] fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&format_args!("{value:?}"))); - } + self.record_field(field, format_args!("{value:?}")); } #[inline] @@ -742,147 +755,83 @@ impl tracing::field::Visit for MessageFieldExtracto field: &tracing::field::Field, value: &(dyn std::error::Error + 'static), ) { - if self.accept_field(field) { - self.state = Some(self.serializer.serialize_value(&format_args!("{value}"))); - } - } -} - -/// Checks if there's any fields and field values present. If not, the JSON subobject -/// can be skipped. -// This is entirely optional and only cosmetic, though maybe helps a -// bit during log parsing in dashboards when there's no field with empty object. 
-struct FieldsPresent(pub bool, SkippedFieldIndices); - -// Even though some methods have an overhead (error, bytes) it is assumed the -// compiler won't include this since we ignore the value entirely. -impl tracing::field::Visit for FieldsPresent { - #[inline] - fn record_debug(&mut self, field: &tracing::field::Field, _: &dyn std::fmt::Debug) { - if !self.1.contains(field.index()) - && field.name() != MESSAGE_FIELD - && !field.name().starts_with("log.") - { - self.0 |= true; - } - } -} - -/// Serializes the fields directly supplied with a log event. -struct SerializableEventFields<'a, 'event>(&'a tracing::Event<'event>, SkippedFieldIndices); - -impl serde::ser::Serialize for SerializableEventFields<'_, '_> { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - use serde::ser::SerializeMap; - let serializer = serializer.serialize_map(None)?; - let mut message_skipper = MessageFieldSkipper::new(serializer, self.1); - self.0.record(&mut message_skipper); - let serializer = message_skipper.into_serializer()?; - serializer.end() + self.record_field(field, format_args!("{value}")); } } /// A tracing field visitor that skips the message field. 
-struct MessageFieldSkipper { - serializer: S, +struct MessageFieldSkipper<'buf> { + serializer: json::ObjectSer<'buf>, skipped_field_indices: SkippedFieldIndices, - state: Result<(), S::Error>, + present: bool, } -impl MessageFieldSkipper { +impl<'buf> MessageFieldSkipper<'buf> { #[inline] - fn new(serializer: S, skipped_field_indices: SkippedFieldIndices) -> Self { + fn new(serializer: json::ObjectSer<'buf>, skipped_field_indices: SkippedFieldIndices) -> Self { Self { serializer, skipped_field_indices, - state: Ok(()), + present: false, } } #[inline] - fn accept_field(&self, field: &tracing::field::Field) -> bool { - self.state.is_ok() - && field.name() != MESSAGE_FIELD + fn record_field(&mut self, field: &tracing::field::Field, v: impl json::ValueEncoder) { + if field.name() != MESSAGE_FIELD && !field.name().starts_with("log.") && !self.skipped_field_indices.contains(field.index()) - } - - #[inline] - fn into_serializer(self) -> Result { - self.state?; - Ok(self.serializer) + { + self.serializer.entry(field.name(), v); + self.present |= true; + } } } -impl tracing::field::Visit for MessageFieldSkipper { +impl tracing::field::Visit for MessageFieldSkipper<'_> { #[inline] fn record_f64(&mut self, field: &tracing::field::Field, value: f64) { - if self.accept_field(field) { - self.state = self.serializer.serialize_entry(field.name(), &value); - } + self.record_field(field, value); } #[inline] fn record_i64(&mut self, field: &tracing::field::Field, value: i64) { - if self.accept_field(field) { - self.state = self.serializer.serialize_entry(field.name(), &value); - } + self.record_field(field, value); } #[inline] fn record_u64(&mut self, field: &tracing::field::Field, value: u64) { - if self.accept_field(field) { - self.state = self.serializer.serialize_entry(field.name(), &value); - } + self.record_field(field, value); } #[inline] fn record_i128(&mut self, field: &tracing::field::Field, value: i128) { - if self.accept_field(field) { - self.state = 
self.serializer.serialize_entry(field.name(), &value); - } + self.record_field(field, value); } #[inline] fn record_u128(&mut self, field: &tracing::field::Field, value: u128) { - if self.accept_field(field) { - self.state = self.serializer.serialize_entry(field.name(), &value); - } + self.record_field(field, value); } #[inline] fn record_bool(&mut self, field: &tracing::field::Field, value: bool) { - if self.accept_field(field) { - self.state = self.serializer.serialize_entry(field.name(), &value); - } + self.record_field(field, value); } #[inline] fn record_bytes(&mut self, field: &tracing::field::Field, value: &[u8]) { - if self.accept_field(field) { - self.state = self - .serializer - .serialize_entry(field.name(), &format_args!("{value:x?}")); - } + self.record_field(field, format_args!("{value:x?}")); } #[inline] fn record_str(&mut self, field: &tracing::field::Field, value: &str) { - if self.accept_field(field) { - self.state = self.serializer.serialize_entry(field.name(), &value); - } + self.record_field(field, value); } #[inline] fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { - if self.accept_field(field) { - self.state = self - .serializer - .serialize_entry(field.name(), &format_args!("{value:?}")); - } + self.record_field(field, format_args!("{value:?}")); } #[inline] @@ -891,131 +840,40 @@ impl tracing::field::Visit for MessageFieldSkipper< field: &tracing::field::Field, value: &(dyn std::error::Error + 'static), ) { - if self.accept_field(field) { - self.state = self.serializer.serialize_value(&format_args!("{value}")); - } - } -} - -/// Serializes the span stack from root to leaf (parent of event) as object -/// with the span names as keys. To prevent collision we append a numberic value -/// to the name. Also, collects any span fields we're interested in. Last one -/// wins. 
-struct SerializableSpans<'ctx, S> -where - S: for<'lookup> LookupSpan<'lookup>, -{ - spans: Vec>, - extracted: ExtractedSpanFields, -} - -impl serde::ser::Serialize for SerializableSpans<'_, S> -where - S: for<'lookup> LookupSpan<'lookup>, -{ - fn serialize(&self, serializer: Ser) -> Result - where - Ser: serde::ser::Serializer, - { - let mut serializer = serializer.serialize_map(None)?; - - for span in self.spans.iter().rev() { - let ext = span.extensions(); - - // all spans should have this extension. - let Some(fields) = ext.get() else { continue }; - - self.extracted.layer_span(fields); - - let SpanFields { values, span_info } = fields; - serializer.serialize_entry( - &*span_info.normalized_name, - &SerializableSpanFields { - fields: span.metadata().fields(), - values, - }, - )?; - } - - serializer.end() - } -} - -/// Serializes the span fields as object. -struct SerializableSpanFields<'span> { - fields: &'span tracing::field::FieldSet, - values: &'span [serde_json::Value; MAX_TRACING_FIELDS], -} - -impl serde::ser::Serialize for SerializableSpanFields<'_> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::ser::Serializer, - { - let mut serializer = serializer.serialize_map(None)?; - - for (field, value) in std::iter::zip(self.fields, self.values) { - if value.is_null() { - continue; - } - serializer.serialize_entry(field.name(), value)?; - } - - serializer.end() + self.record_field(field, format_args!("{value}")); } } struct ExtractedSpanFields { names: &'static [&'static str], - values: RefCell>, + values: Vec>, } impl ExtractedSpanFields { fn new(names: &'static [&'static str]) -> Self { ExtractedSpanFields { names, - values: RefCell::new(vec![serde_json::Value::Null; names.len()]), + values: vec![None; names.len()], } } - fn layer_span(&self, fields: &SpanFields) { - let mut v = self.values.borrow_mut(); + fn layer_span(&mut self, fields: &SpanFields) { let SpanFields { values, span_info } = fields; // extract the fields for (i, &j) in 
span_info.extract.iter().enumerate() { - let Some(value) = values.get(j) else { continue }; + let Some(Some(value)) = values.get(j) else { + continue; + }; - if !value.is_null() { - // TODO: replace clone with reference, if possible. - v[i] = value.clone(); - } + // TODO: replace clone with reference, if possible. + self.values[i] = Some(value.clone()); } } #[inline] fn has_values(&self) -> bool { - self.values.borrow().iter().any(|v| !v.is_null()) - } -} - -impl serde::ser::Serialize for ExtractedSpanFields { - fn serialize(&self, serializer: S) -> Result - where - S: serde::ser::Serializer, - { - let mut serializer = serializer.serialize_map(None)?; - - let values = self.values.borrow(); - for (key, value) in std::iter::zip(self.names, &*values) { - if value.is_null() { - continue; - } - - serializer.serialize_entry(key, value)?; - } - - serializer.end() + self.values.iter().any(|v| v.is_some()) } } @@ -1070,6 +928,7 @@ mod tests { clock: clock.clone(), skipped_field_indices: papaya::HashMap::default(), span_info: papaya::HashMap::default(), + callsite_name_ids: papaya::HashMap::default(), writer: buffer.clone(), extract_fields: &["x"], }; @@ -1078,14 +937,16 @@ mod tests { tracing::subscriber::with_default(registry, || { info_span!("some_span", x = 24).in_scope(|| { - info_span!("some_span", x = 40, x = 41, x = 42).in_scope(|| { - tracing::error!( - a = 1, - a = 2, - a = 3, - message = "explicit message field", - "implicit message field" - ); + info_span!("some_other_span", y = 30).in_scope(|| { + info_span!("some_span", x = 40, x = 41, x = 42).in_scope(|| { + tracing::error!( + a = 1, + a = 2, + a = 3, + message = "explicit message field", + "implicit message field" + ); + }); }); }); }); @@ -1104,12 +965,15 @@ mod tests { "a": 3, }, "spans": { - "some_span#1":{ + "some_span":{ "x": 24, }, - "some_span#2": { + "some_other_span": { + "y": 30, + }, + "some_span#1": { "x": 42, - } + }, }, "extract": { "x": 42, diff --git a/proxy/src/metrics.rs 
b/proxy/src/metrics.rs index 7a21e4ecee..916604e2ec 100644 --- a/proxy/src/metrics.rs +++ b/proxy/src/metrics.rs @@ -10,7 +10,7 @@ use measured::{ Counter, CounterVec, FixedCardinalityLabel, Gauge, Histogram, HistogramVec, LabelGroup, MetricGroup, }; -use metrics::{CounterPairAssoc, CounterPairVec, HyperLogLog, HyperLogLogVec}; +use metrics::{CounterPairAssoc, CounterPairVec, HyperLogLogVec}; use tokio::time::{self, Instant}; use crate::control_plane::messages::ColdStartInfo; @@ -36,7 +36,6 @@ impl Metrics { metrics.proxy.redis_errors_total.init_all_dense(); metrics.proxy.redis_events_count.init_all_dense(); metrics.proxy.retries_metric.init_all_dense(); - metrics.proxy.invalid_endpoints_total.init_all_dense(); metrics.proxy.connection_failures_total.init_all_dense(); SELF.set(metrics) @@ -80,11 +79,6 @@ pub struct ProxyMetrics { )] pub console_request_latency: HistogramVec, - /// Time it takes to acquire a token to call console plane. - // largest bucket = 3^16 * 0.05ms = 2.15s - #[metric(metadata = Thresholds::exponential_buckets(0.00005, 3.0))] - pub control_plane_token_acquire_seconds: Histogram<16>, - /// Size of the HTTP request body lengths. // smallest bucket = 16 bytes // largest bucket = 4^12 * 16 bytes = 256MB @@ -98,19 +92,10 @@ pub struct ProxyMetrics { /// Number of opened connections to a database. pub http_pool_opened_connections: Gauge, - /// Number of cache hits/misses for allowed ips. - pub allowed_ips_cache_misses: CounterVec>, - /// Number of allowed ips #[metric(metadata = Thresholds::with_buckets([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0, 50.0, 100.0]))] pub allowed_ips_number: Histogram<10>, - /// Number of cache hits/misses for VPC endpoint IDs. - pub vpc_endpoint_id_cache_stats: CounterVec>, - - /// Number of cache hits/misses for access blocker flags. 
- pub access_blocker_flags_cache_stats: CounterVec>, - /// Number of allowed VPC endpoints IDs #[metric(metadata = Thresholds::with_buckets([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0, 50.0, 100.0]))] pub allowed_vpc_endpoint_ids: Histogram<10>, @@ -127,6 +112,9 @@ pub struct ProxyMetrics { /// Number of bytes sent/received between all clients and backends. pub io_bytes: CounterVec>, + /// Number of IO errors while logging. + pub logging_errors_count: Counter, + /// Number of errors by a given classification. pub errors_total: CounterVec>, @@ -139,21 +127,12 @@ pub struct ProxyMetrics { /// Number of TLS handshake failures pub tls_handshake_failures: Counter, - /// Number of connection requests affected by authentication rate limits - pub requests_auth_rate_limits_total: Counter, - /// HLL approximate cardinality of endpoints that are connecting pub connecting_endpoints: HyperLogLogVec, 32>, /// Number of endpoints affected by errors of a given classification pub endpoints_affected_by_errors: HyperLogLogVec, 32>, - /// Number of endpoints affected by authentication rate limits - pub endpoints_auth_rate_limits: HyperLogLog<32>, - - /// Number of invalid endpoints (per protocol, per rejected). - pub invalid_endpoints_total: CounterVec, - /// Number of retries (per outcome, per retry_type). 
#[metric(metadata = Thresholds::with_buckets([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]))] pub retries_metric: HistogramVec, @@ -398,11 +377,10 @@ pub enum Waiting { #[label(singleton = "kind")] #[allow(clippy::enum_variant_names)] pub enum RedisMsgKind { - HSet, - HSetMultiple, + Set, + Get, + Expire, HGet, - HGetAll, - HDel, } #[derive(Default, Clone)] diff --git a/proxy/src/proxy/connect_compute.rs b/proxy/src/proxy/connect_compute.rs index 9f642f52ab..ce9774e3eb 100644 --- a/proxy/src/proxy/connect_compute.rs +++ b/proxy/src/proxy/connect_compute.rs @@ -110,7 +110,7 @@ where debug!(error = ?err, COULD_NOT_CONNECT); let node_info = if !node_info.cached() || !err.should_retry_wake_compute() { - // If we just recieved this from cplane and didn't get it from cache, we shouldn't retry. + // If we just received this from cplane and not from the cache, we shouldn't retry. // Do not need to retrieve a new node_info, just return the old one. if !should_retry(&err, num_retries, compute.retry) { Metrics::get().proxy.retries_metric.observe( diff --git a/proxy/src/proxy/mod.rs b/proxy/src/proxy/mod.rs index 08c81afa04..02651109e0 100644 --- a/proxy/src/proxy/mod.rs +++ b/proxy/src/proxy/mod.rs @@ -195,15 +195,18 @@ impl NeonOptions { // proxy options: /// `PARAMS_COMPAT` allows opting in to forwarding all startup parameters from client to compute. - pub const PARAMS_COMPAT: &str = "proxy_params_compat"; + pub const PARAMS_COMPAT: &'static str = "proxy_params_compat"; // cplane options: /// `LSN` allows provisioning an ephemeral compute with time-travel to the provided LSN. - const LSN: &str = "lsn"; + const LSN: &'static str = "lsn"; + + /// `TIMESTAMP` allows provisioning an ephemeral compute with time-travel to the provided timestamp. + const TIMESTAMP: &'static str = "timestamp"; /// `ENDPOINT_TYPE` allows configuring an ephemeral compute to be read_only or read_write. 
- const ENDPOINT_TYPE: &str = "endpoint_type"; + const ENDPOINT_TYPE: &'static str = "endpoint_type"; pub(crate) fn parse_params(params: &StartupMessageParams) -> Self { params @@ -228,6 +231,7 @@ impl NeonOptions { // This is not a cplane option, we know it does not create ephemeral computes. Self::PARAMS_COMPAT => false, Self::LSN => true, + Self::TIMESTAMP => true, Self::ENDPOINT_TYPE => true, // err on the side of caution. any cplane options we don't know about // might lead to ephemeral computes. diff --git a/proxy/src/rate_limiter/limiter.rs b/proxy/src/rate_limiter/limiter.rs index 61d4636c2b..fd1b2af023 100644 --- a/proxy/src/rate_limiter/limiter.rs +++ b/proxy/src/rate_limiter/limiter.rs @@ -16,44 +16,6 @@ use super::LeakyBucketConfig; use crate::ext::LockExt; use crate::intern::EndpointIdInt; -pub struct GlobalRateLimiter { - data: Vec, - info: Vec, -} - -impl GlobalRateLimiter { - pub fn new(info: Vec) -> Self { - Self { - data: vec![ - RateBucket { - start: Instant::now(), - count: 0, - }; - info.len() - ], - info, - } - } - - /// Check that number of connections is below `max_rps` rps. - pub fn check(&mut self) -> bool { - let now = Instant::now(); - - let should_allow_request = self - .data - .iter_mut() - .zip(&self.info) - .all(|(bucket, info)| bucket.should_allow_request(info, now, 1)); - - if should_allow_request { - // only increment the bucket counts if the request will actually be accepted - self.data.iter_mut().for_each(|b| b.inc(1)); - } - - should_allow_request - } -} - // Simple per-endpoint rate limiter. // // Check that number of connections to the endpoint is below `max_rps` rps. 
diff --git a/proxy/src/rate_limiter/mod.rs b/proxy/src/rate_limiter/mod.rs index 112b95873a..828bb63aac 100644 --- a/proxy/src/rate_limiter/mod.rs +++ b/proxy/src/rate_limiter/mod.rs @@ -8,4 +8,4 @@ pub(crate) use limit_algorithm::aimd::Aimd; pub(crate) use limit_algorithm::{ DynamicLimiter, Outcome, RateLimitAlgorithm, RateLimiterConfig, Token, }; -pub use limiter::{GlobalRateLimiter, RateBucketInfo, WakeComputeRateLimiter}; +pub use limiter::{RateBucketInfo, WakeComputeRateLimiter}; diff --git a/proxy/src/redis/connection_with_credentials_provider.rs b/proxy/src/redis/connection_with_credentials_provider.rs index 35a3fe4334..b0bf332e44 100644 --- a/proxy/src/redis/connection_with_credentials_provider.rs +++ b/proxy/src/redis/connection_with_credentials_provider.rs @@ -4,11 +4,12 @@ use std::time::Duration; use futures::FutureExt; use redis::aio::{ConnectionLike, MultiplexedConnection}; -use redis::{ConnectionInfo, IntoConnectionInfo, RedisConnectionInfo, RedisResult}; +use redis::{ConnectionInfo, IntoConnectionInfo, RedisConnectionInfo, RedisError, RedisResult}; use tokio::task::AbortHandle; use tracing::{error, info, warn}; use super::elasticache::CredentialsProvider; +use crate::redis::elasticache::CredentialsProviderError; enum Credentials { Static(ConnectionInfo), @@ -26,6 +27,14 @@ impl Clone for Credentials { } } +#[derive(thiserror::Error, Debug)] +pub enum ConnectionProviderError { + #[error(transparent)] + Redis(#[from] RedisError), + #[error(transparent)] + CredentialsProvider(#[from] CredentialsProviderError), +} + /// A wrapper around `redis::MultiplexedConnection` that automatically refreshes the token. /// Provides PubSub connection without credentials refresh. 
pub struct ConnectionWithCredentialsProvider { @@ -86,15 +95,18 @@ impl ConnectionWithCredentialsProvider { } } - async fn ping(con: &mut MultiplexedConnection) -> RedisResult<()> { - redis::cmd("PING").query_async(con).await + async fn ping(con: &mut MultiplexedConnection) -> Result<(), ConnectionProviderError> { + redis::cmd("PING") + .query_async(con) + .await + .map_err(Into::into) } pub(crate) fn credentials_refreshed(&self) -> bool { self.credentials_refreshed.load(Ordering::Relaxed) } - pub(crate) async fn connect(&mut self) -> anyhow::Result<()> { + pub(crate) async fn connect(&mut self) -> Result<(), ConnectionProviderError> { let _guard = self.mutex.lock().await; if let Some(con) = self.con.as_mut() { match Self::ping(con).await { @@ -141,7 +153,7 @@ impl ConnectionWithCredentialsProvider { Ok(()) } - async fn get_connection_info(&self) -> anyhow::Result { + async fn get_connection_info(&self) -> Result { match &self.credentials { Credentials::Static(info) => Ok(info.clone()), Credentials::Dynamic(provider, addr) => { @@ -160,7 +172,7 @@ impl ConnectionWithCredentialsProvider { } } - async fn get_client(&self) -> anyhow::Result { + async fn get_client(&self) -> Result { let client = redis::Client::open(self.get_connection_info().await?)?; self.credentials_refreshed.store(true, Ordering::Relaxed); Ok(client) diff --git a/proxy/src/redis/elasticache.rs b/proxy/src/redis/elasticache.rs index 58e3c889a7..6f3b34d381 100644 --- a/proxy/src/redis/elasticache.rs +++ b/proxy/src/redis/elasticache.rs @@ -9,10 +9,12 @@ use aws_config::meta::region::RegionProviderChain; use aws_config::profile::ProfileFileCredentialsProvider; use aws_config::provider_config::ProviderConfig; use aws_config::web_identity_token::WebIdentityTokenCredentialsProvider; +use aws_credential_types::provider::error::CredentialsError; use aws_sdk_iam::config::ProvideCredentials; use aws_sigv4::http_request::{ - self, SignableBody, SignableRequest, SignatureLocation, SigningSettings, + self, 
SignableBody, SignableRequest, SignatureLocation, SigningError, SigningSettings, }; +use aws_sigv4::sign::v4::signing_params::BuildError; use tracing::info; #[derive(Debug)] @@ -40,6 +42,18 @@ impl AWSIRSAConfig { } } +#[derive(thiserror::Error, Debug)] +pub enum CredentialsProviderError { + #[error(transparent)] + AwsCredentials(#[from] CredentialsError), + #[error(transparent)] + AwsSigv4Build(#[from] BuildError), + #[error(transparent)] + AwsSigv4Signing(#[from] SigningError), + #[error(transparent)] + Http(#[from] http::Error), +} + /// Credentials provider for AWS elasticache authentication. /// /// Official documentation: @@ -92,7 +106,9 @@ impl CredentialsProvider { }) } - pub(crate) async fn provide_credentials(&self) -> anyhow::Result<(String, String)> { + pub(crate) async fn provide_credentials( + &self, + ) -> Result<(String, String), CredentialsProviderError> { let aws_credentials = self .credentials_provider .provide_credentials() diff --git a/proxy/src/redis/kv_ops.rs b/proxy/src/redis/kv_ops.rs index cfdbc21839..d1e97b6b09 100644 --- a/proxy/src/redis/kv_ops.rs +++ b/proxy/src/redis/kv_ops.rs @@ -2,9 +2,18 @@ use std::time::Duration; use futures::FutureExt; use redis::aio::ConnectionLike; -use redis::{Cmd, FromRedisValue, Pipeline, RedisResult}; +use redis::{Cmd, FromRedisValue, Pipeline, RedisError, RedisResult}; use super::connection_with_credentials_provider::ConnectionWithCredentialsProvider; +use crate::redis::connection_with_credentials_provider::ConnectionProviderError; + +#[derive(thiserror::Error, Debug)] +pub enum RedisKVClientError { + #[error(transparent)] + Redis(#[from] RedisError), + #[error(transparent)] + ConnectionProvider(#[from] ConnectionProviderError), +} pub struct RedisKVClient { client: ConnectionWithCredentialsProvider, @@ -32,12 +41,13 @@ impl RedisKVClient { Self { client } } - pub async fn try_connect(&mut self) -> anyhow::Result<()> { + pub async fn try_connect(&mut self) -> Result<(), RedisKVClientError> { self.client
.connect() .boxed() .await .inspect_err(|e| tracing::error!("failed to connect to redis: {e}")) + .map_err(Into::into) } pub(crate) fn credentials_refreshed(&self) -> bool { @@ -47,7 +57,7 @@ impl RedisKVClient { pub(crate) async fn query( &mut self, q: &impl Queryable, - ) -> anyhow::Result { + ) -> Result { let e = match q.query(&mut self.client).await { Ok(t) => return Ok(t), Err(e) => e, diff --git a/proxy/src/redis/notifications.rs b/proxy/src/redis/notifications.rs index 973a4c5b02..a6d376562b 100644 --- a/proxy/src/redis/notifications.rs +++ b/proxy/src/redis/notifications.rs @@ -265,10 +265,7 @@ async fn handle_messages( return Ok(()); } let mut conn = match try_connect(&redis).await { - Ok(conn) => { - handler.cache.increment_active_listeners().await; - conn - } + Ok(conn) => conn, Err(e) => { tracing::error!( "failed to connect to redis: {e}, will try to reconnect in {RECONNECT_TIMEOUT:#?}" @@ -287,11 +284,9 @@ async fn handle_messages( } } if cancellation_token.is_cancelled() { - handler.cache.decrement_active_listeners().await; return Ok(()); } } - handler.cache.decrement_active_listeners().await; } } diff --git a/proxy/src/serverless/json.rs b/proxy/src/serverless/json.rs index 2e67d07079..ef7c8a4d82 100644 --- a/proxy/src/serverless/json.rs +++ b/proxy/src/serverless/json.rs @@ -1,6 +1,7 @@ +use json::{ListSer, ObjectSer, ValueSer}; use postgres_client::Row; use postgres_client::types::{Kind, Type}; -use serde_json::{Map, Value}; +use serde_json::Value; // // Convert json non-string types to strings, so that they can be passed to Postgres @@ -74,44 +75,40 @@ pub(crate) enum JsonConversionError { UnbalancedString, } -enum OutputMode { - Array(Vec), - Object(Map), +enum OutputMode<'a> { + Array(ListSer<'a>), + Object(ObjectSer<'a>), } -impl OutputMode { - fn key(&mut self, key: &str) -> &mut Value { +impl OutputMode<'_> { + fn key(&mut self, key: &str) -> ValueSer<'_> { match self { - OutputMode::Array(values) => push_entry(values, Value::Null), - 
OutputMode::Object(map) => map.entry(key.to_string()).or_insert(Value::Null), + OutputMode::Array(values) => values.entry(), + OutputMode::Object(map) => map.key(key), } } - fn finish(self) -> Value { + fn finish(self) { match self { - OutputMode::Array(values) => Value::Array(values), - OutputMode::Object(map) => Value::Object(map), + OutputMode::Array(values) => values.finish(), + OutputMode::Object(map) => map.finish(), } } } -fn push_entry(arr: &mut Vec, t: T) -> &mut T { - arr.push(t); - arr.last_mut().expect("a value was just inserted") -} - // // Convert postgres row with text-encoded values to JSON object // pub(crate) fn pg_text_row_to_json( + output: ValueSer, row: &Row, raw_output: bool, array_mode: bool, -) -> Result { +) -> Result<(), JsonConversionError> { let mut entries = if array_mode { - OutputMode::Array(Vec::with_capacity(row.columns().len())) + OutputMode::Array(output.list()) } else { - OutputMode::Object(Map::with_capacity(row.columns().len())) + OutputMode::Object(output.object()) }; for (i, column) in row.columns().iter().enumerate() { @@ -120,53 +117,48 @@ pub(crate) fn pg_text_row_to_json( let value = entries.key(column.name()); match pg_value { - Some(v) if raw_output => *value = Value::String(v.to_string()), + Some(v) if raw_output => value.value(v), Some(v) => pg_text_to_json(value, v, column.type_())?, - None => *value = Value::Null, + None => value.value(json::Null), } } - Ok(entries.finish()) + entries.finish(); + Ok(()) } // // Convert postgres text-encoded value to JSON value // -fn pg_text_to_json( - output: &mut Value, - val: &str, - pg_type: &Type, -) -> Result<(), JsonConversionError> { +fn pg_text_to_json(output: ValueSer, val: &str, pg_type: &Type) -> Result<(), JsonConversionError> { if let Kind::Array(elem_type) = pg_type.kind() { // todo: we should fetch this from postgres. 
let delimiter = ','; - let mut array = vec![]; - pg_array_parse(&mut array, val, elem_type, delimiter)?; - *output = Value::Array(array); + json::value_as_list!(|output| pg_array_parse(output, val, elem_type, delimiter)?); return Ok(()); } match *pg_type { - Type::BOOL => *output = Value::Bool(val == "t"), + Type::BOOL => output.value(val == "t"), Type::INT2 | Type::INT4 => { let val = val.parse::()?; - *output = Value::Number(serde_json::Number::from(val)); + output.value(val); } Type::FLOAT4 | Type::FLOAT8 => { let fval = val.parse::()?; - let num = serde_json::Number::from_f64(fval); - if let Some(num) = num { - *output = Value::Number(num); + if fval.is_finite() { + output.value(fval); } else { // Pass Nan, Inf, -Inf as strings // JS JSON.stringify() does converts them to null, but we // want to preserve them, so we pass them as strings - *output = Value::String(val.to_string()); + output.value(val); } } - Type::JSON | Type::JSONB => *output = serde_json::from_str(val)?, - _ => *output = Value::String(val.to_string()), + // we assume that the string value is valid json. + Type::JSON | Type::JSONB => output.write_raw_json(val.as_bytes()), + _ => output.value(val), } Ok(()) @@ -192,7 +184,7 @@ fn pg_text_to_json( /// gets its own level of curly braces, and delimiters must be written between adjacent /// curly-braced entities of the same level. fn pg_array_parse( - elements: &mut Vec, + elements: &mut ListSer, mut pg_array: &str, elem: &Type, delim: char, @@ -221,7 +213,7 @@ fn pg_array_parse( /// reads a single array from the `pg_array` string and pushes each values to `elements`. /// returns the rest of the `pg_array` string that was not read. 
fn pg_array_parse_inner<'a>( - elements: &mut Vec, + elements: &mut ListSer, mut pg_array: &'a str, elem: &Type, delim: char, @@ -234,7 +226,7 @@ fn pg_array_parse_inner<'a>( let mut q = String::new(); loop { - let value = push_entry(elements, Value::Null); + let value = elements.entry(); pg_array = pg_array_parse_item(value, &mut q, pg_array, elem, delim)?; // check for separator. @@ -260,7 +252,7 @@ fn pg_array_parse_inner<'a>( /// /// `quoted` is a scratch allocation that has no defined output. fn pg_array_parse_item<'a>( - output: &mut Value, + output: ValueSer, quoted: &mut String, mut pg_array: &'a str, elem: &Type, @@ -276,9 +268,8 @@ fn pg_array_parse_item<'a>( if pg_array.starts_with('{') { // nested array. - let mut nested = vec![]; - pg_array = pg_array_parse_inner(&mut nested, pg_array, elem, delim)?; - *output = Value::Array(nested); + pg_array = + json::value_as_list!(|output| pg_array_parse_inner(output, pg_array, elem, delim))?; return Ok(pg_array); } @@ -306,7 +297,7 @@ fn pg_array_parse_item<'a>( // we might have an item string: // check for null if item == "NULL" { - *output = Value::Null; + output.value(json::Null); } else { pg_text_to_json(output, item, elem)?; } @@ -440,15 +431,15 @@ mod tests { } fn pg_text_to_json(val: &str, pg_type: &Type) -> Value { - let mut v = Value::Null; - super::pg_text_to_json(&mut v, val, pg_type).unwrap(); - v + let output = json::value_to_string!(|v| super::pg_text_to_json(v, val, pg_type).unwrap()); + serde_json::from_str(&output).unwrap() } fn pg_array_parse(pg_array: &str, pg_type: &Type) -> Value { - let mut array = vec![]; - super::pg_array_parse(&mut array, pg_array, pg_type, ',').unwrap(); - Value::Array(array) + let output = json::value_to_string!(|v| json::value_as_list!(|v| { + super::pg_array_parse(v, pg_array, pg_type, ',').unwrap(); + })); + serde_json::from_str(&output).unwrap() } #[test] diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs index 
7a718d0280..8a14f804b6 100644 --- a/proxy/src/serverless/sql_over_http.rs +++ b/proxy/src/serverless/sql_over_http.rs @@ -14,10 +14,7 @@ use hyper::http::{HeaderName, HeaderValue}; use hyper::{Request, Response, StatusCode, header}; use indexmap::IndexMap; use postgres_client::error::{DbError, ErrorPosition, SqlState}; -use postgres_client::{ - GenericClient, IsolationLevel, NoTls, ReadyForQueryStatus, RowStream, Transaction, -}; -use serde::Serialize; +use postgres_client::{GenericClient, IsolationLevel, NoTls, ReadyForQueryStatus, Transaction}; use serde_json::Value; use serde_json::value::RawValue; use tokio::time::{self, Instant}; @@ -687,32 +684,21 @@ impl QueryData { let (inner, mut discard) = client.inner(); let cancel_token = inner.cancel_token(); - match select( + let mut json_buf = vec![]; + + let batch_result = match select( pin!(query_to_json( config, &mut *inner, self, - &mut 0, + json::ValueSer::new(&mut json_buf), parsed_headers )), pin!(cancel.cancelled()), ) .await { - // The query successfully completed. - Either::Left((Ok((status, results)), __not_yet_cancelled)) => { - discard.check_idle(status); - - let json_output = - serde_json::to_string(&results).expect("json serialization should not fail"); - Ok(json_output) - } - // The query failed with an error - Either::Left((Err(e), __not_yet_cancelled)) => { - discard.discard(); - Err(e) - } - // The query was cancelled. + Either::Left((res, __not_yet_cancelled)) => res, Either::Right((_cancelled, query)) => { tracing::info!("cancelling query"); if let Err(err) = cancel_token.cancel_query(NoTls).await { @@ -721,13 +707,7 @@ impl QueryData { // wait for the query cancellation match time::timeout(time::Duration::from_millis(100), query).await { // query successed before it was cancelled. 
- Ok(Ok((status, results))) => { - discard.check_idle(status); - - let json_output = serde_json::to_string(&results) - .expect("json serialization should not fail"); - Ok(json_output) - } + Ok(Ok(status)) => Ok(status), // query failed or was cancelled. Ok(Err(error)) => { let db_error = match &error { @@ -743,14 +723,29 @@ impl QueryData { discard.discard(); } - Err(SqlOverHttpError::Cancelled(SqlOverHttpCancel::Postgres)) + return Err(SqlOverHttpError::Cancelled(SqlOverHttpCancel::Postgres)); } Err(_timeout) => { discard.discard(); - Err(SqlOverHttpError::Cancelled(SqlOverHttpCancel::Postgres)) + return Err(SqlOverHttpError::Cancelled(SqlOverHttpCancel::Postgres)); } } } + }; + + match batch_result { + // The query successfully completed. + Ok(status) => { + discard.check_idle(status); + + let json_output = String::from_utf8(json_buf).expect("json should be valid utf8"); + Ok(json_output) + } + // The query failed with an error + Err(e) => { + discard.discard(); + Err(e) + } } } } @@ -787,7 +782,7 @@ impl BatchQueryData { }) .map_err(SqlOverHttpError::Postgres)?; - let json_output = match query_batch( + let json_output = match query_batch_to_json( config, cancel.child_token(), &mut transaction, @@ -845,24 +840,21 @@ async fn query_batch( transaction: &mut Transaction<'_>, queries: BatchQueryData, parsed_headers: HttpHeaders, -) -> Result { - let mut results = Vec::with_capacity(queries.queries.len()); - let mut current_size = 0; + results: &mut json::ListSer<'_>, +) -> Result<(), SqlOverHttpError> { for stmt in queries.queries { let query = pin!(query_to_json( config, transaction, stmt, - &mut current_size, + results.entry(), parsed_headers, )); let cancelled = pin!(cancel.cancelled()); let res = select(query, cancelled).await; match res { // TODO: maybe we should check that the transaction bit is set here - Either::Left((Ok((_, values)), _cancelled)) => { - results.push(values); - } + Either::Left((Ok(_), _cancelled)) => {} Either::Left((Err(e), _cancelled)) => 
{ return Err(e); } @@ -872,8 +864,22 @@ async fn query_batch( } } - let results = json!({ "results": results }); - let json_output = serde_json::to_string(&results).expect("json serialization should not fail"); + Ok(()) +} + +async fn query_batch_to_json( + config: &'static HttpConfig, + cancel: CancellationToken, + tx: &mut Transaction<'_>, + queries: BatchQueryData, + headers: HttpHeaders, +) -> Result { + let json_output = json::value_to_string!(|obj| json::value_as_object!(|obj| { + let results = obj.key("results"); + json::value_as_list!(|results| { + query_batch(config, cancel, tx, queries, headers, results).await?; + }); + })); Ok(json_output) } @@ -882,54 +888,54 @@ async fn query_to_json( config: &'static HttpConfig, client: &mut T, data: QueryData, - current_size: &mut usize, + output: json::ValueSer<'_>, parsed_headers: HttpHeaders, -) -> Result<(ReadyForQueryStatus, impl Serialize + use), SqlOverHttpError> { +) -> Result { let query_start = Instant::now(); - let query_params = data.params; + let mut output = json::ObjectSer::new(output); let mut row_stream = client - .query_raw_txt(&data.query, query_params) + .query_raw_txt(&data.query, data.params) .await .map_err(SqlOverHttpError::Postgres)?; let query_acknowledged = Instant::now(); - let columns_len = row_stream.statement.columns().len(); - let mut fields = Vec::with_capacity(columns_len); - + let mut json_fields = output.key("fields").list(); for c in row_stream.statement.columns() { - fields.push(json!({ - "name": c.name().to_owned(), - "dataTypeID": c.type_().oid(), - "tableID": c.table_oid(), - "columnID": c.column_id(), - "dataTypeSize": c.type_size(), - "dataTypeModifier": c.type_modifier(), - "format": "text", - })); + let json_field = json_fields.entry(); + json::value_as_object!(|json_field| { + json_field.entry("name", c.name()); + json_field.entry("dataTypeID", c.type_().oid()); + json_field.entry("tableID", c.table_oid()); + json_field.entry("columnID", c.column_id()); + 
json_field.entry("dataTypeSize", c.type_size()); + json_field.entry("dataTypeModifier", c.type_modifier()); + json_field.entry("format", "text"); + }); } + json_fields.finish(); - let raw_output = parsed_headers.raw_output; let array_mode = data.array_mode.unwrap_or(parsed_headers.default_array_mode); + let raw_output = parsed_headers.raw_output; // Manually drain the stream into a vector to leave row_stream hanging // around to get a command tag. Also check that the response is not too // big. - let mut rows = Vec::new(); + let mut rows = 0; + let mut json_rows = output.key("rows").list(); while let Some(row) = row_stream.next().await { let row = row.map_err(SqlOverHttpError::Postgres)?; - *current_size += row.body_len(); // we don't have a streaming response support yet so this is to prevent OOM // from a malicious query (eg a cross join) - if *current_size > config.max_response_size_bytes { + if json_rows.as_buffer().len() > config.max_response_size_bytes { return Err(SqlOverHttpError::ResponseTooLarge( config.max_response_size_bytes, )); } - let row = pg_text_row_to_json(&row, raw_output, array_mode)?; - rows.push(row); + pg_text_row_to_json(json_rows.entry(), &row, raw_output, array_mode)?; + rows += 1; // assumption: parsing pg text and converting to json takes CPU time. // let's assume it is slightly expensive, so we should consume some cooperative budget. @@ -937,16 +943,14 @@ async fn query_to_json( // of rows and never hit the tokio mpsc for a long time (although unlikely). tokio::task::consume_budget().await; } + json_rows.finish(); let query_resp_end = Instant::now(); - let RowStream { - command_tag, - status: ready, - .. 
- } = row_stream; + + let ready = row_stream.status; // grab the command tag and number of rows affected - let command_tag = command_tag.unwrap_or_default(); + let command_tag = row_stream.command_tag.unwrap_or_default(); let mut command_tag_split = command_tag.split(' '); let command_tag_name = command_tag_split.next().unwrap_or_default(); let command_tag_count = if command_tag_name == "INSERT" { @@ -959,7 +963,7 @@ async fn query_to_json( .and_then(|s| s.parse::().ok()); info!( - rows = rows.len(), + rows, ?ready, command_tag, acknowledgement = ?(query_acknowledged - query_start), @@ -967,16 +971,12 @@ async fn query_to_json( "finished executing query" ); - // Resulting JSON format is based on the format of node-postgres result. - let results = json!({ - "command": command_tag_name.to_string(), - "rowCount": command_tag_count, - "rows": rows, - "fields": fields, - "rowAsArray": array_mode, - }); + output.entry("command", command_tag_name); + output.entry("rowCount", command_tag_count); + output.entry("rowAsArray", array_mode); - Ok((ready, results)) + output.finish(); + Ok(ready) } enum Client { diff --git a/proxy/src/types.rs b/proxy/src/types.rs index d5952d1d8b..43b8dc5b29 100644 --- a/proxy/src/types.rs +++ b/proxy/src/types.rs @@ -107,13 +107,3 @@ smol_str_wrapper!(DbName); // postgres hostname, will likely be a port:ip addr smol_str_wrapper!(Host); - -// Endpoints are a bit tricky. Rare they might be branches or projects. 
-impl EndpointId { - pub(crate) fn is_endpoint(&self) -> bool { - self.0.starts_with("ep-") - } - pub(crate) fn is_branch(&self) -> bool { - self.0.starts_with("br-") - } -} diff --git a/proxy/src/util.rs b/proxy/src/util.rs index 7fc2d9fbdb..0291216d94 100644 --- a/proxy/src/util.rs +++ b/proxy/src/util.rs @@ -7,8 +7,16 @@ pub async fn run_until_cancelled( f: F, cancellation_token: &CancellationToken, ) -> Option { - match select(pin!(f), pin!(cancellation_token.cancelled())).await { - Either::Left((f, _)) => Some(f), - Either::Right(((), _)) => None, + run_until(f, cancellation_token.cancelled()).await.ok() +} + +/// Runs the future `f` unless interrupted by future `condition`. +pub async fn run_until( + f: F1, + condition: F2, +) -> Result { + match select(pin!(f), pin!(condition)).await { + Either::Left((f1, _)) => Ok(f1), + Either::Right((f2, _)) => Err(f2), } } diff --git a/pyproject.toml b/pyproject.toml index e7e314d144..e992e81fe7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,7 @@ psutil = "^5.9.4" types-psutil = "^5.9.5.12" types-toml = "^0.10.8.6" pytest-httpserver = "^1.0.8" -aiohttp = "3.10.11" +aiohttp = "3.12.14" pytest-rerunfailures = "^15.0" types-pytest-lazy-fixture = "^0.6.3.3" pytest-split = "^0.8.1" diff --git a/safekeeper/Cargo.toml b/safekeeper/Cargo.toml index 6955028c73..56822b5c25 100644 --- a/safekeeper/Cargo.toml +++ b/safekeeper/Cargo.toml @@ -58,6 +58,7 @@ metrics.workspace = true pem.workspace = true postgres_backend.workspace = true postgres_ffi.workspace = true +postgres_ffi_types.workspace = true postgres_versioninfo.workspace = true pq_proto.workspace = true remote_storage.workspace = true @@ -71,6 +72,7 @@ http-utils.workspace = true utils.workspace = true wal_decoder.workspace = true env_logger.workspace = true +nix.workspace = true workspace_hack.workspace = true diff --git a/safekeeper/client/src/mgmt_api.rs b/safekeeper/client/src/mgmt_api.rs index b4bb193a4b..3c8db3029e 100644 --- 
a/safekeeper/client/src/mgmt_api.rs +++ b/safekeeper/client/src/mgmt_api.rs @@ -6,10 +6,10 @@ use std::error::Error as _; use http_utils::error::HttpErrorBody; -use reqwest::{IntoUrl, Method, StatusCode}; +use reqwest::{IntoUrl, Method, Response, StatusCode}; use safekeeper_api::models::{ self, PullTimelineRequest, PullTimelineResponse, SafekeeperStatus, SafekeeperUtilization, - TimelineCreateRequest, TimelineStatus, + TimelineCreateRequest, }; use utils::id::{NodeId, TenantId, TimelineId}; use utils::logging::SecretString; @@ -161,13 +161,12 @@ impl Client { &self, tenant_id: TenantId, timeline_id: TimelineId, - ) -> Result { + ) -> Result { let uri = format!( "{}/v1/tenant/{}/timeline/{}", self.mgmt_api_endpoint, tenant_id, timeline_id ); - let resp = self.get(&uri).await?; - resp.json().await.map_err(Error::ReceiveBody) + self.get(&uri).await } pub async fn snapshot( diff --git a/safekeeper/src/auth.rs b/safekeeper/src/auth.rs index 81c79fae30..008f903a89 100644 --- a/safekeeper/src/auth.rs +++ b/safekeeper/src/auth.rs @@ -21,7 +21,8 @@ pub fn check_permission(claims: &Claims, tenant_id: Option) -> Result< | Scope::GenerationsApi | Scope::Infra | Scope::Scrubber - | Scope::ControllerPeer, + | Scope::ControllerPeer + | Scope::TenantEndpoint, _, ) => Err(AuthError( format!( diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 8fda625817..2ec541b6f0 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -17,12 +17,14 @@ use http_utils::tls_certs::ReloadingCertificateResolver; use metrics::set_build_info_metric; use remote_storage::RemoteStorageConfig; use safekeeper::defaults::{ - DEFAULT_CONTROL_FILE_SAVE_INTERVAL, DEFAULT_EVICTION_MIN_RESIDENT, DEFAULT_HEARTBEAT_TIMEOUT, - DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_MAX_OFFLOADER_LAG_BYTES, + DEFAULT_CONTROL_FILE_SAVE_INTERVAL, DEFAULT_EVICTION_MIN_RESIDENT, + DEFAULT_GLOBAL_DISK_CHECK_INTERVAL, DEFAULT_HEARTBEAT_TIMEOUT, DEFAULT_HTTP_LISTEN_ADDR, + 
DEFAULT_MAX_GLOBAL_DISK_USAGE_RATIO, DEFAULT_MAX_OFFLOADER_LAG_BYTES, DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES, DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES, DEFAULT_PARTIAL_BACKUP_CONCURRENCY, DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, DEFAULT_SSL_CERT_FILE, DEFAULT_SSL_CERT_RELOAD_PERIOD, DEFAULT_SSL_KEY_FILE, }; +use safekeeper::hadron; use safekeeper::wal_backup::WalBackup; use safekeeper::{ BACKGROUND_RUNTIME, BROKER_RUNTIME, GlobalTimelines, HTTP_RUNTIME, SafeKeeperConf, @@ -37,9 +39,16 @@ use tracing::*; use utils::auth::{JwtAuth, Scope, SwappableJwtAuth}; use utils::id::NodeId; use utils::logging::{self, LogFormat, SecretString}; +use utils::metrics_collector::{METRICS_COLLECTION_INTERVAL, METRICS_COLLECTOR}; use utils::sentry_init::init_sentry; use utils::{pid_file, project_build_tag, project_git_version, tcp_listener}; +use safekeeper::hadron::{ + GLOBAL_DISK_LIMIT_EXCEEDED, get_filesystem_capacity, get_filesystem_usage, +}; +use safekeeper::metrics::GLOBAL_DISK_UTIL_CHECK_SECONDS; +use std::sync::atomic::Ordering; + #[global_allocator] static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; @@ -243,9 +252,27 @@ struct Args { #[arg(long)] enable_tls_wal_service_api: bool, + /// Controls whether to collect all metrics on each scrape or to return potentially stale + /// results. + #[arg(long, default_value_t = true)] + force_metric_collection_on_scrape: bool, + /// Run in development mode (disables security checks) #[arg(long, help = "Run in development mode (disables security checks)")] dev: bool, + /* BEGIN_HADRON */ + #[arg(long)] + enable_pull_timeline_on_startup: bool, + /// How often to scan entire data-dir for total disk usage + #[arg(long, value_parser=humantime::parse_duration, default_value = DEFAULT_GLOBAL_DISK_CHECK_INTERVAL)] + global_disk_check_interval: Duration, + /// The portion of the filesystem capacity that can be used by all timelines. 
+ /// A circuit breaker will trip and reject all WAL writes if the total usage + /// exceeds this ratio. + /// Set to 0 to disable the global disk usage limit. + #[arg(long, default_value_t = DEFAULT_MAX_GLOBAL_DISK_USAGE_RATIO)] + max_global_disk_usage_ratio: f64, + /* END_HADRON */ } // Like PathBufValueParser, but allows empty string. @@ -428,6 +455,14 @@ async fn main() -> anyhow::Result<()> { ssl_ca_certs, use_https_safekeeper_api: args.use_https_safekeeper_api, enable_tls_wal_service_api: args.enable_tls_wal_service_api, + force_metric_collection_on_scrape: args.force_metric_collection_on_scrape, + /* BEGIN_HADRON */ + advertise_pg_addr_tenant_only: None, + enable_pull_timeline_on_startup: args.enable_pull_timeline_on_startup, + hcc_base_url: None, + global_disk_check_interval: args.global_disk_check_interval, + max_global_disk_usage_ratio: args.max_global_disk_usage_ratio, + /* END_HADRON */ }); // initialize sentry if SENTRY_DSN is provided @@ -522,6 +557,20 @@ async fn start_safekeeper(conf: Arc) -> Result<()> { // Load all timelines from disk to memory. global_timelines.init().await?; + /* BEGIN_HADRON */ + if conf.enable_pull_timeline_on_startup && global_timelines.timelines_count() == 0 { + match hadron::hcc_pull_timelines(&conf, global_timelines.clone()).await { + Ok(_) => { + info!("Successfully pulled all timelines from peer safekeepers"); + } + Err(e) => { + error!("Failed to pull timelines from peer safekeepers: {:?}", e); + return Err(e); + } + } + } + /* END_HADRON */ + // Run everything in current thread rt, if asked. if conf.current_thread_runtime { info!("running in current thread runtime"); @@ -587,6 +636,49 @@ async fn start_safekeeper(conf: Arc) -> Result<()> { .map(|res| ("Timeline map housekeeping".to_owned(), res)); tasks_handles.push(Box::pin(timeline_housekeeping_handle)); + /* BEGIN_HADRON */ + // Spawn global disk usage watcher task, if a global disk usage limit is specified. 
+ let interval = conf.global_disk_check_interval; + let data_dir = conf.workdir.clone(); + // Use the safekeeper data directory to compute filesystem capacity. This only runs once on startup, so + // there is little point to continue if we can't have the proper protections in place. + let fs_capacity_bytes = get_filesystem_capacity(data_dir.as_std_path()) + .expect("Failed to get filesystem capacity for data directory"); + let limit: u64 = (conf.max_global_disk_usage_ratio * fs_capacity_bytes as f64) as u64; + if limit > 0 { + let disk_usage_watch_handle = BACKGROUND_RUNTIME + .handle() + .spawn(async move { + // Use Tokio interval to preserve fixed cadence between filesystem utilization checks + let mut ticker = tokio::time::interval(interval); + ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + + loop { + ticker.tick().await; + let data_dir_clone = data_dir.clone(); + let check_start = Instant::now(); + + let usage = tokio::task::spawn_blocking(move || { + get_filesystem_usage(data_dir_clone.as_std_path()) + }) + .await + .unwrap_or(0); + + let elapsed = check_start.elapsed().as_secs_f64(); + GLOBAL_DISK_UTIL_CHECK_SECONDS.observe(elapsed); + if usage > limit { + warn!( + "Global disk usage exceeded limit. 
Usage: {} bytes, limit: {} bytes", + usage, limit + ); + } + GLOBAL_DISK_LIMIT_EXCEEDED.store(usage > limit, Ordering::Relaxed); + } + }) + .map(|res| ("Global disk usage watcher".to_string(), res)); + tasks_handles.push(Box::pin(disk_usage_watch_handle)); + } + /* END_HADRON */ if let Some(pg_listener_tenant_only) = pg_listener_tenant_only { let wal_service_handle = current_thread_rt .as_ref() @@ -640,6 +732,26 @@ async fn start_safekeeper(conf: Arc) -> Result<()> { .map(|res| ("broker main".to_owned(), res)); tasks_handles.push(Box::pin(broker_task_handle)); + /* BEGIN_HADRON */ + if conf.force_metric_collection_on_scrape { + let metrics_handle = current_thread_rt + .as_ref() + .unwrap_or_else(|| BACKGROUND_RUNTIME.handle()) + .spawn(async move { + let mut interval: tokio::time::Interval = + tokio::time::interval(METRICS_COLLECTION_INTERVAL); + loop { + interval.tick().await; + tokio::task::spawn_blocking(|| { + METRICS_COLLECTOR.run_once(true); + }); + } + }) + .map(|res| ("broker main".to_owned(), res)); + tasks_handles.push(Box::pin(metrics_handle)); + } + /* END_HADRON */ + set_build_info_metric(GIT_VERSION, BUILD_TAG); // TODO: update tokio-stream, convert to real async Stream with diff --git a/safekeeper/src/hadron.rs b/safekeeper/src/hadron.rs new file mode 100644 index 0000000000..8c6a912166 --- /dev/null +++ b/safekeeper/src/hadron.rs @@ -0,0 +1,457 @@ +use once_cell::sync::Lazy; +use pem::Pem; +use safekeeper_api::models::PullTimelineRequest; +use std::{ + collections::HashMap, env::VarError, net::IpAddr, sync::Arc, sync::atomic::AtomicBool, + time::Duration, +}; +use tokio::time::sleep; +use tokio_util::sync::CancellationToken; +use url::Url; +use utils::{backoff, critical_timeline, id::TenantTimelineId, ip_address}; + +use anyhow::{Result, anyhow}; + +use pageserver_api::controller_api::{ + AvailabilityZone, NodeRegisterRequest, SafekeeperTimeline, SafekeeperTimelinesResponse, +}; + +use crate::{ + GlobalTimelines, SafeKeeperConf, + metrics::{ + 
SK_RECOVERY_PULL_TIMELINE_ERRORS, SK_RECOVERY_PULL_TIMELINE_OKS, + SK_RECOVERY_PULL_TIMELINE_SECONDS, SK_RECOVERY_PULL_TIMELINES_SECONDS, + }, + pull_timeline, + timelines_global_map::DeleteOrExclude, +}; + +// Extract information in the SafeKeeperConf to build a NodeRegisterRequest used to register the safekeeper with the HCC. +fn build_node_registeration_request( + conf: &SafeKeeperConf, + node_ip_addr: Option, +) -> Result { + let advertise_pg_addr_with_port = conf + .advertise_pg_addr_tenant_only + .as_deref() + .expect("advertise_pg_addr_tenant_only is required to register with HCC"); + + // Extract host/port from the string. + let (advertise_host_addr, pg_port_str) = advertise_pg_addr_with_port.split_at( + advertise_pg_addr_with_port + .rfind(':') + .ok_or(anyhow::anyhow!("Invalid advertise_pg_addr"))?, + ); + // Need the `[1..]` to remove the leading ':'. + let pg_port = pg_port_str[1..] + .parse::() + .map_err(|e| anyhow::anyhow!("Cannot parse PG port: {}", e))?; + + let (_, http_port_str) = conf.listen_http_addr.split_at( + conf.listen_http_addr + .rfind(':') + .ok_or(anyhow::anyhow!("Invalid listen_http_addr"))?, + ); + let http_port = http_port_str[1..] + .parse::() + .map_err(|e| anyhow::anyhow!("Cannot parse HTTP port: {}", e))?; + + Ok(NodeRegisterRequest { + node_id: conf.my_id, + listen_pg_addr: advertise_host_addr.to_string(), + listen_pg_port: pg_port, + listen_http_addr: advertise_host_addr.to_string(), + listen_http_port: http_port, + node_ip_addr, + availability_zone_id: AvailabilityZone("todo".to_string()), + listen_grpc_addr: None, + listen_grpc_port: None, + listen_https_port: None, + }) +} + +// Retrieve the JWT token used for authenticating with HCC from the environment variable. +// Returns None if the token cannot be retrieved. 
+fn get_hcc_auth_token() -> Option { + match std::env::var("HCC_AUTH_TOKEN") { + Ok(v) => { + tracing::info!("Loaded JWT token for authentication with HCC"); + Some(v) + } + Err(VarError::NotPresent) => { + tracing::info!("No JWT token for authentication with HCC detected"); + None + } + Err(_) => { + tracing::info!( + "Failed to either load to detect non-present HCC_AUTH_TOKEN environment variable" + ); + None + } + } +} + +async fn send_safekeeper_register_request( + request_url: &Url, + auth_token: &Option, + request: &NodeRegisterRequest, +) -> Result<()> { + let client = reqwest::Client::new(); + let mut req_builder = client + .post(request_url.clone()) + .header("Content-Type", "application/json"); + if let Some(token) = auth_token { + req_builder = req_builder.bearer_auth(token); + } + req_builder + .json(&request) + .send() + .await? + .error_for_status()?; + Ok(()) +} + +/// Registers this safe keeper with the HCC. +pub async fn register(conf: &SafeKeeperConf) -> Result<()> { + match conf.hcc_base_url.as_ref() { + None => { + tracing::info!("HCC base URL is not set, skipping registration"); + Ok(()) + } + Some(hcc_base_url) => { + // The following operations acquiring the auth token and the node IP address both read environment + // variables. It's fine for now as this `register()` function is only called once during startup. + // If we start to talk to HCC more regularly in the safekeeper we should probably consider + // refactoring things into a "HadronClusterCoordinatorClient" struct. 
+ let auth_token = get_hcc_auth_token(); + let node_ip_addr = + ip_address::read_node_ip_addr_from_env().expect("Error reading node IP address."); + + let request = build_node_registeration_request(conf, node_ip_addr)?; + let cancel = CancellationToken::new(); + let request_url = hcc_base_url.clone().join("/hadron-internal/v1/sk")?; + + backoff::retry( + || async { + send_safekeeper_register_request(&request_url, &auth_token, &request).await + }, + |_| false, + 3, + u32::MAX, + "Calling the HCC safekeeper register API", + &cancel, + ) + .await + .ok_or(anyhow::anyhow!( + "Error in forever retry loop. This error should never be surfaced." + ))? + } + } +} + +async fn safekeeper_list_timelines_request( + conf: &SafeKeeperConf, +) -> Result { + if conf.hcc_base_url.is_none() { + tracing::info!("HCC base URL is not set, skipping registration"); + return Err(anyhow::anyhow!("HCC base URL is not set")); + } + + // The following operations acquiring the auth token and the node IP address both read environment + // variables. It's fine for now as this `register()` function is only called once during startup. + // If we start to talk to HCC more regularly in the safekeeper we should probably consider + // refactoring things into a "HadronClusterCoordinatorClient" struct. + let auth_token = get_hcc_auth_token(); + let method = format!("/control/v1/safekeeper/{}/timelines", conf.my_id.0); + let request_url = conf.hcc_base_url.as_ref().unwrap().clone().join(&method)?; + + let client = reqwest::Client::new(); + let mut req_builder = client + .get(request_url.clone()) + .header("Content-Type", "application/json") + .query(&[("id", conf.my_id.0)]); + if let Some(token) = auth_token { + req_builder = req_builder.bearer_auth(token); + } + let response = req_builder + .send() + .await? + .error_for_status()? + .json::() + .await?; + Ok(response) +} + +// Returns true on success, false otherwise. 
+pub async fn hcc_pull_timeline( + timeline: SafekeeperTimeline, + conf: &SafeKeeperConf, + global_timelines: Arc, + nodeid_http: &HashMap, +) -> bool { + let mut request = PullTimelineRequest { + tenant_id: timeline.tenant_id, + timeline_id: timeline.timeline_id, + http_hosts: Vec::new(), + ignore_tombstone: None, + }; + for host in timeline.peers { + if host.0 == conf.my_id.0 { + continue; + } + if let Some(http_host) = nodeid_http.get(&host.0) { + request.http_hosts.push(http_host.clone()); + } + } + + let ca_certs = match conf + .ssl_ca_certs + .iter() + .map(Pem::contents) + .map(reqwest::Certificate::from_der) + .collect::, _>>() + { + Ok(result) => result, + Err(_) => { + return false; + } + }; + match pull_timeline::handle_request( + request, + conf.sk_auth_token.clone(), + ca_certs, + global_timelines.clone(), + true, + ) + .await + { + Ok(resp) => { + tracing::info!( + "Completed pulling tenant {} timeline {} from SK {:?}", + timeline.tenant_id, + timeline.timeline_id, + resp.safekeeper_host + ); + return true; + } + Err(e) => { + tracing::error!( + "Failed to pull tenant {} timeline {} from SK {}", + timeline.tenant_id, + timeline.timeline_id, + e + ); + + let ttid = TenantTimelineId { + tenant_id: timeline.tenant_id, + timeline_id: timeline.timeline_id, + }; + // Revert the failed timeline pull. + // Notice that not found timeline returns OK also. 
+ match global_timelines + .delete_or_exclude(&ttid, DeleteOrExclude::DeleteLocal) + .await + { + Ok(dr) => { + tracing::info!( + "Deleted tenant {} timeline {} DirExists: {}", + timeline.tenant_id, + timeline.timeline_id, + dr.dir_existed, + ); + } + Err(e) => { + tracing::error!( + "Failed to delete tenant {} timeline {} from global_timelines: {}", + timeline.tenant_id, + timeline.timeline_id, + e + ); + } + } + } + } + false +} + +pub async fn hcc_pull_timeline_till_success( + timeline: SafekeeperTimeline, + conf: &SafeKeeperConf, + global_timelines: Arc, + nodeid_http: &HashMap, +) { + const MAX_PULL_TIMELINE_RETRIES: u64 = 100; + for i in 0..MAX_PULL_TIMELINE_RETRIES { + if hcc_pull_timeline( + timeline.clone(), + conf, + global_timelines.clone(), + nodeid_http, + ) + .await + { + SK_RECOVERY_PULL_TIMELINE_OKS.inc(); + return; + } + tracing::error!( + "Failed to pull timeline {} from SK peers, retrying {}/{}", + timeline.timeline_id, + i + 1, + MAX_PULL_TIMELINE_RETRIES + ); + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + SK_RECOVERY_PULL_TIMELINE_ERRORS.inc(); +} + +pub async fn hcc_pull_timelines( + conf: &SafeKeeperConf, + global_timelines: Arc, +) -> Result<()> { + let _timer = SK_RECOVERY_PULL_TIMELINES_SECONDS.start_timer(); + tracing::info!("Start pulling timelines from SK peers"); + + let mut response = SafekeeperTimelinesResponse { + timelines: Vec::new(), + safekeeper_peers: Vec::new(), + }; + for i in 0..100 { + match safekeeper_list_timelines_request(conf).await { + Ok(timelines) => { + response = timelines; + } + Err(e) => { + tracing::error!("Failed to list timelines from HCC: {}", e); + if i == 99 { + return Err(e); + } + } + } + sleep(Duration::from_millis(100)).await; + } + + let mut nodeid_http = HashMap::new(); + for sk in response.safekeeper_peers { + nodeid_http.insert( + sk.node_id.0, + format!("http://{}:{}", sk.listen_http_addr, sk.http_port), + ); + } + tracing::info!("Received {} timelines from HCC", 
response.timelines.len()); + for timeline in response.timelines { + let _timer = SK_RECOVERY_PULL_TIMELINE_SECONDS + .with_label_values(&[ + &timeline.tenant_id.to_string(), + &timeline.timeline_id.to_string(), + ]) + .start_timer(); + hcc_pull_timeline_till_success(timeline, conf, global_timelines.clone(), &nodeid_http) + .await; + } + Ok(()) +} + +/// true if the last background scan found total usage > limit +pub static GLOBAL_DISK_LIMIT_EXCEEDED: Lazy = Lazy::new(|| AtomicBool::new(false)); + +/// Returns filesystem usage in bytes for the filesystem containing the given path. +// Need to suppress the clippy::unnecessary_cast warning because the casts on the block count and the +// block size are required on macOS (they are 32-bit integers on macOS, apparantly). +#[allow(clippy::unnecessary_cast)] +pub fn get_filesystem_usage(path: &std::path::Path) -> u64 { + // Allow overriding disk usage via failpoint for tests + fail::fail_point!("sk-global-disk-usage", |val| { + // val is Option; parse payload if present + val.and_then(|s| s.parse::().ok()).unwrap_or(0) + }); + + // Call statvfs(3) for filesystem usage + use nix::sys::statvfs::statvfs; + match statvfs(path) { + Ok(stat) => { + // fragment size (f_frsize) if non-zero else block size (f_bsize) + let frsize = stat.fragment_size(); + let blocksz = if frsize > 0 { + frsize + } else { + stat.block_size() + }; + // used blocks = total blocks - available blocks for unprivileged + let used_blocks = stat.blocks().saturating_sub(stat.blocks_available()); + used_blocks as u64 * blocksz as u64 + } + Err(e) => { + // The global disk usage watcher aren't associated with a tenant or timeline, so we just + // pass placeholder (all-zero) tenant and timeline IDs to the critical!() macro. 
+ let placeholder_ttid = TenantTimelineId::empty(); + critical_timeline!( + placeholder_ttid.tenant_id, + placeholder_ttid.timeline_id, + "Global disk usage watcher failed to read filesystem usage: {:?}", + e + ); + 0 + } + } +} + +/// Returns the total capacity of the current working directory's filesystem in bytes. +#[allow(clippy::unnecessary_cast)] +pub fn get_filesystem_capacity(path: &std::path::Path) -> Result { + // Call statvfs(3) for filesystem stats + use nix::sys::statvfs::statvfs; + match statvfs(path) { + Ok(stat) => { + // fragment size (f_frsize) if non-zero else block size (f_bsize) + let frsize = stat.fragment_size(); + let blocksz = if frsize > 0 { + frsize + } else { + stat.block_size() + }; + Ok(stat.blocks() as u64 * blocksz as u64) + } + Err(e) => Err(anyhow!("Failed to read filesystem capacity: {:?}", e)), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use utils::id::NodeId; + + #[test] + fn test_build_node_registeration_request() { + // Test that: + // 1. We always extract the host name and port used to register with the HCC from the + // `advertise_pg_addr` if it is set. + // 2. The correct ports are extracted from `advertise_pg_addr` and `listen_http_addr`. + let mut conf = SafeKeeperConf::dummy(); + conf.my_id = NodeId(1); + conf.advertise_pg_addr_tenant_only = + Some("safe-keeper-1.safe-keeper.hadron.svc.cluster.local:5454".to_string()); + // `listen_pg_addr` and `listen_pg_addr_tenant_only` are not used for node registration. Set them to a different + // host and port values and make sure that they don't show up in the node registration request. 
+ conf.listen_pg_addr = "0.0.0.0:5456".to_string(); + conf.listen_pg_addr_tenant_only = Some("0.0.0.0:5456".to_string()); + conf.listen_http_addr = "0.0.0.0:7676".to_string(); + let node_ip_addr: Option = Some("127.0.0.1".parse().unwrap()); + + let request = build_node_registeration_request(&conf, node_ip_addr).unwrap(); + assert_eq!(request.node_id, NodeId(1)); + assert_eq!( + request.listen_pg_addr, + "safe-keeper-1.safe-keeper.hadron.svc.cluster.local" + ); + assert_eq!(request.listen_pg_port, 5454); + assert_eq!( + request.listen_http_addr, + "safe-keeper-1.safe-keeper.hadron.svc.cluster.local" + ); + assert_eq!(request.listen_http_port, 7676); + assert_eq!( + request.node_ip_addr, + Some(IpAddr::V4("127.0.0.1".parse().unwrap())) + ); + } +} diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index 384c582678..c9d8e7d3b0 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -33,11 +33,13 @@ use utils::id::{TenantId, TenantTimelineId, TimelineId}; use utils::lsn::Lsn; use crate::debug_dump::TimelineDigestRequest; +use crate::hadron::{get_filesystem_capacity, get_filesystem_usage}; use crate::safekeeper::TermLsn; use crate::timelines_global_map::DeleteOrExclude; use crate::{ GlobalTimelines, SafeKeeperConf, copy_timeline, debug_dump, patch_control_file, pull_timeline, }; +use serde_json::json; /// Healthcheck handler. async fn status_handler(request: Request) -> Result, ApiError> { @@ -127,6 +129,21 @@ async fn utilization_handler(request: Request) -> Result, A json_response(StatusCode::OK, utilization) } +/// Returns filesystem capacity and current utilization for the safekeeper data directory. 
+async fn filesystem_usage_handler(request: Request) -> Result, ApiError> { + check_permission(&request, None)?; + let conf = get_conf(&request); + let path = conf.workdir.as_std_path(); + let capacity = get_filesystem_capacity(path).map_err(ApiError::InternalServerError)?; + let usage = get_filesystem_usage(path); + let resp = json!({ + "data_dir": path, + "capacity_bytes": capacity, + "usage_bytes": usage, + }); + json_response(StatusCode::OK, resp) +} + /// List all (not deleted) timelines. /// Note: it is possible to do the same with debug_dump. async fn timeline_list_handler(request: Request) -> Result, ApiError> { @@ -241,9 +258,14 @@ async fn timeline_pull_handler(mut request: Request) -> Result, pub availability_zone: Option, pub no_sync: bool, + /* BEGIN_HADRON */ + pub advertise_pg_addr_tenant_only: Option, + pub enable_pull_timeline_on_startup: bool, + pub hcc_base_url: Option, + /* END_HADRON */ pub broker_endpoint: Uri, pub broker_keepalive_interval: Duration, pub heartbeat_timeout: Duration, @@ -109,6 +121,10 @@ pub struct SafeKeeperConf { /* BEGIN_HADRON */ pub max_reelect_offloader_lag_bytes: u64, pub max_timeline_disk_usage_bytes: u64, + /// How often to check the working directory's filesystem for total disk usage. + pub global_disk_check_interval: Duration, + /// The portion of the filesystem capacity that can be used by all timelines. 
+ pub max_global_disk_usage_ratio: f64, /* END_HADRON */ pub backup_parallel_jobs: usize, pub wal_backup_enabled: bool, @@ -134,6 +150,7 @@ pub struct SafeKeeperConf { pub ssl_ca_certs: Vec, pub use_https_safekeeper_api: bool, pub enable_tls_wal_service_api: bool, + pub force_metric_collection_on_scrape: bool, } impl SafeKeeperConf { @@ -165,6 +182,8 @@ impl SafeKeeperConf { /* BEGIN_HADRON */ max_reelect_offloader_lag_bytes: defaults::DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES, max_timeline_disk_usage_bytes: defaults::DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES, + global_disk_check_interval: Duration::from_secs(60), + max_global_disk_usage_ratio: defaults::DEFAULT_MAX_GLOBAL_DISK_USAGE_RATIO, /* END_HADRON */ current_thread_runtime: false, walsenders_keep_horizon: false, @@ -183,6 +202,12 @@ impl SafeKeeperConf { ssl_ca_certs: Vec::new(), use_https_safekeeper_api: false, enable_tls_wal_service_api: false, + force_metric_collection_on_scrape: true, + /* BEGIN_HADRON */ + advertise_pg_addr_tenant_only: None, + enable_pull_timeline_on_startup: false, + hcc_base_url: None, + /* END_HADRON */ } } } @@ -221,10 +246,13 @@ pub static WAL_BACKUP_RUNTIME: Lazy = Lazy::new(|| { .expect("Failed to create WAL backup runtime") }); +/// Hadron: Dedicated runtime for infrequent background tasks. pub static BACKGROUND_RUNTIME: Lazy = Lazy::new(|| { tokio::runtime::Builder::new_multi_thread() - .thread_name("background worker") - .worker_threads(1) // there is only one task now (ssl certificate reloading), having more threads doesn't make sense + .thread_name("Hadron background worker") + // One worker thread is enough, as most of the actual tasks run on blocking threads + // which has it own thread pool. 
+ .worker_threads(1) .enable_all() .build() .expect("Failed to create background runtime") diff --git a/safekeeper/src/metrics.rs b/safekeeper/src/metrics.rs index 9baa80f73a..b07852aaee 100644 --- a/safekeeper/src/metrics.rs +++ b/safekeeper/src/metrics.rs @@ -59,6 +59,15 @@ pub static FLUSH_WAL_SECONDS: Lazy = Lazy::new(|| { .expect("Failed to register safekeeper_flush_wal_seconds histogram") }); /* BEGIN_HADRON */ +// Counter of all ProposerAcceptorMessage requests received +pub static PROPOSER_ACCEPTOR_MESSAGES_TOTAL: Lazy = Lazy::new(|| { + register_int_counter_vec!( + "safekeeper_proposer_acceptor_messages_total", + "Total number of ProposerAcceptorMessage requests received by the Safekeeper.", + &["outcome"] + ) + .expect("Failed to register safekeeper_proposer_acceptor_messages_total counter") +}); pub static WAL_DISK_IO_ERRORS: Lazy = Lazy::new(|| { register_int_counter!( "safekeeper_wal_disk_io_errors", @@ -76,6 +85,43 @@ pub static WAL_STORAGE_LIMIT_ERRORS: Lazy = Lazy::new(|| { ) .expect("Failed to register safekeeper_wal_storage_limit_errors counter") }); +pub static SK_RECOVERY_PULL_TIMELINE_ERRORS: Lazy = Lazy::new(|| { + register_int_counter!( + "safekeeper_recovery_pull_timeline_errors", + concat!( + "Number of errors due to pull_timeline errors during SK lost disk recovery.", + "An increase in this metric indicates pull timelines runs into error." + ) + ) + .expect("Failed to register safekeeper_recovery_pull_timeline_errors counter") +}); +pub static SK_RECOVERY_PULL_TIMELINE_OKS: Lazy = Lazy::new(|| { + register_int_counter!( + "safekeeper_recovery_pull_timeline_oks", + concat!( + "Number of successful pull_timeline during SK lost disk recovery.", + "An increase in this metric indicates pull timelines is successful." 
+ ) + ) + .expect("Failed to register safekeeper_recovery_pull_timeline_oks counter") +}); +pub static SK_RECOVERY_PULL_TIMELINES_SECONDS: Lazy = Lazy::new(|| { + register_histogram!( + "safekeeper_recovery_pull_timelines_seconds", + "Seconds to pull timelines", + DISK_FSYNC_SECONDS_BUCKETS.to_vec() + ) + .expect("Failed to register safekeeper_recovery_pull_timelines_seconds histogram") +}); +pub static SK_RECOVERY_PULL_TIMELINE_SECONDS: Lazy = Lazy::new(|| { + register_histogram_vec!( + "safekeeper_recovery_pull_timeline_seconds", + "Seconds to pull timeline", + &["tenant_id", "timeline_id"], + DISK_FSYNC_SECONDS_BUCKETS.to_vec() + ) + .expect("Failed to register safekeeper_recovery_pull_timeline_seconds histogram vec") +}); /* END_HADRON */ pub static PERSIST_CONTROL_FILE_SECONDS: Lazy = Lazy::new(|| { register_histogram!( @@ -917,3 +963,17 @@ async fn collect_timeline_metrics(global_timelines: Arc) -> Vec } res } + +/* BEGIN_HADRON */ +// Metrics reporting the time spent to perform each safekeeper filesystem utilization check. 
+pub static GLOBAL_DISK_UTIL_CHECK_SECONDS: Lazy = Lazy::new(|| { + // Buckets from 1ms up to 10s + let buckets = vec![0.001, 0.01, 0.1, 0.5, 1.0, 2.0, 5.0, 10.0]; + register_histogram!( + "safekeeper_global_disk_utilization_check_seconds", + "Seconds spent to perform each safekeeper filesystem utilization check", + buckets + ) + .expect("Failed to register safekeeper_global_disk_utilization_check_seconds histogram") +}); +/* END_HADRON */ diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index 1c9e5bade5..b4c4877b2c 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -8,6 +8,7 @@ use bytes::Bytes; use camino::Utf8PathBuf; use chrono::{DateTime, Utc}; use futures::{SinkExt, StreamExt, TryStreamExt}; +use http::StatusCode; use http_utils::error::ApiError; use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo}; use remote_storage::GenericRemoteStorage; @@ -21,10 +22,11 @@ use tokio::fs::OpenOptions; use tokio::io::AsyncWrite; use tokio::sync::mpsc; use tokio::task; +use tokio::time::sleep; use tokio_tar::{Archive, Builder, Header}; use tokio_util::io::{CopyToBytes, SinkWriter}; use tokio_util::sync::PollSender; -use tracing::{error, info, instrument}; +use tracing::{error, info, instrument, warn}; use utils::crashsafe::fsync_async_opt; use utils::id::{NodeId, TenantTimelineId}; use utils::logging::SecretString; @@ -449,6 +451,7 @@ pub async fn handle_request( sk_auth_token: Option, ssl_ca_certs: Vec, global_timelines: Arc, + wait_for_peer_timeline_status: bool, ) -> Result { let existing_tli = global_timelines.get(TenantTimelineId::new( request.tenant_id, @@ -472,37 +475,100 @@ pub async fn handle_request( let http_hosts = request.http_hosts.clone(); // Figure out statuses of potential donors. 
- let responses: Vec> = - futures::future::join_all(http_hosts.iter().map(|url| async { - let cclient = Client::new(http_client.clone(), url.clone(), sk_auth_token.clone()); - let info = cclient - .timeline_status(request.tenant_id, request.timeline_id) - .await?; - Ok(info) - })) - .await; - let mut statuses = Vec::new(); - for (i, response) in responses.into_iter().enumerate() { - match response { - Ok(status) => { - statuses.push((status, i)); - } - Err(e) => { - info!("error fetching status from {}: {e}", http_hosts[i]); + if !wait_for_peer_timeline_status { + let responses: Vec> = + futures::future::join_all(http_hosts.iter().map(|url| async { + let cclient = Client::new(http_client.clone(), url.clone(), sk_auth_token.clone()); + let resp = cclient + .timeline_status(request.tenant_id, request.timeline_id) + .await?; + let info: TimelineStatus = resp + .json() + .await + .context("Failed to deserialize timeline status") + .map_err(|e| mgmt_api::Error::ReceiveErrorBody(e.to_string()))?; + Ok(info) + })) + .await; + + for (i, response) in responses.into_iter().enumerate() { + match response { + Ok(status) => { + statuses.push((status, i)); + } + Err(e) => { + info!("error fetching status from {}: {e}", http_hosts[i]); + } } } - } - // Allow missing responses from up to one safekeeper (say due to downtime) - // e.g. if we created a timeline on PS A and B, with C being offline. Then B goes - // offline and C comes online. Then we want a pull on C with A and B as hosts to work. - let min_required_successful = (http_hosts.len() - 1).max(1); - if statuses.len() < min_required_successful { - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "only got {} successful status responses. required: {min_required_successful}", - statuses.len() - ))); + // Allow missing responses from up to one safekeeper (say due to downtime) + // e.g. if we created a timeline on PS A and B, with C being offline. Then B goes + // offline and C comes online. 
Then we want a pull on C with A and B as hosts to work. + let min_required_successful = (http_hosts.len() - 1).max(1); + if statuses.len() < min_required_successful { + return Err(ApiError::InternalServerError(anyhow::anyhow!( + "only got {} successful status responses. required: {min_required_successful}", + statuses.len() + ))); + } + } else { + let mut retry = true; + // We must get status from all other peers. + // Otherwise, we may run into split-brain scenario. + while retry { + statuses.clear(); + retry = false; + for (i, url) in http_hosts.iter().enumerate() { + let cclient = Client::new(http_client.clone(), url.clone(), sk_auth_token.clone()); + match cclient + .timeline_status(request.tenant_id, request.timeline_id) + .await + { + Ok(resp) => { + if resp.status() == StatusCode::NOT_FOUND { + warn!( + "Timeline {} not found on peer SK {}, no need to pull it", + TenantTimelineId::new(request.tenant_id, request.timeline_id), + url + ); + return Ok(PullTimelineResponse { + safekeeper_host: None, + }); + } + let info: TimelineStatus = resp + .json() + .await + .context("Failed to deserialize timeline status") + .map_err(ApiError::InternalServerError)?; + statuses.push((info, i)); + } + Err(e) => { + match e { + // If we get a 404, it means the timeline doesn't exist on this safekeeper. + // We can ignore this error. 
+ mgmt_api::Error::ApiError(status, _) + if status == StatusCode::NOT_FOUND => + { + warn!( + "Timeline {} not found on peer SK {}, no need to pull it", + TenantTimelineId::new(request.tenant_id, request.timeline_id), + url + ); + return Ok(PullTimelineResponse { + safekeeper_host: None, + }); + } + _ => {} + } + retry = true; + error!("Failed to get timeline status from {}: {:#}", url, e); + } + } + } + sleep(std::time::Duration::from_millis(100)).await; + } } // Find the most advanced safekeeper @@ -511,6 +577,12 @@ pub async fn handle_request( .max_by_key(|(status, _)| { ( status.acceptor_state.epoch, + /* BEGIN_HADRON */ + // We need to pull from the SK with the highest term. + // This is because another compute may come online and vote the same highest term again on the other two SKs. + // Then, there will be 2 computes running on the same term. + status.acceptor_state.term, + /* END_HADRON */ status.flush_lsn, status.commit_lsn, ) diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 4d15fc9de3..09ca041e22 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -24,7 +24,7 @@ use utils::id::{NodeId, TenantId, TimelineId}; use utils::lsn::Lsn; use utils::pageserver_feedback::PageserverFeedback; -use crate::metrics::MISC_OPERATION_SECONDS; +use crate::metrics::{MISC_OPERATION_SECONDS, PROPOSER_ACCEPTOR_MESSAGES_TOTAL}; use crate::state::TimelineState; use crate::{control_file, wal_storage}; @@ -938,7 +938,7 @@ where &mut self, msg: &ProposerAcceptorMessage, ) -> Result> { - match msg { + let res = match msg { ProposerAcceptorMessage::Greeting(msg) => self.handle_greeting(msg).await, ProposerAcceptorMessage::VoteRequest(msg) => self.handle_vote_request(msg).await, ProposerAcceptorMessage::Elected(msg) => self.handle_elected(msg).await, @@ -949,7 +949,20 @@ where self.handle_append_request(msg, false).await } ProposerAcceptorMessage::FlushWAL => self.handle_flush().await, - } + }; + + // BEGIN HADRON + match &res { 
+ Ok(_) => PROPOSER_ACCEPTOR_MESSAGES_TOTAL + .with_label_values(&["success"]) + .inc(), + Err(_) => PROPOSER_ACCEPTOR_MESSAGES_TOTAL + .with_label_values(&["error"]) + .inc(), + }; + + res + // END HADRON } /// Handle initial message from proposer: check its sanity and send my diff --git a/safekeeper/src/send_interpreted_wal.rs b/safekeeper/src/send_interpreted_wal.rs index 3797ac39d1..72a436e25f 100644 --- a/safekeeper/src/send_interpreted_wal.rs +++ b/safekeeper/src/send_interpreted_wal.rs @@ -561,6 +561,20 @@ impl InterpretedWalReader { // Update internal and external state, then reset the WAL stream // if required. let senders = self.shard_senders.entry(shard_id).or_default(); + + // Clean up any shard senders that have dropped out before adding the new + // one. This avoids a build up of dead senders. + senders.retain(|sender| { + let closed = sender.tx.is_closed(); + + if closed { + let sender_id = ShardSenderId::new(shard_id, sender.sender_id); + tracing::info!("Removed shard sender {}", sender_id); + } + + !closed + }); + let new_sender_id = match senders.last() { Some(sender) => sender.sender_id.next(), None => SenderId::first() diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index 177e759db5..5891fa88a4 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -12,7 +12,8 @@ use futures::FutureExt; use itertools::Itertools; use parking_lot::Mutex; use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend, PostgresBackendReader, QueryError}; -use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, TimestampTz, get_current_timestamp}; +use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, get_current_timestamp}; +use postgres_ffi_types::TimestampTz; use pq_proto::{BeMessage, WalSndKeepAlive, XLogDataBody}; use safekeeper_api::Term; use safekeeper_api::models::{ diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 95b5fe6d5d..a1a0aab9fd 100644 --- a/safekeeper/src/timeline.rs +++ 
b/safekeeper/src/timeline.rs @@ -29,6 +29,8 @@ use utils::sync::gate::Gate; use crate::metrics::{ FullTimelineInfo, MISC_OPERATION_SECONDS, WAL_STORAGE_LIMIT_ERRORS, WalStorageMetrics, }; + +use crate::hadron::GLOBAL_DISK_LIMIT_EXCEEDED; use crate::rate_limit::RateLimiter; use crate::receive_wal::WalReceivers; use crate::safekeeper::{AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, TermLsn}; @@ -197,7 +199,7 @@ impl StateSK { Ok(TimelineMembershipSwitchResponse { previous_conf: result.previous_conf, current_conf: result.current_conf, - term: self.state().acceptor_state.term, + last_log_term: self.state().acceptor_state.term, flush_lsn: self.flush_lsn(), }) } @@ -1081,6 +1083,11 @@ impl WalResidentTimeline { ); } } + + if GLOBAL_DISK_LIMIT_EXCEEDED.load(Ordering::Relaxed) { + bail!("Global disk usage exceeded limit"); + } + Ok(()) } // END HADRON diff --git a/safekeeper/src/wal_backup.rs b/safekeeper/src/wal_backup.rs index 7e10847a1b..0e8dfd64c3 100644 --- a/safekeeper/src/wal_backup.rs +++ b/safekeeper/src/wal_backup.rs @@ -166,7 +166,7 @@ fn hadron_determine_offloader(mgr: &Manager, state: &StateSnapshot) -> (Option) -> Result<()> { ssl_ca_certs: Vec::new(), use_https_safekeeper_api: false, enable_tls_wal_service_api: false, + force_metric_collection_on_scrape: true, + /* BEGIN_HADRON */ + enable_pull_timeline_on_startup: false, + advertise_pg_addr_tenant_only: None, + hcc_base_url: None, + global_disk_check_interval: Duration::from_secs(10), + max_global_disk_usage_ratio: 0.0, + /* END_HADRON */ }; let mut global = GlobalMap::new(disk, conf.clone())?; diff --git a/storage_controller/Cargo.toml b/storage_controller/Cargo.toml index 143f4241f4..d67be6d469 100644 --- a/storage_controller/Cargo.toml +++ b/storage_controller/Cargo.toml @@ -52,6 +52,7 @@ tokio-rustls.workspace = true tokio-util.workspace = true tokio.workspace = true tracing.workspace = true +uuid.workspace = true measured.workspace = true rustls.workspace = true scopeguard.workspace = 
true @@ -63,6 +64,7 @@ tokio-postgres-rustls.workspace = true diesel = { version = "2.2.6", features = [ "serde_json", "chrono", + "uuid", ] } diesel-async = { version = "0.5.2", features = ["postgres", "bb8", "async-connection-wrapper"] } diesel_migrations = { version = "2.2.0" } diff --git a/storage_controller/migrations/2025-07-17-000001_hadron_safekeepers/down.sql b/storage_controller/migrations/2025-07-17-000001_hadron_safekeepers/down.sql new file mode 100644 index 0000000000..b45b45e438 --- /dev/null +++ b/storage_controller/migrations/2025-07-17-000001_hadron_safekeepers/down.sql @@ -0,0 +1,2 @@ +DROP TABLE hadron_safekeepers; +DROP TABLE hadron_timeline_safekeepers; diff --git a/storage_controller/migrations/2025-07-17-000001_hadron_safekeepers/up.sql b/storage_controller/migrations/2025-07-17-000001_hadron_safekeepers/up.sql new file mode 100644 index 0000000000..6cee981efc --- /dev/null +++ b/storage_controller/migrations/2025-07-17-000001_hadron_safekeepers/up.sql @@ -0,0 +1,17 @@ +-- hadron_safekeepers keep track of all Safe Keeper nodes that exist in the system. +-- Upon startup, each Safe Keeper reaches out to the hadron cluster coordinator to register its node ID and listen addresses. 
+ +CREATE TABLE hadron_safekeepers ( + sk_node_id BIGINT PRIMARY KEY NOT NULL, + listen_http_addr VARCHAR NOT NULL, + listen_http_port INTEGER NOT NULL, + listen_pg_addr VARCHAR NOT NULL, + listen_pg_port INTEGER NOT NULL +); + +CREATE TABLE hadron_timeline_safekeepers ( + timeline_id VARCHAR NOT NULL, + sk_node_id BIGINT NOT NULL, + legacy_endpoint_id UUID DEFAULT NULL, + PRIMARY KEY(timeline_id, sk_node_id) +); diff --git a/storage_controller/src/auth.rs b/storage_controller/src/auth.rs index ef47abf8c7..8f15f0f072 100644 --- a/storage_controller/src/auth.rs +++ b/storage_controller/src/auth.rs @@ -1,4 +1,5 @@ use utils::auth::{AuthError, Claims, Scope}; +use uuid::Uuid; pub fn check_permission(claims: &Claims, required_scope: Scope) -> Result<(), AuthError> { if claims.scope != required_scope { @@ -7,3 +8,14 @@ pub fn check_permission(claims: &Claims, required_scope: Scope) -> Result<(), Au Ok(()) } + +#[allow(dead_code)] +pub fn check_endpoint_permission(claims: &Claims, endpoint_id: Uuid) -> Result<(), AuthError> { + if claims.scope != Scope::TenantEndpoint { + return Err(AuthError("Scope mismatch. Permission denied".into())); + } + if claims.endpoint_id != Some(endpoint_id) { + return Err(AuthError("Endpoint id mismatch. 
Permission denied".into())); + } + Ok(()) +} diff --git a/storage_controller/src/compute_hook.rs b/storage_controller/src/compute_hook.rs index 81f7fedac8..9e4e528b16 100644 --- a/storage_controller/src/compute_hook.rs +++ b/storage_controller/src/compute_hook.rs @@ -5,6 +5,8 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Context; +use compute_api::spec::PageserverProtocol; +use compute_api::spec::PageserverShardInfo; use control_plane::endpoint::{ ComputeControlPlane, EndpointStatus, PageserverConnectionInfo, PageserverShardConnectionInfo, }; @@ -13,7 +15,7 @@ use futures::StreamExt; use hyper::StatusCode; use pageserver_api::config::DEFAULT_GRPC_LISTEN_PORT; use pageserver_api::controller_api::AvailabilityZone; -use pageserver_api::shard::{ShardCount, ShardNumber, ShardStripeSize, TenantShardId}; +use pageserver_api::shard::{ShardCount, ShardIndex, ShardNumber, ShardStripeSize, TenantShardId}; use postgres_connection::parse_host_port; use safekeeper_api::membership::SafekeeperGeneration; use serde::{Deserialize, Serialize}; @@ -507,7 +509,16 @@ impl ApiMethod for ComputeHookTenant { if endpoint.tenant_id == *tenant_id && endpoint.status() == EndpointStatus::Running { tracing::info!("Reconfiguring pageservers for endpoint {endpoint_name}"); - let mut shard_conninfos = HashMap::new(); + let shard_count = ShardCount(shards.len().try_into().expect("too many shards")); + + let mut shard_infos: HashMap = HashMap::new(); + + let prefer_protocol = if endpoint.grpc { + PageserverProtocol::Grpc + } else { + PageserverProtocol::Libpq + }; + for shard in shards.iter() { let ps_conf = env .get_pageserver_conf(shard.node_id) @@ -528,19 +539,31 @@ impl ApiMethod for ComputeHookTenant { None }; let pageserver = PageserverShardConnectionInfo { + id: Some(shard.node_id.to_string()), libpq_url, grpc_url, }; - shard_conninfos.insert(shard.shard_number.0 as u32, pageserver); + let shard_info = PageserverShardInfo { + pageservers: vec![pageserver], + }; + 
shard_infos.insert( + ShardIndex { + shard_number: shard.shard_number, + shard_count, + }, + shard_info, + ); } let pageserver_conninfo = PageserverConnectionInfo { - shards: shard_conninfos, - prefer_grpc: endpoint.grpc, + shard_count: ShardCount::unsharded(), + stripe_size: stripe_size.map(|val| val.0), + shards: shard_infos, + prefer_protocol, }; endpoint - .reconfigure_pageservers(pageserver_conninfo, *stripe_size) + .reconfigure_pageservers(&pageserver_conninfo) .await .map_err(NotifyError::NeonLocal)?; } @@ -824,6 +847,7 @@ impl ComputeHook { let send_locked = tokio::select! { guard = send_lock.lock_owned() => {guard}, _ = cancel.cancelled() => { + tracing::info!("Notification cancelled while waiting for lock"); return Err(NotifyError::ShuttingDown) } }; @@ -865,11 +889,32 @@ impl ComputeHook { let notify_url = compute_hook_url.as_ref().unwrap(); self.do_notify(notify_url, &request, cancel).await } else { - self.do_notify_local::(&request).await.map_err(|e| { + match self.do_notify_local::(&request).await.map_err(|e| { // This path is for testing only, so munge the error into our prod-style error type. - tracing::error!("neon_local notification hook failed: {e}"); - NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR) - }) + if e.to_string().contains("refresh-configuration-pending") { + // If the error message mentions "refresh-configuration-pending", it means the compute node + // rejected our notification request because it already trying to reconfigure itself. We + // can proceed with the rest of the reconcliation process as the compute node already + // discovers the need to reconfigure and will eventually update its configuration once + // we update the pageserver mappings. In fact, it is important that we continue with + // reconcliation to make sure we update the pageserver mappings to unblock the compute node. 
+ tracing::info!("neon_local notification hook failed: {e}"); + tracing::info!("Notification failed likely due to compute node self-reconfiguration, will retry."); + Ok(()) + } else { + tracing::error!("neon_local notification hook failed: {e}"); + Err(NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR)) + } + }) { + // Compute node accepted the notification request. Ok to proceed. + Ok(_) => Ok(()), + // Compute node rejected our request but it is already self-reconfiguring. Ok to proceed. + Err(Ok(_)) => Ok(()), + // Fail the reconciliation attempt in all other cases. Recall that this whole code path involving + // neon_local is for testing only. In production we always retry failed reconcliations so we + // don't have any deadends here. + Err(Err(e)) => Err(e), + } }; match result { diff --git a/storage_controller/src/hadron_utils.rs b/storage_controller/src/hadron_utils.rs new file mode 100644 index 0000000000..871e21c367 --- /dev/null +++ b/storage_controller/src/hadron_utils.rs @@ -0,0 +1,44 @@ +use std::collections::BTreeMap; + +use rand::Rng; +use utils::shard::TenantShardId; + +static CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()"; + +/// Generate a random string of `length` that can be used as a password. 
The generated string +/// contains alphanumeric characters and special characters (!@#$%^&*()) +pub fn generate_random_password(length: usize) -> String { + let mut rng = rand::thread_rng(); + (0..length) + .map(|_| { + let idx = rng.gen_range(0..CHARSET.len()); + CHARSET[idx] as char + }) + .collect() +} + +pub(crate) struct TenantShardSizeMap { + #[expect(dead_code)] + pub map: BTreeMap, +} + +impl TenantShardSizeMap { + pub fn new(map: BTreeMap) -> Self { + Self { map } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_generate_random_password() { + let pwd1 = generate_random_password(10); + assert_eq!(pwd1.len(), 10); + let pwd2 = generate_random_password(10); + assert_ne!(pwd1, pwd2); + assert!(pwd1.chars().all(|c| CHARSET.contains(&(c as u8)))); + assert!(pwd2.chars().all(|c| CHARSET.contains(&(c as u8)))); + } +} diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index ee446ea65d..ff73719adb 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -48,7 +48,10 @@ use crate::metrics::{ }; use crate::persistence::SafekeeperUpsert; use crate::reconciler::ReconcileError; -use crate::service::{LeadershipStatus, RECONCILE_TIMEOUT, STARTUP_RECONCILE_TIMEOUT, Service}; +use crate::service::{ + LeadershipStatus, RECONCILE_TIMEOUT, STARTUP_RECONCILE_TIMEOUT, Service, + TenantMutationLocations, +}; /// State available to HTTP request handlers pub struct HttpState { @@ -734,83 +737,104 @@ async fn handle_tenant_timeline_passthrough( path ); - // Find the node that holds shard zero - let (node, tenant_shard_id) = if tenant_or_shard_id.is_unsharded() { - service + let tenant_shard_id = if tenant_or_shard_id.is_unsharded() { + // If the request contains only tenant ID, find the node that holds shard zero + let (_, shard_id) = service .tenant_shard0_node(tenant_or_shard_id.tenant_id) - .await? 
+ .await?; + shard_id } else { - ( - service.tenant_shard_node(tenant_or_shard_id).await?, - tenant_or_shard_id, - ) + tenant_or_shard_id }; - // Callers will always pass an unsharded tenant ID. Before proxying, we must - // rewrite this to a shard-aware shard zero ID. - let path = format!("{path}"); - let tenant_str = tenant_or_shard_id.tenant_id.to_string(); - let tenant_shard_str = format!("{tenant_shard_id}"); - let path = path.replace(&tenant_str, &tenant_shard_str); + let service_inner = service.clone(); - let latency = &METRICS_REGISTRY - .metrics_group - .storage_controller_passthrough_request_latency; - - let path_label = path_without_ids(&path) - .split('/') - .filter(|token| !token.is_empty()) - .collect::>() - .join("_"); - let labels = PageserverRequestLabelGroup { - pageserver_id: &node.get_id().to_string(), - path: &path_label, - method: crate::metrics::Method::Get, - }; - - let _timer = latency.start_timer(labels.clone()); - - let client = mgmt_api::Client::new( - service.get_http_client().clone(), - node.base_url(), - service.get_config().pageserver_jwt_token.as_deref(), - ); - let resp = client.op_raw(method, path).await.map_err(|e| - // We return 503 here because if we can't successfully send a request to the pageserver, - // either we aren't available or the pageserver is unavailable. 
- ApiError::ResourceUnavailable(format!("Error sending pageserver API request to {node}: {e}").into()))?; - - if !resp.status().is_success() { - let error_counter = &METRICS_REGISTRY - .metrics_group - .storage_controller_passthrough_request_error; - error_counter.inc(labels); - } - - // Transform 404 into 503 if we raced with a migration - if resp.status() == reqwest::StatusCode::NOT_FOUND { - // Look up node again: if we migrated it will be different - let new_node = service.tenant_shard_node(tenant_shard_id).await?; - if new_node.get_id() != node.get_id() { - // Rather than retry here, send the client a 503 to prompt a retry: this matches - // the pageserver's use of 503, and all clients calling this API should retry on 503. - return Err(ApiError::ResourceUnavailable( - format!("Pageserver {node} returned 404, was migrated to {new_node}").into(), - )); + service.tenant_shard_remote_mutation(tenant_shard_id, |locations| async move { + let TenantMutationLocations(locations) = locations; + if locations.is_empty() { + return Err(ApiError::NotFound(anyhow::anyhow!("Tenant {} not found", tenant_or_shard_id.tenant_id).into())); } - } - // We have a reqest::Response, would like a http::Response - let mut builder = hyper::Response::builder().status(map_reqwest_hyper_status(resp.status())?); - for (k, v) in resp.headers() { - builder = builder.header(k.as_str(), v.as_bytes()); - } + let (tenant_or_shard_id, locations) = locations.into_iter().next().unwrap(); + let node = locations.latest.node; - let response = builder - .body(Body::wrap_stream(resp.bytes_stream())) - .map_err(|e| ApiError::InternalServerError(e.into()))?; + // Callers will always pass an unsharded tenant ID. Before proxying, we must + // rewrite this to a shard-aware shard zero ID. 
+ let path = format!("{path}"); + let tenant_str = tenant_or_shard_id.tenant_id.to_string(); + let tenant_shard_str = format!("{tenant_shard_id}"); + let path = path.replace(&tenant_str, &tenant_shard_str); - Ok(response) + let latency = &METRICS_REGISTRY + .metrics_group + .storage_controller_passthrough_request_latency; + + let path_label = path_without_ids(&path) + .split('/') + .filter(|token| !token.is_empty()) + .collect::>() + .join("_"); + let labels = PageserverRequestLabelGroup { + pageserver_id: &node.get_id().to_string(), + path: &path_label, + method: crate::metrics::Method::Get, + }; + + let _timer = latency.start_timer(labels.clone()); + + let client = mgmt_api::Client::new( + service_inner.get_http_client().clone(), + node.base_url(), + service_inner.get_config().pageserver_jwt_token.as_deref(), + ); + let resp = client.op_raw(method, path).await.map_err(|e| + // We return 503 here because if we can't successfully send a request to the pageserver, + // either we aren't available or the pageserver is unavailable. + ApiError::ResourceUnavailable(format!("Error sending pageserver API request to {node}: {e}").into()))?; + + if !resp.status().is_success() { + let error_counter = &METRICS_REGISTRY + .metrics_group + .storage_controller_passthrough_request_error; + error_counter.inc(labels); + } + let resp_staus = resp.status(); + + // We have a reqest::Response, would like a http::Response + let mut builder = hyper::Response::builder().status(map_reqwest_hyper_status(resp_staus)?); + for (k, v) in resp.headers() { + builder = builder.header(k.as_str(), v.as_bytes()); + } + let resp_bytes = resp + .bytes() + .await + .map_err(|e| ApiError::InternalServerError(e.into()))?; + // Inspect 404 errors: at this point, we know that the tenant exists, but the pageserver we route + // the request to might not yet be ready. Therefore, if it is a _tenant_ not found error, we can + // convert it into a 503. 
TODO: we should make this part of the check in `tenant_shard_remote_mutation`. + // However, `tenant_shard_remote_mutation` currently cannot inspect the HTTP error response body, + // so we have to do it here instead. + if resp_staus == reqwest::StatusCode::NOT_FOUND { + let resp_str = std::str::from_utf8(&resp_bytes) + .map_err(|e| ApiError::InternalServerError(e.into()))?; + // We only handle "tenant not found" errors; other 404s like timeline not found should + // be forwarded as-is. + if Service::is_tenant_not_found_error(resp_str, tenant_or_shard_id.tenant_id) { + // Rather than retry here, send the client a 503 to prompt a retry: this matches + // the pageserver's use of 503, and all clients calling this API should retry on 503. + return Err(ApiError::ResourceUnavailable( + format!( + "Pageserver {node} returned tenant 404 due to ongoing migration, retry later" + ) + .into(), + )); + } + } + let response = builder + .body(Body::from(resp_bytes)) + .map_err(|e| ApiError::InternalServerError(e.into()))?; + Ok(response) + }).await? } async fn handle_tenant_locate( @@ -850,6 +874,31 @@ async fn handle_tenant_describe( json_response(StatusCode::OK, service.tenant_describe(tenant_id)?) 
} +/* BEGIN_HADRON */ +async fn handle_tenant_timeline_describe( + service: Arc, + req: Request, +) -> Result, ApiError> { + check_permissions(&req, Scope::Scrubber)?; + + let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?; + let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?; + match maybe_forward(req).await { + ForwardOutcome::Forwarded(res) => { + return res; + } + ForwardOutcome::NotForwarded(_req) => {} + }; + + json_response( + StatusCode::OK, + service + .tenant_timeline_describe(tenant_id, timeline_id) + .await?, + ) +} +/* END_HADRON */ + async fn handle_tenant_list( service: Arc, req: Request, @@ -1066,9 +1115,10 @@ async fn handle_node_delete(req: Request) -> Result, ApiErr let state = get_state(&req); let node_id: NodeId = parse_request_param(&req, "node_id")?; + let force: bool = parse_query_param(&req, "force")?.unwrap_or(false); json_response( StatusCode::OK, - state.service.start_node_delete(node_id).await?, + state.service.start_node_delete(node_id, force).await?, ) } @@ -2371,7 +2421,7 @@ pub fn make_router( named_request_span( r, handle_safekeeper_scheduling_policy, - RequestName("v1_safekeeper_status"), + RequestName("v1_safekeeper_scheduling_policy"), ) }) // Tenant Shard operations @@ -2480,6 +2530,13 @@ pub fn make_router( ) }) // Timeline operations + .get("/control/v1/tenant/:tenant_id/timeline/:timeline_id", |r| { + tenant_service_handler( + r, + handle_tenant_timeline_describe, + RequestName("v1_tenant_timeline_describe"), + ) + }) .delete("/v1/tenant/:tenant_id/timeline/:timeline_id", |r| { tenant_service_handler( r, @@ -2565,6 +2622,17 @@ pub fn make_router( ) }, ) + // Tenant timeline mark_invisible passthrough to shard zero + .put( + "/v1/tenant/:tenant_id/timeline/:timeline_id/mark_invisible", + |r| { + tenant_service_handler( + r, + handle_tenant_timeline_passthrough, + RequestName("v1_tenant_timeline_mark_invisible_passthrough"), + ) + }, + ) // Tenant detail GET passthrough to shard zero: 
.get("/v1/tenant/:tenant_id", |r| { tenant_service_handler( @@ -2583,17 +2651,6 @@ pub fn make_router( RequestName("v1_tenant_passthrough"), ) }) - // Tenant timeline mark_invisible passthrough to shard zero - .put( - "/v1/tenant/:tenant_id/timeline/:timeline_id/mark_invisible", - |r| { - tenant_service_handler( - r, - handle_tenant_timeline_passthrough, - RequestName("v1_tenant_timeline_mark_invisible_passthrough"), - ) - }, - ) } #[cfg(test)] diff --git a/storage_controller/src/lib.rs b/storage_controller/src/lib.rs index 36e3c5dc6c..24b06da83a 100644 --- a/storage_controller/src/lib.rs +++ b/storage_controller/src/lib.rs @@ -6,6 +6,7 @@ extern crate hyper0 as hyper; mod auth; mod background_node_operations; mod compute_hook; +pub mod hadron_utils; mod heartbeater; pub mod http; mod id_lock_map; diff --git a/storage_controller/src/main.rs b/storage_controller/src/main.rs index 2a851dc25b..5d21feeb10 100644 --- a/storage_controller/src/main.rs +++ b/storage_controller/src/main.rs @@ -222,6 +222,9 @@ struct Cli { /// Primarily useful for testing to reduce test execution time. 
#[arg(long, default_value = "false", action=ArgAction::Set)] kick_secondary_downloads: bool, + + #[arg(long)] + shard_split_request_timeout: Option, } enum StrictMode { @@ -470,6 +473,10 @@ async fn async_main() -> anyhow::Result<()> { timeline_safekeeper_count: args.timeline_safekeeper_count, posthog_config: posthog_config.clone(), kick_secondary_downloads: args.kick_secondary_downloads, + shard_split_request_timeout: args + .shard_split_request_timeout + .map(humantime::Duration::into) + .unwrap_or(Duration::MAX), }; // Validate that we can connect to the database diff --git a/storage_controller/src/metrics.rs b/storage_controller/src/metrics.rs index 8738386968..9c34b34044 100644 --- a/storage_controller/src/metrics.rs +++ b/storage_controller/src/metrics.rs @@ -76,8 +76,8 @@ pub(crate) struct StorageControllerMetricGroup { /// How many shards would like to reconcile but were blocked by concurrency limits pub(crate) storage_controller_pending_reconciles: measured::Gauge, - /// How many shards are keep-failing and will be ignored when considering to run optimizations - pub(crate) storage_controller_keep_failing_reconciles: measured::Gauge, + /// How many shards are stuck and will be ignored when considering to run optimizations + pub(crate) storage_controller_stuck_reconciles: measured::Gauge, /// HTTP request status counters for handled requests pub(crate) storage_controller_http_request_status: @@ -151,6 +151,29 @@ pub(crate) struct StorageControllerMetricGroup { /// Indicator of completed safekeeper reconciles, broken down by safekeeper. pub(crate) storage_controller_safekeeper_reconciles_complete: measured::CounterVec, + + /* BEGIN HADRON */ + /// Hadron `config_watcher` reconciliation runs completed, broken down by success/failure. + pub(crate) storage_controller_config_watcher_complete: + measured::CounterVec, + + /// Hadron long waits for node state changes during drain and fill. 
+ pub(crate) storage_controller_drain_and_fill_long_waits: measured::Counter, + + /// Set to 1 if we detect any page server pods with pending node pool rotation annotations. + /// Requires manual reset after oncall investigation. + pub(crate) storage_controller_ps_node_pool_rotation_pending: measured::Gauge, + + /// Hadron storage scrubber status. + pub(crate) storage_controller_storage_scrub_status: + measured::CounterVec, + + /// Desired number of pageservers managed by the storage controller + pub(crate) storage_controller_num_pageservers_desired: measured::Gauge, + + /// Desired number of safekeepers managed by the storage controller + pub(crate) storage_controller_num_safekeeper_desired: measured::Gauge, + /* END HADRON */ } impl StorageControllerMetrics { @@ -173,6 +196,10 @@ impl Default for StorageControllerMetrics { .storage_controller_reconcile_complete .init_all_dense(); + metrics_group + .storage_controller_config_watcher_complete + .init_all_dense(); + Self { metrics_group, encoder: Mutex::new(measured::text::BufferedTextEncoder::new()), @@ -262,11 +289,48 @@ pub(crate) struct ReconcileLongRunningLabelGroup<'a> { pub(crate) sequence: &'a str, } +#[derive(measured::LabelGroup, Clone)] +#[label(set = StorageScrubberLabelGroupSet)] +pub(crate) struct StorageScrubberLabelGroup<'a> { + #[label(dynamic_with = lasso::ThreadedRodeo, default)] + pub(crate) tenant_id: &'a str, + #[label(dynamic_with = lasso::ThreadedRodeo, default)] + pub(crate) shard_number: &'a str, + #[label(dynamic_with = lasso::ThreadedRodeo, default)] + pub(crate) timeline_id: &'a str, + pub(crate) outcome: StorageScrubberOutcome, +} + +#[derive(FixedCardinalityLabel, Clone, Copy)] +pub(crate) enum StorageScrubberOutcome { + PSOk, + PSWarning, + PSError, + PSOrphan, + SKOk, + SKError, +} + +#[derive(measured::LabelGroup)] +#[label(set = ConfigWatcherCompleteLabelGroupSet)] +pub(crate) struct ConfigWatcherCompleteLabelGroup { + // Reuse the ReconcileOutcome from the SC's reconciliation 
metrics. + pub(crate) status: ReconcileOutcome, +} + #[derive(FixedCardinalityLabel, Clone, Copy)] pub(crate) enum ReconcileOutcome { + // Successfully reconciled everything. #[label(rename = "ok")] Success, + // Used by tenant-shard reconciler only. Reconciled pageserver state successfully, + // but failed to delivery the compute notificiation. This error is typically transient + // but if its occurance keeps increasing, it should be investigated. + #[label(rename = "ok_no_notify")] + SuccessNoNotify, + // We failed to reconcile some state and the reconcilation will be retried. Error, + // Reconciliation was cancelled. Cancel, } diff --git a/storage_controller/src/node.rs b/storage_controller/src/node.rs index 6642c72f3c..63c82b5682 100644 --- a/storage_controller/src/node.rs +++ b/storage_controller/src/node.rs @@ -51,6 +51,39 @@ pub(crate) struct Node { cancel: CancellationToken, } +#[allow(dead_code)] +const ONE_MILLION: i64 = 1000000; + +// Converts a pool ID to a large number that can be used to assign unique IDs to pods in StatefulSets. +/// For example, if pool_id is 1, then the pods have NodeIds 1000000, 1000001, 1000002, etc. +/// If pool_id is None, then the pods have NodeIds 0, 1, 2, etc. 
+#[allow(dead_code)] +pub fn transform_pool_id(pool_id: Option) -> i64 { + match pool_id { + Some(id) => (id as i64) * ONE_MILLION, + None => 0, + } +} + +#[allow(dead_code)] +pub fn get_pool_id_from_node_id(node_id: i64) -> i32 { + (node_id / ONE_MILLION) as i32 +} + +/// Example pod name: page-server-0-1, safe-keeper-1-0 +#[allow(dead_code)] +pub fn get_node_id_from_pod_name(pod_name: &str) -> anyhow::Result { + let parts: Vec<&str> = pod_name.split('-').collect(); + if parts.len() != 4 { + return Err(anyhow::anyhow!("Invalid pod name: {}", pod_name)); + } + let pool_id = parts[2].parse::()?; + let node_offset = parts[3].parse::()?; + let node_id = transform_pool_id(Some(pool_id)) + node_offset; + + Ok(NodeId(node_id as u64)) +} + /// When updating [`Node::availability`] we use this type to indicate to the caller /// whether/how they changed it. pub(crate) enum AvailabilityTransition { @@ -403,3 +436,25 @@ impl std::fmt::Debug for Node { write!(f, "{} ({})", self.id, self.listen_http_addr) } } + +#[cfg(test)] +mod tests { + use utils::id::NodeId; + + use crate::node::get_node_id_from_pod_name; + + #[test] + fn test_get_node_id_from_pod_name() { + let pod_name = "page-server-3-12"; + let node_id = get_node_id_from_pod_name(pod_name).unwrap(); + assert_eq!(node_id, NodeId(3000012)); + + let pod_name = "safe-keeper-1-0"; + let node_id = get_node_id_from_pod_name(pod_name).unwrap(); + assert_eq!(node_id, NodeId(1000000)); + + let pod_name = "invalid-pod-name"; + let result = get_node_id_from_pod_name(pod_name); + assert!(result.is_err()); + } +} diff --git a/storage_controller/src/pageserver_client.rs b/storage_controller/src/pageserver_client.rs index d6fe173eb3..9e829e252d 100644 --- a/storage_controller/src/pageserver_client.rs +++ b/storage_controller/src/pageserver_client.rs @@ -14,6 +14,8 @@ use reqwest::StatusCode; use utils::id::{NodeId, TenantId, TimelineId}; use utils::lsn::Lsn; +use crate::hadron_utils::TenantShardSizeMap; + /// Thin wrapper around 
[`pageserver_client::mgmt_api::Client`]. It allows the storage /// controller to collect metrics in a non-intrusive manner. #[derive(Debug, Clone)] @@ -86,6 +88,59 @@ impl PageserverClient { ) } + #[expect(dead_code)] + pub(crate) async fn tenant_timeline_compact( + &self, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, + force_image_layer_creation: bool, + wait_until_done: bool, + ) -> Result<()> { + measured_request!( + "tenant_timeline_compact", + crate::metrics::Method::Put, + &self.node_id_label, + self.inner + .tenant_timeline_compact( + tenant_shard_id, + timeline_id, + force_image_layer_creation, + true, + false, + wait_until_done, + ) + .await + ) + } + + /* BEGIN_HADRON */ + pub(crate) async fn tenant_timeline_describe( + &self, + tenant_shard_id: &TenantShardId, + timeline_id: &TimelineId, + ) -> Result { + measured_request!( + "tenant_timeline_describe", + crate::metrics::Method::Get, + &self.node_id_label, + self.inner + .tenant_timeline_describe(tenant_shard_id, timeline_id,) + .await + ) + } + + #[expect(dead_code)] + pub(crate) async fn list_tenant_visible_size(&self) -> Result { + measured_request!( + "list_tenant_visible_size", + crate::metrics::Method::Get, + &self.node_id_label, + self.inner.list_tenant_visible_size().await + ) + .map(TenantShardSizeMap::new) + } + /* END_HADRON */ + pub(crate) async fn tenant_scan_remote_storage( &self, tenant_id: TenantId, @@ -348,6 +403,16 @@ impl PageserverClient { ) } + #[expect(dead_code)] + pub(crate) async fn reset_alert_gauges(&self) -> Result<()> { + measured_request!( + "reset_alert_gauges", + crate::metrics::Method::Post, + &self.node_id_label, + self.inner.reset_alert_gauges().await + ) + } + pub(crate) async fn wait_lsn( &self, tenant_shard_id: TenantShardId, diff --git a/storage_controller/src/reconciler.rs b/storage_controller/src/reconciler.rs index a2fba0fa56..d1590ec75e 100644 --- a/storage_controller/src/reconciler.rs +++ b/storage_controller/src/reconciler.rs @@ -862,11 +862,11 
@@ impl Reconciler { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => { if refreshed { tracing::info!( - node_id=%node.get_id(), "Observed configuration correct after refresh. Notifying compute."); + node_id=%node.get_id(), "[Attached] Observed configuration correct after refresh. Notifying compute."); self.compute_notify().await?; } else { // Nothing to do - tracing::info!(node_id=%node.get_id(), "Observed configuration already correct."); + tracing::info!(node_id=%node.get_id(), "[Attached] Observed configuration already correct."); } } observed => { @@ -945,17 +945,17 @@ impl Reconciler { match self.observed.locations.get(&node.get_id()) { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => { // Nothing to do - tracing::info!(node_id=%node.get_id(), "Observed configuration already correct.") + tracing::info!(node_id=%node.get_id(), "[Secondary] Observed configuration already correct.") } _ => { // Only try and configure secondary locations on nodes that are available. This // allows the reconciler to "succeed" while some secondaries are offline (e.g. 
after // a node failure, where the failed node will have a secondary intent) if node.is_available() { - tracing::info!(node_id=%node.get_id(), "Observed configuration requires update."); + tracing::info!(node_id=%node.get_id(), "[Secondary] Observed configuration requires update."); changes.push((node.clone(), wanted_conf)) } else { - tracing::info!(node_id=%node.get_id(), "Skipping configuration as secondary, node is unavailable"); + tracing::info!(node_id=%node.get_id(), "[Secondary] Skipping configuration as secondary, node is unavailable"); self.observed .locations .insert(node.get_id(), ObservedStateLocation { conf: None }); @@ -1066,6 +1066,9 @@ impl Reconciler { } result } else { + tracing::info!( + "Compute notification is skipped because the tenant shard does not have an attached (primary) location" + ); Ok(()) } } diff --git a/storage_controller/src/schema.rs b/storage_controller/src/schema.rs index 312f7e0b0e..f3dcdaf798 100644 --- a/storage_controller/src/schema.rs +++ b/storage_controller/src/schema.rs @@ -13,6 +13,24 @@ diesel::table! { } } +diesel::table! { + hadron_safekeepers (sk_node_id) { + sk_node_id -> Int8, + listen_http_addr -> Varchar, + listen_http_port -> Int4, + listen_pg_addr -> Varchar, + listen_pg_port -> Int4, + } +} + +diesel::table! { + hadron_timeline_safekeepers (timeline_id, sk_node_id) { + timeline_id -> Varchar, + sk_node_id -> Int8, + legacy_endpoint_id -> Nullable, + } +} + diesel::table! { metadata_health (tenant_id, shard_number, shard_count) { tenant_id -> Varchar, @@ -105,6 +123,8 @@ diesel::table! 
{ diesel::allow_tables_to_appear_in_same_query!( controllers, + hadron_safekeepers, + hadron_timeline_safekeepers, metadata_health, nodes, safekeeper_timeline_pending_ops, diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 9360225396..71186076ec 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -32,7 +32,7 @@ use pageserver_api::controller_api::{ ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse, SkSchedulingPolicy, TenantCreateRequest, TenantCreateResponse, TenantCreateResponseShard, TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest, - TenantShardMigrateRequest, TenantShardMigrateResponse, + TenantShardMigrateRequest, TenantShardMigrateResponse, TenantTimelineDescribeResponse, }; use pageserver_api::models::{ self, DetachBehavior, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease, @@ -60,6 +60,7 @@ use tokio::sync::mpsc::error::TrySendError; use tokio_util::sync::CancellationToken; use tracing::{Instrument, debug, error, info, info_span, instrument, warn}; use utils::completion::Barrier; +use utils::env; use utils::generation::Generation; use utils::id::{NodeId, TenantId, TimelineId}; use utils::lsn::Lsn; @@ -210,9 +211,9 @@ pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128; pub const PRIORITY_RECONCILER_CONCURRENCY_DEFAULT: usize = 256; pub const SAFEKEEPER_RECONCILER_CONCURRENCY_DEFAULT: usize = 32; -// Number of consecutive reconciliation errors, occured for one shard, +// Number of consecutive reconciliations that have occurred for one shard, // after which the shard is ignored when considering to run optimizations. -const MAX_CONSECUTIVE_RECONCILIATION_ERRORS: usize = 5; +const MAX_CONSECUTIVE_RECONCILES: usize = 10; // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately. 
// This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly @@ -483,6 +484,9 @@ pub struct Config { /// When set, actively checks and initiates heatmap downloads/uploads. pub kick_secondary_downloads: bool, + + /// Timeout used for HTTP client of split requests. [`Duration::MAX`] if None. + pub shard_split_request_timeout: Duration, } impl From for ApiError { @@ -694,47 +698,70 @@ pub(crate) enum ReconcileResultRequest { } #[derive(Clone)] -struct MutationLocation { - node: Node, - generation: Generation, +pub(crate) struct MutationLocation { + pub(crate) node: Node, + pub(crate) generation: Generation, } #[derive(Clone)] -struct ShardMutationLocations { - latest: MutationLocation, - other: Vec, +pub(crate) struct ShardMutationLocations { + pub(crate) latest: MutationLocation, + pub(crate) other: Vec, } #[derive(Default, Clone)] -struct TenantMutationLocations(BTreeMap); +pub(crate) struct TenantMutationLocations(pub BTreeMap); struct ReconcileAllResult { spawned_reconciles: usize, - keep_failing_reconciles: usize, + stuck_reconciles: usize, has_delayed_reconciles: bool, } impl ReconcileAllResult { fn new( spawned_reconciles: usize, - keep_failing_reconciles: usize, + stuck_reconciles: usize, has_delayed_reconciles: bool, ) -> Self { assert!( - spawned_reconciles >= keep_failing_reconciles, - "It is impossible to have more keep-failing reconciles than spawned reconciles" + spawned_reconciles >= stuck_reconciles, + "It is impossible to have less spawned reconciles than stuck reconciles" ); Self { spawned_reconciles, - keep_failing_reconciles, + stuck_reconciles, has_delayed_reconciles, } } /// We can run optimizations only if we don't have any delayed reconciles and - /// all spawned reconciles are also keep-failing reconciles. + /// all spawned reconciles are also stuck reconciles. 
fn can_run_optimizations(&self) -> bool { - !self.has_delayed_reconciles && self.spawned_reconciles == self.keep_failing_reconciles + !self.has_delayed_reconciles && self.spawned_reconciles == self.stuck_reconciles + } +} + +enum TenantIdOrShardId { + TenantId(TenantId), + TenantShardId(TenantShardId), +} + +impl TenantIdOrShardId { + fn tenant_id(&self) -> TenantId { + match self { + TenantIdOrShardId::TenantId(tenant_id) => *tenant_id, + TenantIdOrShardId::TenantShardId(tenant_shard_id) => tenant_shard_id.tenant_id, + } + } + + fn matches(&self, tenant_shard_id: &TenantShardId) -> bool { + match self { + TenantIdOrShardId::TenantId(tenant_id) => tenant_shard_id.tenant_id == *tenant_id, + TenantIdOrShardId::TenantShardId(this_tenant_shard_id) => { + this_tenant_shard_id == tenant_shard_id + } + } } } @@ -1478,7 +1505,6 @@ impl Service { match result.result { Ok(()) => { - tenant.consecutive_errors_count = 0; tenant.apply_observed_deltas(deltas); tenant.waiter.advance(result.sequence); } @@ -1497,8 +1523,6 @@ impl Service { } } - tenant.consecutive_errors_count = tenant.consecutive_errors_count.saturating_add(1); - // Ordering: populate last_error before advancing error_seq, // so that waiters will see the correct error after waiting. tenant.set_last_error(result.sequence, e); @@ -1510,6 +1534,8 @@ impl Service { } } + tenant.consecutive_reconciles_count = tenant.consecutive_reconciles_count.saturating_add(1); + // If we just finished detaching all shards for a tenant, it might be time to drop it from memory. 
if tenant.policy == PlacementPolicy::Detached { // We may only drop a tenant from memory while holding the exclusive lock on the tenant ID: this protects us @@ -1677,7 +1703,21 @@ impl Service { .collect::>>()?; let safekeepers: HashMap = safekeepers.into_iter().map(|n| (n.get_id(), n)).collect(); - tracing::info!("Loaded {} safekeepers from database.", safekeepers.len()); + let count_policy = |policy| { + safekeepers + .iter() + .filter(|sk| sk.1.scheduling_policy() == policy) + .count() + }; + let active_sk_count = count_policy(SkSchedulingPolicy::Active); + let activating_sk_count = count_policy(SkSchedulingPolicy::Activating); + let pause_sk_count = count_policy(SkSchedulingPolicy::Pause); + let decom_sk_count = count_policy(SkSchedulingPolicy::Decomissioned); + tracing::info!( + "Loaded {} safekeepers from database. Active {active_sk_count}, activating {activating_sk_count}, \ + paused {pause_sk_count}, decomissioned {decom_sk_count}.", + safekeepers.len() + ); metrics::METRICS_REGISTRY .metrics_group .storage_controller_safekeeper_nodes @@ -1969,6 +2009,17 @@ impl Service { } }); + // Check that there is enough safekeepers configured that we can create new timelines + let test_sk_res_str = match this.safekeepers_for_new_timeline().await { + Ok(v) => format!("Ok({v:?})"), + Err(v) => format!("Err({v:})"), + }; + tracing::info!( + timeline_safekeeper_count = config.timeline_safekeeper_count, + timelines_onto_safekeepers = config.timelines_onto_safekeepers, + "viability test result (test timeline creation on safekeepers): {test_sk_res_str}", + ); + Ok(this) } @@ -4406,7 +4457,7 @@ impl Service { .await; let mut failed = 0; - for (tid, result) in targeted_tenant_shards.iter().zip(results.into_iter()) { + for (tid, (_, result)) in targeted_tenant_shards.iter().zip(results.into_iter()) { match result { Ok(ok) => { if tid.is_shard_zero() { @@ -4723,6 +4774,38 @@ impl Service { Ok(()) } + pub(crate) fn is_tenant_not_found_error(body: &str, tenant_id: TenantId) -> 
bool { + body.contains(&format!("tenant {tenant_id}")) + } + + fn process_result_and_passthrough_errors( + &self, + tenant_id: TenantId, + results: Vec<(Node, Result)>, + ) -> Result, ApiError> { + let mut processed_results: Vec<(Node, T)> = Vec::with_capacity(results.len()); + for (node, res) in results { + match res { + Ok(res) => processed_results.push((node, res)), + Err(mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, body)) + if Self::is_tenant_not_found_error(&body, tenant_id) => + { + // If there's a tenant not found, we are still in the process of attaching the tenant. + // Return 503 so that the client can retry. + return Err(ApiError::ResourceUnavailable( + format!( + "Timeline is not attached to the pageserver {} yet, please retry", + node.get_id() + ) + .into(), + )); + } + Err(e) => return Err(passthrough_api_error(&node, e)), + } + } + Ok(processed_results) + } + pub(crate) async fn tenant_timeline_lsn_lease( &self, tenant_id: TenantId, @@ -4736,60 +4819,48 @@ impl Service { ) .await; - let targets = { - let locked = self.inner.read().unwrap(); - let mut targets = Vec::new(); + self.tenant_remote_mutation(tenant_id, |locations| async move { + if locations.0.is_empty() { + return Err(ApiError::NotFound( + anyhow::anyhow!("Tenant not found").into(), + )); + } - // If the request got an unsharded tenant id, then apply - // the operation to all shards. Otherwise, apply it to a specific shard. - let shards_range = TenantShardId::tenant_range(tenant_id); + let results = self + .tenant_for_shards_api( + locations + .0 + .iter() + .map(|(tenant_shard_id, ShardMutationLocations { latest, .. 
})| { + (*tenant_shard_id, latest.node.clone()) + }) + .collect(), + |tenant_shard_id, client| async move { + client + .timeline_lease_lsn(tenant_shard_id, timeline_id, lsn) + .await + }, + 1, + 1, + SHORT_RECONCILE_TIMEOUT, + &self.cancel, + ) + .await; - for (tenant_shard_id, shard) in locked.tenants.range(shards_range) { - if let Some(node_id) = shard.intent.get_attached() { - let node = locked - .nodes - .get(node_id) - .expect("Pageservers may not be deleted while referenced"); - - targets.push((*tenant_shard_id, node.clone())); + let leases = self.process_result_and_passthrough_errors(tenant_id, results)?; + let mut valid_until = None; + for (_, lease) in leases { + if let Some(ref mut valid_until) = valid_until { + *valid_until = std::cmp::min(*valid_until, lease.valid_until); + } else { + valid_until = Some(lease.valid_until); } } - targets - }; - - let res = self - .tenant_for_shards_api( - targets, - |tenant_shard_id, client| async move { - client - .timeline_lease_lsn(tenant_shard_id, timeline_id, lsn) - .await - }, - 1, - 1, - SHORT_RECONCILE_TIMEOUT, - &self.cancel, - ) - .await; - - let mut valid_until = None; - for r in res { - match r { - Ok(lease) => { - if let Some(ref mut valid_until) = valid_until { - *valid_until = std::cmp::min(*valid_until, lease.valid_until); - } else { - valid_until = Some(lease.valid_until); - } - } - Err(e) => { - return Err(ApiError::InternalServerError(anyhow::anyhow!(e))); - } - } - } - Ok(LsnLease { - valid_until: valid_until.unwrap_or_else(SystemTime::now), + Ok(LsnLease { + valid_until: valid_until.unwrap_or_else(SystemTime::now), + }) }) + .await? 
} pub(crate) async fn tenant_timeline_download_heatmap_layers( @@ -4897,7 +4968,7 @@ impl Service { max_retries: u32, timeout: Duration, cancel: &CancellationToken, - ) -> Vec> + ) -> Vec<(Node, mgmt_api::Result)> where O: Fn(TenantShardId, PageserverClient) -> F + Copy, F: std::future::Future>, @@ -4918,16 +4989,16 @@ impl Service { cancel, ) .await; - (idx, r) + (idx, node, r) }); } - while let Some((idx, r)) = futs.next().await { - results.push((idx, r.unwrap_or(Err(mgmt_api::Error::Cancelled)))); + while let Some((idx, node, r)) = futs.next().await { + results.push((idx, node, r.unwrap_or(Err(mgmt_api::Error::Cancelled)))); } - results.sort_by_key(|(idx, _)| *idx); - results.into_iter().map(|(_, r)| r).collect() + results.sort_by_key(|(idx, _, _)| *idx); + results.into_iter().map(|(_, node, r)| (node, r)).collect() } /// Helper for safely working with the shards in a tenant remotely on pageservers, for example @@ -4936,11 +5007,37 @@ impl Service { /// - Looks up the shards and the nodes where they were most recently attached /// - Guarantees that after the inner function returns, the shards' generations haven't moved on: this /// ensures that the remote operation acted on the most recent generation, and is therefore durable. 
- async fn tenant_remote_mutation( + pub(crate) async fn tenant_remote_mutation( &self, tenant_id: TenantId, op: O, ) -> Result + where + O: FnOnce(TenantMutationLocations) -> F, + F: std::future::Future, + { + self.tenant_remote_mutation_inner(TenantIdOrShardId::TenantId(tenant_id), op) + .await + } + + pub(crate) async fn tenant_shard_remote_mutation( + &self, + tenant_shard_id: TenantShardId, + op: O, + ) -> Result + where + O: FnOnce(TenantMutationLocations) -> F, + F: std::future::Future, + { + self.tenant_remote_mutation_inner(TenantIdOrShardId::TenantShardId(tenant_shard_id), op) + .await + } + + async fn tenant_remote_mutation_inner( + &self, + tenant_id_or_shard_id: TenantIdOrShardId, + op: O, + ) -> Result where O: FnOnce(TenantMutationLocations) -> F, F: std::future::Future, @@ -4952,7 +5049,13 @@ impl Service { // run concurrently with reconciliations, and it is not guaranteed that the node we find here // will still be the latest when we're done: we will check generations again at the end of // this function to handle that. - let generations = self.persistence.tenant_generations(tenant_id).await?; + let generations = self + .persistence + .tenant_generations(tenant_id_or_shard_id.tenant_id()) + .await? + .into_iter() + .filter(|i| tenant_id_or_shard_id.matches(&i.tenant_shard_id)) + .collect::>(); if generations .iter() @@ -4966,9 +5069,14 @@ impl Service { // One or more shards has not been attached to a pageserver. 
Check if this is because it's configured // to be detached (409: caller should give up), or because it's meant to be attached but isn't yet (503: caller should retry) let locked = self.inner.read().unwrap(); - for (shard_id, shard) in - locked.tenants.range(TenantShardId::tenant_range(tenant_id)) - { + let tenant_shards = locked + .tenants + .range(TenantShardId::tenant_range( + tenant_id_or_shard_id.tenant_id(), + )) + .filter(|(shard_id, _)| tenant_id_or_shard_id.matches(shard_id)) + .collect::>(); + for (shard_id, shard) in tenant_shards { match shard.policy { PlacementPolicy::Attached(_) => { // This shard is meant to be attached: the caller is not wrong to try and @@ -5078,7 +5186,14 @@ impl Service { // Post-check: are all the generations of all the shards the same as they were initially? This proves that // our remote operation executed on the latest generation and is therefore persistent. { - let latest_generations = self.persistence.tenant_generations(tenant_id).await?; + let latest_generations = self + .persistence + .tenant_generations(tenant_id_or_shard_id.tenant_id()) + .await? + .into_iter() + .filter(|i| tenant_id_or_shard_id.matches(&i.tenant_shard_id)) + .collect::>(); + if latest_generations .into_iter() .map( @@ -5150,6 +5265,9 @@ impl Service { match res { Ok(ok) => Ok(ok), Err(mgmt_api::Error::ApiError(StatusCode::CONFLICT, _)) => Ok(StatusCode::CONFLICT), + Err(mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg)) if msg.contains("Requested tenant is missing") => { + Err(ApiError::ResourceUnavailable("Tenant migration in progress".into())) + }, Err(mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg)) => Err(ApiError::ResourceUnavailable(msg.into())), Err(e) => { Err( @@ -5204,6 +5322,8 @@ impl Service { status_code } /// When you know the TenantId but not a specific shard, and would like to get the node holding shard 0. + /// + /// Returns the node, tenant shard id, and whether it is consistent with the observed state. 
pub(crate) async fn tenant_shard0_node( &self, tenant_id: TenantId, @@ -5230,6 +5350,8 @@ impl Service { /// When you need to send an HTTP request to the pageserver that holds a shard of a tenant, this /// function looks up and returns node. If the shard isn't found, returns Err(ApiError::NotFound) + /// + /// Returns the intent node and whether it is consistent with the observed state. pub(crate) async fn tenant_shard_node( &self, tenant_shard_id: TenantShardId, @@ -5297,7 +5419,7 @@ impl Service { "Shard refers to nonexistent node" ))); }; - + // As a reconciliation is in flight, we do not have the observed state yet, and therefore we assume it is always inconsistent. Ok(node.clone()) } @@ -5430,6 +5552,92 @@ impl Service { .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into())) } + /* BEGIN_HADRON */ + pub(crate) async fn tenant_timeline_describe( + &self, + tenant_id: TenantId, + timeline_id: TimelineId, + ) -> Result { + self.tenant_remote_mutation(tenant_id, |locations| async move { + if locations.0.is_empty() { + return Err(ApiError::NotFound( + anyhow::anyhow!("Tenant not found").into(), + )); + }; + + let locations: Vec<(TenantShardId, Node)> = locations + .0 + .iter() + .map(|t| (*t.0, t.1.latest.node.clone())) + .collect(); + let mut futs = FuturesUnordered::new(); + + for (shard_id, node) in locations { + futs.push({ + async move { + let result = node + .with_client_retries( + |client| async move { + client + .tenant_timeline_describe(&shard_id, &timeline_id) + .await + }, + &self.http_client, + &self.config.pageserver_jwt_token, + 3, + 3, + Duration::from_secs(30), + &self.cancel, + ) + .await; + (result, shard_id, node.get_id()) + } + }); + } + + let mut results: Vec = Vec::new(); + while let Some((result, tenant_shard_id, node_id)) = futs.next().await { + match result { + Some(Ok(timeline_info)) => results.push(timeline_info), + Some(Err(e)) => { + tracing::warn!( + "Failed to describe tenant {} timeline {} for 
pageserver {}: {e}", + tenant_shard_id, + timeline_id, + node_id, + ); + return Err(ApiError::ResourceUnavailable(format!("{e}").into())); + } + None => return Err(ApiError::Cancelled), + } + } + let mut image_consistent_lsn: Option = Some(Lsn::MAX); + for timeline_info in &results { + if let Some(tline_image_consistent_lsn) = timeline_info.image_consistent_lsn { + image_consistent_lsn = Some(std::cmp::min( + image_consistent_lsn.unwrap(), + tline_image_consistent_lsn, + )); + } else { + tracing::warn!( + "Timeline {} on shard {} does not have image consistent lsn", + timeline_info.timeline_id, + timeline_info.tenant_id + ); + image_consistent_lsn = None; + break; + } + } + + Ok(TenantTimelineDescribeResponse { + shards: results, + image_consistent_lsn, + }) + }) + .await? + } + /* END_HADRON */ + /// limit & offset are pagination parameters. Since we are walking an in-memory HashMap, `offset` does not /// avoid traversing data, it just avoid returning it. This is suitable for our purposes, since our in memory /// maps are small enough to traverse fast, our pagination is just to avoid serializing huge JSON responses @@ -5840,7 +6048,7 @@ impl Service { return; } - for result in self + for (_, result) in self .tenant_for_shards_api( attached, |tenant_shard_id, client| async move { @@ -5859,7 +6067,7 @@ impl Service { } } - for result in self + for (_, result) in self .tenant_for_shards_api( secondary, |tenant_shard_id, client| async move { @@ -6261,18 +6469,39 @@ impl Service { // TODO: issue split calls concurrently (this only matters once we're splitting // N>1 shards into M shards -- initially we're usually splitting 1 shard into N). + // HADRON: set a timeout for splitting individual shards on page servers. + // Currently we do not perform any retry because it's not clear if page server can handle + // partially split shards correctly. 
+ let shard_split_timeout = + if let Some(env::DeploymentMode::Local) = env::get_deployment_mode() { + Duration::from_secs(30) + } else { + self.config.shard_split_request_timeout + }; + let mut http_client_builder = reqwest::ClientBuilder::new() + .pool_max_idle_per_host(0) + .timeout(shard_split_timeout); + + for ssl_ca_cert in &self.config.ssl_ca_certs { + http_client_builder = http_client_builder.add_root_certificate(ssl_ca_cert.clone()); + } + let http_client = http_client_builder + .build() + .expect("Failed to construct HTTP client"); for target in &targets { let ShardSplitTarget { parent_id, node, child_ids, } = target; + let client = PageserverClient::new( node.get_id(), - self.http_client.clone(), + http_client.clone(), node.base_url(), self.config.pageserver_jwt_token.as_deref(), ); + let response = client .tenant_shard_split( *parent_id, @@ -7165,6 +7394,7 @@ impl Service { self: &Arc, node_id: NodeId, policy_on_start: NodeSchedulingPolicy, + force: bool, cancel: CancellationToken, ) -> Result<(), OperationError> { let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal).build(); @@ -7172,23 +7402,27 @@ impl Service { let mut waiters: Vec = Vec::new(); let mut tid_iter = create_shared_shard_iterator(self.clone()); + let reset_node_policy_on_cancel = || async { + match self + .node_configure(node_id, None, Some(policy_on_start)) + .await + { + Ok(()) => OperationError::Cancelled, + Err(err) => { + OperationError::FinalizeError( + format!( + "Failed to finalise delete cancel of {} by setting scheduling policy to {}: {}", + node_id, String::from(policy_on_start), err + ) + .into(), + ) + } + } + }; + while !tid_iter.finished() { if cancel.is_cancelled() { - match self - .node_configure(node_id, None, Some(policy_on_start)) - .await - { - Ok(()) => return Err(OperationError::Cancelled), - Err(err) => { - return Err(OperationError::FinalizeError( - format!( - "Failed to finalise delete cancel of {} by setting scheduling policy to {}: 
{}", - node_id, String::from(policy_on_start), err - ) - .into(), - )); - } - } + return Err(reset_node_policy_on_cancel().await); } operation_utils::validate_node_state( @@ -7208,6 +7442,12 @@ impl Service { let mut locked = self.inner.write().unwrap(); let (nodes, tenants, scheduler) = locked.parts_mut(); + // Calculate a schedule context here to avoid borrow checker issues. + let mut schedule_context = ScheduleContext::default(); + for (_, shard) in tenants.range(TenantShardId::tenant_range(tid.tenant_id)) { + schedule_context.avoid(&shard.intent.all_pageservers()); + } + let tenant_shard = match tenants.get_mut(&tid) { Some(tenant_shard) => tenant_shard, None => { @@ -7233,9 +7473,6 @@ impl Service { } if tenant_shard.deref_node(node_id) { - // TODO(ephemeralsad): we should process all shards in a tenant at once, so - // we can avoid settling the tenant unevenly. - let mut schedule_context = ScheduleContext::new(ScheduleMode::Normal); if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) { tracing::error!( "Refusing to delete node, shard {} can't be rescheduled: {e}", @@ -7254,8 +7491,18 @@ impl Service { nodes, reconciler_config, ); - if let Some(some) = waiter { - waiters.push(some); + + if force { + // Here we remove an existing observed location for the node we're removing, and it will + // not be re-added by a reconciler's completion because we filter out removed nodes in + // process_result. + // + // Note that we update the shard's observed state _after_ calling maybe_configured_reconcile_shard: + // that means any reconciles we spawned will know about the node we're deleting, + // enabling them to do live migrations if it's still online. 
+ tenant_shard.observed.locations.remove(&node_id); + } else if let Some(waiter) = waiter { + waiters.push(waiter); } } } @@ -7269,21 +7516,7 @@ impl Service { while !waiters.is_empty() { if cancel.is_cancelled() { - match self - .node_configure(node_id, None, Some(policy_on_start)) - .await - { - Ok(()) => return Err(OperationError::Cancelled), - Err(err) => { - return Err(OperationError::FinalizeError( - format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to {}: {}", - node_id, String::from(policy_on_start), err - ) - .into(), - )); - } - } + return Err(reset_node_policy_on_cancel().await); } tracing::info!("Awaiting {} pending delete reconciliations", waiters.len()); @@ -7293,6 +7526,12 @@ impl Service { .await; } + let pf = pausable_failpoint!("delete-node-after-reconciles-spawned", &cancel); + if pf.is_err() { + // An error from pausable_failpoint indicates the cancel token was triggered. + return Err(reset_node_policy_on_cancel().await); + } + self.persistence .set_tombstone(node_id) .await @@ -7888,6 +8127,7 @@ impl Service { pub(crate) async fn start_node_delete( self: &Arc, node_id: NodeId, + force: bool, ) -> Result<(), ApiError> { let (ongoing_op, node_policy, schedulable_nodes_count) = { let locked = self.inner.read().unwrap(); @@ -7957,7 +8197,7 @@ impl Service { tracing::info!("Delete background operation starting"); let res = service - .delete_node(node_id, policy_on_start, cancel) + .delete_node(node_id, policy_on_start, force, cancel) .await; match res { Ok(()) => { @@ -8409,7 +8649,7 @@ impl Service { // This function is an efficient place to update lazy statistics, since we are walking // all tenants. 
let mut pending_reconciles = 0; - let mut keep_failing_reconciles = 0; + let mut stuck_reconciles = 0; let mut az_violations = 0; // If we find any tenants to drop from memory, stash them to offload after @@ -8445,30 +8685,32 @@ impl Service { // Eventual consistency: if an earlier reconcile job failed, and the shard is still // dirty, spawn another one - let consecutive_errors_count = shard.consecutive_errors_count; if self .maybe_reconcile_shard(shard, &pageservers, ReconcilerPriority::Normal) .is_some() { spawned_reconciles += 1; - // Count shards that are keep-failing. We still want to reconcile them - // to avoid a situation where a shard is stuck. - // But we don't want to consider them when deciding to run optimizations. - if consecutive_errors_count >= MAX_CONSECUTIVE_RECONCILIATION_ERRORS { + if shard.consecutive_reconciles_count >= MAX_CONSECUTIVE_RECONCILES { + // Count shards that are stuck, butwe still want to reconcile them. + // We don't want to consider them when deciding to run optimizations. tracing::warn!( tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(), - "Shard reconciliation is keep-failing: {} errors", - consecutive_errors_count + "Shard reconciliation is stuck: {} consecutive launches", + shard.consecutive_reconciles_count ); - keep_failing_reconciles += 1; + stuck_reconciles += 1; + } + } else { + if shard.delayed_reconcile { + // Shard wanted to reconcile but for some reason couldn't. + pending_reconciles += 1; } - } else if shard.delayed_reconcile { - // Shard wanted to reconcile but for some reason couldn't. - pending_reconciles += 1; - } + // Reset the counter when we don't need to launch a reconcile. + shard.consecutive_reconciles_count = 0; + } // If this tenant is detached, try dropping it from memory. 
This is usually done // proactively in [`Self::process_results`], but we do it here to handle the edge // case where a reconcile completes while someone else is holding an op lock for the tenant. @@ -8504,14 +8746,10 @@ impl Service { metrics::METRICS_REGISTRY .metrics_group - .storage_controller_keep_failing_reconciles - .set(keep_failing_reconciles as i64); + .storage_controller_stuck_reconciles + .set(stuck_reconciles as i64); - ReconcileAllResult::new( - spawned_reconciles, - keep_failing_reconciles, - has_delayed_reconciles, - ) + ReconcileAllResult::new(spawned_reconciles, stuck_reconciles, has_delayed_reconciles) } /// `optimize` in this context means identifying shards which have valid scheduled locations, but @@ -8743,7 +8981,7 @@ impl Service { ) .await; - for ((tenant_shard_id, node, optimization), secondary_status) in + for ((tenant_shard_id, node, optimization), (_, secondary_status)) in want_secondary_status.into_iter().zip(results.into_iter()) { match secondary_status { diff --git a/storage_controller/src/service/safekeeper_service.rs b/storage_controller/src/service/safekeeper_service.rs index 90ea48dd7b..7521d7bd86 100644 --- a/storage_controller/src/service/safekeeper_service.rs +++ b/storage_controller/src/service/safekeeper_service.rs @@ -25,7 +25,8 @@ use pageserver_api::models::{SafekeeperInfo, SafekeepersInfo, TimelineInfo}; use safekeeper_api::PgVersionId; use safekeeper_api::membership::{self, MemberSet, SafekeeperGeneration}; use safekeeper_api::models::{ - PullTimelineRequest, TimelineMembershipSwitchRequest, TimelineMembershipSwitchResponse, + PullTimelineRequest, TimelineLocateResponse, TimelineMembershipSwitchRequest, + TimelineMembershipSwitchResponse, }; use safekeeper_api::{INITIAL_TERM, Term}; use safekeeper_client::mgmt_api; @@ -37,21 +38,14 @@ use utils::lsn::Lsn; use super::Service; -#[derive(serde::Serialize, serde::Deserialize, Clone)] -pub struct TimelineLocateResponse { - pub generation: SafekeeperGeneration, - pub sk_set: 
Vec, - pub new_sk_set: Option>, -} - impl Service { - fn make_member_set(safekeepers: &[Safekeeper]) -> Result { + fn make_member_set(safekeepers: &[Safekeeper]) -> Result { let members = safekeepers .iter() .map(|sk| sk.get_safekeeper_id()) .collect::>(); - MemberSet::new(members).map_err(ApiError::InternalServerError) + MemberSet::new(members) } fn get_safekeepers(&self, ids: &[i64]) -> Result, ApiError> { @@ -86,7 +80,7 @@ impl Service { ) -> Result, ApiError> { let safekeepers = self.get_safekeepers(&timeline_persistence.sk_set)?; - let mset = Self::make_member_set(&safekeepers)?; + let mset = Self::make_member_set(&safekeepers).map_err(ApiError::InternalServerError)?; let mconf = safekeeper_api::membership::Configuration::new(mset); let req = safekeeper_api::models::TimelineCreateRequest { @@ -914,13 +908,13 @@ impl Service { // so it isn't counted toward the quorum. if let Some(min_position) = min_position { if let Ok(ok_res) = &res { - if (ok_res.term, ok_res.flush_lsn) < min_position { + if (ok_res.last_log_term, ok_res.flush_lsn) < min_position { // Use Error::Timeout to make this error retriable. res = Err(mgmt_api::Error::Timeout( format!( "safekeeper {} returned position {:?} which is less than minimum required position {:?}", client.node_id_label(), - (ok_res.term, ok_res.flush_lsn), + (ok_res.last_log_term, ok_res.flush_lsn), min_position ) )); @@ -1111,6 +1105,26 @@ impl Service { } } + if new_sk_set.is_empty() { + return Err(ApiError::BadRequest(anyhow::anyhow!( + "new safekeeper set is empty" + ))); + } + + if new_sk_set.len() < self.config.timeline_safekeeper_count { + return Err(ApiError::BadRequest(anyhow::anyhow!( + "new safekeeper set must have at least {} safekeepers", + self.config.timeline_safekeeper_count + ))); + } + + let new_sk_set_i64 = new_sk_set.iter().map(|id| id.0 as i64).collect::>(); + let new_safekeepers = self.get_safekeepers(&new_sk_set_i64)?; + // Construct new member set in advance to validate it. + // E.g. 
validates that there is no duplicate safekeepers. + let new_sk_member_set = + Self::make_member_set(&new_safekeepers).map_err(ApiError::BadRequest)?; + // TODO(diko): per-tenant lock is too wide. Consider introducing per-timeline locks. let _tenant_lock = trace_shared_lock( &self.tenant_op_locks, @@ -1141,6 +1155,18 @@ impl Service { .map(|&id| NodeId(id as u64)) .collect::>(); + // Validate that we are not migrating to a decomissioned safekeeper. + for sk in new_safekeepers.iter() { + if !cur_sk_set.contains(&sk.get_id()) + && sk.scheduling_policy() == SkSchedulingPolicy::Decomissioned + { + return Err(ApiError::BadRequest(anyhow::anyhow!( + "safekeeper {} is decomissioned", + sk.get_id() + ))); + } + } + tracing::info!( ?cur_sk_set, ?new_sk_set, @@ -1183,11 +1209,8 @@ impl Service { } let cur_safekeepers = self.get_safekeepers(&timeline.sk_set)?; - let cur_sk_member_set = Self::make_member_set(&cur_safekeepers)?; - - let new_sk_set_i64 = new_sk_set.iter().map(|id| id.0 as i64).collect::>(); - let new_safekeepers = self.get_safekeepers(&new_sk_set_i64)?; - let new_sk_member_set = Self::make_member_set(&new_safekeepers)?; + let cur_sk_member_set = + Self::make_member_set(&cur_safekeepers).map_err(ApiError::InternalServerError)?; let joint_config = membership::Configuration { generation, @@ -1216,7 +1239,7 @@ impl Service { let mut sync_position = (INITIAL_TERM, Lsn::INVALID); for res in results.into_iter().flatten() { - let sk_position = (res.term, res.flush_lsn); + let sk_position = (res.last_log_term, res.flush_lsn); if sync_position < sk_position { sync_position = sk_position; } diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index 0bfca5385e..f60378470e 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -131,14 +131,16 @@ pub(crate) struct TenantShard { #[serde(serialize_with = "read_last_error")] pub(crate) last_error: std::sync::Arc>>>, - /// Number of consecutive 
reconciliation errors that have occurred for this shard. + /// Amount of consecutive [`crate::service::Service::reconcile_all`] iterations that have been + /// scheduled a reconciliation for this shard. /// - /// When this count reaches MAX_CONSECUTIVE_RECONCILIATION_ERRORS, the tenant shard - /// will be countered as keep-failing in `reconcile_all` calculations. This will lead to - /// allowing optimizations to run even with some failing shards. + /// If this reaches `MAX_CONSECUTIVE_RECONCILES`, the shard is considered "stuck" and will be + /// ignored when deciding whether optimizations can run. This includes both successful and failed + /// reconciliations. /// - /// The counter is reset to 0 after a successful reconciliation. - pub(crate) consecutive_errors_count: usize, + /// Incremented in [`crate::service::Service::process_result`], and reset to 0 when + /// [`crate::service::Service::reconcile_all`] determines no reconciliation is needed for this shard. + pub(crate) consecutive_reconciles_count: usize, /// If we have a pending compute notification that for some reason we weren't able to send, /// set this to true. If this is set, calls to [`Self::get_reconcile_needed`] will return Yes @@ -603,7 +605,7 @@ impl TenantShard { waiter: Arc::new(SeqWait::new(Sequence(0))), error_waiter: Arc::new(SeqWait::new(Sequence(0))), last_error: Arc::default(), - consecutive_errors_count: 0, + consecutive_reconciles_count: 0, pending_compute_notification: false, scheduling_policy: ShardSchedulingPolicy::default(), preferred_node: None, @@ -1272,7 +1274,9 @@ impl TenantShard { } /// Return true if the optimization was really applied: it will not be applied if the optimization's - /// sequence is behind this tenant shard's + /// sequence is behind this tenant shard's or if the intent state proposed by the optimization + /// is not compatible with the current intent state. 
The later may happen when the background + /// reconcile loops runs concurrently with HTTP driven optimisations. pub(crate) fn apply_optimization( &mut self, scheduler: &mut Scheduler, @@ -1282,6 +1286,15 @@ impl TenantShard { return false; } + if !self.validate_optimization(&optimization) { + tracing::info!( + "Skipping optimization for {} because it does not match current intent: {:?}", + self.tenant_shard_id, + optimization, + ); + return false; + } + metrics::METRICS_REGISTRY .metrics_group .storage_controller_schedule_optimization @@ -1322,6 +1335,34 @@ impl TenantShard { true } + /// Check that the desired modifications to the intent state are compatible with + /// the current intent state + fn validate_optimization(&self, optimization: &ScheduleOptimization) -> bool { + match optimization.action { + ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment { + old_attached_node_id, + new_attached_node_id, + }) => { + self.intent.attached == Some(old_attached_node_id) + && self.intent.secondary.contains(&new_attached_node_id) + } + ScheduleOptimizationAction::ReplaceSecondary(ReplaceSecondary { + old_node_id: _, + new_node_id, + }) => { + // It's legal to remove a secondary that is not present in the intent state + !self.intent.secondary.contains(&new_node_id) + } + ScheduleOptimizationAction::CreateSecondary(new_node_id) => { + !self.intent.secondary.contains(&new_node_id) + } + ScheduleOptimizationAction::RemoveSecondary(_) => { + // It's legal to remove a secondary that is not present in the intent state + true + } + } + } + /// When a shard has several secondary locations, we need to pick one in situations where /// we promote one of them to an attached location: /// - When draining a node for restart @@ -1570,7 +1611,13 @@ impl TenantShard { // Update result counter let outcome_label = match &result { - Ok(_) => ReconcileOutcome::Success, + Ok(_) => { + if reconciler.compute_notify_failure { + ReconcileOutcome::SuccessNoNotify + } else { + 
ReconcileOutcome::Success + } + } Err(ReconcileError::Cancel) => ReconcileOutcome::Cancel, Err(_) => ReconcileOutcome::Error, }; @@ -1869,7 +1916,7 @@ impl TenantShard { waiter: Arc::new(SeqWait::new(Sequence::initial())), error_waiter: Arc::new(SeqWait::new(Sequence::initial())), last_error: Arc::default(), - consecutive_errors_count: 0, + consecutive_reconciles_count: 0, pending_compute_notification: false, delayed_reconcile: false, scheduling_policy: serde_json::from_str(&tsp.scheduling_policy).unwrap(), diff --git a/test_runner/fixtures/endpoint/http.py b/test_runner/fixtures/endpoint/http.py index 294c52321b..1d278095ce 100644 --- a/test_runner/fixtures/endpoint/http.py +++ b/test_runner/fixtures/endpoint/http.py @@ -2,11 +2,12 @@ from __future__ import annotations import urllib.parse from enum import StrEnum -from typing import TYPE_CHECKING, final +from typing import TYPE_CHECKING, Any, final import requests from requests.adapters import HTTPAdapter from requests.auth import AuthBase +from requests.exceptions import ReadTimeout from typing_extensions import override from fixtures.log_helper import log @@ -102,6 +103,18 @@ class EndpointHttpClient(requests.Session): wait_until(offloaded) + def promote(self, safekeepers_lsn: dict[str, Any], disconnect: bool = False): + url = f"http://localhost:{self.external_port}/promote" + if disconnect: + try: # send first request to start promote and disconnect + self.post(url, data=safekeepers_lsn, timeout=0.001) + except ReadTimeout: + pass # wait on second request which returns on promotion finish + res = self.post(url, data=safekeepers_lsn) + res.raise_for_status() + json: dict[str, str] = res.json() + return json + def database_schema(self, database: str): res = self.get( f"http://localhost:{self.external_port}/database_schema?database={urllib.parse.quote(database, safe='')}", diff --git a/test_runner/fixtures/metrics.py b/test_runner/fixtures/metrics.py index 1dd4fe8316..6e600b5a86 100644 --- 
a/test_runner/fixtures/metrics.py +++ b/test_runner/fixtures/metrics.py @@ -159,6 +159,9 @@ PAGESERVER_GLOBAL_METRICS: tuple[str, ...] = ( ) PAGESERVER_PER_TENANT_METRICS: tuple[str, ...] = ( + # BEGIN_HADRON + "pageserver_active_storage_operations_count", + # END_HADRON "pageserver_current_logical_size", "pageserver_resident_physical_size", "pageserver_io_operations_bytes_total", diff --git a/test_runner/fixtures/neon_api.py b/test_runner/fixtures/neon_api.py index 9d85b9a332..bb618325e0 100644 --- a/test_runner/fixtures/neon_api.py +++ b/test_runner/fixtures/neon_api.py @@ -34,7 +34,9 @@ class NeonAPI: self.retries524 = 0 self.retries4xx = 0 - def __request(self, method: str | bytes, endpoint: str, **kwargs: Any) -> requests.Response: + def __request( + self, method: str | bytes, endpoint: str, retry404: bool = False, **kwargs: Any + ) -> requests.Response: kwargs["headers"] = kwargs.get("headers", {}) kwargs["headers"]["Authorization"] = f"Bearer {self.__neon_api_key}" @@ -55,10 +57,12 @@ class NeonAPI: resp.raise_for_status() break elif resp.status_code >= 400: - if resp.status_code == 422: - if resp.json()["message"] == "branch not ready yet": - retry = True - self.retries4xx += 1 + if resp.status_code == 404 and retry404: + retry = True + self.retries4xx += 1 + elif resp.status_code == 422 and resp.json()["message"] == "branch not ready yet": + retry = True + self.retries4xx += 1 elif resp.status_code == 423 and resp.json()["message"] in { "endpoint is in some transitive state, could not suspend", "project already has running conflicting operations, scheduling of new ones is prohibited", @@ -66,7 +70,7 @@ class NeonAPI: retry = True self.retries4xx += 1 elif resp.status_code == 524: - log.info("The request was timed out, trying to get operations") + log.info("The request was timed out") retry = True self.retries524 += 1 if retry: @@ -203,6 +207,9 @@ class NeonAPI: resp = self.__request( "GET", f"/projects/{project_id}/branches/{branch_id}", + # XXX Retry get 
parent details to work around the issue + # https://databricks.atlassian.net/browse/LKB-279 + retry404=True, headers={ "Accept": "application/json", }, @@ -307,6 +314,10 @@ class NeonAPI: if endpoint_type: data["endpoint"]["type"] = endpoint_type if settings: + # otherwise we get 400 "settings must not be nil" + # TODO(myrrc): fix on cplane side + if "pg_settings" not in settings: + settings["pg_settings"] = {} data["endpoint"]["settings"] = settings resp = self.__request( diff --git a/test_runner/fixtures/neon_cli.py b/test_runner/fixtures/neon_cli.py index 1abd3396e4..f33d4a0d22 100644 --- a/test_runner/fixtures/neon_cli.py +++ b/test_runner/fixtures/neon_cli.py @@ -503,6 +503,7 @@ class NeonLocalCli(AbstractNeonCli): pageserver_id: int | None = None, allow_multiple=False, update_catalog: bool = False, + privileged_role_name: str | None = None, ) -> subprocess.CompletedProcess[str]: args = [ "endpoint", @@ -534,6 +535,8 @@ class NeonLocalCli(AbstractNeonCli): args.extend(["--allow-multiple"]) if update_catalog: args.extend(["--update-catalog"]) + if privileged_role_name is not None: + args.extend(["--privileged-role-name", privileged_role_name]) res = self.raw_cli(args) res.check_returncode() diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 1a21c1d8e3..fc33fb45c1 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -728,7 +728,7 @@ class NeonEnvBuilder: # NB: neon_local rewrites postgresql.conf on each start based on neon_local config. No need to patch it. # However, in this new NeonEnv, the pageservers and safekeepers listen on different ports, and the storage # controller will currently reject re-attach requests from them because the NodeMetadata isn't identical. - # So, from_repo_dir patches up the the storcon database. + # So, from_repo_dir patches up the storcon database. 
patch_script_path = self.repo_dir / "storage_controller_db.startup.sql" assert not patch_script_path.exists() patch_script = "" @@ -1315,6 +1315,14 @@ class NeonEnv: # This feature is pending rollout. # tenant_config["rel_size_v2_enabled"] = True + # Test authors tend to forget about the default 10min initial lease deadline + # when writing tests, which turns their immediate gc requests via mgmt API + # into no-ops. Override the binary default here, such that there is no initial + # lease deadline by default in tests. Tests that care can always override it + # themselves. + # Cf https://databricks.atlassian.net/browse/LKB-92?focusedCommentId=6722329 + tenant_config["lsn_lease_length"] = "0s" + if self.pageserver_remote_storage is not None: ps_cfg["remote_storage"] = remote_storage_to_toml_dict( self.pageserver_remote_storage @@ -1787,6 +1795,33 @@ def neon_env_builder( record_property("preserve_database_files", builder.preserve_database_files) +@pytest.fixture(scope="function") +def neon_env_builder_local( + neon_env_builder: NeonEnvBuilder, + test_output_dir: Path, + pg_distrib_dir: Path, +) -> NeonEnvBuilder: + """ + Fixture to create a Neon environment for test with its own pg_install copy. + + This allows the test to edit the list of available extensions in the + local instance of Postgres used for the test, and install extensions via + downloading them when a remote extension is tested, for instance, or + copying files around for local extension testing. + """ + test_local_pginstall = test_output_dir / "pg_install" + log.info(f"copy {pg_distrib_dir} to {test_local_pginstall}") + + # We can't copy only the version that we are currently testing because other + # binaries like the storage controller need specific Postgres versions. 
+ shutil.copytree(pg_distrib_dir, test_local_pginstall) + + neon_env_builder.pg_distrib_dir = test_local_pginstall + log.info(f"local neon_env_builder.pg_distrib_dir: {neon_env_builder.pg_distrib_dir}") + + return neon_env_builder + + @dataclass class PageserverPort: pg: int @@ -2084,11 +2119,14 @@ class NeonStorageController(MetricsGetter, LogUtils): headers=self.headers(TokenScope.ADMIN), ) - def node_delete(self, node_id): + def node_delete(self, node_id, force: bool = False): log.info(f"node_delete({node_id})") + query = f"{self.api}/control/v1/node/{node_id}/delete" + if force: + query += "?force=true" self.request( "PUT", - f"{self.api}/control/v1/node/{node_id}/delete", + query, headers=self.headers(TokenScope.ADMIN), ) @@ -2307,6 +2345,20 @@ class NeonStorageController(MetricsGetter, LogUtils): response.raise_for_status() return response.json() + # HADRON + def tenant_timeline_describe( + self, + tenant_id: TenantId, + timeline_id: TimelineId, + ): + response = self.request( + "GET", + f"{self.api}/control/v1/tenant/{tenant_id}/timeline/{timeline_id}", + headers=self.headers(TokenScope.ADMIN), + ) + response.raise_for_status() + return response.json() + def nodes(self): """ :return: list of {"id": ""} @@ -4275,6 +4327,7 @@ class Endpoint(PgProtocol, LogUtils): pageserver_id: int | None = None, allow_multiple: bool = False, update_catalog: bool = False, + privileged_role_name: str | None = None, ) -> Self: """ Create a new Postgres endpoint. 
@@ -4302,6 +4355,7 @@ class Endpoint(PgProtocol, LogUtils): pageserver_id=pageserver_id, allow_multiple=allow_multiple, update_catalog=update_catalog, + privileged_role_name=privileged_role_name, ) path = Path("endpoints") / self.endpoint_id / "pgdata" self.pgdata_dir = self.env.repo_dir / path @@ -4318,9 +4372,9 @@ class Endpoint(PgProtocol, LogUtils): # XXX: By checking for None, we enable the new communicator for all tests # by default if grpc or grpc is None: - config_lines += [f"neon.enable_new_communicator=on"] + config_lines += ["neon.use_communicator_worker=on"] else: - config_lines += [f"neon.enable_new_communicator=off"] + config_lines += ["neon.use_communicator_worker=off"] # Delete file cache if it exists (and we're recreating the endpoint) if USE_LFC: @@ -4762,6 +4816,7 @@ class EndpointFactory: config_lines: list[str] | None = None, pageserver_id: int | None = None, update_catalog: bool = False, + privileged_role_name: str | None = None, ) -> Endpoint: ep = Endpoint( self.env, @@ -4785,6 +4840,7 @@ class EndpointFactory: config_lines=config_lines, pageserver_id=pageserver_id, update_catalog=update_catalog, + privileged_role_name=privileged_role_name, ) def stop_all(self, fail_on_error=True) -> Self: @@ -5371,6 +5427,7 @@ SKIP_FILES = frozenset( ( "pg_internal.init", "pg.log", + "neon.signal", "zenith.signal", "pg_hba.conf", "postgresql.conf", diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index 6a715c4b93..59249f31ad 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -111,6 +111,13 @@ DEFAULT_PAGESERVER_ALLOWED_ERRORS = ( ".*stalling layer flushes for compaction backpressure.*", ".*layer roll waiting for flush due to compaction backpressure.*", ".*BatchSpanProcessor.*", + # Can happen in tests that purposely wipe pageserver "local disk" data. 
+ ".*Local data loss suspected.*", + # Too many frozen layers error is normal during intensive benchmarks + ".*too many frozen layers.*", + ".*Failed to resolve tenant shard after.*", + # Expected warnings when pageserver has not refreshed GC info yet + ".*pitr LSN/interval not found, skipping force image creation LSN calculation.*", ".*No broker updates received for a while.*", *( [ diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index d9037f2d08..f95b0ee4d1 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -333,6 +333,13 @@ class PageserverHttpClient(requests.Session, MetricsGetter): res = self.post(f"http://localhost:{self.port}/v1/reload_auth_validation_keys") self.verbose_error(res) + def list_tenant_visible_size(self) -> dict[TenantShardId, int]: + res = self.get(f"http://localhost:{self.port}/v1/list_tenant_visible_size") + self.verbose_error(res) + res_json = res.json() + assert isinstance(res_json, dict) + return res_json + def tenant_list(self) -> list[dict[Any, Any]]: res = self.get(f"http://localhost:{self.port}/v1/tenant") self.verbose_error(res) @@ -840,7 +847,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): return res_json def timeline_lsn_lease( - self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId, lsn: Lsn + self, tenant_id: TenantId | TenantShardId, timeline_id: TimelineId, lsn: Lsn, **kwargs ): data = { "lsn": str(lsn), @@ -850,6 +857,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): res = self.post( f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}/lsn_lease", json=data, + **kwargs, ) self.verbose_error(res) res_json = res.json() @@ -1002,7 +1010,7 @@ class PageserverHttpClient(requests.Session, MetricsGetter): def get_metrics_str(self) -> str: """You probably want to use get_metrics() instead.""" - res = self.get(f"http://localhost:{self.port}/metrics") + res = 
self.get(f"http://localhost:{self.port}/metrics?use_latest=true") self.verbose_error(res) return res.text @@ -1247,3 +1255,10 @@ class PageserverHttpClient(requests.Session, MetricsGetter): ) self.verbose_error(res) return res.json() + + def force_refresh_feature_flag(self, tenant_id: TenantId | TenantShardId): + res = self.post( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/force_refresh_feature_flag", + ) + self.verbose_error(res) + return res.json() diff --git a/test_runner/fixtures/port_distributor.py b/test_runner/fixtures/port_distributor.py index 6a829a9399..e51d08e16e 100644 --- a/test_runner/fixtures/port_distributor.py +++ b/test_runner/fixtures/port_distributor.py @@ -3,6 +3,7 @@ from __future__ import annotations import re import socket from contextlib import closing +from itertools import cycle from fixtures.log_helper import log @@ -34,15 +35,23 @@ def can_bind(host: str, port: int) -> bool: class PortDistributor: def __init__(self, base_port: int, port_number: int): - self.iterator = iter(range(base_port, base_port + port_number)) + self.base_port = base_port + self.port_number = port_number + self.cycle = cycle(range(base_port, base_port + port_number)) self.port_map: dict[int, int] = {} def get_port(self) -> int: - for port in self.iterator: + checked = 0 + for port in self.cycle: if can_bind("localhost", port): return port + elif checked < self.port_number: + checked += 1 + else: + break + raise RuntimeError( - "port range configured for test is exhausted, consider enlarging the range" + f"port range ({self.base_port}..{self.base_port + self.port_number}) configured for test is exhausted, consider enlarging the range" ) def replace_with_new_port(self, value: int | str) -> int | str: diff --git a/test_runner/fixtures/safekeeper/http.py b/test_runner/fixtures/safekeeper/http.py index 839e985419..ceb00c0f90 100644 --- a/test_runner/fixtures/safekeeper/http.py +++ b/test_runner/fixtures/safekeeper/http.py @@ -112,12 +112,18 @@ class 
TimelineCreateRequest: class TimelineMembershipSwitchResponse: previous_conf: MembershipConfiguration current_conf: MembershipConfiguration + last_log_term: int + flush_lsn: Lsn @classmethod def from_json(cls, d: dict[str, Any]) -> TimelineMembershipSwitchResponse: previous_conf = MembershipConfiguration.from_json(d["previous_conf"]) current_conf = MembershipConfiguration.from_json(d["current_conf"]) - return TimelineMembershipSwitchResponse(previous_conf, current_conf) + last_log_term = d["last_log_term"] + flush_lsn = Lsn(d["flush_lsn"]) + return TimelineMembershipSwitchResponse( + previous_conf, current_conf, last_log_term, flush_lsn + ) class SafekeeperHttpClient(requests.Session, MetricsGetter): @@ -137,7 +143,7 @@ class SafekeeperHttpClient(requests.Session, MetricsGetter): def get_metrics_str(self) -> str: """You probably want to use get_metrics() instead.""" - request_result = self.get(f"http://localhost:{self.port}/metrics") + request_result = self.get(f"http://localhost:{self.port}/metrics?use_latest=true") request_result.raise_for_status() return request_result.text diff --git a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py index 41696bf887..8e7055ef78 100644 --- a/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py +++ b/test_runner/performance/pageserver/pagebench/test_pageserver_max_throughput_getpage_at_latest_lsn.py @@ -55,9 +55,10 @@ def test_pageserver_characterize_throughput_with_n_tenants( @pytest.mark.parametrize("duration", [20 * 60]) @pytest.mark.parametrize("pgbench_scale", [get_scale_for_db(2048)]) # we use 1 client to characterize latencies, and 64 clients to characterize throughput/scalability -# we use 64 clients because typically for a high number of connections we recommend the connection pooler -# which by default uses 64 
connections -@pytest.mark.parametrize("n_clients", [1, 64]) +# we use 8 clients because we see a latency knee around 6-8 clients on im4gn.2xlarge instance type, +# which we use for this periodic test - at a cpu utilization of around 70 % - which is considered +# a good utilization for pageserver. +@pytest.mark.parametrize("n_clients", [1, 8]) @pytest.mark.parametrize("n_tenants", [1]) @pytest.mark.timeout(2400) def test_pageserver_characterize_latencies_with_1_client_and_throughput_with_many_clients_one_tenant( @@ -70,7 +71,13 @@ def test_pageserver_characterize_latencies_with_1_client_and_throughput_with_man n_clients: int, ): setup_and_run_pagebench_benchmark( - neon_env_builder, zenbenchmark, pg_bin, n_tenants, pgbench_scale, duration, n_clients + neon_env_builder, + zenbenchmark, + pg_bin, + n_tenants, + pgbench_scale, + duration, + n_clients, ) @@ -85,7 +92,8 @@ def setup_and_run_pagebench_benchmark( ): def record(metric, **kwargs): zenbenchmark.record( - metric_name=f"pageserver_max_throughput_getpage_at_latest_lsn.{metric}", **kwargs + metric_name=f"pageserver_max_throughput_getpage_at_latest_lsn.{metric}", + **kwargs, ) params: dict[str, tuple[Any, dict[str, Any]]] = {} @@ -103,9 +111,7 @@ def setup_and_run_pagebench_benchmark( # configure cache sizes like in prod page_cache_size = 16384 max_file_descriptors = 500000 - neon_env_builder.pageserver_config_override = ( - f"page_cache_size={page_cache_size}; max_file_descriptors={max_file_descriptors}" - ) + neon_env_builder.pageserver_config_override = f"page_cache_size={page_cache_size}; max_file_descriptors={max_file_descriptors}; disk_usage_based_eviction={{enabled = false}}" tracing_config = PageserverTracingConfig( sampling_ratio=(0, 1000), @@ -121,7 +127,10 @@ def setup_and_run_pagebench_benchmark( page_cache_size * 8192, {"unit": "byte"}, ), - "pageserver_config_override.max_file_descriptors": (max_file_descriptors, {"unit": ""}), + "pageserver_config_override.max_file_descriptors": ( + 
max_file_descriptors, + {"unit": ""}, + ), "pageserver_config_override.sampling_ratio": (ratio, {"unit": ""}), } ) diff --git a/test_runner/performance/test_lfc_prewarm.py b/test_runner/performance/test_lfc_prewarm.py new file mode 100644 index 0000000000..6c0083de95 --- /dev/null +++ b/test_runner/performance/test_lfc_prewarm.py @@ -0,0 +1,168 @@ +from __future__ import annotations + +import os +import timeit +import traceback +from concurrent.futures import ThreadPoolExecutor as Exec +from pathlib import Path +from time import sleep +from typing import TYPE_CHECKING, Any, cast + +import pytest +from fixtures.benchmark_fixture import NeonBenchmarker, PgBenchRunResult +from fixtures.log_helper import log +from fixtures.neon_api import NeonAPI, connection_parameters_to_env + +if TYPE_CHECKING: + from fixtures.compare_fixtures import NeonCompare + from fixtures.neon_fixtures import Endpoint, PgBin + from fixtures.pg_version import PgVersion + +from performance.test_perf_pgbench import utc_now_timestamp + +# These tests compare performance for a write-heavy and read-heavy workloads of an ordinary endpoint +# compared to the endpoint which saves its LFC and prewarms using it on startup. 
+ + +def test_compare_prewarmed_pgbench_perf(neon_compare: NeonCompare): + env = neon_compare.env + env.create_branch("normal") + env.create_branch("prewarmed") + pg_bin = neon_compare.pg_bin + ep_normal: Endpoint = env.endpoints.create_start("normal") + ep_prewarmed: Endpoint = env.endpoints.create_start("prewarmed", autoprewarm=True) + + for ep in [ep_normal, ep_prewarmed]: + connstr: str = ep.connstr() + pg_bin.run(["pgbench", "-i", "-I", "dtGvp", connstr, "-s100"]) + ep.safe_psql("CREATE EXTENSION neon") + client = ep.http_client() + client.offload_lfc() + ep.stop() + ep.start() + client.prewarm_lfc_wait() + + run_start_timestamp = utc_now_timestamp() + t0 = timeit.default_timer() + out = pg_bin.run_capture(["pgbench", "-c10", "-T10", connstr]) + run_duration = timeit.default_timer() - t0 + run_end_timestamp = utc_now_timestamp() + + stdout = Path(f"{out}.stdout").read_text() + res = PgBenchRunResult.parse_from_stdout( + stdout=stdout, + run_duration=run_duration, + run_start_timestamp=run_start_timestamp, + run_end_timestamp=run_end_timestamp, + ) + name: str = cast("str", ep.branch_name) + neon_compare.zenbenchmark.record_pg_bench_result(name, res) + + +@pytest.mark.remote_cluster +@pytest.mark.timeout(2 * 60 * 60) +def test_compare_prewarmed_pgbench_perf_benchmark( + pg_bin: PgBin, + neon_api: NeonAPI, + pg_version: PgVersion, + zenbenchmark: NeonBenchmarker, +): + name = f"Test prewarmed pgbench performance, GITHUB_RUN_ID={os.getenv('GITHUB_RUN_ID')}" + project = neon_api.create_project(pg_version, name) + project_id = project["project"]["id"] + neon_api.wait_for_operation_to_finish(project_id) + err = False + try: + benchmark_impl(pg_bin, neon_api, project, zenbenchmark) + except Exception as e: + err = True + log.error(f"Caught exception: {e}") + log.error(traceback.format_exc()) + finally: + assert not err + neon_api.delete_project(project_id) + + +def benchmark_impl( + pg_bin: PgBin, neon_api: NeonAPI, project: dict[str, Any], zenbenchmark: 
NeonBenchmarker +): + pgbench_size = int(os.getenv("PGBENCH_SIZE") or "3424") # 50GB + offload_secs = 20 + test_duration_min = 5 + pgbench_duration = f"-T{test_duration_min * 60}" + # prewarm API is not publicly exposed. In order to test performance of a + # fully prewarmed endpoint, wait after it restarts. + # The number here is empirical, based on manual runs on staging + prewarmed_sleep_secs = 180 + + branch_id = project["branch"]["id"] + project_id = project["project"]["id"] + normal_env = connection_parameters_to_env( + project["connection_uris"][0]["connection_parameters"] + ) + normal_id = project["endpoints"][0]["id"] + + prewarmed_branch_id = neon_api.create_branch( + project_id, "prewarmed", parent_id=branch_id, add_endpoint=False + )["branch"]["id"] + neon_api.wait_for_operation_to_finish(project_id) + + ep_prewarmed = neon_api.create_endpoint( + project_id, + prewarmed_branch_id, + endpoint_type="read_write", + settings={"autoprewarm": True, "offload_lfc_interval_seconds": offload_secs}, + ) + neon_api.wait_for_operation_to_finish(project_id) + + prewarmed_env = normal_env.copy() + prewarmed_env["PGHOST"] = ep_prewarmed["endpoint"]["host"] + prewarmed_id = ep_prewarmed["endpoint"]["id"] + + def bench(endpoint_name, endpoint_id, env): + pg_bin.run(["pgbench", "-i", "-I", "dtGvp", f"-s{pgbench_size}"], env) + sleep(offload_secs * 2) # ensure LFC is offloaded after pgbench finishes + neon_api.restart_endpoint(project_id, endpoint_id) + sleep(prewarmed_sleep_secs) + + run_start_timestamp = utc_now_timestamp() + t0 = timeit.default_timer() + out = pg_bin.run_capture(["pgbench", "-c10", pgbench_duration, "-Mprepared"], env) + run_duration = timeit.default_timer() - t0 + run_end_timestamp = utc_now_timestamp() + + stdout = Path(f"{out}.stdout").read_text() + res = PgBenchRunResult.parse_from_stdout( + stdout=stdout, + run_duration=run_duration, + run_start_timestamp=run_start_timestamp, + run_end_timestamp=run_end_timestamp, + ) + 
zenbenchmark.record_pg_bench_result(endpoint_name, res) + + with Exec(max_workers=2) as exe: + exe.submit(bench, "normal", normal_id, normal_env) + exe.submit(bench, "prewarmed", prewarmed_id, prewarmed_env) + + +def test_compare_prewarmed_read_perf(neon_compare: NeonCompare): + env = neon_compare.env + env.create_branch("normal") + env.create_branch("prewarmed") + ep_normal: Endpoint = env.endpoints.create_start("normal") + ep_prewarmed: Endpoint = env.endpoints.create_start("prewarmed", autoprewarm=True) + + sql = [ + "CREATE EXTENSION neon", + "CREATE TABLE foo(key serial primary key, t text default 'foooooooooooooooooooooooooooooooooooooooooooooooooooo')", + "INSERT INTO foo SELECT FROM generate_series(1,1000000)", + ] + for ep in [ep_normal, ep_prewarmed]: + ep.safe_psql_many(sql) + client = ep.http_client() + client.offload_lfc() + ep.stop() + ep.start() + client.prewarm_lfc_wait() + with neon_compare.record_duration(f"{ep.branch_name}_run_duration"): + ep.safe_psql("SELECT count(*) from foo") diff --git a/test_runner/performance/test_sharding_autosplit.py b/test_runner/performance/test_sharding_autosplit.py index 0bb210db23..1b77831b75 100644 --- a/test_runner/performance/test_sharding_autosplit.py +++ b/test_runner/performance/test_sharding_autosplit.py @@ -73,6 +73,11 @@ def test_sharding_autosplit(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): ".*Local notification hook failed.*", ".*Marking shard.*for notification retry.*", ".*Failed to notify compute.*", + # As an optimization, the storage controller kicks the downloads on the secondary + # after the shard split. However, secondaries are created async, so it's possible + # that the intent state was modified, but the actual secondary hasn't been created, + # which results in an error. 
+ ".*Error calling secondary download after shard split.*", ] ) diff --git a/test_runner/random_ops/test_random_ops.py b/test_runner/random_ops/test_random_ops.py index d3815c40bb..b106e9b729 100644 --- a/test_runner/random_ops/test_random_ops.py +++ b/test_runner/random_ops/test_random_ops.py @@ -13,7 +13,6 @@ from typing import TYPE_CHECKING, Any import pytest from fixtures.log_helper import log -from requests import HTTPError if TYPE_CHECKING: from pathlib import Path @@ -117,7 +116,9 @@ class NeonBranch: def create_child_branch(self) -> NeonBranch | None: return self.project.create_branch(self.id) - def create_ro_endpoint(self) -> NeonEndpoint: + def create_ro_endpoint(self) -> NeonEndpoint | None: + if not self.project.check_limit_endpoints(): + return None return NeonEndpoint( self.project, self.neon_api.create_endpoint(self.project_id, self.id, "read_only", {})["endpoint"], @@ -168,29 +169,21 @@ class NeonBranch: source_timestamp: str | None = None, preserve_under_name: str | None = None, ) -> dict[str, Any] | None: + if not self.project.check_limit_branches(): + return None endpoints = [ep for ep in self.endpoints.values() if ep.type == "read_only"] # Terminate all the benchmarks running to prevent errors. 
Errors in benchmark during pgbench are expected for ep in endpoints: ep.terminate_benchmark() self.terminate_benchmark() - try: - res: dict[str, Any] = self.neon_api.restore_branch( - self.project_id, - self.id, - source_branch_id, - source_lsn, - source_timestamp, - preserve_under_name, - ) - except HTTPError as he: - if ( - he.response.status_code == 422 - and he.response.json()["code"] == "BRANCHES_LIMIT_EXCEEDED" - ): - log.info("Branch limit exceeded, skipping") - return None - else: - raise HTTPError(he) from he + res: dict[str, Any] = self.neon_api.restore_branch( + self.project_id, + self.id, + source_branch_id, + source_lsn, + source_timestamp, + preserve_under_name, + ) self.project.wait() self.start_benchmark() for ep in endpoints: @@ -239,19 +232,30 @@ class NeonProject: def delete(self) -> None: self.neon_api.delete_project(self.id) + def check_limit_branches(self) -> bool: + if self.limits["max_branches"] == -1 or len(self.branches) < self.limits["max_branches"]: + return True + log.info("branch limit exceeded (%s/%s)", len(self.branches), self.limits["max_branches"]) + return False + + def check_limit_endpoints(self) -> bool: + if ( + self.limits["max_read_only_endpoints"] == -1 + or self.read_only_endpoints_total < self.limits["max_read_only_endpoints"] + ): + return True + log.info( + "Maximum read only endpoint limit exceeded (%s/%s)", + self.read_only_endpoints_total, + self.limits["max_read_only_endpoints"], + ) + return False + def create_branch(self, parent_id: str | None = None) -> NeonBranch | None: self.wait() - try: - branch_def = self.neon_api.create_branch(self.id, parent_id=parent_id) - except HTTPError as he: - if ( - he.response.status_code == 422 - and he.response.json()["code"] == "BRANCHES_LIMIT_EXCEEDED" - ): - log.info("Branch limit exceeded, skipping") - return None - else: - raise HTTPError(he) from he + if not self.check_limit_branches(): + return None + branch_def = self.neon_api.create_branch(self.id, parent_id=parent_id) 
new_branch = NeonBranch(self, branch_def) self.wait() return new_branch @@ -388,17 +392,9 @@ def do_action(project: NeonProject, action: str) -> bool: log.info("Action: %s", action) if action == "new_branch": log.info("Trying to create a new branch") - if 0 <= project.limits["max_branches"] <= len(project.branches): - log.info( - "Maximum branch limit exceeded (%s of %s)", - len(project.branches), - project.limits["max_branches"], - ) - return False parent = project.branches[ random.choice(list(set(project.branches.keys()) - project.reset_branches)) ] - log.info("Parent: %s", parent) child = parent.create_child_branch() if child is None: return False @@ -413,16 +409,11 @@ def do_action(project: NeonProject, action: str) -> bool: log.info("Leaf branches not found, skipping") return False elif action == "new_ro_endpoint": - if 0 <= project.limits["max_read_only_endpoints"] <= project.read_only_endpoints_total: - log.info( - "Maximum read only endpoint limit exceeded (%s of %s)", - project.read_only_endpoints_total, - project.limits["max_read_only_endpoints"], - ) - return False ep = random.choice( [br for br in project.branches.values() if br.id not in project.reset_branches] ).create_ro_endpoint() + if ep is None: + return False log.info("Created the RO endpoint with id %s branch: %s", ep.id, ep.branch.id) ep.start_benchmark() elif action == "delete_ro_endpoint": diff --git a/test_runner/regress/data/test_event_trigger_extension/test_event_trigger_extension--1.0.sql b/test_runner/regress/data/test_event_trigger_extension/test_event_trigger_extension--1.0.sql new file mode 100644 index 0000000000..2b82102802 --- /dev/null +++ b/test_runner/regress/data/test_event_trigger_extension/test_event_trigger_extension--1.0.sql @@ -0,0 +1,32 @@ +\echo Use "CREATE EXTENSION test_event_trigger_extension" to load this file. 
\quit + +CREATE SCHEMA event_trigger; + +create sequence if not exists event_trigger.seq_schema_version as int cycle; + +create or replace function event_trigger.increment_schema_version() + returns event_trigger + security definer + language plpgsql +as $$ +begin + perform pg_catalog.nextval('event_trigger.seq_schema_version'); +end; +$$; + +create or replace function event_trigger.get_schema_version() + returns int + security definer + language sql +as $$ + select last_value from event_trigger.seq_schema_version; +$$; + +-- On DDL event, increment the schema version number +create event trigger event_trigger_watch_ddl + on ddl_command_end + execute procedure event_trigger.increment_schema_version(); + +create event trigger event_trigger_watch_drop + on sql_drop + execute procedure event_trigger.increment_schema_version(); diff --git a/test_runner/regress/data/test_event_trigger_extension/test_event_trigger_extension.control b/test_runner/regress/data/test_event_trigger_extension/test_event_trigger_extension.control new file mode 100644 index 0000000000..4fe8c3341b --- /dev/null +++ b/test_runner/regress/data/test_event_trigger_extension/test_event_trigger_extension.control @@ -0,0 +1,8 @@ +default_version = '1.0' +comment = 'Test extension with Event Trigger' + +# make sure the extension objects are owned by the bootstrap user +# to check that the SECURITY DEFINER event trigger function is still +# called during non-superuser DDL events. 
+superuser = true +trusted = true diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index 7788faceb4..eaaa3014a5 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -165,6 +165,7 @@ def test_fully_custom_config(positive_env: NeonEnv): "gc_horizon": 23 * (1024 * 1024), "gc_period": "2h 13m", "image_creation_threshold": 7, + "image_layer_force_creation_period": "1m", "pitr_interval": "1m", "lagging_wal_timeout": "23m", "lazy_slru_download": True, diff --git a/test_runner/regress/test_branch_and_gc.py b/test_runner/regress/test_branch_and_gc.py index 8447c9bf2d..148f469a95 100644 --- a/test_runner/regress/test_branch_and_gc.py +++ b/test_runner/regress/test_branch_and_gc.py @@ -7,6 +7,7 @@ from typing import TYPE_CHECKING import pytest from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log +from fixtures.neon_fixtures import wait_for_last_flush_lsn from fixtures.pageserver.http import TimelineCreate406 from fixtures.utils import query_scalar, skip_in_debug_build @@ -162,6 +163,9 @@ def test_branch_creation_before_gc(neon_simple_env: NeonEnv): ) lsn = Lsn(res[2][0][0]) + # Wait for all WAL to reach the pageserver, so GC cutoff LSN is greater than `lsn`. + wait_for_last_flush_lsn(env, endpoint0, tenant, b0) + # Use `failpoint=sleep` and `threading` to make the GC iteration triggers *before* the # branch creation task but the individual timeline GC iteration happens *after* # the branch creation task. 
diff --git a/test_runner/regress/test_broken_timeline.py b/test_runner/regress/test_broken_timeline.py index 1209b3a818..0d92bf8406 100644 --- a/test_runner/regress/test_broken_timeline.py +++ b/test_runner/regress/test_broken_timeline.py @@ -24,10 +24,7 @@ def test_local_corruption(neon_env_builder: NeonEnvBuilder): [ ".*get_values_reconstruct_data for layer .*", ".*could not find data for key.*", - ".*is not active. Current state: Broken.*", ".*will not become active. Current state: Broken.*", - ".*failed to load metadata.*", - ".*load failed.*load local timeline.*", ".*: layer load failed, assuming permanent failure:.*", ".*failed to get checkpoint bytes.*", ".*failed to get control bytes.*", diff --git a/test_runner/regress/test_compaction.py b/test_runner/regress/test_compaction.py index 1570d40ae9..76485c8321 100644 --- a/test_runner/regress/test_compaction.py +++ b/test_runner/regress/test_compaction.py @@ -7,6 +7,7 @@ import time from enum import StrEnum import pytest +from fixtures.common_types import TenantShardId from fixtures.log_helper import log from fixtures.neon_fixtures import ( NeonEnvBuilder, @@ -686,7 +687,7 @@ def test_sharding_compaction( for _i in range(0, 10): # Each of these does some writes then a checkpoint: because we set image_creation_threshold to 1, # these should result in image layers each time we write some data into a shard, and also shards - # recieving less data hitting their "empty image layer" path (wherre they should skip writing the layer, + # receiving less data hitting their "empty image layer" path (where they should skip writing the layer, # rather than asserting) workload.churn_rows(64) @@ -944,3 +945,204 @@ def test_image_layer_compression(neon_env_builder: NeonEnvBuilder, enabled: bool f"SELECT count(*) FROM foo WHERE id={v} and val=repeat('abcde{v:0>3}', 500)" ) assert res[0][0] == 1 + + +# BEGIN_HADRON +def get_layer_map(env, tenant_shard_id, timeline_id, ps_id): + client = env.pageservers[ps_id].http_client() + 
layer_map = client.layer_map_info(tenant_shard_id, timeline_id) + image_layer_count = 0 + delta_layer_count = 0 + for layer in layer_map.historic_layers: + if layer.kind == "Image": + image_layer_count += 1 + elif layer.kind == "Delta": + delta_layer_count += 1 + return image_layer_count, delta_layer_count + + +def test_image_layer_creation_time_threshold(neon_env_builder: NeonEnvBuilder): + """ + Tests that image layers can be created when the time threshold is reached on non-0 shards. + """ + tenant_conf = { + "compaction_threshold": "100", + "image_creation_threshold": "100", + "image_layer_creation_check_threshold": "1", + # disable distance based image layer creation check + "checkpoint_distance": 10 * 1024 * 1024 * 1024, + "checkpoint_timeout": "100ms", + "image_layer_force_creation_period": "1s", + "pitr_interval": "10s", + "gc_period": "1s", + "compaction_period": "1s", + "lsn_lease_length": "1s", + } + + # consider every tenant large to run the image layer generation check more eagerly + neon_env_builder.pageserver_config_override = ( + "image_layer_generation_large_timeline_threshold=0" + ) + + neon_env_builder.num_pageservers = 1 + neon_env_builder.num_safekeepers = 1 + env = neon_env_builder.init_start( + initial_tenant_conf=tenant_conf, + initial_tenant_shard_count=2, + initial_tenant_shard_stripe_size=1, + ) + + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + endpoint = env.endpoints.create_start("main") + endpoint.safe_psql("CREATE TABLE foo (id INTEGER, val text)") + + for v in range(10): + endpoint.safe_psql(f"INSERT INTO foo (id, val) VALUES ({v}, repeat('abcde{v:0>3}', 500))") + + tenant_shard_id = TenantShardId(tenant_id, 1, 2) + + # Generate some rows. 
+ for v in range(20): + endpoint.safe_psql(f"INSERT INTO foo (id, val) VALUES ({v}, repeat('abcde{v:0>3}', 500))") + + # restart page server so that logical size on non-0 shards is missing + env.pageserver.restart() + + (old_images, old_deltas) = get_layer_map(env, tenant_shard_id, timeline_id, 0) + log.info(f"old images: {old_images}, old deltas: {old_deltas}") + + def check_image_creation(): + (new_images, old_deltas) = get_layer_map(env, tenant_shard_id, timeline_id, 0) + log.info(f"images: {new_images}, deltas: {old_deltas}") + assert new_images > old_images + + wait_until(check_image_creation) + + endpoint.stop_and_destroy() + + +def test_image_layer_force_creation_period(neon_env_builder: NeonEnvBuilder): + """ + Tests that page server can force creating new images if image_layer_force_creation_period is enabled + """ + # use large knobs to disable L0 compaction/image creation except for the force image creation + tenant_conf = { + "compaction_threshold": "100", + "image_creation_threshold": "100", + "image_layer_creation_check_threshold": "1", + "checkpoint_distance": 10 * 1024, + "checkpoint_timeout": "1s", + "image_layer_force_creation_period": "1s", + "pitr_interval": "10s", + "gc_period": "1s", + "compaction_period": "1s", + "lsn_lease_length": "1s", + } + + # consider every tenant large to run the image layer generation check more eagerly + neon_env_builder.pageserver_config_override = ( + "image_layer_generation_large_timeline_threshold=0" + ) + + neon_env_builder.num_pageservers = 1 + neon_env_builder.num_safekeepers = 1 + env = neon_env_builder.init_start(initial_tenant_conf=tenant_conf) + + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + + endpoint = env.endpoints.create_start("main") + endpoint.safe_psql("CREATE TABLE foo (id INTEGER PRIMARY KEY, val text)") + # Generate some rows. 
+ for v in range(10): + endpoint.safe_psql(f"INSERT INTO foo (id, val) VALUES ({v}, repeat('abcde{v:0>3}', 500))") + + # Sleep a bit such that the inserts are considered when calculating the forced image layer creation LSN. + time.sleep(2) + + def check_force_image_creation(): + ps_http = env.pageserver.http_client() + ps_http.timeline_compact(tenant_id, timeline_id) + image, delta = get_layer_map(env, tenant_id, timeline_id, 0) + log.info(f"images: {image}, deltas: {delta}") + assert image > 0 + + env.pageserver.assert_log_contains("forcing L0 compaction of") + env.pageserver.assert_log_contains("forcing image creation for partitioned range") + + wait_until(check_force_image_creation) + + endpoint.stop_and_destroy() + + env.pageserver.allowed_errors.append( + ".*created delta file of size.*larger than double of target.*" + ) + + +def test_image_consistent_lsn(neon_env_builder: NeonEnvBuilder): + """ + Test the /v1/tenant//timeline/ endpoint and the computation of image_consistent_lsn + """ + # use large knobs to disable L0 compaction/image creation except for the force image creation + tenant_conf = { + "compaction_threshold": "100", + "image_creation_threshold": "100", + "image_layer_creation_check_threshold": "1", + "checkpoint_distance": 10 * 1024, + "checkpoint_timeout": "1s", + "image_layer_force_creation_period": "1s", + "pitr_interval": "10s", + "gc_period": "1s", + "compaction_period": "1s", + "lsn_lease_length": "1s", + } + + neon_env_builder.num_pageservers = 2 + neon_env_builder.num_safekeepers = 1 + env = neon_env_builder.init_start( + initial_tenant_conf=tenant_conf, + initial_tenant_shard_count=4, + initial_tenant_shard_stripe_size=1, + ) + + tenant_id = env.initial_tenant + timeline_id = env.initial_timeline + + endpoint = env.endpoints.create_start("main") + endpoint.safe_psql("CREATE TABLE foo (id INTEGER, val text)") + for v in range(10): + endpoint.safe_psql( + f"INSERT INTO foo (id, val) VALUES ({v}, repeat('abcde{v:0>3}', 500))", 
log_query=False + ) + + response = env.storage_controller.tenant_timeline_describe(tenant_id, timeline_id) + shards = response["shards"] + for shard in shards: + assert shard["image_consistent_lsn"] is not None + image_consistent_lsn = response["image_consistent_lsn"] + assert image_consistent_lsn is not None + + # do more writes and wait for image_consistent_lsn to advance + for v in range(100): + endpoint.safe_psql( + f"INSERT INTO foo (id, val) VALUES ({v}, repeat('abcde{v:0>3}', 500))", log_query=False + ) + + def check_image_consistent_lsn_advanced(): + response = env.storage_controller.tenant_timeline_describe(tenant_id, timeline_id) + new_image_consistent_lsn = response["image_consistent_lsn"] + shards = response["shards"] + for shard in shards: + print(f"shard {shard['tenant_id']} image_consistent_lsn{shard['image_consistent_lsn']}") + assert new_image_consistent_lsn != image_consistent_lsn + + wait_until(check_image_consistent_lsn_advanced) + + endpoint.stop_and_destroy() + + for ps in env.pageservers: + ps.allowed_errors.append(".*created delta file of size.*larger than double of target.*") + + +# END_HADRON diff --git a/test_runner/regress/test_compatibility.py b/test_runner/regress/test_compatibility.py index a4d2bf8d9b..a3a20cdc62 100644 --- a/test_runner/regress/test_compatibility.py +++ b/test_runner/regress/test_compatibility.py @@ -187,19 +187,21 @@ def test_create_snapshot( env.pageserver.stop() env.storage_controller.stop() - # Directory `compatibility_snapshot_dir` is uploaded to S3 in a workflow, keep the name in sync with it - compatibility_snapshot_dir = ( + # Directory `new_compatibility_snapshot_dir` is uploaded to S3 in a workflow, keep the name in sync with it + new_compatibility_snapshot_dir = ( top_output_dir / f"compatibility_snapshot_pg{pg_version.v_prefixed}" ) - if compatibility_snapshot_dir.exists(): - shutil.rmtree(compatibility_snapshot_dir) + if new_compatibility_snapshot_dir.exists(): + 
shutil.rmtree(new_compatibility_snapshot_dir) shutil.copytree( test_output_dir, - compatibility_snapshot_dir, + new_compatibility_snapshot_dir, ignore=shutil.ignore_patterns("pg_dynshmem"), ) + log.info(f"Copied new compatibility snapshot dir to: {new_compatibility_snapshot_dir}") + # check_neon_works does recovery from WAL => the compatibility snapshot's WAL is old => will log this warning ingest_lag_log_line = ".*ingesting record with timestamp lagging more than wait_lsn_timeout.*" @@ -218,6 +220,7 @@ def test_backward_compatibility( """ Test that the new binaries can read old data """ + log.info(f"Using snapshot dir at {compatibility_snapshot_dir}") neon_env_builder.num_safekeepers = 3 env = neon_env_builder.from_repo_dir(compatibility_snapshot_dir / "repo") env.pageserver.allowed_errors.append(ingest_lag_log_line) @@ -242,7 +245,6 @@ def test_forward_compatibility( test_output_dir: Path, top_output_dir: Path, pg_version: PgVersion, - compatibility_snapshot_dir: Path, compute_reconfigure_listener: ComputeReconfigure, ): """ @@ -266,8 +268,14 @@ def test_forward_compatibility( neon_env_builder.neon_binpath = neon_env_builder.compatibility_neon_binpath neon_env_builder.pg_distrib_dir = neon_env_builder.compatibility_pg_distrib_dir + # Note that we are testing with new data, so we should use `new_compatibility_snapshot_dir`, which is created by test_create_snapshot. 
+ new_compatibility_snapshot_dir = ( + top_output_dir / f"compatibility_snapshot_pg{pg_version.v_prefixed}" + ) + + log.info(f"Using snapshot dir at {new_compatibility_snapshot_dir}") env = neon_env_builder.from_repo_dir( - compatibility_snapshot_dir / "repo", + new_compatibility_snapshot_dir / "repo", ) # there may be an arbitrary number of unrelated tests run between create_snapshot and here env.pageserver.allowed_errors.append(ingest_lag_log_line) @@ -296,7 +304,7 @@ def test_forward_compatibility( check_neon_works( env, test_output_dir=test_output_dir, - sql_dump_path=compatibility_snapshot_dir / "dump.sql", + sql_dump_path=new_compatibility_snapshot_dir / "dump.sql", repo_dir=env.repo_dir, ) diff --git a/test_runner/regress/test_compute_metrics.py b/test_runner/regress/test_compute_metrics.py index d1e61e597c..b776f58348 100644 --- a/test_runner/regress/test_compute_metrics.py +++ b/test_runner/regress/test_compute_metrics.py @@ -217,7 +217,7 @@ if SQL_EXPORTER is None: self, logs_dir: Path, config_file: Path, collector_file: Path, port: int ) -> None: # NOTE: Keep the version the same as in - # compute/compute-node.Dockerfile and build-tools.Dockerfile. + # compute/compute-node.Dockerfile and build-tools/Dockerfile. # # The "host" network mode allows sql_exporter to talk to the # endpoint which is running on the host. 
diff --git a/test_runner/regress/test_download_extensions.py b/test_runner/regress/test_download_extensions.py index fe3b220c67..d7f78afac8 100644 --- a/test_runner/regress/test_download_extensions.py +++ b/test_runner/regress/test_download_extensions.py @@ -2,7 +2,6 @@ from __future__ import annotations import os import platform -import shutil import tarfile from enum import StrEnum from pathlib import Path @@ -31,27 +30,6 @@ if TYPE_CHECKING: from werkzeug.wrappers.request import Request -# use neon_env_builder_local fixture to override the default neon_env_builder fixture -# and use a test-specific pg_install instead of shared one -@pytest.fixture(scope="function") -def neon_env_builder_local( - neon_env_builder: NeonEnvBuilder, - test_output_dir: Path, - pg_distrib_dir: Path, -) -> NeonEnvBuilder: - test_local_pginstall = test_output_dir / "pg_install" - log.info(f"copy {pg_distrib_dir} to {test_local_pginstall}") - - # We can't copy only the version that we are currently testing because other - # binaries like the storage controller need specific Postgres versions. 
- shutil.copytree(pg_distrib_dir, test_local_pginstall) - - neon_env_builder.pg_distrib_dir = test_local_pginstall - log.info(f"local neon_env_builder.pg_distrib_dir: {neon_env_builder.pg_distrib_dir}") - - return neon_env_builder - - @final class RemoteExtension(StrEnum): SQL_ONLY = "test_extension_sql_only" diff --git a/test_runner/regress/test_event_trigger_extension.py b/test_runner/regress/test_event_trigger_extension.py new file mode 100644 index 0000000000..ac4351dcd5 --- /dev/null +++ b/test_runner/regress/test_event_trigger_extension.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import shutil +from pathlib import Path +from typing import TYPE_CHECKING, cast + +import pytest +from fixtures.log_helper import log +from fixtures.paths import BASE_DIR + +if TYPE_CHECKING: + from pathlib import Path + + from fixtures.neon_fixtures import ( + NeonEnvBuilder, + ) + from fixtures.pg_version import PgVersion + + +# use neon_env_builder_local fixture to override the default neon_env_builder fixture +# and use a test-specific pg_install instead of shared one +@pytest.fixture(scope="function") +def neon_env_builder_event_trigger_extension( + neon_env_builder_local: NeonEnvBuilder, + test_output_dir: Path, + pg_version: PgVersion, +) -> NeonEnvBuilder: + test_local_pginstall = test_output_dir / "pg_install" + + # Now copy the SQL only extension test_event_trigger_extension in the local + # pginstall extension directory on-disk + test_event_trigger_extension_dir = ( + BASE_DIR / "test_runner" / "regress" / "data" / "test_event_trigger_extension" + ) + + test_local_extension_dir = ( + test_local_pginstall / f"v{pg_version}" / "share" / "postgresql" / "extension" + ) + + log.info(f"copy {test_event_trigger_extension_dir} to {test_local_extension_dir}") + + for f in [ + test_event_trigger_extension_dir / "test_event_trigger_extension.control", + test_event_trigger_extension_dir / "test_event_trigger_extension--1.0.sql", + ]: + shutil.copy(f, 
test_local_extension_dir) + + return neon_env_builder_local + + +def test_event_trigger_extension(neon_env_builder_event_trigger_extension: NeonEnvBuilder): + """ + Test installing an extension that contains an Event Trigger. + + The Event Trigger function is owned by the extension owner, which at + CREATE EXTENSION is going to be the Postgres bootstrap user, per the + extension control file where both superuser = true and trusted = true. + + Also this function is SECURTY DEFINER, to allow for making changes to + the extension SQL objects, in our case a sequence. + + This test makes sure that the event trigger function is fired correctly + by non-privileged user DDL actions such as CREATE TABLE. + """ + env = neon_env_builder_event_trigger_extension.init_start() + env.create_branch("test_event_trigger_extension") + + endpoint = env.endpoints.create_start("test_event_trigger_extension") + extension = "test_event_trigger_extension" + database = "test_event_trigger_extension" + + endpoint.safe_psql(f"CREATE DATABASE {database}") + endpoint.safe_psql(f"CREATE EXTENSION {extension}", dbname=database) + + # check that the extension is owned by the bootstrap superuser (cloud_admin) + pg_bootstrap_superuser_name = "cloud_admin" + with endpoint.connect(dbname=database) as pg_conn: + with pg_conn.cursor() as cur: + cur.execute( + f"select rolname from pg_roles r join pg_extension e on r.oid = e.extowner where extname = '{extension}'" + ) + owner = cast("tuple[str]", cur.fetchone())[0] + assert owner == pg_bootstrap_superuser_name, ( + f"extension {extension} is not owned by bootstrap user '{pg_bootstrap_superuser_name}'" + ) + + # test that the SQL-only Event Trigger (SECURITY DEFINER function) runs + # correctly now that the extension has been installed + # + # create table to trigger the event trigger, twice, check sequence count + with endpoint.connect(dbname=database) as pg_conn: + log.info("creating SQL objects (tables)") + with pg_conn.cursor() as cur: + 
cur.execute("CREATE TABLE foo1(id int primary key)") + cur.execute("CREATE TABLE foo2(id int)") + + cur.execute("SELECT event_trigger.get_schema_version()") + res = cast("tuple[int]", cur.fetchone()) + ver = res[0] + + log.info(f"schema version is now {ver}") + assert ver == 2, "schema version is not 2" diff --git a/test_runner/regress/test_feature_flag.py b/test_runner/regress/test_feature_flag.py index 2712d13dcc..c6c192b6f1 100644 --- a/test_runner/regress/test_feature_flag.py +++ b/test_runner/regress/test_feature_flag.py @@ -49,3 +49,12 @@ def test_feature_flag(neon_env_builder: NeonEnvBuilder): env.initial_tenant, "test-feature-flag" )["result"] ) + + env.pageserver.http_client().force_refresh_feature_flag(env.initial_tenant) + + # Check if the properties exist + result = env.pageserver.http_client().evaluate_feature_flag_multivariate( + env.initial_tenant, "test-feature-flag" + ) + assert "tenant_remote_size_mb" in result["properties"] + assert "tenant_id" in result["properties"] diff --git a/test_runner/regress/test_gin_redo.py b/test_runner/regress/test_gin_redo.py index 71382990dc..3ec2163203 100644 --- a/test_runner/regress/test_gin_redo.py +++ b/test_runner/regress/test_gin_redo.py @@ -16,6 +16,7 @@ def test_gin_redo(neon_simple_env: NeonEnv): secondary = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") con = primary.connect() cur = con.cursor() + cur.execute("select pg_switch_wal()") cur.execute("create table gin_test_tbl(id integer, i int4[])") cur.execute("create index gin_test_idx on gin_test_tbl using gin (i)") cur.execute("insert into gin_test_tbl select g,array[3, 1, g] from generate_series(1, 10000) g") diff --git a/test_runner/regress/test_lfc_prewarm.py b/test_runner/regress/test_lfc_prewarm.py index 1fa1ead034..0f0cf4cc6d 100644 --- a/test_runner/regress/test_lfc_prewarm.py +++ b/test_runner/regress/test_lfc_prewarm.py @@ -20,46 +20,48 @@ class PrewarmMethod(StrEnum): PREWARM_LABEL = "compute_ctl_lfc_prewarms_total" 
+PREWARM_ERR_LABEL = "compute_ctl_lfc_prewarm_errors_total" OFFLOAD_LABEL = "compute_ctl_lfc_offloads_total" +OFFLOAD_ERR_LABEL = "compute_ctl_lfc_offload_errors_total" METHOD_VALUES = [e for e in PrewarmMethod] METHOD_IDS = [e.value for e in PrewarmMethod] - - -def check_pinned_entries(cur: Cursor): - # some LFC buffer can be temporary locked by autovacuum or background writer - for _ in range(10): - cur.execute("select lfc_value from neon_lfc_stats where lfc_key='file_cache_chunks_pinned'") - n_pinned = cur.fetchall()[0][0] - if n_pinned == 0: - break - sleep(1) - assert n_pinned == 0 +AUTOOFFLOAD_INTERVAL_SECS = 2 def prom_parse(client: EndpointHttpClient) -> dict[str, float]: + labels = PREWARM_LABEL, OFFLOAD_LABEL, PREWARM_ERR_LABEL, OFFLOAD_ERR_LABEL return { - sample.name: sample.value + sample.name: int(sample.value) for family in prom_parse_impl(client.metrics()) for sample in family.samples - if sample.name in (PREWARM_LABEL, OFFLOAD_LABEL) + if sample.name in labels } def offload_lfc(method: PrewarmMethod, client: EndpointHttpClient, cur: Cursor) -> Any: + if method == PrewarmMethod.POSTGRES: + cur.execute("select neon.get_local_cache_state()") + return cur.fetchall()[0][0] + if method == PrewarmMethod.AUTOPREWARM: + # With autoprewarm, we need to be sure LFC was offloaded after all writes + # finish, so we sleep. 
Otherwise we'll have less prewarmed pages than we want + sleep(AUTOOFFLOAD_INTERVAL_SECS) client.offload_lfc_wait() - elif method == PrewarmMethod.COMPUTE_CTL: + return + + if method == PrewarmMethod.COMPUTE_CTL: status = client.prewarm_lfc_status() assert status["status"] == "not_prewarmed" assert "error" not in status client.offload_lfc() assert client.prewarm_lfc_status()["status"] == "not_prewarmed" - assert prom_parse(client) == {OFFLOAD_LABEL: 1, PREWARM_LABEL: 0} - elif method == PrewarmMethod.POSTGRES: - cur.execute("select get_local_cache_state()") - return cur.fetchall()[0][0] - else: - raise AssertionError(f"{method} not in PrewarmMethod") + parsed = prom_parse(client) + desired = {OFFLOAD_LABEL: 1, PREWARM_LABEL: 0, OFFLOAD_ERR_LABEL: 0, PREWARM_ERR_LABEL: 0} + assert parsed == desired, f"{parsed=} != {desired=}" + return + + raise AssertionError(f"{method} not in PrewarmMethod") def prewarm_endpoint( @@ -70,7 +72,7 @@ def prewarm_endpoint( elif method == PrewarmMethod.COMPUTE_CTL: client.prewarm_lfc() elif method == PrewarmMethod.POSTGRES: - cur.execute("select prewarm_local_cache(%s)", (lfc_state,)) + cur.execute("select neon.prewarm_local_cache(%s)", (lfc_state,)) def check_prewarmed( @@ -81,12 +83,17 @@ def check_prewarmed( assert prom_parse(client)[PREWARM_LABEL] == 1 elif method == PrewarmMethod.COMPUTE_CTL: assert client.prewarm_lfc_status() == desired_status - assert prom_parse(client) == {OFFLOAD_LABEL: 0, PREWARM_LABEL: 1} + desired = {OFFLOAD_LABEL: 0, PREWARM_LABEL: 1, PREWARM_ERR_LABEL: 0, OFFLOAD_ERR_LABEL: 0} + assert prom_parse(client) == desired @pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") @pytest.mark.parametrize("method", METHOD_VALUES, ids=METHOD_IDS) def test_lfc_prewarm(neon_simple_env: NeonEnv, method: PrewarmMethod): + """ + Test we can offload endpoint's LFC cache to endpoint storage. + Test we can prewarm endpoint with LFC cache loaded from endpoint storage. 
+ """ env = neon_simple_env n_records = 1000000 cfg = [ @@ -96,21 +103,20 @@ def test_lfc_prewarm(neon_simple_env: NeonEnv, method: PrewarmMethod): "neon.file_cache_size_limit=1GB", "neon.file_cache_prewarm_limit=1000", ] - offload_secs = 2 if method == PrewarmMethod.AUTOPREWARM: endpoint = env.endpoints.create_start( branch_name="main", config_lines=cfg, autoprewarm=True, - offload_lfc_interval_seconds=offload_secs, + offload_lfc_interval_seconds=AUTOOFFLOAD_INTERVAL_SECS, ) else: endpoint = env.endpoints.create_start(branch_name="main", config_lines=cfg) pg_conn = endpoint.connect() pg_cur = pg_conn.cursor() - pg_cur.execute("create extension neon") + pg_cur.execute("create schema neon; create extension neon with schema neon") pg_cur.execute("create database lfc") lfc_conn = endpoint.connect(dbname="lfc") @@ -125,7 +131,7 @@ def test_lfc_prewarm(neon_simple_env: NeonEnv, method: PrewarmMethod): endpoint.stop() if method == PrewarmMethod.AUTOPREWARM: - endpoint.start(autoprewarm=True, offload_lfc_interval_seconds=offload_secs) + endpoint.start(autoprewarm=True, offload_lfc_interval_seconds=AUTOOFFLOAD_INTERVAL_SECS) else: endpoint.start() @@ -136,26 +142,24 @@ def test_lfc_prewarm(neon_simple_env: NeonEnv, method: PrewarmMethod): lfc_cur = lfc_conn.cursor() prewarm_endpoint(method, client, pg_cur, lfc_state) - pg_cur.execute("select lfc_value from neon_lfc_stats where lfc_key='file_cache_used_pages'") + pg_cur.execute( + "select lfc_value from neon.neon_lfc_stats where lfc_key='file_cache_used_pages'" + ) lfc_used_pages = pg_cur.fetchall()[0][0] log.info(f"Used LFC size: {lfc_used_pages}") - pg_cur.execute("select * from get_prewarm_info()") - prewarm_info = pg_cur.fetchall()[0] - log.info(f"Prewarm info: {prewarm_info}") - total, prewarmed, skipped, _ = prewarm_info + pg_cur.execute("select * from neon.get_prewarm_info()") + total, prewarmed, skipped, _ = pg_cur.fetchall()[0] + log.info(f"Prewarm info: {total=} {prewarmed=} {skipped=}") progress = (prewarmed + 
skipped) * 100 // total log.info(f"Prewarm progress: {progress}%") - assert lfc_used_pages > 10000 - assert ( - prewarm_info[0] > 0 - and prewarm_info[1] > 0 - and prewarm_info[0] == prewarm_info[1] + prewarm_info[2] - ) + assert total > 0 + assert prewarmed > 0 + assert total == prewarmed + skipped + lfc_cur.execute("select sum(pk) from t") assert lfc_cur.fetchall()[0][0] == n_records * (n_records + 1) / 2 - check_pinned_entries(pg_cur) desired = {"status": "completed", "total": total, "prewarmed": prewarmed, "skipped": skipped} check_prewarmed(method, client, desired) @@ -168,6 +172,9 @@ WORKLOAD_IDS = METHOD_IDS[:-1] @pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") @pytest.mark.parametrize("method", WORKLOAD_VALUES, ids=WORKLOAD_IDS) def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, method: PrewarmMethod): + """ + Test continiously prewarming endpoint when there is a write-heavy workload going in parallel + """ env = neon_simple_env n_records = 10000 n_threads = 4 @@ -181,7 +188,7 @@ def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, method: PrewarmMet pg_conn = endpoint.connect() pg_cur = pg_conn.cursor() - pg_cur.execute("create extension neon") + pg_cur.execute("create schema neon; create extension neon with schema neon") pg_cur.execute("CREATE DATABASE lfc") lfc_conn = endpoint.connect(dbname="lfc") @@ -233,9 +240,9 @@ def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, method: PrewarmMet prewarm_thread.start() def prewarmed(): - assert n_prewarms > 5 + assert n_prewarms > 3 - wait_until(prewarmed) + wait_until(prewarmed, timeout=40) # debug builds don't finish in 20s running = False for t in workload_threads: @@ -246,6 +253,12 @@ def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, method: PrewarmMet total_balance = lfc_cur.fetchall()[0][0] assert total_balance == 0 - check_pinned_entries(pg_cur) - if method != PrewarmMethod.POSTGRES: - assert prom_parse(http_client) == {OFFLOAD_LABEL: 1, 
PREWARM_LABEL: n_prewarms} + if method == PrewarmMethod.POSTGRES: + return + desired = { + OFFLOAD_LABEL: 1, + PREWARM_LABEL: n_prewarms, + OFFLOAD_ERR_LABEL: 0, + PREWARM_ERR_LABEL: 0, + } + assert prom_parse(http_client) == desired diff --git a/test_runner/regress/test_neon_superuser.py b/test_runner/regress/test_neon_superuser.py index f99d79e138..9a28f22e78 100644 --- a/test_runner/regress/test_neon_superuser.py +++ b/test_runner/regress/test_neon_superuser.py @@ -103,3 +103,90 @@ def test_neon_superuser(neon_simple_env: NeonEnv, pg_version: PgVersion): query = "DROP SUBSCRIPTION sub CASCADE" log.info(f"Dropping subscription: {query}") cur.execute(query) + + +def test_privileged_role_override(neon_simple_env: NeonEnv, pg_version: PgVersion): + """ + Test that we can override the privileged role for an endpoint and when we do it, + everything is correctly bootstrapped inside Postgres and we don't have neon_superuser + role in the database. + """ + PRIVILEGED_ROLE_NAME = "my_superuser" + + env = neon_simple_env + env.create_branch("test_privileged_role_override") + ep = env.endpoints.create( + "test_privileged_role_override", + privileged_role_name=PRIVILEGED_ROLE_NAME, + update_catalog=True, + ) + + ep.start() + + ep.wait_for_migrations() + + member_roles = [ + "pg_read_all_data", + "pg_write_all_data", + "pg_monitor", + "pg_signal_backend", + ] + + non_member_roles = [ + "pg_execute_server_program", + "pg_read_server_files", + "pg_write_server_files", + ] + + role_attributes = { + "rolsuper": False, + "rolinherit": True, + "rolcreaterole": True, + "rolcreatedb": True, + "rolcanlogin": False, + "rolreplication": True, + "rolconnlimit": -1, + "rolbypassrls": True, + } + + if pg_version >= PgVersion.V15: + non_member_roles.append("pg_checkpoint") + + if pg_version >= PgVersion.V16: + member_roles.append("pg_create_subscription") + non_member_roles.append("pg_use_reserved_connections") + + with ep.cursor() as cur: + cur.execute(f"SELECT rolname FROM pg_roles WHERE 
rolname = '{PRIVILEGED_ROLE_NAME}'") + assert cur.fetchall()[0][0] == PRIVILEGED_ROLE_NAME + + cur.execute("SELECT rolname FROM pg_roles WHERE rolname = 'neon_superuser'") + assert len(cur.fetchall()) == 0 + + cur.execute("SHOW neon.privileged_role_name") + assert cur.fetchall()[0][0] == PRIVILEGED_ROLE_NAME + + # check PRIVILEGED_ROLE_NAME role is created + cur.execute(f"select * from pg_roles where rolname = '{PRIVILEGED_ROLE_NAME}'") + assert cur.fetchone() is not None + + # check PRIVILEGED_ROLE_NAME role has the correct member roles + for role in member_roles: + cur.execute(f"SELECT pg_has_role('{PRIVILEGED_ROLE_NAME}', '{role}', 'member')") + assert cur.fetchone() == (True,), ( + f"Role {role} should be a member of {PRIVILEGED_ROLE_NAME}" + ) + + for role in non_member_roles: + cur.execute(f"SELECT pg_has_role('{PRIVILEGED_ROLE_NAME}', '{role}', 'member')") + assert cur.fetchone() == (False,), ( + f"Role {role} should not be a member of {PRIVILEGED_ROLE_NAME}" + ) + + # check PRIVILEGED_ROLE_NAME role has the correct role attributes + for attr, val in role_attributes.items(): + cur.execute(f"SELECT {attr} FROM pg_roles WHERE rolname = '{PRIVILEGED_ROLE_NAME}'") + curr_val = cur.fetchone() + assert curr_val == (val,), ( + f"Role attribute {attr} should be {val} instead of {curr_val}" + ) diff --git a/test_runner/regress/test_normal_work.py b/test_runner/regress/test_normal_work.py index b815fee702..ae545664d2 100644 --- a/test_runner/regress/test_normal_work.py +++ b/test_runner/regress/test_normal_work.py @@ -17,7 +17,9 @@ def check_tenant( config_lines = [ f"neon.safekeeper_proto_version = {safekeeper_proto_version}", ] - endpoint = env.endpoints.create_start("main", tenant_id=tenant_id, config_lines=config_lines, grpc=True) + endpoint = env.endpoints.create_start( + "main", tenant_id=tenant_id, config_lines=config_lines, grpc=True + ) # we rely upon autocommit after each statement res_1 = endpoint.safe_psql_many( queries=[ diff --git 
a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py index 7f9207047e..92889e5de3 100644 --- a/test_runner/regress/test_pageserver_api.py +++ b/test_runner/regress/test_pageserver_api.py @@ -3,6 +3,7 @@ from __future__ import annotations from typing import TYPE_CHECKING from fixtures.common_types import Lsn, TenantId, TimelineId +from fixtures.log_helper import log from fixtures.neon_fixtures import ( DEFAULT_BRANCH_NAME, NeonEnv, @@ -164,3 +165,15 @@ def test_pageserver_http_index_part_force_patch(neon_env_builder: NeonEnvBuilder {"rel_size_migration": "legacy"}, ) assert client.timeline_detail(tenant_id, timeline_id)["rel_size_migration"] == "legacy" + + +def test_pageserver_get_tenant_visible_size(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_pageservers = 1 + env = neon_env_builder.init_start() + env.create_tenant(shard_count=4) + env.create_tenant(shard_count=2) + + json = env.pageserver.http_client().list_tenant_visible_size() + log.info(f"{json}") + # initial tennat + 2 newly created tenants + assert len(json) == 7 diff --git a/test_runner/regress/test_pageserver_layer_rolling.py b/test_runner/regress/test_pageserver_layer_rolling.py index 91c4ef521c..68f470d962 100644 --- a/test_runner/regress/test_pageserver_layer_rolling.py +++ b/test_runner/regress/test_pageserver_layer_rolling.py @@ -246,9 +246,9 @@ def test_total_size_limit(neon_env_builder: NeonEnvBuilder): system_memory = psutil.virtual_memory().total - # The smallest total size limit we can configure is 1/1024th of the system memory (e.g. 128MB on - # a system with 128GB of RAM). We will then write enough data to violate this limit. - max_dirty_data = 128 * 1024 * 1024 + # The smallest total size limit we can configure is 1/1024th of the system memory (e.g. 256MB on + # a system with 256GB of RAM). We will then write enough data to violate this limit. 
+ max_dirty_data = 256 * 1024 * 1024 ephemeral_bytes_per_memory_kb = (max_dirty_data * 1024) // system_memory assert ephemeral_bytes_per_memory_kb > 0 @@ -272,7 +272,7 @@ def test_total_size_limit(neon_env_builder: NeonEnvBuilder): timeline_count = 10 # This is about 2MiB of data per timeline - entries_per_timeline = 100_000 + entries_per_timeline = 200_000 last_flush_lsns = asyncio.run(workload(env, tenant_conf, timeline_count, entries_per_timeline)) wait_until_pageserver_is_caught_up(env, last_flush_lsns) diff --git a/test_runner/regress/test_readonly_node.py b/test_runner/regress/test_readonly_node.py index ee934a900d..5612236250 100644 --- a/test_runner/regress/test_readonly_node.py +++ b/test_runner/regress/test_readonly_node.py @@ -201,11 +201,11 @@ def test_readonly_node_gc(neon_env_builder: NeonEnvBuilder): for shard, ps in tenant_get_shards(env, env.initial_tenant): client = ps.http_client() layers_guarded_before_gc = get_layers_protected_by_lease( - client, shard, env.initial_timeline, lease_lsn=lsn + client, shard, env.initial_timeline, lease_lsn=lease_lsn ) gc_result = client.timeline_gc(shard, env.initial_timeline, 0) layers_guarded_after_gc = get_layers_protected_by_lease( - client, shard, env.initial_timeline, lease_lsn=lsn + client, shard, env.initial_timeline, lease_lsn=lease_lsn ) # Note: cannot assert on `layers_removed` here because it could be layers diff --git a/test_runner/regress/test_replica_promotes.py b/test_runner/regress/test_replica_promotes.py index 4486901bae..8d39ac123a 100644 --- a/test_runner/regress/test_replica_promotes.py +++ b/test_runner/regress/test_replica_promotes.py @@ -1,29 +1,51 @@ """ -File with secondary->primary promotion testing. - -This far, only contains a test that we don't break and that the data is persisted. 
+Secondary -> primary promotion testing """ +from enum import StrEnum from typing import cast import psycopg2 +import pytest from fixtures.common_types import Lsn from fixtures.log_helper import log from fixtures.neon_fixtures import Endpoint, NeonEnv, wait_replica_caughtup -from fixtures.pg_version import PgVersion +from fixtures.utils import USE_LFC +from psycopg2.extensions import cursor as Cursor from pytest import raises def stop_and_check_lsn(ep: Endpoint, expected_lsn: Lsn | None): ep.stop(mode="immediate-terminate") lsn = ep.terminate_flush_lsn - if expected_lsn is not None: + assert (lsn is not None) == (expected_lsn is not None), f"{lsn=}, {expected_lsn=}" + if lsn is not None: assert lsn >= expected_lsn, f"{expected_lsn=} < {lsn=}" - else: - assert lsn == expected_lsn, f"{expected_lsn=} != {lsn=}" -def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): +def get_lsn_triple(cur: Cursor) -> tuple[str, str, str]: + cur.execute( + """ + SELECT pg_current_wal_insert_lsn(), + pg_current_wal_lsn(), + pg_current_wal_flush_lsn() + """ + ) + return cast("tuple[str, str, str]", cur.fetchone()) + + +class PromoteMethod(StrEnum): + COMPUTE_CTL = "compute-ctl" + POSTGRES = "postgres" + + +METHOD_OPTIONS = [e for e in PromoteMethod] +METHOD_IDS = [e.value for e in PromoteMethod] + + +@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") +@pytest.mark.parametrize("method", METHOD_OPTIONS, ids=METHOD_IDS) +def test_replica_promote(neon_simple_env: NeonEnv, method: PromoteMethod): """ Test that a replica safely promotes, and can commit data updates which show up when the primary boots up after the promoted secondary endpoint @@ -38,29 +60,26 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): with primary.connect() as primary_conn: primary_cur = primary_conn.cursor() + primary_cur.execute("create schema neon;create extension neon with schema neon") primary_cur.execute( "create table t(pk bigint GENERATED ALWAYS 
AS IDENTITY, payload integer)" ) primary_cur.execute("INSERT INTO t(payload) SELECT generate_series(1, 100)") - primary_cur.execute( - """ - SELECT pg_current_wal_insert_lsn(), - pg_current_wal_lsn(), - pg_current_wal_flush_lsn() - """ - ) - lsn_triple = cast("tuple[str, str, str]", primary_cur.fetchone()) + + lsn_triple = get_lsn_triple(primary_cur) log.info(f"Primary: Current LSN after workload is {lsn_triple}") expected_primary_lsn: Lsn = Lsn(lsn_triple[2]) primary_cur.execute("show neon.safekeepers") safekeepers = primary_cur.fetchall()[0][0] - wait_replica_caughtup(primary, secondary) + if method == PromoteMethod.COMPUTE_CTL: + primary.http_client().offload_lfc() + else: + wait_replica_caughtup(primary, secondary) with secondary.connect() as secondary_conn: secondary_cur = secondary_conn.cursor() secondary_cur.execute("select count(*) from t") - assert secondary_cur.fetchone() == (100,) with raises(psycopg2.Error): @@ -71,28 +90,30 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): secondary_cur.execute("select count(*) from t") assert secondary_cur.fetchone() == (100,) + primary_endpoint_id = primary.endpoint_id stop_and_check_lsn(primary, expected_primary_lsn) # Reconnect to the secondary to make sure we get a read-write connection promo_conn = secondary.connect() promo_cur = promo_conn.cursor() - promo_cur.execute(f"alter system set neon.safekeepers='{safekeepers}'") - promo_cur.execute("select pg_reload_conf()") + if method == PromoteMethod.COMPUTE_CTL: + client = secondary.http_client() + client.prewarm_lfc(primary_endpoint_id) + # control plane knows safekeepers, simulate it by querying primary + assert (lsn := primary.terminate_flush_lsn) + safekeepers_lsn = {"safekeepers": safekeepers, "wal_flush_lsn": lsn} + assert client.promote(safekeepers_lsn)["status"] == "completed" + else: + promo_cur.execute(f"alter system set neon.safekeepers='{safekeepers}'") + promo_cur.execute("select pg_reload_conf()") + 
promo_cur.execute("SELECT * FROM pg_promote()") + assert promo_cur.fetchone() == (True,) - promo_cur.execute("SELECT * FROM pg_promote()") - assert promo_cur.fetchone() == (True,) - promo_cur.execute( - """ - SELECT pg_current_wal_insert_lsn(), - pg_current_wal_lsn(), - pg_current_wal_flush_lsn() - """ - ) - log.info(f"Secondary: LSN after promotion is {promo_cur.fetchone()}") + lsn_triple = get_lsn_triple(promo_cur) + log.info(f"Secondary: LSN after promotion is {lsn_triple}") # Reconnect to the secondary to make sure we get a read-write connection - with secondary.connect() as new_primary_conn: - new_primary_cur = new_primary_conn.cursor() + with secondary.connect() as conn, conn.cursor() as new_primary_cur: new_primary_cur.execute("select count(*) from t") assert new_primary_cur.fetchone() == (100,) @@ -101,43 +122,34 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): ) assert new_primary_cur.fetchall() == [(it,) for it in range(101, 201)] - new_primary_cur = new_primary_conn.cursor() + new_primary_cur = conn.cursor() new_primary_cur.execute("select payload from t") assert new_primary_cur.fetchall() == [(it,) for it in range(1, 201)] new_primary_cur.execute("select count(*) from t") assert new_primary_cur.fetchone() == (200,) - new_primary_cur.execute( - """ - SELECT pg_current_wal_insert_lsn(), - pg_current_wal_lsn(), - pg_current_wal_flush_lsn() - """ - ) - log.info(f"Secondary: LSN after workload is {new_primary_cur.fetchone()}") - with secondary.connect() as second_viewpoint_conn: - new_primary_cur = second_viewpoint_conn.cursor() + lsn_triple = get_lsn_triple(new_primary_cur) + log.info(f"Secondary: LSN after workload is {lsn_triple}") + expected_promoted_lsn = Lsn(lsn_triple[2]) + + with secondary.connect() as conn, conn.cursor() as new_primary_cur: new_primary_cur.execute("select payload from t") assert new_primary_cur.fetchall() == [(it,) for it in range(1, 201)] - # wait_for_last_flush_lsn(env, secondary, env.initial_tenant, 
env.initial_timeline) - - # secondaries don't sync safekeepers on finish so LSN will be None - stop_and_check_lsn(secondary, None) + if method == PromoteMethod.COMPUTE_CTL: + # compute_ctl's /promote switches replica type to Primary so it syncs + # safekeepers on finish + stop_and_check_lsn(secondary, expected_promoted_lsn) + else: + # on testing postgres, we don't update replica type, secondaries don't + # sync so lsn should be None + stop_and_check_lsn(secondary, None) primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary2") - with primary.connect() as new_primary: - new_primary_cur = new_primary.cursor() - new_primary_cur.execute( - """ - SELECT pg_current_wal_insert_lsn(), - pg_current_wal_lsn(), - pg_current_wal_flush_lsn() - """ - ) - lsn_triple = cast("tuple[str, str, str]", new_primary_cur.fetchone()) + with primary.connect() as new_primary, new_primary.cursor() as new_primary_cur: + lsn_triple = get_lsn_triple(new_primary_cur) expected_primary_lsn = Lsn(lsn_triple[2]) log.info(f"New primary: Boot LSN is {lsn_triple}") @@ -146,5 +158,39 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): new_primary_cur.execute("INSERT INTO t (payload) SELECT generate_series(201, 300)") new_primary_cur.execute("select count(*) from t") assert new_primary_cur.fetchone() == (300,) - stop_and_check_lsn(primary, expected_primary_lsn) + + +@pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") +def test_replica_promote_handler_disconnects(neon_simple_env: NeonEnv): + """ + Test that if a handler disconnects from /promote route of compute_ctl, promotion still happens + once, and no error is thrown + """ + env: NeonEnv = neon_simple_env + primary: Endpoint = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + secondary: Endpoint = env.endpoints.new_replica_start(origin=primary, endpoint_id="secondary") + + with primary.connect() as conn, conn.cursor() as cur: + cur.execute("create schema 
neon;create extension neon with schema neon") + cur.execute("create table t(pk bigint GENERATED ALWAYS AS IDENTITY, payload integer)") + cur.execute("INSERT INTO t(payload) SELECT generate_series(1, 100)") + cur.execute("show neon.safekeepers") + safekeepers = cur.fetchall()[0][0] + + primary.http_client().offload_lfc() + primary_endpoint_id = primary.endpoint_id + primary.stop(mode="immediate-terminate") + assert (lsn := primary.terminate_flush_lsn) + + client = secondary.http_client() + client.prewarm_lfc(primary_endpoint_id) + safekeepers_lsn = {"safekeepers": safekeepers, "wal_flush_lsn": lsn} + assert client.promote(safekeepers_lsn, disconnect=True)["status"] == "completed" + + with secondary.connect() as conn, conn.cursor() as cur: + cur.execute("select count(*) from t") + assert cur.fetchone() == (100,) + cur.execute("INSERT INTO t (payload) SELECT generate_series(101, 200) RETURNING payload") + cur.execute("select count(*) from t") + assert cur.fetchone() == (200,) diff --git a/test_runner/regress/test_safekeeper_migration.py b/test_runner/regress/test_safekeeper_migration.py index 057371175c..170c1a3650 100644 --- a/test_runner/regress/test_safekeeper_migration.py +++ b/test_runner/regress/test_safekeeper_migration.py @@ -2,6 +2,9 @@ from __future__ import annotations from typing import TYPE_CHECKING +import pytest +from fixtures.neon_fixtures import StorageControllerApiException + if TYPE_CHECKING: from fixtures.neon_fixtures import NeonEnvBuilder @@ -27,6 +30,7 @@ def test_safekeeper_migration_simple(neon_env_builder: NeonEnvBuilder): [ ".*Timeline .* was cancelled and cannot be used anymore.*", ".*Timeline .* has been deleted.*", + ".*Timeline .* was not found in global map.*", ".*wal receiver task finished with an error.*", ] ) @@ -74,3 +78,38 @@ def test_safekeeper_migration_simple(neon_env_builder: NeonEnvBuilder): ep.start(safekeeper_generation=1, safekeepers=[3]) assert ep.safe_psql("SELECT * FROM t") == [(i,) for i in range(1, 4)] + + +def 
test_new_sk_set_validation(neon_env_builder: NeonEnvBuilder): + """ + Test that safekeeper_migrate validates the new_sk_set before starting the migration. + """ + neon_env_builder.num_safekeepers = 3 + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": True, + "timeline_safekeeper_count": 2, + } + env = neon_env_builder.init_start() + + def expect_fail(sk_set: list[int], match: str): + with pytest.raises(StorageControllerApiException, match=match): + env.storage_controller.migrate_safekeepers( + env.initial_tenant, env.initial_timeline, sk_set + ) + # Check that we failed before committing to the database. + mconf = env.storage_controller.timeline_locate(env.initial_tenant, env.initial_timeline) + assert mconf["generation"] == 1 + + expect_fail([], "safekeeper set is empty") + expect_fail([1], "must have at least 2 safekeepers") + expect_fail([1, 1], "duplicate safekeeper") + expect_fail([1, 100500], "does not exist") + + mconf = env.storage_controller.timeline_locate(env.initial_tenant, env.initial_timeline) + sk_set = mconf["sk_set"] + assert len(sk_set) == 2 + + decom_sk = [sk.id for sk in env.safekeepers if sk.id not in sk_set][0] + env.storage_controller.safekeeper_scheduling_policy(decom_sk, "Decomissioned") + + expect_fail([sk_set[0], decom_sk], "decomissioned") diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 93c621f564..5549105188 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -1,8 +1,11 @@ from __future__ import annotations import os +import random +import threading import time from collections import defaultdict +from threading import Event from typing import TYPE_CHECKING, Any import pytest @@ -1505,6 +1508,256 @@ def test_sharding_split_failures( env.storage_controller.consistency_check() +@pytest.mark.skip(reason="The backpressure change has not been merged yet.") +def test_back_pressure_during_split(neon_env_builder: 
NeonEnvBuilder): + """ + Test backpressure can ignore new shards during tenant split so that if we abort the split, + PG can continue without being blocked. + """ + DBNAME = "regression" + + init_shard_count = 4 + neon_env_builder.num_pageservers = init_shard_count + stripe_size = 32 + + env = neon_env_builder.init_start( + initial_tenant_shard_count=init_shard_count, initial_tenant_shard_stripe_size=stripe_size + ) + + env.storage_controller.allowed_errors.extend( + [ + # All split failures log a warning when then enqueue the abort operation + ".*Enqueuing background abort.*", + # Tolerate any error lots that mention a failpoint + ".*failpoint.*", + ] + ) + + endpoint = env.endpoints.create( + "main", + config_lines=[ + "max_replication_write_lag = 1MB", + "databricks.max_wal_mb_per_second = 1", + "neon.max_cluster_size = 10GB", + ], + ) + endpoint.respec(skip_pg_catalog_updates=False) # Needed for databricks_system to get created. + endpoint.start() + + endpoint.safe_psql(f"CREATE DATABASE {DBNAME}") + + endpoint.safe_psql("CREATE TABLE usertable ( YCSB_KEY INT, FIELD0 TEXT);") + write_done = Event() + + def write_data(write_done): + while not write_done.is_set(): + endpoint.safe_psql( + "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False + ) + log.info("write_data thread exiting") + + writer_thread = threading.Thread(target=write_data, args=(write_done,)) + writer_thread.start() + + env.storage_controller.configure_failpoints(("shard-split-pre-complete", "return(1)")) + # split the tenant + with pytest.raises(StorageControllerApiException): + env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=16) + + write_done.set() + writer_thread.join() + + # writing more data to page servers after split is aborted + for _i in range(5000): + endpoint.safe_psql( + "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False + ) + + # wait until write lag becomes 0 + def check_write_lag_is_zero(): + res = 
endpoint.safe_psql( + """ + SELECT + pg_wal_lsn_diff(pg_current_wal_flush_lsn(), received_lsn) as received_lsn_lag + FROM neon.backpressure_lsns(); + """, + dbname="databricks_system", + log_query=False, + ) + log.info(f"received_lsn_lag = {res[0][0]}") + assert res[0][0] == 0 + + wait_until(check_write_lag_is_zero) + endpoint.stop_and_destroy() + + +# BEGIN_HADRON +def test_shard_resolve_during_split_abort(neon_env_builder: NeonEnvBuilder): + """ + Tests that page service is able to resolve the correct shard during tenant split without causing query errors + """ + DBNAME = "regression" + WORKER_THREADS = 16 + ROW_COUNT = 10000 + + init_shard_count = 4 + neon_env_builder.num_pageservers = 1 + stripe_size = 16 + + env = neon_env_builder.init_start( + initial_tenant_shard_count=init_shard_count, initial_tenant_shard_stripe_size=stripe_size + ) + + env.storage_controller.allowed_errors.extend( + [ + # All split failures log a warning when then enqueue the abort operation + ".*Enqueuing background abort.*", + # Tolerate any error lots that mention a failpoint + ".*failpoint.*", + ] + ) + + endpoint = env.endpoints.create("main") + endpoint.respec(skip_pg_catalog_updates=False) # Needed for databricks_system to get created. + endpoint.start() + + endpoint.safe_psql(f"CREATE DATABASE {DBNAME}") + + # generate 10MB of data + endpoint.safe_psql( + f"CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, {ROW_COUNT}) s;" + ) + read_done = Event() + + def read_data(read_done): + i = 0 + while not read_done.is_set() or i < 10: + endpoint.safe_psql( + f"SELECT * FROM usertable where KEY = {random.randint(1, ROW_COUNT)}", + log_query=False, + ) + i += 1 + log.info(f"read_data thread exiting. 
Executed {i} queries.") + + reader_threads = [] + for _i in range(WORKER_THREADS): + reader_thread = threading.Thread(target=read_data, args=(read_done,)) + reader_thread.start() + reader_threads.append(reader_thread) + + env.storage_controller.configure_failpoints(("shard-split-pre-complete", "return(1)")) + # split the tenant + with pytest.raises(StorageControllerApiException): + env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=16) + + # wait until abort is done + def check_tenant_status(): + active_count = 0 + for i in range(init_shard_count): + status = env.pageserver.http_client().tenant_status( + TenantShardId(env.initial_tenant, i, init_shard_count) + ) + if status["state"]["slug"] == "Active": + active_count += 1 + assert active_count == 4 + + wait_until(check_tenant_status) + + read_done.set() + for thread in reader_threads: + thread.join() + + endpoint.stop() + + +# END_HADRON + + +# HADRON +@pytest.mark.skip(reason="The backpressure change has not been merged yet.") +def test_back_pressure_per_shard(neon_env_builder: NeonEnvBuilder): + """ + Tests back pressure knobs are enforced on the per shard basis instead of at the tenant level. + """ + init_shard_count = 4 + neon_env_builder.num_pageservers = init_shard_count + stripe_size = 1 + + env = neon_env_builder.init_start( + initial_tenant_shard_count=init_shard_count, + initial_tenant_shard_stripe_size=stripe_size, + initial_tenant_conf={ + # disable auto-flush of shards and set max_replication_flush_lag as 15MB. + # The backpressure parameters must be enforced at the shard level to avoid stalling PG. 
+ "checkpoint_distance": 1 * 1024 * 1024 * 1024, + "checkpoint_timeout": "1h", + }, + ) + + endpoint = env.endpoints.create( + "main", + config_lines=[ + "max_replication_write_lag = 0", + "max_replication_apply_lag = 0", + "max_replication_flush_lag = 15MB", + "neon.max_cluster_size = 10GB", + ], + ) + endpoint.respec(skip_pg_catalog_updates=False) # Needed for databricks_system to get created. + endpoint.start() + + # generate 20MB of data + endpoint.safe_psql( + "CREATE TABLE usertable AS SELECT s AS KEY, repeat('a', 1000) as VALUE from generate_series(1, 20000) s;" + ) + res = endpoint.safe_psql( + "SELECT neon.backpressure_throttling_time() as throttling_time", dbname="databricks_system" + )[0] + assert res[0] == 0, f"throttling_time should be 0, but got {res[0]}" + + endpoint.stop() + + +# HADRON +def test_shard_split_page_server_timeout(neon_env_builder: NeonEnvBuilder): + """ + Tests that shard split can correctly handle page server timeouts and abort the split + """ + init_shard_count = 2 + neon_env_builder.num_pageservers = 1 + stripe_size = 1 + + if neon_env_builder.storage_controller_config is None: + neon_env_builder.storage_controller_config = {"shard_split_request_timeout": "5s"} + else: + neon_env_builder.storage_controller_config["shard_split_request_timeout"] = "5s" + + env = neon_env_builder.init_start( + initial_tenant_shard_count=init_shard_count, + initial_tenant_shard_stripe_size=stripe_size, + ) + + env.storage_controller.allowed_errors.extend( + [ + ".*Enqueuing background abort.*", + ".*failpoint.*", + ".*Failed to abort.*", + ".*Exclusive lock by ShardSplit was held.*", + ] + ) + env.pageserver.allowed_errors.extend([".*request was dropped before completing.*"]) + + endpoint1 = env.endpoints.create_start(branch_name="main") + + env.pageserver.http_client().configure_failpoints(("shard-split-post-finish-pause", "pause")) + + with pytest.raises(StorageControllerApiException): + env.storage_controller.tenant_shard_split(env.initial_tenant, 
shard_count=4) + + env.pageserver.http_client().configure_failpoints(("shard-split-post-finish-pause", "off")) + endpoint1.stop_and_destroy() + + def test_sharding_backpressure(neon_env_builder: NeonEnvBuilder): """ Check a scenario when one of the shards is much slower than others. diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 8471ab9f57..9986c1f24a 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -12,7 +12,7 @@ from typing import TYPE_CHECKING import fixtures.utils import pytest from fixtures.auth_tokens import TokenScope -from fixtures.common_types import TenantId, TenantShardId, TimelineId +from fixtures.common_types import Lsn, TenantId, TenantShardId, TimelineId from fixtures.log_helper import log from fixtures.neon_fixtures import ( DEFAULT_AZ_ID, @@ -47,6 +47,7 @@ from fixtures.utils import ( wait_until, ) from fixtures.workload import Workload +from requests.adapters import HTTPAdapter from urllib3 import Retry from werkzeug.wrappers.response import Response @@ -72,6 +73,12 @@ def get_node_shard_counts(env: NeonEnv, tenant_ids): return counts +class DeletionAPIKind(Enum): + OLD = "old" + FORCE = "force" + GRACEFUL = "graceful" + + @pytest.mark.parametrize(**fixtures.utils.allpairs_versions()) def test_storage_controller_smoke( neon_env_builder: NeonEnvBuilder, compute_reconfigure_listener: ComputeReconfigure, combination @@ -990,7 +997,7 @@ def test_storage_controller_compute_hook_retry( @run_only_on_default_postgres("postgres behavior is not relevant") -def test_storage_controller_compute_hook_keep_failing( +def test_storage_controller_compute_hook_stuck_reconciles( httpserver: HTTPServer, neon_env_builder: NeonEnvBuilder, httpserver_listen_address: ListenAddress, @@ -1034,16 +1041,19 @@ def test_storage_controller_compute_hook_keep_failing( alive_pageservers = [p for p in env.pageservers if p.id != banned_tenant_ps.id] # 
Stop pageserver and ban tenant to trigger failed reconciliation + log.info(f"Banning tenant {banned_tenant} and stopping pageserver {banned_tenant_ps.id}") status_by_tenant[banned_tenant] = 423 banned_tenant_ps.stop() env.storage_controller.allowed_errors.append(NOTIFY_BLOCKED_LOG) env.storage_controller.allowed_errors.extend(NOTIFY_FAILURE_LOGS) - env.storage_controller.allowed_errors.append(".*Shard reconciliation is keep-failing.*") + env.storage_controller.allowed_errors.append(".*Keeping extra secondaries.*") + env.storage_controller.allowed_errors.append(".*Shard reconciliation is stuck.*") env.storage_controller.node_configure(banned_tenant_ps.id, {"availability": "Offline"}) # Migrate all allowed tenant shards to the first alive pageserver # to trigger storage controller optimizations due to affinity rules for shard_number in range(shard_count): + log.info(f"Migrating shard {shard_number} of {allowed_tenant} to {alive_pageservers[0].id}") env.storage_controller.tenant_shard_migrate( TenantShardId(allowed_tenant, shard_number, shard_count), alive_pageservers[0].id, @@ -1052,7 +1062,7 @@ def test_storage_controller_compute_hook_keep_failing( # Make some reconcile_all calls to trigger optimizations # RECONCILE_COUNT must be greater than storcon's MAX_CONSECUTIVE_RECONCILIATION_ERRORS - RECONCILE_COUNT = 12 + RECONCILE_COUNT = 20 for i in range(RECONCILE_COUNT): try: n = env.storage_controller.reconcile_all() @@ -1065,6 +1075,8 @@ def test_storage_controller_compute_hook_keep_failing( assert banned_descr["shards"][0]["is_pending_compute_notification"] is True time.sleep(2) + env.storage_controller.assert_log_contains(".*Shard reconciliation is stuck.*") + # Check that the allowed tenant shards are optimized due to affinity rules locations = alive_pageservers[0].http_client().tenant_list_locations()["tenant_shards"] not_optimized_shard_count = 0 @@ -2569,9 +2581,11 @@ def test_background_operation_cancellation(neon_env_builder: NeonEnvBuilder): 
@pytest.mark.parametrize("while_offline", [True, False]) +@pytest.mark.parametrize("deletion_api", [DeletionAPIKind.OLD, DeletionAPIKind.FORCE]) def test_storage_controller_node_deletion( neon_env_builder: NeonEnvBuilder, while_offline: bool, + deletion_api: DeletionAPIKind, ): """ Test that deleting a node works & properly reschedules everything that was on the node. @@ -2595,6 +2609,8 @@ def test_storage_controller_node_deletion( assert env.storage_controller.reconcile_all() == 0 victim = env.pageservers[-1] + if deletion_api == DeletionAPIKind.FORCE and not while_offline: + victim.allowed_errors.append(".*request was dropped before completing.*") # The procedure a human would follow is: # 1. Mark pageserver scheduling=pause @@ -2618,7 +2634,12 @@ def test_storage_controller_node_deletion( wait_until(assert_shards_migrated) log.info(f"Deleting pageserver {victim.id}") - env.storage_controller.node_delete_old(victim.id) + if deletion_api == DeletionAPIKind.FORCE: + env.storage_controller.node_delete(victim.id, force=True) + elif deletion_api == DeletionAPIKind.OLD: + env.storage_controller.node_delete_old(victim.id) + else: + raise AssertionError(f"Invalid deletion API: {deletion_api}") if not while_offline: @@ -2631,7 +2652,15 @@ def test_storage_controller_node_deletion( wait_until(assert_victim_evacuated) # The node should be gone from the list API - assert victim.id not in [n["id"] for n in env.storage_controller.node_list()] + def assert_node_is_gone(): + assert victim.id not in [n["id"] for n in env.storage_controller.node_list()] + + if deletion_api == DeletionAPIKind.FORCE: + wait_until(assert_node_is_gone) + elif deletion_api == DeletionAPIKind.OLD: + assert_node_is_gone() + else: + raise AssertionError(f"Invalid deletion API: {deletion_api}") # No tenants should refer to the node in their intent for tenant_id in tenant_ids: @@ -2653,7 +2682,11 @@ def test_storage_controller_node_deletion( env.storage_controller.consistency_check() -def 
test_storage_controller_node_delete_cancellation(neon_env_builder: NeonEnvBuilder): +@pytest.mark.parametrize("deletion_api", [DeletionAPIKind.FORCE, DeletionAPIKind.GRACEFUL]) +def test_storage_controller_node_delete_cancellation( + neon_env_builder: NeonEnvBuilder, + deletion_api: DeletionAPIKind, +): neon_env_builder.num_pageservers = 3 neon_env_builder.num_azs = 3 env = neon_env_builder.init_configs() @@ -2677,12 +2710,16 @@ def test_storage_controller_node_delete_cancellation(neon_env_builder: NeonEnvBu assert len(nodes) == 3 env.storage_controller.configure_failpoints(("sleepy-delete-loop", "return(10000)")) + env.storage_controller.configure_failpoints(("delete-node-after-reconciles-spawned", "pause")) ps_id_to_delete = env.pageservers[0].id env.storage_controller.warm_up_all_secondaries() + + assert deletion_api in [DeletionAPIKind.FORCE, DeletionAPIKind.GRACEFUL] + force = deletion_api == DeletionAPIKind.FORCE env.storage_controller.retryable_node_operation( - lambda ps_id: env.storage_controller.node_delete(ps_id), + lambda ps_id: env.storage_controller.node_delete(ps_id, force), ps_id_to_delete, max_attempts=3, backoff=2, @@ -2698,6 +2735,8 @@ def test_storage_controller_node_delete_cancellation(neon_env_builder: NeonEnvBu env.storage_controller.cancel_node_delete(ps_id_to_delete) + env.storage_controller.configure_failpoints(("delete-node-after-reconciles-spawned", "off")) + env.storage_controller.poll_node_status( ps_id_to_delete, PageserverAvailability.ACTIVE, @@ -3249,7 +3288,10 @@ def test_storage_controller_ps_restarted_during_drain(neon_env_builder: NeonEnvB wait_until(reconfigure_node_again) -def test_ps_unavailable_after_delete(neon_env_builder: NeonEnvBuilder): +@pytest.mark.parametrize("deletion_api", [DeletionAPIKind.OLD, DeletionAPIKind.FORCE]) +def test_ps_unavailable_after_delete( + neon_env_builder: NeonEnvBuilder, deletion_api: DeletionAPIKind +): neon_env_builder.num_pageservers = 3 env = neon_env_builder.init_start() @@ -3262,10 
+3304,16 @@ def test_ps_unavailable_after_delete(neon_env_builder: NeonEnvBuilder): assert_nodes_count(3) ps = env.pageservers[0] - env.storage_controller.node_delete_old(ps.id) - # After deletion, the node count must be reduced - assert_nodes_count(2) + if deletion_api == DeletionAPIKind.FORCE: + ps.allowed_errors.append(".*request was dropped before completing.*") + env.storage_controller.node_delete(ps.id, force=True) + wait_until(lambda: assert_nodes_count(2)) + elif deletion_api == DeletionAPIKind.OLD: + env.storage_controller.node_delete_old(ps.id) + assert_nodes_count(2) + else: + raise AssertionError(f"Invalid deletion API: {deletion_api}") # Running pageserver CLI init in a separate thread with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: @@ -4811,3 +4859,103 @@ def test_storage_controller_migrate_with_pageserver_restart( "shards": [{"node_id": int(secondary.id), "shard_number": 0}], "preferred_az": DEFAULT_AZ_ID, } + + +@run_only_on_default_postgres("PG version is not important for this test") +def test_storage_controller_forward_404(neon_env_builder: NeonEnvBuilder): + """ + Ensures that the storage controller correctly forwards 404s and converts some of them + into 503s before forwarding to the client. + """ + neon_env_builder.num_pageservers = 2 + neon_env_builder.num_azs = 2 + + env = neon_env_builder.init_start() + env.storage_controller.allowed_errors.append(".*Reconcile error.*") + env.storage_controller.allowed_errors.append(".*Timed out.*") + + env.storage_controller.tenant_policy_update(env.initial_tenant, {"placement": {"Attached": 1}}) + env.storage_controller.reconcile_until_idle() + + # 404s on tenants and timelines are forwarded as-is when reconciler is not running. 
+ + # Access a non-existing timeline -> 404 + with pytest.raises(PageserverApiException) as e: + env.storage_controller.pageserver_api().timeline_detail( + env.initial_tenant, TimelineId.generate() + ) + assert e.value.status_code == 404 + with pytest.raises(PageserverApiException) as e: + env.storage_controller.pageserver_api().timeline_lsn_lease( + env.initial_tenant, TimelineId.generate(), Lsn(0) + ) + assert e.value.status_code == 404 + + # Access a non-existing tenant when reconciler is not running -> 404 + with pytest.raises(PageserverApiException) as e: + env.storage_controller.pageserver_api().timeline_detail( + TenantId.generate(), env.initial_timeline + ) + assert e.value.status_code == 404 + with pytest.raises(PageserverApiException) as e: + env.storage_controller.pageserver_api().timeline_lsn_lease( + TenantId.generate(), env.initial_timeline, Lsn(0) + ) + assert e.value.status_code == 404 + + # Normal requests should succeed + detail = env.storage_controller.pageserver_api().timeline_detail( + env.initial_tenant, env.initial_timeline + ) + last_record_lsn = Lsn(detail["last_record_lsn"]) + env.storage_controller.pageserver_api().timeline_lsn_lease( + env.initial_tenant, env.initial_timeline, last_record_lsn + ) + + # Get into a situation where the intent state is not the same as the observed state. + describe = env.storage_controller.tenant_describe(env.initial_tenant)["shards"][0] + current_primary = describe["node_attached"] + current_secondary = describe["node_secondary"][0] + assert current_primary != current_secondary + + # Pause the reconciler so that the generation number won't be updated. + env.storage_controller.configure_failpoints( + ("reconciler-live-migrate-post-generation-inc", "pause") + ) + + # Do the migration in another thread; the request will be dropped as we don't wait. 
+ shard_zero = TenantShardId(env.initial_tenant, 0, 0) + concurrent.futures.ThreadPoolExecutor(max_workers=1).submit( + env.storage_controller.tenant_shard_migrate, + shard_zero, + current_secondary, + StorageControllerMigrationConfig(override_scheduler=True), + ) + # Not the best way to do this, we should wait until the migration gets started. + time.sleep(1) + placement = env.storage_controller.get_tenants_placement()[str(shard_zero)] + assert placement["observed"] != placement["intent"] + assert placement["observed"]["attached"] == current_primary + assert placement["intent"]["attached"] == current_secondary + + # Now we issue requests that would cause 404 again + retry_strategy = Retry(total=0) + adapter = HTTPAdapter(max_retries=retry_strategy) + + no_retry_api = env.storage_controller.pageserver_api() + no_retry_api.mount("http://", adapter) + no_retry_api.mount("https://", adapter) + + # As intent state != observed state, tenant not found error should return 503, + # so that the client can retry once we've successfully migrated. 
+ with pytest.raises(PageserverApiException) as e: + no_retry_api.timeline_detail(env.initial_tenant, TimelineId.generate()) + assert e.value.status_code == 503, f"unexpected status code and error: {e.value}" + with pytest.raises(PageserverApiException) as e: + no_retry_api.timeline_lsn_lease(env.initial_tenant, TimelineId.generate(), Lsn(0)) + assert e.value.status_code == 503, f"unexpected status code and error: {e.value}" + + # Unblock reconcile operations + env.storage_controller.configure_failpoints( + ("reconciler-live-migrate-post-generation-inc", "off") + ) diff --git a/test_runner/regress/test_subscriber_branching.py b/test_runner/regress/test_subscriber_branching.py index 83bebc19be..63772f7cd4 100644 --- a/test_runner/regress/test_subscriber_branching.py +++ b/test_runner/regress/test_subscriber_branching.py @@ -332,7 +332,7 @@ def test_multiple_subscription_branching(neon_simple_env: NeonEnv): last_insert_lsn = query_scalar(cursor, "select pg_current_wal_insert_lsn();") - def start_publisher_workload(table_num: int, duration: int): + def start_publisher_workload(i: int, duration: int): start = time.time() with endpoint.cursor(dbname="publisher_db") as cur: while time.time() - start < duration: diff --git a/test_runner/regress/test_tenant_size.py b/test_runner/regress/test_tenant_size.py index 190dd914ee..8b291b7cbe 100644 --- a/test_runner/regress/test_tenant_size.py +++ b/test_runner/regress/test_tenant_size.py @@ -740,6 +740,10 @@ def test_lsn_lease_size(neon_env_builder: NeonEnvBuilder, test_output_dir: Path, "pitr_interval": "0s" if zero_gc else "3600s", "gc_period": "0s", "compaction_period": "0s", + # The test exercises leases API, so we need non-zero lease length. + # If this test ever does GC, we need to accommodate for the initial lease deadline + # after tenant attach, which is also controlled by this variable. 
+ "lsn_lease_length": "600s", } env = neon_env_builder.init_start(initial_tenant_conf=conf) @@ -824,9 +828,7 @@ def insert_with_action( log.info(f"initial size: {initial_size}") with ep.cursor() as cur: - cur.execute( - "CREATE TABLE t0 AS SELECT i::bigint n FROM generate_series(0, 1000000) s(i)" - ) + cur.execute("CREATE TABLE t0 AS SELECT i::bigint n FROM generate_series(0, 10000) s(i)") last_flush_lsn = wait_for_last_flush_lsn(env, ep, tenant, timeline) if action == "lease": @@ -841,15 +843,9 @@ def insert_with_action( raise AssertionError("Invalid action type, only `lease` and `branch`are accepted") with ep.cursor() as cur: - cur.execute( - "CREATE TABLE t1 AS SELECT i::bigint n FROM generate_series(0, 1000000) s(i)" - ) - cur.execute( - "CREATE TABLE t2 AS SELECT i::bigint n FROM generate_series(0, 1000000) s(i)" - ) - cur.execute( - "CREATE TABLE t3 AS SELECT i::bigint n FROM generate_series(0, 1000000) s(i)" - ) + cur.execute("CREATE TABLE t1 AS SELECT i::bigint n FROM generate_series(0, 10000) s(i)") + cur.execute("CREATE TABLE t2 AS SELECT i::bigint n FROM generate_series(0, 10000) s(i)") + cur.execute("CREATE TABLE t3 AS SELECT i::bigint n FROM generate_series(0, 10000) s(i)") last_flush_lsn = wait_for_last_flush_lsn(env, ep, tenant, timeline) diff --git a/test_runner/regress/test_tenants.py b/test_runner/regress/test_tenants.py index c54dd8b38d..7f32f34d36 100644 --- a/test_runner/regress/test_tenants.py +++ b/test_runner/regress/test_tenants.py @@ -76,7 +76,6 @@ def test_tenants_normal_work(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 3 env = neon_env_builder.init_start() - """Tests tenants with and without wal acceptors""" tenant_1, _ = env.create_tenant() tenant_2, _ = env.create_tenant() diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py index c0f163db32..45b7af719e 100644 --- a/test_runner/regress/test_timeline_detach_ancestor.py +++ 
b/test_runner/regress/test_timeline_detach_ancestor.py @@ -209,9 +209,9 @@ def test_ancestor_detach_branched_from( client.timeline_delete(env.initial_tenant, env.initial_timeline) wait_timeline_detail_404(client, env.initial_tenant, env.initial_timeline) - # because we do the fullbackup from ancestor at the branch_lsn, the zenith.signal is always different - # as there is always "PREV_LSN: invalid" for "before" - skip_files = {"zenith.signal"} + # because we do the fullbackup from ancestor at the branch_lsn, the neon.signal and/or zenith.signal is always + # different as there is always "PREV_LSN: invalid" for "before" + skip_files = {"zenith.signal", "neon.signal"} assert_pageserver_backups_equal(fullbackup_before, fullbackup_after, skip_files) @@ -767,7 +767,7 @@ def test_compaction_induced_by_detaches_in_history( env.pageserver, env.initial_tenant, branch_timeline_id, branch_lsn, fullbackup_after ) - # we don't need to skip any files, because zenith.signal will be identical + # we don't need to skip any files, because neon.signal will be identical assert_pageserver_backups_equal(fullbackup_before, fullbackup_after, set()) diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 22e6d2e1c3..c691087259 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -2788,7 +2788,8 @@ def test_timeline_disk_usage_limit(neon_env_builder: NeonEnvBuilder): # Wait for the error message to appear in the compute log def error_logged(): - return endpoint.log_contains("WAL storage utilization exceeds configured limit") is not None + if endpoint.log_contains("WAL storage utilization exceeds configured limit") is None: + raise Exception("Expected error message not found in compute log yet") wait_until(error_logged) log.info("Found expected error message in compute log, resuming.") @@ -2822,3 +2823,87 @@ def test_timeline_disk_usage_limit(neon_env_builder: NeonEnvBuilder): 
cur.execute("select count(*) from t") # 2000 rows from first insert + 1000 from last insert assert cur.fetchone() == (3000,) + + +def test_global_disk_usage_limit(neon_env_builder: NeonEnvBuilder): + """ + Similar to `test_timeline_disk_usage_limit`, but test that the global disk usage circuit breaker + also works as expected. The test scenario: + 1. Create a timeline and endpoint. + 2. Mock high disk usage via failpoint + 3. Write data to the timeline so that disk usage exceeds the limit. + 4. Verify that the writes hang and the expected error message appears in the compute log. + 5. Mock low disk usage via failpoint + 6. Verify that the hanging writes unblock and we can continue to write as normal. + """ + neon_env_builder.num_safekeepers = 1 + remote_storage_kind = s3_storage() + neon_env_builder.enable_safekeeper_remote_storage(remote_storage_kind) + + env = neon_env_builder.init_start() + + env.create_branch("test_global_disk_usage_limit") + endpoint = env.endpoints.create_start("test_global_disk_usage_limit") + + with closing(endpoint.connect()) as conn: + with conn.cursor() as cur: + cur.execute("create table t2(key int, value text)") + + for sk in env.safekeepers: + sk.stop().start( + extra_opts=["--global-disk-check-interval=1s", "--max-global-disk-usage-ratio=0.8"] + ) + + # Set the failpoint to have the disk usage check return u64::MAX, which definitely exceeds the practical + # limits in the test environment. + for sk in env.safekeepers: + sk.http_client().configure_failpoints( + [("sk-global-disk-usage", "return(18446744073709551615)")] + ) + + # Wait until the global disk usage limit watcher trips the circuit breaker. 
+ def error_logged_in_sk(): + for sk in env.safekeepers: + if sk.log_contains("Global disk usage exceeded limit") is None: + raise Exception("Expected error message not found in safekeeper log yet") + + wait_until(error_logged_in_sk) + + def run_hanging_insert_global(): + with closing(endpoint.connect()) as bg_conn: + with bg_conn.cursor() as bg_cur: + # This should generate more than 1KiB of WAL + bg_cur.execute("insert into t2 select generate_series(1,2000), 'payload'") + + bg_thread_global = threading.Thread(target=run_hanging_insert_global) + bg_thread_global.start() + + def error_logged_in_compute(): + if endpoint.log_contains("Global disk usage exceeded limit") is None: + raise Exception("Expected error message not found in compute log yet") + + wait_until(error_logged_in_compute) + log.info("Found the expected error message in compute log, resuming.") + + time.sleep(2) + assert bg_thread_global.is_alive(), "Global hanging insert unblocked prematurely!" + + # Make the disk usage check always return 0 through the failpoint to simulate the disk pressure easing. + # The SKs should resume accepting WAL writes without restarting. + for sk in env.safekeepers: + sk.http_client().configure_failpoints([("sk-global-disk-usage", "return(0)")]) + + bg_thread_global.join(timeout=120) + assert not bg_thread_global.is_alive(), "Hanging global insert did not complete after restart" + log.info("Global hanging insert unblocked.") + + # Verify that we can continue to write as normal and we don't have obvious data corruption + # following the recovery. 
+    with closing(endpoint.connect()) as conn:
+        with conn.cursor() as cur:
+            cur.execute("insert into t2 select generate_series(2001,3000), 'payload'")
+
+    with closing(endpoint.connect()) as conn:
+        with conn.cursor() as cur:
+            cur.execute("select count(*) from t2")
+            assert cur.fetchone() == (3000,)
diff --git a/test_runner/regress/test_wal_restore.py b/test_runner/regress/test_wal_restore.py
index 0bb63308bb..573016f772 100644
--- a/test_runner/regress/test_wal_restore.py
+++ b/test_runner/regress/test_wal_restore.py
@@ -3,6 +3,7 @@ from __future__ import annotations
 import sys
 import tarfile
 import tempfile
+from pathlib import Path
 from typing import TYPE_CHECKING
 
 import pytest
@@ -198,3 +199,115 @@ def test_wal_restore_http(neon_env_builder: NeonEnvBuilder, broken_tenant: bool)
     # the table is back now!
     restored = env.endpoints.create_start("main")
     assert restored.safe_psql("select count(*) from t", user="cloud_admin") == [(300000,)]
+
+
+# BEGIN_HADRON
+# TODO: re-enable once CM python is integrated.
+# def clear_directory(directory): +# for item in os.listdir(directory): +# item_path = os.path.join(directory, item) +# if os.path.isdir(item_path): +# log.info(f"removing SK directory: {item_path}") +# shutil.rmtree(item_path) +# else: +# log.info(f"removing SK file: {item_path}") +# os.remove(item_path) + + +# def test_sk_pull_timelines( +# neon_env_builder: NeonEnvBuilder, +# ): +# DBNAME = "regression" +# superuser_name = "databricks_superuser" +# neon_env_builder.num_safekeepers = 3 +# neon_env_builder.num_pageservers = 4 +# neon_env_builder.safekeeper_extra_opts = ["--enable-pull-timeline-on-startup"] +# neon_env_builder.enable_safekeeper_remote_storage(s3_storage()) + +# env = neon_env_builder.init_start(initial_tenant_shard_count=4) + +# env.compute_manager.start(base_port=env.compute_manager_port) + +# test_creator = "test_creator" +# test_metastore_id = uuid4() +# test_account_id = uuid4() +# test_workspace_id = 1 +# test_workspace_url = "http://test_workspace_url" +# test_metadata_version = 1 +# test_metadata = { +# "state": "INSTANCE_PROVISIONING", +# "admin_rolename": "admin", +# "admin_password_scram": "abc123456", +# } + +# test_instance_name_1 = "test_instance_1" +# test_instance_read_write_compute_pool_1 = { +# "instance_name": test_instance_name_1, +# "compute_pool_name": "compute_pool_1", +# "creator": test_creator, +# "capacity": 2.0, +# "node_count": 1, +# "metadata_version": 0, +# "metadata": { +# "state": "INSTANCE_PROVISIONING", +# }, +# } + +# test_instance_1_readable_secondaries_enabled = False + +# # Test creation +# create_instance_with_retries( +# env, +# test_instance_name_1, +# test_creator, +# test_metastore_id, +# test_account_id, +# test_workspace_id, +# test_workspace_url, +# test_instance_read_write_compute_pool_1, +# test_metadata_version, +# test_metadata, +# test_instance_1_readable_secondaries_enabled, +# ) +# instance = env.compute_manager.get_instance_by_name(test_instance_name_1, test_workspace_id) +# log.info(f"haoyu 
Instance created: {instance}") +# assert instance["instance_name"] == test_instance_name_1 +# test_instance_id = instance["instance_id"] +# instance_detail = env.compute_manager.describe_instance(test_instance_id) +# log.info(f"haoyu Instance detail: {instance_detail}") + +# env.initial_tenant = instance_detail[0]["tenant_id"] +# env.initial_timeline = instance_detail[0]["timeline_id"] + +# # Connect to postgres and create a database called "regression". +# endpoint = env.endpoints.create_start("main") +# endpoint.safe_psql(f"CREATE ROLE {superuser_name}") +# endpoint.safe_psql(f"CREATE DATABASE {DBNAME}") + +# endpoint.safe_psql("CREATE TABLE usertable ( YCSB_KEY INT, FIELD0 TEXT);") +# # Write some data. ~20 MB. +# num_rows = 0 +# for _i in range(0, 20000): +# endpoint.safe_psql( +# "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False +# ) +# num_rows += 1 + +# log.info(f"SKs {env.storage_controller.hcc_sk_node_list()}") + +# env.safekeepers[0].stop(immediate=True) +# clear_directory(env.safekeepers[0].data_dir) +# env.safekeepers[0].start() + +# # PG can still write data. ~20 MB. 
+# for _i in range(0, 20000): +# endpoint.safe_psql( +# "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False +# ) +# num_rows += 1 + +# tuples = endpoint.safe_psql("SELECT COUNT(*) FROM usertable;") +# assert tuples[0][0] == num_rows +# endpoint.stop_and_destroy() + +# END_HADRON diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 9085654ee8..1cb207d1c9 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 9085654ee8022d5cc4ca719380a1dc53e5e3246f +Subproject commit 1cb207d1c9efb1f6c6f864a47bf45e992a7f0eb0 diff --git a/vendor/postgres-v15 b/vendor/postgres-v15 index 8c3249f36c..9d19780350 160000 --- a/vendor/postgres-v15 +++ b/vendor/postgres-v15 @@ -1 +1 @@ -Subproject commit 8c3249f36c7df6ac0efb8ee9f1baf4aa1b83e5c9 +Subproject commit 9d19780350c0c7b536312dc3b891ade55628bc7b diff --git a/vendor/postgres-v16 b/vendor/postgres-v16 index 7a4c0eacae..1486f919d4 160000 --- a/vendor/postgres-v16 +++ b/vendor/postgres-v16 @@ -1 +1 @@ -Subproject commit 7a4c0eacaeb9b97416542fa19103061c166460b1 +Subproject commit 1486f919d4dc21637407ee7ed203497bb5bd516a diff --git a/vendor/postgres-v17 b/vendor/postgres-v17 index db424d42d7..160d0b52d6 160000 --- a/vendor/postgres-v17 +++ b/vendor/postgres-v17 @@ -1 +1 @@ -Subproject commit db424d42d748f8ad91ac00e28db2c7f2efa42f7f +Subproject commit 160d0b52d66f4a5d21251a2912a50561bf600333 diff --git a/vendor/revisions.json b/vendor/revisions.json index b260698c86..69e7559c67 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -1,18 +1,18 @@ { "v17": [ "17.5", - "db424d42d748f8ad91ac00e28db2c7f2efa42f7f" + "160d0b52d66f4a5d21251a2912a50561bf600333" ], "v16": [ "16.9", - "7a4c0eacaeb9b97416542fa19103061c166460b1" + "1486f919d4dc21637407ee7ed203497bb5bd516a" ], "v15": [ "15.13", - "8c3249f36c7df6ac0efb8ee9f1baf4aa1b83e5c9" + "9d19780350c0c7b536312dc3b891ade55628bc7b" ], "v14": [ "14.18", - "9085654ee8022d5cc4ca719380a1dc53e5e3246f" + 
"1cb207d1c9efb1f6c6f864a47bf45e992a7f0eb0" ] } diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index fb10e27d2a..d6d64a2045 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -40,8 +40,10 @@ env_logger = { version = "0.11" } fail = { version = "0.5", default-features = false, features = ["failpoints"] } form_urlencoded = { version = "1" } futures-channel = { version = "0.3", features = ["sink"] } +futures-core = { version = "0.3" } futures-executor = { version = "0.3" } futures-io = { version = "0.3" } +futures-sink = { version = "0.3" } futures-util = { version = "0.3", features = ["channel", "io", "sink"] } generic-array = { version = "0.14", default-features = false, features = ["more_lengths", "zeroize"] } getrandom = { version = "0.2", default-features = false, features = ["std"] } @@ -96,7 +98,7 @@ tikv-jemalloc-sys = { version = "0.6", features = ["profiling", "stats", "unpref time = { version = "0.3", features = ["macros", "serde-well-known"] } tokio = { version = "1", features = ["full", "test-util"] } tokio-rustls = { version = "0.26", default-features = false, features = ["logging", "ring", "tls12"] } -tokio-stream = { version = "0.1", features = ["net"] } +tokio-stream = { version = "0.1", features = ["net", "sync"] } tokio-util = { version = "0.7", features = ["codec", "compat", "io-util", "rt"] } toml_edit = { version = "0.22", features = ["serde"] } tower = { version = "0.5", default-features = false, features = ["balance", "buffer", "limit", "log"] } @@ -105,7 +107,6 @@ tracing-core = { version = "0.1" } tracing-log = { version = "0.2" } tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["serde", "v4", "v7"] } zeroize = { version = "1", features = ["derive", "serde"] } zstd = { version = "0.13" } zstd-safe = { version = "7", default-features = false, features = ["arrays", "legacy", "std", 
"zdict_builder"] }