diff --git a/.dockerignore b/.dockerignore index 4bf1492ea3..4d9433764e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,6 +4,7 @@ !Cargo.lock !Cargo.toml !Makefile +!postgres.mk !rust-toolchain.toml !scripts/ninstall.sh !docker-compose/run-tests.sh diff --git a/.github/workflows/_build-and-test-locally.yml b/.github/workflows/_build-and-test-locally.yml index 663afa2c8b..e2203a38ec 100644 --- a/.github/workflows/_build-and-test-locally.yml +++ b/.github/workflows/_build-and-test-locally.yml @@ -38,6 +38,11 @@ on: required: false default: 1 type: number + rerun-failed: + description: 'rerun failed tests to ignore flaky tests' + required: false + default: true + type: boolean defaults: run: @@ -99,11 +104,10 @@ jobs: # Set some environment variables used by all the steps. # - # CARGO_FLAGS is extra options to pass to "cargo build", "cargo test" etc. - # It also includes --features, if any + # CARGO_FLAGS is extra options to pass to all "cargo" subcommands. # - # CARGO_FEATURES is passed to "cargo metadata". It is separate from CARGO_FLAGS, - # because "cargo metadata" doesn't accept --release or --debug options + # CARGO_PROFILE is passed to "cargo build", "cargo test" etc, but not to + # "cargo metadata", because it doesn't accept --release or --debug options. # # We run tests with addtional features, that are turned off by default (e.g. in release builds), see # corresponding Cargo.toml files for their descriptions. @@ -112,16 +116,16 @@ jobs: ARCH: ${{ inputs.arch }} SANITIZERS: ${{ inputs.sanitizers }} run: | - CARGO_FEATURES="--features testing" + CARGO_FLAGS="--locked --features testing" if [[ $BUILD_TYPE == "debug" && $ARCH == 'x64' ]]; then cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/tmp/coverage run" - CARGO_FLAGS="--locked" + CARGO_PROFILE="" elif [[ $BUILD_TYPE == "debug" ]]; then cov_prefix="" - CARGO_FLAGS="--locked" + CARGO_PROFILE="" elif [[ $BUILD_TYPE == "release" ]]; then cov_prefix="" - CARGO_FLAGS="--locked --release" + CARGO_PROFILE="--release" fi if [[ $SANITIZERS == 'enabled' ]]; then make_vars="WITH_SANITIZERS=yes" @@ -131,8 +135,8 @@ jobs: { echo "cov_prefix=${cov_prefix}" echo "make_vars=${make_vars}" - echo "CARGO_FEATURES=${CARGO_FEATURES}" echo "CARGO_FLAGS=${CARGO_FLAGS}" + echo "CARGO_PROFILE=${CARGO_PROFILE}" echo "CARGO_HOME=${GITHUB_WORKSPACE}/.cargo" } >> $GITHUB_ENV @@ -184,34 +188,18 @@ jobs: path: pg_install/v17 key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v17_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools.Dockerfile') }} - - name: Build postgres v14 - if: steps.cache_pg_14.outputs.cache-hit != 'true' - run: mold -run make ${make_vars} postgres-v14 -j$(nproc) - - - name: Build postgres v15 - if: steps.cache_pg_15.outputs.cache-hit != 'true' - run: mold -run make ${make_vars} postgres-v15 -j$(nproc) - - - name: Build postgres v16 - if: steps.cache_pg_16.outputs.cache-hit != 'true' - run: mold -run make ${make_vars} postgres-v16 -j$(nproc) - - - name: Build postgres v17 - if: steps.cache_pg_17.outputs.cache-hit != 'true' - run: mold -run make ${make_vars} postgres-v17 -j$(nproc) - - - name: Build neon extensions - run: mold -run make ${make_vars} neon-pg-ext -j$(nproc) + - name: Build all + # Note: the Makefile picks up BUILD_TYPE and CARGO_PROFILE from the env variables + run: mold -run make ${make_vars} all -j$(nproc) CARGO_BUILD_FLAGS="$CARGO_FLAGS" - name: Build walproposer-lib run: mold -run make ${make_vars} walproposer-lib -j$(nproc) - - name: Run cargo build - 
env: - WITH_TESTS: ${{ inputs.sanitizers != 'enabled' && '--tests' || '' }} + - name: Build unit tests + if: inputs.sanitizers != 'enabled' run: | export ASAN_OPTIONS=detect_leaks=0 - ${cov_prefix} mold -run cargo build $CARGO_FLAGS $CARGO_FEATURES --bins ${WITH_TESTS} + ${cov_prefix} mold -run cargo build $CARGO_FLAGS $CARGO_PROFILE --tests # Do install *before* running rust tests because they might recompile the # binaries with different features/flags. @@ -223,7 +211,7 @@ jobs: # Install target binaries mkdir -p /tmp/neon/bin/ binaries=$( - ${cov_prefix} cargo metadata $CARGO_FEATURES --format-version=1 --no-deps | + ${cov_prefix} cargo metadata $CARGO_FLAGS --format-version=1 --no-deps | jq -r '.packages[].targets[] | select(.kind | index("bin")) | .name' ) for bin in $binaries; do @@ -240,7 +228,7 @@ jobs: mkdir -p /tmp/neon/test_bin/ test_exe_paths=$( - ${cov_prefix} cargo test $CARGO_FLAGS $CARGO_FEATURES --message-format=json --no-run | + ${cov_prefix} cargo test $CARGO_FLAGS $CARGO_PROFILE --message-format=json --no-run | jq -r '.executable | select(. != null)' ) for bin in $test_exe_paths; do @@ -274,10 +262,10 @@ jobs: export LD_LIBRARY_PATH #nextest does not yet support running doctests - ${cov_prefix} cargo test --doc $CARGO_FLAGS $CARGO_FEATURES + ${cov_prefix} cargo test --doc $CARGO_FLAGS $CARGO_PROFILE # run all non-pageserver tests - ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E '!package(pageserver)' + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_PROFILE -E '!package(pageserver)' # run pageserver tests # (When developing new pageserver features gated by config fields, we commonly make the rust @@ -286,13 +274,13 @@ jobs: # pageserver tests from non-pageserver tests cuts down the time it takes for this CI step.) 
NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=tokio-epoll-uring \ ${cov_prefix} \ - cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(pageserver)' + cargo nextest run $CARGO_FLAGS $CARGO_PROFILE -E 'package(pageserver)' # Run separate tests for real S3 export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty export REMOTE_STORAGE_S3_BUCKET=neon-github-ci-tests export REMOTE_STORAGE_S3_REGION=eu-central-1 - ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(remote_storage)' -E 'test(test_real_s3)' + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_PROFILE -E 'package(remote_storage)' -E 'test(test_real_s3)' # Run separate tests for real Azure Blob Storage # XXX: replace region with `eu-central-1`-like region @@ -301,17 +289,17 @@ jobs: export AZURE_STORAGE_ACCESS_KEY="${{ secrets.AZURE_STORAGE_ACCESS_KEY_DEV }}" export REMOTE_STORAGE_AZURE_CONTAINER="${{ vars.REMOTE_STORAGE_AZURE_CONTAINER }}" export REMOTE_STORAGE_AZURE_REGION="${{ vars.REMOTE_STORAGE_AZURE_REGION }}" - ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(remote_storage)' -E 'test(test_real_azure)' + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_PROFILE -E 'package(remote_storage)' -E 'test(test_real_azure)' - name: Install postgres binaries run: | # Use tar to copy files matching the pattern, preserving the paths in the destionation tar c \ pg_install/v* \ - pg_install/build/*/src/test/regress/*.so \ - pg_install/build/*/src/test/regress/pg_regress \ - pg_install/build/*/src/test/isolation/isolationtester \ - pg_install/build/*/src/test/isolation/pg_isolation_regress \ + build/*/src/test/regress/*.so \ + build/*/src/test/regress/pg_regress \ + build/*/src/test/isolation/isolationtester \ + build/*/src/test/isolation/pg_isolation_regress \ | tar x -C /tmp/neon - name: Upload Neon artifact @@ -379,7 +367,7 @@ jobs: - name: Pytest regression tests continue-on-error: ${{ matrix.lfc_state == 'with-lfc' && inputs.build-type == 'debug' }} uses: ./.github/actions/run-python-test-set - timeout-minutes: ${{ inputs.sanitizers != 'enabled' && 75 || 180 }} + timeout-minutes: ${{ (inputs.build-type == 'release' && inputs.sanitizers != 'enabled') && 75 || 180 }} with: build_type: ${{ inputs.build-type }} test_selection: regress @@ -387,14 +375,14 @@ jobs: run_with_real_s3: true real_s3_bucket: neon-github-ci-tests real_s3_region: eu-central-1 - rerun_failed: ${{ inputs.test-run-count == 1 }} + rerun_failed: ${{ inputs.rerun-failed }} pg_version: ${{ matrix.pg_version }} sanitizers: ${{ inputs.sanitizers }} aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} # `--session-timeout` is equal to (timeout-minutes - 10 minutes) * 60 seconds. # Attempt to stop tests gracefully to generate test reports # until they are forcibly stopped by the stricter `timeout-minutes` limit. 
- extra_params: --session-timeout=${{ inputs.sanitizers != 'enabled' && 3000 || 10200 }} --count=${{ inputs.test-run-count }} + extra_params: --session-timeout=${{ (inputs.build-type == 'release' && inputs.sanitizers != 'enabled') && 3000 || 10200 }} --count=${{ inputs.test-run-count }} ${{ inputs.test-selection != '' && format('-k "{0}"', inputs.test-selection) || '' }} env: TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }} diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 0f7fa3e813..2296807d2d 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -32,161 +32,14 @@ permissions: contents: read jobs: - build-pgxn: - if: | - inputs.pg_versions != '[]' || inputs.rebuild_everything || - contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') || - contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') || - github.ref_name == 'main' - timeout-minutes: 30 - runs-on: macos-15 - strategy: - matrix: - postgres-version: ${{ inputs.rebuild_everything && fromJSON('["v14", "v15", "v16", "v17"]') || fromJSON(inputs.pg_versions) }} - env: - # Use release build only, to have less debug info around - # Hence keeping target/ (and general cache size) smaller - BUILD_TYPE: release - steps: - - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 - with: - egress-policy: audit - - - name: Checkout main repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Set pg ${{ matrix.postgres-version }} for caching - id: pg_rev - run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-${{ matrix.postgres-version }}) | tee -a "${GITHUB_OUTPUT}" - - - name: Cache postgres ${{ matrix.postgres-version }} build - id: cache_pg - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 - with: - path: pg_install/${{ matrix.postgres-version }} - key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-${{ matrix.postgres-version }}-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - - - name: Checkout submodule vendor/postgres-${{ matrix.postgres-version }} - if: steps.cache_pg.outputs.cache-hit != 'true' - run: | - git submodule init vendor/postgres-${{ matrix.postgres-version }} - git submodule update --depth 1 --recursive - - - name: Install build dependencies - if: steps.cache_pg.outputs.cache-hit != 'true' - run: | - brew install flex bison openssl protobuf icu4c - - - name: Set extra env for macOS - if: steps.cache_pg.outputs.cache-hit != 'true' - run: | - echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV - echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV - - - name: Build Postgres ${{ matrix.postgres-version }} - if: steps.cache_pg.outputs.cache-hit != 'true' - run: | - make postgres-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu) - - - name: Build Neon Pg Ext ${{ matrix.postgres-version }} - if: steps.cache_pg.outputs.cache-hit != 'true' - run: | - make "neon-pg-ext-${{ matrix.postgres-version }}" -j$(sysctl -n hw.ncpu) - - - name: Get postgres headers ${{ matrix.postgres-version }} - if: steps.cache_pg.outputs.cache-hit != 'true' - run: | - make postgres-headers-${{ matrix.postgres-version }} -j$(sysctl -n hw.ncpu) - - - name: Upload "pg_install/${{ matrix.postgres-version }}" artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: pg_install--${{ 
matrix.postgres-version }} - path: pg_install/${{ matrix.postgres-version }} - # The artifact is supposed to be used by the next job in the same workflow, - # so there’s no need to store it for too long. - retention-days: 1 - - build-walproposer-lib: - if: | - inputs.pg_versions != '[]' || inputs.rebuild_everything || - contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') || - contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') || - github.ref_name == 'main' - timeout-minutes: 30 - runs-on: macos-15 - needs: [build-pgxn] - env: - # Use release build only, to have less debug info around - # Hence keeping target/ (and general cache size) smaller - BUILD_TYPE: release - steps: - - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 - with: - egress-policy: audit - - - name: Checkout main repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - - name: Set pg v17 for caching - id: pg_rev - run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v17) | tee -a "${GITHUB_OUTPUT}" - - - name: Download "pg_install/v17" artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 - with: - name: pg_install--v17 - path: pg_install/v17 - - - name: Cache walproposer-lib - id: cache_walproposer_lib - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 - with: - path: pg_install/build/walproposer-lib - key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - - - name: Checkout submodule vendor/postgres-v17 - if: steps.cache_walproposer_lib.outputs.cache-hit != 'true' - run: | - git submodule init vendor/postgres-v17 - git submodule update --depth 1 --recursive - - - name: Install build dependencies - if: steps.cache_walproposer_lib.outputs.cache-hit != 'true' - run: | - brew install flex bison openssl protobuf icu4c - - - name: Set extra env for macOS - if: steps.cache_walproposer_lib.outputs.cache-hit != 'true' - run: | - echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV - echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV - - - name: Build walproposer-lib (only for v17) - if: steps.cache_walproposer_lib.outputs.cache-hit != 'true' - run: - make walproposer-lib -j$(sysctl -n hw.ncpu) - - - name: Upload "pg_install/build/walproposer-lib" artifact - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - with: - name: pg_install--build--walproposer-lib - path: pg_install/build/walproposer-lib - # The artifact is supposed to be used by the next job in the same workflow, - # so there’s no need to store it for too long. 
- retention-days: 1 - - cargo-build: + make-all: if: | inputs.pg_versions != '[]' || inputs.rebuild_rust_code || inputs.rebuild_everything || contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') || contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') || github.ref_name == 'main' - timeout-minutes: 30 + timeout-minutes: 60 runs-on: macos-15 - needs: [build-pgxn, build-walproposer-lib] env: # Use release build only, to have less debug info around # Hence keeping target/ (and general cache size) smaller @@ -202,41 +55,53 @@ jobs: with: submodules: true - - name: Download "pg_install/v14" artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 - with: - name: pg_install--v14 - path: pg_install/v14 - - - name: Download "pg_install/v15" artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 - with: - name: pg_install--v15 - path: pg_install/v15 - - - name: Download "pg_install/v16" artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 - with: - name: pg_install--v16 - path: pg_install/v16 - - - name: Download "pg_install/v17" artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 - with: - name: pg_install--v17 - path: pg_install/v17 - - - name: Download "pg_install/build/walproposer-lib" artifact - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 - with: - name: pg_install--build--walproposer-lib - path: pg_install/build/walproposer-lib - - # `actions/download-artifact` doesn't preserve permissions: - # https://github.com/actions/download-artifact?tab=readme-ov-file#permission-loss - - name: Make pg_install/v*/bin/* executable + - name: Install build dependencies run: | - chmod +x pg_install/v*/bin/* + brew install flex bison openssl protobuf icu4c + + - name: Set extra env for macOS + run: | + echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV + echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV + + - name: Restore "pg_install/" cache + id: cache_pg + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + with: + path: pg_install + key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-pg-install-v14-${{ hashFiles('Makefile', 'postgres.mk', 'vendor/revisions.json') }} + + - name: Checkout vendor/postgres submodules + if: steps.cache_pg.outputs.cache-hit != 'true' + run: | + git submodule init + git submodule update --depth 1 --recursive + + - name: Build Postgres + if: steps.cache_pg.outputs.cache-hit != 'true' + run: | + make postgres -j$(sysctl -n hw.ncpu) + + # This isn't strictly necessary, but it makes the cached and non-cached builds more similar, + # When pg_install is restored from cache, there is no 'build/' directory. By removing it + # in a non-cached build too, we enforce that the rest of the steps don't depend on it, + # so that we notice any build caching bugs earlier. + - name: Remove build artifacts + if: steps.cache_pg.outputs.cache-hit != 'true' + run: | + rm -rf build + + # Explicitly update the rust toolchain before running 'make'. The parallel make build can + # invoke 'cargo build' more than once in parallel, for different crates. That's OK, 'cargo' + # does its own locking to prevent concurrent builds from stepping on each other's + # toes. However, it will first try to update the toolchain, and that step is not locked the + # same way. 
To avoid two toolchain updates running in parallel and stepping on each other's + # toes, ensure that the toolchain is up-to-date beforehand. + - name: Update rust toolchain + run: | + rustup --version && + rustup update && + rustup show - name: Cache cargo deps uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 @@ -248,17 +113,12 @@ jobs: target key: v1-${{ runner.os }}-${{ runner.arch }}-cargo-${{ hashFiles('./Cargo.lock') }}-${{ hashFiles('./rust-toolchain.toml') }}-rust - - name: Install build dependencies - run: | - brew install flex bison openssl protobuf icu4c - - - name: Set extra env for macOS - run: | - echo 'LDFLAGS=-L/usr/local/opt/openssl@3/lib' >> $GITHUB_ENV - echo 'CPPFLAGS=-I/usr/local/opt/openssl@3/include' >> $GITHUB_ENV - - - name: Run cargo build - run: cargo build --all --release -j$(sysctl -n hw.ncpu) + # Build the neon-specific postgres extensions, and all the Rust bits. + # + # Pass PG_INSTALL_CACHED=1 because PostgreSQL was already built and cached + # separately. + - name: Build all + run: PG_INSTALL_CACHED=1 BUILD_TYPE=release make -j$(sysctl -n hw.ncpu) all - name: Check that no warnings are produced run: ./run_clippy.sh diff --git a/.github/workflows/build_and_run_selected_test.yml b/.github/workflows/build_and_run_selected_test.yml index 7f1eb991c4..6d3541d1b6 100644 --- a/.github/workflows/build_and_run_selected_test.yml +++ b/.github/workflows/build_and_run_selected_test.yml @@ -58,6 +58,7 @@ jobs: test-cfg: ${{ inputs.pg-versions }} test-selection: ${{ inputs.test-selection }} test-run-count: ${{ fromJson(inputs.run-count) }} + rerun-failed: false secrets: inherit create-test-report: diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 9f2fa3d52c..456c7b8c92 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -69,7 +69,7 @@ jobs: submodules: true - name: Check for file changes - uses: step-security/paths-filter@v3 + uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 id: files-changed with: token: ${{ secrets.GITHUB_TOKEN }} @@ -199,6 +199,28 @@ jobs: build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm secrets: inherit + validate-compute-manifest: + runs-on: ubuntu-22.04 + needs: [ meta, check-permissions ] + # We do need to run this in `.*-rc-pr` because of hotfixes. + if: ${{ contains(fromJSON('["pr", "push-main", "storage-rc-pr", "proxy-rc-pr", "compute-rc-pr"]'), needs.meta.outputs.run-kind) }} + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Set up Node.js + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + with: + node-version: '24' + + - name: Validate manifest against schema + run: | + make -C compute manifest-schema-validation + build-and-test-locally: needs: [ meta, build-build-tools-image ] # We do need to run this in `.*-rc-pr` because of hotfixes. 
@@ -648,7 +670,7 @@ jobs: ghcr.io/neondatabase/neon:${{ needs.meta.outputs.build-tag }}-bookworm-arm64 compute-node-image-arch: - needs: [ check-permissions, build-build-tools-image, meta ] + needs: [ check-permissions, meta ] if: ${{ contains(fromJSON('["push-main", "pr", "compute-rc-pr"]'), needs.meta.outputs.run-kind) }} permissions: id-token: write # aws-actions/configure-aws-credentials @@ -721,7 +743,6 @@ jobs: GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }} PG_VERSION=${{ matrix.version.pg }} BUILD_TAG=${{ needs.meta.outputs.release-tag || needs.meta.outputs.build-tag }} - TAG=${{ needs.build-build-tools-image.outputs.image-tag }}-${{ matrix.version.debian }} DEBIAN_VERSION=${{ matrix.version.debian }} provenance: false push: true @@ -741,7 +762,6 @@ jobs: GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }} PG_VERSION=${{ matrix.version.pg }} BUILD_TAG=${{ needs.meta.outputs.release-tag || needs.meta.outputs.build-tag }} - TAG=${{ needs.build-build-tools-image.outputs.image-tag }}-${{ matrix.version.debian }} DEBIAN_VERSION=${{ matrix.version.debian }} provenance: false push: true diff --git a/.github/workflows/build_and_test_fully.yml b/.github/workflows/build_and_test_fully.yml new file mode 100644 index 0000000000..dd1d63b02b --- /dev/null +++ b/.github/workflows/build_and_test_fully.yml @@ -0,0 +1,151 @@ +name: Build and Test Fully + +on: + schedule: + # * is a special character in YAML so you have to quote this string + # ┌───────────── minute (0 - 59) + # │ ┌───────────── hour (0 - 23) + # │ │ ┌───────────── day of the month (1 - 31) + # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) + # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) + - cron: '0 3 * * *' # run once a day, timezone is utc + workflow_dispatch: + +defaults: + run: + shell: bash -euxo pipefail {0} + +concurrency: + # Allow only one workflow per any non-`main` branch. 
+ group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} + cancel-in-progress: true + +env: + RUST_BACKTRACE: 1 + COPT: '-Werror' + +jobs: + tag: + runs-on: [ self-hosted, small ] + container: ${{ vars.NEON_DEV_AWS_ACCOUNT_ID }}.dkr.ecr.${{ vars.AWS_ECR_REGION }}.amazonaws.com/base:pinned + outputs: + build-tag: ${{steps.build-tag.outputs.tag}} + + steps: + # Need `fetch-depth: 0` to count the number of commits in the branch + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: 0 + + - name: Get build tag + run: | + echo run:$GITHUB_RUN_ID + echo ref:$GITHUB_REF_NAME + echo rev:$(git rev-list --count HEAD) + if [[ "$GITHUB_REF_NAME" == "main" ]]; then + echo "tag=$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT + elif [[ "$GITHUB_REF_NAME" == "release" ]]; then + echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT + elif [[ "$GITHUB_REF_NAME" == "release-proxy" ]]; then + echo "tag=release-proxy-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT + elif [[ "$GITHUB_REF_NAME" == "release-compute" ]]; then + echo "tag=release-compute-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT + else + echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release', 'release-proxy', 'release-compute'" + echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT + fi + shell: bash + id: build-tag + + build-build-tools-image: + uses: ./.github/workflows/build-build-tools-image.yml + secrets: inherit + + build-and-test-locally: + needs: [ tag, build-build-tools-image ] + strategy: + fail-fast: false + matrix: + arch: [ x64, arm64 ] + build-type: [ debug, release ] + uses: ./.github/workflows/_build-and-test-locally.yml + with: + arch: ${{ matrix.arch }} + build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm + build-tag: ${{ needs.tag.outputs.build-tag }} + build-type: ${{ matrix.build-type }} + rerun-failed: false + test-cfg: '[{"pg_version":"v14", "lfc_state": "with-lfc"}, + {"pg_version":"v15", "lfc_state": "with-lfc"}, + {"pg_version":"v16", "lfc_state": "with-lfc"}, + {"pg_version":"v17", "lfc_state": "with-lfc"}, + {"pg_version":"v14", "lfc_state": "without-lfc"}, + {"pg_version":"v15", "lfc_state": "without-lfc"}, + {"pg_version":"v16", "lfc_state": "without-lfc"}, + {"pg_version":"v17", "lfc_state": "without-lfc"}]' + secrets: inherit + + + create-test-report: + needs: [ build-and-test-locally, build-build-tools-image ] + if: ${{ !cancelled() }} + permissions: + id-token: write # aws-actions/configure-aws-credentials + statuses: write + contents: write + pull-requests: write + outputs: + report-url: ${{ steps.create-allure-report.outputs.report-url }} + + runs-on: [ self-hosted, small ] + container: + image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + options: --init + + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Create Allure report + if: ${{ !cancelled() }} + id: create-allure-report + uses: ./.github/actions/allure-report-generate + with: + 
store-test-results-into-db: true + aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + env: + REGRESS_TEST_RESULT_CONNSTR_NEW: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }} + + - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + if: ${{ !cancelled() }} + with: + # Retry script for 5XX server errors: https://github.com/actions/github-script#retries + retries: 5 + script: | + const report = { + reportUrl: "${{ steps.create-allure-report.outputs.report-url }}", + reportJsonUrl: "${{ steps.create-allure-report.outputs.report-json-url }}", + } + + const coverage = {} + + const script = require("./scripts/comment-test-report.js") + await script({ + github, + context, + fetch, + report, + coverage, + }) diff --git a/.github/workflows/build_and_test_with_sanitizers.yml b/.github/workflows/build_and_test_with_sanitizers.yml index c54448dedc..32fb3c7c15 100644 --- a/.github/workflows/build_and_test_with_sanitizers.yml +++ b/.github/workflows/build_and_test_with_sanitizers.yml @@ -79,6 +79,7 @@ jobs: build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}-bookworm build-tag: ${{ needs.tag.outputs.build-tag }} build-type: ${{ matrix.build-type }} + rerun-failed: false test-cfg: '[{"pg_version":"v17"}]' sanitizers: enabled secrets: inherit diff --git a/.github/workflows/large_oltp_benchmark.yml b/.github/workflows/large_oltp_benchmark.yml index 42dcc8e918..4f6858dcae 100644 --- a/.github/workflows/large_oltp_benchmark.yml +++ b/.github/workflows/large_oltp_benchmark.yml @@ -33,11 +33,19 @@ jobs: fail-fast: false # allow other variants to continue even if one fails matrix: include: + # test only read-only custom scripts in new branch without database maintenance + - target: new_branch + custom_scripts: select_any_webhook_with_skew.sql@300 select_recent_webhook.sql@397 select_prefetch_webhook.sql@3 + test_maintenance: false + # test all custom scripts in new branch with database maintenance - target: new_branch custom_scripts: insert_webhooks.sql@200 select_any_webhook_with_skew.sql@300 select_recent_webhook.sql@397 select_prefetch_webhook.sql@3 IUD_one_transaction.sql@100 + test_maintenance: true + # test all custom scripts in reuse branch with database maintenance - target: reuse_branch custom_scripts: insert_webhooks.sql@200 select_any_webhook_with_skew.sql@300 select_recent_webhook.sql@397 select_prefetch_webhook.sql@3 IUD_one_transaction.sql@100 - max-parallel: 1 # we want to run each stripe size sequentially to be able to compare the results + test_maintenance: true + max-parallel: 1 # we want to run each benchmark sequentially to not have noisy neighbors on shared storage (PS, SK) permissions: contents: write statuses: write @@ -145,6 +153,7 @@ jobs: PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}" - name: Benchmark database maintenance + if: ${{ matrix.test_maintenance }} uses: ./.github/actions/run-python-test-set with: build_type: ${{ env.BUILD_TYPE }} diff --git a/.github/workflows/large_oltp_growth.yml b/.github/workflows/large_oltp_growth.yml new file mode 100644 index 0000000000..8ca640d6ef --- /dev/null +++ b/.github/workflows/large_oltp_growth.yml @@ -0,0 +1,175 @@ +name: large oltp growth +# workflow to grow the reuse branch of large oltp benchmark continuously (about 16 GB per run) + +on: + # uncomment to run on push for debugging your PR + # push: + # branches: [ bodobolero/increase_large_oltp_workload ] + + schedule: + # * is a special character in YAML so you have to quote this string + # ┌───────────── minute (0 - 
59) + # │ ┌───────────── hour (0 - 23) + # │ │ ┌───────────── day of the month (1 - 31) + # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) + # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) + - cron: '0 6 * * *' # 06:00 UTC + - cron: '0 8 * * *' # 08:00 UTC + - cron: '0 10 * * *' # 10:00 UTC + - cron: '0 12 * * *' # 12:00 UTC + - cron: '0 14 * * *' # 14:00 UTC + - cron: '0 16 * * *' # 16:00 UTC + workflow_dispatch: # adds ability to run this manually + +defaults: + run: + shell: bash -euxo pipefail {0} + +concurrency: + # Allow only one workflow globally because we need dedicated resources which only exist once + group: large-oltp-growth + cancel-in-progress: true + +permissions: + contents: read + +jobs: + oltp: + strategy: + fail-fast: false # allow other variants to continue even if one fails + matrix: + include: + # for now only grow the reuse branch, not the other branches. + - target: reuse_branch + custom_scripts: + - grow_action_blocks.sql + - grow_action_kwargs.sql + - grow_device_fingerprint_event.sql + - grow_edges.sql + - grow_hotel_rate_mapping.sql + - grow_ocr_pipeline_results_version.sql + - grow_priceline_raw_response.sql + - grow_relabled_transactions.sql + - grow_state_values.sql + - grow_values.sql + - grow_vertices.sql + - update_accounting_coding_body_tracking_category_selection.sql + - update_action_blocks.sql + - update_action_kwargs.sql + - update_denormalized_approval_workflow.sql + - update_device_fingerprint_event.sql + - update_edges.sql + - update_heron_transaction_enriched_log.sql + - update_heron_transaction_enrichment_requests.sql + - update_hotel_rate_mapping.sql + - update_incoming_webhooks.sql + - update_manual_transaction.sql + - update_ml_receipt_matching_log.sql + - update_ocr_pipeine_results_version.sql + - update_orc_pipeline_step_results.sql + - update_orc_pipeline_step_results_version.sql + - update_priceline_raw_response.sql + - update_quickbooks_transactions.sql + - update_raw_finicity_transaction.sql + - update_relabeled_transactions.sql + - update_state_values.sql + - update_stripe_authorization_event_log.sql + - update_transaction.sql + - update_values.sql + - update_vertices.sql + max-parallel: 1 # we want to run each growth workload sequentially (for now there is just one) + permissions: + contents: write + statuses: write + id-token: write # aws-actions/configure-aws-credentials + env: + TEST_PG_BENCH_DURATIONS_MATRIX: "1h" + TEST_PGBENCH_CUSTOM_SCRIPTS: ${{ join(matrix.custom_scripts, ' ') }} + POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install + PG_VERSION: 16 # pre-determined by pre-determined project + TEST_OUTPUT: /tmp/test_output + BUILD_TYPE: remote + PLATFORM: ${{ matrix.target }} + + runs-on: [ self-hosted, us-east-2, x64 ] + container: + image: ghcr.io/neondatabase/build-tools:pinned-bookworm + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + options: --init + + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@4d991eb9b905ef189e4c376166672c3f2f230481 # v2.11.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Configure AWS credentials # necessary to download artefacts + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 + with: + aws-region: eu-central-1 + role-to-assume: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + role-duration-seconds: 18000 # 5 hours is currently max associated with IAM role + + - name: Download Neon artifact + uses: 
./.github/actions/download + with: + name: neon-${{ runner.os }}-${{ runner.arch }}-release-artifact + path: /tmp/neon/ + prefix: latest + aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + + - name: Set up Connection String + id: set-up-connstr + run: | + case "${{ matrix.target }}" in + reuse_branch) + CONNSTR=${{ secrets.BENCHMARK_LARGE_OLTP_REUSE_CONNSTR }} + ;; + *) + echo >&2 "Unknown target=${{ matrix.target }}" + exit 1 + ;; + esac + + CONNSTR_WITHOUT_POOLER="${CONNSTR//-pooler/}" + + echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT + echo "connstr_without_pooler=${CONNSTR_WITHOUT_POOLER}" >> $GITHUB_OUTPUT + + - name: pgbench with custom-scripts + uses: ./.github/actions/run-python-test-set + with: + build_type: ${{ env.BUILD_TYPE }} + test_selection: performance + run_in_parallel: false + save_perf_report: true + extra_params: -m remote_cluster --timeout 7200 -k test_perf_oltp_large_tenant_growth + pg_version: ${{ env.PG_VERSION }} + aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + env: + BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }} + VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}" + PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}" + + - name: Create Allure report + id: create-allure-report + if: ${{ !cancelled() }} + uses: ./.github/actions/allure-report-generate + with: + aws-oidc-role-arn: ${{ vars.DEV_AWS_OIDC_ROLE_ARN }} + + - name: Post to a Slack channel + if: ${{ github.event.schedule && failure() }} + uses: slackapi/slack-github-action@fcfb566f8b0aab22203f066d80ca1d7e4b5d05b3 # v1.27.1 + with: + channel-id: "C06KHQVQ7U3" # on-call-qa-staging-stream + slack-message: | + Periodic large oltp tenant growth increase: ${{ job.status }} + <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|GitHub Run> + <${{ steps.create-allure-report.outputs.report-url }}|Allure report> + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} diff --git a/.github/workflows/neon_extra_builds.yml b/.github/workflows/neon_extra_builds.yml index 3427a0eb49..3e81183687 100644 --- a/.github/workflows/neon_extra_builds.yml +++ b/.github/workflows/neon_extra_builds.yml @@ -53,7 +53,7 @@ jobs: submodules: true - name: Check for Postgres changes - uses: step-security/paths-filter@v3 + uses: dorny/paths-filter@1441771bbfdd59dcd748680ee64ebd8faab1a242 #v3 id: files_changed with: token: ${{ github.token }} diff --git a/.github/workflows/pre-merge-checks.yml b/.github/workflows/pre-merge-checks.yml index 6fb4753fc0..23b8573097 100644 --- a/.github/workflows/pre-merge-checks.yml +++ b/.github/workflows/pre-merge-checks.yml @@ -34,7 +34,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: step-security/changed-files@3dbe17c78367e7d60f00d78ae6781a35be47b4a1 # v45.0.1 + - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 id: python-src with: files: | @@ -45,7 +45,7 @@ jobs: poetry.lock pyproject.toml - - uses: step-security/changed-files@3dbe17c78367e7d60f00d78ae6781a35be47b4a1 # v45.0.1 + - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 id: rust-src with: files: | diff --git a/.github/workflows/proxy-benchmark.yml b/.github/workflows/proxy-benchmark.yml new file mode 100644 index 0000000000..3a98ad4e8e --- /dev/null +++ b/.github/workflows/proxy-benchmark.yml @@ -0,0 +1,84 @@ +name: Periodic proxy performance test on unit-perf hetzner runner + +on: + push: # TODO: remove after testing + branches: + - test-proxy-bench 
# Runs on pushes to branches starting with test-proxy-bench + # schedule: + # * is a special character in YAML so you have to quote this string + # ┌───────────── minute (0 - 59) + # │ ┌───────────── hour (0 - 23) + # │ │ ┌───────────── day of the month (1 - 31) + # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) + # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) + # - cron: '0 5 * * *' # Runs at 5 UTC once a day + workflow_dispatch: # adds an ability to run this manually + +defaults: + run: + shell: bash -euo pipefail {0} + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: false + +permissions: + contents: read + +jobs: + run_periodic_proxybench_test: + permissions: + id-token: write # aws-actions/configure-aws-credentials + statuses: write + contents: write + pull-requests: write + runs-on: [self-hosted, unit-perf] + timeout-minutes: 60 # 1h timeout + container: + image: ghcr.io/neondatabase/build-tools:pinned-bookworm + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + options: --init + steps: + - name: Checkout proxy-bench Repo + uses: actions/checkout@v4 + with: + repository: neondatabase/proxy-bench + path: proxy-bench + + - name: Set up the environment which depends on $RUNNER_TEMP on nvme drive + id: set-env + shell: bash -euxo pipefail {0} + run: | + PROXY_BENCH_PATH=$(realpath ./proxy-bench) + { + echo "PROXY_BENCH_PATH=$PROXY_BENCH_PATH" + echo "NEON_DIR=${RUNNER_TEMP}/neon" + echo "TEST_OUTPUT=${PROXY_BENCH_PATH}/test_output" + echo "" + } >> "$GITHUB_ENV" + + - name: Run proxy-bench + run: ${PROXY_BENCH_PATH}/run.sh + + - name: Ingest Bench Results # neon repo script + if: always() + run: | + mkdir -p $TEST_OUTPUT + python $NEON_DIR/scripts/proxy_bench_results_ingest.py --out $TEST_OUTPUT + + - name: Push Metrics to Proxy perf database + if: always() + env: + PERF_TEST_RESULT_CONNSTR: "${{ secrets.PROXY_TEST_RESULT_CONNSTR }}" + REPORT_FROM: $TEST_OUTPUT + run: $NEON_DIR/scripts/generate_and_push_perf_report.sh + + - name: Docker cleanup + if: always() + run: docker compose down + + - name: Notify Failure + if: failure() + run: echo "Proxy bench job failed" && exit 1 \ No newline at end of file diff --git a/.gitignore b/.gitignore index 45eb4dbf0e..69573638f2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,12 @@ /artifact_cache +/build /pg_install /target /tmp_check /tmp_check_cli __pycache__/ test_output/ +neon_previous/ .vscode .idea *.swp @@ -13,6 +15,7 @@ neon.iml /.neon /integration_tests/.neon compaction-suite-results.* +pgxn/neon/communicator/communicator_bindings.h # Coverage *.profraw diff --git a/Cargo.lock b/Cargo.lock index f798a1bdda..bb89c8a92a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,48 +18,54 @@ dependencies = [ ] [[package]] -name = "adler2" -version = "2.0.1" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "ahash" -version = "0.8.12" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +checksum = 
"e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "const-random", - "getrandom 0.3.3", + "getrandom 0.2.11", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.31", ] [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] [[package]] name = "aligned-vec" -version = "0.6.4" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" +checksum = "7e0966165eaf052580bd70eb1b32cb3d6245774c0104d1b2793e9650bf83b52a" dependencies = [ "equator", ] [[package]] name = "allocator-api2" -version = "0.2.21" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" [[package]] name = "android-tzdata" @@ -84,9 +90,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.19" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -99,44 +105,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.11" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.7" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.9" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", - "once_cell_polyfill", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" dependencies = [ "backtrace", ] @@ -185,9 +190,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.25" +version = "0.4.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "40f6024f3f856663b45fd0c9b6f2024034a702f453549449e0d84a305900dad4" +checksum = "bc2d0cfb2a7388d34f590e76686704c494ed7aaceed62ee1ba35cbf363abc2a5" dependencies = [ "flate2", "futures-core", @@ -200,20 +205,20 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.4.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" dependencies = [ - "event-listener 5.4.0", + "event-listener 4.0.0", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-stream" -version = "0.3.6" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", @@ -222,24 +227,24 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.6" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -249,22 +254,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8ab6b55fe97976e46f91ddbed8d147d966475dc29b2032757ba47e02376fbc3" [[package]] -name = "atomic-waker" -version = "1.1.2" +name = "atomic_enum" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +checksum = "99e1aca718ea7b89985790c94aad72d77533063fe00bc497bb79a7c2dae6a661" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] [[package]] name = "autocfg" -version = "1.5.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aws-config" -version = "1.8.0" +version = "1.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455e9fb7743c6f6267eb2830ccc08686fbb3d13c9a689369562fd4d4ef9ea462" +checksum = "9b49afaa341e8dd8577e1a2200468f98956d6eda50bcf4a53246cc00174ba924" dependencies = [ "aws-credential-types", "aws-runtime", @@ -273,15 +283,15 @@ dependencies = [ "aws-sdk-sts", "aws-smithy-async", "aws-smithy-http", - "aws-smithy-json", + "aws-smithy-json 0.60.7", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.3.0", + "fastrand 2.2.0", "hex", - "http 1.3.1", + "http 0.2.9", "ring", "time", "tokio", @@ -292,9 +302,9 @@ dependencies = [ [[package]] name = "aws-credential-types" -version = "1.2.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "687bc16bc431a8533fe0097c7f0182874767f920989d7260950172ae8e3c4465" +checksum = "60e8f6b615cb5fc60a98132268508ad104310f0cfb25a1c22eee76efdf9154da" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -302,34 +312,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "aws-lc-rs" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fcc8f365936c834db5514fc45aee5b1202d677e6b40e48468aaaa8183ca8c7" -dependencies = [ - "aws-lc-sys", - "zeroize", -] - -[[package]] -name = "aws-lc-sys" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b1d86e7705efe1be1b569bab41d4fa1e14e220b60a160f78de2db687add079" -dependencies = [ - "bindgen 0.69.5", - "cc", - "cmake", - "dunce", - "fs_extra", -] - [[package]] name = "aws-runtime" -version = "1.5.8" +version = "1.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f6c68419d8ba16d9a7463671593c54f81ba58cab466e9b759418da606dcc2e2" +checksum = "b5ac934720fbb46206292d2c75b57e67acfc56fe7dfd34fb9a02334af08409ea" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -341,9 +328,10 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.3.0", - "http 0.2.12", - "http-body 0.4.6", + "fastrand 2.2.0", + "http 0.2.9", + "http-body 0.4.5", + "once_cell", "percent-encoding", "pin-project-lite", "tracing", @@ -352,54 +340,54 @@ dependencies = [ [[package]] name = "aws-sdk-iam" -version = "1.77.0" +version = "1.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e273b8d4662875a313054584302defeba9c04e85dad7c662355e9fc3dba9ae1" +checksum = "fb8a6fea8d335cde419176b1f2c6d2d6e97997719e7df4b51e59064310f48e4a" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", - "aws-smithy-json", + "aws-smithy-json 0.61.1", "aws-smithy-query", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-smithy-xml", "aws-types", - "fastrand 2.3.0", - "http 0.2.12", + "http 0.2.9", + "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sdk-kms" -version = "1.76.0" +version = "1.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8565497721d9f18fa29a68bc5d8225b39e1cc7399d7fc6f1ad803ca934341804" +checksum = "3c30f6fd5646b99d9b45ec3a0c22e67112c175b2383100c960d7ee39d96c8d96" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", - "aws-smithy-json", + "aws-smithy-json 0.61.1", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.3.0", - "http 0.2.12", + "http 0.2.9", + "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sdk-s3" -version = "1.93.0" +version = "1.65.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b9734dc8145b417a3c22eae8769a2879851690982dba718bdc52bd28ad04ce" +checksum = "d3ba2c5c0f2618937ce3d4a5ad574b86775576fa24006bcb3128c6e2cbf3c34e" dependencies = [ "aws-credential-types", "aws-runtime", @@ -408,20 +396,20 @@ dependencies = [ "aws-smithy-checksums", "aws-smithy-eventstream", "aws-smithy-http", - "aws-smithy-json", + "aws-smithy-json 0.61.1", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-smithy-xml", "aws-types", "bytes", - "fastrand 2.3.0", + "fastrand 2.2.0", "hex", "hmac", - "http 0.2.12", - "http 1.3.1", - "http-body 0.4.6", + "http 0.2.9", + "http-body 0.4.5", "lru", + "once_cell", "percent-encoding", "regex-lite", 
"sha2", @@ -431,76 +419,76 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.73.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ac1674cba7872061a29baaf02209fefe499ff034dfd91bd4cc59e4d7741489" +checksum = "05ca43a4ef210894f93096039ef1d6fa4ad3edfabb3be92b80908b9f2e4b4eab" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", - "aws-smithy-json", + "aws-smithy-json 0.61.1", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.3.0", - "http 0.2.12", + "http 0.2.9", + "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sdk-ssooidc" -version = "1.74.0" +version = "1.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6a22f077f5fd3e3c0270d4e1a110346cddf6769e9433eb9e6daceb4ca3b149" +checksum = "abaf490c2e48eed0bb8e2da2fb08405647bd7f253996e0f93b981958ea0f73b0" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", - "aws-smithy-json", + "aws-smithy-json 0.61.1", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.3.0", - "http 0.2.12", + "http 0.2.9", + "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sdk-sts" -version = "1.75.0" +version = "1.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3258fa707f2f585ee3049d9550954b959002abd59176975150a01d5cf38ae3f" +checksum = "b68fde0d69c8bfdc1060ea7da21df3e39f6014da316783336deff0a9ec28f4bf" dependencies = [ "aws-credential-types", "aws-runtime", "aws-smithy-async", "aws-smithy-http", - "aws-smithy-json", + "aws-smithy-json 0.61.1", "aws-smithy-query", "aws-smithy-runtime", "aws-smithy-runtime-api", "aws-smithy-types", "aws-smithy-xml", "aws-types", - "fastrand 2.3.0", - "http 0.2.12", + "http 0.2.9", + "once_cell", "regex-lite", "tracing", ] [[package]] name = "aws-sigv4" -version = "1.3.3" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfb9021f581b71870a17eac25b52335b82211cdc092e02b6876b2bcefa61666" +checksum = "7d3820e0c08d0737872ff3c7c1f21ebbb6693d832312d6152bf18ef50a5471c2" dependencies = [ "aws-credential-types", "aws-smithy-eventstream", @@ -512,8 +500,9 @@ dependencies = [ "form_urlencoded", "hex", "hmac", - "http 0.2.12", - "http 1.3.1", + "http 0.2.9", + "http 1.1.0", + "once_cell", "p256 0.11.1", "percent-encoding", "ring", @@ -526,9 +515,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "1.2.5" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e190749ea56f8c42bf15dd76c65e14f8f765233e6df9b0506d9d934ebef867c" +checksum = "62220bc6e97f946ddd51b5f1361f78996e704677afc518a4ff66b7a72ea1378c" dependencies = [ "futures-util", "pin-project-lite", @@ -537,17 +526,18 @@ dependencies = [ [[package]] name = "aws-smithy-checksums" -version = "0.63.3" +version = "0.60.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f77a921dbd2c78ebe70726799787c1d110a2245dd65e39b20923dfdfb2deee" +checksum = "ba1a71073fca26775c8b5189175ea8863afb1c9ea2cceb02a5de5ad9dfbaa795" dependencies = [ "aws-smithy-http", "aws-smithy-types", "bytes", - "crc-fast", + "crc32c", + "crc32fast", "hex", - "http 0.2.12", - "http-body 0.4.6", + "http 0.2.9", + "http-body 0.4.5", "md-5", "pin-project-lite", "sha1", @@ -557,9 +547,9 @@ dependencies = [ [[package]] name = "aws-smithy-eventstream" 
-version = "0.60.9" +version = "0.60.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338a3642c399c0a5d157648426110e199ca7fd1c689cc395676b81aa563700c4" +checksum = "cef7d0a272725f87e51ba2bf89f8c21e4df61b9e49ae1ac367a6d69916ef7c90" dependencies = [ "aws-smithy-types", "bytes", @@ -568,9 +558,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.62.1" +version = "0.60.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99335bec6cdc50a346fda1437f9fefe33abf8c99060739a546a16457f2862ca9" +checksum = "5c8bc3e8fdc6b8d07d976e301c02fe553f72a39b7a9fea820e023268467d7ab6" dependencies = [ "aws-smithy-eventstream", "aws-smithy-runtime-api", @@ -578,9 +568,9 @@ dependencies = [ "bytes", "bytes-utils", "futures-core", - "http 0.2.12", - "http 1.3.1", - "http-body 0.4.6", + "http 0.2.9", + "http-body 0.4.5", + "once_cell", "percent-encoding", "pin-project-lite", "pin-utils", @@ -588,52 +578,23 @@ dependencies = [ ] [[package]] -name = "aws-smithy-http-client" -version = "1.0.5" +name = "aws-smithy-json" +version = "0.60.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f491388e741b7ca73b24130ff464c1478acc34d5b331b7dd0a2ee4643595a15" +checksum = "4683df9469ef09468dad3473d129960119a0d3593617542b7d52086c8486f2d6" dependencies = [ - "aws-smithy-async", - "aws-smithy-runtime-api", "aws-smithy-types", - "h2 0.3.26", - "h2 0.4.10", - "http 0.2.12", - "http 1.3.1", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper 1.6.0", - "hyper-rustls 0.24.2", - "hyper-rustls 0.27.7", - "hyper-util", - "pin-project-lite", - "rustls 0.21.12", - "rustls 0.23.28", - "rustls-native-certs 0.8.1", - "rustls-pki-types", - "tokio", - "tower", - "tracing", ] [[package]] name = "aws-smithy-json" -version = "0.61.4" +version = "0.61.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a16e040799d29c17412943bdbf488fd75db04112d0c0d4b9290bacf5ae0014b9" +checksum = "ee4e69cc50921eb913c6b662f8d909131bb3e6ad6cb6090d3a39b66fc5c52095" dependencies = [ "aws-smithy-types", ] -[[package]] -name = "aws-smithy-observability" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9364d5989ac4dd918e5cc4c4bdcc61c9be17dcd2586ea7f69e348fc7c6cab393" -dependencies = [ - "aws-smithy-runtime-api", -] - [[package]] name = "aws-smithy-query" version = "0.60.7" @@ -646,39 +607,42 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.8.3" +version = "1.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14302f06d1d5b7d333fd819943075b13d27c7700b414f574c3c35859bfb55d5e" +checksum = "9f20685047ca9d6f17b994a07f629c813f08b5bce65523e47124879e60103d45" dependencies = [ "aws-smithy-async", "aws-smithy-http", - "aws-smithy-http-client", - "aws-smithy-observability", "aws-smithy-runtime-api", "aws-smithy-types", "bytes", - "fastrand 2.3.0", - "http 0.2.12", - "http 1.3.1", - "http-body 0.4.6", - "http-body 1.0.1", + "fastrand 2.2.0", + "h2 0.3.26", + "http 0.2.9", + "http-body 0.4.5", + "http-body 1.0.0", + "httparse", + "hyper 0.14.30", + "hyper-rustls 0.24.0", + "once_cell", "pin-project-lite", "pin-utils", + "rustls 0.21.12", "tokio", "tracing", ] [[package]] name = "aws-smithy-runtime-api" -version = "1.8.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd8531b6d8882fd8f48f82a9754e682e29dd44cff27154af51fa3eb730f59efb" +checksum = 
"92165296a47a812b267b4f41032ff8069ab7ff783696d217f0994a0d7ab585cd" dependencies = [ "aws-smithy-async", "aws-smithy-types", "bytes", - "http 0.2.12", - "http 1.3.1", + "http 0.2.9", + "http 1.1.0", "pin-project-lite", "tokio", "tracing", @@ -687,18 +651,18 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.3.2" +version = "1.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d498595448e43de7f4296b7b7a18a8a02c61ec9349128c80a368f7c3b4ab11a8" +checksum = "4fbd94a32b3a7d55d3806fe27d98d3ad393050439dd05eb53ece36ec5e3d3510" dependencies = [ "base64-simd", "bytes", "bytes-utils", "futures-core", - "http 0.2.12", - "http 1.3.1", - "http-body 0.4.6", - "http-body 1.0.1", + "http 0.2.9", + "http 1.1.0", + "http-body 0.4.5", + "http-body 1.0.0", "http-body-util", "itoa", "num-integer", @@ -713,18 +677,18 @@ dependencies = [ [[package]] name = "aws-smithy-xml" -version = "0.60.10" +version = "0.60.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db87b96cb1b16c024980f133968d52882ca0daaee3a086c6decc500f6c99728" +checksum = "ab0b0166827aa700d3dc519f72f8b3a91c35d0b8d042dc5d643a91e6f80648fc" dependencies = [ "xmlparser", ] [[package]] name = "aws-types" -version = "1.3.7" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a322fec39e4df22777ed3ad8ea868ac2f94cd15e1a55f6ee8d8d6305057689a" +checksum = "5221b91b3e441e6675310829fd8984801b772cb1546ef6c0e54dec9f1ac13fef" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -736,22 +700,49 @@ dependencies = [ [[package]] name = "axum" -version = "0.8.4" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ - "axum-core", + "async-trait", + "axum-core 0.4.5", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "itoa", + "matchit 0.7.3", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper 1.0.1", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d6fd624c75e18b3b4c6b9caf42b1afe24437daaee904069137d8bab077be8b8" +dependencies = [ + "axum-core 0.5.0", "base64 0.22.1", "bytes", "form_urlencoded", "futures-util", - "http 1.3.1", - "http-body 1.0.1", + "http 1.1.0", + "http-body 1.0.0", "http-body-util", "hyper 1.6.0", "hyper-util", "itoa", - "matchit", + "matchit 0.8.4", "memchr", "mime", "percent-encoding", @@ -762,10 +753,10 @@ dependencies = [ "serde_path_to_error", "serde_urlencoded", "sha1", - "sync_wrapper 1.0.2", + "sync_wrapper 1.0.1", "tokio", - "tokio-tungstenite 0.26.2", - "tower", + "tokio-tungstenite 0.26.1", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -773,19 +764,39 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ + "async-trait", "bytes", - "futures-core", - "http 1.3.1", - "http-body 1.0.1", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", "http-body-util", "mime", "pin-project-lite", 
"rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper 1.0.1", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1362f362fd16024ae199c1970ce98f9661bf5ef94b9808fee734bc3698b733" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", "tracing", @@ -793,23 +804,25 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.10.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45bf463831f5131b7d3c756525b305d40f1185b688565648a92e1392ca35713d" +checksum = "460fc6f625a1f7705c6cf62d0d070794e94668988b1c38111baeec177c715f7b" dependencies = [ - "axum", - "axum-core", + "axum 0.8.1", + "axum-core 0.5.0", "bytes", + "form_urlencoded", "futures-util", "headers", - "http 1.3.1", - "http-body 1.0.1", + "http 1.1.0", + "http-body 1.0.0", "http-body-util", "mime", "pin-project-lite", - "rustversion", "serde", - "tower", + "serde_html_form", + "serde_path_to_error", + "tower 0.5.2", "tower-layer", "tower-service", ] @@ -824,7 +837,7 @@ dependencies = [ "bytes", "dyn-clone", "futures", - "getrandom 0.2.16", + "getrandom 0.2.11", "hmac", "http-types", "once_cell", @@ -917,14 +930,14 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.75" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.8.0", "object", "rustc-demangle", "windows-targets 0.52.6", @@ -948,12 +961,6 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" -[[package]] -name = "base64" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5" - [[package]] name = "base64" version = "0.21.7" @@ -978,9 +985,9 @@ dependencies = [ [[package]] name = "base64ct" -version = "1.8.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bb8" @@ -990,15 +997,15 @@ checksum = "d89aabfae550a5c44b43ab941844ffcd2e993cb6900b342debf59e9ea74acdb8" dependencies = [ "async-trait", "futures-util", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "tokio", ] [[package]] name = "bcder" -version = "0.7.5" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ffdaa8c6398acd07176317eb6c1f9082869dd1cc3fee7c72c6354866b928cc" +checksum = "c627747a6774aab38beb35990d88309481378558875a41da1a4b2e373c906ef0" dependencies = [ "bytes", "smallvec", @@ -1013,39 +1020,16 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.69.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" -dependencies = [ - "bitflags 2.9.1", - "cexpr", - "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", - 
"log", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash 1.1.0", - "shlex", - "syn", - "which", -] - [[package]] name = "bindgen" version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", "cexpr", "clang-sys", - "itertools 0.13.0", + "itertools 0.12.1", "log", "prettyplease", "proc-macro2", @@ -1053,7 +1037,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn", + "syn 2.0.100", ] [[package]] @@ -1070,9 +1054,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "block-buffer" @@ -1085,47 +1069,48 @@ dependencies = [ [[package]] name = "bstr" -version = "1.12.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4" +checksum = "a246e68bb43f6cd9db24bea052a53e40405417c5fb372e3d1a8a7f770a564ef5" dependencies = [ "memchr", - "regex-automata 0.4.9", + "once_cell", + "regex-automata 0.1.10", "serde", ] [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bytemuck" -version = "1.23.1" +version = "1.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" [[package]] name = "byteorder" -version = "1.5.0" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.10.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" dependencies = [ "serde", ] [[package]] name = "bytes-utils" -version = "0.1.4" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dafe3a8757b027e2be6e4e5601ed563c55989fcf1546e933c66c8eb3a058d35" +checksum = "e47d3a8076e283f3acd27400535992edb3ba4b5bb72f8891ad8fbe7932a7d4b9" dependencies = [ "bytes", "either", @@ -1133,18 +1118,18 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.10" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" dependencies = [ "serde", ] [[package]] name = "camino-tempfile" -version = "1.4.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64308c4c82a5c38679945ddf88738dc1483dcc563bbb5780755ae9f8497d2b20" 
+checksum = "d2ab15a83d13f75dbd86f082bdefd160b628476ef58d3b900a0ef74e001bb097" dependencies = [ "camino", "tempfile", @@ -1158,28 +1143,28 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cbindgen" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadd868a2ce9ca38de7eeafdcec9c7065ef89b42b32f0839278d55f35c54d1ff" +checksum = "975982cdb7ad6a142be15bdf84aea7ec6a9e5d4d797c004d43185b24cfe4e684" dependencies = [ "clap", - "heck 0.4.1", + "heck", "indexmap 2.9.0", "log", "proc-macro2", "quote", "serde", "serde_json", - "syn", + "syn 2.0.100", "tempfile", "toml", ] [[package]] name = "cc" -version = "1.2.27" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" +checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "jobserver", "libc", @@ -1197,9 +1182,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" @@ -1209,9 +1194,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "cgroups-rs" -version = "0.3.4" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db7c2f5545da4c12c5701455d9471da5f07db52e49b9cccb4f5512226dd0836" +checksum = "1fb3af90c8d48ad5f432d8afb521b5b40c2a2fce46dd60e05912de51c47fba64" dependencies = [ "libc", "log", @@ -1222,9 +1207,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1264,9 +1249,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.8.1" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -1275,9 +1260,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.40" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" +checksum = "69371e34337c4c984bbe322360c2547210bf632eb2814bbe78a6e87a2935bd2b" dependencies = [ "clap_builder", "clap_derive", @@ -1285,68 +1270,59 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.40" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" +checksum = "6e24c1b4099818523236a8ca881d2b45db98dadfb4625cf6608c12069fcbbde1" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim", + "strsim 0.11.1", ] [[package]] name = "clap_derive" -version = "4.5.40" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "clap_lex" -version = "0.7.5" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "clashmap" -version = "1.2.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a055b1f1bf558eae4959f6dd77cf2d7d50ae1483928e60ef21ca5a24fd4321" +checksum = "93bd59c81e2bd87a775ae2de75f070f7e2bfe97363a6ad652f46824564c23e4d" dependencies = [ "crossbeam-utils", - "hashbrown 0.15.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.15.2", "lock_api", - "parking_lot_core 0.9.11", + "parking_lot_core 0.9.8", "polonius-the-crab", "replace_with", ] -[[package]] -name = "cmake" -version = "0.1.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" -dependencies = [ - "cc", -] - [[package]] name = "colorchoice" -version = "1.0.4" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "combine" -version = "4.6.7" +version = "4.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" dependencies = [ "bytes", "futures-core", @@ -1358,21 +1334,44 @@ dependencies = [ [[package]] name = "comfy-table" -version = "7.1.4" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a65ebfec4fb190b6f90e944a817d60499ee0744e582530e2c9900a22e591d9a" +checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7" dependencies = [ "crossterm", - "unicode-segmentation", + "strum", + "strum_macros", "unicode-width", ] [[package]] name = "communicator" -version = "0.1.0" +version = "0.0.0" dependencies = [ + "atomic_enum", + "axum 0.8.1", + "bytes", "cbindgen", + "clashmap", + "http 1.1.0", + "libc", + "metrics", "neon-shmem", + "nix 0.30.1", + "pageserver_api", + "pageserver_client_grpc", + "pageserver_page_api", + "prometheus", + "prost 0.13.5", + "thiserror 1.0.69", + "tokio", + "tokio-pipe", + "tonic 0.12.3", + "tracing", + "tracing-subscriber", + "uring-common", + "utils", + "workspace_hack", ] [[package]] @@ -1387,6 +1386,7 @@ dependencies = [ "remote_storage", "serde", "serde_json", + "url", "utils", ] @@ -1400,9 +1400,9 @@ dependencies = [ "aws-sdk-kms", "aws-sdk-s3", "aws-smithy-types", - "axum", + "axum 0.8.1", "axum-extra", - "base64 0.13.1", + "base64 0.22.1", "bytes", "camino", "cfg-if", @@ -1412,7 +1412,8 @@ dependencies = [ "fail", "flate2", "futures", - "http 1.3.1", + "hostname-validator", + "http 1.1.0", "indexmap 2.9.0", "itertools 0.10.5", "jsonwebtoken", @@ -1424,8 +1425,10 @@ dependencies = [ "opentelemetry", "opentelemetry_sdk", "p256 0.13.2", + "pageserver_page_api", "postgres", "postgres_initdb", + "postgres_versioninfo", "regex", "remote_storage", "reqwest", @@ 
-1442,7 +1445,8 @@ dependencies = [ "tokio-postgres", "tokio-stream", "tokio-util", - "tower", + "tonic 0.13.1", + "tower 0.5.2", "tower-http", "tower-otel", "tracing", @@ -1461,9 +1465,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.5.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] @@ -1476,9 +1480,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-random" -version = "0.1.18" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" dependencies = [ "const-random-macro", ] @@ -1489,25 +1493,25 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.11", "once_cell", "tiny-keccak", ] [[package]] name = "const_format" -version = "0.2.34" +version = "0.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +checksum = "7309d9b4d3d2c0641e018d449232f2e28f1b22933c137f157d3dbc14228b8c0e" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.34" +version = "0.2.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +checksum = "d897f47bf7270cf70d370f8f98c1abb6d2d4cf60a6845d30e05bfb90c6568650" dependencies = [ "proc-macro2", "quote", @@ -1528,7 +1532,7 @@ name = "control_plane" version = "0.1.0" dependencies = [ "anyhow", - "base64 0.13.1", + "base64 0.22.1", "camino", "clap", "comfy-table", @@ -1538,7 +1542,7 @@ dependencies = [ "http-utils", "humantime", "humantime-serde", - "hyper 0.14.32", + "hyper 0.14.30", "jsonwebtoken", "nix 0.30.1", "once_cell", @@ -1550,6 +1554,7 @@ dependencies = [ "regex", "reqwest", "safekeeper_api", + "safekeeper_client", "scopeguard", "serde", "serde_json", @@ -1571,19 +1576,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.4" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -1591,9 +1586,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.7" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpp_demangle" @@ -1606,41 +1601,13 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.17" +version = "0.2.9" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] -[[package]] -name = "crc" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" -dependencies = [ - "crc-catalog", -] - -[[package]] -name = "crc-catalog" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" - -[[package]] -name = "crc-fast" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bf62af4cc77d8fe1c22dde4e721d87f2f54056139d8c412e1366b740305f56f" -dependencies = [ - "crc", - "digest", - "libc", - "rand 0.9.1", - "regex", -] - [[package]] name = "crc32c" version = "0.6.8" @@ -1652,9 +1619,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] @@ -1703,23 +1670,24 @@ checksum = "5877d3fbf742507b66bc2a1945106bd30dd8504019d596901ddd012a4dd01740" dependencies = [ "chrono", "once_cell", - "winnow 0.6.26", + "winnow", ] [[package]] name = "crossbeam-channel" -version = "0.5.15" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ + "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.6" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1742,14 +1710,14 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crossterm" -version = "0.28.1" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" +checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", "crossterm_winapi", - "parking_lot 0.12.4", - "rustix 0.38.44", + "libc", + "parking_lot 0.12.1", "winapi", ] @@ -1764,9 +1732,9 @@ dependencies = [ [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" @@ -1825,14 +1793,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "darling" -version = "0.20.11" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +checksum = "0558d22a7b463ed0241e993f76f09f30b126687447751a8638587b864e4b3944" dependencies = [ "darling_core", "darling_macro", @@ -1840,27 +1808,40 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.11" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +checksum = "ab8bfa2e259f8ee1ce5e97824a3c55ec4404a0d772ca7fa96bf19f0752a046eb" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim", - "syn", + "strsim 0.10.0", + "syn 2.0.100", ] [[package]] name = "darling_macro" -version = "0.20.11" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn", + "syn 2.0.100", +] + +[[package]] +name = "dashmap" +version = "5.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6943ae99c34386c84a470c499d3414f66502a41340aa895406e0d2e4a207b91d" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.8", ] [[package]] @@ -1874,14 +1855,14 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.11", + "parking_lot_core 0.9.8", ] [[package]] name = "data-encoding" -version = "2.9.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" +checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" [[package]] name = "debugid" @@ -1905,9 +1886,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.10" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ "const-oid", "der_derive", @@ -1924,7 +1905,7 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -1937,16 +1918,6 @@ dependencies = [ "serde", ] -[[package]] -name = "deranged" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" -dependencies = [ - "powerfmt", - "serde", -] - [[package]] name = "desim" version = "0.1.0" @@ -1954,7 +1925,7 @@ dependencies = [ "anyhow", "bytes", "hex", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "rand 0.8.5", "smallvec", "tracing", @@ -1969,11 +1940,11 @@ checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c" [[package]] name = "diesel" -version = "2.2.11" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a917a9209950404d5be011c81d081a2692a822f73c3d6af586f0cab5ff50f614" +checksum = "ccf1bedf64cdb9643204a36dd15b19a6ce8e7aa7f7b105868e9f1fad5ffa7d12" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", "byteorder", "chrono", "diesel_derives", @@ -1998,15 +1969,15 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.2.6" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"52841e97814f407b895d836fa0012091dff79c6268f39ad8155d384c21ae0d26" +checksum = "59de76a222c2b8059f789cbe07afbfd8deb8c31dd0bc2a21f85e256c1def8259" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -2026,7 +1997,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" dependencies = [ - "syn", + "syn 2.0.100", ] [[package]] @@ -2049,13 +2020,13 @@ dependencies = [ [[package]] name = "displaydoc" -version = "0.2.5" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -2069,29 +2040,23 @@ dependencies = [ [[package]] name = "dsl_auto_type" -version = "0.1.3" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139ae9aca7527f85f26dd76483eb38533fd84bd571065da1739656ef71c5ff5b" +checksum = "0892a17df262a24294c382f0d5997571006e7a4348b4327557c4ff1cd4a8bccc" dependencies = [ "darling", "either", - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] -[[package]] -name = "dunce" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - [[package]] name = "dyn-clone" -version = "1.0.19" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" +checksum = "23d2f3407d9a573d666de4b5bdf10569d73ca9478087346697dcbae6244bfbcd" [[package]] name = "ecdsa" @@ -2111,7 +2076,7 @@ version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.10", + "der 0.7.8", "digest", "elliptic-curve 0.13.8", "rfc6979 0.4.0", @@ -2143,9 +2108,9 @@ dependencies = [ [[package]] name = "either" -version = "1.15.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "elliptic-curve" @@ -2177,7 +2142,7 @@ dependencies = [ "base64ct", "crypto-bigint 0.5.5", "digest", - "ff 0.13.1", + "ff 0.13.0", "generic-array", "group 0.13.0", "pem-rfc7468", @@ -2195,10 +2160,11 @@ name = "endpoint_storage" version = "0.0.1" dependencies = [ "anyhow", - "axum", + "axum 0.8.1", "axum-extra", "camino", "camino-tempfile", + "clap", "futures", "http-body-util", "itertools 0.10.5", @@ -2211,7 +2177,7 @@ dependencies = [ "test-log", "tokio", "tokio-util", - "tower", + "tower 0.5.2", "tracing", "utils", "workspace_hack", @@ -2219,29 +2185,29 @@ dependencies = [ [[package]] name = "enum-map" -version = "2.7.3" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6866f3bfdf8207509a033af1a75a7b08abda06bbaaeae6669323fd5a097df2e9" +checksum = "988f0d17a0fa38291e5f41f71ea8d46a5d5497b9054d5a759fae2cbb819f2356" dependencies = [ "enum-map-derive", ] [[package]] name = "enum-map-derive" -version = "0.17.0" +version = "0.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" +checksum = "2a4da76b3b6116d758c7ba93f7ec6a35d2e2cf24feda76c6e38a375f4d5c59f2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "enumset" -version = "1.1.6" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a6b7c3d347de0a9f7bfd2f853be43fe32fa6fac30c70f6d6d67a1e936b87ee" +checksum = "e875f1719c16de097dee81ed675e2d9bb63096823ed3f0ca827b7dea3028bbbb" dependencies = [ "enumset_derive", "serde", @@ -2249,14 +2215,14 @@ dependencies = [ [[package]] name = "enumset_derive" -version = "0.11.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6da3ea9e1d1a3b1593e15781f930120e72aa7501610b2f82e5b6739c72e8eac5" +checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -2271,9 +2237,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.8" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" +checksum = "c3716d7a920fb4fac5d84e9d4bce8ceb321e9414b4409da61b07b75c1e3d0697" dependencies = [ "anstream", "anstyle", @@ -2284,38 +2250,38 @@ dependencies = [ [[package]] name = "equator" -version = "0.4.2" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +checksum = "c35da53b5a021d2484a7cc49b2ac7f2d840f8236a286f84202369bd338d761ea" dependencies = [ "equator-macro", ] [[package]] name = "equator-macro" -version = "0.4.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "equivalent" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.13" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -2326,9 +2292,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" dependencies = [ "concurrent-queue", "parking", @@ -2337,11 +2303,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +checksum = 
"958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ - "event-listener 5.4.0", + "event-listener 4.0.0", "pin-project-lite", ] @@ -2373,9 +2339,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.3.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "ff" @@ -2389,9 +2355,9 @@ dependencies = [ [[package]] name = "ff" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ "rand_core 0.6.4", "subtle", @@ -2405,14 +2371,14 @@ checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" -version = "0.2.25" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +checksum = "d4029edd3e734da6fe05b6cd7bd2960760a616bd2ddd0d59a0124746d6272af0" dependencies = [ "cfg-if", "libc", - "libredox", - "windows-sys 0.59.0", + "redox_syscall 0.3.5", + "windows-sys 0.48.0", ] [[package]] @@ -2433,26 +2399,20 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" -[[package]] -name = "fixedbitset" -version = "0.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" - [[package]] name = "flagset" -version = "0.4.7" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ac824320a75a52197e8f2d787f6a38b6718bb6897a35142d749af3c0e8f4fe" +checksum = "b3ea1ec5f8307826a5b71094dd91fc04d4ae75d5709b20ad351c7fb4815c86ec" [[package]] name = "flate2" -version = "1.1.2" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" +checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.7.1", ] [[package]] @@ -2497,12 +2457,6 @@ dependencies = [ "tokio-util", ] -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "fsevent-sys" version = "4.1.0" @@ -2514,9 +2468,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.31" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -2545,9 +2499,9 @@ checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" 
dependencies = [ "futures-core", "futures-task", @@ -2583,7 +2537,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -2652,14 +2606,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -2670,11 +2624,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", - "js-sys", "libc", "r-efi", "wasi 0.14.2+wasi-0.2.4", - "wasm-bindgen", ] [[package]] @@ -2710,36 +2662,34 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "glob" -version = "0.3.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "governor" -version = "0.8.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be93b4ec2e4710b04d9264c0c7350cdd62a8c20e5e4ac732552ebb8f0debe8eb" +checksum = "842dc78579ce01e6a1576ad896edc92fca002dd60c9c3746b7fc2bec6fb429d0" dependencies = [ "cfg-if", - "dashmap", + "dashmap 6.1.0", "futures-sink", "futures-timer", "futures-util", - "getrandom 0.3.3", "no-std-compat", "nonzero_ext", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "portable-atomic", "quanta", - "rand 0.9.1", + "rand 0.8.5", "smallvec", "spinning_top", - "web-time", ] [[package]] @@ -2759,7 +2709,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.1", + "ff 0.13.0", "rand_core 0.6.4", "subtle", ] @@ -2775,7 +2725,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.12", + "http 0.2.9", "indexmap 2.9.0", "slab", "tokio", @@ -2785,16 +2735,16 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" dependencies = [ - "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.3.1", + "futures-util", + "http 1.1.0", "indexmap 2.9.0", "slab", "tokio", @@ -2804,9 +2754,9 @@ dependencies = [ [[package]] name = "half" -version = "2.6.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ "cfg-if", "crunchy", @@ -2840,14 +2790,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" -dependencies = [ - "allocator-api2", - "equivalent", - "foldhash", -] +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" [[package]] name = "hashbrown" @@ -2884,14 +2829,14 @@ dependencies = [ [[package]] name = "headers" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "bytes", "headers-core", - "http 1.3.1", + "http 1.1.0", "httpdate", "mime", "sha1", @@ -2903,15 +2848,9 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" dependencies = [ - "http 1.3.1", + "http 1.1.0", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -2920,9 +2859,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.5.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -2957,31 +2896,28 @@ dependencies = [ "digest", ] -[[package]] -name = "home" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" -dependencies = [ - "windows-sys 0.59.0", -] - [[package]] name = "hostname" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" +checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows-link", + "windows", ] [[package]] -name = "http" -version = "0.2.12" +name = "hostname-validator" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +checksum = "f558a64ac9af88b5ba400d99b579451af0d39c6d360980045b91aac966d705e2" + +[[package]] +name = "http" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -2990,9 +2926,9 @@ dependencies = [ [[package]] name = "http" -version = "1.3.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -3001,35 +2937,35 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", - "http 0.2.12", + "http 
0.2.9", "pin-project-lite", ] [[package]] name = "http-body" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.3.1", + "http 1.1.0", ] [[package]] name = "http-body-util" -version = "0.1.3" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", - "futures-core", - "http 1.3.1", - "http-body 1.0.1", + "futures-util", + "http 1.1.0", + "http-body 1.0.0", "pin-project-lite", ] @@ -3063,7 +2999,7 @@ dependencies = [ "camino", "fail", "futures", - "hyper 0.14.32", + "hyper 0.14.30", "itertools 0.10.5", "jemalloc_pprof", "jsonwebtoken", @@ -3072,8 +3008,8 @@ dependencies = [ "pprof", "regex", "routerify", - "rustls 0.23.28", - "rustls-pemfile 2.2.0", + "rustls 0.23.27", + "rustls-pemfile 2.1.1", "serde", "serde_json", "serde_path_to_error", @@ -3098,9 +3034,9 @@ checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" -version = "1.0.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "humantime" @@ -3120,17 +3056,17 @@ dependencies = [ [[package]] name = "hyper" -version = "0.14.32" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", + "http 0.2.9", + "http-body 0.4.5", "httparse", "httpdate", "itoa", @@ -3151,9 +3087,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.10", - "http 1.3.1", - "http-body 1.0.1", + "h2 0.4.4", + "http 1.1.0", + "http-body 1.0.0", "httparse", "httpdate", "itoa", @@ -3165,43 +3101,41 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.2" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7" dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.32", + "http 0.2.9", + "hyper 0.14.30", "log", "rustls 0.21.12", - "rustls-native-certs 0.6.3", + "rustls-native-certs 0.6.2", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls 0.24.0", ] [[package]] name = "hyper-rustls" -version = "0.27.7" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ - "http 1.3.1", + "futures-util", + "http 1.1.0", "hyper 1.6.0", "hyper-util", - "rustls 0.23.28", - "rustls-native-certs 0.8.1", + "rustls 0.22.4", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls 0.25.0", "tower-service", - "webpki-roots 1.0.1", ] 
[[package]] name = "hyper-timeout" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ "hyper 1.6.0", "hyper-util", @@ -3216,17 +3150,14 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" dependencies = [ - "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "http 1.3.1", - "http-body 1.0.1", + "http 1.1.0", + "http-body 1.0.0", "hyper 1.6.0", - "ipnet", "libc", - "percent-encoding", "pin-project-lite", "socket2", "tokio", @@ -3236,15 +3167,14 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", - "log", "wasm-bindgen", "windows-core", ] @@ -3260,22 +3190,21 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.0.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" dependencies = [ "displaydoc", - "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locale_core" -version = "2.0.0" +name = "icu_locid" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" dependencies = [ "displaydoc", "litemap", @@ -3285,10 +3214,30 @@ dependencies = [ ] [[package]] -name = "icu_normalizer" -version = "2.0.0" +name = "icu_locid_transform" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" dependencies = [ "displaydoc", "icu_collections", @@ -3296,54 +3245,67 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", + "utf16_iter", + "utf8_iter", + "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "2.0.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" [[package]] name = "icu_properties" -version = "2.0.1" +version = "1.5.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" dependencies = [ "displaydoc", "icu_collections", - "icu_locale_core", + "icu_locid_transform", "icu_properties_data", "icu_provider", - "potential_utf", - "zerotrie", + "tinystr", "zerovec", ] [[package]] name = "icu_properties_data" -version = "2.0.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" [[package]] name = "icu_provider" -version = "2.0.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" dependencies = [ "displaydoc", - "icu_locale_core", + "icu_locid", + "icu_provider_macros", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", - "zerotrie", "zerovec", ] +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -3363,9 +3325,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ "icu_normalizer", "icu_properties", @@ -3389,15 +3351,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.15.2", "serde", ] [[package]] name = "indoc" -version = "2.0.6" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c7245a08504955605670dbf141fceab975f15ca21570696aebe9d2e71576bd" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" [[package]] name = "infer" @@ -3425,22 +3387,22 @@ dependencies = [ [[package]] name = "inferno" -version = "0.12.2" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2094aecddc672e902cd773bad7071542f63641e01e9187c3bba4b43005e837e9" +checksum = "75a5d75fee4d36809e6b021e4b96b686e763d365ffdb03af2bd00786353f84fe" dependencies = [ "ahash", "clap", "crossbeam-channel", "crossbeam-utils", - "dashmap", + "dashmap 6.1.0", "env_logger", "indexmap 2.9.0", "itoa", "log", "num-format", "once_cell", - "quick-xml 0.37.5", + "quick-xml 0.37.1", "rgb", "str_stack", ] @@ -3467,9 +3429,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.13" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", "js-sys", @@ -3485,9 +3447,9 @@ checksum = 
"8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "io-uring" -version = "0.6.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595a0399f411a508feb2ec1e970a4a30c249351e30208960d58298de8660b0e5" +checksum = "460648e47a07a43110fbfa2e0b14afb2be920093c31e5dccc50e49568e099762" dependencies = [ "bitflags 1.3.2", "libc", @@ -3495,29 +3457,19 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.11.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" - -[[package]] -name = "iri-string" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" -dependencies = [ - "memchr", - "serde", -] +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" -version = "0.4.16" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3544,29 +3496,11 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" -dependencies = [ - "either", -] - [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jemalloc_pprof" @@ -3587,9 +3521,9 @@ dependencies = [ [[package]] name = "jiff" -version = "0.2.15" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be1f93b8b1eb69c77f24bbb0afdf66f54b632ee39af40ca21c4365a1d7347e49" +checksum = "d699bc6dfc879fb1bf9bdff0d4c56f0884fc6f0d0eb0fba397a6d00cd9a6b85e" dependencies = [ "jiff-static", "log", @@ -3600,22 +3534,21 @@ dependencies = [ [[package]] name = "jiff-static" -version = "0.2.15" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" +checksum = "8d16e75759ee0aa64c57a56acbf43916987b20c77373cb7e808979e02b93c9f9" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ - "getrandom 0.3.3", "libc", ] @@ -3657,11 +3590,10 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ - "once_cell", "wasm-bindgen", ] @@ -3678,11 +3610,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "9.3.1" +version = "9.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" +checksum = "5c7ea04a7c5c055c175f189b6dc6ba036fd62306b58c66c9f6389036c503a3f4" dependencies = [ - "base64 0.22.1", + "base64 0.21.7", "js-sys", "pem", "ring", @@ -3693,9 +3625,9 @@ dependencies = [ [[package]] name = "kqueue" -version = "1.1.1" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" +checksum = "7447f1ca1b7b563588a205fe93dea8df60fd981423a768bc1c0ded35ed147d0c" dependencies = [ "kqueue-sys", "libc", @@ -3713,12 +3645,12 @@ dependencies = [ [[package]] name = "lasso" -version = "0.7.3" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e14eda50a3494b3bf7b9ce51c52434a761e383d7238ce1dd5dcec2fbc13e9fb" +checksum = "4644821e1c3d7a560fe13d842d13f587c07348a1a05d3a797152d41c90c56df2" dependencies = [ - "dashmap", - "hashbrown 0.14.5", + "dashmap 5.5.0", + "hashbrown 0.13.2", ] [[package]] @@ -3730,68 +3662,45 @@ dependencies = [ "spin", ] -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" -version = "0.2.174" +version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" -version = "0.8.8" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if", - "windows-targets 0.53.2", + "winapi", ] [[package]] name = "libm" -version = "0.2.15" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" - -[[package]] -name = "libredox" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" -dependencies = [ - "bitflags 2.9.1", - "libc", - "redox_syscall 0.5.13", -] +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "linux-raw-sys" -version = "0.4.15" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "linux-raw-sys" -version = "0.6.5" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a385b1be4e5c3e362ad2ffa73c392e53f031eaa5b7d648e64cd87f27f6063d7" - -[[package]] -name = "linux-raw-sys" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "f0b5399f6804fbab912acbd8878ed3532d506b7c951b8f9f164ef90fef39e3f4" [[package]] name = "litemap" -version = "0.8.0" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "lock_api" @@ -3805,25 +3714,19 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" [[package]] name = "lru" -version = "0.12.5" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.15.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.14.5", ] -[[package]] -name = "lru-slab" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" - [[package]] name = "mappings" version = "0.7.0" @@ -3846,6 +3749,12 @@ dependencies = [ "regex-automata 0.1.10", ] +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "matchit" version = "0.8.4" @@ -3854,11 +3763,10 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "md-5" -version = "0.10.6" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "cfg-if", "digest", ] @@ -3881,7 +3789,7 @@ dependencies = [ "lasso", "measured-derive", "memchr", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "rustc-hash 1.1.0", "ryu", ] @@ -3892,10 +3800,10 @@ version = "0.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e6777fc80a575f9503d908c8b498782a6c3ee88a06cb416dc3941401e43b94" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -3911,15 +3819,15 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.5" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memmap2" -version = "0.9.5" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +checksum = "45fd3a57831bf88bc63f8cebc0cf956116276e97fef3966103e96416209f7c92" dependencies = [ "libc", ] @@ -3935,9 +3843,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +checksum = 
"5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] @@ -3993,9 +3901,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.9" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ "adler2", ] @@ -4008,26 +3925,26 @@ checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.48.0", ] [[package]] name = "mio" -version = "1.0.4" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", - "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.59.0", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", ] [[package]] name = "multimap" -version = "0.10.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "neon-shmem" @@ -4036,7 +3953,7 @@ dependencies = [ "ahash", "criterion", "foldhash", - "hashbrown 0.15.4 (git+https://github.com/quantumish/hashbrown.git?rev=6610e6d)", + "hashbrown 0.15.4", "libc", "lock_api", "nix 0.30.1", @@ -4051,6 +3968,17 @@ dependencies = [ "xxhash-rust", ] +[[package]] +name = "neonart" +version = "0.1.0" +dependencies = [ + "crossbeam-utils", + "rand 0.9.1", + "rand_distr 0.5.1", + "spin", + "tracing", +] + [[package]] name = "never-say-never" version = "6.6.666" @@ -4088,11 +4016,11 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", "cfg-if", "cfg_aliases", "libc", - "memoffset 0.9.1", + "memoffset 0.9.0", ] [[package]] @@ -4123,7 +4051,7 @@ version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", "crossbeam-channel", "filetime", "fsevent-sys", @@ -4157,9 +4085,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +checksum = "b05180d69e3da0e530ba2a1dae5110317e49e3b7f3d41be227dc5f92e49ee7af" dependencies = [ "num-bigint", "num-complex", @@ -4198,9 +4126,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.6" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +checksum = 
"1ba157ca0885411de85d6ca030ba7e2a83a28636056c7c699b07c8b6f7383214" dependencies = [ "num-traits", ] @@ -4232,9 +4160,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.45" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", @@ -4243,10 +4171,11 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ + "autocfg", "num-bigint", "num-integer", "num-traits", @@ -4264,9 +4193,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.17.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", @@ -4280,8 +4209,8 @@ checksum = "c38841cdd844847e3e7c8d29cef9dcfed8877f8f56f9071f77843ecf3baf937f" dependencies = [ "base64 0.13.1", "chrono", - "getrandom 0.2.16", - "http 0.2.12", + "getrandom 0.2.11", + "http 0.2.9", "rand 0.8.5", "serde", "serde_json", @@ -4293,36 +4222,30 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.21.3" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "once_cell_polyfill" -version = "1.70.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" -version = "11.1.5" +version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "openssl-probe" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" @@ -4346,7 +4269,7 @@ checksum = "10a8a7f5f6ba7c1b286c2fbca0454eaba116f63bbe69ed250b642d36fbb04d80" dependencies = [ "async-trait", "bytes", - "http 1.3.1", + "http 1.1.0", "opentelemetry", "reqwest", ] @@ -4359,7 +4282,7 @@ checksum = "91cf61a1868dacc576bf2b2a1c3e9ab150af7272909e80085c3173384fe11f76" dependencies = [ "async-trait", "futures-core", - "http 1.3.1", + "http 1.1.0", "opentelemetry", "opentelemetry-http", "opentelemetry-proto", @@ -4429,21 +4352,20 @@ dependencies = [ [[package]] name = "os_info" -version = "3.12.0" +version = "3.7.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0e1ac5fde8d43c34139135df8ea9ee9465394b2d8d20f032d38998f64afffc3" +checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" dependencies = [ "log", - "plist", "serde", - "windows-sys 0.52.0", + "winapi", ] [[package]] name = "outref" -version = "0.5.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" +checksum = "4030760ffd992bef45b0ae3f10ce1aba99e33464c90d14dd7c039884963ddc7a" [[package]] name = "overload" @@ -4476,9 +4398,9 @@ dependencies = [ [[package]] name = "p384" -version = "0.13.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe42f1670a52a47d448f14b6a5c61dd78fce51856e68edaa38f7ae3a46b8d6b6" +checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" dependencies = [ "elliptic-curve 0.13.8", "primeorder", @@ -4490,15 +4412,19 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "axum 0.8.1", "bytes", "camino", "clap", "futures", "hdrhistogram", + "http 1.1.0", "humantime", "humantime-serde", + "metrics", "pageserver_api", "pageserver_client", + "pageserver_client_grpc", "pageserver_page_api", "rand 0.8.5", "reqwest", @@ -4509,6 +4435,7 @@ dependencies = [ "tokio-util", "tonic 0.13.1", "tracing", + "url", "utils", "workspace_hack", ] @@ -4563,11 +4490,11 @@ dependencies = [ "hashlink", "hex", "hex-literal", - "http 1.3.1", + "http 1.1.0", "http-utils", "humantime", "humantime-serde", - "hyper 0.14.32", + "hyper 0.14.30", "indoc", "itertools 0.10.5", "jsonwebtoken", @@ -4581,6 +4508,7 @@ dependencies = [ "pageserver_client", "pageserver_compaction", "pageserver_page_api", + "peekable", "pem", "pin-project-lite", "postgres-protocol", @@ -4588,11 +4516,13 @@ dependencies = [ "postgres_backend", "postgres_connection", "postgres_ffi", + "postgres_ffi_types", "postgres_initdb", "posthog_client_lite", "pprof", "pq_proto", "procfs", + "prost 0.13.5", "rand 0.8.5", "range-set-blaze", "regex", @@ -4600,7 +4530,7 @@ dependencies = [ "reqwest", "rpds", "rstest", - "rustls 0.23.28", + "rustls 0.23.27", "scopeguard", "send-future", "serde", @@ -4626,7 +4556,7 @@ dependencies = [ "toml_edit", "tonic 0.13.1", "tonic-reflection", - "tower", + "tower 0.5.2", "tracing", "tracing-utils", "twox-hash 1.6.3", @@ -4657,7 +4587,9 @@ dependencies = [ "nix 0.30.1", "once_cell", "postgres_backend", - "postgres_ffi", + "postgres_ffi_types", + "postgres_versioninfo", + "posthog_client_lite", "rand 0.8.5", "remote_storage", "reqwest", @@ -4668,6 +4600,7 @@ dependencies = [ "strum", "strum_macros", "thiserror 1.0.69", + "tracing", "tracing-utils", "utils", ] @@ -4681,6 +4614,7 @@ dependencies = [ "futures", "http-utils", "pageserver_api", + "postgres_versioninfo", "reqwest", "serde", "thiserror 1.0.69", @@ -4692,6 +4626,38 @@ dependencies = [ "workspace_hack", ] +[[package]] +name = "pageserver_client_grpc" +version = "0.1.0" +dependencies = [ + "anyhow", + "arc-swap", + "async-trait", + "bytes", + "chrono", + "compute_api", + "dashmap 5.5.0", + "futures", + "http 1.1.0", + "hyper 1.6.0", + "hyper-util", + "metrics", + "pageserver_api", + "pageserver_page_api", + "priority-queue", + "rand 0.8.5", + "scopeguard", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tokio-util", + "tonic 0.13.1", + "tower 0.4.13", + "tracing", + "utils", + "uuid", +] + [[package]] name = "pageserver_compaction" version = "0.1.0" @@ -4719,11 
+4685,18 @@ dependencies = [ name = "pageserver_page_api" version = "0.1.0" dependencies = [ + "anyhow", "bytes", + "futures", "pageserver_api", - "postgres_ffi", + "postgres_ffi_types", "prost 0.13.5", + "prost-types 0.13.5", + "strum", + "strum_macros", "thiserror 1.0.69", + "tokio", + "tokio-util", "tonic 0.13.1", "tonic-build", "utils", @@ -4732,9 +4705,9 @@ dependencies = [ [[package]] name = "papaya" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af228bb1296c9b044ee75e2a2325409c2d899bcfcc6150e5e41f148e0a87dd20" +checksum = "6827e3fc394523c21d4464d02c0bb1c19966ea4a58a9844ad6d746214179d2bc" dependencies = [ "equivalent", "seize", @@ -4742,9 +4715,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" +checksum = "e52c774a4c39359c1d1c52e43f73dd91a75a614652c825408eec30c95a9b2067" [[package]] name = "parking_lot" @@ -4759,12 +4732,12 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.4" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.11", + "parking_lot_core 0.9.8", ] [[package]] @@ -4783,28 +4756,28 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.11" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.13", + "redox_syscall 0.3.5", "smallvec", - "windows-targets 0.52.6", + "windows-targets 0.48.0", ] [[package]] name = "parquet" -version = "53.4.1" +version = "53.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f8cf58b29782a7add991f655ff42929e31a7859f5319e53db9e39a714cb113c" +checksum = "f0fbf928021131daaa57d334ca8e3904fe9ae22f73c56244fc7db9b04eedc3d8" dependencies = [ "ahash", "bytes", "chrono", "half", - "hashbrown 0.15.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.14.5", "num", "num-bigint", "paste", @@ -4817,14 +4790,14 @@ dependencies = [ [[package]] name = "parquet_derive" -version = "53.4.1" +version = "53.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a6a70e8ee504da797f192a2f7cf9ce61262a1026794f7cdf988daefa5d0814" +checksum = "86e9fcfae007533a06b580429a3f7e07cb833ec8aa37c041c16563e7918f057e" dependencies = [ "parquet", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -4840,9 +4813,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.15" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "pbkdf2" @@ -4857,12 +4830,21 @@ dependencies = [ ] [[package]] -name = "pem" -version = "3.0.5" +name = "peekable" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +checksum = 
"225f9651e475709164f871dc2f5724956be59cb9edb055372ffeeab01ec2d20b" dependencies = [ - "base64 0.22.1", + "smallvec", +] + +[[package]] +name = "pem" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +dependencies = [ + "base64 0.21.7", "serde", ] @@ -4883,67 +4865,57 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "petgraph" -version = "0.6.5" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" dependencies = [ - "fixedbitset 0.4.2", - "indexmap 2.9.0", -] - -[[package]] -name = "petgraph" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" -dependencies = [ - "fixedbitset 0.5.7", - "indexmap 2.9.0", + "fixedbitset", + "indexmap 1.9.3", ] [[package]] name = "phf" -version = "0.11.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" dependencies = [ "phf_shared", ] [[package]] name = "phf_shared" -version = "0.11.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.10" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.10" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -4957,7 +4929,7 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ - "der 0.7.10", + "der 0.7.8", "pkcs8 0.10.2", "spki 0.7.3", ] @@ -4978,34 +4950,21 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.10", + "der 0.7.8", "spki 0.7.3", ] [[package]] name = "pkg-config" -version = "0.3.32" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" - -[[package]] 
-name = "plist" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d77244ce2d584cd84f6a15f86195b8c9b2a0dfbfd817c09e0464244091a58ed" -dependencies = [ - "base64 0.22.1", - "indexmap 2.9.0", - "quick-xml 0.37.5", - "serde", - "time", -] +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "plotters" -version = "0.3.7" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +checksum = "2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" dependencies = [ "num-traits", "plotters-backend", @@ -5016,15 +4975,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.7" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" +checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" [[package]] name = "plotters-svg" -version = "0.3.7" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" dependencies = [ "plotters-backend", ] @@ -5041,9 +5000,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "portable-atomic-util" @@ -5057,7 +5016,7 @@ dependencies = [ [[package]] name = "postgres" version = "0.19.7" -source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#f3cf448febde5fd298071d54d568a9c875a7a62b" +source = "git+https://github.com/neondatabase/rust-postgres.git?branch=neon#1f21e7959a96a34dcfbfce1b14b73286cdadffe9" dependencies = [ "bytes", "fallible-iterator", @@ -5089,7 +5048,7 @@ dependencies = [ name = "postgres-protocol2" version = "0.1.0" dependencies = [ - "base64 0.20.0", + "base64 0.22.1", "byteorder", "bytes", "fallible-iterator", @@ -5129,8 +5088,8 @@ dependencies = [ "bytes", "once_cell", "pq_proto", - "rustls 0.23.28", - "rustls-pemfile 2.2.0", + "rustls 0.23.27", + "rustls-pemfile 2.1.1", "serde", "thiserror 1.0.69", "tokio", @@ -5157,15 +5116,17 @@ name = "postgres_ffi" version = "0.1.0" dependencies = [ "anyhow", - "bindgen 0.71.1", + "bindgen", "bytes", "crc32c", "criterion", "env_logger", "log", - "memoffset 0.9.1", + "memoffset 0.9.0", "once_cell", "postgres", + "postgres_ffi_types", + "postgres_versioninfo", "pprof", "regex", "serde", @@ -5174,17 +5135,37 @@ dependencies = [ "utils", ] +[[package]] +name = "postgres_ffi_types" +version = "0.1.0" +dependencies = [ + "thiserror 1.0.69", + "workspace_hack", +] + [[package]] name = "postgres_initdb" version = "0.1.0" dependencies = [ "anyhow", "camino", + "postgres_versioninfo", "thiserror 1.0.69", "tokio", "workspace_hack", ] +[[package]] +name = "postgres_versioninfo" +version = "0.1.0" +dependencies = [ + "anyhow", + "serde", + "serde_repr", + "thiserror 1.0.69", + "workspace_hack", +] + [[package]] name = "posthog_client_lite" version = "0.1.0" @@ -5203,15 +5184,6 @@ dependencies = [ "workspace_hack", ] -[[package]] -name = "potential_utf" -version = "0.1.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" -dependencies = [ - "zerovec", -] - [[package]] name = "powerfmt" version = "0.2.0" @@ -5234,7 +5206,7 @@ dependencies = [ "log", "nix 0.26.4", "once_cell", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "prost 0.12.6", "prost-build 0.12.6", "prost-derive 0.12.6", @@ -5254,7 +5226,7 @@ dependencies = [ "anyhow", "backtrace", "flate2", - "inferno 0.12.2", + "inferno 0.12.0", "num", "paste", "prost 0.13.5", @@ -5262,12 +5234,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.21" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" -dependencies = [ - "zerocopy", -] +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "pq_proto" @@ -5285,12 +5254,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.35" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061c1221631e079b26479d25bbf2275bfe5917ae8419cd7e34f13bfc2aa7539a" +checksum = "8d3928fb5db768cb86f891ff014f0144589297e3c6a1aba6ed7cecfdace270c7" dependencies = [ "proc-macro2", - "syn", + "syn 2.0.100", ] [[package]] @@ -5303,10 +5272,21 @@ dependencies = [ ] [[package]] -name = "proc-macro2" -version = "1.0.95" +name = "priority-queue" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "5676d703dda103cbb035b653a9f11448c0a7216c7926bd35fcb5865475d0c970" +dependencies = [ + "autocfg", + "equivalent", + "indexmap 2.9.0", +] + +[[package]] +name = "proc-macro2" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -5317,13 +5297,13 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", "chrono", "flate2", "hex", "lazy_static", "procfs-core", - "rustix 0.38.44", + "rustix", ] [[package]] @@ -5332,7 +5312,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", "chrono", "hex", ] @@ -5348,7 +5328,7 @@ dependencies = [ "lazy_static", "libc", "memchr", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "procfs", "thiserror 1.0.69", ] @@ -5380,37 +5360,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.5.0", + "heck", "itertools 0.12.1", "log", "multimap", "once_cell", - "petgraph 0.6.5", + "petgraph", "prettyplease", "prost 0.12.6", "prost-types 0.12.6", "regex", - "syn", + "syn 2.0.100", "tempfile", ] [[package]] name = "prost-build" -version = "0.13.5" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ - "heck 0.5.0", - "itertools 0.14.0", + "bytes", 
+ "heck", + "itertools 0.12.1", "log", "multimap", "once_cell", - "petgraph 0.7.1", + "petgraph", "prettyplease", "prost 0.13.5", "prost-types 0.13.5", "regex", - "syn", + "syn 2.0.100", "tempfile", ] @@ -5424,7 +5405,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -5434,10 +5415,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.12.1", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -5472,7 +5453,7 @@ dependencies = [ "aws-config", "aws-sdk-iam", "aws-sigv4", - "base64 0.13.1", + "base64 0.22.1", "bstr", "bytes", "camino", @@ -5495,12 +5476,12 @@ dependencies = [ "hex", "hmac", "hostname", - "http 1.3.1", + "http 1.1.0", "http-body-util", "http-utils", "humantime", "humantime-serde", - "hyper 0.14.32", + "hyper 0.14.30", "hyper 1.6.0", "hyper-util", "indexmap 2.9.0", @@ -5516,7 +5497,7 @@ dependencies = [ "opentelemetry", "p256 0.13.2", "papaya", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "parquet", "parquet_derive", "pbkdf2", @@ -5537,9 +5518,9 @@ dependencies = [ "rsa", "rstest", "rustc-hash 1.1.0", - "rustls 0.23.28", - "rustls-native-certs 0.8.1", - "rustls-pemfile 2.2.0", + "rustls 0.23.27", + "rustls-native-certs 0.8.0", + "rustls-pemfile 2.1.1", "scopeguard", "serde", "serde_json", @@ -5574,20 +5555,20 @@ dependencies = [ "walkdir", "workspace_hack", "x509-cert", - "zerocopy", + "zerocopy 0.8.24", ] [[package]] name = "quanta" -version = "0.12.6" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3ab5a9d756f0d97bdc89019bd2e4ea098cf9cde50ee7564dde6b81ccc8f06c7" +checksum = "3bd1fe6824cea6538803de3ff1bc0cf3949024db3d43c9643024bfb33a807c0e" dependencies = [ "crossbeam-utils", "libc", "once_cell", "raw-cpuid", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", "web-sys", "winapi", ] @@ -5613,73 +5594,18 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.37.5" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb" +checksum = "f22f29bdff3987b4d8632ef95fd6424ec7e4e0a57e2f4fc63e489e75357f6a03" dependencies = [ "memchr", ] -[[package]] -name = "quinn" -version = "0.11.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" -dependencies = [ - "bytes", - "cfg_aliases", - "pin-project-lite", - "quinn-proto", - "quinn-udp", - "rustc-hash 2.1.1", - "rustls 0.23.28", - "socket2", - "thiserror 2.0.12", - "tokio", - "tracing", - "web-time", -] - -[[package]] -name = "quinn-proto" -version = "0.11.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" -dependencies = [ - "bytes", - "getrandom 0.3.3", - "lru-slab", - "rand 0.9.1", - "ring", - "rustc-hash 2.1.1", - "rustls 0.23.28", - "rustls-pki-types", - "slab", - "thiserror 2.0.12", - "tinyvec", - "tracing", - "web-time", -] - -[[package]] -name = "quinn-udp" -version = "0.5.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" -dependencies = [ - "cfg_aliases", - "libc", - "once_cell", - "socket2", - "tracing", - "windows-sys 
0.59.0", -] - [[package]] name = "quote" -version = "1.0.40" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" dependencies = [ "proc-macro2", ] @@ -5769,7 +5695,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.11", ] [[package]] @@ -5824,18 +5750,18 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.5.0" +version = "11.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6df7ab838ed27997ba19a4664507e6f82b41fe6e20be42929332156e5e85146" +checksum = "c6928fa44c097620b706542d428957635951bade7143269085389d42c8a4927e" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", ] [[package]] name = "rayon" -version = "1.10.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ "either", "rayon-core", @@ -5843,19 +5769,21 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ + "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", + "num_cpus", ] [[package]] name = "rcgen" -version = "0.13.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +checksum = "54077e1872c46788540de1ea3d7f4ccb1983d12f9aa909b234468676c1a36779" dependencies = [ "pem", "ring", @@ -5866,9 +5794,9 @@ dependencies = [ [[package]] name = "redis" -version = "0.29.5" +version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc42f3a12fd4408ce64d8efef67048a924e543bd35c6591c0447fda9054695f" +checksum = "b110459d6e323b7cda23980c46c77157601199c9da6241552b284cd565a7a133" dependencies = [ "arc-swap", "bytes", @@ -5878,8 +5806,8 @@ dependencies = [ "num-bigint", "percent-encoding", "pin-project-lite", - "rustls 0.23.28", - "rustls-native-certs 0.8.1", + "rustls 0.23.27", + "rustls-native-certs 0.8.0", "ryu", "sha1_smol", "socket2", @@ -5909,23 +5837,23 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.13" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ - "bitflags 2.9.1", + "bitflags 1.3.2", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -5939,20 +5867,20 @@ dependencies = [ 
[[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax 0.8.2", ] [[package]] name = "regex-lite" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" +checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e" [[package]] name = "regex-syntax" @@ -5962,15 +5890,15 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "relative-path" -version = "1.9.3" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" +checksum = "c707298afce11da2efef2f600116fa93ffa7a032b5d7b628aa17711ec81383ca" [[package]] name = "remote_storage" @@ -6017,62 +5945,64 @@ dependencies = [ [[package]] name = "replace_with" -version = "0.1.8" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51743d3e274e2b18df81c4dc6caf8a5b8e15dbe799e0dca05c7617380094e884" +checksum = "e3a8614ee435691de62bcffcf4a66d91b3594bf1428a5722e79103249a095690" [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", - "http 1.3.1", - "http-body 1.0.1", + "http 1.1.0", + "http-body 1.0.0", "http-body-util", "hyper 1.6.0", - "hyper-rustls 0.27.7", + "hyper-rustls 0.26.0", "hyper-util", + "ipnet", "js-sys", "log", + "mime", + "once_cell", "percent-encoding", "pin-project-lite", - "quinn", - "rustls 0.23.28", - "rustls-native-certs 0.8.1", + "rustls 0.22.4", + "rustls-native-certs 0.7.0", + "rustls-pemfile 2.1.1", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper 0.1.2", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls 0.25.0", "tokio-util", - "tower", - "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.1", + "webpki-roots", + "winreg", ] [[package]] name = "reqwest-middleware" -version = "0.4.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57f17d28a6e6acfe1733fe24bcd30774d13bffa4b8a22535b4c8c98423088d4e" +checksum = "d1ccd3b55e711f91a9885a2fa6fbbb2e39db1776420b062efc058c6410f7e5e3" dependencies = [ "anyhow", "async-trait", - "http 1.3.1", + "http 1.1.0", "reqwest", "serde", "thiserror 1.0.69", @@ -6088,8 +6018,8 @@ dependencies = [ "anyhow", "async-trait", "futures", - "getrandom 0.2.16", - "http 1.3.1", + "getrandom 0.2.11", + "http 1.1.0", "hyper 1.6.0", "parking_lot 0.11.2", "reqwest", @@ -6103,15 
+6033,15 @@ dependencies = [ [[package]] name = "reqwest-tracing" -version = "0.5.8" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d70ea85f131b2ee9874f0b160ac5976f8af75f3c9badfe0d955880257d10bd83" +checksum = "73e6153390585f6961341b50e5a1931d6be6dee4292283635903c26ef9d980d2" dependencies = [ "anyhow", "async-trait", - "getrandom 0.2.16", - "http 1.3.1", - "matchit", + "getrandom 0.2.11", + "http 1.1.0", + "matchit 0.8.4", "opentelemetry", "reqwest", "reqwest-middleware", @@ -6166,7 +6096,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.11", "libc", "untrusted", "windows-sys 0.52.0", @@ -6174,9 +6104,9 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.10.2" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" +checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" dependencies = [ "libc", ] @@ -6187,8 +6117,8 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "496c1d3718081c45ba9c31fbfc07417900aa96f4070ff90dc29961836b7a9945" dependencies = [ - "http 0.2.12", - "hyper 0.14.32", + "http 0.2.9", + "hyper 0.14.30", "lazy_static", "percent-encoding", "regex", @@ -6205,9 +6135,9 @@ dependencies = [ [[package]] name = "rsa" -version = "0.9.8" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" +checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" dependencies = [ "const-oid", "digest", @@ -6248,7 +6178,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn", + "syn 2.0.100", "unicode-ident", ] @@ -6264,9 +6194,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.25" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -6282,37 +6212,24 @@ checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.38.44" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", "errno", "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustix" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" -dependencies = [ - "bitflags 2.9.1", - "errno", - "libc", - "linux-raw-sys 0.9.4", - "windows-sys 0.59.0", + "linux-raw-sys 0.4.14", + "windows-sys 0.52.0", ] [[package]] @@ -6329,11 +6246,24 @@ dependencies = [ [[package]] name 
= "rustls" -version = "0.23.28" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.8", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls" +version = "0.23.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" dependencies = [ - "aws-lc-rs", "log", "once_cell", "ring", @@ -6345,55 +6275,66 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.6.3" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.4", + "rustls-pemfile 1.0.2", "schannel", - "security-framework 2.11.1", + "security-framework", ] [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", + "rustls-pemfile 2.1.1", "rustls-pki-types", "schannel", - "security-framework 3.2.0", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcaf18a4f2be7326cd874a5fa579fae794320a0f388d365dca7e480e55f83f8a" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.1", + "rustls-pki-types", + "schannel", + "security-framework", ] [[package]] name = "rustls-pemfile" -version = "1.0.4" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b" dependencies = [ "base64 0.21.7", ] [[package]] name = "rustls-pemfile" -version = "2.2.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab" dependencies = [ + "base64 0.21.7", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" -dependencies = [ - "web-time", - "zeroize", -] +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" [[package]] name = "rustls-webpki" @@ -6405,13 +6346,23 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.102.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.103.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" dependencies = [ - 
"aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -6419,15 +6370,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.21" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "safekeeper" @@ -6448,27 +6399,28 @@ dependencies = [ "fail", "futures", "hex", - "http 1.3.1", + "http 1.1.0", "http-utils", "humantime", - "hyper 0.14.32", + "hyper 0.14.30", "itertools 0.10.5", "jsonwebtoken", "metrics", "once_cell", "pageserver_api", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "pem", "postgres-protocol", "postgres_backend", "postgres_ffi", + "postgres_versioninfo", "pprof", "pq_proto", "rand 0.8.5", "regex", "remote_storage", "reqwest", - "rustls 0.23.28", + "rustls 0.23.27", "safekeeper_api", "safekeeper_client", "scopeguard", @@ -6506,6 +6458,7 @@ dependencies = [ "const_format", "pageserver_api", "postgres_ffi", + "postgres_versioninfo", "pq_proto", "serde", "serde_json", @@ -6537,11 +6490,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -6555,9 +6508,9 @@ dependencies = [ [[package]] name = "scopeguard" -version = "1.2.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" @@ -6571,12 +6524,9 @@ dependencies = [ [[package]] name = "sd-notify" -version = "0.4.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b943eadf71d8b69e661330cb0e2656e31040acf21ee7708e2c238a0ec6af2bf4" -dependencies = [ - "libc", -] +checksum = "621e3680f3e07db4c9c2c3fb07c6223ab2fab2e54bd3c04c3ae037990f428c32" [[package]] name = "seahash" @@ -6605,7 +6555,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.10", + "der 0.7.8", "generic-array", "pkcs8 0.10.2", "serdect", @@ -6615,25 +6565,12 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.1" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" dependencies = [ - "bitflags 2.9.1", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" -dependencies = [ - "bitflags 
2.9.1", - "core-foundation 0.10.1", + "bitflags 1.3.2", + "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", @@ -6641,9 +6578,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" dependencies = [ "core-foundation-sys", "libc", @@ -6661,9 +6598,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "send-future" @@ -6679,7 +6616,7 @@ checksum = "255914a8e53822abd946e2ce8baa41d4cded6b8e938913b7f7b9da5b7ab44335" dependencies = [ "httpdate", "reqwest", - "rustls 0.23.28", + "rustls 0.23.27", "sentry-backtrace", "sentry-contexts", "sentry-core", @@ -6687,7 +6624,7 @@ dependencies = [ "sentry-tracing", "tokio", "ureq", - "webpki-roots 0.26.11", + "webpki-roots", ] [[package]] @@ -6770,15 +6707,15 @@ dependencies = [ [[package]] name = "seq-macro" -version = "0.3.6" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc711410fbe7399f390ca1c3b60ad0f53f80e95c5eb935e52268a0e2cd49acc" +checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -6795,20 +6732,33 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", +] + +[[package]] +name = "serde_html_form" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" +dependencies = [ + "form_urlencoded", + "indexmap 2.9.0", + "itoa", + "ryu", + "serde", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "83c8e735a073ccf5be70aa8066aa984eaf2fa000db6c8d0100ae605b366d31ed" dependencies = [ "itoa", "memchr", @@ -6818,9 +6768,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.17" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" dependencies = [ "itoa", "serde", @@ -6838,10 +6788,21 @@ dependencies = [ ] [[package]] -name = "serde_spanned" -version = "0.6.9" +name = "serde_repr" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "serde_spanned" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -6860,15 +6821,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "2.3.3" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ff71d2c147a7b57362cead5e22f772cd52f6ab31cfcd9edcd7f6aeb2a0afbe" +checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" dependencies = [ - "base64 0.13.1", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", + "indexmap 2.9.0", "serde", + "serde_derive", "serde_json", "serde_with_macros", "time", @@ -6876,14 +6839,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "2.3.3" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881b6f881b17d13214e5d494c939ebab463d01264ce1811e9d4ac3a882e7695f" +checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -6909,15 +6872,15 @@ dependencies = [ [[package]] name = "sha1_smol" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" [[package]] name = "sha2" -version = "0.10.9" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", @@ -6927,18 +6890,18 @@ dependencies = [ [[package]] name = "sha2-asm" -version = "0.6.4" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +checksum = "f27ba7066011e3fb30d808b51affff34f0a66d3a03a58edd787c6e420e40e44e" dependencies = [ "cc", ] [[package]] name = "sharded-slab" -version = "0.1.7" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] @@ -6951,9 +6914,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook" -version = "0.3.18" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" +checksum = "732768f1176d21d09e076c23a93123d40bba92d50c4058da34d45c8de8e682b9" dependencies = [ "libc", "signal-hook-registry", @@ -6961,9 +6924,9 @@ dependencies = [ [[package]] name = "signal-hook-registry" -version = "1.4.5" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", 
] @@ -6990,39 +6953,42 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.6.3" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.12", + "thiserror 1.0.69", "time", ] [[package]] name = "siphasher" -version = "1.0.1" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" [[package]] name = "slab" -version = "0.4.10" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] [[package]] name = "smallvec" -version = "1.15.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol_str" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd538fb6910ac1099850255cf94a94df6551fbdd602454387d0adb2d1ca6dead" +checksum = "e6845563ada680337a52d43bb0b29f396f2d911616f6573012645b9e3d048a49" dependencies = [ "serde", ] @@ -7042,6 +7008,9 @@ name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] [[package]] name = "spinning_top" @@ -7069,7 +7038,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.10", + "der 0.7.8", ] [[package]] @@ -7104,9 +7073,9 @@ dependencies = [ "hyper-util", "metrics", "once_cell", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "prost 0.13.5", - "rustls 0.23.28", + "rustls 0.23.27", "tokio", "tokio-rustls 0.26.2", "tonic 0.13.1", @@ -7126,6 +7095,7 @@ dependencies = [ "chrono", "clap", "clashmap", + "compute_api", "control_plane", "cron", "diesel", @@ -7137,7 +7107,8 @@ dependencies = [ "hex", "http-utils", "humantime", - "hyper 0.14.32", + "humantime-serde", + "hyper 0.14.30", "itertools 0.10.5", "json-structural-diff", "lasso", @@ -7147,12 +7118,13 @@ dependencies = [ "pageserver_api", "pageserver_client", "postgres_connection", + "posthog_client_lite", "rand 0.8.5", "regex", "reqwest", "routerify", - "rustls 0.23.28", - "rustls-native-certs 0.8.1", + "rustls 0.23.27", + "rustls-native-certs 0.8.0", "safekeeper_api", "safekeeper_client", "scoped-futures", @@ -7204,8 +7176,8 @@ dependencies = [ "postgres_ffi", "remote_storage", "reqwest", - "rustls 0.23.28", - "rustls-native-certs 0.8.1", + "rustls 0.23.27", + "rustls-native-certs 0.8.0", "serde", "serde_json", "storage_controller_client", @@ -7249,15 +7221,20 @@ checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb" [[package]] name = "stringprep" -version = "0.1.5" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" dependencies = [ "unicode-bidi", "unicode-normalization", - "unicode-properties", ] +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "strsim" version = "0.11.1" @@ -7276,30 +7253,30 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", "rustversion", - "syn", + "syn 2.0.100", ] [[package]] name = "subtle" -version = "2.6.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "svg_fmt" -version = "0.4.5" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0193cc4331cfd2f3d2011ef287590868599a2f33c3e69bc22c1a3d3acf9e02fb" +checksum = "20e16a0f46cf5fd675563ef54f26e83e20f2366bcf027bcb3cc3ed2b98aaf2ca" [[package]] name = "symbolic-common" -version = "12.15.5" +version = "12.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a1150bdda9314f6cfeeea801c23f5593c6e6a6c72e64f67e48d723a12b8efdb" +checksum = "366f1b4c6baf6cfefc234bbd4899535fca0b06c74443039a73f6dfb2fad88d77" dependencies = [ "debugid", "memmap2", @@ -7309,9 +7286,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.15.5" +version = "12.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f66537def48fbc704a92e4fdaab7833bc7cb2255faca8182592fb5fa617eb82" +checksum = "aba05ba5b9962ea5617baf556293720a8b2d0a282aa14ee4bf10e22efc7da8c8" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -7320,9 +7297,20 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.104" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -7340,29 +7328,26 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" -dependencies = [ - "futures-core", -] +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" [[package]] name = "synstructure" -version = "0.13.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "sysinfo" -version = "0.29.11" +version = "0.29.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666" +checksum = "165d6d8539689e3d3bc8b98ac59541e1f21c7de7c85d60dc80e43ae0ed2113db" dependencies = [ "cfg-if", "core-foundation-sys", @@ -7375,9 +7360,9 @@ dependencies = [ [[package]] name = "tar" -version = "0.4.44" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d863878d212c87a19c1a610eb53bb01fe12951c0501cf5a0d65f724914a667a" +checksum = "b16afcea1f22891c49a00c751c7b63b2233284064f11a200fc624137c51e2ddb" dependencies = [ "filetime", "libc", @@ -7386,14 +7371,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.20.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ - "fastrand 2.3.0", - "getrandom 0.3.3", + "cfg-if", + "fastrand 2.2.0", "once_cell", - "rustix 1.0.7", + "rustix", "windows-sys 0.59.0", ] @@ -7424,7 +7409,7 @@ checksum = "78ea17a2dc368aeca6f554343ced1b1e31f76d63683fa8016e5844bd7a5144a1" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -7446,7 +7431,7 @@ checksum = "888d0c3c6db53c0fdab160d2ed5e12ba745383d3e85813f2ea0f2b1475ab553f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -7460,11 +7445,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.11", ] [[package]] @@ -7475,27 +7460,28 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "thread_local" -version = "1.1.9" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ "cfg-if", + "once_cell", ] [[package]] @@ -7542,11 +7528,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ - "deranged 0.4.0", + "deranged", "itoa", "js-sys", "num-conv", @@ -7558,15 +7544,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = 
"time-macros" -version = "0.2.22" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -7583,9 +7569,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.1" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" dependencies = [ "displaydoc", "zerovec", @@ -7603,9 +7589,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -7634,20 +7620,20 @@ checksum = "2d2e76690929402faae40aebdda620a2c0e25dd6d3b9afe48867dfd95991f4bd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "tokio" -version = "1.45.1" +version = "1.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" +checksum = "492a604e2fd7f814268a378409e6c92b5525d747d10db9a229723f55a417958c" dependencies = [ "backtrace", "bytes", "libc", - "mio 1.0.4", - "parking_lot 0.12.4", + "mio 1.0.3", + "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2", @@ -7689,7 +7675,17 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", +] + +[[package]] +name = "tokio-pipe" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f213a84bffbd61b8fa0ba8a044b4bbe35d471d0b518867181e82bd5c15542784" +dependencies = [ + "libc", + "tokio", ] [[package]] @@ -7704,7 +7700,7 @@ dependencies = [ "futures-channel", "futures-util", "log", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "percent-encoding", "phf", "pin-project-lite", @@ -7724,7 +7720,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04fb792ccd6bbcd4bba408eb8a292f70fc4a3589e5d793626f45190e6454b6ab" dependencies = [ "ring", - "rustls 0.23.28", + "rustls 0.23.27", "tokio", "tokio-postgres", "tokio-rustls 0.26.2", @@ -7738,7 +7734,7 @@ dependencies = [ "bytes", "fallible-iterator", "futures-util", - "parking_lot 0.12.4", + "parking_lot 0.12.1", "pin-project-lite", "postgres-protocol2", "postgres-types2", @@ -7750,29 +7746,40 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.1" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" dependencies = [ "rustls 0.21.12", "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ - "rustls 0.23.28", + "rustls 0.23.27", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -7808,37 +7815,38 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.26.2" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9daff607c6d2bf6c16fd681ccb7eecc83e4e2cdc1ca067ffaadfca5de7f084" +checksum = "be4bf6fecd69fcdede0ec680aaf474cdab988f9de6bc73d3758f0160e3b7025a" dependencies = [ "futures-util", "log", "tokio", - "tungstenite 0.26.2", + "tungstenite 0.26.1", ] [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "futures-util", - "hashbrown 0.15.4 (registry+https://github.com/rust-lang/crates.io-index)", + "hashbrown 0.14.5", "pin-project-lite", "tokio", + "tracing", ] [[package]] name = "toml" -version = "0.8.23" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" dependencies = [ "serde", "serde_spanned", @@ -7848,49 +7856,51 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.11" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.27" +version = "0.22.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" dependencies = [ "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", - "toml_write", - "winnow 0.7.11", + "winnow", ] -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - [[package]] name = "tonic" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ + "async-stream", "async-trait", + "axum 0.7.9", "base64 0.22.1", "bytes", - "http 1.3.1", - "http-body 1.0.1", + "h2 0.4.4", + "http 1.1.0", + "http-body 1.0.0", "http-body-util", + "hyper 1.6.0", + "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", "prost 0.13.5", + "socket2", + "tokio", "tokio-stream", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -7903,12 +7913,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" 
dependencies = [ "async-trait", - "axum", + "axum 0.8.1", "base64 0.22.1", "bytes", - "h2 0.4.10", - "http 1.3.1", - "http-body 1.0.1", + "flate2", + "h2 0.4.4", + "http 1.1.0", + "http-body 1.0.0", "http-body-util", "hyper 1.6.0", "hyper-timeout", @@ -7916,15 +7927,16 @@ dependencies = [ "percent-encoding", "pin-project", "prost 0.13.5", - "rustls-native-certs 0.8.1", + "rustls-native-certs 0.8.0", "socket2", "tokio", "tokio-rustls 0.26.2", "tokio-stream", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", + "zstd", ] [[package]] @@ -7935,10 +7947,10 @@ checksum = "eac6f67be712d12f0b41328db3137e0d0757645d8904b4cb7d51cd9c2279e847" dependencies = [ "prettyplease", "proc-macro2", - "prost-build 0.13.5", + "prost-build 0.13.3", "prost-types 0.13.5", "quote", - "syn", + "syn 2.0.100", ] [[package]] @@ -7954,6 +7966,26 @@ dependencies = [ "tonic 0.13.1", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.5.2" @@ -7965,7 +7997,7 @@ dependencies = [ "indexmap 2.9.0", "pin-project-lite", "slab", - "sync_wrapper 1.0.2", + "sync_wrapper 1.0.1", "tokio", "tokio-util", "tower-layer", @@ -7975,20 +8007,17 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "base64 0.22.1", - "bitflags 2.9.1", + "bitflags 2.8.0", "bytes", - "futures-util", - "http 1.3.1", - "http-body 1.0.1", - "iri-string", + "http 1.1.0", + "http-body 1.0.0", "mime", "pin-project-lite", - "tower", "tower-layer", "tower-service", "tracing", @@ -8006,7 +8035,7 @@ name = "tower-otel" version = "0.2.0" source = "git+https://github.com/mattiapenati/tower-otel?rev=56a7321053bcb72443888257b622ba0d43a11fcd#56a7321053bcb72443888257b622ba0d43a11fcd" dependencies = [ - "http 1.3.1", + "http 1.1.0", "opentelemetry", "pin-project", "tower-layer", @@ -8035,32 +8064,31 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.3" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" dependencies = [ "crossbeam-channel", - "thiserror 1.0.69", "time", "tracing-subscriber", ] [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", 
"valuable", @@ -8068,9 +8096,9 @@ dependencies = [ [[package]] name = "tracing-error" -version = "0.2.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" dependencies = [ "tracing", "tracing-subscriber", @@ -8154,14 +8182,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" dependencies = [ "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "tracing-utils" version = "0.1.0" dependencies = [ - "hyper 0.14.32", + "hyper 0.14.30", "opentelemetry", "opentelemetry-otlp", "opentelemetry-semantic-conventions", @@ -8188,7 +8216,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.3.1", + "http 1.1.0", "httparse", "log", "rand 0.8.5", @@ -8200,18 +8228,19 @@ dependencies = [ [[package]] name = "tungstenite" -version = "0.26.2" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4793cb5e56680ecbb1d843515b23b6de9a75eb04b66643e256a396d43be33c13" +checksum = "413083a99c579593656008130e29255e54dcaae495be556cc26888f211648c24" dependencies = [ + "byteorder", "bytes", "data-encoding", - "http 1.3.1", + "http 1.1.0", "httparse", "log", - "rand 0.9.1", + "rand 0.8.5", "sha1", - "thiserror 2.0.12", + "thiserror 2.0.11", "utf-8", ] @@ -8246,9 +8275,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.18.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "uname" @@ -8261,15 +8290,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.18" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-normalization" @@ -8280,29 +8309,17 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-properties" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" - -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - [[package]] name = "unicode-width" -version = "0.2.1" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" -version = "0.2.6" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +checksum = 
"f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "untrusted" @@ -8312,17 +8329,17 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.12.1" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" +checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ "base64 0.22.1", "log", "once_cell", - "rustls 0.23.28", + "rustls 0.23.27", "rustls-pki-types", "url", - "webpki-roots 0.26.11", + "webpki-roots", ] [[package]] @@ -8333,7 +8350,7 @@ dependencies = [ "bytes", "io-uring", "libc", - "linux-raw-sys 0.6.5", + "linux-raw-sys 0.6.4", ] [[package]] @@ -8350,9 +8367,9 @@ dependencies = [ [[package]] name = "urlencoding" -version = "2.1.3" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" +checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" [[package]] name = "utf-8" @@ -8360,6 +8377,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + [[package]] name = "utf8_iter" version = "1.0.4" @@ -8368,9 +8391,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "utf8parse" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "utils" @@ -8428,34 +8451,32 @@ dependencies = [ [[package]] name = "uuid" -version = "1.17.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "getrandom 0.3.3", - "js-sys", + "getrandom 0.2.11", "serde", - "wasm-bindgen", ] [[package]] name = "valuable" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "version_check" -version = "0.9.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vm_monitor" version = "0.1.0" dependencies = [ "anyhow", - "axum", + "axum 0.8.1", "cgroups-rs", "clap", "futures", @@ -8477,9 +8498,9 @@ checksum = "5c3082ca00d5a5ef149bb8b555a72ae84c9c59f7250f013ac822ac2e49b19c64" [[package]] name = "waker-fn" -version = "1.2.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" +checksum = 
"f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "wal_craft" @@ -8509,6 +8530,7 @@ dependencies = [ "futures", "pageserver_api", "postgres_ffi", + "postgres_ffi_types", "pprof", "prost 0.13.5", "remote_storage", @@ -8539,17 +8561,18 @@ name = "walproposer" version = "0.1.0" dependencies = [ "anyhow", - "bindgen 0.71.1", + "bindgen", "postgres_ffi", "utils", ] [[package]] name = "want" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ + "log", "try-lock", ] @@ -8561,9 +8584,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.11.1+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" @@ -8582,48 +8605,46 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", - "once_cell", - "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", + "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.100", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", - "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8631,31 +8652,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" 
-dependencies = [ - "unicode-ident", -] +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-streams" -version = "0.4.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" dependencies = [ "futures-util", "js-sys", @@ -8681,9 +8699,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -8701,41 +8719,20 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.11" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" -dependencies = [ - "webpki-roots 1.0.1", -] - -[[package]] -name = "webpki-roots" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" +checksum = "b3de34ae270483955a94f4b21bdaaeb83d508bb84a01435f393818edb0012009" dependencies = [ "rustls-pki-types", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.44", -] - [[package]] name = "whoami" -version = "1.6.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7" +checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" dependencies = [ - "redox_syscall 0.5.13", + "redox_syscall 0.4.1", "wasite", "web-sys", ] @@ -8758,11 +8755,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "windows-sys 0.59.0", + "winapi", ] [[package]] @@ -8771,63 +8768,23 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" -version = "0.61.2" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-implement", - "windows-interface", - "windows-link", - "windows-result", - "windows-strings", -] - -[[package]] -name = "windows-implement" -version = "0.60.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "windows-interface" -version = "0.59.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - -[[package]] -name = "windows-result" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" -dependencies = [ - "windows-link", -] - -[[package]] -name = "windows-strings" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" -dependencies = [ - "windows-link", + "windows-targets 0.52.6", ] [[package]] @@ -8836,7 +8793,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.48.0", ] [[package]] @@ -8857,28 +8814,19 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.2", -] - [[package]] name = "windows-targets" -version = "0.48.5" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] @@ -8890,34 +8838,18 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", + "windows_i686_gnullvm", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] -[[package]] -name = "windows-targets" -version = "0.53.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" -dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", -] - [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.5" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" [[package]] name = "windows_aarch64_gnullvm" @@ -8925,17 +8857,11 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - [[package]] name = "windows_aarch64_msvc" -version = "0.48.5" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" [[package]] name = "windows_aarch64_msvc" @@ -8943,17 +8869,11 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - [[package]] name = "windows_i686_gnu" -version = "0.48.5" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" [[package]] name = "windows_i686_gnu" @@ -8961,29 +8881,17 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - [[package]] name = "windows_i686_msvc" -version = "0.48.5" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" [[package]] name = "windows_i686_msvc" @@ -8991,17 +8899,11 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - [[package]] name = "windows_x86_64_gnu" -version = "0.48.5" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" [[package]] name = 
"windows_x86_64_gnu" @@ -9009,17 +8911,11 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.5" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" [[package]] name = "windows_x86_64_gnullvm" @@ -9027,17 +8923,11 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - [[package]] name = "windows_x86_64_msvc" -version = "0.48.5" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "windows_x86_64_msvc" @@ -9045,12 +8935,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" -[[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" - [[package]] name = "winnow" version = "0.6.26" @@ -9061,12 +8945,13 @@ dependencies = [ ] [[package]] -name = "winnow" -version = "0.7.11" +name = "winreg" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ - "memchr", + "cfg-if", + "windows-sys 0.48.0", ] [[package]] @@ -9075,7 +8960,7 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.8.0", ] [[package]] @@ -9085,9 +8970,8 @@ dependencies = [ "ahash", "anstream", "anyhow", - "axum", - "axum-core", - "base64 0.13.1", + "axum 0.8.1", + "axum-core 0.5.0", "base64 0.21.7", "base64ct", "bytes", @@ -9098,8 +8982,8 @@ dependencies = [ "clap_builder", "const-oid", "crypto-bigint 0.5.5", - "der 0.7.10", - "deranged 0.3.11", + "der 0.7.8", + "deranged", "digest", "ecdsa 0.16.9", "either", @@ -9113,12 +8997,12 @@ dependencies = [ "futures-io", "futures-util", "generic-array", - "getrandom 0.2.16", + "getrandom 0.2.11", "half", "hashbrown 0.14.5", "hex", "hmac", - "hyper 0.14.32", + "hyper 0.14.30", "hyper 1.6.0", "hyper-util", "indexmap 2.9.0", @@ -9146,10 +9030,10 @@ dependencies = [ "quote", "rand 0.8.5", "regex", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", "reqwest", - "rustls 0.23.28", + "rustls 0.23.27", "rustls-pki-types", "rustls-webpki 0.103.3", 
"scopeguard", @@ -9162,7 +9046,7 @@ dependencies = [ "spki 0.7.3", "stable_deref_trait", "subtle", - "syn", + "syn 2.0.100", "sync_wrapper 0.1.2", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", @@ -9173,7 +9057,7 @@ dependencies = [ "tokio-stream", "tokio-util", "toml_edit", - "tower", + "tower 0.5.2", "tracing", "tracing-core", "tracing-log", @@ -9187,10 +9071,16 @@ dependencies = [ ] [[package]] -name = "writeable" -version = "0.6.1" +name = "write16" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" [[package]] name = "x509-cert" @@ -9199,7 +9089,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" dependencies = [ "const-oid", - "der 0.7.10", + "der 0.7.8", "spki 0.7.3", "tls_codec", ] @@ -9213,7 +9103,7 @@ dependencies = [ "bcder", "bytes", "chrono", - "der 0.7.10", + "der 0.7.8", "hex", "pem", "ring", @@ -9225,19 +9115,18 @@ dependencies = [ [[package]] name = "xattr" -version = "1.5.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" +checksum = "ea263437ca03c1522846a4ddafbca2542d0ad5ed9b784909d4b27b76f62bc34a" dependencies = [ "libc", - "rustix 1.0.7", ] [[package]] name = "xmlparser" -version = "0.13.6" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" +checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd" [[package]] name = "xxhash-rust" @@ -9256,9 +9145,9 @@ dependencies = [ [[package]] name = "yoke" -version = "0.8.0" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", @@ -9268,54 +9157,74 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.0" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.26" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" dependencies = [ - "zerocopy-derive", + "zerocopy-derive 0.7.31", +] + +[[package]] +name = "zerocopy" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" +dependencies = [ + "zerocopy-derive 0.8.24", ] [[package]] name = "zerocopy-derive" -version = "0.8.26" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", ] [[package]] name = "zerofrom" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", "synstructure", ] @@ -9337,25 +9246,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn", -] - -[[package]] -name = "zerotrie" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" -dependencies = [ - "displaydoc", - "yoke", - "zerofrom", + "syn 2.0.100", ] [[package]] name = "zerovec" -version = "0.11.2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" dependencies = [ "yoke", "zerofrom", @@ -9364,38 +9262,38 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.100", ] [[package]] name = "zstd" -version = "0.13.3" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.2.1" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index df0ab04fb6..175328fa46 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "pageserver/compaction", "pageserver/ctl", 
"pageserver/client", + "pageserver/client_grpc", "pageserver/pagebench", "pageserver/page_api", "proxy", @@ -22,6 +23,8 @@ members = [ "libs/http-utils", "libs/pageserver_api", "libs/postgres_ffi", + "libs/postgres_ffi_types", + "libs/postgres_versioninfo", "libs/safekeeper_api", "libs/desim", "libs/neon-shmem", @@ -32,6 +35,7 @@ members = [ "libs/pq_proto", "libs/tenant_size_model", "libs/metrics", + "libs/neonart", "libs/postgres_connection", "libs/remote_storage", "libs/tracing-utils", @@ -72,8 +76,8 @@ aws-credential-types = "1.2.0" aws-sigv4 = { version = "1.2", features = ["sign-http"] } aws-types = "1.3" axum = { version = "0.8.1", features = ["ws"] } -axum-extra = { version = "0.10.0", features = ["typed-header"] } -base64 = "0.13.0" +axum-extra = { version = "0.10.0", features = ["typed-header", "query"] } +base64 = "0.22" bincode = "1.3" bindgen = "0.71" bit_field = "0.10.2" @@ -88,6 +92,7 @@ clap = { version = "4.0", features = ["derive", "env"] } clashmap = { version = "1.0", features = ["raw-api"] } comfy-table = "7.1" const_format = "0.2" +crossbeam-utils = "0.8.21" crc32c = "0.6" diatomic-waker = { version = "0.2.3" } either = "1.8" @@ -146,11 +151,13 @@ parquet = { version = "53", default-features = false, features = ["zstd"] } parquet_derive = "53" pbkdf2 = { version = "0.12.1", features = ["simple", "std"] } pem = "3.0.3" +peekable = "0.3.0" pin-project-lite = "0.2" pprof = { version = "0.14", features = ["criterion", "flamegraph", "frame-pointer", "prost-codec"] } procfs = "0.16" prometheus = {version = "0.13", default-features=false, features = ["process"]} # removes protobuf dependency prost = "0.13.5" +prost-types = "0.13.5" rand = "0.8" redis = { version = "0.29.2", features = ["tokio-rustls-comp", "keep-alive"] } regex = "1.10.2" @@ -172,14 +179,16 @@ sentry = { version = "0.37", default-features = false, features = ["backtrace", serde = { version = "1.0", features = ["derive"] } serde_json = "1" serde_path_to_error = "0.1" -serde_with = { version = "2.0", features = [ "base64" ] } +serde_with = { version = "3", features = [ "base64" ] } serde_assert = "0.5.0" +serde_repr = "0.1.20" sha2 = "0.10.2" signal-hook = "0.3" smallvec = "1.11" smol_str = { version = "0.2.0", features = ["serde"] } socket2 = "0.5" spki = "0.7.3" +spin = "0.9.8" strum = "0.26" strum_macros = "0.26" "subtle" = "2.5.0" @@ -191,16 +200,15 @@ thiserror = "1.0" tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms"] } tikv-jemalloc-ctl = { version = "0.6", features = ["stats"] } tokio = { version = "1.43.1", features = ["macros"] } -tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" } tokio-io-timeout = "1.2.0" tokio-postgres-rustls = "0.12.0" tokio-rustls = { version = "0.26.0", default-features = false, features = ["tls12", "ring"]} tokio-stream = "0.1" tokio-tar = "0.3" -tokio-util = { version = "0.7.10", features = ["io", "rt"] } +tokio-util = { version = "0.7.10", features = ["io", "io-util", "rt"] } toml = "0.8" toml_edit = "0.22" -tonic = { version = "0.13.1", default-features = false, features = ["channel", "codegen", "prost", "router", "server", "tls-ring", "tls-native-roots"] } +tonic = { version = "0.13.1", default-features = false, features = ["channel", "codegen", "gzip", "prost", "router", "server", "tls-ring", "tls-native-roots", "zstd"] } tonic-reflection = { version = "0.13.1", features = ["server"] } tower = { version = "0.5.2", default-features = false } tower-http = { 
version = "0.6.2", features = ["auth", "request-id", "trace"] } @@ -233,6 +241,9 @@ x509-cert = { version = "0.2.5" } env_logger = "0.11" log = "0.4" +tokio-epoll-uring = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" } +uring-common = { git = "https://github.com/neondatabase/tokio-epoll-uring.git" , branch = "main" } + ## Libraries from neondatabase/ git forks, ideally with changes to be upstreamed postgres = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" } postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", branch = "neon" } @@ -256,11 +267,14 @@ neon-shmem = { version = "0.1", path = "./libs/neon-shmem/" } pageserver = { path = "./pageserver" } pageserver_api = { version = "0.1", path = "./libs/pageserver_api/" } pageserver_client = { path = "./pageserver/client" } +pageserver_client_grpc = { path = "./pageserver/client_grpc" } pageserver_compaction = { version = "0.1", path = "./pageserver/compaction/" } pageserver_page_api = { path = "./pageserver/page_api" } postgres_backend = { version = "0.1", path = "./libs/postgres_backend/" } postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" } postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" } +postgres_ffi_types = { version = "0.1", path = "./libs/postgres_ffi_types/" } +postgres_versioninfo = { version = "0.1", path = "./libs/postgres_versioninfo/" } postgres_initdb = { path = "./libs/postgres_initdb" } posthog_client_lite = { version = "0.1", path = "./libs/posthog_client_lite" } pq_proto = { version = "0.1", path = "./libs/pq_proto/" } @@ -280,7 +294,7 @@ walproposer = { version = "0.1", path = "./libs/walproposer/" } workspace_hack = { version = "0.1", path = "./workspace_hack/" } ## Build dependencies -cbindgen = "0.28.0" +cbindgen = "0.29.0" criterion = "0.5.1" rcgen = "0.13" rstest = "0.18" diff --git a/Dockerfile b/Dockerfile index 3b7962dcf9..55b87d4012 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,8 +5,6 @@ ARG REPOSITORY=ghcr.io/neondatabase ARG IMAGE=build-tools ARG TAG=pinned -ARG DEFAULT_PG_VERSION=17 -ARG STABLE_PG_VERSION=16 ARG DEBIAN_VERSION=bookworm ARG DEBIAN_FLAVOR=${DEBIAN_VERSION}-slim @@ -32,7 +30,18 @@ ARG BASE_IMAGE_SHA=debian:${DEBIAN_FLAVOR} ARG BASE_IMAGE_SHA=${BASE_IMAGE_SHA/debian:bookworm-slim/debian@$BOOKWORM_SLIM_SHA} ARG BASE_IMAGE_SHA=${BASE_IMAGE_SHA/debian:bullseye-slim/debian@$BULLSEYE_SLIM_SHA} -# Build Postgres +# Naive way: +# +# 1. COPY . . +# 1. make neon-pg-ext +# 2. cargo build +# +# But to enable docker to cache intermediate layers, we perform a few preparatory steps: +# +# - Build all postgres versions, depending on just the contents of vendor/ +# - Use cargo chef to build all rust dependencies + +# 1. Build all postgres versions FROM $REPOSITORY/$IMAGE:$TAG AS pg-build WORKDIR /home/nonroot @@ -40,17 +49,15 @@ COPY --chown=nonroot vendor/postgres-v14 vendor/postgres-v14 COPY --chown=nonroot vendor/postgres-v15 vendor/postgres-v15 COPY --chown=nonroot vendor/postgres-v16 vendor/postgres-v16 COPY --chown=nonroot vendor/postgres-v17 vendor/postgres-v17 -COPY --chown=nonroot pgxn pgxn COPY --chown=nonroot Makefile Makefile +COPY --chown=nonroot postgres.mk postgres.mk COPY --chown=nonroot scripts/ninstall.sh scripts/ninstall.sh ENV BUILD_TYPE=release RUN set -e \ - && mold -run make -j $(nproc) -s neon-pg-ext \ - && rm -rf pg_install/build \ - && tar -C pg_install -czf /home/nonroot/postgres_install.tar.gz . 
+ && mold -run make -j $(nproc) -s postgres -# Prepare cargo-chef recipe +# 2. Prepare cargo-chef recipe FROM $REPOSITORY/$IMAGE:$TAG AS plan WORKDIR /home/nonroot @@ -58,26 +65,22 @@ COPY --chown=nonroot . . RUN cargo chef prepare --recipe-path recipe.json -# Build neon binaries +# Main build image FROM $REPOSITORY/$IMAGE:$TAG AS build WORKDIR /home/nonroot ARG GIT_VERSION=local ARG BUILD_TAG -ARG STABLE_PG_VERSION - -COPY --from=pg-build /home/nonroot/pg_install/v14/include/postgresql/server pg_install/v14/include/postgresql/server -COPY --from=pg-build /home/nonroot/pg_install/v15/include/postgresql/server pg_install/v15/include/postgresql/server -COPY --from=pg-build /home/nonroot/pg_install/v16/include/postgresql/server pg_install/v16/include/postgresql/server -COPY --from=pg-build /home/nonroot/pg_install/v17/include/postgresql/server pg_install/v17/include/postgresql/server -COPY --from=pg-build /home/nonroot/pg_install/v16/lib pg_install/v16/lib -COPY --from=pg-build /home/nonroot/pg_install/v17/lib pg_install/v17/lib -COPY --from=plan /home/nonroot/recipe.json recipe.json - ARG ADDITIONAL_RUSTFLAGS="" +# 3. Build cargo dependencies. Note that this step doesn't depend on anything else than +# `recipe.json`, so the layer can be reused as long as none of the dependencies change. +COPY --from=plan /home/nonroot/recipe.json recipe.json RUN set -e \ && RUSTFLAGS="-Clinker=clang -Clink-arg=-fuse-ld=mold -Clink-arg=-Wl,--no-rosegment -Cforce-frame-pointers=yes ${ADDITIONAL_RUSTFLAGS}" cargo chef cook --locked --release --recipe-path recipe.json +# Perform the main build. We reuse the Postgres build artifacts from the intermediate 'pg-build' +# layer, and the cargo dependencies built in the previous step. +COPY --chown=nonroot --from=pg-build /home/nonroot/pg_install/ pg_install COPY --chown=nonroot . . RUN set -e \ @@ -92,12 +95,11 @@ RUN set -e \ --bin endpoint_storage \ --bin neon_local \ --bin storage_scrubber \ - --locked --release + --locked --release \ + && mold -run make -j $(nproc) -s neon-pg-ext -# Build final image -# +# Assemble the final image FROM $BASE_IMAGE_SHA -ARG DEFAULT_PG_VERSION WORKDIR /data RUN set -e \ @@ -107,9 +109,20 @@ RUN set -e \ libreadline-dev \ libseccomp-dev \ ca-certificates \ - # System postgres for use with client libraries (e.g. 
in storage controller) - postgresql-15 \ openssl \ + unzip \ + curl \ + && ARCH=$(uname -m) \ + && if [ "$ARCH" = "x86_64" ]; then \ + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"; \ + elif [ "$ARCH" = "aarch64" ]; then \ + curl "https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip" -o "awscliv2.zip"; \ + else \ + echo "Unsupported architecture: $ARCH" && exit 1; \ + fi \ + && unzip awscliv2.zip \ + && ./aws/install \ + && rm -rf aws awscliv2.zip \ && rm -f /etc/apt/apt.conf.d/80-retries \ && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \ && useradd -d /data neon \ @@ -125,12 +138,15 @@ COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy COPY --from=build --chown=neon:neon /home/nonroot/target/release/endpoint_storage /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/neon_local /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_scrubber /usr/local/bin +COPY --from=build /home/nonroot/pg_install/v14 /usr/local/v14/ +COPY --from=build /home/nonroot/pg_install/v15 /usr/local/v15/ +COPY --from=build /home/nonroot/pg_install/v16 /usr/local/v16/ +COPY --from=build /home/nonroot/pg_install/v17 /usr/local/v17/ -COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/ -COPY --from=pg-build /home/nonroot/pg_install/v15 /usr/local/v15/ -COPY --from=pg-build /home/nonroot/pg_install/v16 /usr/local/v16/ -COPY --from=pg-build /home/nonroot/pg_install/v17 /usr/local/v17/ -COPY --from=pg-build /home/nonroot/postgres_install.tar.gz /data/ +# Deprecated: Old deployment scripts use this tarball which contains all the Postgres binaries. +# That's obsolete, since all the same files are also present under /usr/local/v*. But to keep the +# old scripts working for now, create the tarball. +RUN tar -C /usr/local -cvzf /data/postgres_install.tar.gz v14 v15 v16 v17 # By default, pageserver uses `.neon/` working directory in WORKDIR, so create one and fill it with the dummy config. # Now, when `docker run ... pageserver` is run, it can start without errors, yet will have some default dummy values. diff --git a/Makefile b/Makefile index 820f3c20f1..4b31e26810 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,21 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) -# Where to install Postgres, default is ./pg_install, maybe useful for package managers +# Where to install Postgres, default is ./pg_install, maybe useful for package +# managers. POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/ +# Supported PostgreSQL versions +POSTGRES_VERSIONS = v17 v16 v15 v14 + +# CARGO_BUILD_FLAGS: Extra flags to pass to `cargo build`. `--locked` +# and `--features testing` are popular examples. +# +# CARGO_PROFILE: Set to override the cargo profile to use. By default, +# it is derived from BUILD_TYPE. + +# All intermediate build artifacts are stored here. +BUILD_DIR := build + ICU_PREFIX_DIR := /usr/local/icu # @@ -16,13 +29,18 @@ ifeq ($(BUILD_TYPE),release) PG_CONFIGURE_OPTS = --enable-debug --with-openssl PG_CFLAGS += -O2 -g3 $(CFLAGS) PG_LDFLAGS = $(LDFLAGS) - # Unfortunately, `--profile=...` is a nightly feature - CARGO_BUILD_FLAGS += --release + CARGO_PROFILE ?= --profile=release + # NEON_CARGO_ARTIFACT_TARGET_DIR is the directory where `cargo build` places + # the final build artifacts. There is unfortunately no easy way of changing + # it to a fully predictable path, nor to extract the path with a simple + # command. 
See https://github.com/rust-lang/cargo/issues/9661 and + # https://github.com/rust-lang/cargo/issues/6790. NEON_CARGO_ARTIFACT_TARGET_DIR = $(ROOT_PROJECT_DIR)/target/release else ifeq ($(BUILD_TYPE),debug) PG_CONFIGURE_OPTS = --enable-debug --with-openssl --enable-cassert --enable-depend PG_CFLAGS += -O0 -g3 $(CFLAGS) PG_LDFLAGS = $(LDFLAGS) + CARGO_PROFILE ?= --profile=dev NEON_CARGO_ARTIFACT_TARGET_DIR = $(ROOT_PROJECT_DIR)/target/debug else $(error Bad build type '$(BUILD_TYPE)', see Makefile for options) @@ -87,146 +105,32 @@ CACHEDIR_TAG_CONTENTS := "Signature: 8a477f597d28d172789f06886806bc55" # Top level Makefile to build Neon and PostgreSQL # .PHONY: all -all: neon postgres neon-pg-ext +all: neon postgres-install neon-pg-ext ### Neon Rust bits # -# The 'postgres_ffi' depends on the Postgres headers. +# The 'postgres_ffi' crate depends on the Postgres headers. .PHONY: neon -neon: postgres-headers walproposer-lib cargo-target-dir +neon: postgres-headers-install walproposer-lib cargo-target-dir +@echo "Compiling Neon" - $(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS) + $(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS) $(CARGO_PROFILE) + .PHONY: cargo-target-dir cargo-target-dir: # https://github.com/rust-lang/cargo/issues/14281 mkdir -p target test -e target/CACHEDIR.TAG || echo "$(CACHEDIR_TAG_CONTENTS)" > target/CACHEDIR.TAG -### PostgreSQL parts -# Some rules are duplicated for Postgres v14 and 15. We may want to refactor -# to avoid the duplication in the future, but it's tolerable for now. -# -$(POSTGRES_INSTALL_DIR)/build/%/config.status: - - mkdir -p $(POSTGRES_INSTALL_DIR) - test -e $(POSTGRES_INSTALL_DIR)/CACHEDIR.TAG || echo "$(CACHEDIR_TAG_CONTENTS)" > $(POSTGRES_INSTALL_DIR)/CACHEDIR.TAG - - +@echo "Configuring Postgres $* build" - @test -s $(ROOT_PROJECT_DIR)/vendor/postgres-$*/configure || { \ - echo "\nPostgres submodule not found in $(ROOT_PROJECT_DIR)/vendor/postgres-$*/, execute "; \ - echo "'git submodule update --init --recursive --depth 2 --progress .' in project root.\n"; \ - exit 1; } - mkdir -p $(POSTGRES_INSTALL_DIR)/build/$* - - VERSION=$*; \ - EXTRA_VERSION=$$(cd $(ROOT_PROJECT_DIR)/vendor/postgres-$$VERSION && git rev-parse HEAD); \ - (cd $(POSTGRES_INSTALL_DIR)/build/$$VERSION && \ - env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-$$VERSION/configure \ - CFLAGS='$(PG_CFLAGS)' LDFLAGS='$(PG_LDFLAGS)' \ - $(PG_CONFIGURE_OPTS) --with-extra-version=" ($$EXTRA_VERSION)" \ - --prefix=$(abspath $(POSTGRES_INSTALL_DIR))/$$VERSION > configure.log) - -# nicer alias to run 'configure' -# Note: I've been unable to use templates for this part of our configuration. -# I'm not sure why it wouldn't work, but this is the only place (apart from -# the "build-all-versions" entry points) where direct mention of PostgreSQL -# versions is used. 
-.PHONY: postgres-configure-v17 -postgres-configure-v17: $(POSTGRES_INSTALL_DIR)/build/v17/config.status -.PHONY: postgres-configure-v16 -postgres-configure-v16: $(POSTGRES_INSTALL_DIR)/build/v16/config.status -.PHONY: postgres-configure-v15 -postgres-configure-v15: $(POSTGRES_INSTALL_DIR)/build/v15/config.status -.PHONY: postgres-configure-v14 -postgres-configure-v14: $(POSTGRES_INSTALL_DIR)/build/v14/config.status - -# Install the PostgreSQL header files into $(POSTGRES_INSTALL_DIR)//include -.PHONY: postgres-headers-% -postgres-headers-%: postgres-configure-% - +@echo "Installing PostgreSQL $* headers" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/include MAKELEVEL=0 install - -# Compile and install PostgreSQL -.PHONY: postgres-% -postgres-%: postgres-configure-% \ - postgres-headers-% # to prevent `make install` conflicts with neon's `postgres-headers` - +@echo "Compiling PostgreSQL $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 install - +@echo "Compiling libpq $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/interfaces/libpq install - +@echo "Compiling pg_prewarm $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_prewarm install - +@echo "Compiling pg_buffercache $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache install - +@echo "Compiling pg_visibility $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_visibility install - +@echo "Compiling pageinspect $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install - +@echo "Compiling pg_trgm $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_trgm install - +@echo "Compiling amcheck $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/amcheck install - +@echo "Compiling test_decoding $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/test_decoding install - -.PHONY: postgres-clean-% -postgres-clean-%: - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 clean - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache clean - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect clean - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/interfaces/libpq clean - -.PHONY: postgres-check-% -postgres-check-%: postgres-% - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 check - .PHONY: neon-pg-ext-% -neon-pg-ext-%: postgres-% - +@echo "Compiling communicator $*" - $(CARGO_CMD_PREFIX) cargo build -p communicator $(CARGO_BUILD_FLAGS) - - +@echo "Compiling neon $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-$* - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - LIBCOMMUNICATOR_PATH=$(NEON_CARGO_ARTIFACT_TARGET_DIR) \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install - - +@echo "Compiling neon_walredo $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile install - +@echo "Compiling neon_rmgr $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-rmgr-$* - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-rmgr-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_rmgr/Makefile install - +@echo "Compiling neon_test_utils $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C 
$(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install - +@echo "Compiling neon_utils $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile install - -.PHONY: neon-pg-clean-ext-% -neon-pg-clean-ext-%: - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile clean - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile clean - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile clean +neon-pg-ext-%: postgres-install-% cargo-target-dir + +@echo "Compiling neon-specific Postgres extensions for $*" + mkdir -p $(BUILD_DIR)/pgxn-$* + $(MAKE) PG_CONFIG="$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config" COPT='$(COPT)' \ + NEON_CARGO_ARTIFACT_TARGET_DIR="$(NEON_CARGO_ARTIFACT_TARGET_DIR)" \ + CARGO_BUILD_FLAGS="$(CARGO_BUILD_FLAGS)" \ + CARGO_PROFILE="$(CARGO_PROFILE)" \ + -C $(BUILD_DIR)/pgxn-$*\ + -f $(ROOT_PROJECT_DIR)/pgxn/Makefile install # Build walproposer as a static library. walproposer source code is located # in the pgxn/neon directory. @@ -240,15 +144,15 @@ neon-pg-clean-ext-%: .PHONY: walproposer-lib walproposer-lib: neon-pg-ext-v17 +@echo "Compiling walproposer-lib" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/walproposer-lib + mkdir -p $(BUILD_DIR)/walproposer-lib $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v17/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/walproposer-lib \ + -C $(BUILD_DIR)/walproposer-lib \ -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile walproposer-lib - cp $(POSTGRES_INSTALL_DIR)/v17/lib/libpgport.a $(POSTGRES_INSTALL_DIR)/build/walproposer-lib - cp $(POSTGRES_INSTALL_DIR)/v17/lib/libpgcommon.a $(POSTGRES_INSTALL_DIR)/build/walproposer-lib - $(AR) d $(POSTGRES_INSTALL_DIR)/build/walproposer-lib/libpgport.a \ + cp $(POSTGRES_INSTALL_DIR)/v17/lib/libpgport.a $(BUILD_DIR)/walproposer-lib + cp $(POSTGRES_INSTALL_DIR)/v17/lib/libpgcommon.a $(BUILD_DIR)/walproposer-lib + $(AR) d $(BUILD_DIR)/walproposer-lib/libpgport.a \ pg_strong_random.o - $(AR) d $(POSTGRES_INSTALL_DIR)/build/walproposer-lib/libpgcommon.a \ + $(AR) d $(BUILD_DIR)/walproposer-lib/libpgcommon.a \ checksum_helper.o \ cryptohash_openssl.o \ hmac_openssl.o \ @@ -256,69 +160,18 @@ walproposer-lib: neon-pg-ext-v17 parse_manifest.o \ scram-common.o ifeq ($(UNAME_S),Linux) - $(AR) d $(POSTGRES_INSTALL_DIR)/build/walproposer-lib/libpgcommon.a \ + $(AR) d $(BUILD_DIR)/walproposer-lib/libpgcommon.a \ pg_crc32c.o endif -.PHONY: walproposer-lib-clean -walproposer-lib-clean: - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v17/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/walproposer-lib \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean - +# Shorthand to call neon-pg-ext-% target for all Postgres versions .PHONY: neon-pg-ext -neon-pg-ext: \ - neon-pg-ext-v14 \ - neon-pg-ext-v15 \ - neon-pg-ext-v16 \ - neon-pg-ext-v17 - -.PHONY: neon-pg-clean-ext -neon-pg-clean-ext: \ - 
neon-pg-clean-ext-v14 \ - neon-pg-clean-ext-v15 \ - neon-pg-clean-ext-v16 \ - neon-pg-clean-ext-v17 - -# shorthand to build all Postgres versions -.PHONY: postgres -postgres: \ - postgres-v14 \ - postgres-v15 \ - postgres-v16 \ - postgres-v17 - -.PHONY: postgres-headers -postgres-headers: \ - postgres-headers-v14 \ - postgres-headers-v15 \ - postgres-headers-v16 \ - postgres-headers-v17 - -.PHONY: postgres-clean -postgres-clean: \ - postgres-clean-v14 \ - postgres-clean-v15 \ - postgres-clean-v16 \ - postgres-clean-v17 - -.PHONY: postgres-check -postgres-check: \ - postgres-check-v14 \ - postgres-check-v15 \ - postgres-check-v16 \ - postgres-check-v17 - -# This doesn't remove the effects of 'configure'. -.PHONY: clean -clean: postgres-clean neon-pg-clean-ext - $(MAKE) -C compute clean - $(CARGO_CMD_PREFIX) cargo clean +neon-pg-ext: $(foreach pg_version,$(POSTGRES_VERSIONS),neon-pg-ext-$(pg_version)) # This removes everything .PHONY: distclean distclean: - $(RM) -r $(POSTGRES_INSTALL_DIR) + $(RM) -r $(POSTGRES_INSTALL_DIR) $(BUILD_DIR) $(CARGO_CMD_PREFIX) cargo clean .PHONY: fmt @@ -327,7 +180,7 @@ fmt: postgres-%-pg-bsd-indent: postgres-% +@echo "Compiling pg_bsd_indent" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/tools/pg_bsd_indent/ + $(MAKE) -C $(BUILD_DIR)/$*/src/tools/pg_bsd_indent/ # Create typedef list for the core. Note that generally it should be combined with # buildfarm one to cover platform specific stuff. @@ -346,7 +199,7 @@ postgres-%-pgindent: postgres-%-pg-bsd-indent postgres-%-typedefs.list cat $(ROOT_PROJECT_DIR)/vendor/postgres-$*/src/tools/pgindent/typedefs.list |\ cat - postgres-$*-typedefs.list | sort | uniq > postgres-$*-typedefs-full.list +@echo note: you might want to run it on selected files/dirs instead. - INDENT=$(POSTGRES_INSTALL_DIR)/build/$*/src/tools/pg_bsd_indent/pg_bsd_indent \ + INDENT=$(BUILD_DIR)/$*/src/tools/pg_bsd_indent/pg_bsd_indent \ $(ROOT_PROJECT_DIR)/vendor/postgres-$*/src/tools/pgindent/pgindent --typedefs postgres-$*-typedefs-full.list \ $(ROOT_PROJECT_DIR)/vendor/postgres-$*/src/ \ --excludes $(ROOT_PROJECT_DIR)/vendor/postgres-$*/src/tools/pgindent/exclude_file_patterns @@ -357,12 +210,28 @@ postgres-%-pgindent: postgres-%-pg-bsd-indent postgres-%-typedefs.list neon-pgindent: postgres-v17-pg-bsd-indent neon-pg-ext-v17 $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v17/bin/pg_config COPT='$(COPT)' \ FIND_TYPEDEF=$(ROOT_PROJECT_DIR)/vendor/postgres-v17/src/tools/find_typedef \ - INDENT=$(POSTGRES_INSTALL_DIR)/build/v17/src/tools/pg_bsd_indent/pg_bsd_indent \ + INDENT=$(BUILD_DIR)/v17/src/tools/pg_bsd_indent/pg_bsd_indent \ PGINDENT_SCRIPT=$(ROOT_PROJECT_DIR)/vendor/postgres-v17/src/tools/pgindent/pgindent \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-v17 \ + -C $(BUILD_DIR)/neon-v17 \ -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile pgindent .PHONY: setup-pre-commit-hook setup-pre-commit-hook: ln -s -f $(ROOT_PROJECT_DIR)/pre-commit.py .git/hooks/pre-commit + +# Targets for building PostgreSQL are defined in postgres.mk. +# +# But if the caller has indicated that PostgreSQL is already +# installed, by setting the PG_INSTALL_CACHED variable, skip it. 
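+#
+# For example (a minimal usage sketch, assuming pg_install/ is already
+# populated by an earlier build or restored from a cache):
+#
+#   PG_INSTALL_CACHED=1 make neon-pg-ext
+#
+# builds only the Neon extensions and skips the postgres-install targets.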
+ifdef PG_INSTALL_CACHED +postgres-install: skip-install +$(foreach pg_version,$(POSTGRES_VERSIONS),postgres-install-$(pg_version)): skip-install +postgres-headers-install: + +@echo "Skipping installation of PostgreSQL headers because PG_INSTALL_CACHED is set" +skip-install: + +@echo "Skipping PostgreSQL installation because PG_INSTALL_CACHED is set" + +else +include postgres.mk +endif diff --git a/build-tools.Dockerfile b/build-tools.Dockerfile index f97f04968e..14a52bd736 100644 --- a/build-tools.Dockerfile +++ b/build-tools.Dockerfile @@ -165,6 +165,7 @@ RUN curl -fsSL \ && rm sql_exporter.tar.gz # protobuf-compiler (protoc) +# Keep the version the same as in compute/compute-node.Dockerfile ENV PROTOC_VERSION=25.1 RUN curl -fsSL "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-$(uname -m | sed 's/aarch64/aarch_64/g').zip" -o "protoc.zip" \ && unzip -q protoc.zip -d protoc \ @@ -179,7 +180,7 @@ RUN curl -sL "https://github.com/peak/s5cmd/releases/download/v${S5CMD_VERSION}/ && mv s5cmd /usr/local/bin/s5cmd # LLVM -ENV LLVM_VERSION=19 +ENV LLVM_VERSION=20 RUN curl -fsSL 'https://apt.llvm.org/llvm-snapshot.gpg.key' | apt-key add - \ && echo "deb http://apt.llvm.org/${DEBIAN_VERSION}/ llvm-toolchain-${DEBIAN_VERSION}-${LLVM_VERSION} main" > /etc/apt/sources.list.d/llvm.stable.list \ && apt update \ @@ -292,7 +293,7 @@ WORKDIR /home/nonroot # Rust # Please keep the version of llvm (installed above) in sync with rust llvm (`rustc --version --verbose | grep LLVM`) -ENV RUSTC_VERSION=1.87.0 +ENV RUSTC_VERSION=1.88.0 ENV RUSTUP_HOME="/home/nonroot/.rustup" ENV PATH="/home/nonroot/.cargo/bin:${PATH}" ARG RUSTFILT_VERSION=0.2.1 diff --git a/compute/.gitignore b/compute/.gitignore index 70980d335a..aab2afaa4e 100644 --- a/compute/.gitignore +++ b/compute/.gitignore @@ -3,3 +3,6 @@ etc/neon_collector.yml etc/neon_collector_autoscaling.yml etc/sql_exporter.yml etc/sql_exporter_autoscaling.yml + +# Node.js dependencies +node_modules/ diff --git a/compute/Makefile b/compute/Makefile index 0036196160..ef2e55f7b1 100644 --- a/compute/Makefile +++ b/compute/Makefile @@ -22,7 +22,7 @@ sql_exporter.yml: $(jsonnet_files) --output-file etc/$@ \ --tla-str collector_name=neon_collector \ --tla-str collector_file=neon_collector.yml \ - --tla-str 'connection_string=postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter' \ + --tla-str 'connection_string=postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter&pgaudit.log=none' \ etc/sql_exporter.jsonnet sql_exporter_autoscaling.yml: $(jsonnet_files) @@ -30,7 +30,7 @@ sql_exporter_autoscaling.yml: $(jsonnet_files) --output-file etc/$@ \ --tla-str collector_name=neon_collector_autoscaling \ --tla-str collector_file=neon_collector_autoscaling.yml \ - --tla-str 'connection_string=postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter_autoscaling' \ + --tla-str 'connection_string=postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter_autoscaling&pgaudit.log=none' \ etc/sql_exporter.jsonnet .PHONY: clean @@ -48,3 +48,11 @@ jsonnetfmt-test: .PHONY: jsonnetfmt-format jsonnetfmt-format: jsonnetfmt --in-place $(jsonnet_files) + +.PHONY: manifest-schema-validation +manifest-schema-validation: node_modules + node_modules/.bin/jsonschema validate -d https://json-schema.org/draft/2020-12/schema manifest.schema.json manifest.yaml + +node_modules: package.json + 
npm install + touch node_modules diff --git a/compute/compute-node.Dockerfile b/compute/compute-node.Dockerfile index 248f52088b..0dd32011fb 100644 --- a/compute/compute-node.Dockerfile +++ b/compute/compute-node.Dockerfile @@ -77,9 +77,6 @@ # build_and_test.yml github workflow for how that's done. ARG PG_VERSION -ARG REPOSITORY=ghcr.io/neondatabase -ARG IMAGE=build-tools -ARG TAG=pinned ARG BUILD_TAG ARG DEBIAN_VERSION=bookworm ARG DEBIAN_FLAVOR=${DEBIAN_VERSION}-slim @@ -118,6 +115,9 @@ ARG EXTENSIONS=all FROM $BASE_IMAGE_SHA AS build-deps ARG DEBIAN_VERSION +# Keep in sync with build-tools.Dockerfile +ENV PROTOC_VERSION=25.1 + # Use strict mode for bash to catch errors early SHELL ["/bin/bash", "-euo", "pipefail", "-c"] @@ -149,8 +149,17 @@ RUN case $DEBIAN_VERSION in \ ninja-build git autoconf automake libtool build-essential bison flex libreadline-dev \ zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget ca-certificates pkg-config libssl-dev \ libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd curl unzip g++ \ + libclang-dev \ + jsonnet \ $VERSION_INSTALLS \ - && apt clean && rm -rf /var/lib/apt/lists/* + && apt clean && rm -rf /var/lib/apt/lists/* \ + && useradd -ms /bin/bash nonroot -b /home \ + # Install protoc from binary release, since Debian's versions are too old. + && curl -fsSL "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-$(uname -m | sed 's/aarch64/aarch_64/g').zip" -o "protoc.zip" \ + && unzip -q protoc.zip -d protoc \ + && mv protoc/bin/protoc /usr/local/bin/protoc \ + && mv protoc/include/google /usr/local/include/google \ + && rm -rf protoc.zip protoc ######################################################################################### # @@ -171,9 +180,6 @@ RUN cd postgres && \ eval $CONFIGURE_CMD && \ make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \ make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \ - # Install headers - make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \ - make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \ # Enable some of contrib extensions echo 'trusted = true' >> /usr/local/pgsql/share/extension/autoinc.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/dblink.control && \ @@ -1057,17 +1063,10 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) && \ ######################################################################################### # -# Layer "pg build with nonroot user and cargo installed" -# This layer is base and common for layers with `pgrx` +# Layer "build-deps with Rust toolchain installed" # ######################################################################################### -FROM pg-build AS pg-build-nonroot-with-cargo -ARG PG_VERSION - -RUN apt update && \ - apt install --no-install-recommends --no-install-suggests -y curl libclang-dev && \ - apt clean && rm -rf /var/lib/apt/lists/* && \ - useradd -ms /bin/bash nonroot -b /home +FROM build-deps AS build-deps-with-cargo ENV HOME=/home/nonroot ENV PATH="/home/nonroot/.cargo/bin:$PATH" @@ -1082,13 +1081,29 @@ RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux ./rustup-init -y --no-modify-path --profile minimal --default-toolchain stable && \ rm rustup-init +######################################################################################### +# +# Layer "pg-build with Rust toolchain installed" +# This layer is base and common for layers with 
`pgrx` +# +######################################################################################### +FROM pg-build AS pg-build-with-cargo +ARG PG_VERSION + +ENV HOME=/home/nonroot +ENV PATH="/home/nonroot/.cargo/bin:$PATH" +USER nonroot +WORKDIR /home/nonroot + +COPY --from=build-deps-with-cargo /home/nonroot /home/nonroot + ######################################################################################### # # Layer "rust extensions" # This layer is used to build `pgrx` deps # ######################################################################################### -FROM pg-build-nonroot-with-cargo AS rust-extensions-build +FROM pg-build-with-cargo AS rust-extensions-build ARG PG_VERSION RUN case "${PG_VERSION:?}" in \ @@ -1110,7 +1125,7 @@ USER root # and eventually get merged with `rust-extensions-build` # ######################################################################################### -FROM pg-build-nonroot-with-cargo AS rust-extensions-build-pgrx12 +FROM pg-build-with-cargo AS rust-extensions-build-pgrx12 ARG PG_VERSION RUN cargo install --locked --version 0.12.9 cargo-pgrx && \ @@ -1127,7 +1142,7 @@ USER root # and eventually get merged with `rust-extensions-build` # ######################################################################################### -FROM pg-build-nonroot-with-cargo AS rust-extensions-build-pgrx14 +FROM pg-build-with-cargo AS rust-extensions-build-pgrx14 ARG PG_VERSION RUN cargo install --locked --version 0.14.1 cargo-pgrx && \ @@ -1144,10 +1159,12 @@ USER root FROM build-deps AS pgrag-src ARG PG_VERSION - WORKDIR /ext-src +COPY compute/patches/onnxruntime.patch . + RUN wget https://github.com/microsoft/onnxruntime/archive/refs/tags/v1.18.1.tar.gz -O onnxruntime.tar.gz && \ mkdir onnxruntime-src && cd onnxruntime-src && tar xzf ../onnxruntime.tar.gz --strip-components=1 -C . && \ + patch -p1 < /ext-src/onnxruntime.patch && \ echo "#nothing to test here" > neon-test.sh RUN wget https://github.com/neondatabase-labs/pgrag/archive/refs/tags/v0.1.2.tar.gz -O pgrag.tar.gz && \ @@ -1162,7 +1179,7 @@ COPY --from=pgrag-src /ext-src/ /ext-src/ # Install it using virtual environment, because Python 3.11 (the default version on Debian 12 (Bookworm)) complains otherwise WORKDIR /ext-src/onnxruntime-src RUN apt update && apt install --no-install-recommends --no-install-suggests -y \ - python3 python3-pip python3-venv protobuf-compiler && \ + python3 python3-pip python3-venv && \ apt clean && rm -rf /var/lib/apt/lists/* && \ python3 -m venv venv && \ . venv/bin/activate && \ @@ -1555,29 +1572,31 @@ RUN make -j $(getconf _NPROCESSORS_ONLN) && \ FROM build-deps AS pgaudit-src ARG PG_VERSION WORKDIR /ext-src +COPY "compute/patches/pgaudit-parallel_workers-${PG_VERSION}.patch" . 
RUN case "${PG_VERSION}" in \ "v14") \ - export PGAUDIT_VERSION=1.6.2 \ - export PGAUDIT_CHECKSUM=1f350d70a0cbf488c0f2b485e3a5c9b11f78ad9e3cbb95ef6904afa1eb3187eb \ + export PGAUDIT_VERSION=1.6.3 \ + export PGAUDIT_CHECKSUM=37a8f5a7cc8d9188e536d15cf0fdc457fcdab2547caedb54442c37f124110919 \ ;; \ "v15") \ - export PGAUDIT_VERSION=1.7.0 \ - export PGAUDIT_CHECKSUM=8f4a73e451c88c567e516e6cba7dc1e23bc91686bb6f1f77f8f3126d428a8bd8 \ + export PGAUDIT_VERSION=1.7.1 \ + export PGAUDIT_CHECKSUM=e9c8e6e092d82b2f901d72555ce0fe7780552f35f8985573796cd7e64b09d4ec \ ;; \ "v16") \ - export PGAUDIT_VERSION=16.0 \ - export PGAUDIT_CHECKSUM=d53ef985f2d0b15ba25c512c4ce967dce07b94fd4422c95bd04c4c1a055fe738 \ + export PGAUDIT_VERSION=16.1 \ + export PGAUDIT_CHECKSUM=3bae908ab70ba0c6f51224009dbcfff1a97bd6104c6273297a64292e1b921fee \ ;; \ "v17") \ - export PGAUDIT_VERSION=17.0 \ - export PGAUDIT_CHECKSUM=7d0d08d030275d525f36cd48b38c6455f1023da863385badff0cec44965bfd8c \ + export PGAUDIT_VERSION=17.1 \ + export PGAUDIT_CHECKSUM=9c5f37504d393486cc75d2ced83f75f5899be64fa85f689d6babb833b4361e6c \ ;; \ *) \ echo "pgaudit is not supported on this PostgreSQL version" && exit 1;; \ esac && \ wget https://github.com/pgaudit/pgaudit/archive/refs/tags/${PGAUDIT_VERSION}.tar.gz -O pgaudit.tar.gz && \ echo "${PGAUDIT_CHECKSUM} pgaudit.tar.gz" | sha256sum --check && \ - mkdir pgaudit-src && cd pgaudit-src && tar xzf ../pgaudit.tar.gz --strip-components=1 -C . + mkdir pgaudit-src && cd pgaudit-src && tar xzf ../pgaudit.tar.gz --strip-components=1 -C . && \ + patch -p1 < "/ext-src/pgaudit-parallel_workers-${PG_VERSION}.patch" FROM pg-build AS pgaudit-build COPY --from=pgaudit-src /ext-src/ /ext-src/ @@ -1617,22 +1636,14 @@ RUN make install USE_PGXS=1 -j $(getconf _NPROCESSORS_ONLN) # compile neon extensions # ######################################################################################### -FROM pg-build AS neon-ext-build +FROM pg-build-with-cargo AS neon-ext-build ARG PG_VERSION -COPY pgxn/ pgxn/ -RUN make -j $(getconf _NPROCESSORS_ONLN) \ - -C pgxn/neon \ - -s install && \ - make -j $(getconf _NPROCESSORS_ONLN) \ - -C pgxn/neon_utils \ - -s install && \ - make -j $(getconf _NPROCESSORS_ONLN) \ - -C pgxn/neon_test_utils \ - -s install && \ - make -j $(getconf _NPROCESSORS_ONLN) \ - -C pgxn/neon_rmgr \ - -s install +USER root +COPY . . + +RUN make -j $(getconf _NPROCESSORS_ONLN) -C pgxn -s install-compute \ + BUILD_TYPE=release CARGO_BUILD_FLAGS="--locked --release" NEON_CARGO_ARTIFACT_TARGET_DIR="$(pwd)/target/release" ######################################################################################### # @@ -1722,7 +1733,7 @@ FROM extensions-${EXTENSIONS} AS neon-pg-ext-build # Compile the Neon-specific `compute_ctl`, `fast_import`, and `local_proxy` binaries # ######################################################################################### -FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools +FROM build-deps-with-cargo AS compute-tools ARG BUILD_TAG ENV BUILD_TAG=$BUILD_TAG @@ -1732,7 +1743,7 @@ COPY --chown=nonroot . . 
RUN --mount=type=cache,uid=1000,target=/home/nonroot/.cargo/registry \ --mount=type=cache,uid=1000,target=/home/nonroot/.cargo/git \ --mount=type=cache,uid=1000,target=/home/nonroot/target \ - mold -run cargo build --locked --profile release-line-debug-size-lto --bin compute_ctl --bin fast_import --bin local_proxy && \ + cargo build --locked --profile release-line-debug-size-lto --bin compute_ctl --bin fast_import --bin local_proxy && \ mkdir target-bin && \ cp target/release-line-debug-size-lto/compute_ctl \ target/release-line-debug-size-lto/fast_import \ @@ -1826,10 +1837,11 @@ RUN rm /usr/local/pgsql/lib/lib*.a # Preprocess the sql_exporter configuration files # ######################################################################################### -FROM $REPOSITORY/$IMAGE:$TAG AS sql_exporter_preprocessor +FROM build-deps AS sql_exporter_preprocessor ARG PG_VERSION USER nonroot +WORKDIR /home/nonroot COPY --chown=nonroot compute compute @@ -1976,7 +1988,7 @@ RUN apt update && \ locales \ lsof \ procps \ - rsyslog \ + rsyslog-gnutls \ screen \ tcpdump \ $VERSION_INSTALLS && \ diff --git a/compute/etc/neon_collector.jsonnet b/compute/etc/neon_collector.jsonnet index e64d907fe4..b712631d71 100644 --- a/compute/etc/neon_collector.jsonnet +++ b/compute/etc/neon_collector.jsonnet @@ -8,6 +8,8 @@ import 'sql_exporter/compute_logical_snapshot_files.libsonnet', import 'sql_exporter/compute_logical_snapshots_bytes.libsonnet', import 'sql_exporter/compute_max_connections.libsonnet', + import 'sql_exporter/compute_pg_oldest_frozen_xid_age.libsonnet', + import 'sql_exporter/compute_pg_oldest_mxid_age.libsonnet', import 'sql_exporter/compute_receive_lsn.libsonnet', import 'sql_exporter/compute_subscriptions_count.libsonnet', import 'sql_exporter/connection_counts.libsonnet', diff --git a/compute/etc/pgbouncer.ini b/compute/etc/pgbouncer.ini index 9d68cbb8d5..fbcdfd4a87 100644 --- a/compute/etc/pgbouncer.ini +++ b/compute/etc/pgbouncer.ini @@ -21,6 +21,8 @@ unix_socket_dir=/tmp/ unix_socket_mode=0777 ; required for pgbouncer_exporter ignore_startup_parameters=extra_float_digits +; pidfile for graceful termination +pidfile=/tmp/pgbouncer.pid ;; Disable connection logging. It produces a lot of logs that no one looks at, ;; and we can get similar log entries from the proxy too. We had incidents in diff --git a/compute/etc/sql_exporter/compute_pg_oldest_frozen_xid_age.libsonnet b/compute/etc/sql_exporter/compute_pg_oldest_frozen_xid_age.libsonnet new file mode 100644 index 0000000000..03d5cf860f --- /dev/null +++ b/compute/etc/sql_exporter/compute_pg_oldest_frozen_xid_age.libsonnet @@ -0,0 +1,13 @@ +{ + metric_name: 'compute_pg_oldest_frozen_xid_age', + type: 'gauge', + help: 'Age of oldest XIDs that have not been frozen by VACUUM. 
An indicator of how long it has been since VACUUM last ran.', + key_labels: [ + 'database_name', + ], + value_label: 'metric', + values: [ + 'frozen_xid_age', + ], + query: importstr 'sql_exporter/compute_pg_oldest_frozen_xid_age.sql', +} diff --git a/compute/etc/sql_exporter/compute_pg_oldest_frozen_xid_age.sql b/compute/etc/sql_exporter/compute_pg_oldest_frozen_xid_age.sql new file mode 100644 index 0000000000..d2281fdd42 --- /dev/null +++ b/compute/etc/sql_exporter/compute_pg_oldest_frozen_xid_age.sql @@ -0,0 +1,4 @@ +SELECT datname database_name, + age(datfrozenxid) frozen_xid_age +FROM pg_database +ORDER BY frozen_xid_age DESC LIMIT 10; diff --git a/compute/etc/sql_exporter/compute_pg_oldest_mxid_age.libsonnet b/compute/etc/sql_exporter/compute_pg_oldest_mxid_age.libsonnet new file mode 100644 index 0000000000..12063a0f71 --- /dev/null +++ b/compute/etc/sql_exporter/compute_pg_oldest_mxid_age.libsonnet @@ -0,0 +1,13 @@ +{ + metric_name: 'compute_pg_oldest_mxid_age', + type: 'gauge', + help: 'Age of oldest MXIDs that have not been replaced by VACUUM. An indicator of how long it has been since VACUUM last ran.', + key_labels: [ + 'database_name', + ], + value_label: 'metric', + values: [ + 'min_mxid_age', + ], + query: importstr 'sql_exporter/compute_pg_oldest_mxid_age.sql', +} diff --git a/compute/etc/sql_exporter/compute_pg_oldest_mxid_age.sql b/compute/etc/sql_exporter/compute_pg_oldest_mxid_age.sql new file mode 100644 index 0000000000..ed57894b3a --- /dev/null +++ b/compute/etc/sql_exporter/compute_pg_oldest_mxid_age.sql @@ -0,0 +1,4 @@ +SELECT datname database_name, + mxid_age(datminmxid) min_mxid_age +FROM pg_database +ORDER BY min_mxid_age DESC LIMIT 10; diff --git a/compute/manifest.schema.json b/compute/manifest.schema.json new file mode 100644 index 0000000000..a25055b45a --- /dev/null +++ b/compute/manifest.schema.json @@ -0,0 +1,209 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Neon Compute Manifest Schema", + "description": "Schema for Neon compute node configuration manifest", + "type": "object", + "properties": { + "pg_settings": { + "type": "object", + "properties": { + "common": { + "type": "object", + "properties": { + "client_connection_check_interval": { + "type": "string", + "description": "Check for client disconnection interval in milliseconds" + }, + "effective_io_concurrency": { + "type": "string", + "description": "Effective IO concurrency setting" + }, + "fsync": { + "type": "string", + "enum": ["on", "off"], + "description": "Whether to force fsync to disk" + }, + "hot_standby": { + "type": "string", + "enum": ["on", "off"], + "description": "Whether hot standby is enabled" + }, + "idle_in_transaction_session_timeout": { + "type": "string", + "description": "Timeout for idle transactions in milliseconds" + }, + "listen_addresses": { + "type": "string", + "description": "Addresses to listen on" + }, + "log_connections": { + "type": "string", + "enum": ["on", "off"], + "description": "Whether to log connections" + }, + "log_disconnections": { + "type": "string", + "enum": ["on", "off"], + "description": "Whether to log disconnections" + }, + "log_temp_files": { + "type": "string", + "description": "Size threshold for logging temporary files in KB" + }, + "log_error_verbosity": { + "type": "string", + "enum": ["terse", "verbose", "default"], + "description": "Error logging verbosity level" + }, + "log_min_error_statement": { + "type": "string", + "description": "Minimum error level for statement logging" + }, + 
"maintenance_io_concurrency": { + "type": "string", + "description": "Maintenance IO concurrency setting" + }, + "max_connections": { + "type": "string", + "description": "Maximum number of connections" + }, + "max_replication_flush_lag": { + "type": "string", + "description": "Maximum replication flush lag" + }, + "max_replication_slots": { + "type": "string", + "description": "Maximum number of replication slots" + }, + "max_replication_write_lag": { + "type": "string", + "description": "Maximum replication write lag" + }, + "max_wal_senders": { + "type": "string", + "description": "Maximum number of WAL senders" + }, + "max_wal_size": { + "type": "string", + "description": "Maximum WAL size" + }, + "neon.unstable_extensions": { + "type": "string", + "description": "List of unstable extensions" + }, + "neon.protocol_version": { + "type": "string", + "description": "Neon protocol version" + }, + "password_encryption": { + "type": "string", + "description": "Password encryption method" + }, + "restart_after_crash": { + "type": "string", + "enum": ["on", "off"], + "description": "Whether to restart after crash" + }, + "superuser_reserved_connections": { + "type": "string", + "description": "Number of reserved connections for superuser" + }, + "synchronous_standby_names": { + "type": "string", + "description": "Names of synchronous standby servers" + }, + "wal_keep_size": { + "type": "string", + "description": "WAL keep size" + }, + "wal_level": { + "type": "string", + "description": "WAL level" + }, + "wal_log_hints": { + "type": "string", + "enum": ["on", "off"], + "description": "Whether to log hints in WAL" + }, + "wal_sender_timeout": { + "type": "string", + "description": "WAL sender timeout in milliseconds" + } + }, + "required": [ + "client_connection_check_interval", + "effective_io_concurrency", + "fsync", + "hot_standby", + "idle_in_transaction_session_timeout", + "listen_addresses", + "log_connections", + "log_disconnections", + "log_temp_files", + "log_error_verbosity", + "log_min_error_statement", + "maintenance_io_concurrency", + "max_connections", + "max_replication_flush_lag", + "max_replication_slots", + "max_replication_write_lag", + "max_wal_senders", + "max_wal_size", + "neon.unstable_extensions", + "neon.protocol_version", + "password_encryption", + "restart_after_crash", + "superuser_reserved_connections", + "synchronous_standby_names", + "wal_keep_size", + "wal_level", + "wal_log_hints", + "wal_sender_timeout" + ] + }, + "replica": { + "type": "object", + "properties": { + "hot_standby": { + "type": "string", + "enum": ["on", "off"], + "description": "Whether hot standby is enabled for replicas" + } + }, + "required": ["hot_standby"] + }, + "per_version": { + "type": "object", + "patternProperties": { + "^1[4-7]$": { + "type": "object", + "properties": { + "common": { + "type": "object", + "properties": { + "io_combine_limit": { + "type": "string", + "description": "IO combine limit" + } + } + }, + "replica": { + "type": "object", + "properties": { + "recovery_prefetch": { + "type": "string", + "enum": ["on", "off"], + "description": "Whether to enable recovery prefetch for PostgreSQL replicas" + } + } + } + } + } + } + } + }, + "required": ["common", "replica", "per_version"] + } + }, + "required": ["pg_settings"] +} diff --git a/compute/manifest.yaml b/compute/manifest.yaml index f1cd20c497..4425241d8a 100644 --- a/compute/manifest.yaml +++ b/compute/manifest.yaml @@ -105,17 +105,17 @@ pg_settings: # Neon hot standby ignores pages that are not in the shared_buffers 
recovery_prefetch: "off" 16: - common: + common: {} replica: # prefetching of blocks referenced in WAL doesn't make sense for us # Neon hot standby ignores pages that are not in the shared_buffers recovery_prefetch: "off" 15: - common: + common: {} replica: # prefetching of blocks referenced in WAL doesn't make sense for us # Neon hot standby ignores pages that are not in the shared_buffers recovery_prefetch: "off" 14: - common: - replica: + common: {} + replica: {} diff --git a/compute/package-lock.json b/compute/package-lock.json new file mode 100644 index 0000000000..693a37cfcb --- /dev/null +++ b/compute/package-lock.json @@ -0,0 +1,37 @@ +{ + "name": "neon-compute", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "neon-compute", + "dependencies": { + "@sourcemeta/jsonschema": "9.3.4" + } + }, + "node_modules/@sourcemeta/jsonschema": { + "version": "9.3.4", + "resolved": "https://registry.npmjs.org/@sourcemeta/jsonschema/-/jsonschema-9.3.4.tgz", + "integrity": "sha512-hkujfkZAIGXUs4U//We9faZW8LZ4/H9LqagRYsFSulH/VLcKPNhZyCTGg7AhORuzm27zqENvKpnX4g2FzudYFw==", + "cpu": [ + "x64", + "arm64" + ], + "license": "AGPL-3.0", + "os": [ + "darwin", + "linux", + "win32" + ], + "bin": { + "jsonschema": "cli.js" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sourcemeta" + } + } + } +} diff --git a/compute/package.json b/compute/package.json new file mode 100644 index 0000000000..581384dc13 --- /dev/null +++ b/compute/package.json @@ -0,0 +1,7 @@ +{ + "name": "neon-compute", + "private": true, + "dependencies": { + "@sourcemeta/jsonschema": "9.3.4" + } +} \ No newline at end of file diff --git a/compute/patches/anon_v2.patch b/compute/patches/anon_v2.patch index e833a6dfd3..4faf927e39 100644 --- a/compute/patches/anon_v2.patch +++ b/compute/patches/anon_v2.patch @@ -1,8 +1,8 @@ diff --git a/sql/anon.sql b/sql/anon.sql -index 0cdc769..f6cc950 100644 +index 0cdc769..b450327 100644 --- a/sql/anon.sql +++ b/sql/anon.sql -@@ -1141,3 +1141,8 @@ $$ +@@ -1141,3 +1141,15 @@ $$ -- TODO : https://en.wikipedia.org/wiki/L-diversity -- TODO : https://en.wikipedia.org/wiki/T-closeness @@ -11,6 +11,13 @@ index 0cdc769..f6cc950 100644 + +GRANT ALL ON SCHEMA anon to neon_superuser; +GRANT ALL ON ALL TABLES IN SCHEMA anon TO neon_superuser; ++ ++DO $$ ++BEGIN ++ IF current_setting('server_version_num')::int >= 150000 THEN ++ GRANT SET ON PARAMETER anon.transparent_dynamic_masking TO neon_superuser; ++ END IF; ++END $$; diff --git a/sql/init.sql b/sql/init.sql index 7da6553..9b6164b 100644 --- a/sql/init.sql diff --git a/compute/patches/onnxruntime.patch b/compute/patches/onnxruntime.patch new file mode 100644 index 0000000000..2347547e73 --- /dev/null +++ b/compute/patches/onnxruntime.patch @@ -0,0 +1,15 @@ +diff --git a/cmake/deps.txt b/cmake/deps.txt +index d213b09034..229de2ebf0 100644 +--- a/cmake/deps.txt ++++ b/cmake/deps.txt +@@ -22,7 +22,9 @@ dlpack;https://github.com/dmlc/dlpack/archive/refs/tags/v0.6.zip;4d565dd2e5b3132 + # it contains changes on top of 3.4.0 which are required to fix build issues. + # Until the 3.4.1 release this is the best option we have. 
+ # Issue link: https://gitlab.com/libeigen/eigen/-/issues/2744 +-eigen;https://gitlab.com/libeigen/eigen/-/archive/e7248b26a1ed53fa030c5c459f7ea095dfd276ac/eigen-e7248b26a1ed53fa030c5c459f7ea095dfd276ac.zip;be8be39fdbc6e60e94fa7870b280707069b5b81a ++# Moved to github mirror to avoid gitlab issues. ++# Issue link: https://github.com/bazelbuild/bazel-central-registry/issues/4355 ++eigen;https://github.com/eigen-mirror/eigen/archive/e7248b26a1ed53fa030c5c459f7ea095dfd276ac/eigen-e7248b26a1ed53fa030c5c459f7ea095dfd276ac.zip;61418a349000ba7744a3ad03cf5071f22ebf860a + flatbuffers;https://github.com/google/flatbuffers/archive/refs/tags/v23.5.26.zip;59422c3b5e573dd192fead2834d25951f1c1670c + fp16;https://github.com/Maratyszcza/FP16/archive/0a92994d729ff76a58f692d3028ca1b64b145d91.zip;b985f6985a05a1c03ff1bb71190f66d8f98a1494 + fxdiv;https://github.com/Maratyszcza/FXdiv/archive/63058eff77e11aa15bf531df5dd34395ec3017c8.zip;a5658f4036402dbca7cebee32be57fb8149811e1 diff --git a/compute/patches/pgaudit-parallel_workers-v14.patch b/compute/patches/pgaudit-parallel_workers-v14.patch new file mode 100644 index 0000000000..5517d3105b --- /dev/null +++ b/compute/patches/pgaudit-parallel_workers-v14.patch @@ -0,0 +1,143 @@ +commit 7220bb3a3f23fa27207d77562dcc286f9a123313 +Author: Tristan Partin +Date: 2025-06-23 02:09:31 +0000 + + Disable logging in parallel workers + + When a query uses parallel workers, pgaudit will log the same query for + every parallel worker. This is undesireable since it can result in log + amplification for queries that use parallel workers.
(!internalStatement && !IsParallelWorker()) + { + /* Push the audit even onto the stack */ + stackItem = stack_push(); +@@ -1384,7 +1385,7 @@ pgaudit_ExecutorCheckPerms_hook(List *rangeTabls, bool abort) + + /* Log DML if the audit role is valid or session logging is enabled */ + if ((auditOid != InvalidOid || auditLogBitmap != 0) && +- !IsAbortedTransactionBlockState()) ++ !IsAbortedTransactionBlockState() && !IsParallelWorker()) + { + /* If auditLogRows is on, wait for rows processed to be set */ + if (auditLogRows && auditEventStack != NULL) +@@ -1438,7 +1439,7 @@ pgaudit_ExecutorRun_hook(QueryDesc *queryDesc, ScanDirection direction, uint64 c + else + standard_ExecutorRun(queryDesc, direction, count, execute_once); + +- if (auditLogRows && !internalStatement) ++ if (auditLogRows && !internalStatement && !IsParallelWorker()) + { + /* Find an item from the stack by the query memory context */ + stackItem = stack_find_context(queryDesc->estate->es_query_cxt); +@@ -1458,7 +1459,7 @@ pgaudit_ExecutorEnd_hook(QueryDesc *queryDesc) + AuditEventStackItem *stackItem = NULL; + AuditEventStackItem *auditEventStackFull = NULL; + +- if (auditLogRows && !internalStatement) ++ if (auditLogRows && !internalStatement && !IsParallelWorker()) + { + /* Find an item from the stack by the query memory context */ + stackItem = stack_find_context(queryDesc->estate->es_query_cxt); +diff --git a/sql/pgaudit.sql b/sql/pgaudit.sql +index cc1374a..1870a60 100644 +--- a/sql/pgaudit.sql ++++ b/sql/pgaudit.sql +@@ -1612,6 +1612,36 @@ COMMIT; + + DROP TABLE part_test; + ++-- ++-- Test logging in parallel workers ++SET pgaudit.log = 'read'; ++SET pgaudit.log_client = on; ++SET pgaudit.log_level = 'notice'; ++ ++-- Force parallel execution for testing ++SET max_parallel_workers_per_gather = 2; ++SET parallel_tuple_cost = 0; ++SET parallel_setup_cost = 0; ++SET min_parallel_table_scan_size = 0; ++SET min_parallel_index_scan_size = 0; ++ ++-- Create table with enough data to trigger parallel execution ++CREATE TABLE parallel_test (id int, data text); ++INSERT INTO parallel_test SELECT generate_series(1, 1000), 'test data'; ++ ++SELECT count(*) FROM parallel_test; ++ ++-- Cleanup parallel test ++DROP TABLE parallel_test; ++RESET max_parallel_workers_per_gather; ++RESET parallel_tuple_cost; ++RESET parallel_setup_cost; ++RESET min_parallel_table_scan_size; ++RESET min_parallel_index_scan_size; ++RESET pgaudit.log; ++RESET pgaudit.log_client; ++RESET pgaudit.log_level; ++ + -- Cleanup + -- Set client_min_messages up to warning to avoid noise + SET client_min_messages = 'warning'; diff --git a/compute/patches/pgaudit-parallel_workers-v15.patch b/compute/patches/pgaudit-parallel_workers-v15.patch new file mode 100644 index 0000000000..6dfffbd0dd --- /dev/null +++ b/compute/patches/pgaudit-parallel_workers-v15.patch @@ -0,0 +1,143 @@ +commit 29dc2847f6255541992f18faf8a815dfab79631a +Author: Tristan Partin +Date: 2025-06-23 02:09:31 +0000 + + Disable logging in parallel workers + + When a query uses parallel workers, pgaudit will log the same query for + every parallel worker. This is undesireable since it can result in log + amplification for queries that use parallel workers. 
+ + Signed-off-by: Tristan Partin + +diff --git a/expected/pgaudit.out b/expected/pgaudit.out +index b22560b..73f0327 100644 +--- a/expected/pgaudit.out ++++ b/expected/pgaudit.out +@@ -2563,6 +2563,37 @@ COMMIT; + NOTICE: AUDIT: SESSION,12,4,MISC,COMMIT,,,COMMIT;, + DROP TABLE part_test; + NOTICE: AUDIT: SESSION,13,1,DDL,DROP TABLE,,,DROP TABLE part_test;, ++-- ++-- Test logging in parallel workers ++SET pgaudit.log = 'read'; ++SET pgaudit.log_client = on; ++SET pgaudit.log_level = 'notice'; ++-- Force parallel execution for testing ++SET max_parallel_workers_per_gather = 2; ++SET parallel_tuple_cost = 0; ++SET parallel_setup_cost = 0; ++SET min_parallel_table_scan_size = 0; ++SET min_parallel_index_scan_size = 0; ++-- Create table with enough data to trigger parallel execution ++CREATE TABLE parallel_test (id int, data text); ++INSERT INTO parallel_test SELECT generate_series(1, 1000), 'test data'; ++SELECT count(*) FROM parallel_test; ++NOTICE: AUDIT: SESSION,14,1,READ,SELECT,,,SELECT count(*) FROM parallel_test;, ++ count ++------- ++ 1000 ++(1 row) ++ ++-- Cleanup parallel test ++DROP TABLE parallel_test; ++RESET max_parallel_workers_per_gather; ++RESET parallel_tuple_cost; ++RESET parallel_setup_cost; ++RESET min_parallel_table_scan_size; ++RESET min_parallel_index_scan_size; ++RESET pgaudit.log; ++RESET pgaudit.log_client; ++RESET pgaudit.log_level; + -- Cleanup + -- Set client_min_messages up to warning to avoid noise + SET client_min_messages = 'warning'; +diff --git a/pgaudit.c b/pgaudit.c +index 5e6fd38..ac9ded2 100644 +--- a/pgaudit.c ++++ b/pgaudit.c +@@ -11,6 +11,7 @@ + #include "postgres.h" + + #include "access/htup_details.h" ++#include "access/parallel.h" + #include "access/sysattr.h" + #include "access/xact.h" + #include "access/relation.h" +@@ -1303,7 +1304,7 @@ pgaudit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags) + { + AuditEventStackItem *stackItem = NULL; + +- if (!internalStatement) ++ if (!internalStatement && !IsParallelWorker()) + { + /* Push the audit even onto the stack */ + stackItem = stack_push(); +@@ -1384,7 +1385,7 @@ pgaudit_ExecutorCheckPerms_hook(List *rangeTabls, bool abort) + + /* Log DML if the audit role is valid or session logging is enabled */ + if ((auditOid != InvalidOid || auditLogBitmap != 0) && +- !IsAbortedTransactionBlockState()) ++ !IsAbortedTransactionBlockState() && !IsParallelWorker()) + { + /* If auditLogRows is on, wait for rows processed to be set */ + if (auditLogRows && auditEventStack != NULL) +@@ -1438,7 +1439,7 @@ pgaudit_ExecutorRun_hook(QueryDesc *queryDesc, ScanDirection direction, uint64 c + else + standard_ExecutorRun(queryDesc, direction, count, execute_once); + +- if (auditLogRows && !internalStatement) ++ if (auditLogRows && !internalStatement && !IsParallelWorker()) + { + /* Find an item from the stack by the query memory context */ + stackItem = stack_find_context(queryDesc->estate->es_query_cxt); +@@ -1458,7 +1459,7 @@ pgaudit_ExecutorEnd_hook(QueryDesc *queryDesc) + AuditEventStackItem *stackItem = NULL; + AuditEventStackItem *auditEventStackFull = NULL; + +- if (auditLogRows && !internalStatement) ++ if (auditLogRows && !internalStatement && !IsParallelWorker()) + { + /* Find an item from the stack by the query memory context */ + stackItem = stack_find_context(queryDesc->estate->es_query_cxt); +diff --git a/sql/pgaudit.sql b/sql/pgaudit.sql +index 8052426..7f0667b 100644 +--- a/sql/pgaudit.sql ++++ b/sql/pgaudit.sql +@@ -1612,6 +1612,36 @@ COMMIT; + + DROP TABLE part_test; + ++-- ++-- Test logging in 
parallel workers ++SET pgaudit.log = 'read'; ++SET pgaudit.log_client = on; ++SET pgaudit.log_level = 'notice'; ++ ++-- Force parallel execution for testing ++SET max_parallel_workers_per_gather = 2; ++SET parallel_tuple_cost = 0; ++SET parallel_setup_cost = 0; ++SET min_parallel_table_scan_size = 0; ++SET min_parallel_index_scan_size = 0; ++ ++-- Create table with enough data to trigger parallel execution ++CREATE TABLE parallel_test (id int, data text); ++INSERT INTO parallel_test SELECT generate_series(1, 1000), 'test data'; ++ ++SELECT count(*) FROM parallel_test; ++ ++-- Cleanup parallel test ++DROP TABLE parallel_test; ++RESET max_parallel_workers_per_gather; ++RESET parallel_tuple_cost; ++RESET parallel_setup_cost; ++RESET min_parallel_table_scan_size; ++RESET min_parallel_index_scan_size; ++RESET pgaudit.log; ++RESET pgaudit.log_client; ++RESET pgaudit.log_level; ++ + -- Cleanup + -- Set client_min_messages up to warning to avoid noise + SET client_min_messages = 'warning'; diff --git a/compute/patches/pgaudit-parallel_workers-v16.patch b/compute/patches/pgaudit-parallel_workers-v16.patch new file mode 100644 index 0000000000..6b8b276b7b --- /dev/null +++ b/compute/patches/pgaudit-parallel_workers-v16.patch @@ -0,0 +1,143 @@ +commit cc708dde7ef2af2a8120d757102d2e34c0463a0f +Author: Tristan Partin +Date: 2025-06-23 02:09:31 +0000 + + Disable logging in parallel workers + + When a query uses parallel workers, pgaudit will log the same query for + every parallel worker. This is undesireable since it can result in log + amplification for queries that use parallel workers. + + Signed-off-by: Tristan Partin + +diff --git a/expected/pgaudit.out b/expected/pgaudit.out +index 8772054..9b66ac6 100644 +--- a/expected/pgaudit.out ++++ b/expected/pgaudit.out +@@ -2556,6 +2556,37 @@ DROP SERVER fdw_server; + NOTICE: AUDIT: SESSION,11,1,DDL,DROP SERVER,,,DROP SERVER fdw_server;, + DROP EXTENSION postgres_fdw; + NOTICE: AUDIT: SESSION,12,1,DDL,DROP EXTENSION,,,DROP EXTENSION postgres_fdw;, ++-- ++-- Test logging in parallel workers ++SET pgaudit.log = 'read'; ++SET pgaudit.log_client = on; ++SET pgaudit.log_level = 'notice'; ++-- Force parallel execution for testing ++SET max_parallel_workers_per_gather = 2; ++SET parallel_tuple_cost = 0; ++SET parallel_setup_cost = 0; ++SET min_parallel_table_scan_size = 0; ++SET min_parallel_index_scan_size = 0; ++-- Create table with enough data to trigger parallel execution ++CREATE TABLE parallel_test (id int, data text); ++INSERT INTO parallel_test SELECT generate_series(1, 1000), 'test data'; ++SELECT count(*) FROM parallel_test; ++NOTICE: AUDIT: SESSION,13,1,READ,SELECT,,,SELECT count(*) FROM parallel_test;, ++ count ++------- ++ 1000 ++(1 row) ++ ++-- Cleanup parallel test ++DROP TABLE parallel_test; ++RESET max_parallel_workers_per_gather; ++RESET parallel_tuple_cost; ++RESET parallel_setup_cost; ++RESET min_parallel_table_scan_size; ++RESET min_parallel_index_scan_size; ++RESET pgaudit.log; ++RESET pgaudit.log_client; ++RESET pgaudit.log_level; + -- Cleanup + -- Set client_min_messages up to warning to avoid noise + SET client_min_messages = 'warning'; +diff --git a/pgaudit.c b/pgaudit.c +index 004d1f9..f061164 100644 +--- a/pgaudit.c ++++ b/pgaudit.c +@@ -11,6 +11,7 @@ + #include "postgres.h" + + #include "access/htup_details.h" ++#include "access/parallel.h" + #include "access/sysattr.h" + #include "access/xact.h" + #include "access/relation.h" +@@ -1339,7 +1340,7 @@ pgaudit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags) + { + 
AuditEventStackItem *stackItem = NULL; + +- if (!internalStatement) ++ if (!internalStatement && !IsParallelWorker()) + { + /* Push the audit even onto the stack */ + stackItem = stack_push(); +@@ -1420,7 +1421,7 @@ pgaudit_ExecutorCheckPerms_hook(List *rangeTabls, List *permInfos, bool abort) + + /* Log DML if the audit role is valid or session logging is enabled */ + if ((auditOid != InvalidOid || auditLogBitmap != 0) && +- !IsAbortedTransactionBlockState()) ++ !IsAbortedTransactionBlockState() && !IsParallelWorker()) + { + /* If auditLogRows is on, wait for rows processed to be set */ + if (auditLogRows && auditEventStack != NULL) +@@ -1475,7 +1476,7 @@ pgaudit_ExecutorRun_hook(QueryDesc *queryDesc, ScanDirection direction, uint64 c + else + standard_ExecutorRun(queryDesc, direction, count, execute_once); + +- if (auditLogRows && !internalStatement) ++ if (auditLogRows && !internalStatement && !IsParallelWorker()) + { + /* Find an item from the stack by the query memory context */ + stackItem = stack_find_context(queryDesc->estate->es_query_cxt); +@@ -1495,7 +1496,7 @@ pgaudit_ExecutorEnd_hook(QueryDesc *queryDesc) + AuditEventStackItem *stackItem = NULL; + AuditEventStackItem *auditEventStackFull = NULL; + +- if (auditLogRows && !internalStatement) ++ if (auditLogRows && !internalStatement && !IsParallelWorker()) + { + /* Find an item from the stack by the query memory context */ + stackItem = stack_find_context(queryDesc->estate->es_query_cxt); +diff --git a/sql/pgaudit.sql b/sql/pgaudit.sql +index 6aae88b..de6d7fd 100644 +--- a/sql/pgaudit.sql ++++ b/sql/pgaudit.sql +@@ -1631,6 +1631,36 @@ DROP USER MAPPING FOR regress_user1 SERVER fdw_server; + DROP SERVER fdw_server; + DROP EXTENSION postgres_fdw; + ++-- ++-- Test logging in parallel workers ++SET pgaudit.log = 'read'; ++SET pgaudit.log_client = on; ++SET pgaudit.log_level = 'notice'; ++ ++-- Force parallel execution for testing ++SET max_parallel_workers_per_gather = 2; ++SET parallel_tuple_cost = 0; ++SET parallel_setup_cost = 0; ++SET min_parallel_table_scan_size = 0; ++SET min_parallel_index_scan_size = 0; ++ ++-- Create table with enough data to trigger parallel execution ++CREATE TABLE parallel_test (id int, data text); ++INSERT INTO parallel_test SELECT generate_series(1, 1000), 'test data'; ++ ++SELECT count(*) FROM parallel_test; ++ ++-- Cleanup parallel test ++DROP TABLE parallel_test; ++RESET max_parallel_workers_per_gather; ++RESET parallel_tuple_cost; ++RESET parallel_setup_cost; ++RESET min_parallel_table_scan_size; ++RESET min_parallel_index_scan_size; ++RESET pgaudit.log; ++RESET pgaudit.log_client; ++RESET pgaudit.log_level; ++ + -- Cleanup + -- Set client_min_messages up to warning to avoid noise + SET client_min_messages = 'warning'; diff --git a/compute/patches/pgaudit-parallel_workers-v17.patch b/compute/patches/pgaudit-parallel_workers-v17.patch new file mode 100644 index 0000000000..f99be10c60 --- /dev/null +++ b/compute/patches/pgaudit-parallel_workers-v17.patch @@ -0,0 +1,143 @@ +commit 8d02e4c6c5e1e8676251b0717a46054267091cb4 +Author: Tristan Partin +Date: 2025-06-23 02:09:31 +0000 + + Disable logging in parallel workers + + When a query uses parallel workers, pgaudit will log the same query for + every parallel worker. This is undesireable since it can result in log + amplification for queries that use parallel workers. 
+ + Signed-off-by: Tristan Partin + +diff --git a/expected/pgaudit.out b/expected/pgaudit.out +index d696287..4b1059a 100644 +--- a/expected/pgaudit.out ++++ b/expected/pgaudit.out +@@ -2568,6 +2568,37 @@ DROP SERVER fdw_server; + NOTICE: AUDIT: SESSION,11,1,DDL,DROP SERVER,,,DROP SERVER fdw_server, + DROP EXTENSION postgres_fdw; + NOTICE: AUDIT: SESSION,12,1,DDL,DROP EXTENSION,,,DROP EXTENSION postgres_fdw, ++-- ++-- Test logging in parallel workers ++SET pgaudit.log = 'read'; ++SET pgaudit.log_client = on; ++SET pgaudit.log_level = 'notice'; ++-- Force parallel execution for testing ++SET max_parallel_workers_per_gather = 2; ++SET parallel_tuple_cost = 0; ++SET parallel_setup_cost = 0; ++SET min_parallel_table_scan_size = 0; ++SET min_parallel_index_scan_size = 0; ++-- Create table with enough data to trigger parallel execution ++CREATE TABLE parallel_test (id int, data text); ++INSERT INTO parallel_test SELECT generate_series(1, 1000), 'test data'; ++SELECT count(*) FROM parallel_test; ++NOTICE: AUDIT: SESSION,13,1,READ,SELECT,,,SELECT count(*) FROM parallel_test, ++ count ++------- ++ 1000 ++(1 row) ++ ++-- Cleanup parallel test ++DROP TABLE parallel_test; ++RESET max_parallel_workers_per_gather; ++RESET parallel_tuple_cost; ++RESET parallel_setup_cost; ++RESET min_parallel_table_scan_size; ++RESET min_parallel_index_scan_size; ++RESET pgaudit.log; ++RESET pgaudit.log_client; ++RESET pgaudit.log_level; + -- Cleanup + -- Set client_min_messages up to warning to avoid noise + SET client_min_messages = 'warning'; +diff --git a/pgaudit.c b/pgaudit.c +index 1764af1..0e48875 100644 +--- a/pgaudit.c ++++ b/pgaudit.c +@@ -11,6 +11,7 @@ + #include "postgres.h" + + #include "access/htup_details.h" ++#include "access/parallel.h" + #include "access/sysattr.h" + #include "access/xact.h" + #include "access/relation.h" +@@ -1406,7 +1407,7 @@ pgaudit_ExecutorStart_hook(QueryDesc *queryDesc, int eflags) + { + AuditEventStackItem *stackItem = NULL; + +- if (!internalStatement) ++ if (!internalStatement && !IsParallelWorker()) + { + /* Push the audit event onto the stack */ + stackItem = stack_push(); +@@ -1489,7 +1490,7 @@ pgaudit_ExecutorCheckPerms_hook(List *rangeTabls, List *permInfos, bool abort) + + /* Log DML if the audit role is valid or session logging is enabled */ + if ((auditOid != InvalidOid || auditLogBitmap != 0) && +- !IsAbortedTransactionBlockState()) ++ !IsAbortedTransactionBlockState() && !IsParallelWorker()) + { + /* If auditLogRows is on, wait for rows processed to be set */ + if (auditLogRows && auditEventStack != NULL) +@@ -1544,7 +1545,7 @@ pgaudit_ExecutorRun_hook(QueryDesc *queryDesc, ScanDirection direction, uint64 c + else + standard_ExecutorRun(queryDesc, direction, count, execute_once); + +- if (auditLogRows && !internalStatement) ++ if (auditLogRows && !internalStatement && !IsParallelWorker()) + { + /* Find an item from the stack by the query memory context */ + stackItem = stack_find_context(queryDesc->estate->es_query_cxt); +@@ -1564,7 +1565,7 @@ pgaudit_ExecutorEnd_hook(QueryDesc *queryDesc) + AuditEventStackItem *stackItem = NULL; + AuditEventStackItem *auditEventStackFull = NULL; + +- if (auditLogRows && !internalStatement) ++ if (auditLogRows && !internalStatement && !IsParallelWorker()) + { + /* Find an item from the stack by the query memory context */ + stackItem = stack_find_context(queryDesc->estate->es_query_cxt); +diff --git a/sql/pgaudit.sql b/sql/pgaudit.sql +index e161f01..c873098 100644 +--- a/sql/pgaudit.sql ++++ b/sql/pgaudit.sql +@@ -1637,6 +1637,36 @@ 
DROP USER MAPPING FOR regress_user1 SERVER fdw_server; + DROP SERVER fdw_server; + DROP EXTENSION postgres_fdw; + ++-- ++-- Test logging in parallel workers ++SET pgaudit.log = 'read'; ++SET pgaudit.log_client = on; ++SET pgaudit.log_level = 'notice'; ++ ++-- Force parallel execution for testing ++SET max_parallel_workers_per_gather = 2; ++SET parallel_tuple_cost = 0; ++SET parallel_setup_cost = 0; ++SET min_parallel_table_scan_size = 0; ++SET min_parallel_index_scan_size = 0; ++ ++-- Create table with enough data to trigger parallel execution ++CREATE TABLE parallel_test (id int, data text); ++INSERT INTO parallel_test SELECT generate_series(1, 1000), 'test data'; ++ ++SELECT count(*) FROM parallel_test; ++ ++-- Cleanup parallel test ++DROP TABLE parallel_test; ++RESET max_parallel_workers_per_gather; ++RESET parallel_tuple_cost; ++RESET parallel_setup_cost; ++RESET min_parallel_table_scan_size; ++RESET min_parallel_index_scan_size; ++RESET pgaudit.log; ++RESET pgaudit.log_client; ++RESET pgaudit.log_level; ++ + -- Cleanup + -- Set client_min_messages up to warning to avoid noise + SET client_min_messages = 'warning'; diff --git a/compute/vm-image-spec-bookworm.yaml b/compute/vm-image-spec-bookworm.yaml index 057099994a..267e4c83b5 100644 --- a/compute/vm-image-spec-bookworm.yaml +++ b/compute/vm-image-spec-bookworm.yaml @@ -26,7 +26,7 @@ commands: - name: postgres-exporter user: nobody sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' - name: pgbouncer-exporter user: postgres sysvInitAction: respawn @@ -59,7 +59,7 @@ files: # the rules use ALL as the hostname. Avoid the pointless lookups and the "unable to # resolve host" log messages that they generate. Defaults !fqdn - + # Allow postgres user (which is what compute_ctl runs as) to run /neonvm/bin/resize-swap # and /neonvm/bin/set-disk-quota as root without requiring entering a password (NOPASSWD), # regardless of hostname (ALL) diff --git a/compute/vm-image-spec-bullseye.yaml b/compute/vm-image-spec-bullseye.yaml index d048e20b2e..2b6e77b656 100644 --- a/compute/vm-image-spec-bullseye.yaml +++ b/compute/vm-image-spec-bullseye.yaml @@ -26,7 +26,7 @@ commands: - name: postgres-exporter user: nobody sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' - name: pgbouncer-exporter user: postgres sysvInitAction: respawn @@ -59,7 +59,7 @@ files: # the rules use ALL as the hostname. Avoid the pointless lookups and the "unable to # resolve host" log messages that they generate. 
Defaults !fqdn - + # Allow postgres user (which is what compute_ctl runs as) to run /neonvm/bin/resize-swap # and /neonvm/bin/set-disk-quota as root without requiring entering a password (NOPASSWD), # regardless of hostname (ALL) diff --git a/compute_tools/Cargo.toml b/compute_tools/Cargo.toml index f9da3ba700..1a03022d89 100644 --- a/compute_tools/Cargo.toml +++ b/compute_tools/Cargo.toml @@ -27,6 +27,7 @@ fail.workspace = true flate2.workspace = true futures.workspace = true http.workspace = true +hostname-validator = "1.1" indexmap.workspace = true itertools.workspace = true jsonwebtoken.workspace = true @@ -38,6 +39,7 @@ once_cell.workspace = true opentelemetry.workspace = true opentelemetry_sdk.workspace = true p256 = { version = "0.13", features = ["pem"] } +pageserver_page_api.workspace = true postgres.workspace = true regex.workspace = true reqwest = { workspace = true, features = ["json"] } @@ -53,6 +55,7 @@ tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } tokio-postgres.workspace = true tokio-util.workspace = true tokio-stream.workspace = true +tonic.workspace = true tower-otel.workspace = true tracing.workspace = true tracing-opentelemetry.workspace = true @@ -64,6 +67,7 @@ uuid.workspace = true walkdir.workspace = true x509-cert.workspace = true +postgres_versioninfo.workspace = true postgres_initdb.workspace = true compute_api.workspace = true utils.workspace = true diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs index 8b502a058e..db7746b8eb 100644 --- a/compute_tools/src/bin/compute_ctl.rs +++ b/compute_tools/src/bin/compute_ctl.rs @@ -36,6 +36,8 @@ use std::ffi::OsString; use std::fs::File; use std::process::exit; +use std::sync::Arc; +use std::sync::atomic::AtomicU64; use std::sync::mpsc; use std::thread; use std::time::Duration; @@ -124,6 +126,10 @@ struct Cli { /// Interval in seconds for collecting installed extensions statistics #[arg(long, default_value = "3600")] pub installed_extensions_collection_interval: u64, + + /// Run in development mode, skipping VM-specific operations like process termination + #[arg(long, action = clap::ArgAction::SetTrue)] + pub dev: bool, } impl Cli { @@ -159,7 +165,7 @@ fn main() -> Result<()> { .build()?; let _rt_guard = runtime.enter(); - runtime.block_on(init())?; + runtime.block_on(init(cli.dev))?; // enable core dumping for all child processes setrlimit(Resource::CORE, rlimit::INFINITY, rlimit::INFINITY)?; @@ -186,7 +192,9 @@ fn main() -> Result<()> { cgroup: cli.cgroup, #[cfg(target_os = "linux")] vm_monitor_addr: cli.vm_monitor_addr, - installed_extensions_collection_interval: cli.installed_extensions_collection_interval, + installed_extensions_collection_interval: Arc::new(AtomicU64::new( + cli.installed_extensions_collection_interval, + )), }, config, )?; @@ -198,13 +206,13 @@ fn main() -> Result<()> { deinit_and_exit(exit_code); } -async fn init() -> Result<()> { +async fn init(dev_mode: bool) -> Result<()> { init_tracing_and_logging(DEFAULT_LOG_LEVEL).await?; let mut signals = Signals::new([SIGINT, SIGTERM, SIGQUIT])?; thread::spawn(move || { for sig in signals.forever() { - handle_exit_signal(sig); + handle_exit_signal(sig, dev_mode); } }); @@ -263,9 +271,9 @@ fn deinit_and_exit(exit_code: Option) -> ! { /// When compute_ctl is killed, send also termination signal to sync-safekeepers /// to prevent leakage. TODO: it is better to convert compute_ctl to async and /// wait for termination which would be easy then. 
-fn handle_exit_signal(sig: i32) { +fn handle_exit_signal(sig: i32, dev_mode: bool) { info!("received {sig} termination signal"); - forward_termination_signal(); + forward_termination_signal(dev_mode); exit(1); } diff --git a/compute_tools/src/bin/fast_import.rs b/compute_tools/src/bin/fast_import.rs index e65c210b23..0eca9aba53 100644 --- a/compute_tools/src/bin/fast_import.rs +++ b/compute_tools/src/bin/fast_import.rs @@ -29,7 +29,7 @@ use anyhow::{Context, bail}; use aws_config::BehaviorVersion; use camino::{Utf8Path, Utf8PathBuf}; use clap::{Parser, Subcommand}; -use compute_tools::extension_server::{PostgresMajorVersion, get_pg_version}; +use compute_tools::extension_server::get_pg_version; use nix::unistd::Pid; use std::ops::Not; use tracing::{Instrument, error, info, info_span, warn}; @@ -179,12 +179,8 @@ impl PostgresProcess { .await .context("create pgdata directory")?; - let pg_version = match get_pg_version(self.pgbin.as_ref()) { - PostgresMajorVersion::V14 => 14, - PostgresMajorVersion::V15 => 15, - PostgresMajorVersion::V16 => 16, - PostgresMajorVersion::V17 => 17, - }; + let pg_version = get_pg_version(self.pgbin.as_ref()); + postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs { superuser: initdb_user, locale: DEFAULT_LOCALE, // XXX: this shouldn't be hard-coded, @@ -486,10 +482,8 @@ async fn cmd_pgdata( }; let superuser = "cloud_admin"; - let destination_connstring = format!( - "host=localhost port={} user={} dbname=neondb", - pg_port, superuser - ); + let destination_connstring = + format!("host=localhost port={pg_port} user={superuser} dbname=neondb"); let pgdata_dir = workdir.join("pgdata"); let mut proc = PostgresProcess::new(pgdata_dir.clone(), pg_bin_dir.clone(), pg_lib_dir.clone()); diff --git a/compute_tools/src/bin/fast_import/s3_uri.rs b/compute_tools/src/bin/fast_import/s3_uri.rs index cf4dab7c02..e1a85c73e7 100644 --- a/compute_tools/src/bin/fast_import/s3_uri.rs +++ b/compute_tools/src/bin/fast_import/s3_uri.rs @@ -69,7 +69,7 @@ impl clap::builder::TypedValueParser for S3Uri { S3Uri::from_str(value_str).map_err(|e| { clap::Error::raw( clap::error::ErrorKind::InvalidValue, - format!("Failed to parse S3 URI: {}", e), + format!("Failed to parse S3 URI: {e}"), ) }) } diff --git a/compute_tools/src/catalog.rs b/compute_tools/src/catalog.rs index 082ba62b8e..bc9f64075a 100644 --- a/compute_tools/src/catalog.rs +++ b/compute_tools/src/catalog.rs @@ -22,7 +22,7 @@ pub async fn get_dbs_and_roles(compute: &Arc) -> anyhow::Result { let mut lines = stderr_reader.lines(); if let Some(line) = lines.next_line().await? 
{ - if line.contains(&format!("FATAL: database \"{}\" does not exist", dbname)) { + if line.contains(&format!("FATAL: database \"{dbname}\" does not exist")) { return Err(SchemaDumpError::DatabaseDoesNotExist); } warn!("pg_dump stderr: {}", line) diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index bd6ed910be..f69a0e7c73 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -1,4 +1,4 @@ -use anyhow::{Context, Result}; +use anyhow::{Context, Result, anyhow}; use chrono::{DateTime, Utc}; use compute_api::privilege::Privilege; use compute_api::responses::{ @@ -6,7 +6,8 @@ use compute_api::responses::{ LfcPrewarmState, TlsConfig, }; use compute_api::spec::{ - ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PgIdent, + ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, PageserverConnectionInfo, + PageserverShardConnectionInfo, PgIdent, }; use futures::StreamExt; use futures::future::join_all; @@ -15,26 +16,29 @@ use itertools::Itertools; use nix::sys::signal::{Signal, kill}; use nix::unistd::Pid; use once_cell::sync::Lazy; +use pageserver_page_api::{self as page_api, BaseBackupCompression}; use postgres; use postgres::NoTls; use postgres::error::SqlState; use remote_storage::{DownloadError, RemotePath}; use std::collections::{HashMap, HashSet}; -use std::net::SocketAddr; use std::os::unix::fs::{PermissionsExt, symlink}; use std::path::Path; use std::process::{Command, Stdio}; use std::str::FromStr; -use std::sync::atomic::{AtomicU32, Ordering}; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Condvar, Mutex, RwLock}; use std::time::{Duration, Instant}; use std::{env, fs}; -use tokio::spawn; +use tokio::task::JoinHandle; +use tokio::{spawn, time}; use tracing::{Instrument, debug, error, info, instrument, warn}; use url::Url; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; use utils::measured_stream::MeasuredReader; +use utils::pid_file; +use utils::shard::{ShardCount, ShardIndex, ShardNumber}; use crate::configurator::launch_configurator; use crate::disk_quota::set_disk_quota; @@ -44,6 +48,7 @@ use crate::lsn_lease::launch_lsn_lease_bg_task_for_static; use crate::metrics::COMPUTE_CTL_UP; use crate::monitor::launch_monitor; use crate::pg_helpers::*; +use crate::pgbouncer::*; use crate::rsyslog::{ PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export, launch_pgaudit_gc, @@ -67,6 +72,7 @@ pub static BUILD_TAG: Lazy = Lazy::new(|| { .unwrap_or(BUILD_TAG_DEFAULT) .to_string() }); +const DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL: u64 = 3600; /// Static configuration params that don't change after startup. These mostly /// come from the CLI args, or are derived from them. @@ -100,9 +106,11 @@ pub struct ComputeNodeParams { pub remote_ext_base_url: Option, /// Interval for installed extensions collection - pub installed_extensions_collection_interval: u64, + pub installed_extensions_collection_interval: Arc, } +type TaskHandle = Mutex>>; + /// Compute node info shared across several `compute_ctl` threads. pub struct ComputeNode { pub params: ComputeNodeParams, @@ -123,6 +131,10 @@ pub struct ComputeNode { // key: ext_archive_name, value: started download time, download_completed? 
pub ext_download_progress: RwLock, bool)>>, pub compute_ctl_config: ComputeCtlConfig, + + /// Handle to the extension stats collection task + extension_stats_task: TaskHandle, + lfc_offload_task: TaskHandle, } // store some metrics about download size that might impact startup time @@ -161,6 +173,10 @@ pub struct ComputeState { pub lfc_prewarm_state: LfcPrewarmState, pub lfc_offload_state: LfcOffloadState, + /// WAL flush LSN that is set after terminating Postgres and syncing safekeepers if + /// mode == ComputeMode::Primary. None otherwise + pub terminate_flush_lsn: Option, + pub metrics: ComputeMetrics, } @@ -176,6 +192,7 @@ impl ComputeState { metrics: ComputeMetrics::default(), lfc_prewarm_state: LfcPrewarmState::default(), lfc_offload_state: LfcOffloadState::default(), + terminate_flush_lsn: None, } } @@ -208,13 +225,74 @@ pub struct ParsedSpec { pub spec: ComputeSpec, pub tenant_id: TenantId, pub timeline_id: TimelineId, - pub pageserver_connstr: String, + pub pageserver_conninfo: PageserverConnectionInfo, pub safekeeper_connstrings: Vec, pub storage_auth_token: Option, - pub endpoint_storage_addr: Option, + /// k8s dns name and port + pub endpoint_storage_addr: Option, pub endpoint_storage_token: Option, } +impl ParsedSpec { + pub fn validate(&self) -> Result<(), String> { + // Only Primary nodes are using safekeeper_connstrings, and at the moment + // this method only validates that part of the specs. + if self.spec.mode != ComputeMode::Primary { + return Ok(()); + } + + // While it seems like a good idea to check for an odd number of entries in + // the safekeepers connection string, changes to the list of safekeepers might + // incur appending a new server to a list of 3, in which case a list of 4 + // entries is okay in production. + // + // Still we want unique entries, and at least one entry in the vector + if self.safekeeper_connstrings.is_empty() { + return Err(String::from("safekeeper_connstrings is empty")); + } + + // check for uniqueness of the connection strings in the set + let mut connstrings = self.safekeeper_connstrings.clone(); + + connstrings.sort(); + let mut previous = &connstrings[0]; + + for current in connstrings.iter().skip(1) { + // duplicate entry? + if current == previous { + return Err(format!( + "duplicate entry in safekeeper_connstrings: {current}!", + )); + } + + previous = current; + } + + Ok(()) + } +} + +fn extract_pageserver_conninfo_from_guc( + pageserver_connstring_guc: &str, +) -> PageserverConnectionInfo { + PageserverConnectionInfo { + shards: pageserver_connstring_guc + .split(',') + .enumerate() + .map(|(i, connstr)| { + ( + i as u32, + PageserverShardConnectionInfo { + libpq_url: Some(connstr.to_string()), + grpc_url: None, + }, + ) + }) + .collect(), + prefer_grpc: false, + } +} + impl TryFrom for ParsedSpec { type Error = String; fn try_from(spec: ComputeSpec) -> Result { @@ -224,11 +302,17 @@ impl TryFrom for ParsedSpec { // For backwards-compatibility, the top-level fields in the spec file // may be empty. In that case, we need to dig them from the GUCs in the // cluster.settings field. 
- let pageserver_connstr = spec - .pageserver_connstring - .clone() - .or_else(|| spec.cluster.settings.find("neon.pageserver_connstring")) - .ok_or("pageserver connstr should be provided")?; + let pageserver_conninfo = match &spec.pageserver_connection_info { + Some(x) => x.clone(), + None => { + if let Some(guc) = spec.cluster.settings.find("neon.pageserver_connstring") { + extract_pageserver_conninfo_from_guc(&guc) + } else { + return Err("pageserver connstr should be provided".to_string()); + } + } + }; + let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() { if matches!(spec.mode, ComputeMode::Primary) { spec.cluster @@ -244,6 +328,7 @@ impl TryFrom for ParsedSpec { } else { spec.safekeeper_connstrings.clone() }; + let storage_auth_token = spec.storage_auth_token.clone(); let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id { tenant_id @@ -266,28 +351,29 @@ impl TryFrom for ParsedSpec { .or(Err("invalid timeline id"))? }; - let endpoint_storage_addr: Option = spec + let endpoint_storage_addr: Option = spec .endpoint_storage_addr .clone() - .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr")) - .unwrap_or_default() - .parse() - .ok(); + .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr")); let endpoint_storage_token = spec .endpoint_storage_token .clone() .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_token")); - Ok(ParsedSpec { + let res = ParsedSpec { spec, - pageserver_connstr, + pageserver_conninfo, safekeeper_connstrings, storage_auth_token, tenant_id, timeline_id, endpoint_storage_addr, endpoint_storage_token, - }) + }; + + // Now check validity of the parsed specification + res.validate()?; + Ok(res) } } @@ -314,7 +400,7 @@ fn maybe_cgexec(cmd: &str) -> Command { struct PostgresHandle { postgres: std::process::Child, - log_collector: tokio::task::JoinHandle>, + log_collector: JoinHandle>, } impl PostgresHandle { @@ -328,7 +414,7 @@ struct StartVmMonitorResult { #[cfg(target_os = "linux")] token: tokio_util::sync::CancellationToken, #[cfg(target_os = "linux")] - vm_monitor: Option>>, + vm_monitor: Option>>, } impl ComputeNode { @@ -354,14 +440,11 @@ impl ComputeNode { // that can affect `compute_ctl` and prevent it from properly configuring the database schema. // Unset them via connection string options before connecting to the database. // N.B. keep it in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`. - // - // TODO(ololobus): we currently pass `-c default_transaction_read_only=off` from control plane - // as well. After rolling out this code, we can remove this parameter from control plane. - // In the meantime, double-passing is fine, the last value is applied. 
- // See: - const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0"; + const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0 -c pgaudit.log=none"; let options = match conn_conf.get_options() { - Some(options) => format!("{} {}", options, EXTRA_OPTIONS), + // Allow the control plane to override any options set by the + // compute + Some(options) => format!("{EXTRA_OPTIONS} {options}"), None => EXTRA_OPTIONS.to_string(), }; conn_conf.options(&options); @@ -369,7 +452,7 @@ impl ComputeNode { let mut new_state = ComputeState::new(); if let Some(spec) = config.spec { - let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?; + let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow!(msg))?; new_state.pspec = Some(pspec); } @@ -381,6 +464,8 @@ impl ComputeNode { state_changed: Condvar::new(), ext_download_progress: RwLock::new(HashMap::new()), compute_ctl_config: config.compute_ctl_config, + extension_stats_task: Mutex::new(None), + lfc_offload_task: Mutex::new(None), }) } @@ -468,6 +553,9 @@ impl ComputeNode { None }; + this.terminate_extension_stats_task(); + this.terminate_lfc_offload_task(); + // Terminate the vm_monitor so it releases the file watcher on // /sys/fs/cgroup/neon-postgres. // Note: the vm-monitor only runs on linux because it requires cgroups. @@ -489,12 +577,21 @@ impl ComputeNode { // Reap the postgres process delay_exit |= this.cleanup_after_postgres_exit()?; + // /terminate returns LSN. If we don't sleep at all, connection will break and we + // won't get result. If we sleep too much, tests will take significantly longer + // and Github Action run will error out + let sleep_duration = if delay_exit { + Duration::from_secs(30) + } else { + Duration::from_millis(300) + }; + // If launch failed, keep serving HTTP requests for a while, so the cloud // control plane can get the actual error. 
if delay_exit { info!("giving control plane 30s to collect the error before shutdown"); - std::thread::sleep(Duration::from_secs(30)); } + std::thread::sleep(sleep_duration); Ok(exit_code) } @@ -695,10 +792,15 @@ impl ComputeNode { // Configure and start rsyslog for compliance audit logging match pspec.spec.audit_log_level { ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => { - let remote_endpoint = + let remote_tls_endpoint = + std::env::var("AUDIT_LOGGING_TLS_ENDPOINT").unwrap_or("".to_string()); + let remote_plain_endpoint = std::env::var("AUDIT_LOGGING_ENDPOINT").unwrap_or("".to_string()); - if remote_endpoint.is_empty() { - anyhow::bail!("AUDIT_LOGGING_ENDPOINT is empty"); + + if remote_plain_endpoint.is_empty() && remote_tls_endpoint.is_empty() { + anyhow::bail!( + "AUDIT_LOGGING_ENDPOINT and AUDIT_LOGGING_TLS_ENDPOINT are both empty" + ); } let log_directory_path = Path::new(&self.params.pgdata).join("log"); @@ -714,7 +816,8 @@ impl ComputeNode { log_directory_path.clone(), endpoint_id, project_id, - &remote_endpoint, + &remote_plain_endpoint, + &remote_tls_endpoint, )?; // Launch a background task to clean up the audit logs @@ -781,12 +884,15 @@ impl ComputeNode { // Log metrics so that we can search for slow operations in logs info!(?metrics, postmaster_pid = %postmaster_pid, "compute start finished"); - // Spawn the extension stats background task self.spawn_extension_stats_task(); if pspec.spec.autoprewarm { - self.prewarm_lfc(); + info!("autoprewarming on startup as requested"); + self.prewarm_lfc(None); } + if let Some(seconds) = pspec.spec.offload_lfc_interval_seconds { + self.spawn_lfc_offload_task(Duration::from_secs(seconds.into())); + }; Ok(()) } @@ -866,20 +972,25 @@ impl ComputeNode { // Maybe sync safekeepers again, to speed up next startup let compute_state = self.state.lock().unwrap().clone(); let pspec = compute_state.pspec.as_ref().expect("spec must be set"); - if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) { + let lsn = if matches!(pspec.spec.mode, compute_api::spec::ComputeMode::Primary) { info!("syncing safekeepers on shutdown"); let storage_auth_token = pspec.storage_auth_token.clone(); let lsn = self.sync_safekeepers(storage_auth_token)?; - info!("synced safekeepers at lsn {lsn}"); - } + info!(%lsn, "synced safekeepers"); + Some(lsn) + } else { + info!("not primary, not syncing safekeepers"); + None + }; let mut delay_exit = false; let mut state = self.state.lock().unwrap(); - if state.status == ComputeStatus::TerminationPending { + state.terminate_flush_lsn = lsn; + if let ComputeStatus::TerminationPending { mode } = state.status { state.status = ComputeStatus::Terminated; self.state_changed.notify_all(); // we were asked to terminate gracefully, don't exit to avoid restart - delay_exit = true + delay_exit = mode == compute_api::responses::TerminateMode::Fast } drop(state); @@ -936,15 +1047,87 @@ impl ComputeNode { Ok(()) } - // Get basebackup from the libpq connection to pageserver using `connstr` and - // unarchive it to `pgdata` directory overriding all its previous content. + /// Fetches a basebackup from the Pageserver using the compute state's Pageserver connstring and + /// unarchives it to `pgdata` directory, replacing any existing contents. 
#[instrument(skip_all, fields(%lsn))] fn try_get_basebackup(&self, compute_state: &ComputeState, lsn: Lsn) -> Result<()> { let spec = compute_state.pspec.as_ref().expect("spec must be set"); - let start_time = Instant::now(); - let shard0_connstr = spec.pageserver_connstr.split(',').next().unwrap(); - let mut config = postgres::Config::from_str(shard0_connstr)?; + let started = Instant::now(); + let (connected, size) = if spec.pageserver_conninfo.prefer_grpc { + self.try_get_basebackup_grpc(spec, lsn)? + } else { + self.try_get_basebackup_libpq(spec, lsn)? + }; + + let mut state = self.state.lock().unwrap(); + state.metrics.pageserver_connect_micros = + connected.duration_since(started).as_micros() as u64; + state.metrics.basebackup_bytes = size as u64; + state.metrics.basebackup_ms = started.elapsed().as_millis() as u64; + + Ok(()) + } + + /// Fetches a basebackup via gRPC. The connstring must use grpc://. Returns the timestamp when + /// the connection was established, and the (compressed) size of the basebackup. + fn try_get_basebackup_grpc(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> { + let shard0 = spec + .pageserver_conninfo + .shards + .get(&0) + .expect("shard 0 connection info missing"); + let shard0_url = shard0.grpc_url.clone().expect("no grpc_url for shard 0"); + + let shard_index = match spec.pageserver_conninfo.shards.len() as u8 { + 0 | 1 => ShardIndex::unsharded(), + count => ShardIndex::new(ShardNumber(0), ShardCount(count)), + }; + + let (reader, connected) = tokio::runtime::Handle::current().block_on(async move { + let mut client = page_api::Client::connect( + shard0_url, + spec.tenant_id, + spec.timeline_id, + shard_index, + spec.storage_auth_token.clone(), + None, // NB: base backups use payload compression + ) + .await?; + let connected = Instant::now(); + let reader = client + .get_base_backup(page_api::GetBaseBackupRequest { + lsn: (lsn != Lsn(0)).then_some(lsn), + compression: BaseBackupCompression::Gzip, + replica: spec.spec.mode != ComputeMode::Primary, + full: false, + }) + .await?; + anyhow::Ok((reader, connected)) + })?; + + let mut reader = MeasuredReader::new(tokio_util::io::SyncIoBridge::new(reader)); + + // Set `ignore_zeros` so that unpack() reads the entire stream and doesn't just stop at the + // end-of-archive marker. If the server errors, the tar::Builder drop handler will write an + // end-of-archive marker before the error is emitted, and we would not see the error. + let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut reader)); + ar.set_ignore_zeros(true); + ar.unpack(&self.params.pgdata)?; + + Ok((connected, reader.get_byte_count())) + } + + /// Fetches a basebackup via libpq. The connstring must use postgresql://. Returns the timestamp + /// when the connection was established, and the (compressed) size of the basebackup. + fn try_get_basebackup_libpq(&self, spec: &ParsedSpec, lsn: Lsn) -> Result<(Instant, usize)> { + let shard0 = spec + .pageserver_conninfo + .shards + .get(&0) + .expect("shard 0 connection info missing"); + let shard0_connstr = shard0.libpq_url.clone().expect("no libpq_url for shard 0"); + let mut config = postgres::Config::from_str(&shard0_connstr)?; // Use the storage auth token from the config file, if given. // Note: this overrides any password set in the connection string. 
@@ -956,16 +1139,14 @@ impl ComputeNode { } config.application_name("compute_ctl"); - if let Some(spec) = &compute_state.pspec { - config.options(&format!( - "-c neon.compute_mode={}", - spec.spec.mode.to_type_str() - )); - } + config.options(&format!( + "-c neon.compute_mode={}", + spec.spec.mode.to_type_str() + )); // Connect to pageserver let mut client = config.connect(NoTls)?; - let pageserver_connect_micros = start_time.elapsed().as_micros() as u64; + let connected = Instant::now(); let basebackup_cmd = match lsn { Lsn(0) => { @@ -1002,16 +1183,13 @@ impl ComputeNode { // Set `ignore_zeros` so that unpack() reads all the Copy data and // doesn't stop at the end-of-archive marker. Otherwise, if the server // sends an Error after finishing the tarball, we will not notice it. + // The tar::Builder drop handler will write an end-of-archive marker + // before emitting the error, and we would not see it otherwise. let mut ar = tar::Archive::new(flate2::read::GzDecoder::new(&mut bufreader)); ar.set_ignore_zeros(true); ar.unpack(&self.params.pgdata)?; - // Report metrics - let mut state = self.state.lock().unwrap(); - state.metrics.pageserver_connect_micros = pageserver_connect_micros; - state.metrics.basebackup_bytes = measured_reader.get_byte_count() as u64; - state.metrics.basebackup_ms = start_time.elapsed().as_millis() as u64; - Ok(()) + Ok((connected, measured_reader.get_byte_count())) } // Gets the basebackup in a retry loop @@ -1035,10 +1213,7 @@ impl ComputeNode { return result; } Err(ref e) if attempts < max_attempts => { - warn!( - "Failed to get basebackup: {} (attempt {}/{})", - e, attempts, max_attempts - ); + warn!("Failed to get basebackup: {e:?} (attempt {attempts}/{max_attempts})"); std::thread::sleep(std::time::Duration::from_millis(retry_period_ms as u64)); retry_period_ms *= 1.5; } @@ -1064,7 +1239,7 @@ impl ComputeNode { let sk_configs = sk_connstrs.into_iter().map(|connstr| { // Format connstr let id = connstr.clone(); - let connstr = format!("postgresql://no_user@{}", connstr); + let connstr = format!("postgresql://no_user@{connstr}"); let options = format!( "-c timeline_id={} tenant_id={}", pspec.timeline_id, pspec.tenant_id @@ -1247,16 +1422,8 @@ impl ComputeNode { } }; - info!( - "getting basebackup@{} from pageserver {}", - lsn, &pspec.pageserver_connstr - ); - self.get_basebackup(compute_state, lsn).with_context(|| { - format!( - "failed to get basebackup@{} from pageserver {}", - lsn, &pspec.pageserver_connstr - ) - })?; + self.get_basebackup(compute_state, lsn) + .with_context(|| format!("failed to get basebackup@{lsn}"))?; // Update pg_hba.conf received with basebackup. update_pg_hba(pgdata_path)?; @@ -1427,7 +1594,7 @@ impl ComputeNode { let (mut client, connection) = conf.connect(NoTls).await?; tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); @@ -1548,6 +1715,8 @@ impl ComputeNode { tls_config = self.compute_ctl_config.tls.clone(); } + self.update_installed_extensions_collection_interval(&spec); + let max_concurrent_connections = self.max_service_connections(compute_state, &spec); // Merge-apply spec & changes to PostgreSQL state. 
@@ -1570,7 +1739,7 @@ impl ComputeNode { Ok((mut client, connection)) => { tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); if let Err(e) = handle_migrations(&mut client).await { @@ -1612,6 +1781,8 @@ impl ComputeNode { let tls_config = self.tls_config(&spec); + self.update_installed_extensions_collection_interval(&spec); + if let Some(ref pgbouncer_settings) = spec.pgbouncer_settings { info!("tuning pgbouncer"); @@ -1750,7 +1921,7 @@ impl ComputeNode { // exit loop ComputeStatus::Failed - | ComputeStatus::TerminationPending + | ComputeStatus::TerminationPending { .. } | ComputeStatus::Terminated => break 'cert_update, // wait @@ -1874,7 +2045,7 @@ impl ComputeNode { let (client, connection) = connect_result.unwrap(); tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); let result = client @@ -1916,7 +2087,7 @@ LIMIT 100", self.params .remote_ext_base_url .as_ref() - .ok_or(DownloadError::BadInput(anyhow::anyhow!( + .ok_or(DownloadError::BadInput(anyhow!( "Remote extensions storage is not configured", )))?; @@ -2043,7 +2214,7 @@ LIMIT 100", db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } Ok(()) @@ -2070,7 +2241,7 @@ LIMIT 100", let version: Option = db_client .query_opt(version_query, &[&ext_name]) .await - .with_context(|| format!("Failed to execute query: {}", version_query))? + .with_context(|| format!("Failed to execute query: {version_query}"))? .map(|row| row.get(0)); // sanitize the inputs as postgres idents. @@ -2085,14 +2256,14 @@ LIMIT 100", db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } else { let query = format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}"); db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } Ok(ext_version) @@ -2112,7 +2283,7 @@ LIMIT 100", let remote_extensions = spec .remote_extensions .as_ref() - .ok_or(anyhow::anyhow!("Remote extensions are not configured"))?; + .ok_or(anyhow!("Remote extensions are not configured"))?; info!("parse shared_preload_libraries from spec.cluster.settings"); let mut libs_vec = Vec::new(); @@ -2191,22 +2362,22 @@ LIMIT 100", /// The operation will time out after a specified duration. 
pub fn wait_timeout_while_pageserver_connstr_unchanged(&self, duration: Duration) { let state = self.state.lock().unwrap(); - let old_pageserver_connstr = state + let old_pageserver_conninfo = state .pspec .as_ref() .expect("spec must be set") - .pageserver_connstr + .pageserver_conninfo .clone(); let mut unchanged = true; let _ = self .state_changed .wait_timeout_while(state, duration, |s| { - let pageserver_connstr = &s + let pageserver_conninfo = &s .pspec .as_ref() .expect("spec must be set") - .pageserver_connstr; - unchanged = pageserver_connstr == &old_pageserver_connstr; + .pageserver_conninfo; + unchanged = pageserver_conninfo == &old_pageserver_conninfo; unchanged }) .unwrap(); @@ -2216,24 +2387,92 @@ LIMIT 100", } pub fn spawn_extension_stats_task(&self) { + self.terminate_extension_stats_task(); + let conf = self.tokio_conn_conf.clone(); - let installed_extensions_collection_interval = - self.params.installed_extensions_collection_interval; - tokio::spawn(async move { - // An initial sleep is added to ensure that two collections don't happen at the same time. - // The first collection happens during compute startup. - tokio::time::sleep(tokio::time::Duration::from_secs( - installed_extensions_collection_interval, - )) - .await; - let mut interval = tokio::time::interval(tokio::time::Duration::from_secs( - installed_extensions_collection_interval, - )); + let atomic_interval = self.params.installed_extensions_collection_interval.clone(); + let mut installed_extensions_collection_interval = + 2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst); + info!( + "[NEON_EXT_SPAWN] Spawning background installed extensions worker with Timeout: {}", + installed_extensions_collection_interval + ); + let handle = tokio::spawn(async move { loop { - interval.tick().await; + info!( + "[NEON_EXT_INT_SLEEP]: Interval: {}", + installed_extensions_collection_interval + ); + // Sleep at the start of the loop to ensure that two collections don't happen at the same time. + // The first collection happens during compute startup. 
+ tokio::time::sleep(tokio::time::Duration::from_secs( + installed_extensions_collection_interval, + )) + .await; let _ = installed_extensions(conf.clone()).await; + // Acquire a read lock on the compute spec and then update the interval if necessary + installed_extensions_collection_interval = std::cmp::max( + installed_extensions_collection_interval, + 2 * atomic_interval.load(std::sync::atomic::Ordering::SeqCst), + ); } }); + + // Store the new task handle + *self.extension_stats_task.lock().unwrap() = Some(handle); + } + + fn terminate_extension_stats_task(&self) { + if let Some(h) = self.extension_stats_task.lock().unwrap().take() { + h.abort() + } + } + + pub fn spawn_lfc_offload_task(self: &Arc, interval: Duration) { + self.terminate_lfc_offload_task(); + let secs = interval.as_secs(); + info!("spawning lfc offload worker with {secs}s interval"); + let this = self.clone(); + let handle = spawn(async move { + let mut interval = time::interval(interval); + interval.tick().await; // returns immediately + loop { + interval.tick().await; + this.offload_lfc_async().await; + } + }); + *self.lfc_offload_task.lock().unwrap() = Some(handle); + } + + fn terminate_lfc_offload_task(&self) { + if let Some(h) = self.lfc_offload_task.lock().unwrap().take() { + h.abort() + } + } + + fn update_installed_extensions_collection_interval(&self, spec: &ComputeSpec) { + // Update the interval for collecting installed extensions statistics + // If the value is -1, we never suspend so set the value to default collection. + // If the value is 0, it means default, we will just continue to use the default. + if spec.suspend_timeout_seconds == -1 || spec.suspend_timeout_seconds == 0 { + info!( + "[NEON_EXT_INT_UPD] Spec Timeout: {}, New Timeout: {}", + spec.suspend_timeout_seconds, DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL + ); + self.params.installed_extensions_collection_interval.store( + DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL, + std::sync::atomic::Ordering::SeqCst, + ); + } else { + info!( + "[NEON_EXT_INT_UPD] Spec Timeout: {}", + spec.suspend_timeout_seconds + ); + self.params.installed_extensions_collection_interval.store( + spec.suspend_timeout_seconds as u64, + std::sync::atomic::Ordering::SeqCst, + ); + } } } @@ -2251,12 +2490,68 @@ pub async fn installed_extensions(conf: tokio_postgres::Config) -> Result<()> { Ok(()) } -pub fn forward_termination_signal() { +pub fn forward_termination_signal(dev_mode: bool) { let ss_pid = SYNC_SAFEKEEPERS_PID.load(Ordering::SeqCst); if ss_pid != 0 { let ss_pid = nix::unistd::Pid::from_raw(ss_pid as i32); kill(ss_pid, Signal::SIGTERM).ok(); } + + if !dev_mode { + // Terminate pgbouncer with SIGKILL + match pid_file::read(PGBOUNCER_PIDFILE.into()) { + Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => { + info!("sending SIGKILL to pgbouncer process pid: {}", pid); + if let Err(e) = kill(pid, Signal::SIGKILL) { + error!("failed to terminate pgbouncer: {}", e); + } + } + // pgbouncer does not lock the pid file, so we read and kill the process directly + Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => { + if let Ok(pid_str) = std::fs::read_to_string(PGBOUNCER_PIDFILE) { + if let Ok(pid) = pid_str.trim().parse::() { + info!( + "sending SIGKILL to pgbouncer process pid: {} (from unlocked pid file)", + pid + ); + if let Err(e) = kill(Pid::from_raw(pid), Signal::SIGKILL) { + error!("failed to terminate pgbouncer: {}", e); + } + } + } else { + info!("pgbouncer pid file exists but process not running"); + } + } + Ok(pid_file::PidFileRead::NotExist) => { 
+ info!("pgbouncer pid file not found, process may not be running"); + } + Err(e) => { + error!("error reading pgbouncer pid file: {}", e); + } + } + + // Terminate local_proxy + match pid_file::read("/etc/local_proxy/pid".into()) { + Ok(pid_file::PidFileRead::LockedByOtherProcess(pid)) => { + info!("sending SIGTERM to local_proxy process pid: {}", pid); + if let Err(e) = kill(pid, Signal::SIGTERM) { + error!("failed to terminate local_proxy: {}", e); + } + } + Ok(pid_file::PidFileRead::NotHeldByAnyProcess(_)) => { + info!("local_proxy PID file exists but process not running"); + } + Ok(pid_file::PidFileRead::NotExist) => { + info!("local_proxy PID file not found, process may not be running"); + } + Err(e) => { + error!("error reading local_proxy PID file: {}", e); + } + } + } else { + info!("Skipping pgbouncer and local_proxy termination because in dev mode"); + } + let pg_pid = PG_PID.load(Ordering::SeqCst); if pg_pid != 0 { let pg_pid = nix::unistd::Pid::from_raw(pg_pid as i32); @@ -2289,3 +2584,21 @@ impl JoinSetExt for tokio::task::JoinSet { }) } } + +#[cfg(test)] +mod tests { + use std::fs::File; + + use super::*; + + #[test] + fn duplicate_safekeeper_connstring() { + let file = File::open("tests/cluster_spec.json").unwrap(); + let spec: ComputeSpec = serde_json::from_reader(file).unwrap(); + + match ParsedSpec::try_from(spec.clone()) { + Ok(_p) => panic!("Failed to detect duplicate entry"), + Err(e) => assert!(e.starts_with("duplicate entry in safekeeper_connstrings:")), + }; + } +} diff --git a/compute_tools/src/compute_prewarm.rs b/compute_tools/src/compute_prewarm.rs index a6a84b3f1f..4190580e5e 100644 --- a/compute_tools/src/compute_prewarm.rs +++ b/compute_tools/src/compute_prewarm.rs @@ -5,6 +5,7 @@ use compute_api::responses::LfcOffloadState; use compute_api::responses::LfcPrewarmState; use http::StatusCode; use reqwest::Client; +use std::mem::replace; use std::sync::Arc; use tokio::{io::AsyncReadExt, spawn}; use tracing::{error, info}; @@ -25,11 +26,16 @@ struct EndpointStoragePair { } const KEY: &str = "lfc_state"; -impl TryFrom<&crate::compute::ParsedSpec> for EndpointStoragePair { - type Error = anyhow::Error; - fn try_from(pspec: &crate::compute::ParsedSpec) -> Result { - let Some(ref endpoint_id) = pspec.spec.endpoint_id else { - bail!("pspec.endpoint_id missing") +impl EndpointStoragePair { + /// endpoint_id is set to None while prewarming from other endpoint, see replica promotion + /// If not None, takes precedence over pspec.spec.endpoint_id + fn from_spec_and_endpoint( + pspec: &crate::compute::ParsedSpec, + endpoint_id: Option, + ) -> Result { + let endpoint_id = endpoint_id.as_ref().or(pspec.spec.endpoint_id.as_ref()); + let Some(ref endpoint_id) = endpoint_id else { + bail!("pspec.endpoint_id missing, other endpoint_id not provided") }; let Some(ref base_uri) = pspec.endpoint_storage_addr else { bail!("pspec.endpoint_storage_addr missing") @@ -83,21 +89,19 @@ impl ComputeNode { self.state.lock().unwrap().lfc_offload_state.clone() } - /// Returns false if there is a prewarm request ongoing, true otherwise - pub fn prewarm_lfc(self: &Arc) -> bool { - crate::metrics::LFC_PREWARM_REQUESTS.inc(); + /// If there is a prewarm request ongoing, return false, true otherwise + pub fn prewarm_lfc(self: &Arc, from_endpoint: Option) -> bool { { let state = &mut self.state.lock().unwrap().lfc_prewarm_state; - if let LfcPrewarmState::Prewarming = - std::mem::replace(state, LfcPrewarmState::Prewarming) - { + if let LfcPrewarmState::Prewarming = replace(state, 
LfcPrewarmState::Prewarming) { return false; } } + crate::metrics::LFC_PREWARMS.inc(); let cloned = self.clone(); spawn(async move { - let Err(err) = cloned.prewarm_impl().await else { + let Err(err) = cloned.prewarm_impl(from_endpoint).await else { cloned.state.lock().unwrap().lfc_prewarm_state = LfcPrewarmState::Completed; return; }; @@ -109,13 +113,14 @@ impl ComputeNode { true } - fn endpoint_storage_pair(&self) -> Result { + /// from_endpoint: None for endpoint managed by this compute_ctl + fn endpoint_storage_pair(&self, from_endpoint: Option) -> Result { let state = self.state.lock().unwrap(); - state.pspec.as_ref().unwrap().try_into() + EndpointStoragePair::from_spec_and_endpoint(state.pspec.as_ref().unwrap(), from_endpoint) } - async fn prewarm_impl(&self) -> Result<()> { - let EndpointStoragePair { url, token } = self.endpoint_storage_pair()?; + async fn prewarm_impl(&self, from_endpoint: Option) -> Result<()> { + let EndpointStoragePair { url, token } = self.endpoint_storage_pair(from_endpoint)?; info!(%url, "requesting LFC state from endpoint storage"); let request = Client::new().get(&url).bearer_auth(token); @@ -146,34 +151,43 @@ impl ComputeNode { .map(|_| ()) } - /// Returns false if there is an offload request ongoing, true otherwise + /// If offload request is ongoing, return false, true otherwise pub fn offload_lfc(self: &Arc) -> bool { - crate::metrics::LFC_OFFLOAD_REQUESTS.inc(); { let state = &mut self.state.lock().unwrap().lfc_offload_state; - if let LfcOffloadState::Offloading = - std::mem::replace(state, LfcOffloadState::Offloading) - { + if replace(state, LfcOffloadState::Offloading) == LfcOffloadState::Offloading { return false; } } - let cloned = self.clone(); - spawn(async move { - let Err(err) = cloned.offload_lfc_impl().await else { - cloned.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Completed; - return; - }; - error!(%err); - cloned.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Failed { - error: err.to_string(), - }; - }); + spawn(async move { cloned.offload_lfc_with_state_update().await }); true } + pub async fn offload_lfc_async(self: &Arc) { + { + let state = &mut self.state.lock().unwrap().lfc_offload_state; + if replace(state, LfcOffloadState::Offloading) == LfcOffloadState::Offloading { + return; + } + } + self.offload_lfc_with_state_update().await + } + + async fn offload_lfc_with_state_update(&self) { + crate::metrics::LFC_OFFLOADS.inc(); + let Err(err) = self.offload_lfc_impl().await else { + self.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Completed; + return; + }; + error!(%err); + self.state.lock().unwrap().lfc_offload_state = LfcOffloadState::Failed { + error: err.to_string(), + }; + } + async fn offload_lfc_impl(&self) -> Result<()> { - let EndpointStoragePair { url, token } = self.endpoint_storage_pair()?; + let EndpointStoragePair { url, token } = self.endpoint_storage_pair(None)?; info!(%url, "requesting LFC state from postgres"); let mut compressed = Vec::new(); diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs index 933b30134f..ceffbf40de 100644 --- a/compute_tools/src/config.rs +++ b/compute_tools/src/config.rs @@ -51,14 +51,56 @@ pub fn write_postgres_conf( // Write the postgresql.conf content from the spec file as is. 
if let Some(conf) = &spec.cluster.postgresql_conf { - writeln!(file, "{}", conf)?; + writeln!(file, "{conf}")?; } // Add options for connecting to storage writeln!(file, "# Neon storage settings")?; - if let Some(s) = &spec.pageserver_connstring { - writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?; + + if let Some(conninfo) = &spec.pageserver_connection_info { + let mut libpq_urls: Option> = Some(Vec::new()); + let mut grpc_urls: Option> = Some(Vec::new()); + + for shardno in 0..conninfo.shards.len() { + let info = conninfo.shards.get(&(shardno as u32)).ok_or_else(|| { + anyhow::anyhow!("shard {shardno} missing from pageserver_connection_info shard map") + })?; + + if let Some(url) = &info.libpq_url { + if let Some(ref mut urls) = libpq_urls { + urls.push(url.clone()); + } + } else { + libpq_urls = None + } + if let Some(url) = &info.grpc_url { + if let Some(ref mut urls) = grpc_urls { + urls.push(url.clone()); + } + } else { + grpc_urls = None + } + } + if let Some(libpq_urls) = libpq_urls { + writeln!( + file, + "neon.pageserver_connstring={}", + escape_conf_value(&libpq_urls.join(",")) + )?; + } else { + writeln!(file, "# no neon.pageserver_connstring")?; + } + if let Some(grpc_urls) = grpc_urls { + writeln!( + file, + "neon.pageserver_grpc_urls={}", + escape_conf_value(&grpc_urls.join(",")) + )?; + } else { + writeln!(file, "# no neon.pageserver_grpc_urls")?; + } } + if let Some(stripe_size) = spec.shard_stripe_size { writeln!(file, "neon.stripe_size={stripe_size}")?; } @@ -70,7 +112,7 @@ pub fn write_postgres_conf( ); // If generation is given, prepend sk list with g#number: if let Some(generation) = spec.safekeepers_generation { - write!(neon_safekeepers_value, "g#{}:", generation)?; + write!(neon_safekeepers_value, "g#{generation}:")?; } neon_safekeepers_value.push_str(&spec.safekeeper_connstrings.join(",")); writeln!( @@ -109,8 +151,8 @@ pub fn write_postgres_conf( tls::update_key_path_blocking(pgdata_path, tls_config); // these are the default, but good to be explicit. 
- writeln!(file, "ssl_cert_file = '{}'", SERVER_CRT)?; - writeln!(file, "ssl_key_file = '{}'", SERVER_KEY)?; + writeln!(file, "ssl_cert_file = '{SERVER_CRT}'")?; + writeln!(file, "ssl_key_file = '{SERVER_KEY}'")?; } // Locales @@ -191,8 +233,7 @@ pub fn write_postgres_conf( } writeln!( file, - "shared_preload_libraries='{}{}'", - libs, extra_shared_preload_libraries + "shared_preload_libraries='{libs}{extra_shared_preload_libraries}'" )?; } else { // Typically, this should be unreacheable, @@ -244,8 +285,7 @@ pub fn write_postgres_conf( } writeln!( file, - "shared_preload_libraries='{}{}'", - libs, extra_shared_preload_libraries + "shared_preload_libraries='{libs}{extra_shared_preload_libraries}'" )?; } else { // Typically, this should be unreacheable, @@ -263,7 +303,7 @@ pub fn write_postgres_conf( } } - writeln!(file, "neon.extension_server_port={}", extension_server_port)?; + writeln!(file, "neon.extension_server_port={extension_server_port}")?; if spec.drop_subscriptions_before_start { writeln!(file, "neon.disable_logical_replication_subscribers=true")?; @@ -291,7 +331,7 @@ where { let path = pgdata_path.join("compute_ctl_temp_override.conf"); let mut file = File::create(path)?; - write!(file, "{}", options)?; + write!(file, "{options}")?; let res = exec(); diff --git a/compute_tools/src/config_template/compute_audit_rsyslog_template.conf b/compute_tools/src/config_template/compute_audit_rsyslog_template.conf index 48b1a6f5c3..f072f652cf 100644 --- a/compute_tools/src/config_template/compute_audit_rsyslog_template.conf +++ b/compute_tools/src/config_template/compute_audit_rsyslog_template.conf @@ -10,7 +10,13 @@ input(type="imfile" File="{log_directory}/*.log" startmsg.regex="^[[:digit:]]{{4}}-[[:digit:]]{{2}}-[[:digit:]]{{2}} [[:digit:]]{{2}}:[[:digit:]]{{2}}:[[:digit:]]{{2}}.[[:digit:]]{{3}} GMT,") # the directory to store rsyslog state files -global(workDirectory="/var/log/rsyslog") +global( + workDirectory="/var/log/rsyslog" + DefaultNetstreamDriverCAFile="/etc/ssl/certs/ca-certificates.crt" +) + +# Whether the remote syslog receiver uses tls +set $.remote_syslog_tls = "{remote_syslog_tls}"; # Construct json, endpoint_id and project_id as additional metadata set $.json_log!endpoint_id = "{endpoint_id}"; @@ -21,5 +27,29 @@ set $.json_log!msg = $msg; template(name="PgAuditLog" type="string" string="<%PRI%>1 %TIMESTAMP:::date-rfc3339% %HOSTNAME% - - - - %$.json_log%") -# Forward to remote syslog receiver (@@:;format -local5.info @@{remote_endpoint};PgAuditLog +# Forward to remote syslog receiver (over TLS) +if ( $syslogtag == 'pgaudit_log' ) then {{ + if ( $.remote_syslog_tls == 'true' ) then {{ + action(type="omfwd" target="{remote_syslog_host}" port="{remote_syslog_port}" protocol="tcp" + template="PgAuditLog" + queue.type="linkedList" + queue.size="1000" + action.ResumeRetryCount="10" + StreamDriver="gtls" + StreamDriverMode="1" + StreamDriverAuthMode="x509/name" + StreamDriverPermittedPeers="{remote_syslog_host}" + StreamDriver.CheckExtendedKeyPurpose="on" + StreamDriver.PermitExpiredCerts="off" + ) + stop + }} else {{ + action(type="omfwd" target="{remote_syslog_host}" port="{remote_syslog_port}" protocol="tcp" + template="PgAuditLog" + queue.type="linkedList" + queue.size="1000" + action.ResumeRetryCount="10" + ) + stop + }} +}} diff --git a/compute_tools/src/extension_server.rs b/compute_tools/src/extension_server.rs index 3764bc1525..47931d5f72 100644 --- a/compute_tools/src/extension_server.rs +++ b/compute_tools/src/extension_server.rs @@ -74,9 +74,11 @@ More specifically, 
here is an example ext_index.json use std::path::Path; use std::str; +use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS}; use anyhow::{Context, Result, bail}; use bytes::Bytes; use compute_api::spec::RemoteExtSpec; +use postgres_versioninfo::PgMajorVersion; use regex::Regex; use remote_storage::*; use reqwest::StatusCode; @@ -86,8 +88,6 @@ use tracing::log::warn; use url::Url; use zstd::stream::read::Decoder; -use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS}; - fn get_pg_config(argument: &str, pgbin: &str) -> String { // gives the result of `pg_config [argument]` // where argument is a flag like `--version` or `--sharedir` @@ -106,7 +106,7 @@ fn get_pg_config(argument: &str, pgbin: &str) -> String { .to_string() } -pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion { +pub fn get_pg_version(pgbin: &str) -> PgMajorVersion { // pg_config --version returns a (platform specific) human readable string // such as "PostgreSQL 15.4". We parse this to v14/v15/v16 etc. let human_version = get_pg_config("--version", pgbin); @@ -114,25 +114,11 @@ pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion { } pub fn get_pg_version_string(pgbin: &str) -> String { - match get_pg_version(pgbin) { - PostgresMajorVersion::V14 => "v14", - PostgresMajorVersion::V15 => "v15", - PostgresMajorVersion::V16 => "v16", - PostgresMajorVersion::V17 => "v17", - } - .to_owned() + get_pg_version(pgbin).v_str() } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum PostgresMajorVersion { - V14, - V15, - V16, - V17, -} - -fn parse_pg_version(human_version: &str) -> PostgresMajorVersion { - use PostgresMajorVersion::*; +fn parse_pg_version(human_version: &str) -> PgMajorVersion { + use PgMajorVersion::*; // Normal releases have version strings like "PostgreSQL 15.4". But there // are also pre-release versions like "PostgreSQL 17devel" or "PostgreSQL // 16beta2" or "PostgreSQL 17rc1". And with the --with-extra-version @@ -143,10 +129,10 @@ fn parse_pg_version(human_version: &str) -> PostgresMajorVersion { .captures(human_version) { Some(captures) if captures.len() == 2 => match &captures["major"] { - "14" => return V14, - "15" => return V15, - "16" => return V16, - "17" => return V17, + "14" => return PG14, + "15" => return PG15, + "16" => return PG16, + "17" => return PG17, _ => {} }, _ => {} @@ -310,10 +296,7 @@ async fn download_extension_tar(remote_ext_base_url: &Url, ext_path: &str) -> Re async fn do_extension_server_request(uri: Url) -> Result { let resp = reqwest::get(uri).await.map_err(|e| { ( - format!( - "could not perform remote extensions server request: {:?}", - e - ), + format!("could not perform remote extensions server request: {e:?}"), UNKNOWN_HTTP_STATUS.to_string(), ) })?; @@ -323,7 +306,7 @@ async fn do_extension_server_request(uri: Url) -> Result match resp.bytes().await { Ok(resp) => Ok(resp), Err(e) => Err(( - format!("could not read remote extensions server response: {:?}", e), + format!("could not read remote extensions server response: {e:?}"), // It's fine to return and report error with status as 200 OK, // because we still failed to read the response. 
status.to_string(), @@ -334,10 +317,7 @@ async fn do_extension_server_request(uri: Url) -> Result Err(( - format!( - "unexpected remote extensions server response status code: {}", - status - ), + format!("unexpected remote extensions server response status code: {status}"), status.to_string(), )), } @@ -349,25 +329,25 @@ mod tests { #[test] fn test_parse_pg_version() { - use super::PostgresMajorVersion::*; - assert_eq!(parse_pg_version("PostgreSQL 15.4"), V15); - assert_eq!(parse_pg_version("PostgreSQL 15.14"), V15); + use postgres_versioninfo::PgMajorVersion::*; + assert_eq!(parse_pg_version("PostgreSQL 15.4"), PG15); + assert_eq!(parse_pg_version("PostgreSQL 15.14"), PG15); assert_eq!( parse_pg_version("PostgreSQL 15.4 (Ubuntu 15.4-0ubuntu0.23.04.1)"), - V15 + PG15 ); - assert_eq!(parse_pg_version("PostgreSQL 14.15"), V14); - assert_eq!(parse_pg_version("PostgreSQL 14.0"), V14); + assert_eq!(parse_pg_version("PostgreSQL 14.15"), PG14); + assert_eq!(parse_pg_version("PostgreSQL 14.0"), PG14); assert_eq!( parse_pg_version("PostgreSQL 14.9 (Debian 14.9-1.pgdg120+1"), - V14 + PG14 ); - assert_eq!(parse_pg_version("PostgreSQL 16devel"), V16); - assert_eq!(parse_pg_version("PostgreSQL 16beta1"), V16); - assert_eq!(parse_pg_version("PostgreSQL 16rc2"), V16); - assert_eq!(parse_pg_version("PostgreSQL 16extra"), V16); + assert_eq!(parse_pg_version("PostgreSQL 16devel"), PG16); + assert_eq!(parse_pg_version("PostgreSQL 16beta1"), PG16); + assert_eq!(parse_pg_version("PostgreSQL 16rc2"), PG16); + assert_eq!(parse_pg_version("PostgreSQL 16extra"), PG16); } #[test] diff --git a/compute_tools/src/http/routes/configure.rs b/compute_tools/src/http/routes/configure.rs index c29e3a97da..b7325d283f 100644 --- a/compute_tools/src/http/routes/configure.rs +++ b/compute_tools/src/http/routes/configure.rs @@ -65,7 +65,7 @@ pub(in crate::http) async fn configure( if state.status == ComputeStatus::Failed { let err = state.error.as_ref().map_or("unknown error", |x| x); - let msg = format!("compute configuration failed: {:?}", err); + let msg = format!("compute configuration failed: {err:?}"); return Err(msg); } } diff --git a/compute_tools/src/http/routes/lfc.rs b/compute_tools/src/http/routes/lfc.rs index 07bcc6bfb7..e98bd781a2 100644 --- a/compute_tools/src/http/routes/lfc.rs +++ b/compute_tools/src/http/routes/lfc.rs @@ -2,6 +2,7 @@ use crate::compute_prewarm::LfcPrewarmStateWithProgress; use crate::http::JsonResponse; use axum::response::{IntoResponse, Response}; use axum::{Json, http::StatusCode}; +use axum_extra::extract::OptionalQuery; use compute_api::responses::LfcOffloadState; type Compute = axum::extract::State>; @@ -16,8 +17,16 @@ pub(in crate::http) async fn offload_state(compute: Compute) -> Json Response { - if compute.prewarm_lfc() { +#[derive(serde::Deserialize)] +pub struct PrewarmQuery { + pub from_endpoint: String, +} + +pub(in crate::http) async fn prewarm( + compute: Compute, + OptionalQuery(query): OptionalQuery, +) -> Response { + if compute.prewarm_lfc(query.map(|q| q.from_endpoint)) { StatusCode::ACCEPTED.into_response() } else { JsonResponse::error( diff --git a/compute_tools/src/http/routes/terminate.rs b/compute_tools/src/http/routes/terminate.rs index 2c24d4ad6b..32d90a5990 100644 --- a/compute_tools/src/http/routes/terminate.rs +++ b/compute_tools/src/http/routes/terminate.rs @@ -1,32 +1,42 @@ -use std::sync::Arc; - +use crate::compute::{ComputeNode, forward_termination_signal}; +use crate::http::JsonResponse; use axum::extract::State; -use axum::response::{IntoResponse, Response}; 
-use compute_api::responses::ComputeStatus; +use axum::response::Response; +use axum_extra::extract::OptionalQuery; +use compute_api::responses::{ComputeStatus, TerminateResponse}; use http::StatusCode; +use serde::Deserialize; +use std::sync::Arc; use tokio::task; use tracing::info; -use crate::compute::{ComputeNode, forward_termination_signal}; -use crate::http::JsonResponse; +#[derive(Deserialize, Default)] +pub struct TerminateQuery { + mode: compute_api::responses::TerminateMode, +} /// Terminate the compute. -pub(in crate::http) async fn terminate(State(compute): State>) -> Response { +pub(in crate::http) async fn terminate( + State(compute): State>, + OptionalQuery(terminate): OptionalQuery, +) -> Response { + let mode = terminate.unwrap_or_default().mode; { let mut state = compute.state.lock().unwrap(); if state.status == ComputeStatus::Terminated { - return StatusCode::CREATED.into_response(); + return JsonResponse::success(StatusCode::CREATED, state.terminate_flush_lsn); } if !matches!(state.status, ComputeStatus::Empty | ComputeStatus::Running) { return JsonResponse::invalid_status(state.status); } - - state.set_status(ComputeStatus::TerminationPending, &compute.state_changed); - drop(state); + state.set_status( + ComputeStatus::TerminationPending { mode }, + &compute.state_changed, + ); } - forward_termination_signal(); + forward_termination_signal(false); info!("sent signal and notified waiters"); // Spawn a blocking thread to wait for compute to become Terminated. @@ -34,7 +44,7 @@ pub(in crate::http) async fn terminate(State(compute): State>) // be able to serve other requests while some particular request // is waiting for compute to finish configuration. let c = compute.clone(); - task::spawn_blocking(move || { + let lsn = task::spawn_blocking(move || { let mut state = c.state.lock().unwrap(); while state.status != ComputeStatus::Terminated { state = c.state_changed.wait(state).unwrap(); @@ -44,11 +54,10 @@ pub(in crate::http) async fn terminate(State(compute): State>) state.status ); } + state.terminate_flush_lsn }) .await .unwrap(); - info!("terminated Postgres"); - - StatusCode::OK.into_response() + JsonResponse::success(StatusCode::OK, TerminateResponse { lsn }) } diff --git a/compute_tools/src/installed_extensions.rs b/compute_tools/src/installed_extensions.rs index d95c168a99..411e03b7ec 100644 --- a/compute_tools/src/installed_extensions.rs +++ b/compute_tools/src/installed_extensions.rs @@ -43,7 +43,7 @@ pub async fn get_installed_extensions(mut conf: Config) -> Result Result>() + ( + spec.pageserver_conninfo.clone(), + spec.storage_auth_token.clone(), + ) }; - let result = try_acquire_lsn_lease(tenant_id, timeline_id, lsn, &configs); + let result = try_acquire_lsn_lease(conninfo, auth.as_deref(), tenant_id, timeline_id, lsn); match result { Ok(Some(res)) => { return Ok(res); @@ -116,68 +108,112 @@ fn acquire_lsn_lease_with_retry( } } -/// Tries to acquire an LSN lease through PS page_service API. +/// Tries to acquire LSN leases on all Pageserver shards. 
fn try_acquire_lsn_lease( + conninfo: PageserverConnectionInfo, + auth: Option<&str>, tenant_id: TenantId, timeline_id: TimelineId, lsn: Lsn, - configs: &[postgres::Config], ) -> Result> { - fn get_valid_until( - config: &postgres::Config, - tenant_shard_id: TenantShardId, - timeline_id: TimelineId, - lsn: Lsn, - ) -> Result> { - let mut client = config.connect(NoTls)?; - let cmd = format!("lease lsn {} {} {} ", tenant_shard_id, timeline_id, lsn); - let res = client.simple_query(&cmd)?; - let msg = match res.first() { - Some(msg) => msg, - None => bail!("empty response"), - }; - let row = match msg { - SimpleQueryMessage::Row(row) => row, - _ => bail!("error parsing lsn lease response"), + let shard_count = conninfo.shards.len(); + let mut leases = Vec::new(); + + for (shard_number, shard) in conninfo.shards.into_iter() { + let tenant_shard_id = match shard_count { + 0 | 1 => TenantShardId::unsharded(tenant_id), + shard_count => TenantShardId { + tenant_id, + shard_number: ShardNumber(shard_number as u8), + shard_count: ShardCount::new(shard_count as u8), + }, }; - // Note: this will be None if a lease is explicitly not granted. - let valid_until_str = row.get("valid_until"); - - let valid_until = valid_until_str.map(|s| { - SystemTime::UNIX_EPOCH - .checked_add(Duration::from_millis(u128::from_str(s).unwrap() as u64)) - .expect("Time larger than max SystemTime could handle") - }); - Ok(valid_until) + let lease = if conninfo.prefer_grpc { + acquire_lsn_lease_grpc( + &shard.grpc_url.unwrap(), + auth, + tenant_shard_id, + timeline_id, + lsn, + )? + } else { + acquire_lsn_lease_libpq( + &shard.libpq_url.unwrap(), + auth, + tenant_shard_id, + timeline_id, + lsn, + )? + }; + leases.push(lease); } - let shard_count = configs.len(); + Ok(leases.into_iter().min().flatten()) +} - let valid_until = if shard_count > 1 { - configs - .iter() - .enumerate() - .map(|(shard_number, config)| { - let tenant_shard_id = TenantShardId { - tenant_id, - shard_count: ShardCount::new(shard_count as u8), - shard_number: ShardNumber(shard_number as u8), - }; - get_valid_until(config, tenant_shard_id, timeline_id, lsn) - }) - .collect::>>>()? - .into_iter() - .min() - .unwrap() - } else { - get_valid_until( - &configs[0], - TenantShardId::unsharded(tenant_id), - timeline_id, - lsn, - )? +/// Acquires an LSN lease on a single shard, using the libpq API. The connstring must use a +/// postgresql:// scheme. +fn acquire_lsn_lease_libpq( + connstring: &str, + auth: Option<&str>, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, + lsn: Lsn, +) -> Result> { + let mut config = postgres::Config::from_str(connstring)?; + if let Some(auth) = auth { + config.password(auth); + } + let mut client = config.connect(NoTls)?; + let cmd = format!("lease lsn {tenant_shard_id} {timeline_id} {lsn} "); + let res = client.simple_query(&cmd)?; + let msg = match res.first() { + Some(msg) => msg, + None => bail!("empty response"), + }; + let row = match msg { + SimpleQueryMessage::Row(row) => row, + _ => bail!("error parsing lsn lease response"), }; + // Note: this will be None if a lease is explicitly not granted. + let valid_until_str = row.get("valid_until"); + + let valid_until = valid_until_str.map(|s| { + SystemTime::UNIX_EPOCH + .checked_add(Duration::from_millis(u128::from_str(s).unwrap() as u64)) + .expect("Time larger than max SystemTime could handle") + }); Ok(valid_until) } + +/// Acquires an LSN lease on a single shard, using the gRPC API. The connstring must use a +/// grpc:// scheme. 
+fn acquire_lsn_lease_grpc( + connstring: &str, + auth: Option<&str>, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, + lsn: Lsn, +) -> Result> { + tokio::runtime::Handle::current().block_on(async move { + let mut client = page_api::Client::connect( + connstring.to_string(), + tenant_shard_id.tenant_id, + timeline_id, + tenant_shard_id.to_index(), + auth.map(String::from), + None, + ) + .await?; + + let req = page_api::LeaseLsnRequest { lsn }; + match client.lease_lsn(req).await { + Ok(expires) => Ok(Some(expires)), + // Lease couldn't be acquired because the LSN has been garbage collected. + Err(err) if err.code() == tonic::Code::FailedPrecondition => Ok(None), + Err(err) => Err(err.into()), + } + }) +} diff --git a/compute_tools/src/metrics.rs b/compute_tools/src/metrics.rs index 90326b2074..8f81675c49 100644 --- a/compute_tools/src/metrics.rs +++ b/compute_tools/src/metrics.rs @@ -97,20 +97,18 @@ pub(crate) static PG_TOTAL_DOWNTIME_MS: Lazy> = Lazy:: .expect("failed to define a metric") }); -/// Needed as neon.file_cache_prewarm_batch == 0 doesn't mean we never tried to prewarm. -/// On the other hand, LFC_PREWARMED_PAGES is excessive as we can GET /lfc/prewarm -pub(crate) static LFC_PREWARM_REQUESTS: Lazy = Lazy::new(|| { +pub(crate) static LFC_PREWARMS: Lazy = Lazy::new(|| { register_int_counter!( - "compute_ctl_lfc_prewarm_requests_total", - "Total number of LFC prewarm requests made by compute_ctl", + "compute_ctl_lfc_prewarms_total", + "Total number of LFC prewarms requested by compute_ctl or autoprewarm option", ) .expect("failed to define a metric") }); -pub(crate) static LFC_OFFLOAD_REQUESTS: Lazy = Lazy::new(|| { +pub(crate) static LFC_OFFLOADS: Lazy = Lazy::new(|| { register_int_counter!( - "compute_ctl_lfc_offload_requests_total", - "Total number of LFC offload requests made by compute_ctl", + "compute_ctl_lfc_offloads_total", + "Total number of LFC offloads requested by compute_ctl or lfc_offload_period_seconds option", ) .expect("failed to define a metric") }); @@ -124,7 +122,7 @@ pub fn collect() -> Vec { metrics.extend(AUDIT_LOG_DIR_SIZE.collect()); metrics.extend(PG_CURR_DOWNTIME_MS.collect()); metrics.extend(PG_TOTAL_DOWNTIME_MS.collect()); - metrics.extend(LFC_PREWARM_REQUESTS.collect()); - metrics.extend(LFC_OFFLOAD_REQUESTS.collect()); + metrics.extend(LFC_PREWARMS.collect()); + metrics.extend(LFC_OFFLOADS.collect()); metrics } diff --git a/compute_tools/src/monitor.rs b/compute_tools/src/monitor.rs index bacaf05cd5..8a2f6addad 100644 --- a/compute_tools/src/monitor.rs +++ b/compute_tools/src/monitor.rs @@ -83,7 +83,9 @@ impl ComputeMonitor { let compute_status = self.compute.get_status(); if matches!( compute_status, - ComputeStatus::Terminated | ComputeStatus::TerminationPending | ComputeStatus::Failed + ComputeStatus::Terminated + | ComputeStatus::TerminationPending { .. } + | ComputeStatus::Failed ) { info!( "compute is in {} status, stopping compute monitor", diff --git a/compute_tools/src/pg_helpers.rs b/compute_tools/src/pg_helpers.rs index 94467a0d2f..0a3ceed2fa 100644 --- a/compute_tools/src/pg_helpers.rs +++ b/compute_tools/src/pg_helpers.rs @@ -36,9 +36,9 @@ pub fn escape_literal(s: &str) -> String { let res = s.replace('\'', "''").replace('\\', "\\\\"); if res.contains('\\') { - format!("E'{}'", res) + format!("E'{res}'") } else { - format!("'{}'", res) + format!("'{res}'") } } @@ -46,7 +46,7 @@ pub fn escape_literal(s: &str) -> String { /// with `'{}'` is not required, as it returns a ready-to-use config string. 
pub fn escape_conf_value(s: &str) -> String { let res = s.replace('\'', "''").replace('\\', "\\\\"); - format!("'{}'", res) + format!("'{res}'") } pub trait GenericOptionExt { @@ -446,7 +446,7 @@ pub async fn tune_pgbouncer( let mut pgbouncer_connstr = "host=localhost port=6432 dbname=pgbouncer user=postgres sslmode=disable".to_string(); if let Ok(pass) = std::env::var("PGBOUNCER_PASSWORD") { - pgbouncer_connstr.push_str(format!(" password={}", pass).as_str()); + pgbouncer_connstr.push_str(format!(" password={pass}").as_str()); } pgbouncer_connstr }; @@ -464,7 +464,7 @@ pub async fn tune_pgbouncer( Ok((client, connection)) => { tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); break client; diff --git a/compute_tools/src/pgbouncer.rs b/compute_tools/src/pgbouncer.rs new file mode 100644 index 0000000000..189dfabac9 --- /dev/null +++ b/compute_tools/src/pgbouncer.rs @@ -0,0 +1 @@ +pub const PGBOUNCER_PIDFILE: &str = "/tmp/pgbouncer.pid"; diff --git a/compute_tools/src/rsyslog.rs b/compute_tools/src/rsyslog.rs index 3bc2e72b19..3ced0a5654 100644 --- a/compute_tools/src/rsyslog.rs +++ b/compute_tools/src/rsyslog.rs @@ -4,8 +4,10 @@ use std::path::Path; use std::process::Command; use std::time::Duration; use std::{fs::OpenOptions, io::Write}; +use url::{Host, Url}; use anyhow::{Context, Result, anyhow}; +use hostname_validator; use tracing::{error, info, instrument, warn}; const POSTGRES_LOGS_CONF_PATH: &str = "/etc/rsyslog.d/postgres_logs.conf"; @@ -82,18 +84,84 @@ fn restart_rsyslog() -> Result<()> { Ok(()) } +fn parse_audit_syslog_address( + remote_plain_endpoint: &str, + remote_tls_endpoint: &str, +) -> Result<(String, u16, String)> { + let tls; + let remote_endpoint = if !remote_tls_endpoint.is_empty() { + tls = "true".to_string(); + remote_tls_endpoint + } else { + tls = "false".to_string(); + remote_plain_endpoint + }; + // Urlify the remote_endpoint, so parsing can be done with url::Url. 
+ let url_str = format!("http://{remote_endpoint}"); + let url = Url::parse(&url_str).map_err(|err| { + anyhow!("Error parsing {remote_endpoint}, expected host:port, got {err:?}") + })?; + + let is_valid = url.scheme() == "http" + && url.path() == "/" + && url.query().is_none() + && url.fragment().is_none() + && url.username() == "" + && url.password().is_none(); + + if !is_valid { + return Err(anyhow!( + "Invalid address format {remote_endpoint}, expected host:port" + )); + } + let host = match url.host() { + Some(Host::Domain(h)) if hostname_validator::is_valid(h) => h.to_string(), + Some(Host::Ipv4(ip4)) => ip4.to_string(), + Some(Host::Ipv6(ip6)) => ip6.to_string(), + _ => return Err(anyhow!("Invalid host")), + }; + let port = url + .port() + .ok_or_else(|| anyhow!("Invalid port in {remote_endpoint}"))?; + + Ok((host, port, tls)) +} + +fn generate_audit_rsyslog_config( + log_directory: String, + endpoint_id: &str, + project_id: &str, + remote_syslog_host: &str, + remote_syslog_port: u16, + remote_syslog_tls: &str, +) -> String { + format!( + include_str!("config_template/compute_audit_rsyslog_template.conf"), + log_directory = log_directory, + endpoint_id = endpoint_id, + project_id = project_id, + remote_syslog_host = remote_syslog_host, + remote_syslog_port = remote_syslog_port, + remote_syslog_tls = remote_syslog_tls + ) +} + pub fn configure_audit_rsyslog( log_directory: String, endpoint_id: &str, project_id: &str, remote_endpoint: &str, + remote_tls_endpoint: &str, ) -> Result<()> { - let config_content: String = format!( - include_str!("config_template/compute_audit_rsyslog_template.conf"), - log_directory = log_directory, - endpoint_id = endpoint_id, - project_id = project_id, - remote_endpoint = remote_endpoint + let (remote_syslog_host, remote_syslog_port, remote_syslog_tls) = + parse_audit_syslog_address(remote_endpoint, remote_tls_endpoint).unwrap(); + let config_content = generate_audit_rsyslog_config( + log_directory, + endpoint_id, + project_id, + &remote_syslog_host, + remote_syslog_port, + &remote_syslog_tls, ); info!("rsyslog config_content: {}", config_content); @@ -258,6 +326,8 @@ pub fn launch_pgaudit_gc(log_directory: String) { mod tests { use crate::rsyslog::PostgresLogsRsyslogConfig; + use super::{generate_audit_rsyslog_config, parse_audit_syslog_address}; + #[test] fn test_postgres_logs_config() { { @@ -287,4 +357,146 @@ mod tests { assert!(res.is_err()); } } + + #[test] + fn test_parse_audit_syslog_address() { + { + // host:port format (plaintext) + let parsed = parse_audit_syslog_address("collector.host.tld:5555", ""); + assert!(parsed.is_ok()); + assert_eq!( + parsed.unwrap(), + ( + String::from("collector.host.tld"), + 5555, + String::from("false") + ) + ); + } + + { + // host:port format with ipv4 ip address (plaintext) + let parsed = parse_audit_syslog_address("10.0.0.1:5555", ""); + assert!(parsed.is_ok()); + assert_eq!( + parsed.unwrap(), + (String::from("10.0.0.1"), 5555, String::from("false")) + ); + } + + { + // host:port format with ipv6 ip address (plaintext) + let parsed = + parse_audit_syslog_address("[7e60:82ed:cb2e:d617:f904:f395:aaca:e252]:5555", ""); + assert_eq!( + parsed.unwrap(), + ( + String::from("7e60:82ed:cb2e:d617:f904:f395:aaca:e252"), + 5555, + String::from("false") + ) + ); + } + + { + // Only TLS host:port defined + let parsed = parse_audit_syslog_address("", "tls.host.tld:5556"); + assert_eq!( + parsed.unwrap(), + (String::from("tls.host.tld"), 5556, String::from("true")) + ); + } + + { + // tls host should take precedence, when 
both defined + let parsed = parse_audit_syslog_address("plaintext.host.tld:5555", "tls.host.tld:5556"); + assert_eq!( + parsed.unwrap(), + (String::from("tls.host.tld"), 5556, String::from("true")) + ); + } + + { + // host without port (plaintext) + let parsed = parse_audit_syslog_address("collector.host.tld", ""); + assert!(parsed.is_err()); + } + + { + // port without host + let parsed = parse_audit_syslog_address(":5555", ""); + assert!(parsed.is_err()); + } + + { + // valid host with invalid port + let parsed = parse_audit_syslog_address("collector.host.tld:90001", ""); + assert!(parsed.is_err()); + } + + { + // invalid hostname with valid port + let parsed = parse_audit_syslog_address("-collector.host.tld:5555", ""); + assert!(parsed.is_err()); + } + + { + // parse error + let parsed = parse_audit_syslog_address("collector.host.tld:::5555", ""); + assert!(parsed.is_err()); + } + } + + #[test] + fn test_generate_audit_rsyslog_config() { + { + // plaintext version + let log_directory = "/tmp/log".to_string(); + let endpoint_id = "ep-test-endpoint-id"; + let project_id = "test-project-id"; + let remote_syslog_host = "collector.host.tld"; + let remote_syslog_port = 5555; + let remote_syslog_tls = "false"; + + let conf_str = generate_audit_rsyslog_config( + log_directory, + endpoint_id, + project_id, + remote_syslog_host, + remote_syslog_port, + remote_syslog_tls, + ); + + assert!(conf_str.contains(r#"set $.remote_syslog_tls = "false";"#)); + assert!(conf_str.contains(r#"type="omfwd""#)); + assert!(conf_str.contains(r#"target="collector.host.tld""#)); + assert!(conf_str.contains(r#"port="5555""#)); + assert!(conf_str.contains(r#"StreamDriverPermittedPeers="collector.host.tld""#)); + } + + { + // TLS version + let log_directory = "/tmp/log".to_string(); + let endpoint_id = "ep-test-endpoint-id"; + let project_id = "test-project-id"; + let remote_syslog_host = "collector.host.tld"; + let remote_syslog_port = 5556; + let remote_syslog_tls = "true"; + + let conf_str = generate_audit_rsyslog_config( + log_directory, + endpoint_id, + project_id, + remote_syslog_host, + remote_syslog_port, + remote_syslog_tls, + ); + + assert!(conf_str.contains(r#"set $.remote_syslog_tls = "true";"#)); + assert!(conf_str.contains(r#"type="omfwd""#)); + assert!(conf_str.contains(r#"target="collector.host.tld""#)); + assert!(conf_str.contains(r#"port="5556""#)); + assert!(conf_str.contains(r#"StreamDriverPermittedPeers="collector.host.tld""#)); + } + } } diff --git a/compute_tools/src/spec.rs b/compute_tools/src/spec.rs index 4b38e6e29c..43cfbb48f7 100644 --- a/compute_tools/src/spec.rs +++ b/compute_tools/src/spec.rs @@ -23,12 +23,12 @@ fn do_control_plane_request( ) -> Result { let resp = reqwest::blocking::Client::new() .get(uri) - .header("Authorization", format!("Bearer {}", jwt)) + .header("Authorization", format!("Bearer {jwt}")) .send() .map_err(|e| { ( true, - format!("could not perform request to control plane: {:?}", e), + format!("could not perform request to control plane: {e:?}"), UNKNOWN_HTTP_STATUS.to_string(), ) })?; @@ -39,7 +39,7 @@ fn do_control_plane_request( Ok(spec_resp) => Ok(spec_resp), Err(e) => Err(( true, - format!("could not deserialize control plane response: {:?}", e), + format!("could not deserialize control plane response: {e:?}"), status.to_string(), )), }, @@ -62,7 +62,7 @@ fn do_control_plane_request( // or some internal failure happened. Doesn't make much sense to retry in this case. 
_ => Err(( false, - format!("unexpected control plane response status code: {}", status), + format!("unexpected control plane response status code: {status}"), status.to_string(), )), } diff --git a/compute_tools/src/spec_apply.rs b/compute_tools/src/spec_apply.rs index 0d1389dbad..fcd072263a 100644 --- a/compute_tools/src/spec_apply.rs +++ b/compute_tools/src/spec_apply.rs @@ -933,56 +933,53 @@ async fn get_operations<'a>( PerDatabasePhase::DeleteDBRoleReferences => { let ctx = ctx.read().await; - let operations = - spec.delta_operations - .iter() - .flatten() - .filter(|op| op.action == "delete_role") - .filter_map(move |op| { - if db.is_owned_by(&op.name) { - return None; - } - if !ctx.roles.contains_key(&op.name) { - return None; - } - let quoted = op.name.pg_quote(); - let new_owner = match &db { - DB::SystemDB => PgIdent::from("cloud_admin").pg_quote(), - DB::UserDB(db) => db.owner.pg_quote(), - }; - let (escaped_role, outer_tag) = op.name.pg_quote_dollar(); + let operations = spec + .delta_operations + .iter() + .flatten() + .filter(|op| op.action == "delete_role") + .filter_map(move |op| { + if db.is_owned_by(&op.name) { + return None; + } + if !ctx.roles.contains_key(&op.name) { + return None; + } + let quoted = op.name.pg_quote(); + let new_owner = match &db { + DB::SystemDB => PgIdent::from("cloud_admin").pg_quote(), + DB::UserDB(db) => db.owner.pg_quote(), + }; + let (escaped_role, outer_tag) = op.name.pg_quote_dollar(); - Some(vec![ - // This will reassign all dependent objects to the db owner - Operation { - query: format!( - "REASSIGN OWNED BY {} TO {}", - quoted, new_owner, - ), - comment: None, - }, - // Revoke some potentially blocking privileges (Neon-specific currently) - Operation { - query: format!( - include_str!("sql/pre_drop_role_revoke_privileges.sql"), - // N.B. this has to be properly dollar-escaped with `pg_quote_dollar()` - role_name = escaped_role, - outer_tag = outer_tag, - ), - comment: None, - }, - // This now will only drop privileges of the role - // TODO: this is obviously not 100% true because of the above case, - // there could be still some privileges that are not revoked. Maybe this - // only drops privileges that were granted *by this* role, not *to this* role, - // but this has to be checked. - Operation { - query: format!("DROP OWNED BY {}", quoted), - comment: None, - }, - ]) - }) - .flatten(); + Some(vec![ + // This will reassign all dependent objects to the db owner + Operation { + query: format!("REASSIGN OWNED BY {quoted} TO {new_owner}",), + comment: None, + }, + // Revoke some potentially blocking privileges (Neon-specific currently) + Operation { + query: format!( + include_str!("sql/pre_drop_role_revoke_privileges.sql"), + // N.B. this has to be properly dollar-escaped with `pg_quote_dollar()` + role_name = escaped_role, + outer_tag = outer_tag, + ), + comment: None, + }, + // This now will only drop privileges of the role + // TODO: this is obviously not 100% true because of the above case, + // there could be still some privileges that are not revoked. Maybe this + // only drops privileges that were granted *by this* role, not *to this* role, + // but this has to be checked. 
+ Operation { + query: format!("DROP OWNED BY {quoted}"), + comment: None, + }, + ]) + }) + .flatten(); Ok(Box::new(operations)) } diff --git a/compute_tools/src/sync_sk.rs b/compute_tools/src/sync_sk.rs index 22b7027b93..6c348644b2 100644 --- a/compute_tools/src/sync_sk.rs +++ b/compute_tools/src/sync_sk.rs @@ -27,7 +27,7 @@ pub async fn ping_safekeeper( let (client, conn) = config.connect(tokio_postgres::NoTls).await?; tokio::spawn(async move { if let Err(e) = conn.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); diff --git a/compute_tools/tests/README.md b/compute_tools/tests/README.md new file mode 100644 index 0000000000..adeb9ef4b6 --- /dev/null +++ b/compute_tools/tests/README.md @@ -0,0 +1,6 @@ +### Test files + +The file `cluster_spec.json` has been copied over from libs/compute_api +tests, with some edits: + + - the neon.safekeepers setting contains a duplicate value diff --git a/compute_tools/tests/cluster_spec.json b/compute_tools/tests/cluster_spec.json new file mode 100644 index 0000000000..439d80c057 --- /dev/null +++ b/compute_tools/tests/cluster_spec.json @@ -0,0 +1,246 @@ +{ + "format_version": 1.0, + + "timestamp": "2021-05-23T18:25:43.511Z", + "operation_uuid": "0f657b36-4b0f-4a2d-9c2e-1dcd615e7d8b", + "suspend_timeout_seconds": 3600, + + "cluster": { + "cluster_id": "test-cluster-42", + "name": "Zenith Test", + "state": "restarted", + "roles": [ + { + "name": "postgres", + "encrypted_password": "6b1d16b78004bbd51fa06af9eda75972", + "options": null + }, + { + "name": "alexk", + "encrypted_password": null, + "options": null + }, + { + "name": "zenith \"new\"", + "encrypted_password": "5b1d16b78004bbd51fa06af9eda75972", + "options": null + }, + { + "name": "zen", + "encrypted_password": "9b1d16b78004bbd51fa06af9eda75972" + }, + { + "name": "\"name\";\\n select 1;", + "encrypted_password": "5b1d16b78004bbd51fa06af9eda75972" + }, + { + "name": "MyRole", + "encrypted_password": "5b1d16b78004bbd51fa06af9eda75972" + } + ], + "databases": [ + { + "name": "DB2", + "owner": "alexk", + "options": [ + { + "name": "LC_COLLATE", + "value": "C", + "vartype": "string" + }, + { + "name": "LC_CTYPE", + "value": "C", + "vartype": "string" + }, + { + "name": "TEMPLATE", + "value": "template0", + "vartype": "enum" + } + ] + }, + { + "name": "zenith", + "owner": "MyRole" + }, + { + "name": "zen", + "owner": "zen" + } + ], + "settings": [ + { + "name": "fsync", + "value": "off", + "vartype": "bool" + }, + { + "name": "wal_level", + "value": "logical", + "vartype": "enum" + }, + { + "name": "hot_standby", + "value": "on", + "vartype": "bool" + }, + { + "name": "prewarm_lfc_on_startup", + "value": "off", + "vartype": "bool" + }, + { + "name": "neon.safekeepers", + "value": "127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501,127.0.0.1:6502", + "vartype": "string" + }, + { + "name": "wal_log_hints", + "value": "on", + "vartype": "bool" + }, + { + "name": "log_connections", + "value": "on", + "vartype": "bool" + }, + { + "name": "shared_buffers", + "value": "32768", + "vartype": "integer" + }, + { + "name": "port", + "value": "55432", + "vartype": "integer" + }, + { + "name": "max_connections", + "value": "100", + "vartype": "integer" + }, + { + "name": "max_wal_senders", + "value": "10", + "vartype": "integer" + }, + { + "name": "listen_addresses", + "value": "0.0.0.0", + "vartype": "string" + }, + { + "name": "wal_sender_timeout", + "value": "0", + "vartype": "integer" + }, + { + "name": "password_encryption", + "value": "md5", + "vartype": "enum" + }, + { + 
"name": "maintenance_work_mem", + "value": "65536", + "vartype": "integer" + }, + { + "name": "max_parallel_workers", + "value": "8", + "vartype": "integer" + }, + { + "name": "max_worker_processes", + "value": "8", + "vartype": "integer" + }, + { + "name": "neon.tenant_id", + "value": "b0554b632bd4d547a63b86c3630317e8", + "vartype": "string" + }, + { + "name": "max_replication_slots", + "value": "10", + "vartype": "integer" + }, + { + "name": "neon.timeline_id", + "value": "2414a61ffc94e428f14b5758fe308e13", + "vartype": "string" + }, + { + "name": "shared_preload_libraries", + "value": "neon", + "vartype": "string" + }, + { + "name": "synchronous_standby_names", + "value": "walproposer", + "vartype": "string" + }, + { + "name": "neon.pageserver_connstring", + "value": "host=127.0.0.1 port=6400", + "vartype": "string" + }, + { + "name": "test.escaping", + "value": "here's a backslash \\ and a quote ' and a double-quote \" hooray", + "vartype": "string" + } + ] + }, + "delta_operations": [ + { + "action": "delete_db", + "name": "zenith_test" + }, + { + "action": "rename_db", + "name": "DB", + "new_name": "DB2" + }, + { + "action": "delete_role", + "name": "zenith2" + }, + { + "action": "rename_role", + "name": "zenith new", + "new_name": "zenith \"new\"" + } + ], + "remote_extensions": { + "library_index": { + "postgis-3": "postgis", + "libpgrouting-3.4": "postgis", + "postgis_raster-3": "postgis", + "postgis_sfcgal-3": "postgis", + "postgis_topology-3": "postgis", + "address_standardizer-3": "postgis" + }, + "extension_data": { + "postgis": { + "archive_path": "5834329303/v15/extensions/postgis.tar.zst", + "control_data": { + "postgis.control": "# postgis extension\ncomment = ''PostGIS geometry and geography spatial types and functions''\ndefault_version = ''3.3.2''\nmodule_pathname = ''$libdir/postgis-3''\nrelocatable = false\ntrusted = true\n", + "pgrouting.control": "# pgRouting Extension\ncomment = ''pgRouting Extension''\ndefault_version = ''3.4.2''\nmodule_pathname = ''$libdir/libpgrouting-3.4''\nrelocatable = true\nrequires = ''plpgsql''\nrequires = ''postgis''\ntrusted = true\n", + "postgis_raster.control": "# postgis_raster extension\ncomment = ''PostGIS raster types and functions''\ndefault_version = ''3.3.2''\nmodule_pathname = ''$libdir/postgis_raster-3''\nrelocatable = false\nrequires = postgis\ntrusted = true\n", + "postgis_sfcgal.control": "# postgis topology extension\ncomment = ''PostGIS SFCGAL functions''\ndefault_version = ''3.3.2''\nrelocatable = true\nrequires = postgis\ntrusted = true\n", + "postgis_topology.control": "# postgis topology extension\ncomment = ''PostGIS topology spatial types and functions''\ndefault_version = ''3.3.2''\nrelocatable = false\nschema = topology\nrequires = postgis\ntrusted = true\n", + "address_standardizer.control": "# address_standardizer extension\ncomment = ''Used to parse an address into constituent elements. 
Generally used to support geocoding address normalization step.''\ndefault_version = ''3.3.2''\nrelocatable = true\ntrusted = true\n", + "postgis_tiger_geocoder.control": "# postgis tiger geocoder extension\ncomment = ''PostGIS tiger geocoder and reverse geocoder''\ndefault_version = ''3.3.2''\nrelocatable = false\nschema = tiger\nrequires = ''postgis,fuzzystrmatch''\nsuperuser= false\ntrusted = true\n", + "address_standardizer_data_us.control": "# address standardizer us dataset\ncomment = ''Address Standardizer US dataset example''\ndefault_version = ''3.3.2''\nrelocatable = true\ntrusted = true\n" + } + } + }, + "custom_extensions": [], + "public_extensions": ["postgis"] + }, + "pgbouncer_settings": { + "default_pool_size": "42", + "pool_mode": "session" + } +} diff --git a/compute_tools/tests/pg_helpers_tests.rs b/compute_tools/tests/pg_helpers_tests.rs index 2b865c75d0..fae59082c6 100644 --- a/compute_tools/tests/pg_helpers_tests.rs +++ b/compute_tools/tests/pg_helpers_tests.rs @@ -31,6 +31,7 @@ mod pg_helpers_tests { wal_level = logical hot_standby = on autoprewarm = off +offload_lfc_interval_seconds = 20 neon.safekeepers = '127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501' wal_log_hints = on log_connections = on diff --git a/control_plane/Cargo.toml b/control_plane/Cargo.toml index 62c039047f..bbaa3f12b9 100644 --- a/control_plane/Cargo.toml +++ b/control_plane/Cargo.toml @@ -36,6 +36,7 @@ pageserver_api.workspace = true pageserver_client.workspace = true postgres_backend.workspace = true safekeeper_api.workspace = true +safekeeper_client.workspace = true postgres_connection.workspace = true storage_broker.workspace = true http-utils.workspace = true diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index ef6985d697..ef8a504cec 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -16,9 +16,9 @@ use std::time::Duration; use anyhow::{Context, Result, anyhow, bail}; use clap::Parser; use compute_api::requests::ComputeClaimsScope; -use compute_api::spec::ComputeMode; +use compute_api::spec::{ComputeMode, PageserverConnectionInfo, PageserverShardConnectionInfo}; use control_plane::broker::StorageBroker; -use control_plane::endpoint::ComputeControlPlane; +use control_plane::endpoint::{ComputeControlPlane, EndpointTerminateMode}; use control_plane::endpoint_storage::{ENDPOINT_STORAGE_DEFAULT_ADDR, EndpointStorage}; use control_plane::local_env; use control_plane::local_env::{ @@ -45,10 +45,10 @@ use pageserver_api::models::{ use pageserver_api::shard::{DEFAULT_STRIPE_SIZE, ShardCount, ShardStripeSize, TenantShardId}; use postgres_backend::AuthType; use postgres_connection::parse_host_port; -use safekeeper_api::membership::SafekeeperGeneration; +use safekeeper_api::membership::{SafekeeperGeneration, SafekeeperId}; use safekeeper_api::{ DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT, - DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT, + DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT, PgMajorVersion, PgVersionId, }; use storage_broker::DEFAULT_LISTEN_ADDR as DEFAULT_BROKER_ADDR; use tokio::task::JoinSet; @@ -64,7 +64,9 @@ const DEFAULT_PAGESERVER_ID: NodeId = NodeId(1); const DEFAULT_BRANCH_NAME: &str = "main"; project_git_version!(GIT_VERSION); -const DEFAULT_PG_VERSION: u32 = 17; +#[allow(dead_code)] +const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; +const DEFAULT_PG_VERSION_NUM: &str = "17"; const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/upcall/v1/"; @@ 
-167,9 +169,9 @@ struct TenantCreateCmdArgs { #[clap(short = 'c')] config: Vec, - #[arg(default_value_t = DEFAULT_PG_VERSION)] + #[arg(default_value = DEFAULT_PG_VERSION_NUM)] #[clap(long, help = "Postgres version to use for the initial timeline")] - pg_version: u32, + pg_version: PgMajorVersion, #[clap( long, @@ -290,9 +292,9 @@ struct TimelineCreateCmdArgs { #[clap(long, help = "Human-readable alias for the new timeline")] branch_name: String, - #[arg(default_value_t = DEFAULT_PG_VERSION)] + #[arg(default_value = DEFAULT_PG_VERSION_NUM)] #[clap(long, help = "Postgres version")] - pg_version: u32, + pg_version: PgMajorVersion, } #[derive(clap::Args)] @@ -322,9 +324,9 @@ struct TimelineImportCmdArgs { #[clap(long, help = "Lsn the basebackup ends at")] end_lsn: Option, - #[arg(default_value_t = DEFAULT_PG_VERSION)] + #[arg(default_value = DEFAULT_PG_VERSION_NUM)] #[clap(long, help = "Postgres version of the backup being imported")] - pg_version: u32, + pg_version: PgMajorVersion, } #[derive(clap::Subcommand)] @@ -601,9 +603,17 @@ struct EndpointCreateCmdArgs { )] config_only: bool, - #[arg(default_value_t = DEFAULT_PG_VERSION)] + #[arg(default_value = DEFAULT_PG_VERSION_NUM)] #[clap(long, help = "Postgres version")] - pg_version: u32, + pg_version: PgMajorVersion, + + /// Use gRPC to communicate with Pageservers, by generating grpc:// connstrings. + /// + /// Specified on creation such that it's retained across reconfiguration and restarts. + /// + /// NB: not yet supported by computes. + #[clap(long)] + grpc: bool, #[clap( long, @@ -664,6 +674,23 @@ struct EndpointStartCmdArgs { #[clap(short = 't', long, value_parser= humantime::parse_duration, help = "timeout until we fail the command")] #[arg(default_value = "90s")] start_timeout: Duration, + + #[clap( + long, + help = "Download LFC cache from endpoint storage on endpoint startup", + default_value = "false" + )] + autoprewarm: bool, + + #[clap(long, help = "Upload LFC cache to endpoint storage periodically")] + offload_lfc_interval_seconds: Option, + + #[clap( + long, + help = "Run in development mode, skipping VM-specific operations like process termination", + action = clap::ArgAction::SetTrue + )] + dev: bool, } #[derive(clap::Args)] @@ -696,10 +723,9 @@ struct EndpointStopCmdArgs { )] destroy: bool, - #[clap(long, help = "Postgres shutdown mode, passed to \"pg_ctl -m \"")] - #[arg(value_parser(["smart", "fast", "immediate"]))] - #[arg(default_value = "fast")] - mode: String, + #[clap(long, help = "Postgres shutdown mode")] + #[clap(default_value = "fast")] + mode: EndpointTerminateMode, } #[derive(clap::Args)] @@ -905,7 +931,7 @@ fn print_timeline( br_sym = "┗━"; } - print!("{} @{}: ", br_sym, ancestor_lsn); + print!("{br_sym} @{ancestor_lsn}: "); } // Finally print a timeline id and name with new line @@ -1255,6 +1281,45 @@ async fn handle_timeline(cmd: &TimelineCmd, env: &mut local_env::LocalEnv) -> Re pageserver .timeline_import(tenant_id, timeline_id, base, pg_wal, args.pg_version) .await?; + if env.storage_controller.timelines_onto_safekeepers { + println!("Creating timeline on safekeeper ..."); + let timeline_info = pageserver + .timeline_info( + TenantShardId::unsharded(tenant_id), + timeline_id, + pageserver_client::mgmt_api::ForceAwaitLogicalSize::No, + ) + .await?; + let default_sk = SafekeeperNode::from_env(env, env.safekeepers.first().unwrap()); + let default_host = default_sk + .conf + .listen_addr + .clone() + .unwrap_or_else(|| "localhost".to_string()); + let mconf = safekeeper_api::membership::Configuration { + 
generation: SafekeeperGeneration::new(1), + members: safekeeper_api::membership::MemberSet { + m: vec![SafekeeperId { + host: default_host, + id: default_sk.conf.id, + pg_port: default_sk.conf.pg_port, + }], + }, + new_members: None, + }; + let pg_version = PgVersionId::from(args.pg_version); + let req = safekeeper_api::models::TimelineCreateRequest { + tenant_id, + timeline_id, + mconf, + pg_version, + system_id: None, + wal_seg_size: None, + start_lsn: timeline_info.last_record_lsn, + commit_lsn: None, + }; + default_sk.create_timeline(&req).await?; + } env.register_branch_mapping(branch_name.to_string(), tenant_id, timeline_id)?; println!("Done"); } @@ -1412,6 +1477,7 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res args.internal_http_port, args.pg_version, mode, + args.grpc, !args.update_catalog, false, )?; @@ -1450,22 +1516,35 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res )?; } - let (pageservers, stripe_size) = if let Some(pageserver_id) = pageserver_id { - let conf = env.get_pageserver_conf(pageserver_id).unwrap(); - let parsed = parse_host_port(&conf.listen_pg_addr).expect("Bad config"); - ( - vec![(parsed.0, parsed.1.unwrap_or(5432))], - // If caller is telling us what pageserver to use, this is not a tenant which is - // full managed by storage controller, therefore not sharded. - DEFAULT_STRIPE_SIZE, - ) + let (shards, stripe_size) = if let Some(ps_id) = pageserver_id { + let conf = env.get_pageserver_conf(ps_id).unwrap(); + let libpq_url = Some({ + let (host, port) = parse_host_port(&conf.listen_pg_addr)?; + let port = port.unwrap_or(5432); + format!("postgres://no_user@{host}:{port}") + }); + let grpc_url = if let Some(grpc_addr) = &conf.listen_grpc_addr { + let (host, port) = parse_host_port(grpc_addr)?; + let port = port.unwrap_or(DEFAULT_PAGESERVER_GRPC_PORT); + Some(format!("grpc://no_user@{host}:{port}")) + } else { + None + }; + let pageserver = PageserverShardConnectionInfo { + libpq_url, + grpc_url, + }; + + // If caller is telling us what pageserver to use, this is not a tenant which is + // fully managed by storage controller, therefore not sharded. + (vec![(0, pageserver)], DEFAULT_STRIPE_SIZE) } else { // Look up the currently attached location of the tenant, and its striping metadata, // to pass these on to postgres. let storage_controller = StorageController::from_env(env); let locate_result = storage_controller.tenant_locate(endpoint.tenant_id).await?; - let pageservers = futures::future::try_join_all( - locate_result.shards.into_iter().map(|shard| async move { + let shards = futures::future::try_join_all(locate_result.shards.into_iter().map( + |shard| async move { if let ComputeMode::Static(lsn) = endpoint.mode { // Initialize LSN leases for static computes. 
let conf = env.get_pageserver_conf(shard.node_id).unwrap(); @@ -1477,19 +1556,34 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res .await?; } - anyhow::Ok(( - Host::parse(&shard.listen_pg_addr) - .expect("Storage controller reported bad hostname"), - shard.listen_pg_port, - )) - }), - ) + let libpq_host = Host::parse(&shard.listen_pg_addr)?; + let libpq_port = shard.listen_pg_port; + let libpq_url = + Some(format!("postgres://no_user@{libpq_host}:{libpq_port}")); + + let grpc_url = if let Some(grpc_host) = shard.listen_grpc_addr { + let grpc_port = shard.listen_grpc_port.expect("no gRPC port"); + Some(format!("grpc://no_user@{grpc_host}:{grpc_port}")) + } else { + None + }; + let pageserver = PageserverShardConnectionInfo { + libpq_url, + grpc_url, + }; + anyhow::Ok((shard.shard_id.shard_number.0 as u32, pageserver)) + }, + )) .await?; let stripe_size = locate_result.shard_params.stripe_size; - (pageservers, stripe_size) + (shards, stripe_size) + }; + assert!(!shards.is_empty()); + let pageserver_conninfo = PageserverConnectionInfo { + shards: shards.into_iter().collect(), + prefer_grpc: endpoint.grpc, }; - assert!(!pageservers.is_empty()); let ps_conf = env.get_pageserver_conf(DEFAULT_PAGESERVER_ID)?; let auth_token = if matches!(ps_conf.pg_auth_type, AuthType::NeonJWT) { @@ -1513,21 +1607,24 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res let endpoint_storage_token = env.generate_auth_token(&claims)?; let endpoint_storage_addr = env.endpoint_storage.listen_addr.to_string(); + let args = control_plane::endpoint::EndpointStartArgs { + auth_token, + endpoint_storage_token, + endpoint_storage_addr, + safekeepers_generation, + safekeepers, + pageserver_conninfo, + remote_ext_base_url: remote_ext_base_url.clone(), + shard_stripe_size: stripe_size.0 as usize, + create_test_user: args.create_test_user, + start_timeout: args.start_timeout, + autoprewarm: args.autoprewarm, + offload_lfc_interval_seconds: args.offload_lfc_interval_seconds, + dev: args.dev, + }; + println!("Starting existing endpoint {endpoint_id}..."); - endpoint - .start( - &auth_token, - endpoint_storage_token, - endpoint_storage_addr, - safekeepers_generation, - safekeepers, - pageservers, - remote_ext_base_url.as_ref(), - stripe_size.0 as usize, - args.create_test_user, - args.start_timeout, - ) - .await?; + endpoint.start(args).await?; } EndpointCmd::Reconfigure(args) => { let endpoint_id = &args.endpoint_id; @@ -1535,12 +1632,27 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res .endpoints .get(endpoint_id.as_str()) .with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?; - let pageservers = if let Some(ps_id) = args.endpoint_pageserver_id { - let pageserver = PageServerNode::from_env(env, env.get_pageserver_conf(ps_id)?); - vec![( - pageserver.pg_connection_config.host().clone(), - pageserver.pg_connection_config.port(), - )] + let shards = if let Some(ps_id) = args.endpoint_pageserver_id { + let conf = env.get_pageserver_conf(ps_id)?; + let libpq_url = Some({ + let (host, port) = parse_host_port(&conf.listen_pg_addr)?; + let port = port.unwrap_or(5432); + format!("postgres://no_user@{host}:{port}") + }); + let grpc_url = if let Some(grpc_addr) = &conf.listen_grpc_addr { + let (host, port) = parse_host_port(grpc_addr)?; + let port = port.unwrap_or(DEFAULT_PAGESERVER_GRPC_PORT); + Some(format!("grpc://no_user@{host}:{port}")) + } else { + None + }; + let pageserver = PageserverShardConnectionInfo { + 
libpq_url, + grpc_url, + }; + // If caller is telling us what pageserver to use, this is not a tenant which is + // fully managed by storage controller, therefore not sharded. + vec![(0, pageserver)] } else { let storage_controller = StorageController::from_env(env); storage_controller @@ -1549,18 +1661,38 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res .shards .into_iter() .map(|shard| { + // Use gRPC if requested. + let libpq_host = Host::parse(&shard.listen_pg_addr).expect("bad hostname"); + let libpq_port = shard.listen_pg_port; + let libpq_url = + Some(format!("postgres://no_user@{libpq_host}:{libpq_port}")); + + let grpc_url = if let Some(grpc_host) = shard.listen_grpc_addr { + let grpc_port = shard.listen_grpc_port.expect("no gRPC port"); + Some(format!("grpc://no_user@{grpc_host}:{grpc_port}")) + } else { + None + }; ( - Host::parse(&shard.listen_pg_addr) - .expect("Storage controller reported malformed host"), - shard.listen_pg_port, + shard.shard_id.shard_number.0 as u32, + PageserverShardConnectionInfo { + libpq_url, + grpc_url, + }, ) }) .collect::>() }; + let pageserver_conninfo = PageserverConnectionInfo { + shards: shards.into_iter().collect(), + prefer_grpc: endpoint.grpc, + }; // If --safekeepers argument is given, use only the listed // safekeeper nodes; otherwise all from the env. let safekeepers = parse_safekeepers(&args.safekeepers)?; - endpoint.reconfigure(pageservers, None, safekeepers).await?; + endpoint + .reconfigure(Some(pageserver_conninfo), None, safekeepers, None) + .await?; } EndpointCmd::Stop(args) => { let endpoint_id = &args.endpoint_id; @@ -1568,7 +1700,10 @@ async fn handle_endpoint(subcmd: &EndpointCmd, env: &local_env::LocalEnv) -> Res .endpoints .get(endpoint_id) .with_context(|| format!("postgres endpoint {endpoint_id} is not found"))?; - endpoint.stop(&args.mode, args.destroy)?; + match endpoint.stop(args.mode, args.destroy).await?.lsn { + Some(lsn) => println!("{lsn}"), + None => println!("null"), + } } EndpointCmd::GenerateJwt(args) => { let endpoint = { @@ -1650,7 +1785,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> StopMode::Immediate => true, }; if let Err(e) = get_pageserver(env, args.pageserver_id)?.stop(immediate) { - eprintln!("pageserver stop failed: {}", e); + eprintln!("pageserver stop failed: {e}"); exit(1); } } @@ -1659,7 +1794,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> let pageserver = get_pageserver(env, args.pageserver_id)?; //TODO what shutdown strategy should we use here? 
if let Err(e) = pageserver.stop(false) { - eprintln!("pageserver stop failed: {}", e); + eprintln!("pageserver stop failed: {e}"); exit(1); } @@ -1676,7 +1811,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> { Ok(_) => println!("Page server is up and running"), Err(err) => { - eprintln!("Page server is not available: {}", err); + eprintln!("Page server is not available: {err}"); exit(1); } } @@ -1713,7 +1848,7 @@ async fn handle_storage_controller( }, }; if let Err(e) = svc.stop(stop_args).await { - eprintln!("stop failed: {}", e); + eprintln!("stop failed: {e}"); exit(1); } } @@ -1735,7 +1870,7 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> let safekeeper = get_safekeeper(env, args.id)?; if let Err(e) = safekeeper.start(&args.extra_opt, &args.start_timeout).await { - eprintln!("safekeeper start failed: {}", e); + eprintln!("safekeeper start failed: {e}"); exit(1); } } @@ -1747,7 +1882,7 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> StopMode::Immediate => true, }; if let Err(e) = safekeeper.stop(immediate) { - eprintln!("safekeeper stop failed: {}", e); + eprintln!("safekeeper stop failed: {e}"); exit(1); } } @@ -1760,12 +1895,12 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> }; if let Err(e) = safekeeper.stop(immediate) { - eprintln!("safekeeper stop failed: {}", e); + eprintln!("safekeeper stop failed: {e}"); exit(1); } if let Err(e) = safekeeper.start(&args.extra_opt, &args.start_timeout).await { - eprintln!("safekeeper start failed: {}", e); + eprintln!("safekeeper start failed: {e}"); exit(1); } } @@ -2000,11 +2135,16 @@ async fn handle_stop_all(args: &StopCmdArgs, env: &local_env::LocalEnv) -> Resul } async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) { + let mode = if immediate { + EndpointTerminateMode::Immediate + } else { + EndpointTerminateMode::Fast + }; // Stop all endpoints match ComputeControlPlane::load(env.clone()) { Ok(cplane) => { for (_k, node) in cplane.endpoints { - if let Err(e) = node.stop(if immediate { "immediate" } else { "fast" }, false) { + if let Err(e) = node.stop(mode, false).await { eprintln!("postgres stop failed: {e:#}"); } } @@ -2016,7 +2156,7 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) { let storage = EndpointStorage::from_env(env); if let Err(e) = storage.stop(immediate) { - eprintln!("endpoint_storage stop failed: {:#}", e); + eprintln!("endpoint_storage stop failed: {e:#}"); } for ps_conf in &env.pageservers { diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index 774a0053f8..29d369d4d8 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -37,6 +37,7 @@ //! ``` //! 
use std::collections::BTreeMap; +use std::fmt::Display; use std::net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream}; use std::path::PathBuf; use std::process::Command; @@ -45,16 +46,23 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use anyhow::{Context, Result, anyhow, bail}; +use base64::Engine; +use base64::prelude::BASE64_URL_SAFE_NO_PAD; use compute_api::requests::{ COMPUTE_AUDIENCE, ComputeClaims, ComputeClaimsScope, ConfigurationRequest, }; use compute_api::responses::{ - ComputeConfig, ComputeCtlConfig, ComputeStatus, ComputeStatusResponse, TlsConfig, + ComputeConfig, ComputeCtlConfig, ComputeStatus, ComputeStatusResponse, TerminateResponse, + TlsConfig, }; use compute_api::spec::{ Cluster, ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, Database, PgIdent, RemoteExtSpec, Role, }; + +// re-export these, because they're used in the reconfigure() function +pub use compute_api::spec::{PageserverConnectionInfo, PageserverShardConnectionInfo}; + use jsonwebtoken::jwk::{ AlgorithmParameters, CommonParameters, EllipticCurve, Jwk, JwkSet, KeyAlgorithm, KeyOperations, OctetKeyPairParameters, OctetKeyPairType, PublicKeyUse, @@ -63,18 +71,17 @@ use nix::sys::signal::{Signal, kill}; use pageserver_api::shard::ShardStripeSize; use pem::Pem; use reqwest::header::CONTENT_TYPE; +use safekeeper_api::PgMajorVersion; use safekeeper_api::membership::SafekeeperGeneration; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use spki::der::Decode; use spki::{SubjectPublicKeyInfo, SubjectPublicKeyInfoRef}; use tracing::debug; -use url::Host; use utils::id::{NodeId, TenantId, TimelineId}; use crate::local_env::LocalEnv; use crate::postgresql_conf::PostgresConf; -use crate::storage_controller::StorageController; // contents of a endpoint.json file #[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)] @@ -86,7 +93,8 @@ pub struct EndpointConf { pg_port: u16, external_http_port: u16, internal_http_port: u16, - pg_version: u32, + pg_version: PgMajorVersion, + grpc: bool, skip_pg_catalog_updates: bool, reconfigure_concurrency: usize, drop_subscriptions_before_start: bool, @@ -164,7 +172,7 @@ impl ComputeControlPlane { public_key_use: Some(PublicKeyUse::Signature), key_operations: Some(vec![KeyOperations::Verify]), key_algorithm: Some(KeyAlgorithm::EdDSA), - key_id: Some(base64::encode_config(key_hash, base64::URL_SAFE_NO_PAD)), + key_id: Some(BASE64_URL_SAFE_NO_PAD.encode(key_hash)), x509_url: None::, x509_chain: None::>, x509_sha1_fingerprint: None::, @@ -173,7 +181,7 @@ impl ComputeControlPlane { algorithm: AlgorithmParameters::OctetKeyPair(OctetKeyPairParameters { key_type: OctetKeyPairType::OctetKeyPair, curve: EllipticCurve::Ed25519, - x: base64::encode_config(public_key, base64::URL_SAFE_NO_PAD), + x: BASE64_URL_SAFE_NO_PAD.encode(public_key), }), }], }) @@ -188,8 +196,9 @@ impl ComputeControlPlane { pg_port: Option, external_http_port: Option, internal_http_port: Option, - pg_version: u32, + pg_version: PgMajorVersion, mode: ComputeMode, + grpc: bool, skip_pg_catalog_updates: bool, drop_subscriptions_before_start: bool, ) -> Result> { @@ -224,6 +233,7 @@ impl ComputeControlPlane { // we also skip catalog updates in the cloud. 
skip_pg_catalog_updates, drop_subscriptions_before_start, + grpc, reconfigure_concurrency: 1, features: vec![], cluster: None, @@ -242,6 +252,7 @@ impl ComputeControlPlane { internal_http_port, pg_port, pg_version, + grpc, skip_pg_catalog_updates, drop_subscriptions_before_start, reconfigure_concurrency: 1, @@ -296,6 +307,8 @@ pub struct Endpoint { pub tenant_id: TenantId, pub timeline_id: TimelineId, pub mode: ComputeMode, + /// If true, the endpoint should use gRPC to communicate with Pageservers. + pub grpc: bool, // port and address of the Postgres server and `compute_ctl`'s HTTP APIs pub pg_address: SocketAddr, @@ -303,7 +316,7 @@ pub struct Endpoint { pub internal_http_address: SocketAddr, // postgres major version in the format: 14, 15, etc. - pg_version: u32, + pg_version: PgMajorVersion, // These are not part of the endpoint as such, but the environment // the endpoint runs in. @@ -331,18 +344,54 @@ pub enum EndpointStatus { RunningNoPidfile, } -impl std::fmt::Display for EndpointStatus { +impl Display for EndpointStatus { fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result { - let s = match self { + writer.write_str(match self { Self::Running => "running", Self::Stopped => "stopped", Self::Crashed => "crashed", Self::RunningNoPidfile => "running, no pidfile", - }; - write!(writer, "{}", s) + }) } } +#[derive(Default, Clone, Copy, clap::ValueEnum)] +pub enum EndpointTerminateMode { + #[default] + /// Use pg_ctl stop -m fast + Fast, + /// Use pg_ctl stop -m immediate + Immediate, + /// Use /terminate?mode=immediate + ImmediateTerminate, +} + +impl std::fmt::Display for EndpointTerminateMode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match &self { + EndpointTerminateMode::Fast => "fast", + EndpointTerminateMode::Immediate => "immediate", + EndpointTerminateMode::ImmediateTerminate => "immediate-terminate", + }) + } +} + +pub struct EndpointStartArgs { + pub auth_token: Option, + pub endpoint_storage_token: String, + pub endpoint_storage_addr: String, + pub safekeepers_generation: Option, + pub safekeepers: Vec, + pub pageserver_conninfo: PageserverConnectionInfo, + pub remote_ext_base_url: Option, + pub shard_stripe_size: usize, + pub create_test_user: bool, + pub start_timeout: Duration, + pub autoprewarm: bool, + pub offload_lfc_interval_seconds: Option, + pub dev: bool, +} + impl Endpoint { fn from_dir_entry(entry: std::fs::DirEntry, env: &LocalEnv) -> Result { if !entry.file_type()?.is_dir() { @@ -378,6 +427,7 @@ impl Endpoint { mode: conf.mode, tenant_id: conf.tenant_id, pg_version: conf.pg_version, + grpc: conf.grpc, skip_pg_catalog_updates: conf.skip_pg_catalog_updates, reconfigure_concurrency: conf.reconfigure_concurrency, drop_subscriptions_before_start: conf.drop_subscriptions_before_start, @@ -504,7 +554,7 @@ impl Endpoint { conf.append("hot_standby", "on"); // prefetching of blocks referenced in WAL doesn't make sense for us // Neon hot standby ignores pages that are not in the shared_buffers - if self.pg_version >= 15 { + if self.pg_version >= PgMajorVersion::PG15 { conf.append("recovery_prefetch", "off"); } } @@ -606,14 +656,6 @@ impl Endpoint { } } - fn build_pageserver_connstr(pageservers: &[(Host, u16)]) -> String { - pageservers - .iter() - .map(|(host, port)| format!("postgresql://no_user@{host}:{port}")) - .collect::>() - .join(",") - } - /// Map safekeepers ids to the actual connection strings. 
fn build_safekeepers_connstrs(&self, sk_ids: Vec) -> Result> { let mut safekeeper_connstrings = Vec::new(); @@ -646,20 +688,7 @@ impl Endpoint { }) } - #[allow(clippy::too_many_arguments)] - pub async fn start( - &self, - auth_token: &Option, - endpoint_storage_token: String, - endpoint_storage_addr: String, - safekeepers_generation: Option, - safekeepers: Vec, - pageservers: Vec<(Host, u16)>, - remote_ext_base_url: Option<&String>, - shard_stripe_size: usize, - create_test_user: bool, - start_timeout: Duration, - ) -> Result<()> { + pub async fn start(&self, args: EndpointStartArgs) -> Result<()> { if self.status() == EndpointStatus::Running { anyhow::bail!("The endpoint is already running"); } @@ -672,10 +701,7 @@ impl Endpoint { std::fs::remove_dir_all(self.pgdata())?; } - let pageserver_connstring = Self::build_pageserver_connstr(&pageservers); - assert!(!pageserver_connstring.is_empty()); - - let safekeeper_connstrings = self.build_safekeepers_connstrs(safekeepers)?; + let safekeeper_connstrings = self.build_safekeepers_connstrs(args.safekeepers)?; // check for file remote_extensions_spec.json // if it is present, read it and pass to compute_ctl @@ -703,7 +729,7 @@ impl Endpoint { cluster_id: None, // project ID: not used name: None, // project name: not used state: None, - roles: if create_test_user { + roles: if args.create_test_user { vec![Role { name: PgIdent::from_str("test").unwrap(), encrypted_password: None, @@ -712,7 +738,7 @@ impl Endpoint { } else { Vec::new() }, - databases: if create_test_user { + databases: if args.create_test_user { vec![Database { name: PgIdent::from_str("neondb").unwrap(), owner: PgIdent::from_str("test").unwrap(), @@ -733,21 +759,23 @@ impl Endpoint { branch_id: None, endpoint_id: Some(self.endpoint_id.clone()), mode: self.mode, - pageserver_connstring: Some(pageserver_connstring), - safekeepers_generation: safekeepers_generation.map(|g| g.into_inner()), + pageserver_connection_info: Some(args.pageserver_conninfo), + safekeepers_generation: args.safekeepers_generation.map(|g| g.into_inner()), safekeeper_connstrings, - storage_auth_token: auth_token.clone(), + storage_auth_token: args.auth_token.clone(), remote_extensions, pgbouncer_settings: None, - shard_stripe_size: Some(shard_stripe_size), + shard_stripe_size: Some(args.shard_stripe_size), local_proxy_config: None, reconfigure_concurrency: self.reconfigure_concurrency, drop_subscriptions_before_start: self.drop_subscriptions_before_start, audit_log_level: ComputeAudit::Disabled, logs_export_host: None::, - endpoint_storage_addr: Some(endpoint_storage_addr), - endpoint_storage_token: Some(endpoint_storage_token), - autoprewarm: false, + endpoint_storage_addr: Some(args.endpoint_storage_addr), + endpoint_storage_token: Some(args.endpoint_storage_token), + autoprewarm: args.autoprewarm, + offload_lfc_interval_seconds: args.offload_lfc_interval_seconds, + suspend_timeout_seconds: -1, // Only used in neon_local. 
}; // this strange code is needed to support respec() in tests @@ -758,7 +786,7 @@ impl Endpoint { debug!("spec.cluster {:?}", spec.cluster); // fill missing fields again - if create_test_user { + if args.create_test_user { spec.cluster.roles.push(Role { name: PgIdent::from_str("test").unwrap(), encrypted_password: None, @@ -792,10 +820,10 @@ impl Endpoint { // Launch compute_ctl let conn_str = self.connstr("cloud_admin", "postgres"); - println!("Starting postgres node at '{}'", conn_str); - if create_test_user { + println!("Starting postgres node at '{conn_str}'"); + if args.create_test_user { let conn_str = self.connstr("test", "neondb"); - println!("Also at '{}'", conn_str); + println!("Also at '{conn_str}'"); } let mut cmd = Command::new(self.env.neon_distrib_dir.join("compute_ctl")); cmd.args([ @@ -825,8 +853,12 @@ impl Endpoint { .stderr(logfile.try_clone()?) .stdout(logfile); - if let Some(remote_ext_base_url) = remote_ext_base_url { - cmd.args(["--remote-ext-base-url", remote_ext_base_url]); + if let Some(remote_ext_base_url) = args.remote_ext_base_url { + cmd.args(["--remote-ext-base-url", &remote_ext_base_url]); + } + + if args.dev { + cmd.arg("--dev"); } let child = cmd.spawn()?; @@ -857,10 +889,11 @@ impl Endpoint { Ok(state) => { match state.status { ComputeStatus::Init => { - if Instant::now().duration_since(start_at) > start_timeout { + let timeout = args.start_timeout; + if Instant::now().duration_since(start_at) > timeout { bail!( "compute startup timed out {:?}; still in Init state", - start_timeout + timeout ); } // keep retrying @@ -881,17 +914,17 @@ impl Endpoint { ComputeStatus::Empty | ComputeStatus::ConfigurationPending | ComputeStatus::Configuration - | ComputeStatus::TerminationPending + | ComputeStatus::TerminationPending { .. 
} | ComputeStatus::Terminated => { bail!("unexpected compute status: {:?}", state.status) } } } Err(e) => { - if Instant::now().duration_since(start_at) > start_timeout { + if Instant::now().duration_since(start_at) > args.start_timeout { return Err(e).context(format!( "timed out {:?} waiting to connect to compute_ctl HTTP", - start_timeout, + args.start_timeout )); } } @@ -930,7 +963,7 @@ impl Endpoint { // reqwest does not export its error construction utility functions, so let's craft the message ourselves let url = response.url().to_owned(); let msg = match response.text().await { - Ok(err_body) => format!("Error: {}", err_body), + Ok(err_body) => format!("Error: {err_body}"), Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), }; Err(anyhow::anyhow!(msg)) @@ -939,9 +972,10 @@ impl Endpoint { pub async fn reconfigure( &self, - mut pageservers: Vec<(Host, u16)>, + pageserver_conninfo: Option, stripe_size: Option, safekeepers: Option>, + safekeeper_generation: Option, ) -> Result<()> { let (mut spec, compute_ctl_config) = { let config_path = self.endpoint_path().join("config.json"); @@ -954,26 +988,15 @@ impl Endpoint { let postgresql_conf = self.read_postgresql_conf()?; spec.cluster.postgresql_conf = Some(postgresql_conf); - // If we weren't given explicit pageservers, query the storage controller - if pageservers.is_empty() { - let storage_controller = StorageController::from_env(&self.env); - let locate_result = storage_controller.tenant_locate(self.tenant_id).await?; - pageservers = locate_result - .shards - .into_iter() - .map(|shard| { - ( - Host::parse(&shard.listen_pg_addr) - .expect("Storage controller reported bad hostname"), - shard.listen_pg_port, - ) - }) - .collect::>(); + if let Some(pageserver_conninfo) = pageserver_conninfo { + // If pageservers are provided, we need to ensure that they are not empty. + // This is a requirement for the compute_ctl configuration. 
+ anyhow::ensure!( + !pageserver_conninfo.shards.is_empty(), + "no pageservers provided" + ); + spec.pageserver_connection_info = Some(pageserver_conninfo); } - - let pageserver_connstr = Self::build_pageserver_connstr(&pageservers); - assert!(!pageserver_connstr.is_empty()); - spec.pageserver_connstring = Some(pageserver_connstr); if stripe_size.is_some() { spec.shard_stripe_size = stripe_size.map(|s| s.0 as usize); } @@ -982,6 +1005,9 @@ impl Endpoint { if let Some(safekeepers) = safekeepers { let safekeeper_connstrings = self.build_safekeepers_connstrs(safekeepers)?; spec.safekeeper_connstrings = safekeeper_connstrings; + if let Some(g) = safekeeper_generation { + spec.safekeepers_generation = Some(g.into_inner()); + } } let client = reqwest::Client::builder() @@ -1012,15 +1038,52 @@ impl Endpoint { } else { let url = response.url().to_owned(); let msg = match response.text().await { - Ok(err_body) => format!("Error: {}", err_body), + Ok(err_body) => format!("Error: {err_body}"), Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), }; Err(anyhow::anyhow!(msg)) } } - pub fn stop(&self, mode: &str, destroy: bool) -> Result<()> { - self.pg_ctl(&["-m", mode, "stop"], &None)?; + pub async fn reconfigure_pageservers( + &self, + pageservers: PageserverConnectionInfo, + stripe_size: Option, + ) -> Result<()> { + self.reconfigure(Some(pageservers), stripe_size, None, None) + .await + } + + pub async fn reconfigure_safekeepers( + &self, + safekeepers: Vec, + generation: SafekeeperGeneration, + ) -> Result<()> { + self.reconfigure(None, None, Some(safekeepers), Some(generation)) + .await + } + + pub async fn stop( + &self, + mode: EndpointTerminateMode, + destroy: bool, + ) -> Result { + // pg_ctl stop is fast but doesn't allow us to collect LSN. /terminate is + // slow, and test runs time out. Solution: special mode "immediate-terminate" + // which uses /terminate + let response = if let EndpointTerminateMode::ImmediateTerminate = mode { + let ip = self.external_http_address.ip(); + let port = self.external_http_address.port(); + let url = format!("http://{ip}:{port}/terminate?mode=immediate"); + let token = self.generate_jwt(Some(ComputeClaimsScope::Admin))?; + let request = reqwest::Client::new().post(url).bearer_auth(token); + let response = request.send().await.context("/terminate")?; + let text = response.text().await.context("/terminate result")?; + serde_json::from_str(&text).with_context(|| format!("deserializing {text}"))? + } else { + self.pg_ctl(&["-m", &mode.to_string(), "stop"], &None)?; + TerminateResponse { lsn: None } + }; // Also wait for the compute_ctl process to die. It might have some // cleanup work to do after postgres stops, like syncing safekeepers, @@ -1030,7 +1093,7 @@ impl Endpoint { // waiting. Sometimes we do *not* want this cleanup: tests intentionally // do stop when majority of safekeepers is down, so sync-safekeepers // would hang otherwise. This could be a separate flag though. 
- let send_sigterm = destroy || mode == "immediate"; + let send_sigterm = destroy || !matches!(mode, EndpointTerminateMode::Fast); self.wait_for_compute_ctl_to_exit(send_sigterm)?; if destroy { println!( @@ -1039,7 +1102,7 @@ impl Endpoint { ); std::fs::remove_dir_all(self.endpoint_path())?; } - Ok(()) + Ok(response) } pub fn connstr(&self, user: &str, db_name: &str) -> String { diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index 47b77f0720..d0611113e8 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -12,9 +12,11 @@ use std::{env, fs}; use anyhow::{Context, bail}; use clap::ValueEnum; +use pageserver_api::config::PostHogConfig; use pem::Pem; use postgres_backend::AuthType; use reqwest::{Certificate, Url}; +use safekeeper_api::PgMajorVersion; use serde::{Deserialize, Serialize}; use utils::auth::encode_from_key_file; use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId}; @@ -209,6 +211,12 @@ pub struct NeonStorageControllerConf { pub use_https_safekeeper_api: bool, pub use_local_compute_notifications: bool, + + pub timeline_safekeeper_count: Option, + + pub posthog_config: Option, + + pub kick_secondary_downloads: Option, } impl NeonStorageControllerConf { @@ -236,9 +244,12 @@ impl Default for NeonStorageControllerConf { heartbeat_interval: Self::DEFAULT_HEARTBEAT_INTERVAL, long_reconcile_threshold: None, use_https_pageserver_api: false, - timelines_onto_safekeepers: false, + timelines_onto_safekeepers: true, use_https_safekeeper_api: false, use_local_compute_notifications: true, + timeline_safekeeper_count: None, + posthog_config: None, + kick_secondary_downloads: None, } } } @@ -254,7 +265,7 @@ impl Default for EndpointStorageConf { impl NeonBroker { pub fn client_url(&self) -> Url { let url = if let Some(addr) = self.listen_https_addr { - format!("https://{}", addr) + format!("https://{addr}") } else { format!( "http://{}", @@ -418,25 +429,21 @@ impl LocalEnv { self.pg_distrib_dir.clone() } - pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_distrib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { let path = self.pg_distrib_dir.clone(); - #[allow(clippy::manual_range_patterns)] - match pg_version { - 14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))), - _ => bail!("Unsupported postgres version: {}", pg_version), - } + Ok(path.join(pg_version.v_str())) } - pub fn pg_dir(&self, pg_version: u32, dir_name: &str) -> anyhow::Result { + pub fn pg_dir(&self, pg_version: PgMajorVersion, dir_name: &str) -> anyhow::Result { Ok(self.pg_distrib_dir(pg_version)?.join(dir_name)) } - pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { self.pg_dir(pg_version, "bin") } - pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { self.pg_dir(pg_version, "lib") } @@ -727,7 +734,7 @@ impl LocalEnv { let config_toml_path = dentry.path().join("pageserver.toml"); let config_toml: PageserverConfigTomlSubset = toml_edit::de::from_str( &std::fs::read_to_string(&config_toml_path) - .with_context(|| format!("read {:?}", config_toml_path))?, + .with_context(|| format!("read {config_toml_path:?}"))?, ) .context("parse pageserver.toml")?; let identity_toml_path = dentry.path().join("identity.toml"); @@ -737,7 +744,7 @@ impl LocalEnv { } let identity_toml: IdentityTomlSubset = toml_edit::de::from_str( 
&std::fs::read_to_string(&identity_toml_path) - .with_context(|| format!("read {:?}", identity_toml_path))?, + .with_context(|| format!("read {identity_toml_path:?}"))?, ) .context("parse identity.toml")?; let PageserverConfigTomlSubset { diff --git a/control_plane/src/pageserver.rs b/control_plane/src/pageserver.rs index 0cf7ca184d..3f66960edd 100644 --- a/control_plane/src/pageserver.rs +++ b/control_plane/src/pageserver.rs @@ -16,11 +16,13 @@ use std::time::Duration; use anyhow::{Context, bail}; use camino::Utf8PathBuf; +use pageserver_api::config::{DEFAULT_GRPC_LISTEN_PORT, DEFAULT_HTTP_LISTEN_PORT}; use pageserver_api::models::{self, TenantInfo, TimelineInfo}; use pageserver_api::shard::TenantShardId; use pageserver_client::mgmt_api; use postgres_backend::AuthType; use postgres_connection::{PgConnectionConfig, parse_host_port}; +use safekeeper_api::PgMajorVersion; use utils::auth::{Claims, Scope}; use utils::id::{NodeId, TenantId, TimelineId}; use utils::lsn::Lsn; @@ -120,7 +122,7 @@ impl PageServerNode { .env .generate_auth_token(&Claims::new(None, Scope::GenerationsApi)) .unwrap(); - overrides.push(format!("control_plane_api_token='{}'", jwt_token)); + overrides.push(format!("control_plane_api_token='{jwt_token}'")); } if !conf.other.contains_key("remote_storage") { @@ -252,9 +254,10 @@ impl PageServerNode { // the storage controller let metadata_path = datadir.join("metadata.json"); - let (_http_host, http_port) = + let http_host = "localhost".to_string(); + let (_, http_port) = parse_host_port(&self.conf.listen_http_addr).expect("Unable to parse listen_http_addr"); - let http_port = http_port.unwrap_or(9898); + let http_port = http_port.unwrap_or(DEFAULT_HTTP_LISTEN_PORT); let https_port = match self.conf.listen_https_addr.as_ref() { Some(https_addr) => { @@ -265,6 +268,13 @@ impl PageServerNode { None => None, }; + let (mut grpc_host, mut grpc_port) = (None, None); + if let Some(grpc_addr) = &self.conf.listen_grpc_addr { + let (_, port) = parse_host_port(grpc_addr).expect("Unable to parse listen_grpc_addr"); + grpc_host = Some("localhost".to_string()); + grpc_port = Some(port.unwrap_or(DEFAULT_GRPC_LISTEN_PORT)); + } + // Intentionally hand-craft JSON: this acts as an implicit format compat test // in case the pageserver-side structure is edited, and reflects the real life // situation: the metadata is written by some other script. 
@@ -273,7 +283,9 @@ impl PageServerNode { serde_json::to_vec(&pageserver_api::config::NodeMetadata { postgres_host: "localhost".to_string(), postgres_port: self.pg_connection_config.port(), - http_host: "localhost".to_string(), + grpc_host, + grpc_port, + http_host, http_port, https_port, other: HashMap::from([( @@ -596,7 +608,7 @@ impl PageServerNode { timeline_id: TimelineId, base: (Lsn, PathBuf), pg_wal: Option<(Lsn, PathBuf)>, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<()> { // Init base reader let (start_lsn, base_tarfile_path) = base; @@ -635,4 +647,16 @@ impl PageServerNode { Ok(()) } + pub async fn timeline_info( + &self, + tenant_shard_id: TenantShardId, + timeline_id: TimelineId, + force_await_logical_size: mgmt_api::ForceAwaitLogicalSize, + ) -> anyhow::Result { + let timeline_info = self + .http_client + .timeline_info(tenant_shard_id, timeline_id, force_await_logical_size) + .await?; + Ok(timeline_info) + } } diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index eec2c997e6..da9dafd8e9 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -6,7 +6,6 @@ //! .neon/safekeepers/ //! ``` use std::error::Error as _; -use std::future::Future; use std::io::Write; use std::path::PathBuf; use std::time::Duration; @@ -14,9 +13,9 @@ use std::{io, result}; use anyhow::Context; use camino::Utf8PathBuf; -use http_utils::error::HttpErrorBody; use postgres_connection::PgConnectionConfig; -use reqwest::{IntoUrl, Method}; +use safekeeper_api::models::TimelineCreateRequest; +use safekeeper_client::mgmt_api; use thiserror::Error; use utils::auth::{Claims, Scope}; use utils::id::NodeId; @@ -35,25 +34,14 @@ pub enum SafekeeperHttpError { type Result = result::Result; -pub(crate) trait ResponseErrorMessageExt: Sized { - fn error_from_body(self) -> impl Future> + Send; -} - -impl ResponseErrorMessageExt for reqwest::Response { - async fn error_from_body(self) -> Result { - let status = self.status(); - if !(status.is_client_error() || status.is_server_error()) { - return Ok(self); - } - - // reqwest does not export its error construction utility functions, so let's craft the message ourselves - let url = self.url().to_owned(); - Err(SafekeeperHttpError::Response( - match self.json::().await { - Ok(err_body) => format!("Error: {}", err_body.msg), - Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), - }, - )) +fn err_from_client_err(err: mgmt_api::Error) -> SafekeeperHttpError { + use mgmt_api::Error::*; + match err { + ApiError(_, str) => SafekeeperHttpError::Response(str), + Cancelled => SafekeeperHttpError::Response("Cancelled".to_owned()), + ReceiveBody(err) => SafekeeperHttpError::Transport(err), + ReceiveErrorBody(err) => SafekeeperHttpError::Response(err), + Timeout(str) => SafekeeperHttpError::Response(format!("timeout: {str}")), } } @@ -70,9 +58,8 @@ pub struct SafekeeperNode { pub pg_connection_config: PgConnectionConfig, pub env: LocalEnv, - pub http_client: reqwest::Client, + pub http_client: mgmt_api::Client, pub listen_addr: String, - pub http_base_url: String, } impl SafekeeperNode { @@ -82,13 +69,14 @@ impl SafekeeperNode { } else { "127.0.0.1".to_string() }; + let jwt = None; + let http_base_url = format!("http://{}:{}", listen_addr, conf.http_port); SafekeeperNode { id: conf.id, conf: conf.clone(), pg_connection_config: Self::safekeeper_connection_config(&listen_addr, conf.pg_port), env: env.clone(), - http_client: env.create_http_client(), - http_base_url: format!("http://{}:{}/v1", 
listen_addr, conf.http_port), + http_client: mgmt_api::Client::new(env.create_http_client(), http_base_url, jwt), listen_addr, } } @@ -155,7 +143,7 @@ impl SafekeeperNode { let id_string = id.to_string(); // TODO: add availability_zone to the config. // Right now we just specify any value here and use it to check metrics in tests. - let availability_zone = format!("sk-{}", id_string); + let availability_zone = format!("sk-{id_string}"); let mut args = vec![ "-D".to_owned(), @@ -278,20 +266,19 @@ impl SafekeeperNode { ) } - fn http_request(&self, method: Method, url: U) -> reqwest::RequestBuilder { - // TODO: authentication - //if self.env.auth_type == AuthType::NeonJWT { - // builder = builder.bearer_auth(&self.env.safekeeper_auth_token) - //} - self.http_client.request(method, url) + pub async fn check_status(&self) -> Result<()> { + self.http_client + .status() + .await + .map_err(err_from_client_err)?; + Ok(()) } - pub async fn check_status(&self) -> Result<()> { - self.http_request(Method::GET, format!("{}/{}", self.http_base_url, "status")) - .send() - .await? - .error_from_body() - .await?; + pub async fn create_timeline(&self, req: &TimelineCreateRequest) -> Result<()> { + self.http_client + .create_timeline(req) + .await + .map_err(err_from_client_err)?; Ok(()) } } diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index 755d67a7ad..bb83a6319c 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -6,6 +6,8 @@ use std::str::FromStr; use std::sync::OnceLock; use std::time::{Duration, Instant}; +use crate::background_process; +use crate::local_env::{LocalEnv, NeonStorageControllerConf}; use camino::{Utf8Path, Utf8PathBuf}; use hyper0::Uri; use nix::unistd::Pid; @@ -22,6 +24,7 @@ use pageserver_client::mgmt_api::ResponseErrorMessageExt; use pem::Pem; use postgres_backend::AuthType; use reqwest::{Method, Response}; +use safekeeper_api::PgMajorVersion; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use tokio::process::Command; @@ -31,9 +34,6 @@ use utils::auth::{Claims, Scope, encode_from_key_file}; use utils::id::{NodeId, TenantId}; use whoami::username; -use crate::background_process; -use crate::local_env::{LocalEnv, NeonStorageControllerConf}; - pub struct StorageController { env: LocalEnv, private_key: Option, @@ -48,7 +48,7 @@ pub struct StorageController { const COMMAND: &str = "storage_controller"; -const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16; +const STORAGE_CONTROLLER_POSTGRES_VERSION: PgMajorVersion = PgMajorVersion::PG16; const DB_NAME: &str = "storage_controller"; @@ -167,7 +167,7 @@ impl StorageController { fn storage_controller_instance_dir(&self, instance_id: u8) -> PathBuf { self.env .base_data_dir - .join(format!("storage_controller_{}", instance_id)) + .join(format!("storage_controller_{instance_id}")) } fn pid_file(&self, instance_id: u8) -> Utf8PathBuf { @@ -184,9 +184,15 @@ impl StorageController { /// to other versions if that one isn't found. Some automated tests create circumstances /// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`. 
async fn get_pg_dir(&self, dir_name: &str) -> anyhow::Result { - let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 16, 15, 14]; + const PREFER_VERSIONS: [PgMajorVersion; 5] = [ + STORAGE_CONTROLLER_POSTGRES_VERSION, + PgMajorVersion::PG16, + PgMajorVersion::PG15, + PgMajorVersion::PG14, + PgMajorVersion::PG17, + ]; - for v in prefer_versions { + for v in PREFER_VERSIONS { let path = Utf8PathBuf::from_path_buf(self.env.pg_dir(v, dir_name)?).unwrap(); if tokio::fs::try_exists(&path).await? { return Ok(path); @@ -220,7 +226,7 @@ impl StorageController { "-d", DB_NAME, "-p", - &format!("{}", postgres_port), + &format!("{postgres_port}"), ]; let pg_lib_dir = self.get_pg_lib_dir().await.unwrap(); let envs = [ @@ -263,7 +269,7 @@ impl StorageController { "-h", "localhost", "-p", - &format!("{}", postgres_port), + &format!("{postgres_port}"), "-U", &username(), "-O", @@ -425,7 +431,7 @@ impl StorageController { // from `LocalEnv`'s config file (`.neon/config`). tokio::fs::write( &pg_data_path.join("postgresql.conf"), - format!("port = {}\nfsync=off\n", postgres_port), + format!("port = {postgres_port}\nfsync=off\n"), ) .await?; @@ -477,7 +483,7 @@ impl StorageController { self.setup_database(postgres_port).await?; } - let database_url = format!("postgresql://localhost:{}/{DB_NAME}", postgres_port); + let database_url = format!("postgresql://localhost:{postgres_port}/{DB_NAME}"); // We support running a startup SQL script to fiddle with the database before we launch storcon. // This is used by the test suite. @@ -508,7 +514,7 @@ impl StorageController { drop(client); conn.await??; - let addr = format!("{}:{}", host, listen_port); + let addr = format!("{host}:{listen_port}"); let address_for_peers = Uri::builder() .scheme(scheme) .authority(addr.clone()) @@ -557,6 +563,10 @@ impl StorageController { args.push("--use-local-compute-notifications".to_string()); } + if let Some(value) = self.config.kick_secondary_downloads { + args.push(format!("--kick-secondary-downloads={value}")); + } + if let Some(ssl_ca_file) = self.env.ssl_ca_cert_path() { args.push(format!("--ssl-ca-file={}", ssl_ca_file.to_str().unwrap())); } @@ -628,6 +638,28 @@ impl StorageController { args.push("--timelines-onto-safekeepers".to_string()); } + // neon_local is used in test environments where we often have less than 3 safekeepers. 
+ if self.config.timeline_safekeeper_count.is_some() || self.env.safekeepers.len() < 3 { + let sk_cnt = self + .config + .timeline_safekeeper_count + .unwrap_or(self.env.safekeepers.len()); + + args.push(format!("--timeline-safekeeper-count={sk_cnt}")); + } + + let mut envs = vec![ + ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), + ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), + ]; + + if let Some(posthog_config) = &self.config.posthog_config { + envs.push(( + "POSTHOG_CONFIG".to_string(), + serde_json::to_string(posthog_config)?, + )); + } + println!("Starting storage controller"); background_process::start_process( @@ -635,10 +667,7 @@ impl StorageController { &instance_dir, &self.env.storage_controller_bin(), args, - vec![ - ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), - ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), - ], + envs, background_process::InitialPidFile::Create(self.pid_file(start_args.instance_id)), &start_args.start_timeout, || async { @@ -802,9 +831,9 @@ impl StorageController { builder = builder.json(&body) } if let Some(private_key) = &self.private_key { - println!("Getting claims for path {}", path); + println!("Getting claims for path {path}"); if let Some(required_claims) = Self::get_claims_for_path(&path)? { - println!("Got claims {:?} for path {}", required_claims, path); + println!("Got claims {required_claims:?} for path {path}"); let jwt_token = encode_from_key_file(&required_claims, private_key)?; builder = builder.header( reqwest::header::AUTHORIZATION, diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 19c686dcfd..701c4b3b2e 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -36,6 +36,10 @@ enum Command { listen_pg_addr: String, #[arg(long)] listen_pg_port: u16, + #[arg(long)] + listen_grpc_addr: Option, + #[arg(long)] + listen_grpc_port: Option, #[arg(long)] listen_http_addr: String, @@ -61,10 +65,31 @@ enum Command { #[arg(long)] scheduling: Option, }, + /// Exists for backup usage and will be removed in future. + /// Use [`Command::NodeStartDelete`] instead, if possible. NodeDelete { #[arg(long)] node_id: NodeId, }, + /// Start deletion of the specified pageserver. + NodeStartDelete { + #[arg(long)] + node_id: NodeId, + }, + /// Cancel deletion of the specified pageserver and wait for `timeout` + /// for the operation to be canceled. May be retried. + NodeCancelDelete { + #[arg(long)] + node_id: NodeId, + #[arg(long)] + timeout: humantime::Duration, + }, + /// Delete a tombstone of node from the storage controller. + /// This is used when we want to allow the node to be re-registered. 
+ NodeDeleteTombstone { + #[arg(long)] + node_id: NodeId, + }, /// Modify a tenant's policies in the storage controller TenantPolicy { #[arg(long)] @@ -82,6 +107,8 @@ enum Command { }, /// List nodes known to the storage controller Nodes {}, + /// List soft deleted nodes known to the storage controller + NodeTombstones {}, /// List tenants known to the storage controller Tenants { /// If this field is set, it will list the tenants on a specific node @@ -410,6 +437,8 @@ async fn main() -> anyhow::Result<()> { node_id, listen_pg_addr, listen_pg_port, + listen_grpc_addr, + listen_grpc_port, listen_http_addr, listen_http_port, listen_https_port, @@ -423,6 +452,8 @@ async fn main() -> anyhow::Result<()> { node_id, listen_pg_addr, listen_pg_port, + listen_grpc_addr, + listen_grpc_port, listen_http_addr, listen_http_port, listen_https_port, @@ -633,7 +664,7 @@ async fn main() -> anyhow::Result<()> { response .new_shards .iter() - .map(|s| format!("{:?}", s)) + .map(|s| format!("{s:?}")) .collect::>() .join(",") ); @@ -755,8 +786,8 @@ async fn main() -> anyhow::Result<()> { println!("Tenant {tenant_id}"); let mut table = comfy_table::Table::new(); - table.add_row(["Policy", &format!("{:?}", policy)]); - table.add_row(["Stripe size", &format!("{:?}", stripe_size)]); + table.add_row(["Policy", &format!("{policy:?}")]); + table.add_row(["Stripe size", &format!("{stripe_size:?}")]); table.add_row(["Config", &serde_json::to_string_pretty(&config).unwrap()]); println!("{table}"); println!("Shards:"); @@ -773,7 +804,7 @@ async fn main() -> anyhow::Result<()> { let secondary = shard .node_secondary .iter() - .map(|n| format!("{}", n)) + .map(|n| format!("{n}")) .collect::>() .join(","); @@ -847,7 +878,7 @@ async fn main() -> anyhow::Result<()> { } } else { // Make it obvious to the user that since they've omitted an AZ, we're clearing it - eprintln!("Clearing preferred AZ for tenant {}", tenant_id); + eprintln!("Clearing preferred AZ for tenant {tenant_id}"); } // Construct a request that modifies all the tenant's shards @@ -896,10 +927,76 @@ async fn main() -> anyhow::Result<()> { .await?; } Command::NodeDelete { node_id } => { + eprintln!("Warning: This command is obsolete and will be removed in a future version"); + eprintln!("Use `NodeStartDelete` instead, if possible"); storcon_client .dispatch::<(), ()>(Method::DELETE, format!("control/v1/node/{node_id}"), None) .await?; } + Command::NodeStartDelete { node_id } => { + storcon_client + .dispatch::<(), ()>( + Method::PUT, + format!("control/v1/node/{node_id}/delete"), + None, + ) + .await?; + println!("Delete started for {node_id}"); + } + Command::NodeCancelDelete { node_id, timeout } => { + storcon_client + .dispatch::<(), ()>( + Method::DELETE, + format!("control/v1/node/{node_id}/delete"), + None, + ) + .await?; + + println!("Waiting for node {node_id} to quiesce on scheduling policy ..."); + + let final_policy = + wait_for_scheduling_policy(storcon_client, node_id, *timeout, |sched| { + !matches!(sched, NodeSchedulingPolicy::Deleting) + }) + .await?; + + println!( + "Delete was cancelled for node {node_id}. 
Schedulling policy is now {final_policy:?}" + ); + } + Command::NodeDeleteTombstone { node_id } => { + storcon_client + .dispatch::<(), ()>( + Method::DELETE, + format!("debug/v1/tombstone/{node_id}"), + None, + ) + .await?; + } + Command::NodeTombstones {} => { + let mut resp = storcon_client + .dispatch::<(), Vec>( + Method::GET, + "debug/v1/tombstone".to_string(), + None, + ) + .await?; + + resp.sort_by(|a, b| a.listen_http_addr.cmp(&b.listen_http_addr)); + + let mut table = comfy_table::Table::new(); + table.set_header(["Id", "Hostname", "AZ", "Scheduling", "Availability"]); + for node in resp { + table.add_row([ + format!("{}", node.id), + node.listen_http_addr, + node.availability_zone_id, + format!("{:?}", node.scheduling), + format!("{:?}", node.availability), + ]); + } + println!("{table}"); + } Command::TenantSetTimeBasedEviction { tenant_id, period, @@ -1085,8 +1182,7 @@ async fn main() -> anyhow::Result<()> { Err((tenant_shard_id, from, to, error)) => { failure += 1; println!( - "Failed to migrate {} from node {} to node {}: {}", - tenant_shard_id, from, to, error + "Failed to migrate {tenant_shard_id} from node {from} to node {to}: {error}" ); } } @@ -1228,8 +1324,7 @@ async fn main() -> anyhow::Result<()> { concurrency, } => { let mut path = format!( - "/v1/tenant/{}/timeline/{}/download_heatmap_layers", - tenant_shard_id, timeline_id, + "/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/download_heatmap_layers", ); if let Some(c) = concurrency { @@ -1254,8 +1349,7 @@ async fn watch_tenant_shard( ) -> anyhow::Result<()> { if let Some(until_migrated_to) = until_migrated_to { println!( - "Waiting for tenant shard {} to be migrated to node {}", - tenant_shard_id, until_migrated_to + "Waiting for tenant shard {tenant_shard_id} to be migrated to node {until_migrated_to}" ); } @@ -1278,7 +1372,7 @@ async fn watch_tenant_shard( "attached: {} secondary: {} {}", shard .node_attached - .map(|n| format!("{}", n)) + .map(|n| format!("{n}")) .unwrap_or("none".to_string()), shard .node_secondary @@ -1292,15 +1386,12 @@ async fn watch_tenant_shard( "(reconciler idle)" } ); - println!("{}", summary); + println!("{summary}"); // Maybe drop out if we finished migration if let Some(until_migrated_to) = until_migrated_to { if shard.node_attached == Some(until_migrated_to) && !shard.is_reconciling { - println!( - "Tenant shard {} is now on node {}", - tenant_shard_id, until_migrated_to - ); + println!("Tenant shard {tenant_shard_id} is now on node {until_migrated_to}"); break; } } diff --git a/docker-compose/compute_wrapper/shell/compute.sh b/docker-compose/compute_wrapper/shell/compute.sh index c8ca812bf9..1e62e91fd0 100755 --- a/docker-compose/compute_wrapper/shell/compute.sh +++ b/docker-compose/compute_wrapper/shell/compute.sh @@ -95,3 +95,4 @@ echo "Start compute node" -b /usr/local/bin/postgres \ --compute-id "compute-${RANDOM}" \ --config "${CONFIG_FILE}" + --dev diff --git a/docker-compose/compute_wrapper/var/db/postgres/configs/config.json b/docker-compose/compute_wrapper/var/db/postgres/configs/config.json index 21caf3800c..60e232425b 100644 --- a/docker-compose/compute_wrapper/var/db/postgres/configs/config.json +++ b/docker-compose/compute_wrapper/var/db/postgres/configs/config.json @@ -4,6 +4,7 @@ "timestamp": "2022-10-12T18:00:00.000Z", "operation_uuid": "0f657b36-4b0f-4a2d-9c2e-1dcd615e7d8c", + "suspend_timeout_seconds": -1, "cluster": { "cluster_id": "docker_compose", diff --git a/docs/rfcs/040-Endpoint-Persistent-Unlogged-Files-Storage.md 
b/docs/rfcs/040-Endpoint-Persistent-Unlogged-Files-Storage.md new file mode 100644 index 0000000000..182b2682af --- /dev/null +++ b/docs/rfcs/040-Endpoint-Persistent-Unlogged-Files-Storage.md @@ -0,0 +1,396 @@ +# Memo: Endpoint Persistent Unlogged Files Storage +Created on 2024-11-05 +Implemented on N/A + +## Summary +A design for a storage system that allows storage of files required to make +Neon's Endpoints have a better experience at or after a reboot. + +## Motivation +Several systems inside PostgreSQL (and Neon) need some persistent storage for +optimal workings across reboots and restarts, but still work without. +Examples are the query-level statistics files of `pg_stat_statements` in +`pg_stat/pg_stat_statements.stat`, and `pg_prewarm`'s `autoprewarm.blocks`. +We need a storage system that can store and manage these files for each +Endpoint, without necessarily granting users access to an unlimited storage +device. + +## Goals +- Store known files for Endpoints with reasonable persistence. + _Data loss in this service, while annoying and bad for UX, won't lose any + customer's data._ + +## Non Goals (if relevant) +- This storage system does not need branching, file versioning, or other such + features. The files are as ephemeral to the timeline of the data as the + Endpoints that host the data. +- This storage system does not need to store _all_ user files, only 'known' + user files. +- This storage system does not need to be hosted fully inside Computes. + _Instead, this will be a separate component similar to Pageserver, + SafeKeeper, the S3 proxy used for dynamically loaded extensions, etc._ + +## Impacted components +- Compute needs new code to load and store these files in its lifetime. +- Control Plane needs to consider this new storage system when signalling + the deletion of an Endpoint, Timeline, or Tenant. +- Control Plane needs to consider this new storage system when it resets + or re-assigns an endpoint's timeline/branch state. + +A new service is created: the Endpoint Persistent Unlogged Files Storage +service. This could be integrated in e.g. Pageserver or Control Plane, or a +separately hosted service. + +## Proposed implementation +Endpoint-related data files are managed by a newly designed service (which +optionally is integrated in an existing service like Pageserver or Control +Plane), which stores data directly into S3 or any blob storage of choice. + +Upon deletion of the Endpoint, or reassignment of the endpoint to a different +branch, this ephemeral data is dropped: the data stored may not match the +state of the branch's data after reassignment, and on endpoint deletion the +data won't have any use to the user. + +Compute gets credentials (JWT token with Tenant, Timeline & Endpoint claims) +which it can use to authenticate to this new service and retrieve and store +data associated with this endpoint. This limited scope reduces leaks of data +across endpoints and timeline resets, and limits the ability of endpoints to +mess with other endpoints' data. + +The path of this endpoint data in S3 is initially as follows: + + s3:/// + tenants/ + / + tenants/ + / + endpoints/ + / + pgdata/ + + +For other blob storages an equivalent or similar path can be constructed. + +### Reliability, failure modes and corner cases (if relevant) +Reliability is important, but not critical to the workings of Neon. The data +stored in this service will, when lost, reduce performance, but won't be a +cause of permanent data loss - only operational metadata is stored. 
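+For concreteness, here is a minimal client-side sketch of the store operation described in the proposed implementation above. The expanded path layout (`tenants/<tenant_id>/timelines/<timeline_id>/endpoints/<endpoint_id>/pgdata/<filename>`), the service URL, and the helper names are assumptions for illustration; this RFC does not fix any of them.
+
+```rust
+use anyhow::Result;
+
+/// Hypothetical helper: build the object key for one endpoint file,
+/// following the layout sketched above.
+fn epufs_key(tenant_id: &str, timeline_id: &str, endpoint_id: &str, file: &str) -> String {
+    format!("tenants/{tenant_id}/timelines/{timeline_id}/endpoints/{endpoint_id}/pgdata/{file}")
+}
+
+/// Upload one Unlogged Persistent File through the EPUFS HTTP API.
+/// `base_url` and the JWT `token` are assumed to be handed to compute_ctl
+/// by the control plane.
+async fn store_upf(base_url: &str, token: &str, key: &str, body: Vec<u8>) -> Result<()> {
+    let url = format!("{base_url}/{key}");
+    let response = reqwest::Client::new()
+        .post(url)
+        .bearer_auth(token)
+        .body(body)
+        .send()
+        .await?;
+    // Surface 4xx/5xx responses (e.g. auth failures) as errors.
+    response.error_for_status()?;
+    Ok(())
+}
+```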
+ +Most, if not all, blob storage services have sufficiently high persistence +guarantees to cater our need for persistence and uptime. The only concern with +blob storages is that the access latency is generally higher than local disk, +but for the object types stored (cache state, ...) I don't think this will be +much of an issue. + +### Interaction/Sequence diagram (if relevant) + +In these diagrams you can replace S3 with any persistent storage device of +choice, but S3 is chosen as representative name: The well-known and short name +of AWS' blob storage. Azure Blob Storage should work too, but it has a much +longer name making it less practical for the diagrams. + +Write data: + +```http +POST /tenants//timelines//endpoints//pgdata/ +Host: epufs.svc.neon.local + +<<< + +200 OK +{ + "version": "", # opaque file version token, changes when the file contents change + "size": , +} +``` + +```mermaid +sequenceDiagram + autonumber + participant co as Compute + participant ep as EPUFS + participant s3 as Blob Storage + + co-->ep: Connect with credentials + co->>+ep: Store Unlogged Persistent File + opt is authenticated + ep->>s3: Write UPF to S3 + end + ep->>-co: OK / Failure / Auth Failure + co-->ep: Cancel connection +``` + +Read data: (optional with cache-relevant request parameters, e.g. If-Modified-Since) +```http +GET /tenants//timelines//endpoints//pgdata/ +Host: epufs.svc.neon.local + +<<< + +200 OK + + +``` + +```mermaid +sequenceDiagram + autonumber + participant co as Compute + participant ep as EPUFS + participant s3 as Blob Storage + + co->>+ep: Read Unlogged Persistent File + opt is authenticated + ep->>+s3: Request UPF from storage + s3->>-ep: Receive UPF from storage + end + ep->>-co: OK(response) / Failure(storage, auth, ...) +``` + +Compute Startup: +```mermaid +sequenceDiagram + autonumber + participant co as Compute + participant ps as Pageserver + participant ep as EPUFS + participant es as Extension server + + note over co: Bind endpoint ep-xxx + par Get basebackup + co->>+ps: Request basebackup @ LSN + ps-)ps: Construct basebackup + ps->>-co: Receive basebackup TAR @ LSN + and Get startup-critical Unlogged Persistent Files + co->>+ep: Get all UPFs of endpoint ep-xxx + ep-)ep: Retrieve and gather all UPFs + ep->>-co: TAR of UPFs + and Get startup-critical extensions + loop For every startup-critical extension + co->>es: Get critical extension + es->>co: Receive critical extension + end + end + note over co: Start compute +``` + +CPlane ops: +```http +DELETE /tenants//timelines//endpoints/ +Host: epufs.svc.neon.local + +<<< + +200 OK +{ + "tenant": "", + "timeline": "", + "endpoint": "", + "deleted": { + "files": , + "bytes": , + }, +} +``` + +```http +DELETE /tenants//timelines/ +Host: epufs.svc.neon.local + +<<< + +200 OK +{ + "tenant": "", + "timeline": "", + "deleted": { + "files": , + "bytes": , + }, +} +``` + +```http +DELETE /tenants/ +Host: epufs.svc.neon.local + +<<< + +200 OK +{ + "tenant": "", + "deleted": { + "files": , + "bytes": , + }, +} +``` + +```mermaid +sequenceDiagram + autonumber + participant cp as Control Plane + participant ep as EPUFS + participant s3 as Blob Storage + + alt Tenant deleted + cp-)ep: Tenant deleted + loop For every object associated with removed tenant + ep->>s3: Remove data of deleted tenant from Storage + end + opt + ep-)cp: Tenant cleanup complete + end + alt Timeline deleted + cp-)ep: Timeline deleted + loop For every object associated with removed timeline + ep->>s3: Remove data of deleted timeline from Storage + end + opt + 
ep-)cp: Timeline cleanup complete + end + else Endpoint reassigned or removed + cp->>+ep: Endpoint reassigned + loop For every object associated with reassigned/removed endpoint + ep->>s3: Remove data from Storage + end + ep->>-cp: Cleanup complete + end +``` + +### Scalability (if relevant) + +Provisionally: As this service is going to be part of compute startup, this +service should be able to quickly respond to all requests. Therefore this +service is deployed to every AZ we host Computes in, and Computes communicate +(generally) only to the EPUFS endpoint of the AZ they're hosted in. + +Local caching of frequently restarted endpoints' data or metadata may be +needed for best performance. However, due to the regional nature of stored +data but zonal nature of the service deployment, we should be careful when we +implement any local caching, as it is possible that computes in AZ 1 will +update data originally written and thus cached by AZ 2. Cache version tests +and invalidation is therefore required if we want to roll out caching to this +service, which is too broad a scope for an MVC. This is why caching is left +out of scope for this RFC, and should be considered separately after this RFC +is implemented. + +### Security implications (if relevant) +This service must be able to authenticate users at least by Tenant ID, +Timeline ID and Endpoint ID. This will use the existing JWT infrastructure of +Compute, which will be upgraded to the extent needed to support Timeline- and +Endpoint-based claims. + +The service requires unlimited access to (a prefix of) a blob storage bucket, +and thus must be hosted outside the Compute VM sandbox. + +A service that generates pre-signed request URLs for Compute to download the +data from that URL is likely problematic, too: Compute would be able to write +unlimited data to the bucket, or exfiltrate this signed URL to get read/write +access to specific objects in this bucket, which would still effectively give +users access to the S3 bucket (but with improved access logging). + +There may be a use case for transferring data associated with one endpoint to +another endpoint (e.g. to make one endpoint warm its caches with the state of +another endpoint), but that's not currently in scope, and specific needs may +be solved through out-of-line communication of data or pre-signed URLs. + +### Unresolved questions (if relevant) +Caching of files is not in the implementation scope of the document, but +should at some future point be considered to maximize performance. + +## Alternative implementation (if relevant) +Several ideas have come up to solve this issue: + +### Use AUXfile +One prevalent idea was to WAL-log the files using our AUXfile mechanism. + +Benefits: + ++ We already have this storage mechanism + +Demerits: + +- It isn't available on read replicas +- Additional WAL will be consumed during shutdown and after the shutdown + checkpoint, which needs PG modifications to work without panics. +- It increases the data we need to manage in our versioned storage, thus + causing higher storage costs with higher retention due to duplication at + the storage layer. 
+ +### Sign URLs for read/write operations, instead of proxying them + +Benefits: + ++ The service can be implemented with a much reduced IO budget + +Demerits: + +- Users could get access to these signed credentials +- Not all blob storage services may implement URL signing + +### Give endpoints each their own directly accessed block volume + +Benefits: + ++ Easier to integrate for PostgreSQL + +Demerits: + +- Little control on data size and contents +- Potentially problematic as we'd need to store data all across the pgdata + directory. +- EBS is not a good candidate + - Attaches in 10s of seconds, if not more; i.e. too cold to start + - Shared EBS volumes are a no-go, as you'd have to schedule the endpoint + with users of the same EBS volumes, which can't work with VM migration + - EBS storage costs are very high (>80$/kilotenant when using a + volume/tenant) + - EBS volumes can't be mounted across AZ boundaries +- Bucket per endpoint is unfeasible + - S3 buckets are priced at $20/month per 1k, which we could better spend + on developers. + - Allocating service accounts takes time (100s of ms), and service accounts + are a limited resource, too; so they're not a good candidate to allocate + on a per-endpoint basis. + - Giving credentials limited to prefix has similar issues as the pre-signed + URL approach. + - Bucket DNS lookup will fill DNS caches and put pressure on DNS lookup + much more than our current systems would. +- Volumes bound by hypervisor are unlikely + - This requires significant investment and increased software on the + hypervisor. + - It is unclear if we can attach volumes after boot, i.e. for pooled + instances. + +### Put the files into a table + +Benefits: + + + Mostly already available in PostgreSQL + +Demerits: + + - Uses WAL + - Can't be used after shutdown checkpoint + - Needs a RW endpoint, and table & catalog access to write to this data + - Gets hit with DB size limitations + - Depending on user acces: + - Inaccessible: + The user doesn't have control over database size caused by + these systems. + - Accessible: + The user can corrupt these files and cause the system to crash while + user-corrupted files are present, thus increasing on-call overhead. + +## Definition of Done (if relevant) + +This project is done if we have: + +- One S3 bucket equivalent per region, which stores this per-endpoint data. +- A new service endpoint in at least every AZ, which indirectly grants + endpoints access to the data stored for these endpoints in these buckets. +- Compute writes & reads temp-data at shutdown and startup, respectively, for + at least the pg_prewarm or lfc_prewarm state files. +- Cleanup of endpoint data is triggered when the endpoint is deleted or is + detached from its current timeline. diff --git a/docs/rfcs/044-feature-flag.md b/docs/rfcs/044-feature-flag.md new file mode 100644 index 0000000000..3a0fe91a13 --- /dev/null +++ b/docs/rfcs/044-feature-flag.md @@ -0,0 +1,179 @@ +# Storage Feature Flags + +In this RFC, we will describe how we will implement per-tenant feature flags. + +## PostHog as Feature Flag Service + +Before we start, let's talk about how current feature flag services work. PostHog is the feature flag service we are currently using across multiple user-facing components in the company. PostHog has two modes of operation: HTTP evaluation and server-side local evaluation. + +Let's assume we have a storage feature flag called gc-compaction and we want to roll it out to scale-tier users with resident size >= 10GB and <= 100GB. 
+
+### Define User Profiles
+
+The first step is to synchronize our user profiles to the PostHog service. We can simply assume that each tenant is a user in PostHog. Each user profile has some properties associated with it. In our case, it will be: plan type (free, scale, enterprise, etc.); resident size (in bytes); primary pageserver (string); region (string).
+
+### Define Feature Flags
+
+We would create a feature flag called gc-compaction in PostHog with 4 variants: disabled, stage-1, stage-2, fully-enabled. We will flip the feature flag from disabled to fully-enabled stage by stage for some percentage of our users.
+
+### Option 1: HTTP Evaluation Mode
+
+When using PostHog's HTTP evaluation mode, the client will make a request to the PostHog service, asking for the value of a feature flag for a specific user.
+
+* Control plane will report the plan type to PostHog each time it attaches a tenant to the storcon or when the user upgrades/downgrades. It calls the PostHog profile API to associate the tenant ID with the plan type. Assume we have X active tenants and such an attach or plan change event happens each week; that would be 4X profile update requests per month.
+* Pageservers will report the resident size and the primary pageserver to the PostHog service. Assume we report resident size every 24 hours; that would be 30X requests per month.
+* Each tenant will request the state of the feature flag every 1 hour; that's 720X requests per month.
+* The Rust client would be easy to implement, as we only need to call the `/decide` API on PostHog.
+
+Using the HTTP evaluation mode, we will issue 754X requests a month.
+
+### Option 2: Local Evaluation Mode
+
+When using PostHog's local evaluation mode, the client (usually the server in a browser/server architecture) will poll the feature flag configuration every 30s (default in the Python client) from PostHog. Such configuration contains data like:
+
+ +Example JSON response from the PostHog local evaluation API + +``` +[ + { + "id": 1, + "name": "Beta Feature", + "key": "person-flag", + "is_simple_flag": True, + "active": True, + "filters": { + "groups": [ + { + "properties": [ + { + "key": "location", + "operator": "exact", + "value": ["Straße"], + "type": "person", + } + ], + "rollout_percentage": 100, + }, + { + "properties": [ + { + "key": "star", + "operator": "exact", + "value": ["ſun"], + "type": "person", + } + ], + "rollout_percentage": 100, + }, + ], + }, + } +] +``` + +
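+The spec above is meant to be fetched periodically and cached client-side rather than queried per decision. A minimal sketch of such a polling loop follows; the endpoint path, the auth query parameter, and the shared `RwLock<serde_json::Value>` cache are assumptions for illustration, not PostHog's or Neon's exact interfaces.
+
+```rust
+use std::sync::Arc;
+use std::time::Duration;
+
+/// Periodically refresh the locally cached feature flag spec.
+async fn poll_feature_flag_spec(
+    client: reqwest::Client,
+    base_url: String,
+    api_key: String,
+    cache: Arc<tokio::sync::RwLock<serde_json::Value>>,
+) {
+    let mut ticker = tokio::time::interval(Duration::from_secs(300)); // every 5 minutes
+    loop {
+        ticker.tick().await;
+        // Hypothetical local-evaluation endpoint; adjust to the real API.
+        let url = format!("{base_url}/api/feature_flag/local_evaluation?token={api_key}");
+        match client.get(url).send().await {
+            Ok(response) => match response.json::<serde_json::Value>().await {
+                Ok(spec) => *cache.write().await = spec,
+                Err(e) => tracing::warn!("failed to parse feature flag spec: {e}"),
+            },
+            Err(e) => tracing::warn!("failed to fetch feature flag spec: {e}"),
+        }
+    }
+}
+```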
+
+Note that the API only contains information like "under what condition => rollout percentage". The user is responsible for providing the required properties to the client for local evaluation, and the PostHog service (web UI) cannot know if a feature is enabled for the tenant or not until the client uses the `capture` API to report the result back. To control the rollout percentage, the user ID gets mapped to a float number in `[0, 1)` on a consistent hash ring. All values <= the percentage will get the feature enabled or set to the desired value.
+
+To use the local evaluation mode, the system needs:
+
+* Assume each pageserver will poll PostHog for the local evaluation JSON every 5 minutes (instead of the 30s default, which is too frequent). That's 8640Y requests per month, where Y is the number of pageservers. Local evaluation requests cost 10x more than a normal decide request, so that's 86400Y request units to bill.
+* Storcon needs to store the plan type in the database and pass that information to the pageserver when attaching the tenant.
+* Storcon also needs to update PostHog with the active tenants, for example, when the tenant gets detached/attached. Assume each active tenant gets detached/attached every week; that would be 4X requests per month.
+* We do not need to report the billing type or resident size to PostHog, as all of these are evaluated locally.
+* After each local evaluation of the feature flag, we need to call PostHog's capture event API to report the result of the evaluation (e.g. that the feature is enabled) back. We can do this when the flag gets changed compared with the last cached state in memory. That would be at least 4X (assume we do a deployment every week, so the cache gets cleared) and maybe an additional multiplier of 10, assuming we have 10 active features.
+
+In this case, we will issue 86400Y + 40X requests per month.
+
+Assume X = 1,000,000 and Y = 100:
+
+| | HTTP Evaluation | Local Evaluation |
+|---|---|---|
+| Latency of propagating the conditions/properties for feature flag | 24 hours | available locally |
+| Latency of applying the feature flag | 1 hour | 5 minutes |
+| Can properties be reported from different services | Yes | No |
+| Do we need to sync billing info etc. to pageserver | No | Yes |
+| Cost | 75400$ / month | 4864$ / month |
+
+# Our Solution
+
+We will use PostHog _only_ as a UI to configure the feature flags. Whether a feature is enabled or not can only be queried through storcon/pageserver instead of through the PostHog UI. (We could report it back to PostHog via `capture_event`, but it costs $$$.) This allows us to ramp up the feature flag functionality fast at first. At the same time, it also gives us the option to migrate to our own solution once we want more properties and more complex evaluation rules in our system.
+
+* We will create several fake users (tenants) in PostHog that contain all the properties we will use for evaluating a feature flag (i.e., resident size, billing type, pageserver id, etc.)
+* We will use PostHog's local evaluation API to poll the configuration of the feature flags and evaluate them locally on each of the pageservers.
+* The evaluation result will not be reported back to PostHog.
+* Storcon needs to pull some information from the cplane database.
+* To know if a feature is currently enabled or not, we need to call the storcon/pageserver API; and we won't be able to easily tell whether a feature was enabled on a tenant in the past: we need to look at the Grafana logs.
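+To make the rollout-percentage rule above concrete, here is a small sketch of the consistent-hash bucketing involved. It shows the general idea only; the exact hashing scheme used by PostHog (and by the lite client described later) may differ.
+
+```rust
+use sha2::{Digest, Sha256};
+
+/// Map a (flag key, tenant id) pair to a stable number in [0, 1).
+fn rollout_bucket(flag_key: &str, tenant_id: &str) -> f64 {
+    let digest = Sha256::digest(format!("{flag_key}.{tenant_id}").as_bytes());
+    // Take the first 8 bytes of the digest as a u64 and normalize.
+    let x = u64::from_be_bytes(digest.as_slice()[..8].try_into().unwrap());
+    x as f64 / (u64::MAX as f64 + 1.0)
+}
+
+/// A tenant is in the rollout if its bucket falls below the configured percentage.
+fn is_in_rollout(flag_key: &str, tenant_id: &str, rollout_percentage: f64) -> bool {
+    rollout_bucket(flag_key, tenant_id) < rollout_percentage / 100.0
+}
+```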
+
+We only need to pay for the 86400Y local evaluation requests (that would be setting X=0 in solution 2 => $864/month, and even less if we proxy it through storcon).
+
+## Implementation
+
+* Pageserver: implement a PostHog local evaluation client. The client will be shared across all tenants on the pageserver with a single API: `evaluate(tenant_id, feature_flag, properties) -> json`.
+* Storcon: if we need the plan type as an evaluation condition, pull it from the cplane database.
+* Storcon/Pageserver: implement an HTTP API `:tenant_id/feature/:feature` to retrieve the current feature flag status.
+* Storcon/Pageserver: a loop to update the feature flag spec on both storcon and the pageserver. The pageserver loop will only be activated if storcon does not push the specs to the pageserver.
+
+## Difference from Tenant Config
+
+* Feature flags can be rolled out by percentage, and the default config for each feature flag can be modified in the UI without going through the release process.
+* Feature flags won't be persisted anywhere and will be passed as plain JSON over the wire, so we do not need to handle backward/forward compatibility as in the tenant config.
+* The expectation for tenant config is that once we add a flag we cannot remove it (or it will be hard to remove), whereas feature flags are more flexible.
+
+# Final Implementation
+
+* We added a new crate `posthog_lite_client` that supports local feature evaluation.
+* We set up two projects, "Storage (staging)" and "Storage (production)", in the PostHog console.
+* Each pageserver reports 10 fake tenants to PostHog so that we can get all combinations of regions (and other properties) in the PostHog UI.
+* Supported properties: AZ, neon_region, pageserver, tenant_id.
+* You may use the "Pageserver Feature Flags" dashboard to see the evaluation status.
+* The feature flag spec is polled on storcon every 30s (in each region) and storcon propagates the spec to the pageservers.
+* The pageserver housekeeping loop updates the tenant-specific properties (e.g., remote size) used for evaluation.
+
+Each tenant has a `feature_resolver` object. After you add a feature flag in the PostHog console, you can retrieve it with:
+
+```rust
+// Boolean flag
+self
+    .feature_resolver
+    .evaluate_boolean("flag")
+    .is_ok()
+// Multivariate flag
+self
+    .feature_resolver
+    .evaluate_multivariate("gc-comapction-strategy")
+    .ok();
+```
+
+The user needs to handle the case where the evaluation result is an error. This can occur in a variety of cases:
+
+* During pageserver startup, the feature flag spec has not been retrieved yet.
+* No condition group is matched.
+* The feature flag spec contains an operand/operation not supported by the lite PostHog library.
+
+For boolean flags, the return value is `Result<(), Error>`. `Ok(())` means the flag evaluated to true. Otherwise,
+there is either an error in evaluation or it does not match any group.
+
+For multivariate flags, the return value is a `Result`. `Ok(variant)` indicates the flag evaluated
+to a variant. Otherwise, there is either an error in evaluation or it does not match any group.
+
+The evaluation logic is documented in the PostHog lite library. It compares the consistent hash of the flag key + tenant_id
+with the rollout percentage and determines which tenants a specific feature is rolled out to.
+
+Users can use the feature flag evaluation API to get the evaluation result for a specific tenant for debugging purposes:
+
+```
+curl "http://localhost:9898/v1/tenant/:tenant_id/feature_flag?flag=:key&as=multivariate/boolean"
+```
+
+By default, storcon pushes the feature flag specs to the pageservers every 30 seconds, which means that a change to a feature flag in the
+PostHog UI will propagate to the pageservers within 30 seconds.
+
+# Future Work
+
+* Support dynamic tenant properties like logical size as an evaluation condition.
+* Support properties like `plan_type` (needs cplane to pass it down).
+* Report feature flag evaluation results back to PostHog (if the cost is okay).
+* Fast feature flag evaluation cache on critical paths (e.g., cache a feature flag result in an `AtomicBool` and use it on the read path).
\ No newline at end of file
diff --git a/docs/rfcs/2025-03-17-compute-prewarm.md b/docs/rfcs/2025-03-17-compute-prewarm.md
new file mode 100644
index 0000000000..6e95b9ac39
--- /dev/null
+++ b/docs/rfcs/2025-03-17-compute-prewarm.md
@@ -0,0 +1,399 @@
+# Compute rolling restart with prewarm
+
+Created on 2025-03-17
+Implemented on _TBD_
+Author: Alexey Kondratov (@ololobus)
+
+## Summary
+
+This RFC describes an approach to reduce performance degradation due to missing caches after a compute node restart, i.e.:
+
+1. Rolling restart of the running instance via a 'warm' replica.
+2. Auto-prewarm of compute caches after an unplanned restart or scale-to-zero.
+
+## Motivation
+
+Neon currently implements several features that guarantee high uptime of compute nodes:
+
+1. Storage high-availability (HA), i.e. each tenant shard has a secondary pageserver location, so we can quickly switch the compute over to it in case of a primary pageserver failure.
+2. Fast compute provisioning, i.e. we have a fleet of pre-created empty computes that are ready to serve workloads, so restarting an unresponsive compute is very fast.
+3. Preemptive NeonVM compute provisioning in case of k8s node unavailability.
+
+This helps us stay well within the uptime SLO of 99.95% most of the time. Problems begin when we go up to multi-TB workloads and 32-64 CU computes.
+During restart, the compute loses all caches: LFC, shared buffers, file system cache. Depending on the workload, it can take a lot of time to warm up the caches,
+so performance can be degraded and might even be unacceptable for certain workloads. In other words, although the current approach works well for small to
+medium workloads, we still have to do some additional work to avoid performance degradation after restarting large instances.
+
+## Non Goals
+
+- Details of the persistent storage for prewarm data are out of scope, there is a separate RFC for that: .
+- Complete compute/Postgres HA setup and flow. Although it was originally in the scope of this RFC, during preliminary research it appeared to be a rabbit hole, so it's worth a separate RFC.
+- Low-level implementation details for Postgres replica-to-primary promotion. There are a lot of things to think and care about: how to start walproposer, [logical replication failover](https://www.postgresql.org/docs/current/logical-replication-failover.html), and so on, but it's worth at least a separate one-pager design document, if not an RFC.
+
+## Impacted components
+
+Postgres, compute_ctl, Control plane, Endpoint storage for unlogged storage of compute files.
+For the latter, we will need to implement a uniform abstraction layer on top of S3, ABS, etc., but
+S3 is used in the text interchangeably with 'endpoint storage' for simplicity.
+
+## Proposed implementation
+
+### compute_ctl spec changes and auto-prewarm
+
+We are going to extend the current compute spec with the following attributes:
+
+```rust
+struct ComputeSpec {
+    /// [All existing attributes]
+    ...
+    /// Whether to do auto-prewarm at start or not.
+    /// Defaults to `false`.
+    pub lfc_auto_prewarm: bool,
+    /// Interval in seconds between automatic dumps of
+    /// LFC state into S3. Defaults to `None`, which means 'off'.
+    pub lfc_dump_interval_sec: Option<u64>,
+}
+```
+
+When `lfc_dump_interval_sec` is set to `N`, `compute_ctl` will periodically dump the LFC state
+and store it in S3, so that it can be used either for auto-prewarm after a restart or by a replica
+during a rolling restart. For periodic dumping, we should consider the value
+`lfc_dump_interval_sec=300` (5 minutes), the same as upstream's `pg_prewarm.autoprewarm_interval` default.
+
+When `lfc_auto_prewarm` is set to `true`, `compute_ctl` will start prewarming the LFC upon restart
+if any of the previous states is present in S3.
+
+### compute_ctl API
+
+1. `POST /store_lfc_state` -- dump the LFC state using the Postgres SQL interface and store the result in S3.
+   This has to be a blocking call, i.e. it will return only after the state is stored in S3.
+   If there is any concurrent request in progress, we should return `429 Too Many Requests`,
+   and let the caller retry.
+
+2. `GET /dump_lfc_state` -- dump the LFC state using the Postgres SQL interface and return it as is,
+   in a text format suitable for a future restore/prewarm. This API is not strictly needed in
+   the end state, but could be useful for faster prototyping of a complete rolling restart flow
+   with prewarm, as it doesn't require persistent storage for the LFC state.
+
+3. `POST /restore_lfc_state` -- restore/prewarm the LFC state with the request
+
+   ```yaml
+   RestoreLFCStateRequest:
+     oneOf:
+       - type: object
+         required:
+           - lfc_state
+         properties:
+           lfc_state:
+             type: string
+             description: Raw LFC content dumped with GET `/dump_lfc_state`
+       - type: object
+         required:
+           - lfc_cache_key
+         properties:
+           lfc_cache_key:
+             type: string
+             description: |
+               endpoint_id of the source endpoint on the same branch
+               to use as a 'donor' for LFC content. Compute will look up
+               the LFC content dump in S3 using this key and do the prewarm.
+   ```
+
+   where `lfc_state` and `lfc_cache_key` are mutually exclusive.
+
+   The actual prewarming will happen asynchronously, so the caller needs to check the
+   prewarm status using the compute's standard `GET /status` API.
+
+4. `GET /status` -- extend the existing API with the following attributes
+
+   ```rust
+   struct ComputeStatusResponse {
+       // [All existing attributes]
+       ...
+       pub prewarm_state: PrewarmState,
+   }
+
+   /// Compute prewarm state. Will be stored in the shared Compute state
+   /// in compute_ctl.
+   struct PrewarmState {
+       pub status: PrewarmStatus,
+       /// Total number of pages to prewarm
+       pub pages_total: i64,
+       /// Number of pages prewarmed so far
+       pub pages_processed: i64,
+       /// Optional prewarm error
+       pub error: Option<String>,
+   }
+
+   pub enum PrewarmStatus {
+       /// Prewarming was never requested on this compute
+       Off,
+       /// Prewarming was requested, but not started yet
+       Pending,
+       /// Prewarming is in progress. The caller should follow
+       /// `PrewarmState::pages_processed`.
+       InProgress,
+       /// Prewarming has been successfully completed
+       Completed,
+       /// Prewarming failed. The caller should look at
+       /// `PrewarmState::error` for the reason.
+       Failed,
+       /// Intended to be used by auto-prewarm if none of
+       /// the previous LFC states is available in S3.
+       /// This is a distinct state from `Failed` because
+       /// technically it's not a failure and can happen if
+       /// the compute was restarted before it dumped anything into S3,
+       /// or just after the initial rollout of the feature.
+       Skipped,
+   }
+   ```
+
+5. `POST /promote` -- this is a **blocking** API call to promote a compute replica into a primary.
+   This API should be very similar to the existing `POST /configure` API, i.e. accept the
+   spec (a primary spec, because the compute was originally started as a replica). It's a distinct
+   API method because the semantics and response codes are different:
+
+   - If promotion completes successfully, it will return `200 OK`.
+   - If the compute is already a primary, the call will be a no-op and `compute_ctl`
+     will return `412 Precondition Failed`.
+   - If, for some reason, a second request reaches a compute that is in the process of promotion,
+     it will respond with `429 Too Many Requests`.
+   - If the compute hits any permanent failure during promotion, `500 Internal Server Error`
+     will be returned.
+
+### Control plane operations
+
+The complete flow will be presented as a sequence diagram in the next section, but here
+we just want to list some important steps that have to be done by the control plane during
+the rolling restart via a warm replica, without going into low-level implementation details.
+
+1. Register the 'intent' of the instance restart, without interrupting any workload at
+   the primary, which also keeps accepting new connections. This may require some endpoint state machine
+   changes, e.g. the introduction of a `pending_restart` state. Being in this state also
+   **mustn't prevent any other operations except restart**: suspend, live-reconfiguration
+   (e.g. due to a notify-attach call from the storage controller), deletion.
+
+2. Start a new replica compute on the same timeline and start prewarming it. This process
+   may take quite a while, so the same concurrency considerations as in 1. should be applied
+   here as well.
+
+3. When the warm replica is ready, the control plane should:
+
+   3.1. Terminate the primary compute. Starting from here, **this is a critical section**:
+        if anything goes wrong, the only option is to start the primary normally and proceed
+        with auto-prewarm.
+
+   3.2. Send a cache invalidation message to all proxies, notifying them that all new connections
+        should request and wait for the new connection details. At this stage, the proxy also has to
+        drop any existing connections to the old primary, so they don't do stale reads.
+
+   3.3. Attach the warm replica compute to the primary endpoint inside the control plane metadata
+        database.
+
+   3.4. Promote the replica to primary.
+
+   3.5. When everything is done, finalize the endpoint state to be just `active`.
+
+### Complete rolling restart flow
+
+```mermaid
+  sequenceDiagram
+
+  autonumber
+
+  participant proxy as Neon proxy
+
+  participant cplane as Control plane
+
+  participant primary as Compute (primary)
+  box Compute (replica)
+  participant ctl as compute_ctl
+  participant pg as Postgres
+  end
+
+  box Endpoint unlogged storage
+  participant s3proxy as Endpoint storage service
+  participant s3 as S3/ABS/etc.
+ end + + + cplane ->> primary: POST /store_lfc_state + primary -->> cplane: 200 OK + + cplane ->> ctl: POST /restore_lfc_state + activate ctl + ctl -->> cplane: 202 Accepted + + activate cplane + cplane ->> ctl: GET /status: poll prewarm status + ctl ->> s3proxy: GET /read_file + s3proxy ->> s3: read file + s3 -->> s3proxy: file content + s3proxy -->> ctl: 200 OK: file content + + proxy ->> cplane: GET /proxy_wake_compute + cplane -->> proxy: 200 OK: old primary conninfo + + ctl ->> pg: prewarm LFC + activate pg + pg -->> ctl: prewarm is completed + deactivate pg + + ctl -->> cplane: 200 OK: prewarm is completed + deactivate ctl + deactivate cplane + + cplane -->> cplane: reassign replica compute to endpoint,
start terminating the old primary compute + activate cplane + cplane ->> proxy: invalidate caches + + proxy ->> cplane: GET /proxy_wake_compute + + cplane -x primary: POST /terminate + primary -->> cplane: 200 OK + note over primary: old primary
compute terminated
+
+    cplane ->> ctl: POST /promote
+    activate ctl
+    ctl ->> pg: pg_ctl promote
+    activate pg
+    pg -->> ctl: done
+    deactivate pg
+    ctl -->> cplane: 200 OK
+    deactivate ctl
+
+    cplane -->> cplane: finalize operation
+    cplane -->> proxy: 200 OK: new primary conninfo
+    deactivate cplane
+```
+
+### Network bandwidth and prewarm speed
+
+It's currently known that a pageserver can sustain about 3000 RPS per shard for a few running computes.
+Large tenants are usually split into 8 shards, so the final formula may look like this:
+
+```text
+8 shards * 3000 RPS * 8 KB =~ 190 MB/s
+```
+
+so depending on the LFC size, prewarming will take at least:
+
+- ~5s for 1 GB
+- ~50s for 10 GB
+- ~9m for 100 GB
+- \>1h for 1 TB
+
+In total, one pageserver is normally capped at 30k RPS, so it obviously can't sustain many computes
+doing prewarm at the same time. Later, we may need an additional mechanism for computes to throttle
+the prewarming requests gracefully.
+
+### Reliability, failure modes and corner cases
+
+We consider the following failures while implementing this RFC:
+
+1. The compute got interrupted/crashed/restarted during prewarm. The caller -- the control plane -- should
+   detect that and start the prewarm from the beginning.
+
+2. The control plane promotion request timed out or hit network issues. If it never reached the
+   compute, the control plane should just repeat it. If it did reach the compute, then during
+   the retry the control plane can hit `429` because the previous request already triggered the promotion.
+   In this case, the control plane needs to retry until either `200` or
+   a permanent error `500` is returned.
+
+3. The compute got interrupted/crashed/restarted during promotion. At restart it will ask for
+   a spec from the control plane, and its content should signal the compute to start as **primary**,
+   so it's expected that the control plane will continue polling for a certain period of time and
+   will discover that the compute is ready to accept connections if the restart is fast enough.
+
+4. Any other unexpected failure or timeout during prewarming. This **failure mustn't be fatal**:
+   the control plane has to report the failure, terminate the replica, and keep the primary running.
+
+5. Any other unexpected failure or timeout during promotion. Unfortunately, at this point
+   we already have the primary node stopped, so the only option is to start the primary again
+   and proceed with auto-prewarm.
+
+6. Any unexpected failure during auto-prewarm. This **failure mustn't be fatal**:
+   `compute_ctl` has to report the failure, but not crash the compute.
+
+7. The control plane failed to confirm that the old primary has terminated. This can happen, especially
+   in the future HA setup. In this case, the control plane has to ensure that it sent VM deletion
+   and pod termination requests to k8s, so that long-term we do not have two running primaries
+   on the same timeline.
+
+### Security implications
+
+There are two security implications to consider:
+
+1. Access to the `compute_ctl` API. It has to be accessible from outside the compute, so all
+   new API methods have to be exposed on the **external** HTTP port and **must** be authenticated
+   with JWT.
+
+2. Read/write only your own LFC state data in S3. Although it's not really a security concern,
+   since the LFC state is just a mapping of blocks present in LFC at a certain moment in time,
+   it still has to be highly restricted, so that i) only computes on the same timeline can
+   read the S3 state; ii) each compute can only write to the path that contains its `endpoint_id`.
+   Both of these must be validated by the Endpoint storage service using the JWT token provided by `compute_ctl`.
+
+### Unresolved questions
+
+#### Billing, metrics and monitoring
+
+Currently, we only label computes with `endpoint_id` after attaching them to the endpoint.
+In this proposal, this means that the temporary replica will remain unlabelled until it's promoted
+to primary. We can also hide it from users in the control plane API, but what to do about
+billing and monitoring is still unclear.
+
+We can probably mark it as 'billable' and tag it with `project_id`, so it will be billed but
+not interfere in any way with the current primary monitoring.
+
+Another thing to consider is how logs and metrics export will switch to the new compute.
+It's expected that the OpenTelemetry collector will auto-discover the new compute and start
+scraping metrics from it.
+
+#### Auto-prewarm
+
+It's still an open question whether we need auto-prewarm at all. The author's gut feeling is
+that yes, we need it, but maybe not for all workloads, so it could end up exposed as a
+user-controllable knob on the endpoint. There are two arguments for that:
+
+1. Auto-prewarm exists in upstream's `pg_prewarm`, _probably for a reason_.
+
+2. There could still be two flows where we cannot perform the rolling restart via the warm
+   replica: i) any failure or interruption during promotion; ii) wake up after scale-to-zero.
+   The latter might be challenged as well, i.e. one can argue that auto-prewarm may and will
+   compete with the user workload for storage resources. This is correct, but it may still
+   reduce the time to get a warm LFC and good performance.
+
+#### Low-level details of the replica promotion
+
+There are many things to consider here, but three items just off the top of my head:
+
+1. How to properly start the `walproposer` inside Postgres.
+
+2. What to do with logical replication. Currently, we do not include logical replication slots
+   inside the basebackup, because nobody advances them at the replica, so they just prevent WAL
+   deletion. Yet, we do need to have them at the primary after promotion. Starting with Postgres 17,
+   there is a new feature called
+   [logical replication failover](https://www.postgresql.org/docs/current/logical-replication-failover.html)
+   and the `synchronized_standby_slots` setting, but we need a plan for the older versions. Should we
+   request a new basebackup during promotion?
+
+3. How do we guarantee that the replica will receive all the latest WAL from safekeepers? Do some
+   'shallow' version of sync safekeepers without data copying? Or just a standard version of
+   sync safekeepers?
+
+## Alternative implementation
+
+The proposal already assumes one of the alternatives -- do not have any persistent storage for
+the LFC state. This is possible to implement faster with the proposed API, but it means that
+we do not implement auto-prewarm yet.
+
+## Definition of Done
+
+At the end of implementing this RFC we should have two high-level settings that enable:
+
+1. Auto-prewarm of user computes upon restart.
+2. Primary compute restart via warm replica promotion.
+
+It also has to be decided what the criteria are for enabling one or both of these flows for
+certain clients.
diff --git a/endpoint_storage/Cargo.toml b/endpoint_storage/Cargo.toml index b2c9d51551..c2e21d02e2 100644 --- a/endpoint_storage/Cargo.toml +++ b/endpoint_storage/Cargo.toml @@ -8,6 +8,7 @@ anyhow.workspace = true axum-extra.workspace = true axum.workspace = true camino.workspace = true +clap.workspace = true futures.workspace = true jsonwebtoken.workspace = true prometheus.workspace = true diff --git a/endpoint_storage/src/app.rs b/endpoint_storage/src/app.rs index f44efe6d7a..42431c0066 100644 --- a/endpoint_storage/src/app.rs +++ b/endpoint_storage/src/app.rs @@ -374,7 +374,7 @@ MC4CAQAwBQYDK2VwBCIEID/Drmc1AA6U/znNRWpF3zEGegOATQxfkdWxitcOMsIH let request = Request::builder() .uri(format!("/{tenant}/{timeline}/{endpoint}/sub/path/key")) .method(method) - .header("Authorization", format!("Bearer {}", token)) + .header("Authorization", format!("Bearer {token}")) .body(Body::empty()) .unwrap(); let status = ServiceExt::ready(&mut app) diff --git a/endpoint_storage/src/main.rs b/endpoint_storage/src/main.rs index 3d1f05575d..c96cef2083 100644 --- a/endpoint_storage/src/main.rs +++ b/endpoint_storage/src/main.rs @@ -4,6 +4,8 @@ //! for large computes. mod app; use anyhow::Context; +use clap::Parser; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use tracing::info; use utils::logging; @@ -12,13 +14,29 @@ const fn max_upload_file_limit() -> usize { 100 * 1024 * 1024 } +const fn listen() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 51243) +} + +#[derive(Parser)] +struct Args { + #[arg(exclusive = true)] + config_file: Option, + #[arg(long, default_value = "false", requires = "config")] + /// to allow testing k8s helm chart where we don't have s3 credentials + no_s3_check_on_startup: bool, + #[arg(long, value_name = "FILE")] + /// inline config mode for k8s helm chart + config: Option, +} + #[derive(serde::Deserialize)] -#[serde(tag = "type")] struct Config { + #[serde(default = "listen")] listen: std::net::SocketAddr, pemfile: camino::Utf8PathBuf, #[serde(flatten)] - storage_config: remote_storage::RemoteStorageConfig, + storage_kind: remote_storage::TypedRemoteStorageKind, #[serde(default = "max_upload_file_limit")] max_upload_file_limit: usize, } @@ -31,13 +49,18 @@ async fn main() -> anyhow::Result<()> { logging::Output::Stdout, )?; - let config: String = std::env::args().skip(1).take(1).collect(); - if config.is_empty() { - anyhow::bail!("Usage: endpoint_storage config.json") - } - info!("Reading config from {config}"); - let config = std::fs::read_to_string(config.clone())?; - let config: Config = serde_json::from_str(&config).context("parsing config")?; + let args = Args::parse(); + let config: Config = if let Some(config_path) = args.config_file { + info!("Reading config from {config_path}"); + let config = std::fs::read_to_string(config_path)?; + serde_json::from_str(&config).context("parsing config")? + } else if let Some(config) = args.config { + info!("Reading inline config"); + serde_json::from_str(&config).context("parsing config")? 
+ } else { + anyhow::bail!("Supply either config file path or --config=inline-config"); + }; + info!("Reading pemfile from {}", config.pemfile.clone()); let pemfile = std::fs::read(config.pemfile.clone())?; info!("Loading public key from {}", config.pemfile.clone()); @@ -46,9 +69,12 @@ async fn main() -> anyhow::Result<()> { let listener = tokio::net::TcpListener::bind(config.listen).await.unwrap(); info!("listening on {}", listener.local_addr().unwrap()); - let storage = remote_storage::GenericRemoteStorage::from_config(&config.storage_config).await?; + let storage = + remote_storage::GenericRemoteStorage::from_storage_kind(config.storage_kind).await?; let cancel = tokio_util::sync::CancellationToken::new(); - app::check_storage_permissions(&storage, cancel.clone()).await?; + if !args.no_s3_check_on_startup { + app::check_storage_permissions(&storage, cancel.clone()).await?; + } let proxy = std::sync::Arc::new(endpoint_storage::Storage { auth, diff --git a/libs/compute_api/Cargo.toml b/libs/compute_api/Cargo.toml index 81b0cd19a1..83cb639f0a 100644 --- a/libs/compute_api/Cargo.toml +++ b/libs/compute_api/Cargo.toml @@ -12,6 +12,7 @@ jsonwebtoken.workspace = true serde.workspace = true serde_json.workspace = true regex.workspace = true +url.workspace = true utils = { path = "../utils" } remote_storage = { version = "0.1", path = "../remote_storage/" } diff --git a/libs/compute_api/src/requests.rs b/libs/compute_api/src/requests.rs index bbab271474..745c44c05b 100644 --- a/libs/compute_api/src/requests.rs +++ b/libs/compute_api/src/requests.rs @@ -16,6 +16,7 @@ pub static COMPUTE_AUDIENCE: &str = "compute"; pub enum ComputeClaimsScope { /// An admin-scoped token allows access to all of `compute_ctl`'s authorized /// facilities. + #[serde(rename = "compute_ctl:admin")] Admin, } @@ -24,7 +25,7 @@ impl FromStr for ComputeClaimsScope { fn from_str(s: &str) -> Result { match s { - "admin" => Ok(ComputeClaimsScope::Admin), + "compute_ctl:admin" => Ok(ComputeClaimsScope::Admin), _ => Err(anyhow::anyhow!("invalid compute claims scope \"{s}\"")), } } @@ -80,3 +81,23 @@ pub struct SetRoleGrantsRequest { pub privileges: Vec, pub role: PgIdent, } + +#[cfg(test)] +mod test { + use std::str::FromStr; + + use crate::requests::ComputeClaimsScope; + + /// Confirm that whether we parse the scope by string or through serde, the + /// same values parse to the same enum variant. 
+ #[test] + fn compute_request_scopes() { + const ADMIN_SCOPE: &str = "compute_ctl:admin"; + + let from_serde: ComputeClaimsScope = + serde_json::from_str(&format!("\"{ADMIN_SCOPE}\"")).unwrap(); + let from_str = ComputeClaimsScope::from_str(ADMIN_SCOPE).unwrap(); + + assert_eq!(from_serde, from_str); + } +} diff --git a/libs/compute_api/src/responses.rs b/libs/compute_api/src/responses.rs index 24d371c6eb..a54411b06a 100644 --- a/libs/compute_api/src/responses.rs +++ b/libs/compute_api/src/responses.rs @@ -58,7 +58,7 @@ pub enum LfcPrewarmState { }, } -#[derive(Serialize, Default, Debug, Clone)] +#[derive(Serialize, Default, Debug, Clone, PartialEq)] #[serde(tag = "status", rename_all = "snake_case")] pub enum LfcOffloadState { #[default] @@ -83,6 +83,16 @@ pub struct ComputeStatusResponse { pub error: Option, } +#[derive(Serialize, Clone, Copy, Debug, Deserialize, PartialEq, Eq, Default)] +#[serde(rename_all = "snake_case")] +pub enum TerminateMode { + #[default] + /// wait 30s till returning from /terminate to allow control plane to get the error + Fast, + /// return from /terminate immediately as soon as all components are terminated + Immediate, +} + #[derive(Serialize, Clone, Copy, Debug, Deserialize, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub enum ComputeStatus { @@ -103,11 +113,16 @@ pub enum ComputeStatus { // control-plane to terminate it. Failed, // Termination requested - TerminationPending, + TerminationPending { mode: TerminateMode }, // Terminated Postgres Terminated, } +#[derive(Deserialize, Serialize)] +pub struct TerminateResponse { + pub lsn: Option, +} + impl Display for ComputeStatus { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -117,7 +132,7 @@ impl Display for ComputeStatus { ComputeStatus::Running => f.write_str("running"), ComputeStatus::Configuration => f.write_str("configuration"), ComputeStatus::Failed => f.write_str("failed"), - ComputeStatus::TerminationPending => f.write_str("termination-pending"), + ComputeStatus::TerminationPending { .. } => f.write_str("termination-pending"), ComputeStatus::Terminated => f.write_str("terminated"), } } diff --git a/libs/compute_api/src/spec.rs b/libs/compute_api/src/spec.rs index 0e23b70265..d011f53ee9 100644 --- a/libs/compute_api/src/spec.rs +++ b/libs/compute_api/src/spec.rs @@ -4,11 +4,14 @@ //! provide it by calling the compute_ctl's `/compute_ctl` endpoint, or //! compute_ctl can fetch it by calling the control plane's API. use std::collections::HashMap; +use std::fmt::Display; +use anyhow::anyhow; use indexmap::IndexMap; use regex::Regex; use remote_storage::RemotePath; use serde::{Deserialize, Serialize}; +use url::Url; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; @@ -102,7 +105,11 @@ pub struct ComputeSpec { // updated to fill these fields, we can make these non optional. pub tenant_id: Option, pub timeline_id: Option, - pub pageserver_connstring: Option, + + // Pageserver information can be passed in two different ways: + // 1. Here + // 2. in cluster.settings. This is legacy, we are switching to method 1. + pub pageserver_connection_info: Option, // More neon ids that we expose to the compute_ctl // and to postgres as neon extension GUCs. 
@@ -178,9 +185,18 @@ pub struct ComputeSpec { /// JWT for authorizing requests to endpoint storage service pub endpoint_storage_token: Option, - /// Download LFC state from endpoint_storage and pass it to Postgres on startup #[serde(default)] + /// Download LFC state from endpoint storage and pass it to Postgres on compute startup pub autoprewarm: bool, + + #[serde(default)] + /// Upload LFC state to endpoint storage periodically. Default value (None) means "don't upload" + pub offload_lfc_interval_seconds: Option, + + /// Suspend timeout in seconds. + /// + /// We use this value to derive other values, such as the installed extensions metric. + pub suspend_timeout_seconds: i64, } /// Feature flag to signal `compute_ctl` to enable certain experimental functionality. @@ -202,6 +218,20 @@ pub enum ComputeFeature { UnknownFeature, } +/// Feature flag to signal `compute_ctl` to enable certain experimental functionality. +#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)] +pub struct PageserverConnectionInfo { + pub shards: HashMap, + + pub prefer_grpc: bool, +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq)] +pub struct PageserverShardConnectionInfo { + pub libpq_url: Option, + pub grpc_url: Option, +} + #[derive(Clone, Debug, Default, Deserialize, Serialize)] pub struct RemoteExtSpec { pub public_extensions: Option>, @@ -319,6 +349,12 @@ impl ComputeMode { } } +impl Display for ComputeMode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.to_type_str()) + } +} + /// Log level for audit logging #[derive(Clone, Debug, Default, Eq, PartialEq, Deserialize, Serialize)] pub enum ComputeAudit { @@ -429,6 +465,47 @@ pub struct JwksSettings { pub jwt_audience: Option, } +/// Protocol used to connect to a Pageserver. Parsed from the connstring scheme. +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub enum PageserverProtocol { + /// The original protocol based on libpq and COPY. Uses postgresql:// or postgres:// scheme. + #[default] + Libpq, + /// A newer, gRPC-based protocol. Uses grpc:// scheme. + Grpc, +} + +impl PageserverProtocol { + /// Parses the protocol from a connstring scheme. Defaults to Libpq if no scheme is given. + /// Errors if the connstring is an invalid URL. + pub fn from_connstring(connstring: &str) -> anyhow::Result { + let scheme = match Url::parse(connstring) { + Ok(url) => url.scheme().to_lowercase(), + Err(url::ParseError::RelativeUrlWithoutBase) => return Ok(Self::default()), + Err(err) => return Err(anyhow!("invalid connstring URL: {err}")), + }; + match scheme.as_str() { + "postgresql" | "postgres" => Ok(Self::Libpq), + "grpc" => Ok(Self::Grpc), + scheme => Err(anyhow!("invalid protocol scheme: {scheme}")), + } + } + + /// Returns the URL scheme for the protocol, for use in connstrings. 
+ pub fn scheme(&self) -> &'static str { + match self { + Self::Libpq => "postgresql", + Self::Grpc => "grpc", + } + } +} + +impl Display for PageserverProtocol { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.scheme()) + } +} + #[cfg(test)] mod tests { use std::fs::File; diff --git a/libs/compute_api/tests/cluster_spec.json b/libs/compute_api/tests/cluster_spec.json index 2dd2aae015..86ab8c6e32 100644 --- a/libs/compute_api/tests/cluster_spec.json +++ b/libs/compute_api/tests/cluster_spec.json @@ -3,6 +3,7 @@ "timestamp": "2021-05-23T18:25:43.511Z", "operation_uuid": "0f657b36-4b0f-4a2d-9c2e-1dcd615e7d8b", + "suspend_timeout_seconds": 3600, "cluster": { "cluster_id": "test-cluster-42", @@ -89,6 +90,11 @@ "value": "off", "vartype": "bool" }, + { + "name": "offload_lfc_interval_seconds", + "value": "20", + "vartype": "integer" + }, { "name": "neon.safekeepers", "value": "127.0.0.1:6502,127.0.0.1:6503,127.0.0.1:6501", diff --git a/libs/desim/src/executor.rs b/libs/desim/src/executor.rs index df8b071c06..bdb9c6cd4b 100644 --- a/libs/desim/src/executor.rs +++ b/libs/desim/src/executor.rs @@ -71,7 +71,7 @@ impl Runtime { debug!("thread panicked: {:?}", e); let mut result = ctx.result.lock(); if result.0 == -1 { - *result = (256, format!("thread panicked: {:?}", e)); + *result = (256, format!("thread panicked: {e:?}")); } }); } @@ -419,13 +419,13 @@ pub fn now() -> u64 { with_thread_context(|ctx| ctx.clock.get().unwrap().now()) } -pub fn exit(code: i32, msg: String) { +pub fn exit(code: i32, msg: String) -> ! { with_thread_context(|ctx| { ctx.allow_panic.store(true, Ordering::SeqCst); let mut result = ctx.result.lock(); *result = (code, msg); panic!("exit"); - }); + }) } pub(crate) fn get_thread_ctx() -> Arc { diff --git a/libs/desim/src/proto.rs b/libs/desim/src/proto.rs index 31bc29e6a6..7c3de4ff4b 100644 --- a/libs/desim/src/proto.rs +++ b/libs/desim/src/proto.rs @@ -47,8 +47,8 @@ impl Debug for AnyMessage { match self { AnyMessage::None => write!(f, "None"), AnyMessage::InternalConnect => write!(f, "InternalConnect"), - AnyMessage::Just32(v) => write!(f, "Just32({})", v), - AnyMessage::ReplCell(v) => write!(f, "ReplCell({:?})", v), + AnyMessage::Just32(v) => write!(f, "Just32({v})"), + AnyMessage::ReplCell(v) => write!(f, "ReplCell({v:?})"), AnyMessage::Bytes(v) => write!(f, "Bytes({})", hex::encode(v)), AnyMessage::LSN(v) => write!(f, "LSN({})", Lsn(*v)), } diff --git a/libs/http-utils/src/endpoint.rs b/libs/http-utils/src/endpoint.rs index 64147f2dd0..f32ced1180 100644 --- a/libs/http-utils/src/endpoint.rs +++ b/libs/http-utils/src/endpoint.rs @@ -582,14 +582,14 @@ pub fn attach_openapi_ui( deepLinking: true, showExtensions: true, showCommonExtensions: true, - url: "{}", + url: "{spec_mount_path}", }}) window.ui = ui; }}; - "#, spec_mount_path))).unwrap()) + "#))).unwrap()) }) ) } @@ -696,7 +696,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80); let mut service = builder.build(remote_addr); if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await { - panic!("request service is not ready: {:?}", e); + panic!("request service is not ready: {e:?}"); } let mut req: Request = Request::default(); @@ -716,7 +716,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80); let mut service = builder.build(remote_addr); if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await { - panic!("request service is not ready: {:?}", e); + panic!("request service is not ready: 
{e:?}"); } let req: Request = Request::default(); diff --git a/libs/neon-shmem/benches/hmap_resize.rs b/libs/neon-shmem/benches/hmap_resize.rs index 6b86e7ed27..edc0eec50b 100644 --- a/libs/neon-shmem/benches/hmap_resize.rs +++ b/libs/neon-shmem/benches/hmap_resize.rs @@ -1,12 +1,12 @@ -use criterion::{criterion_group, criterion_main, BatchSize, Criterion, BenchmarkId}; +use criterion::{BatchSize, BenchmarkId, Criterion, criterion_group, criterion_main}; use neon_shmem::hash::HashMapAccess; use neon_shmem::hash::HashMapInit; use neon_shmem::hash::entry::Entry; -use rand::prelude::*; use rand::distr::{Distribution, StandardUniform}; -use std::hash::BuildHasher; +use rand::prelude::*; use std::default::Default; - +use std::hash::BuildHasher; + // Taken from bindings to C code #[derive(Clone, Debug, Hash, Eq, PartialEq)] @@ -20,15 +20,15 @@ pub struct FileCacheKey { } impl Distribution for StandardUniform { - // questionable, but doesn't need to be good randomness - fn sample(&self, rng: &mut R) -> FileCacheKey { - FileCacheKey { - _spc_id: rng.random(), - _db_id: rng.random(), - _rel_number: rng.random(), - _fork_num: rng.random(), - _block_num: rng.random() - } + // questionable, but doesn't need to be good randomness + fn sample(&self, rng: &mut R) -> FileCacheKey { + FileCacheKey { + _spc_id: rng.random(), + _db_id: rng.random(), + _rel_number: rng.random(), + _fork_num: rng.random(), + _block_num: rng.random(), + } } } @@ -43,240 +43,288 @@ pub struct FileCacheEntry { } impl FileCacheEntry { - fn dummy() -> Self { - Self { - _offset: 0, - _access_count: 0, - _prev: std::ptr::null_mut(), - _next: std::ptr::null_mut(), - _state: [0; 8] - } - } + fn dummy() -> Self { + Self { + _offset: 0, + _access_count: 0, + _prev: std::ptr::null_mut(), + _next: std::ptr::null_mut(), + _state: [0; 8], + } + } } // Utilities for applying operations. 
#[derive(Clone, Debug)] -struct TestOp(K, Option); +struct TestOp(K, Option); fn apply_op( - op: TestOp, - map: &mut HashMapAccess, + op: TestOp, + map: &mut HashMapAccess, ) { - let entry = map.entry(op.0); + let entry = map.entry(op.0); match op.1 { - Some(new) => { - match entry { - Entry::Occupied(mut e) => Some(e.insert(new)), - Entry::Vacant(e) => { _ = e.insert(new).unwrap(); None }, - } - }, - None => { - match entry { - Entry::Occupied(e) => Some(e.remove()), - Entry::Vacant(_) => None, - } - }, - }; + Some(new) => match entry { + Entry::Occupied(mut e) => Some(e.insert(new)), + Entry::Vacant(e) => { + _ = e.insert(new).unwrap(); + None + } + }, + None => match entry { + Entry::Occupied(e) => Some(e.remove()), + Entry::Vacant(_) => None, + }, + }; } // Hash utilities struct SeaRandomState { - k1: u64, - k2: u64, - k3: u64, - k4: u64 + k1: u64, + k2: u64, + k3: u64, + k4: u64, } impl std::hash::BuildHasher for SeaRandomState { - type Hasher = seahash::SeaHasher; - - fn build_hasher(&self) -> Self::Hasher { - seahash::SeaHasher::with_seeds(self.k1, self.k2, self.k3, self.k4) - } + type Hasher = seahash::SeaHasher; + + fn build_hasher(&self) -> Self::Hasher { + seahash::SeaHasher::with_seeds(self.k1, self.k2, self.k3, self.k4) + } } impl SeaRandomState { - fn new() -> Self { - let mut rng = rand::rng(); - Self { k1: rng.random(), k2: rng.random(), k3: rng.random(), k4: rng.random() } - } + fn new() -> Self { + let mut rng = rand::rng(); + Self { + k1: rng.random(), + k2: rng.random(), + k3: rng.random(), + k4: rng.random(), + } + } } fn small_benchs(c: &mut Criterion) { - let mut group = c.benchmark_group("Small maps"); + let mut group = c.benchmark_group("Small maps"); group.sample_size(10); - - group.bench_function("small_rehash", |b| { - let ideal_filled = 4_000_000; - let size = 5_000_000; - let mut writer = HashMapInit::new_resizeable(size, size * 2).attach_writer(); - let mut rng = rand::rng(); - while writer.get_num_buckets_in_use() < ideal_filled as usize { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - apply_op(TestOp(key, Some(val)), &mut writer); - } - b.iter(|| writer.shuffle()); - }); - - group.bench_function("small_rehash_xxhash", |b| { - let ideal_filled = 4_000_000; - let size = 5_000_000; - let mut writer = HashMapInit::new_resizeable(size, size * 2) - .with_hasher(twox_hash::xxhash64::RandomState::default()) - .attach_writer(); - let mut rng = rand::rng(); - while writer.get_num_buckets_in_use() < ideal_filled as usize { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - apply_op(TestOp(key, Some(val)), &mut writer); - } - b.iter(|| writer.shuffle()); - }); + group.bench_function("small_rehash", |b| { + let ideal_filled = 4_000_000; + let size = 5_000_000; + let mut writer = HashMapInit::new_resizeable(size, size * 2).attach_writer(); + let mut rng = rand::rng(); + while writer.get_num_buckets_in_use() < ideal_filled as usize { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + apply_op(TestOp(key, Some(val)), &mut writer); + } + b.iter(|| writer.shuffle()); + }); - - group.bench_function("small_rehash_ahash", |b| { - let ideal_filled = 4_000_000; - let size = 5_000_000; - let mut writer = HashMapInit::new_resizeable(size, size * 2) - .with_hasher(ahash::RandomState::default()) - .attach_writer(); - let mut rng = rand::rng(); - while writer.get_num_buckets_in_use() < ideal_filled as usize { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - 
apply_op(TestOp(key, Some(val)), &mut writer); - } - b.iter(|| writer.shuffle()); - }); + group.bench_function("small_rehash_xxhash", |b| { + let ideal_filled = 4_000_000; + let size = 5_000_000; + let mut writer = HashMapInit::new_resizeable(size, size * 2) + .with_hasher(twox_hash::xxhash64::RandomState::default()) + .attach_writer(); + let mut rng = rand::rng(); + while writer.get_num_buckets_in_use() < ideal_filled as usize { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + apply_op(TestOp(key, Some(val)), &mut writer); + } + b.iter(|| writer.shuffle()); + }); - group.bench_function("small_rehash_seahash", |b| { - let ideal_filled = 4_000_000; - let size = 5_000_000; - let mut writer = HashMapInit::new_resizeable(size, size * 2) - .with_hasher(SeaRandomState::new()) - .attach_writer(); - let mut rng = rand::rng(); - while writer.get_num_buckets_in_use() < ideal_filled as usize { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - apply_op(TestOp(key, Some(val)), &mut writer); - } - b.iter(|| writer.shuffle()); - }); + group.bench_function("small_rehash_ahash", |b| { + let ideal_filled = 4_000_000; + let size = 5_000_000; + let mut writer = HashMapInit::new_resizeable(size, size * 2) + .with_hasher(ahash::RandomState::default()) + .attach_writer(); + let mut rng = rand::rng(); + while writer.get_num_buckets_in_use() < ideal_filled as usize { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + apply_op(TestOp(key, Some(val)), &mut writer); + } + b.iter(|| writer.shuffle()); + }); - group.finish(); + group.bench_function("small_rehash_seahash", |b| { + let ideal_filled = 4_000_000; + let size = 5_000_000; + let mut writer = HashMapInit::new_resizeable(size, size * 2) + .with_hasher(SeaRandomState::new()) + .attach_writer(); + let mut rng = rand::rng(); + while writer.get_num_buckets_in_use() < ideal_filled as usize { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + apply_op(TestOp(key, Some(val)), &mut writer); + } + b.iter(|| writer.shuffle()); + }); + + group.finish(); } fn real_benchs(c: &mut Criterion) { - let mut group = c.benchmark_group("Realistic workloads"); - group.sample_size(10); + let mut group = c.benchmark_group("Realistic workloads"); + group.sample_size(10); group.bench_function("real_bulk_insert", |b| { - let size = 125_000_000; - let ideal_filled = 100_000_000; - let mut rng = rand::rng(); - b.iter_batched( - || HashMapInit::new_resizeable(size, size * 2).attach_writer(), - |writer| { - for _ in 0..ideal_filled { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - let entry = writer.entry(key); - std::hint::black_box(match entry { - Entry::Occupied(mut e) => { e.insert(val); }, - Entry::Vacant(e) => { _ = e.insert(val).unwrap(); }, - }) - } - }, - BatchSize::SmallInput, - ) - }); - - group.bench_function("real_rehash", |b| { - let size = 125_000_000; - let ideal_filled = 100_000_000; - let mut writer = HashMapInit::new_resizeable(size, size).attach_writer(); - let mut rng = rand::rng(); - while writer.get_num_buckets_in_use() < ideal_filled { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - apply_op(TestOp(key, Some(val)), &mut writer); - } - b.iter(|| writer.shuffle()); - }); - - group.bench_function("real_rehash_hashbrown", |b| { - let size = 125_000_000; - let ideal_filled = 100_000_000; - let mut writer = hashbrown::raw::RawTable::new(); - let mut rng = rand::rng(); - let hasher = 
rustc_hash::FxBuildHasher::default(); - unsafe { - writer.resize(size, |(k,_)| hasher.hash_one(&k), - hashbrown::raw::Fallibility::Infallible).unwrap(); - } - while writer.len() < ideal_filled as usize { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - writer.insert(hasher.hash_one(&key), (key, val), |(k,_)| hasher.hash_one(&k)); - } - b.iter(|| unsafe { writer.table.rehash_in_place( - &|table, index| hasher.hash_one(&table.bucket::<(FileCacheKey, FileCacheEntry)>(index).as_ref().0), - std::mem::size_of::<(FileCacheKey, FileCacheEntry)>(), - if std::mem::needs_drop::<(FileCacheKey, FileCacheEntry)>() { - Some(|ptr| std::ptr::drop_in_place(ptr as *mut (FileCacheKey, FileCacheEntry))) - } else { - None + let size = 125_000_000; + let ideal_filled = 100_000_000; + let mut rng = rand::rng(); + b.iter_batched( + || HashMapInit::new_resizeable(size, size * 2).attach_writer(), + |writer| { + for _ in 0..ideal_filled { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + let entry = writer.entry(key); + std::hint::black_box(match entry { + Entry::Occupied(mut e) => { + e.insert(val); + } + Entry::Vacant(e) => { + _ = e.insert(val).unwrap(); + } + }) + } }, - ) }); - }); + BatchSize::SmallInput, + ) + }); - for elems in [2, 4, 8, 16, 32, 64, 96, 112] { - group.bench_with_input(BenchmarkId::new("real_rehash_varied", elems), &elems, |b, &size| { - let ideal_filled = size * 1_000_000; - let size = 125_000_000; - let mut writer = HashMapInit::new_resizeable(size, size).attach_writer(); - let mut rng = rand::rng(); - while writer.get_num_buckets_in_use() < ideal_filled as usize { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - apply_op(TestOp(key, Some(val)), &mut writer); - } - b.iter(|| writer.shuffle()); - }); - group.bench_with_input(BenchmarkId::new("real_rehash_varied_hashbrown", elems), &elems, |b, &size| { - let ideal_filled = size * 1_000_000; - let size = 125_000_000; - let mut writer = hashbrown::raw::RawTable::new(); - let mut rng = rand::rng(); - let hasher = rustc_hash::FxBuildHasher::default(); - unsafe { - writer.resize(size, |(k,_)| hasher.hash_one(&k), - hashbrown::raw::Fallibility::Infallible).unwrap(); - } - while writer.len() < ideal_filled as usize { - let key: FileCacheKey = rng.random(); - let val = FileCacheEntry::dummy(); - writer.insert(hasher.hash_one(&key), (key, val), |(k,_)| hasher.hash_one(&k)); - } - b.iter(|| unsafe { writer.table.rehash_in_place( - &|table, index| hasher.hash_one(&table.bucket::<(FileCacheKey, FileCacheEntry)>(index).as_ref().0), - std::mem::size_of::<(FileCacheKey, FileCacheEntry)>(), - if std::mem::needs_drop::<(FileCacheKey, FileCacheEntry)>() { - Some(|ptr| std::ptr::drop_in_place(ptr as *mut (FileCacheKey, FileCacheEntry))) - } else { - None - }, - ) }); - }); - } - - group.finish(); + group.bench_function("real_rehash", |b| { + let size = 125_000_000; + let ideal_filled = 100_000_000; + let mut writer = HashMapInit::new_resizeable(size, size).attach_writer(); + let mut rng = rand::rng(); + while writer.get_num_buckets_in_use() < ideal_filled { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + apply_op(TestOp(key, Some(val)), &mut writer); + } + b.iter(|| writer.shuffle()); + }); + + group.bench_function("real_rehash_hashbrown", |b| { + let size = 125_000_000; + let ideal_filled = 100_000_000; + let mut writer = hashbrown::raw::RawTable::new(); + let mut rng = rand::rng(); + let hasher = rustc_hash::FxBuildHasher::default(); + unsafe 
{ + writer + .resize( + size, + |(k, _)| hasher.hash_one(&k), + hashbrown::raw::Fallibility::Infallible, + ) + .unwrap(); + } + while writer.len() < ideal_filled as usize { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + writer.insert(hasher.hash_one(&key), (key, val), |(k, _)| { + hasher.hash_one(&k) + }); + } + b.iter(|| unsafe { + writer.table.rehash_in_place( + &|table, index| { + hasher.hash_one( + &table + .bucket::<(FileCacheKey, FileCacheEntry)>(index) + .as_ref() + .0, + ) + }, + std::mem::size_of::<(FileCacheKey, FileCacheEntry)>(), + if std::mem::needs_drop::<(FileCacheKey, FileCacheEntry)>() { + Some(|ptr| std::ptr::drop_in_place(ptr as *mut (FileCacheKey, FileCacheEntry))) + } else { + None + }, + ) + }); + }); + + for elems in [2, 4, 8, 16, 32, 64, 96, 112] { + group.bench_with_input( + BenchmarkId::new("real_rehash_varied", elems), + &elems, + |b, &size| { + let ideal_filled = size * 1_000_000; + let size = 125_000_000; + let mut writer = HashMapInit::new_resizeable(size, size).attach_writer(); + let mut rng = rand::rng(); + while writer.get_num_buckets_in_use() < ideal_filled as usize { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + apply_op(TestOp(key, Some(val)), &mut writer); + } + b.iter(|| writer.shuffle()); + }, + ); + group.bench_with_input( + BenchmarkId::new("real_rehash_varied_hashbrown", elems), + &elems, + |b, &size| { + let ideal_filled = size * 1_000_000; + let size = 125_000_000; + let mut writer = hashbrown::raw::RawTable::new(); + let mut rng = rand::rng(); + let hasher = rustc_hash::FxBuildHasher::default(); + unsafe { + writer + .resize( + size, + |(k, _)| hasher.hash_one(&k), + hashbrown::raw::Fallibility::Infallible, + ) + .unwrap(); + } + while writer.len() < ideal_filled as usize { + let key: FileCacheKey = rng.random(); + let val = FileCacheEntry::dummy(); + writer.insert(hasher.hash_one(&key), (key, val), |(k, _)| { + hasher.hash_one(&k) + }); + } + b.iter(|| unsafe { + writer.table.rehash_in_place( + &|table, index| { + hasher.hash_one( + &table + .bucket::<(FileCacheKey, FileCacheEntry)>(index) + .as_ref() + .0, + ) + }, + std::mem::size_of::<(FileCacheKey, FileCacheEntry)>(), + if std::mem::needs_drop::<(FileCacheKey, FileCacheEntry)>() { + Some(|ptr| { + std::ptr::drop_in_place(ptr as *mut (FileCacheKey, FileCacheEntry)) + }) + } else { + None + }, + ) + }); + }, + ); + } + + group.finish(); } - + criterion_group!(benches, small_benchs, real_benchs); criterion_main!(benches); diff --git a/libs/neon-shmem/src/hash.rs b/libs/neon-shmem/src/hash.rs index d0cec22493..347fc89d17 100644 --- a/libs/neon-shmem/src/hash.rs +++ b/libs/neon-shmem/src/hash.rs @@ -1,6 +1,7 @@ //! Resizable hash table implementation on top of byte-level storage (either a [`ShmemHandle`] or a //! fixed byte array). //! + //! This hash table has two major components: the bucket array and the dictionary. Each bucket //! within the bucket array contains a `Option<(K, V)>` and an index of another bucket. In this way //! there is both an implicit freelist within the bucket array (`None` buckets point to other `None` @@ -17,13 +18,14 @@ //! happen in-place and are at a high level achieved by expanding/reducing the bucket array and //! rebuilding the dictionary by rehashing all keys. 
-use std::hash::{Hash, BuildHasher}; +use std::hash::{BuildHasher, Hash}; use std::mem::MaybeUninit; use crate::{shmem, sync::{ PthreadRwLock, RwLock, RwLockReadGuard, RwLockWriteGuard, ValueReadGuard }}; use crate::shmem::ShmemHandle; +use crate::{shmem, sync::*}; mod core; pub mod entry; @@ -32,62 +34,80 @@ pub mod entry; mod tests; use core::{Bucket, CoreHashMap, INVALID_POS}; -use entry::{Entry, OccupiedEntry, VacantEntry, PrevPos}; +use entry::{Entry, OccupiedEntry, PrevPos, VacantEntry}; -/// Builder for a [`HashMapAccess`]. +/// This represents a hash table that (possibly) lives in shared memory. +/// If a new process is launched with fork(), the child process inherits +/// this struct. #[must_use] pub struct HashMapInit<'a, K, V, S = rustc_hash::FxBuildHasher> { shmem_handle: Option, - shared_ptr: *mut RwLock>, - shared_size: usize, - hasher: S, - num_buckets: u32, + shared_ptr: *mut HashMapShared<'a, K, V>, + shared_size: usize, + hasher: S, + num_buckets: u32, } -/// Accessor for a hash table. +/// This is a per-process handle to a hash table that (possibly) lives in shared memory. +/// If a child process is launched with fork(), the child process should +/// get its own HashMapAccess by calling HashMapInit::attach_writer/reader(). +/// +/// XXX: We're not making use of it at the moment, but this struct could +/// hold process-local information in the future. pub struct HashMapAccess<'a, K, V, S = rustc_hash::FxBuildHasher> { shmem_handle: Option, shared_ptr: *mut HashMapShared<'a, K, V>, - hasher: S, + hasher: S, } unsafe impl Sync for HashMapAccess<'_, K, V, S> {} unsafe impl Send for HashMapAccess<'_, K, V, S> {} impl<'a, K: Clone + Hash + Eq, V, S> HashMapInit<'a, K, V, S> { - pub fn with_hasher(self, hasher: T) -> HashMapInit<'a, K, V, T> { - HashMapInit { - hasher, - shmem_handle: self.shmem_handle, - shared_ptr: self.shared_ptr, - shared_size: self.shared_size, - num_buckets: self.num_buckets, - } - } + /// Change the 'hasher' used by the hash table. + /// + /// NOTE: This must be called right after creating the hash table, + /// before inserting any entries and before calling attach_writer/reader. + /// Otherwise different accessors could be using different hash function, + /// with confusing results. + pub fn with_hasher(self, hasher: T) -> HashMapInit<'a, K, V, T> { + HashMapInit { + hasher, + shmem_handle: self.shmem_handle, + shared_ptr: self.shared_ptr, + shared_size: self.shared_size, + num_buckets: self.num_buckets, + } + } - /// Loosely (over)estimate the size needed to store a hash table with `num_buckets` buckets. - pub fn estimate_size(num_buckets: u32) -> usize { + /// Loosely (over)estimate the size needed to store a hash table with `num_buckets` buckets. + pub fn estimate_size(num_buckets: u32) -> usize { // add some margin to cover alignment etc. CoreHashMap::::estimate_size(num_buckets) + size_of::>() + 1000 } - /// Initialize a table for writing. - pub fn attach_writer(self) -> HashMapAccess<'a, K, V, S> { - let mut ptr: *mut u8 = self.shared_ptr.cast(); - let end_ptr: *mut u8 = unsafe { ptr.add(self.shared_size) }; + fn new( + num_buckets: u32, + shmem_handle: Option, + area_ptr: *mut u8, + area_size: usize, + hasher: S, + ) -> Self { + let mut ptr: *mut u8 = area_ptr; + let end_ptr: *mut u8 = unsafe { ptr.add(area_size) }; - // carve out area for the One Big Lock (TM) and the HashMapShared. 
- ptr = unsafe { ptr.add(ptr.align_offset(align_of::())) }; - let raw_lock_ptr = ptr; - ptr = unsafe { ptr.add(size_of::()) }; - ptr = unsafe { ptr.add(ptr.align_offset(align_of::>())) }; - let shared_ptr: *mut HashMapShared = ptr.cast(); + // carve out area for the One Big Lock (TM) and the HashMapShared. + ptr = unsafe { ptr.add(ptr.align_offset(align_of::())) }; + let raw_lock_ptr = ptr; + ptr = unsafe { ptr.add(size_of::()) }; + ptr = unsafe { ptr.add(ptr.align_offset(align_of::>())) }; + let shared_ptr: *mut HashMapShared = ptr.cast(); ptr = unsafe { ptr.add(size_of::>()) }; - + // carve out the buckets ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::>())) }; let buckets_ptr = ptr; - ptr = unsafe { ptr.add(size_of::>() * self.num_buckets as usize) }; + ptr = unsafe { ptr.add(size_of::>() * num_buckets as usize) }; // use remaining space for the dictionary ptr = unsafe { ptr.byte_add(ptr.align_offset(align_of::())) }; @@ -97,26 +117,36 @@ impl<'a, K: Clone + Hash + Eq, V, S> HashMapInit<'a, K, V, S> { assert!(dictionary_size > 0); let buckets = - unsafe { std::slice::from_raw_parts_mut(buckets_ptr.cast(), self.num_buckets as usize) }; + unsafe { std::slice::from_raw_parts_mut(buckets_ptr.cast(), num_buckets as usize) }; let dictionary = unsafe { std::slice::from_raw_parts_mut(dictionary_ptr.cast(), dictionary_size as usize) }; + let hashmap = CoreHashMap::new(buckets, dictionary); - let lock = RwLock::from_raw(PthreadRwLock::new( - std::ptr::NonNull::new(raw_lock_ptr.cast()).unwrap() - ), hashmap); - unsafe { - std::ptr::write(shared_ptr, lock); - } - - HashMapAccess { - shmem_handle: self.shmem_handle, + let lock = RwLock::from_raw(PthreadRwLock::new(raw_lock_ptr.cast()), hashmap); + unsafe { + std::ptr::write(shared_ptr, lock); + } + + Self { + num_buckets, + shmem_handle, shared_ptr, - hasher: self.hasher, + shared_size: area_size, + hasher, } } - /// Initialize a table for reading. Currently identical to [`HashMapInit::attach_writer`]. + /// Attach to a hash table for writing. + pub fn attach_writer(self) -> HashMapAccess<'a, K, V, S> { + HashMapAccess { + shmem_handle: self.shmem_handle, + shared_ptr: self.shared_ptr, + hasher: self.hasher, + } + } + + /// Initialize a table for reading. Currently identical to [`HashMapInit::attach_writer`]. pub fn attach_reader(self) -> HashMapAccess<'a, K, V, S> { self.attach_writer() } @@ -138,78 +168,77 @@ type HashMapShared<'a, K, V> = RwLock>; impl<'a, K, V> HashMapInit<'a, K, V, rustc_hash::FxBuildHasher> where - K: Clone + Hash + Eq + K: Clone + Hash + Eq, { - /// Place the hash table within a user-supplied fixed memory area. - pub fn with_fixed( - num_buckets: u32, - area: &'a mut [MaybeUninit], - ) -> Self { - Self { - num_buckets, - shmem_handle: None, - shared_ptr: area.as_mut_ptr().cast(), - shared_size: area.len(), - hasher: rustc_hash::FxBuildHasher, - } + /// Place the hash table within a user-supplied fixed memory area. + pub fn with_fixed(num_buckets: u32, area: &'a mut [MaybeUninit]) -> Self { + Self::new( + num_buckets, + None, + area.as_mut_ptr().cast(), + area.len(), + rustc_hash::FxBuildHasher, + ) } /// Place a new hash map in the given shared memory area - /// - /// # Panics - /// Will panic on failure to resize area to expected map size. + /// + /// # Panics + /// Will panic on failure to resize area to expected map size. 
pub fn with_shmem(num_buckets: u32, shmem: ShmemHandle) -> Self { - let size = Self::estimate_size(num_buckets); - shmem + let size = Self::estimate_size(num_buckets); + shmem .set_size(size) .expect("could not resize shared memory area"); - Self { - num_buckets, - shared_ptr: shmem.data_ptr.as_ptr().cast(), - shmem_handle: Some(shmem), - shared_size: size, - hasher: rustc_hash::FxBuildHasher - } + let ptr = shmem.data_ptr.as_ptr().cast(); + Self::new( + num_buckets, + Some(shmem), + ptr, + size, + rustc_hash::FxBuildHasher, + ) } - /// Make a resizable hash map within a new shared memory area with the given name. - pub fn new_resizeable_named(num_buckets: u32, max_buckets: u32, name: &str) -> Self { - let size = Self::estimate_size(num_buckets); - let max_size = Self::estimate_size(max_buckets); - let shmem = ShmemHandle::new(name, size, max_size) - .expect("failed to make shared memory area"); - - Self { - num_buckets, - shared_ptr: shmem.data_ptr.as_ptr().cast(), - shmem_handle: Some(shmem), - shared_size: size, - hasher: rustc_hash::FxBuildHasher - } - } + /// Make a resizable hash map within a new shared memory area with the given name. + pub fn new_resizeable_named(num_buckets: u32, max_buckets: u32, name: &str) -> Self { + let size = Self::estimate_size(num_buckets); + let max_size = Self::estimate_size(max_buckets); + let shmem = + ShmemHandle::new(name, size, max_size).expect("failed to make shared memory area"); + let ptr = shmem.data_ptr.as_ptr().cast(); - /// Make a resizable hash map within a new anonymous shared memory area. - pub fn new_resizeable(num_buckets: u32, max_buckets: u32) -> Self { - use std::sync::atomic::{AtomicUsize, Ordering}; - static COUNTER: AtomicUsize = AtomicUsize::new(0); - let val = COUNTER.fetch_add(1, Ordering::Relaxed); - let name = format!("neon_shmem_hmap{val}"); - Self::new_resizeable_named(num_buckets, max_buckets, &name) - } + Self::new( + num_buckets, + Some(shmem), + ptr, + size, + rustc_hash::FxBuildHasher, + ) + } + + /// Make a resizable hash map within a new anonymous shared memory area. + pub fn new_resizeable(num_buckets: u32, max_buckets: u32) -> Self { + use std::sync::atomic::{AtomicUsize, Ordering}; + static COUNTER: AtomicUsize = AtomicUsize::new(0); + let val = COUNTER.fetch_add(1, Ordering::Relaxed); + let name = format!("neon_shmem_hmap{val}"); + Self::new_resizeable_named(num_buckets, max_buckets, &name) + } } impl<'a, K, V, S: BuildHasher> HashMapAccess<'a, K, V, S> where K: Clone + Hash + Eq, { - /// Hash a key using the map's hasher. - #[inline] + /// Hash a key using the map's hasher. + #[inline] fn get_hash_value(&self, key: &K) -> u64 { - self.hasher.hash_one(key) + self.hasher.hash_one(key) } - fn entry_with_hash(&self, key: K, hash: u64) -> Entry<'a, '_, K, V> { - let mut map = unsafe { self.shared_ptr.as_ref() }.unwrap().write(); + fn entry_with_hash(&self, key: K, hash: u64) -> Entry<'a, '_, K, V> { + let mut map = unsafe { self.shared_ptr.as_ref() }.unwrap().write(); let dict_pos = hash as usize % map.dictionary.len(); let first = map.dictionary[dict_pos]; if first == INVALID_POS { @@ -247,55 +276,55 @@ where prev_pos = PrevPos::Chained(next); next = bucket.next; } - } - - /// Get a reference to the corresponding value for a key. + } + + /// Get a reference to the corresponding value for a key. 
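// Editorial aside (not part of this patch): a minimal sketch of how the
// constructors above are meant to be used together with the accessor methods
// defined below. Key/value types, bucket counts and the area name are
// arbitrary choices for illustration.
fn example_basic_usage() {
    // 1024 buckets now, growable to 4096 later, in a named shared memory area.
    let map = HashMapInit::<u64, u64>::new_resizeable_named(1024, 4096, "example_map")
        .attach_writer();

    // insert() returns the previous value, or FullError if no bucket is free.
    map.insert(7, 42).expect("hash table full");
    assert_eq!(*map.get(&7).unwrap(), 42);

    // remove() returns the value if the key existed.
    assert_eq!(map.remove(&7), Some(42));
}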
pub fn get<'e>(&'e self, key: &K) -> Option> { - let hash = self.get_hash_value(key); + let hash = self.get_hash_value(key); let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); - RwLockReadGuard::try_map(map, |m| m.get_with_hash(key, hash)).ok() + RwLockReadGuard::try_map(map, |m| m.get_with_hash(key, hash)).ok() } - /// Get a reference to the entry containing a key. + /// Get a reference to the entry containing a key. pub fn entry(&self, key: K) -> Entry<'a, '_, K, V> { - let hash = self.get_hash_value(&key); - self.entry_with_hash(key, hash) + let hash = self.get_hash_value(&key); + self.entry_with_hash(key, hash) } - /// Remove a key given its hash. Returns the associated value if it existed. + /// Remove a key given its hash. Returns the associated value if it existed. pub fn remove(&self, key: &K) -> Option { let hash = self.get_hash_value(key); match self.entry_with_hash(key.clone(), hash) { Entry::Occupied(e) => Some(e.remove()), - Entry::Vacant(_) => None + Entry::Vacant(_) => None, } } - /// Insert/update a key. Returns the previous associated value if it existed. - /// - /// # Errors - /// Will return [`core::FullError`] if there is no more space left in the map. + /// Insert/update a key. Returns the previous associated value if it existed. + /// + /// # Errors + /// Will return [`core::FullError`] if there is no more space left in the map. pub fn insert(&self, key: K, value: V) -> Result, core::FullError> { - let hash = self.get_hash_value(&key); + let hash = self.get_hash_value(&key); match self.entry_with_hash(key.clone(), hash) { Entry::Occupied(mut e) => Ok(Some(e.insert(value))), Entry::Vacant(e) => { - _ = e.insert(value)?; - Ok(None) - } + _ = e.insert(value)?; + Ok(None) + } } } - - /// Optionally return the entry for a bucket at a given index if it exists. - /// - /// Has more overhead than one would intuitively expect: performs both a clone of the key - /// due to the [`OccupiedEntry`] type owning the key and also a hash of the key in order - /// to enable repairing the hash chain if the entry is removed. + + /// Optionally return the entry for a bucket at a given index if it exists. + /// + /// Has more overhead than one would intuitively expect: performs both a clone of the key + /// due to the [`OccupiedEntry`] type owning the key and also a hash of the key in order + /// to enable repairing the hash chain if the entry is removed. pub fn entry_at_bucket(&self, pos: usize) -> Option> { let map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); - if pos >= map.buckets.len() { - return None; - } + if pos >= map.buckets.len() { + return None; + } let entry = map.buckets[pos].inner.as_ref(); match entry { @@ -311,7 +340,7 @@ where } } - /// Returns the number of buckets in the table. + /// Returns the number of buckets in the table. pub fn get_num_buckets(&self) -> usize { let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); map.get_num_buckets() @@ -319,18 +348,18 @@ where /// Return the key and value stored in bucket with given index. This can be used to /// iterate through the hash map. - // TODO: An Iterator might be nicer. The communicator's clock algorithm needs to - // _slowly_ iterate through all buckets with its clock hand, without holding a lock. - // If we switch to an Iterator, it must not hold the lock. + // TODO: An Iterator might be nicer. The communicator's clock algorithm needs to + // _slowly_ iterate through all buckets with its clock hand, without holding a lock. + // If we switch to an Iterator, it must not hold the lock. 
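// Editorial aside (not part of this patch): a sketch of the kind of slow,
// lock-friendly sweep the TODO above refers to, built on get_num_buckets()
// and get_at_bucket() (defined just below). Types are arbitrary.
fn sweep_buckets(map: &HashMapAccess<'_, u64, u64>) {
    for pos in 0..map.get_num_buckets() {
        // Each call takes the read lock and drops it when the returned guard
        // goes out of scope, so the sweep never holds the lock across iterations.
        if let Some(kv) = map.get_at_bucket(pos) {
            let (_key, _value) = (&kv.0, &kv.1);
        }
    }
}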
pub fn get_at_bucket(&self, pos: usize) -> Option> { let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); if pos >= map.buckets.len() { return None; } - RwLockReadGuard::try_map(map, |m| m.buckets[pos].inner.as_ref()).ok() + RwLockReadGuard::try_map(map, |m| m.buckets[pos].inner.as_ref()).ok() } - /// Returns the index of the bucket a given value corresponds to. + /// Returns the index of the bucket a given value corresponds to. pub fn get_bucket_for_value(&self, val_ptr: *const V) -> usize { let map = unsafe { self.shared_ptr.as_ref() }.unwrap().read(); @@ -347,9 +376,9 @@ where map.buckets_in_use as usize } - /// Clears all entries in a table. Does not reset any shrinking operations. - pub fn clear(&self) { - let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); + /// Clears all entries in a table. Does not reset any shrinking operations. + pub fn clear(&self) { + let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); map.clear(); } @@ -378,19 +407,19 @@ where buckets = std::slice::from_raw_parts_mut(buckets_ptr, num_buckets as usize); dictionary = std::slice::from_raw_parts_mut(dictionary_ptr, dictionary_size); - } + } for e in dictionary.iter_mut() { *e = INVALID_POS; } - + for (i, bucket) in buckets.iter_mut().enumerate().take(rehash_buckets as usize) { if bucket.inner.is_none() { - bucket.next = inner.free_head; + bucket.next = inner.free_head; inner.free_head = i as u32; - continue; + continue; } - let hash = self.hasher.hash_one(&bucket.inner.as_ref().unwrap().0); + let hash = self.hasher.hash_one(&bucket.inner.as_ref().unwrap().0); let pos: usize = (hash % dictionary.len() as u64) as usize; bucket.next = dictionary[pos]; dictionary[pos] = i as u32; @@ -398,34 +427,37 @@ where inner.dictionary = dictionary; inner.buckets = buckets; - } + } - /// Rehash the map without growing or shrinking. - pub fn shuffle(&self) { - let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); - let num_buckets = map.get_num_buckets() as u32; - let size_bytes = HashMapInit::::estimate_size(num_buckets); - let end_ptr: *mut u8 = unsafe { self.shared_ptr.byte_add(size_bytes).cast() }; + /// Rehash the map without growing or shrinking. + pub fn shuffle(&self) { + let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); + let num_buckets = map.get_num_buckets() as u32; + let size_bytes = HashMapInit::::estimate_size(num_buckets); + let end_ptr: *mut u8 = unsafe { self.shared_ptr.byte_add(size_bytes).cast() }; let buckets_ptr = map.buckets.as_mut_ptr(); - self.rehash_dict(&mut map, buckets_ptr, end_ptr, num_buckets, num_buckets); - } + self.rehash_dict(&mut map, buckets_ptr, end_ptr, num_buckets, num_buckets); + } - /// Grow the number of buckets within the table. + /// Grow the number of buckets within the table. /// /// 1. Grows the underlying shared memory area /// 2. Initializes new buckets and overwrites the current dictionary /// 3. Rehashes the dictionary - /// - /// # Panics - /// Panics if called on a map initialized with [`HashMapInit::with_fixed`]. - /// - /// # Errors - /// Returns an [`shmem::Error`] if any errors occur resizing the memory region. + /// + /// # Panics + /// Panics if called on a map initialized with [`HashMapInit::with_fixed`]. + /// + /// # Errors + /// Returns an [`shmem::Error`] if any errors occur resizing the memory region. 
pub fn grow(&self, num_buckets: u32) -> Result<(), shmem::Error> { let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); let old_num_buckets = map.buckets.len() as u32; - assert!(num_buckets >= old_num_buckets, "grow called with a smaller number of buckets"); + assert!( + num_buckets >= old_num_buckets, + "grow called with a smaller number of buckets" + ); if num_buckets == old_num_buckets { return Ok(()); } @@ -439,13 +471,13 @@ where let end_ptr: *mut u8 = unsafe { shmem_handle.data_ptr.as_ptr().add(size_bytes) }; // Initialize new buckets. The new buckets are linked to the free list. - // NB: This overwrites the dictionary! + // NB: This overwrites the dictionary! let buckets_ptr = map.buckets.as_mut_ptr(); unsafe { for i in old_num_buckets..num_buckets { let bucket = buckets_ptr.add(i as usize); bucket.write(core::Bucket { - next: if i < num_buckets-1 { + next: if i < num_buckets - 1 { i + 1 } else { map.free_head @@ -455,33 +487,33 @@ where } } - self.rehash_dict(&mut map, buckets_ptr, end_ptr, num_buckets, old_num_buckets); + self.rehash_dict(&mut map, buckets_ptr, end_ptr, num_buckets, old_num_buckets); map.free_head = old_num_buckets; Ok(()) } - /// Begin a shrink, limiting all new allocations to be in buckets with index below `num_buckets`. - /// - /// # Panics - /// Panics if called on a map initialized with [`HashMapInit::with_fixed`] or if `num_buckets` is - /// greater than the number of buckets in the map. - pub fn begin_shrink(&mut self, num_buckets: u32) { - let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); - assert!( - num_buckets <= map.get_num_buckets() as u32, + /// Begin a shrink, limiting all new allocations to be in buckets with index below `num_buckets`. + /// + /// # Panics + /// Panics if called on a map initialized with [`HashMapInit::with_fixed`] or if `num_buckets` is + /// greater than the number of buckets in the map. + pub fn begin_shrink(&mut self, num_buckets: u32) { + let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); + assert!( + num_buckets <= map.get_num_buckets() as u32, "shrink called with a larger number of buckets" ); - _ = self + _ = self .shmem_handle .as_ref() .expect("shrink called on a fixed-size hash table"); - map.alloc_limit = num_buckets; - } + map.alloc_limit = num_buckets; + } - /// If a shrink operation is underway, returns the target size of the map. Otherwise, returns None. - pub fn shrink_goal(&self) -> Option { - let map = unsafe { self.shared_ptr.as_mut() }.unwrap().read(); + /// If a shrink operation is underway, returns the target size of the map. Otherwise, returns None. + pub fn shrink_goal(&self) -> Option { + let map = unsafe { self.shared_ptr.as_mut() }.unwrap().read(); let goal = map.alloc_limit; if goal == INVALID_POS { None } else { Some(goal as usize) } } @@ -503,37 +535,55 @@ where "called finish_shrink when no shrink is in progress" ); - let num_buckets = map.alloc_limit; + /// Complete a shrink after caller has evicted entries, removing the unused buckets and rehashing. + /// + /// # Panics + /// The following cases result in a panic: + /// - Calling this function on a map initialized with [`HashMapInit::with_fixed`]. + /// - Calling this function on a map when no shrink operation is in progress. + /// - Calling this function on a map with `shrink_mode` set to [`HashMapShrinkMode::Remap`] and + /// there are more buckets in use than the value returned by [`HashMapAccess::shrink_goal`]. 
+ /// + /// # Errors + /// Returns an [`shmem::Error`] if any errors occur resizing the memory region. + pub fn finish_shrink(&self) -> Result<(), shmem::Error> { + let mut map = unsafe { self.shared_ptr.as_mut() }.unwrap().write(); + assert!( + map.alloc_limit != INVALID_POS, + "called finish_shrink when no shrink is in progress" + ); - if map.get_num_buckets() == num_buckets as usize { + let num_buckets = map.alloc_limit; + + if map.get_num_buckets() == num_buckets as usize { return Ok(()); } - assert!( - map.buckets_in_use <= num_buckets, - "called finish_shrink before enough entries were removed" - ); - - for i in (num_buckets as usize)..map.buckets.len() { - if let Some((k, v)) = map.buckets[i].inner.take() { - // alloc_bucket increases count, so need to decrease since we're just moving - map.buckets_in_use -= 1; - map.alloc_bucket(k, v).unwrap(); - } - } + assert!( + map.buckets_in_use <= num_buckets, + "called finish_shrink before enough entries were removed" + ); + + for i in (num_buckets as usize)..map.buckets.len() { + if let Some((k, v)) = map.buckets[i].inner.take() { + // alloc_bucket increases count, so need to decrease since we're just moving + map.buckets_in_use -= 1; + map.alloc_bucket(k, v).unwrap(); + } + } let shmem_handle = self .shmem_handle .as_ref() .expect("shrink called on a fixed-size hash table"); - let size_bytes = HashMapInit::::estimate_size(num_buckets); + let size_bytes = HashMapInit::::estimate_size(num_buckets); shmem_handle.set_size(size_bytes)?; let end_ptr: *mut u8 = unsafe { shmem_handle.data_ptr.as_ptr().add(size_bytes) }; - let buckets_ptr = map.buckets.as_mut_ptr(); - self.rehash_dict(&mut map, buckets_ptr, end_ptr, num_buckets, num_buckets); - map.alloc_limit = INVALID_POS; - - Ok(()) - } + let buckets_ptr = map.buckets.as_mut_ptr(); + self.rehash_dict(&mut map, buckets_ptr, end_ptr, num_buckets, num_buckets); + map.alloc_limit = INVALID_POS; + + Ok(()) + } } diff --git a/libs/neon-shmem/src/hash/core.rs b/libs/neon-shmem/src/hash/core.rs index 473c417ece..013eb9a09c 100644 --- a/libs/neon-shmem/src/hash/core.rs +++ b/libs/neon-shmem/src/hash/core.rs @@ -11,26 +11,26 @@ pub(crate) const INVALID_POS: u32 = u32::MAX; /// Fundamental storage unit within the hash table. Either empty or contains a key-value pair. /// Always part of a chain of some kind (either a freelist if empty or a hash chain if full). pub(crate) struct Bucket { - /// Index of next bucket in the chain. - pub(crate) next: u32, - /// Key-value pair contained within bucket. + /// Index of next bucket in the chain. + pub(crate) next: u32, + /// Key-value pair contained within bucket. pub(crate) inner: Option<(K, V)>, } /// Core hash table implementation. pub(crate) struct CoreHashMap<'a, K, V> { - /// Dictionary used to map hashes to bucket indices. + /// Dictionary used to map hashes to bucket indices. pub(crate) dictionary: &'a mut [u32], - /// Buckets containing key-value pairs. + /// Buckets containing key-value pairs. pub(crate) buckets: &'a mut [Bucket], - /// Head of the freelist. + /// Head of the freelist. pub(crate) free_head: u32, - /// Maximum index of a bucket allowed to be allocated. [`INVALID_POS`] if no limit. - pub(crate) alloc_limit: u32, + /// Maximum index of a bucket allowed to be allocated. [`INVALID_POS`] if no limit. + pub(crate) alloc_limit: u32, /// The number of currently occupied buckets. pub(crate) buckets_in_use: u32, - // pub(crate) lock: libc::pthread_mutex_t, - // Unclear what the purpose of this is. 
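// Editorial aside (not part of this patch): the resize workflow of the wrapper
// above (grow / begin_shrink / finish_shrink), roughly as the tests exercise
// it. The eviction policy is up to the caller; this sketch just drops entries
// by bucket index until the map fits the shrink target. Types and sizes are
// arbitrary.
fn example_resize(map: &mut HashMapAccess<'_, u64, u64>) -> Result<(), shmem::Error> {
    // Growing is a single call; existing entries are rehashed into the larger table.
    map.grow(2048)?;

    // Shrinking is two-phase: first cap new allocations below the target...
    map.begin_shrink(512);
    assert_eq!(map.shrink_goal(), Some(512));

    // ...evict until occupancy fits under the target...
    for pos in 0..map.get_num_buckets() {
        if map.get_num_buckets_in_use() <= 512 {
            break;
        }
        if let Some(e) = map.entry_at_bucket(pos) {
            e.remove();
        }
    }

    // ...then release the now-unused buckets and rehash the dictionary.
    map.finish_shrink()
}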
+ // pub(crate) lock: libc::pthread_mutex_t, + // Unclear what the purpose of this is. pub(crate) _user_list_head: u32, } @@ -41,7 +41,7 @@ pub struct FullError(); impl<'a, K: Clone + Hash + Eq, V> CoreHashMap<'a, K, V> { const FILL_FACTOR: f32 = 0.60; - /// Estimate the size of data contained within the the hash map. + /// Estimate the size of data contained within the the hash map. pub fn estimate_size(num_buckets: u32) -> usize { let mut size = 0; @@ -53,7 +53,7 @@ impl<'a, K: Clone + Hash + Eq, V> CoreHashMap<'a, K, V> { as usize; size - } + } pub fn new( buckets: &'a mut [MaybeUninit>], @@ -66,7 +66,7 @@ impl<'a, K: Clone + Hash + Eq, V> CoreHashMap<'a, K, V> { i as u32 + 1 } else { INVALID_POS - }, + }, inner: None, }); } @@ -89,11 +89,11 @@ impl<'a, K: Clone + Hash + Eq, V> CoreHashMap<'a, K, V> { free_head: 0, buckets_in_use: 0, _user_list_head: INVALID_POS, - alloc_limit: INVALID_POS, + alloc_limit: INVALID_POS, } } - /// Get the value associated with a key (if it exists) given its hash. + /// Get the value associated with a key (if it exists) given its hash. pub fn get_with_hash(&self, key: &K, hash: u64) -> Option<&V> { let mut next = self.dictionary[hash as usize % self.dictionary.len()]; loop { @@ -110,22 +110,22 @@ impl<'a, K: Clone + Hash + Eq, V> CoreHashMap<'a, K, V> { } } - /// Get number of buckets in map. + /// Get number of buckets in map. pub fn get_num_buckets(&self) -> usize { self.buckets.len() } - /// Clears all entries from the hashmap. - /// - /// Does not reset any allocation limits, but does clear any entries beyond them. - pub fn clear(&mut self) { - for i in 0..self.buckets.len() { + /// Clears all entries from the hashmap. + /// + /// Does not reset any allocation limits, but does clear any entries beyond them. + pub fn clear(&mut self) { + for i in 0..self.buckets.len() { self.buckets[i] = Bucket { next: if i < self.buckets.len() - 1 { i as u32 + 1 } else { INVALID_POS - }, + }, inner: None, } } @@ -133,46 +133,46 @@ impl<'a, K: Clone + Hash + Eq, V> CoreHashMap<'a, K, V> { self.dictionary[i] = INVALID_POS; } - self.free_head = 0; - self.buckets_in_use = 0; - } + self.free_head = 0; + self.buckets_in_use = 0; + } - /// Find the position of an unused bucket via the freelist and initialize it. + /// Find the position of an unused bucket via the freelist and initialize it. pub(crate) fn alloc_bucket(&mut self, key: K, value: V) -> Result { let mut pos = self.free_head; - // Find the first bucket we're *allowed* to use. - let mut prev = PrevPos::First(self.free_head); - while pos != INVALID_POS && pos >= self.alloc_limit { - let bucket = &mut self.buckets[pos as usize]; - prev = PrevPos::Chained(pos); - pos = bucket.next; - } - if pos == INVALID_POS { - return Err(FullError()); - } + // Find the first bucket we're *allowed* to use. + let mut prev = PrevPos::First(self.free_head); + while pos != INVALID_POS && pos >= self.alloc_limit { + let bucket = &mut self.buckets[pos as usize]; + prev = PrevPos::Chained(pos); + pos = bucket.next; + } + if pos == INVALID_POS { + return Err(FullError()); + } - // Repair the freelist. - match prev { - PrevPos::First(_) => { - let next_pos = self.buckets[pos as usize].next; - self.free_head = next_pos; - } - PrevPos::Chained(p) => if p != INVALID_POS { - let next_pos = self.buckets[pos as usize].next; - self.buckets[p as usize].next = next_pos; - }, - _ => unreachable!() - } + // Repair the freelist. 
+ match prev { + PrevPos::First(_) => { + let next_pos = self.buckets[pos as usize].next; + self.free_head = next_pos; + } + PrevPos::Chained(p) => { + if p != INVALID_POS { + let next_pos = self.buckets[pos as usize].next; + self.buckets[p as usize].next = next_pos; + } + } + _ => unreachable!(), + } - // Initialize the bucket. - let bucket = &mut self.buckets[pos as usize]; - self.buckets_in_use += 1; + // Initialize the bucket. + let bucket = &mut self.buckets[pos as usize]; + self.buckets_in_use += 1; bucket.next = INVALID_POS; bucket.inner = Some((key, value)); Ok(pos) } } - - diff --git a/libs/neon-shmem/src/hash/entry.rs b/libs/neon-shmem/src/hash/entry.rs index a5832665aa..bf2f63fe9c 100644 --- a/libs/neon-shmem/src/hash/entry.rs +++ b/libs/neon-shmem/src/hash/entry.rs @@ -6,31 +6,30 @@ use crate::sync::{RwLockWriteGuard, ValueWriteGuard}; use std::hash::Hash; use std::mem; - pub enum Entry<'a, 'b, K, V> { - Occupied(OccupiedEntry<'a, 'b, K, V>), + Occupied(OccupiedEntry<'a, 'b, K, V>), Vacant(VacantEntry<'a, 'b, K, V>), } /// Enum representing the previous position within a chain. #[derive(Clone, Copy)] pub(crate) enum PrevPos { - /// Starting index within the dictionary. + /// Starting index within the dictionary. First(u32), - /// Regular index within the buckets. + /// Regular index within the buckets. Chained(u32), - /// Unknown - e.g. the associated entry was retrieved by index instead of chain. - Unknown(u64), + /// Unknown - e.g. the associated entry was retrieved by index instead of chain. + Unknown(u64), } pub struct OccupiedEntry<'a, 'b, K, V> { - /// Mutable reference to the map containing this entry. - pub(crate) map: RwLockWriteGuard<'b, CoreHashMap<'a, K, V>>, - /// The key of the occupied entry + /// Mutable reference to the map containing this entry. + pub(crate) map: RwLockWriteGuard<'b, CoreHashMap<'a, K, V>>, + /// The key of the occupied entry pub(crate) _key: K, - /// The index of the previous entry in the chain. + /// The index of the previous entry in the chain. pub(crate) prev_pos: PrevPos, - /// The position of the bucket in the [`CoreHashMap`] bucket array. + /// The position of the bucket in the [`CoreHashMap`] bucket array. pub(crate) bucket_pos: u32, } @@ -51,56 +50,57 @@ impl OccupiedEntry<'_, '_, K, V> { .1 } - /// Inserts a value into the entry, replacing (and returning) the existing value. + /// Inserts a value into the entry, replacing (and returning) the existing value. pub fn insert(&mut self, value: V) -> V { let bucket = &mut self.map.buckets[self.bucket_pos as usize]; // This assumes inner is Some, which it must be for an OccupiedEntry mem::replace(&mut bucket.inner.as_mut().unwrap().1, value) } - /// Removes the entry from the hash map, returning the value originally stored within it. - /// - /// This may result in multiple bucket accesses if the entry was obtained by index as the - /// previous chain entry needs to be discovered in this case. - /// - /// # Panics - /// Panics if the `prev_pos` field is equal to [`PrevPos::Unknown`]. In practice, this means - /// the entry was obtained via calling something like [`CoreHashMap::entry_at_bucket`]. + /// Removes the entry from the hash map, returning the value originally stored within it. + /// + /// This may result in multiple bucket accesses if the entry was obtained by index as the + /// previous chain entry needs to be discovered in this case. + /// + /// # Panics + /// Panics if the `prev_pos` field is equal to [`PrevPos::Unknown`]. 
In practice, this means + /// the entry was obtained via calling something like [`CoreHashMap::entry_at_bucket`]. pub fn remove(mut self) -> V { - // If this bucket was queried by index, go ahead and follow its chain from the start. - let prev = if let PrevPos::Unknown(hash) = self.prev_pos { - let dict_idx = hash as usize % self.map.dictionary.len(); - let mut prev = PrevPos::First(dict_idx as u32); - let mut curr = self.map.dictionary[dict_idx]; - while curr != self.bucket_pos { - curr = self.map.buckets[curr as usize].next; - prev = PrevPos::Chained(curr); - } - prev - } else { - self.prev_pos - }; - + // If this bucket was queried by index, go ahead and follow its chain from the start. + let prev = if let PrevPos::Unknown(hash) = self.prev_pos { + let dict_idx = hash as usize % self.map.dictionary.len(); + let mut prev = PrevPos::First(dict_idx as u32); + let mut curr = self.map.dictionary[dict_idx]; + while curr != self.bucket_pos { + assert!(curr != INVALID_POS); + prev = PrevPos::Chained(curr); + curr = self.map.buckets[curr as usize].next; + } + prev + } else { + self.prev_pos + }; + // CoreHashMap::remove returns Option<(K, V)>. We know it's Some for an OccupiedEntry. let bucket = &mut self.map.buckets[self.bucket_pos as usize]; - + // unlink it from the chain match prev { PrevPos::First(dict_pos) => { - self.map.dictionary[dict_pos as usize] = bucket.next; - }, + self.map.dictionary[dict_pos as usize] = bucket.next; + } PrevPos::Chained(bucket_pos) => { - // println!("we think prev of {} is {bucket_pos}", self.bucket_pos); + // println!("we think prev of {} is {bucket_pos}", self.bucket_pos); self.map.buckets[bucket_pos as usize].next = bucket.next; - }, - _ => unreachable!(), + } + _ => unreachable!(), } // and add it to the freelist - let free = self.map.free_head; + let free = self.map.free_head; let bucket = &mut self.map.buckets[self.bucket_pos as usize]; let old_value = bucket.inner.take(); - bucket.next = free; + bucket.next = free; self.map.free_head = self.bucket_pos; self.map.buckets_in_use -= 1; @@ -110,19 +110,19 @@ impl OccupiedEntry<'_, '_, K, V> { /// An abstract view into a vacant entry within the map. pub struct VacantEntry<'a, 'b, K, V> { - /// Mutable reference to the map containing this entry. - pub(crate) map: RwLockWriteGuard<'b, CoreHashMap<'a, K, V>>, - /// The key to be inserted into this entry. + /// Mutable reference to the map containing this entry. + pub(crate) map: RwLockWriteGuard<'b, CoreHashMap<'a, K, V>>, + /// The key to be inserted into this entry. pub(crate) key: K, - /// The position within the dictionary corresponding to the key's hash. + /// The position within the dictionary corresponding to the key's hash. pub(crate) dict_pos: u32, } impl<'b, K: Clone + Hash + Eq, V> VacantEntry<'_, 'b, K, V> { - /// Insert a value into the vacant entry, finding and populating an empty bucket in the process. - /// - /// # Errors - /// Will return [`FullError`] if there are no unoccupied buckets in the map. + /// Insert a value into the vacant entry, finding and populating an empty bucket in the process. + /// + /// # Errors + /// Will return [`FullError`] if there are no unoccupied buckets in the map. 
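// Editorial aside (not part of this patch): the occupied/vacant split above is
// normally consumed with a match, the same upsert pattern used by the tests in
// tests.rs. Key/value types here are arbitrary.
fn upsert(map: &HashMapAccess<'_, u64, u64>, key: u64, value: u64) {
    match map.entry(key) {
        Entry::Occupied(mut e) => {
            // Overwrite in place; the previous value is returned (and dropped here).
            e.insert(value);
        }
        Entry::Vacant(e) => {
            // Allocates a bucket; fails with FullError when no bucket is free.
            _ = e.insert(value).expect("hash table full");
        }
    }
}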
pub fn insert(mut self, value: V) -> Result, FullError> { let pos = self.map.alloc_bucket(self.key, value)?; if pos == INVALID_POS { @@ -131,9 +131,8 @@ impl<'b, K: Clone + Hash + Eq, V> VacantEntry<'_, 'b, K, V> { self.map.buckets[pos as usize].next = self.map.dictionary[self.dict_pos as usize]; self.map.dictionary[self.dict_pos as usize] = pos; - Ok(RwLockWriteGuard::map( - self.map, - |m| &mut m.buckets[pos as usize].inner.as_mut().unwrap().1 - )) + Ok(RwLockWriteGuard::map(self.map, |m| { + &mut m.buckets[pos as usize].inner.as_mut().unwrap().1 + })) } } diff --git a/libs/neon-shmem/src/hash/tests.rs b/libs/neon-shmem/src/hash/tests.rs index d7b8d19050..aee47a0b3e 100644 --- a/libs/neon-shmem/src/hash/tests.rs +++ b/libs/neon-shmem/src/hash/tests.rs @@ -3,9 +3,9 @@ use std::collections::HashSet; use std::fmt::Debug; use std::mem::MaybeUninit; +use crate::hash::Entry; use crate::hash::HashMapAccess; use crate::hash::HashMapInit; -use crate::hash::Entry; use crate::hash::core::FullError; use rand::seq::SliceRandom; @@ -35,20 +35,21 @@ impl<'a> From<&'a [u8]> for TestKey { } } -fn test_inserts + Copy>(keys: &[K]) { - let w = HashMapInit::::new_resizeable_named( - 100000, 120000, "test_inserts" - ).attach_writer(); +fn test_inserts + Copy>(keys: &[K]) { + let w = HashMapInit::::new_resizeable_named(100000, 120000, "test_inserts") + .attach_writer(); for (idx, k) in keys.iter().enumerate() { - let res = w.entry((*k).into()); - match res { - Entry::Occupied(mut e) => { e.insert(idx); } - Entry::Vacant(e) => { - let res = e.insert(idx); - assert!(res.is_ok()); - }, - }; + let res = w.entry((*k).into()); + match res { + Entry::Occupied(mut e) => { + e.insert(idx); + } + Entry::Vacant(e) => { + let res = e.insert(idx); + assert!(res.is_ok()); + } + }; } for (idx, k) in keys.iter().enumerate() { @@ -83,7 +84,7 @@ fn sparse() { for _ in 0..10000 { loop { let key = rand::random::(); - if used_keys.get(&key).is_some() { + if used_keys.contains(&key) { continue; } used_keys.insert(key); @@ -109,79 +110,85 @@ fn apply_op( shadow.remove(&op.0) }; - let entry = map.entry(op.0); + let entry = map.entry(op.0); let hash_existing = match op.1 { - Some(new) => { - match entry { - Entry::Occupied(mut e) => Some(e.insert(new)), - Entry::Vacant(e) => { _ = e.insert(new).unwrap(); None }, - } - }, - None => { - match entry { - Entry::Occupied(e) => Some(e.remove()), - Entry::Vacant(_) => None, - } - }, - }; + Some(new) => match entry { + Entry::Occupied(mut e) => Some(e.insert(new)), + Entry::Vacant(e) => { + _ = e.insert(new).unwrap(); + None + } + }, + None => match entry { + Entry::Occupied(e) => Some(e.remove()), + Entry::Vacant(_) => None, + }, + }; - assert_eq!(shadow_existing, hash_existing); + assert_eq!(shadow_existing, hash_existing); } fn do_random_ops( - num_ops: usize, - size: u32, - del_prob: f64, - writer: &mut HashMapAccess, - shadow: &mut BTreeMap, - rng: &mut rand::rngs::ThreadRng, + num_ops: usize, + size: u32, + del_prob: f64, + writer: &mut HashMapAccess, + shadow: &mut BTreeMap, + rng: &mut rand::rngs::ThreadRng, ) { - for i in 0..num_ops { + for i in 0..num_ops { let key: TestKey = ((rng.next_u32() % size) as u128).into(); - let op = TestOp(key, if rng.random_bool(del_prob) { Some(i) } else { None }); + let op = TestOp( + key, + if rng.random_bool(del_prob) { + Some(i) + } else { + None + }, + ); apply_op(&op, writer, shadow); } } fn do_deletes( - num_ops: usize, - writer: &mut HashMapAccess, - shadow: &mut BTreeMap, + num_ops: usize, + writer: &mut HashMapAccess, + shadow: &mut BTreeMap, 
) { - for _ in 0..num_ops { - let (k, _) = shadow.pop_first().unwrap(); - writer.remove(&k); - } + for _ in 0..num_ops { + let (k, _) = shadow.pop_first().unwrap(); + writer.remove(&k); + } } fn do_shrink( - writer: &mut HashMapAccess, - shadow: &mut BTreeMap, - to: u32 + writer: &mut HashMapAccess, + shadow: &mut BTreeMap, + to: u32, ) { - assert!(writer.shrink_goal().is_none()); - writer.begin_shrink(to); - assert_eq!(writer.shrink_goal(), Some(to as usize)); - while writer.get_num_buckets_in_use() > to as usize { - let (k, _) = shadow.pop_first().unwrap(); - let entry = writer.entry(k); - if let Entry::Occupied(e) = entry { - e.remove(); - } - } - let old_usage = writer.get_num_buckets_in_use(); - writer.finish_shrink().unwrap(); - assert!(writer.shrink_goal().is_none()); - assert_eq!(writer.get_num_buckets_in_use(), old_usage); + assert!(writer.shrink_goal().is_none()); + writer.begin_shrink(to); + assert_eq!(writer.shrink_goal(), Some(to as usize)); + while writer.get_num_buckets_in_use() > to as usize { + let (k, _) = shadow.pop_first().unwrap(); + let entry = writer.entry(k); + if let Entry::Occupied(e) = entry { + e.remove(); + } + } + let old_usage = writer.get_num_buckets_in_use(); + writer.finish_shrink().unwrap(); + assert!(writer.shrink_goal().is_none()); + assert_eq!(writer.get_num_buckets_in_use(), old_usage); } #[test] fn random_ops() { - let mut writer = HashMapInit::::new_resizeable_named( - 100000, 120000, "test_random" - ).attach_writer(); + let mut writer = + HashMapInit::::new_resizeable_named(100000, 120000, "test_random") + .attach_writer(); let mut shadow: std::collections::BTreeMap = BTreeMap::new(); - + let distribution = Zipf::new(u128::MAX as f64, 1.1).unwrap(); let mut rng = rand::rng(); for i in 0..100000 { @@ -193,235 +200,230 @@ fn random_ops() { } } - #[test] fn test_shuffle() { - let mut writer = HashMapInit::::new_resizeable_named( - 1000, 1200, "test_shuf" - ).attach_writer(); + let mut writer = HashMapInit::::new_resizeable_named(1000, 1200, "test_shuf") + .attach_writer(); let mut shadow: std::collections::BTreeMap = BTreeMap::new(); let mut rng = rand::rng(); do_random_ops(10000, 1000, 0.75, &mut writer, &mut shadow, &mut rng); writer.shuffle(); - do_random_ops(10000, 1000, 0.75, &mut writer, &mut shadow, &mut rng); + do_random_ops(10000, 1000, 0.75, &mut writer, &mut shadow, &mut rng); } #[test] fn test_grow() { - let mut writer = HashMapInit::::new_resizeable_named( - 1000, 2000, "test_grow" - ).attach_writer(); + let mut writer = HashMapInit::::new_resizeable_named(1000, 2000, "test_grow") + .attach_writer(); let mut shadow: std::collections::BTreeMap = BTreeMap::new(); let mut rng = rand::rng(); do_random_ops(10000, 1000, 0.75, &mut writer, &mut shadow, &mut rng); - let old_usage = writer.get_num_buckets_in_use(); + let old_usage = writer.get_num_buckets_in_use(); writer.grow(1500).unwrap(); - assert_eq!(writer.get_num_buckets_in_use(), old_usage); - assert_eq!(writer.get_num_buckets(), 1500); - do_random_ops(10000, 1500, 0.75, &mut writer, &mut shadow, &mut rng); + assert_eq!(writer.get_num_buckets_in_use(), old_usage); + assert_eq!(writer.get_num_buckets(), 1500); + do_random_ops(10000, 1500, 0.75, &mut writer, &mut shadow, &mut rng); } #[test] fn test_clear() { - let mut writer = HashMapInit::::new_resizeable_named( - 1500, 2000, "test_clear" - ).attach_writer(); + let mut writer = HashMapInit::::new_resizeable_named(1500, 2000, "test_clear") + .attach_writer(); let mut shadow: std::collections::BTreeMap = BTreeMap::new(); let mut rng = 
rand::rng(); do_random_ops(2000, 1500, 0.75, &mut writer, &mut shadow, &mut rng); - writer.clear(); - assert_eq!(writer.get_num_buckets_in_use(), 0); - assert_eq!(writer.get_num_buckets(), 1500); - while let Some((key, _)) = shadow.pop_first() { - assert!(writer.get(&key).is_none()); - } - do_random_ops(2000, 1500, 0.75, &mut writer, &mut shadow, &mut rng); - for i in 0..(1500 - writer.get_num_buckets_in_use()) { - writer.insert((1500 + i as u128).into(), 0).unwrap(); - } - assert_eq!(writer.insert(5000.into(), 0), Err(FullError {})); - writer.clear(); - assert!(writer.insert(5000.into(), 0).is_ok()); + writer.clear(); + assert_eq!(writer.get_num_buckets_in_use(), 0); + assert_eq!(writer.get_num_buckets(), 1500); + while let Some((key, _)) = shadow.pop_first() { + assert!(writer.get(&key).is_none()); + } + do_random_ops(2000, 1500, 0.75, &mut writer, &mut shadow, &mut rng); + for i in 0..(1500 - writer.get_num_buckets_in_use()) { + writer.insert((1500 + i as u128).into(), 0).unwrap(); + } + assert_eq!(writer.insert(5000.into(), 0), Err(FullError {})); + writer.clear(); + assert!(writer.insert(5000.into(), 0).is_ok()); } #[test] fn test_idx_remove() { - let mut writer = HashMapInit::::new_resizeable_named( - 1500, 2000, "test_clear" - ).attach_writer(); + let mut writer = HashMapInit::::new_resizeable_named(1500, 2000, "test_clear") + .attach_writer(); let mut shadow: std::collections::BTreeMap = BTreeMap::new(); let mut rng = rand::rng(); do_random_ops(2000, 1500, 0.25, &mut writer, &mut shadow, &mut rng); - for _ in 0..100 { - let idx = (rng.next_u32() % 1500) as usize; - if let Some(e) = writer.entry_at_bucket(idx) { - shadow.remove(&e._key); - e.remove(); - } - - } - while let Some((key, val)) = shadow.pop_first() { - assert_eq!(*writer.get(&key).unwrap(), val); - } + for _ in 0..100 { + let idx = (rng.next_u32() % 1500) as usize; + if let Some(e) = writer.entry_at_bucket(idx) { + shadow.remove(&e._key); + e.remove(); + } + } + while let Some((key, val)) = shadow.pop_first() { + assert_eq!(*writer.get(&key).unwrap(), val); + } } #[test] fn test_idx_get() { - let mut writer = HashMapInit::::new_resizeable_named( - 1500, 2000, "test_clear" - ).attach_writer(); + let mut writer = HashMapInit::::new_resizeable_named(1500, 2000, "test_clear") + .attach_writer(); let mut shadow: std::collections::BTreeMap = BTreeMap::new(); let mut rng = rand::rng(); do_random_ops(2000, 1500, 0.25, &mut writer, &mut shadow, &mut rng); - for _ in 0..100 { - let idx = (rng.next_u32() % 1500) as usize; - if let Some(pair) = writer.get_at_bucket(idx) { - { - let v: *const usize = &pair.1; - assert_eq!(writer.get_bucket_for_value(v), idx); - } - { - let v: *const usize = &pair.1; - assert_eq!(writer.get_bucket_for_value(v), idx); - } - } - } + for _ in 0..100 { + let idx = (rng.next_u32() % 1500) as usize; + if let Some(pair) = writer.get_at_bucket(idx) { + { + let v: *const usize = &pair.1; + assert_eq!(writer.get_bucket_for_value(v), idx); + } + { + let v: *const usize = &pair.1; + assert_eq!(writer.get_bucket_for_value(v), idx); + } + } + } } #[test] fn test_shrink() { - let mut writer = HashMapInit::::new_resizeable_named( - 1500, 2000, "test_shrink" - ).attach_writer(); + let mut writer = HashMapInit::::new_resizeable_named(1500, 2000, "test_shrink") + .attach_writer(); let mut shadow: std::collections::BTreeMap = BTreeMap::new(); let mut rng = rand::rng(); - - do_random_ops(10000, 1500, 0.75, &mut writer, &mut shadow, &mut rng); - do_shrink(&mut writer, &mut shadow, 1000); - 
assert_eq!(writer.get_num_buckets(), 1000); - do_deletes(500, &mut writer, &mut shadow); - do_random_ops(10000, 500, 0.75, &mut writer, &mut shadow, &mut rng); - assert!(writer.get_num_buckets_in_use() <= 1000); + + do_random_ops(10000, 1500, 0.75, &mut writer, &mut shadow, &mut rng); + do_shrink(&mut writer, &mut shadow, 1000); + assert_eq!(writer.get_num_buckets(), 1000); + do_deletes(500, &mut writer, &mut shadow); + do_random_ops(10000, 500, 0.75, &mut writer, &mut shadow, &mut rng); + assert!(writer.get_num_buckets_in_use() <= 1000); } #[test] fn test_shrink_grow_seq() { - let mut writer = HashMapInit::::new_resizeable_named( - 1000, 20000, "test_grow_seq" - ).attach_writer(); + let mut writer = + HashMapInit::::new_resizeable_named(1000, 20000, "test_grow_seq") + .attach_writer(); let mut shadow: std::collections::BTreeMap = BTreeMap::new(); let mut rng = rand::rng(); do_random_ops(500, 1000, 0.1, &mut writer, &mut shadow, &mut rng); - eprintln!("Shrinking to 750"); + eprintln!("Shrinking to 750"); do_shrink(&mut writer, &mut shadow, 750); - do_random_ops(200, 1000, 0.5, &mut writer, &mut shadow, &mut rng); - eprintln!("Growing to 1500"); - writer.grow(1500).unwrap(); - do_random_ops(600, 1500, 0.1, &mut writer, &mut shadow, &mut rng); - eprintln!("Shrinking to 200"); - while shadow.len() > 100 { - do_deletes(1, &mut writer, &mut shadow); - } - do_shrink(&mut writer, &mut shadow, 200); - do_random_ops(50, 1500, 0.25, &mut writer, &mut shadow, &mut rng); - eprintln!("Growing to 10k"); - writer.grow(10000).unwrap(); - do_random_ops(10000, 5000, 0.25, &mut writer, &mut shadow, &mut rng); + do_random_ops(200, 1000, 0.5, &mut writer, &mut shadow, &mut rng); + eprintln!("Growing to 1500"); + writer.grow(1500).unwrap(); + do_random_ops(600, 1500, 0.1, &mut writer, &mut shadow, &mut rng); + eprintln!("Shrinking to 200"); + while shadow.len() > 100 { + do_deletes(1, &mut writer, &mut shadow); + } + do_shrink(&mut writer, &mut shadow, 200); + do_random_ops(50, 1500, 0.25, &mut writer, &mut shadow, &mut rng); + eprintln!("Growing to 10k"); + writer.grow(10000).unwrap(); + do_random_ops(10000, 5000, 0.25, &mut writer, &mut shadow, &mut rng); } #[test] fn test_bucket_ops() { - let writer = HashMapInit::::new_resizeable_named( - 1000, 1200, "test_bucket_ops" - ).attach_writer(); - match writer.entry(1.into()) { - Entry::Occupied(mut e) => { e.insert(2); }, - Entry::Vacant(e) => { _ = e.insert(2).unwrap(); }, - } - assert_eq!(writer.get_num_buckets_in_use(), 1); - assert_eq!(writer.get_num_buckets(), 1000); - assert_eq!(*writer.get(&1.into()).unwrap(), 2); - let pos = match writer.entry(1.into()) { - Entry::Occupied(e) => { - assert_eq!(e._key, 1.into()); - let pos = e.bucket_pos as usize; - pos - }, - Entry::Vacant(_) => { panic!("Insert didn't affect entry"); }, - }; - assert_eq!(writer.entry_at_bucket(pos).unwrap()._key, 1.into()); - assert_eq!(*writer.get_at_bucket(pos).unwrap(), (1.into(), 2)); - { - let ptr: *const usize = &*writer.get(&1.into()).unwrap(); - assert_eq!(writer.get_bucket_for_value(ptr), pos); - } - writer.remove(&1.into()); - assert!(writer.get(&1.into()).is_none()); + let writer = HashMapInit::::new_resizeable_named(1000, 1200, "test_bucket_ops") + .attach_writer(); + match writer.entry(1.into()) { + Entry::Occupied(mut e) => { + e.insert(2); + } + Entry::Vacant(e) => { + _ = e.insert(2).unwrap(); + } + } + assert_eq!(writer.get_num_buckets_in_use(), 1); + assert_eq!(writer.get_num_buckets(), 1000); + assert_eq!(*writer.get(&1.into()).unwrap(), 2); + let pos = match 
writer.entry(1.into()) { + Entry::Occupied(e) => { + assert_eq!(e._key, 1.into()); + let pos = e.bucket_pos as usize; + pos + } + Entry::Vacant(_) => { + panic!("Insert didn't affect entry"); + } + }; + assert_eq!(writer.entry_at_bucket(pos).unwrap()._key, 1.into()); + assert_eq!(*writer.get_at_bucket(pos).unwrap(), (1.into(), 2)); + { + let ptr: *const usize = &*writer.get(&1.into()).unwrap(); + assert_eq!(writer.get_bucket_for_value(ptr), pos); + } + writer.remove(&1.into()); + assert!(writer.get(&1.into()).is_none()); } #[test] fn test_shrink_zero() { - let mut writer = HashMapInit::::new_resizeable_named( - 1500, 2000, "test_shrink_zero" - ).attach_writer(); - writer.begin_shrink(0); - for i in 0..1500 { - writer.entry_at_bucket(i).map(|x| x.remove()); - } - writer.finish_shrink().unwrap(); - assert_eq!(writer.get_num_buckets_in_use(), 0); - let entry = writer.entry(1.into()); - if let Entry::Vacant(v) = entry { - assert!(v.insert(2).is_err()); - } else { - panic!("Somehow got non-vacant entry in empty map.") - } - writer.grow(50).unwrap(); - let entry = writer.entry(1.into()); - if let Entry::Vacant(v) = entry { - assert!(v.insert(2).is_ok()); - } else { - panic!("Somehow got non-vacant entry in empty map.") - } - assert_eq!(writer.get_num_buckets_in_use(), 1); + let mut writer = + HashMapInit::::new_resizeable_named(1500, 2000, "test_shrink_zero") + .attach_writer(); + writer.begin_shrink(0); + for i in 0..1500 { + writer.entry_at_bucket(i).map(|x| x.remove()); + } + writer.finish_shrink().unwrap(); + assert_eq!(writer.get_num_buckets_in_use(), 0); + let entry = writer.entry(1.into()); + if let Entry::Vacant(v) = entry { + assert!(v.insert(2).is_err()); + } else { + panic!("Somehow got non-vacant entry in empty map.") + } + writer.grow(50).unwrap(); + let entry = writer.entry(1.into()); + if let Entry::Vacant(v) = entry { + assert!(v.insert(2).is_ok()); + } else { + panic!("Somehow got non-vacant entry in empty map.") + } + assert_eq!(writer.get_num_buckets_in_use(), 1); } #[test] #[should_panic] fn test_grow_oom() { - let writer = HashMapInit::::new_resizeable_named( - 1500, 2000, "test_grow_oom" - ).attach_writer(); - writer.grow(20000).unwrap(); + let writer = HashMapInit::::new_resizeable_named(1500, 2000, "test_grow_oom") + .attach_writer(); + writer.grow(20000).unwrap(); } #[test] #[should_panic] fn test_shrink_bigger() { - let mut writer = HashMapInit::::new_resizeable_named( - 1500, 2500, "test_shrink_bigger" - ).attach_writer(); - writer.begin_shrink(2000); + let mut writer = + HashMapInit::::new_resizeable_named(1500, 2500, "test_shrink_bigger") + .attach_writer(); + writer.begin_shrink(2000); } #[test] #[should_panic] fn test_shrink_early_finish() { - let writer = HashMapInit::::new_resizeable_named( - 1500, 2500, "test_shrink_early_finish" - ).attach_writer(); - writer.finish_shrink().unwrap(); + let writer = + HashMapInit::::new_resizeable_named(1500, 2500, "test_shrink_early_finish") + .attach_writer(); + writer.finish_shrink().unwrap(); } #[test] #[should_panic] fn test_shrink_fixed_size() { - let mut area = [MaybeUninit::uninit(); 10000]; + let mut area = [MaybeUninit::uninit(); 10000]; let init_struct = HashMapInit::::with_fixed(3, &mut area); let mut writer = init_struct.attach_writer(); - writer.begin_shrink(1); + writer.begin_shrink(1); } - diff --git a/libs/neon-shmem/src/shmem.rs b/libs/neon-shmem/src/shmem.rs index 7c7285f67e..f19f402859 100644 --- a/libs/neon-shmem/src/shmem.rs +++ b/libs/neon-shmem/src/shmem.rs @@ -76,19 +76,15 @@ impl ShmemHandle { 
Self::new_with_fd(fd, initial_size, max_size) } - fn new_with_fd( - fd: OwnedFd, - initial_size: usize, - max_size: usize, - ) -> Result { + fn new_with_fd(fd: OwnedFd, initial_size: usize, max_size: usize) -> Result { // We reserve the high-order bit for the `RESIZE_IN_PROGRESS` flag, and the actual size // is a little larger than this because of the SharedStruct header. Make the upper limit // somewhat smaller than that, because with anything close to that, you'll run out of // memory anyway. assert!(max_size < 1 << 48, "max size {max_size} too large"); - + assert!( - initial_size <= max_size, + initial_size <= max_size, "initial size {initial_size} larger than max size {max_size}" ); @@ -150,12 +146,12 @@ impl ShmemHandle { let shared = self.shared(); assert!( - new_size <= self.max_size, + new_size <= self.max_size, "new size ({new_size}) is greater than max size ({})", - self.max_size + self.max_size ); - assert_eq!(self.max_size, shared.max_size); + assert_eq!(self.max_size, shared.max_size); // Lock the area by setting the bit in `current_size` // @@ -187,9 +183,8 @@ impl ShmemHandle { let result = { use std::cmp::Ordering::{Equal, Greater, Less}; match new_size.cmp(&old_size) { - Less => nix_ftruncate(&self.fd, new_size as i64).map_err(|e| { - Error::new("could not shrink shmem segment, ftruncate failed", e) - }), + Less => nix_ftruncate(&self.fd, new_size as i64) + .map_err(|e| Error::new("could not shrink shmem segment, ftruncate failed", e)), Equal => Ok(()), Greater => enlarge_file(self.fd.as_fd(), new_size as u64), } @@ -207,7 +202,7 @@ impl ShmemHandle { /// Returns the current user-visible size of the shared memory segment. /// /// NOTE: a concurrent [`ShmemHandle::set_size()`] call can change the size at any time. - /// It is the caller's responsibility not to access the area beyond the current size. + /// It is the caller's responsibility not to access the area beyond the current size. pub fn current_size(&self) -> usize { let total_current_size = self.shared().current_size.load(Ordering::Relaxed) & !RESIZE_IN_PROGRESS; @@ -253,12 +248,8 @@ fn enlarge_file(fd: BorrowedFd, size: u64) -> Result<(), Error> { // we don't get a segfault later when trying to actually use it. 
#[cfg(not(target_os = "macos"))] { - nix::fcntl::posix_fallocate(fd, 0, size as i64).map_err(|e| { - Error::new( - "could not grow shmem segment, posix_fallocate failed", - e, - ) - }) + nix::fcntl::posix_fallocate(fd, 0, size as i64) + .map_err(|e| Error::new("could not grow shmem segment, posix_fallocate failed", e)) } // As a fallback on macos, which doesn't have posix_fallocate, use plain 'fallocate' #[cfg(target_os = "macos")] @@ -279,7 +270,7 @@ mod tests { fn assert_range(ptr: *const u8, expected: u8, range: Range) { for i in range { let b = unsafe { *(ptr.add(i)) }; - assert_eq!(expected, b, "unexpected byte at offset {}", i); + assert_eq!(expected, b, "unexpected byte at offset {i}"); } } diff --git a/libs/neonart/Cargo.toml b/libs/neonart/Cargo.toml new file mode 100644 index 0000000000..915269e0cb --- /dev/null +++ b/libs/neonart/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "neonart" +version = "0.1.0" +edition.workspace = true +license.workspace = true + +[dependencies] +crossbeam-utils.workspace = true +spin.workspace = true +tracing.workspace = true + +[dev-dependencies] +rand = "0.9.1" +rand_distr = "0.5.1" diff --git a/libs/neonart/src/algorithm.rs b/libs/neonart/src/algorithm.rs new file mode 100644 index 0000000000..14cccfca6f --- /dev/null +++ b/libs/neonart/src/algorithm.rs @@ -0,0 +1,599 @@ +mod lock_and_version; +pub(crate) mod node_ptr; +mod node_ref; + +use std::vec::Vec; + +use crate::algorithm::lock_and_version::ConcurrentUpdateError; +use crate::algorithm::node_ptr::MAX_PREFIX_LEN; +use crate::algorithm::node_ref::{NewNodeRef, NodeRef, ReadLockedNodeRef, WriteLockedNodeRef}; +use crate::allocator::OutOfMemoryError; + +use crate::TreeWriteGuard; +use crate::UpdateAction; +use crate::allocator::ArtAllocator; +use crate::epoch::EpochPin; +use crate::{Key, Value}; + +pub(crate) type RootPtr = node_ptr::NodePtr; + +#[derive(Debug)] +pub enum ArtError { + ConcurrentUpdate, // need to retry + OutOfMemory, +} + +impl From for ArtError { + fn from(_: ConcurrentUpdateError) -> ArtError { + ArtError::ConcurrentUpdate + } +} + +impl From for ArtError { + fn from(_: OutOfMemoryError) -> ArtError { + ArtError::OutOfMemory + } +} + +pub fn new_root( + allocator: &impl ArtAllocator, +) -> Result, OutOfMemoryError> { + node_ptr::new_root(allocator) +} + +pub(crate) fn search<'e, K: Key, V: Value>( + key: &K, + root: RootPtr, + epoch_pin: &'e EpochPin, +) -> Option<&'e V> { + loop { + let root_ref = NodeRef::from_root_ptr(root); + if let Ok(result) = lookup_recurse(key.as_bytes(), root_ref, None, epoch_pin) { + break result; + } + // retry + } +} + +pub(crate) fn iter_next<'e, V: Value>( + key: &[u8], + root: RootPtr, + epoch_pin: &'e EpochPin, +) -> Option<(Vec, &'e V)> { + loop { + let mut path = Vec::new(); + let root_ref = NodeRef::from_root_ptr(root); + + match next_recurse(key, &mut path, root_ref, epoch_pin) { + Ok(Some(v)) => { + assert_eq!(path.len(), key.len()); + break Some((path, v)); + } + Ok(None) => break None, + Err(ConcurrentUpdateError()) => { + // retry + continue; + } + } + } +} + +pub(crate) fn update_fn<'e, 'g, K: Key, V: Value, A: ArtAllocator, F>( + key: &K, + value_fn: F, + root: RootPtr, + guard: &'g mut TreeWriteGuard<'e, K, V, A>, +) -> Result<(), OutOfMemoryError> +where + F: FnOnce(Option<&V>) -> UpdateAction, +{ + let value_fn_cell = std::cell::Cell::new(Some(value_fn)); + loop { + let root_ref = NodeRef::from_root_ptr(root); + let this_value_fn = |arg: Option<&V>| value_fn_cell.take().unwrap()(arg); + let key_bytes = key.as_bytes(); + + match 
update_recurse( + key_bytes, + this_value_fn, + root_ref, + None, + None, + guard, + 0, + key_bytes, + ) { + Ok(()) => break Ok(()), + Err(ArtError::ConcurrentUpdate) => { + continue; // retry + } + Err(ArtError::OutOfMemory) => break Err(OutOfMemoryError()), + } + } +} + +// Error means you must retry. +// +// This corresponds to the 'lookupOpt' function in the paper +#[allow(clippy::only_used_in_recursion)] +fn lookup_recurse<'e, V: Value>( + key: &[u8], + node: NodeRef<'e, V>, + parent: Option>, + epoch_pin: &'e EpochPin, +) -> Result, ConcurrentUpdateError> { + let rnode = node.read_lock_or_restart()?; + if let Some(parent) = parent { + parent.read_unlock_or_restart()?; + } + + // check if the prefix matches, may increment level + let prefix_len = if let Some(prefix_len) = rnode.prefix_matches(key) { + prefix_len + } else { + rnode.read_unlock_or_restart()?; + return Ok(None); + }; + + if rnode.is_leaf() { + assert_eq!(key.len(), prefix_len); + let vptr = rnode.get_leaf_value_ptr()?; + // safety: It's OK to return a ref of the pointer because we checked the version + // and the lifetime of 'epoch_pin' enforces that the reference is only accessible + // as long as the epoch is pinned. + let v = unsafe { vptr.as_ref().unwrap() }; + return Ok(Some(v)); + } + + let key = &key[prefix_len..]; + + // find child (or leaf value) + let next_node = rnode.find_child_or_restart(key[0])?; + + match next_node { + None => Ok(None), // key not found + Some(child) => lookup_recurse(&key[1..], child, Some(rnode), epoch_pin), + } +} + +#[allow(clippy::only_used_in_recursion)] +fn next_recurse<'e, V: Value>( + min_key: &[u8], + path: &mut Vec, + node: NodeRef<'e, V>, + epoch_pin: &'e EpochPin, +) -> Result, ConcurrentUpdateError> { + let rnode = node.read_lock_or_restart()?; + let prefix = rnode.get_prefix(); + if !prefix.is_empty() { + path.extend_from_slice(prefix); + } + + use std::cmp::Ordering; + let comparison = path.as_slice().cmp(&min_key[0..path.len()]); + if comparison == Ordering::Less { + rnode.read_unlock_or_restart()?; + return Ok(None); + } + + if rnode.is_leaf() { + assert_eq!(path.len(), min_key.len()); + let vptr = rnode.get_leaf_value_ptr()?; + // safety: It's OK to return a ref of the pointer because we checked the version + // and the lifetime of 'epoch_pin' enforces that the reference is only accessible + // as long as the epoch is pinned. + let v = unsafe { vptr.as_ref().unwrap() }; + return Ok(Some(v)); + } + + let mut min_key_byte = match comparison { + Ordering::Less => unreachable!(), // checked this above already + Ordering::Equal => min_key[path.len()], + Ordering::Greater => 0, + }; + + loop { + match rnode.find_next_child_or_restart(min_key_byte)? 
{ + None => { + return Ok(None); + } + Some((key_byte, child_ref)) => { + let path_len = path.len(); + path.push(key_byte); + let result = next_recurse(min_key, path, child_ref, epoch_pin)?; + if result.is_some() { + return Ok(result); + } + if key_byte == u8::MAX { + return Ok(None); + } + path.truncate(path_len); + min_key_byte = key_byte + 1; + } + } + } +} + +// This corresponds to the 'insertOpt' function in the paper +#[allow(clippy::only_used_in_recursion)] +#[allow(clippy::too_many_arguments)] +pub(crate) fn update_recurse<'e, K: Key, V: Value, A: ArtAllocator, F>( + key: &[u8], + value_fn: F, + node: NodeRef<'e, V>, + rparent: Option<(ReadLockedNodeRef, u8)>, + rgrandparent: Option<(ReadLockedNodeRef, u8)>, + guard: &'_ mut TreeWriteGuard<'e, K, V, A>, + level: usize, + orig_key: &[u8], +) -> Result<(), ArtError> +where + F: FnOnce(Option<&V>) -> UpdateAction, +{ + let rnode = node.read_lock_or_restart()?; + + let prefix_match_len = rnode.prefix_matches(key); + if prefix_match_len.is_none() { + let (rparent, parent_key) = rparent.expect("direct children of the root have no prefix"); + let mut wparent = rparent.upgrade_to_write_lock_or_restart()?; + let mut wnode = rnode.upgrade_to_write_lock_or_restart()?; + + match value_fn(None) { + UpdateAction::Nothing => {} + UpdateAction::Insert(new_value) => { + insert_split_prefix(key, new_value, &mut wnode, &mut wparent, parent_key, guard)?; + } + UpdateAction::Remove => { + panic!("unexpected Remove action on insertion"); + } + } + wnode.write_unlock(); + wparent.write_unlock(); + return Ok(()); + } + let prefix_match_len = prefix_match_len.unwrap(); + let key = &key[prefix_match_len..]; + let level = level + prefix_match_len; + + if rnode.is_leaf() { + assert_eq!(key.len(), 0); + let (rparent, parent_key) = rparent.expect("root cannot be leaf"); + let mut wparent = rparent.upgrade_to_write_lock_or_restart()?; + let mut wnode = rnode.upgrade_to_write_lock_or_restart()?; + + // safety: Now that we have acquired the write lock, we have exclusive access to the + // value. XXX: There might be concurrent reads though? + let value_mut = wnode.get_leaf_value_mut(); + + match value_fn(Some(value_mut)) { + UpdateAction::Nothing => { + wparent.write_unlock(); + wnode.write_unlock(); + } + UpdateAction::Insert(_) => panic!("cannot insert over existing value"), + UpdateAction::Remove => { + guard.remember_obsolete_node(wnode.as_ptr()); + wparent.delete_child(parent_key); + wnode.write_unlock_obsolete(); + + if let Some(rgrandparent) = rgrandparent { + // FIXME: Ignore concurrency error. It doesn't lead to + // corruption, but it means we might leak something. Until + // another update cleans it up. 
+ let _ = cleanup_parent(wparent, rgrandparent, guard); + } + } + } + + return Ok(()); + } + + let next_node = rnode.find_child_or_restart(key[0])?; + + if next_node.is_none() { + if rnode.is_full() { + let (rparent, parent_key) = rparent.expect("root node cannot become full"); + let mut wparent = rparent.upgrade_to_write_lock_or_restart()?; + let wnode = rnode.upgrade_to_write_lock_or_restart()?; + + match value_fn(None) { + UpdateAction::Nothing => { + wnode.write_unlock(); + wparent.write_unlock(); + } + UpdateAction::Insert(new_value) => { + insert_and_grow(key, new_value, wnode, &mut wparent, parent_key, guard)?; + wparent.write_unlock(); + } + UpdateAction::Remove => { + panic!("unexpected Remove action on insertion"); + } + }; + } else { + let mut wnode = rnode.upgrade_to_write_lock_or_restart()?; + if let Some((rparent, _)) = rparent { + rparent.read_unlock_or_restart()?; + } + match value_fn(None) { + UpdateAction::Nothing => {} + UpdateAction::Insert(new_value) => { + insert_to_node(&mut wnode, key, new_value, guard)?; + } + UpdateAction::Remove => { + panic!("unexpected Remove action on insertion"); + } + }; + wnode.write_unlock(); + } + Ok(()) + } else { + let next_child = next_node.unwrap(); // checked above it's not None + if let Some((ref rparent, _)) = rparent { + rparent.check_or_restart()?; + } + + // recurse to next level + update_recurse( + &key[1..], + value_fn, + next_child, + Some((rnode, key[0])), + rparent, + guard, + level + 1, + orig_key, + ) + } +} + +#[derive(Clone)] +enum PathElement { + Prefix(Vec), + KeyByte(u8), +} + +impl std::fmt::Debug for PathElement { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + match self { + PathElement::Prefix(prefix) => write!(fmt, "{prefix:?}"), + PathElement::KeyByte(key_byte) => write!(fmt, "{key_byte}"), + } + } +} + +pub(crate) fn dump_tree( + root: RootPtr, + epoch_pin: &'_ EpochPin, + dst: &mut dyn std::io::Write, +) { + let root_ref = NodeRef::from_root_ptr(root); + + let _ = dump_recurse(&[], root_ref, epoch_pin, 0, dst); +} + +// TODO: return an Err if writeln!() returns error, instead of unwrapping +#[allow(clippy::only_used_in_recursion)] +fn dump_recurse<'e, V: Value + std::fmt::Debug>( + path: &[PathElement], + node: NodeRef<'e, V>, + epoch_pin: &'e EpochPin, + level: usize, + dst: &mut dyn std::io::Write, +) -> Result<(), ConcurrentUpdateError> { + let indent = str::repeat(" ", level); + + let rnode = node.read_lock_or_restart()?; + let mut path = Vec::from(path); + let prefix = rnode.get_prefix(); + if !prefix.is_empty() { + path.push(PathElement::Prefix(Vec::from(prefix))); + } + + if rnode.is_leaf() { + let vptr = rnode.get_leaf_value_ptr()?; + // safety: It's OK to return a ref of the pointer because we checked the version + // and the lifetime of 'epoch_pin' enforces that the reference is only accessible + // as long as the epoch is pinned. + let val = unsafe { vptr.as_ref().unwrap() }; + writeln!(dst, "{indent} {path:?}: {val:?}").unwrap(); + return Ok(()); + } + + for key_byte in 0..=u8::MAX { + match rnode.find_child_or_restart(key_byte)? 
{ + None => continue, + Some(child_ref) => { + let rchild = child_ref.read_lock_or_restart()?; + writeln!( + dst, + "{} {:?}, {}: prefix {:?}", + indent, + &path, + key_byte, + rchild.get_prefix() + ) + .unwrap(); + + let mut child_path = path.clone(); + child_path.push(PathElement::KeyByte(key_byte)); + + dump_recurse(&child_path, child_ref, epoch_pin, level + 1, dst)?; + } + } + } + + Ok(()) +} + +///```text +/// [fooba]r -> value +/// +/// [foo]b -> [a]r -> value +/// e -> [ls]e -> value +///``` +fn insert_split_prefix>( + key: &[u8], + value: V, + node: &mut WriteLockedNodeRef, + parent: &mut WriteLockedNodeRef, + parent_key: u8, + guard: &'_ TreeWriteGuard, +) -> Result<(), OutOfMemoryError> { + let old_node = node; + let old_prefix = old_node.get_prefix(); + let common_prefix_len = common_prefix(key, old_prefix); + + // Allocate a node for the new value. + let new_value_node = allocate_node_for_value( + &key[common_prefix_len + 1..], + value, + guard.tree_writer.allocator, + )?; + + // Allocate a new internal node with the common prefix + // FIXME: deallocate 'new_value_node' on OOM + let mut prefix_node = + node_ref::new_internal(&key[..common_prefix_len], guard.tree_writer.allocator)?; + + // Add the old node and the new nodes to the new internal node + prefix_node.insert_old_child(old_prefix[common_prefix_len], old_node); + prefix_node.insert_new_child(key[common_prefix_len], new_value_node); + + // Modify the prefix of the old child in place + old_node.truncate_prefix(old_prefix.len() - common_prefix_len - 1); + + // replace the pointer in the parent + parent.replace_child(parent_key, prefix_node.into_ptr()); + + Ok(()) +} + +fn insert_to_node>( + wnode: &mut WriteLockedNodeRef, + key: &[u8], + value: V, + guard: &'_ TreeWriteGuard, +) -> Result<(), OutOfMemoryError> { + let value_child = allocate_node_for_value(&key[1..], value, guard.tree_writer.allocator)?; + wnode.insert_child(key[0], value_child.into_ptr()); + Ok(()) +} + +// On entry: 'parent' and 'node' are locked +fn insert_and_grow<'e, 'g, K: Key, V: Value, A: ArtAllocator>( + key: &[u8], + value: V, + wnode: WriteLockedNodeRef, + parent: &mut WriteLockedNodeRef, + parent_key_byte: u8, + guard: &'g mut TreeWriteGuard<'e, K, V, A>, +) -> Result<(), ArtError> { + let mut bigger_node = wnode.grow(guard.tree_writer.allocator)?; + + // FIXME: deallocate 'bigger_node' on OOM + let value_child = allocate_node_for_value(&key[1..], value, guard.tree_writer.allocator)?; + bigger_node.insert_new_child(key[0], value_child); + + // Replace the pointer in the parent + parent.replace_child(parent_key_byte, bigger_node.into_ptr()); + + guard.remember_obsolete_node(wnode.as_ptr()); + wnode.write_unlock_obsolete(); + + Ok(()) +} + +fn cleanup_parent<'e, 'g, K: Key, V: Value, A: ArtAllocator>( + wparent: WriteLockedNodeRef, + rgrandparent: (ReadLockedNodeRef, u8), + guard: &'g mut TreeWriteGuard<'e, K, V, A>, +) -> Result<(), ArtError> { + let (rgrandparent, grandparent_key_byte) = rgrandparent; + + // If the parent becomes completely empty after the deletion, remove the parent from the + // grandparent. (This case is possible because we reserve only 8 bytes for the prefix.) + // TODO: not implemented. + + // If the parent has only one child, replace the parent with the remaining child. (This is not + // possible if the child's prefix field cannot absorb the parent's) + if wparent.num_children() == 1 { + // Try to lock the remaining child. This can fail if the child is updated + // concurrently. 
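+        // (If that happens, the error propagates to the caller, which treats the
+        // cleanup as best-effort and leaves the parent as it is; see the call
+        // site in update_recurse().)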
+ let (key_byte, remaining_child) = wparent.find_remaining_child(); + + let mut wremaining_child = remaining_child.write_lock_or_restart()?; + + if 1 + wremaining_child.get_prefix().len() + wparent.get_prefix().len() <= MAX_PREFIX_LEN { + let mut wgrandparent = rgrandparent.upgrade_to_write_lock_or_restart()?; + + // Ok, we have locked the leaf, the parent, the grandparent, and the parent's only + // remaining leaf. Proceed with the updates. + + // Update the prefix on the remaining leaf + wremaining_child.prepend_prefix(wparent.get_prefix(), key_byte); + + // Replace the pointer in the grandparent to point directly to the remaining leaf + wgrandparent.replace_child(grandparent_key_byte, wremaining_child.as_ptr()); + + // Mark the parent as deleted. + guard.remember_obsolete_node(wparent.as_ptr()); + wparent.write_unlock_obsolete(); + return Ok(()); + } + } + + // If the parent's children would fit on a smaller node type after the deletion, replace it with + // a smaller node. + if wparent.can_shrink() { + let mut wgrandparent = rgrandparent.upgrade_to_write_lock_or_restart()?; + let smaller_node = wparent.shrink(guard.tree_writer.allocator)?; + + // Replace the pointer in the grandparent + wgrandparent.replace_child(grandparent_key_byte, smaller_node.into_ptr()); + + guard.remember_obsolete_node(wparent.as_ptr()); + wparent.write_unlock_obsolete(); + return Ok(()); + } + + // nothing to do + wparent.write_unlock(); + Ok(()) +} + +// Allocate a new leaf node to hold 'value'. If the key is long, we +// may need to allocate new internal nodes to hold it too +fn allocate_node_for_value<'a, V: Value, A: ArtAllocator>( + key: &[u8], + value: V, + allocator: &'a A, +) -> Result, OutOfMemoryError> { + let mut prefix_off = key.len().saturating_sub(MAX_PREFIX_LEN); + + let leaf_node = node_ref::new_leaf(&key[prefix_off..key.len()], value, allocator)?; + + let mut node = leaf_node; + while prefix_off > 0 { + // Need another internal node + let remain_prefix = &key[0..prefix_off]; + + prefix_off = remain_prefix.len().saturating_sub(MAX_PREFIX_LEN + 1); + let mut internal_node = node_ref::new_internal( + &remain_prefix[prefix_off..remain_prefix.len() - 1], + allocator, + )?; + internal_node.insert_new_child(*remain_prefix.last().unwrap(), node); + node = internal_node; + } + + Ok(node) +} + +fn common_prefix(a: &[u8], b: &[u8]) -> usize { + for i in 0..MAX_PREFIX_LEN { + if a[i] != b[i] { + return i; + } + } + panic!("prefixes are equal"); +} diff --git a/libs/neonart/src/algorithm/lock_and_version.rs b/libs/neonart/src/algorithm/lock_and_version.rs new file mode 100644 index 0000000000..025897864c --- /dev/null +++ b/libs/neonart/src/algorithm/lock_and_version.rs @@ -0,0 +1,117 @@ +//! Each node in the tree has contains one atomic word that stores three things: +//! +//! Bit 0: set if the node is "obsolete". An obsolete node has been removed from the tree, +//! but might still be accessed by concurrent readers until the epoch expires. +//! Bit 1: set if the node is currently write-locked. Used as a spinlock. +//! Bits 2-63: Version number, incremented every time the node is modified. +//! +//! AtomicLockAndVersion represents that. 
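+//!
+//! As a reading aid for the arithmetic in the methods below (illustrative only;
+//! the code is authoritative), a word currently holding version V moves through:
+//!
+//! ```text
+//!  version  locked  obsolete
+//!    V        0        0      unlocked, live
+//!    V        1        0      after +2 (set_locked_bit, used by both lock paths)
+//!    V+1      0        0      after a further +2 (write_unlock)
+//!    V+1      0        1      or a further +3 (write_unlock_obsolete)
+//! ```
+
+// A minimal single-threaded sanity check of the transitions sketched above.
+// It deliberately sticks to the happy path, and checks results with
+// is_ok()/is_err() because ConcurrentUpdateError has no Debug impl.
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn lock_word_transitions() {
+        let lv = AtomicLockAndVersion::new();
+
+        // A fresh word is readable: version 0, not locked, not obsolete.
+        let v = match lv.read_lock_or_restart() {
+            Ok(v) => v,
+            Err(_) => panic!("fresh word should be readable"),
+        };
+
+        // With no concurrent writer, the optimistic read can be upgraded...
+        assert!(lv.upgrade_to_write_lock_or_restart(v).is_ok());
+        // ...and a second writer is rejected while the lock is held.
+        assert!(lv.write_lock_or_restart().is_err());
+
+        // Unlocking bumps the version, so the optimistic read taken above is
+        // stale and must restart.
+        lv.write_unlock();
+        assert!(lv.read_unlock_or_restart(v).is_err());
+    }
+}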
+ +use std::sync::atomic::{AtomicU64, Ordering}; + +pub(crate) struct ConcurrentUpdateError(); + +pub(crate) struct AtomicLockAndVersion { + inner: AtomicU64, +} + +impl AtomicLockAndVersion { + pub(crate) fn new() -> AtomicLockAndVersion { + AtomicLockAndVersion { + inner: AtomicU64::new(0), + } + } +} + +impl AtomicLockAndVersion { + pub(crate) fn read_lock_or_restart(&self) -> Result { + let version = self.await_node_unlocked(); + if is_obsolete(version) { + return Err(ConcurrentUpdateError()); + } + Ok(version) + } + + pub(crate) fn check_or_restart(&self, version: u64) -> Result<(), ConcurrentUpdateError> { + self.read_unlock_or_restart(version) + } + + pub(crate) fn read_unlock_or_restart(&self, version: u64) -> Result<(), ConcurrentUpdateError> { + if self.inner.load(Ordering::Acquire) != version { + return Err(ConcurrentUpdateError()); + } + Ok(()) + } + + pub(crate) fn upgrade_to_write_lock_or_restart( + &self, + version: u64, + ) -> Result<(), ConcurrentUpdateError> { + if self + .inner + .compare_exchange( + version, + set_locked_bit(version), + Ordering::Acquire, + Ordering::Relaxed, + ) + .is_err() + { + return Err(ConcurrentUpdateError()); + } + Ok(()) + } + + pub(crate) fn write_lock_or_restart(&self) -> Result<(), ConcurrentUpdateError> { + let old = self.inner.load(Ordering::Relaxed); + if is_obsolete(old) || is_locked(old) { + return Err(ConcurrentUpdateError()); + } + if self + .inner + .compare_exchange( + old, + set_locked_bit(old), + Ordering::Acquire, + Ordering::Relaxed, + ) + .is_err() + { + return Err(ConcurrentUpdateError()); + } + Ok(()) + } + + pub(crate) fn write_unlock(&self) { + // reset locked bit and overflow into version + self.inner.fetch_add(2, Ordering::Release); + } + + pub(crate) fn write_unlock_obsolete(&self) { + // set obsolete, reset locked, overflow into version + self.inner.fetch_add(3, Ordering::Release); + } + + // Helper functions + fn await_node_unlocked(&self) -> u64 { + let mut version = self.inner.load(Ordering::Acquire); + while is_locked(version) { + // spinlock + std::thread::yield_now(); + version = self.inner.load(Ordering::Acquire) + } + version + } +} + +fn set_locked_bit(version: u64) -> u64 { + version + 2 +} + +fn is_obsolete(version: u64) -> bool { + (version & 1) == 1 +} + +fn is_locked(version: u64) -> bool { + (version & 2) == 2 +} diff --git a/libs/neonart/src/algorithm/node_ptr.rs b/libs/neonart/src/algorithm/node_ptr.rs new file mode 100644 index 0000000000..45822ba888 --- /dev/null +++ b/libs/neonart/src/algorithm/node_ptr.rs @@ -0,0 +1,1099 @@ +//! This file contains the implementations of all the different node variants. +//! These implementations use pointers, see node_ref.rs for slightly safer +//! wrappers that deal with references instead. 
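+//!
+//! For orientation (informal summary; the struct definitions below are
+//! authoritative), the variants follow the usual ART node sizes:
+//!
+//! ```text
+//! Internal4     up to 4 children,   parallel key-byte / child-pointer arrays
+//! Internal16    up to 16 children,  parallel key-byte / child-pointer arrays
+//! Internal48    up to 48 children,  256-entry key index into 48 pointer slots
+//! Internal256   up to 256 children, one pointer slot per possible key byte
+//! Leaf          no children,        stores the value V inline
+//! ```
+//!
+//! Every variant also carries a prefix of up to MAX_PREFIX_LEN (8) bytes and an
+//! AtomicLockAndVersion word used for optimistic lock coupling.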
+use std::marker::PhantomData; +use std::ptr::NonNull; + +use super::lock_and_version::AtomicLockAndVersion; + +use crate::Value; +use crate::allocator::ArtAllocator; +use crate::allocator::OutOfMemoryError; + +pub(crate) const MAX_PREFIX_LEN: usize = 8; + +enum NodeTag { + Internal4, + Internal16, + Internal48, + Internal256, + Leaf, +} + +#[repr(C)] +struct NodeBase { + tag: NodeTag, +} + +pub(crate) struct NodePtr { + ptr: *mut NodeBase, + + phantom_value: PhantomData, +} + +impl PartialEq for NodePtr { + fn eq(&self, other: &NodePtr) -> bool { + self.ptr == other.ptr + } +} + +impl std::fmt::Debug for NodePtr { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(fmt, "0x{}", self.ptr.addr()) + } +} + +impl Copy for NodePtr {} +impl Clone for NodePtr { + #[allow(clippy::non_canonical_clone_impl)] + fn clone(&self) -> NodePtr { + NodePtr { + ptr: self.ptr, + phantom_value: PhantomData, + } + } +} + +enum NodeVariant<'a, V> { + Internal4(&'a NodeInternal4), + Internal16(&'a NodeInternal16), + Internal48(&'a NodeInternal48), + Internal256(&'a NodeInternal256), + Leaf(&'a NodeLeaf), +} + +enum NodeVariantMut<'a, V> { + Internal4(&'a mut NodeInternal4), + Internal16(&'a mut NodeInternal16), + Internal48(&'a mut NodeInternal48), + Internal256(&'a mut NodeInternal256), + Leaf(&'a mut NodeLeaf), +} + +#[repr(C)] +pub struct NodeInternal4 { + tag: NodeTag, + prefix_len: u8, + num_children: u8, + + child_keys: [u8; 4], + + lock_and_version: AtomicLockAndVersion, + prefix: [u8; MAX_PREFIX_LEN], + + child_ptrs: [NodePtr; 4], +} + +#[repr(C)] +pub struct NodeInternal16 { + tag: NodeTag, + prefix_len: u8, + num_children: u8, + + lock_and_version: AtomicLockAndVersion, + prefix: [u8; MAX_PREFIX_LEN], + child_keys: [u8; 16], + child_ptrs: [NodePtr; 16], +} + +#[repr(C)] +pub struct NodeInternal48 { + tag: NodeTag, + prefix_len: u8, + num_children: u8, + + lock_and_version: AtomicLockAndVersion, + prefix: [u8; MAX_PREFIX_LEN], + child_indexes: [u8; 256], + child_ptrs: [NodePtr; 48], +} +const INVALID_CHILD_INDEX: u8 = u8::MAX; + +#[repr(C)] +pub struct NodeInternal256 { + tag: NodeTag, + prefix_len: u8, + num_children: u16, + + lock_and_version: AtomicLockAndVersion, + prefix: [u8; MAX_PREFIX_LEN], + + child_ptrs: [NodePtr; 256], +} + +#[repr(C)] +pub struct NodeLeaf { + tag: NodeTag, + prefix_len: u8, + + // TODO: It's not clear if we need a full version on leaf nodes. I think a single bit + // to indicate if the node is obsolete would be sufficient. 
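+    // (Note that readers currently rely on this version: get_leaf_value_ptr()
+    // in ReadLockedNodeRef re-checks it after reading the value pointer.)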
+ lock_and_version: AtomicLockAndVersion, + prefix: [u8; MAX_PREFIX_LEN], + + value: V, +} + +impl NodePtr { + pub(crate) fn is_leaf(&self) -> bool { + match self.variant() { + NodeVariant::Internal4(_) => false, + NodeVariant::Internal16(_) => false, + NodeVariant::Internal48(_) => false, + NodeVariant::Internal256(_) => false, + NodeVariant::Leaf(_) => true, + } + } + + pub(crate) fn lockword(&self) -> &AtomicLockAndVersion { + match self.variant() { + NodeVariant::Internal4(n) => &n.lock_and_version, + NodeVariant::Internal16(n) => &n.lock_and_version, + NodeVariant::Internal48(n) => &n.lock_and_version, + NodeVariant::Internal256(n) => &n.lock_and_version, + NodeVariant::Leaf(n) => &n.lock_and_version, + } + } + + pub(crate) fn is_null(&self) -> bool { + self.ptr.is_null() + } + + pub(crate) const fn null() -> NodePtr { + NodePtr { + ptr: std::ptr::null_mut(), + phantom_value: PhantomData, + } + } + + fn variant(&self) -> NodeVariant { + unsafe { + match (*self.ptr).tag { + NodeTag::Internal4 => NodeVariant::Internal4( + NonNull::new_unchecked(self.ptr.cast::>()).as_ref(), + ), + NodeTag::Internal16 => NodeVariant::Internal16( + NonNull::new_unchecked(self.ptr.cast::>()).as_ref(), + ), + NodeTag::Internal48 => NodeVariant::Internal48( + NonNull::new_unchecked(self.ptr.cast::>()).as_ref(), + ), + NodeTag::Internal256 => NodeVariant::Internal256( + NonNull::new_unchecked(self.ptr.cast::>()).as_ref(), + ), + NodeTag::Leaf => NodeVariant::Leaf( + NonNull::new_unchecked(self.ptr.cast::>()).as_ref(), + ), + } + } + } + + fn variant_mut(&mut self) -> NodeVariantMut { + unsafe { + match (*self.ptr).tag { + NodeTag::Internal4 => NodeVariantMut::Internal4( + NonNull::new_unchecked(self.ptr.cast::>()).as_mut(), + ), + NodeTag::Internal16 => NodeVariantMut::Internal16( + NonNull::new_unchecked(self.ptr.cast::>()).as_mut(), + ), + NodeTag::Internal48 => NodeVariantMut::Internal48( + NonNull::new_unchecked(self.ptr.cast::>()).as_mut(), + ), + NodeTag::Internal256 => NodeVariantMut::Internal256( + NonNull::new_unchecked(self.ptr.cast::>()).as_mut(), + ), + NodeTag::Leaf => NodeVariantMut::Leaf( + NonNull::new_unchecked(self.ptr.cast::>()).as_mut(), + ), + } + } + } +} + +impl NodePtr { + pub(crate) fn prefix_matches(&self, key: &[u8]) -> Option { + let node_prefix = self.get_prefix(); + assert!(node_prefix.len() <= key.len()); // because we only use fixed-size keys + if &key[0..node_prefix.len()] != node_prefix { + None + } else { + Some(node_prefix.len()) + } + } + + pub(crate) fn get_prefix(&self) -> &[u8] { + match self.variant() { + NodeVariant::Internal4(n) => n.get_prefix(), + NodeVariant::Internal16(n) => n.get_prefix(), + NodeVariant::Internal48(n) => n.get_prefix(), + NodeVariant::Internal256(n) => n.get_prefix(), + NodeVariant::Leaf(n) => n.get_prefix(), + } + } + + pub(crate) fn is_full(&self) -> bool { + match self.variant() { + NodeVariant::Internal4(n) => n.is_full(), + NodeVariant::Internal16(n) => n.is_full(), + NodeVariant::Internal48(n) => n.is_full(), + NodeVariant::Internal256(n) => n.is_full(), + NodeVariant::Leaf(_) => panic!("is_full() called on leaf node"), + } + } + + pub(crate) fn num_children(&self) -> usize { + match self.variant() { + NodeVariant::Internal4(n) => n.num_children as usize, + NodeVariant::Internal16(n) => n.num_children as usize, + NodeVariant::Internal48(n) => n.num_children as usize, + NodeVariant::Internal256(n) => n.num_children as usize, + NodeVariant::Leaf(_) => panic!("is_full() called on leaf node"), + } + } + + pub(crate) fn can_shrink(&self) -> 
bool { + match self.variant() { + NodeVariant::Internal4(n) => n.can_shrink(), + NodeVariant::Internal16(n) => n.can_shrink(), + NodeVariant::Internal48(n) => n.can_shrink(), + NodeVariant::Internal256(n) => n.can_shrink(), + NodeVariant::Leaf(_) => panic!("can_shrink() called on leaf node"), + } + } + + pub(crate) fn find_child(&self, key_byte: u8) -> Option> { + match self.variant() { + NodeVariant::Internal4(n) => n.find_child(key_byte), + NodeVariant::Internal16(n) => n.find_child(key_byte), + NodeVariant::Internal48(n) => n.find_child(key_byte), + NodeVariant::Internal256(n) => n.find_child(key_byte), + NodeVariant::Leaf(_) => panic!("find_child called on leaf node"), + } + } + + pub(crate) fn find_next_child(&self, key_byte: u8) -> Option<(u8, NodePtr)> { + match self.variant() { + NodeVariant::Internal4(n) => n.find_next_child(key_byte), + NodeVariant::Internal16(n) => n.find_next_child(key_byte), + NodeVariant::Internal48(n) => n.find_next_child(key_byte), + NodeVariant::Internal256(n) => n.find_next_child(key_byte), + NodeVariant::Leaf(_) => panic!("find_next_child called on leaf node"), + } + } + + pub(crate) fn truncate_prefix(&mut self, new_prefix_len: usize) { + match self.variant_mut() { + NodeVariantMut::Internal4(n) => n.truncate_prefix(new_prefix_len), + NodeVariantMut::Internal16(n) => n.truncate_prefix(new_prefix_len), + NodeVariantMut::Internal48(n) => n.truncate_prefix(new_prefix_len), + NodeVariantMut::Internal256(n) => n.truncate_prefix(new_prefix_len), + NodeVariantMut::Leaf(n) => n.truncate_prefix(new_prefix_len), + } + } + + pub(crate) fn prepend_prefix(&mut self, prefix: &[u8], prefix_byte: u8) { + match self.variant_mut() { + NodeVariantMut::Internal4(n) => n.prepend_prefix(prefix, prefix_byte), + NodeVariantMut::Internal16(n) => n.prepend_prefix(prefix, prefix_byte), + NodeVariantMut::Internal48(n) => n.prepend_prefix(prefix, prefix_byte), + NodeVariantMut::Internal256(n) => n.prepend_prefix(prefix, prefix_byte), + NodeVariantMut::Leaf(n) => n.prepend_prefix(prefix, prefix_byte), + } + } + + pub(crate) fn grow( + &self, + allocator: &impl ArtAllocator, + ) -> Result, OutOfMemoryError> { + match self.variant() { + NodeVariant::Internal4(n) => n.grow(allocator), + NodeVariant::Internal16(n) => n.grow(allocator), + NodeVariant::Internal48(n) => n.grow(allocator), + NodeVariant::Internal256(_) => panic!("cannot grow Internal256 node"), + NodeVariant::Leaf(_) => panic!("cannot grow Leaf node"), + } + } + + pub(crate) fn insert_child(&mut self, key_byte: u8, child: NodePtr) { + match self.variant_mut() { + NodeVariantMut::Internal4(n) => n.insert_child(key_byte, child), + NodeVariantMut::Internal16(n) => n.insert_child(key_byte, child), + NodeVariantMut::Internal48(n) => n.insert_child(key_byte, child), + NodeVariantMut::Internal256(n) => n.insert_child(key_byte, child), + NodeVariantMut::Leaf(_) => panic!("insert_child called on leaf node"), + } + } + + pub(crate) fn replace_child(&mut self, key_byte: u8, replacement: NodePtr) { + match self.variant_mut() { + NodeVariantMut::Internal4(n) => n.replace_child(key_byte, replacement), + NodeVariantMut::Internal16(n) => n.replace_child(key_byte, replacement), + NodeVariantMut::Internal48(n) => n.replace_child(key_byte, replacement), + NodeVariantMut::Internal256(n) => n.replace_child(key_byte, replacement), + NodeVariantMut::Leaf(_) => panic!("replace_child called on leaf node"), + } + } + + pub(crate) fn delete_child(&mut self, key_byte: u8) { + match self.variant_mut() { + NodeVariantMut::Internal4(n) => 
n.delete_child(key_byte), + NodeVariantMut::Internal16(n) => n.delete_child(key_byte), + NodeVariantMut::Internal48(n) => n.delete_child(key_byte), + NodeVariantMut::Internal256(n) => n.delete_child(key_byte), + NodeVariantMut::Leaf(_) => panic!("delete_child called on leaf node"), + } + } + + pub(crate) fn shrink( + &self, + allocator: &impl ArtAllocator, + ) -> Result, OutOfMemoryError> { + match self.variant() { + NodeVariant::Internal4(_) => panic!("shrink called on internal4 node"), + NodeVariant::Internal16(n) => n.shrink(allocator), + NodeVariant::Internal48(n) => n.shrink(allocator), + NodeVariant::Internal256(n) => n.shrink(allocator), + NodeVariant::Leaf(_) => panic!("shrink called on leaf node"), + } + } + + pub(crate) fn get_leaf_value(&self) -> &V { + match self.variant() { + NodeVariant::Internal4(_) + | NodeVariant::Internal16(_) + | NodeVariant::Internal48(_) + | NodeVariant::Internal256(_) => panic!("get_leaf_value called on internal node"), + NodeVariant::Leaf(n) => n.get_leaf_value(), + } + } + + pub(crate) fn get_leaf_value_mut(&mut self) -> &mut V { + match self.variant_mut() { + NodeVariantMut::Internal4(_) + | NodeVariantMut::Internal16(_) + | NodeVariantMut::Internal48(_) + | NodeVariantMut::Internal256(_) => panic!("get_leaf_value called on internal node"), + NodeVariantMut::Leaf(n) => n.get_leaf_value_mut(), + } + } + + pub(crate) fn deallocate(self, allocator: &impl ArtAllocator) { + match self.variant() { + NodeVariant::Internal4(_) => allocator.dealloc_node_internal4(self.ptr.cast()), + NodeVariant::Internal16(_) => allocator.dealloc_node_internal16(self.ptr.cast()), + NodeVariant::Internal48(_) => allocator.dealloc_node_internal48(self.ptr.cast()), + NodeVariant::Internal256(_) => allocator.dealloc_node_internal256(self.ptr.cast()), + NodeVariant::Leaf(_) => allocator.dealloc_node_leaf(self.ptr.cast()), + } + } +} + +pub fn new_root( + allocator: &impl ArtAllocator, +) -> Result, OutOfMemoryError> { + let ptr: *mut NodeInternal256 = allocator.alloc_node_internal256().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + + unsafe { + *ptr = NodeInternal256::::new(); + } + + Ok(ptr.into()) +} + +pub fn new_internal( + prefix: &[u8], + allocator: &impl ArtAllocator, +) -> Result, OutOfMemoryError> { + let ptr: *mut NodeInternal4 = allocator.alloc_node_internal4().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + let mut init = NodeInternal4 { + tag: NodeTag::Internal4, + lock_and_version: AtomicLockAndVersion::new(), + + prefix: [8; MAX_PREFIX_LEN], + prefix_len: prefix.len() as u8, + num_children: 0, + + child_keys: [0; 4], + child_ptrs: [const { NodePtr::null() }; 4], + }; + init.prefix[0..prefix.len()].copy_from_slice(prefix); + unsafe { ptr.write(init) }; + + Ok(ptr.into()) +} + +pub fn new_leaf( + prefix: &[u8], + value: V, + allocator: &impl ArtAllocator, +) -> Result, OutOfMemoryError> { + let ptr: *mut NodeLeaf = allocator.alloc_node_leaf().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + let mut init = NodeLeaf { + tag: NodeTag::Leaf, + lock_and_version: AtomicLockAndVersion::new(), + + prefix: [8; MAX_PREFIX_LEN], + prefix_len: prefix.len() as u8, + + value, + }; + init.prefix[0..prefix.len()].copy_from_slice(prefix); + unsafe { ptr.write(init) }; + + Ok(ptr.into()) +} + +impl NodeInternal4 { + fn get_prefix(&self) -> &[u8] { + &self.prefix[0..self.prefix_len as usize] + } + + fn prepend_prefix(&mut self, prefix: &[u8], prefix_byte: u8) { + assert!(1 + prefix.len() + self.prefix_len as usize <= 
MAX_PREFIX_LEN); + let mut new = Vec::with_capacity(MAX_PREFIX_LEN); + new.extend_from_slice(prefix); + new.push(prefix_byte); + new.extend_from_slice(&self.prefix[0..self.prefix_len as usize]); + self.prefix[0..new.len()].copy_from_slice(&new); + self.prefix_len = new.len() as u8; + } + + fn truncate_prefix(&mut self, new_prefix_len: usize) { + assert!(new_prefix_len < self.prefix_len as usize); + let prefix = &mut self.prefix; + let offset = self.prefix_len as usize - new_prefix_len; + for i in 0..new_prefix_len { + prefix[i] = prefix[i + offset]; + } + self.prefix_len = new_prefix_len as u8; + } + + fn find_child(&self, key: u8) -> Option> { + for i in 0..self.num_children as usize { + if self.child_keys[i] == key { + return Some(self.child_ptrs[i]); + } + } + None + } + + fn find_next_child(&self, min_key: u8) -> Option<(u8, NodePtr)> { + let mut found: Option<(usize, u8)> = None; + for i in 0..self.num_children as usize { + let this_key = self.child_keys[i]; + if this_key >= min_key { + if let Some((_, found_key)) = found { + if this_key < found_key { + found = Some((i, this_key)); + } + } else { + found = Some((i, this_key)); + } + } + } + if let Some((found_idx, found_key)) = found { + Some((found_key, self.child_ptrs[found_idx])) + } else { + None + } + } + + fn replace_child(&mut self, key_byte: u8, replacement: NodePtr) { + for i in 0..self.num_children as usize { + if self.child_keys[i] == key_byte { + self.child_ptrs[i] = replacement; + return; + } + } + panic!("could not re-find parent with key {key_byte}"); + } + + fn delete_child(&mut self, key_byte: u8) { + for i in 0..self.num_children as usize { + if self.child_keys[i] == key_byte { + self.num_children -= 1; + for j in i..self.num_children as usize { + self.child_keys[j] = self.child_keys[j + 1]; + self.child_ptrs[j] = self.child_ptrs[j + 1]; + } + return; + } + } + panic!("could not re-find parent with key {key_byte}"); + } + + fn is_full(&self) -> bool { + self.num_children == 4 + } + + fn can_shrink(&self) -> bool { + false + } + + fn insert_child(&mut self, key_byte: u8, child: NodePtr) { + assert!(self.num_children < 4); + + let idx = self.num_children as usize; + self.child_keys[idx] = key_byte; + self.child_ptrs[idx] = child; + self.num_children += 1; + } + + fn grow(&self, allocator: &impl ArtAllocator) -> Result, OutOfMemoryError> { + let ptr: *mut NodeInternal16 = allocator.alloc_node_internal16().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + let mut init = NodeInternal16 { + tag: NodeTag::Internal16, + lock_and_version: AtomicLockAndVersion::new(), + + prefix: self.prefix, + prefix_len: self.prefix_len, + num_children: self.num_children, + + child_keys: [0; 16], + child_ptrs: [const { NodePtr::null() }; 16], + }; + for i in 0..self.num_children as usize { + init.child_keys[i] = self.child_keys[i]; + init.child_ptrs[i] = self.child_ptrs[i]; + } + unsafe { ptr.write(init) }; + Ok(ptr.into()) + } +} + +impl NodeInternal16 { + fn get_prefix(&self) -> &[u8] { + &self.prefix[0..self.prefix_len as usize] + } + + fn prepend_prefix(&mut self, prefix: &[u8], prefix_byte: u8) { + assert!(1 + prefix.len() + self.prefix_len as usize <= MAX_PREFIX_LEN); + let mut new = Vec::with_capacity(MAX_PREFIX_LEN); + new.extend_from_slice(prefix); + new.push(prefix_byte); + new.extend_from_slice(&self.prefix[0..self.prefix_len as usize]); + self.prefix[0..new.len()].copy_from_slice(&new); + self.prefix_len = new.len() as u8; + } + + fn truncate_prefix(&mut self, new_prefix_len: usize) { + assert!(new_prefix_len < 
self.prefix_len as usize); + let prefix = &mut self.prefix; + let offset = self.prefix_len as usize - new_prefix_len; + for i in 0..new_prefix_len { + prefix[i] = prefix[i + offset]; + } + self.prefix_len = new_prefix_len as u8; + } + + fn find_child(&self, key_byte: u8) -> Option> { + for i in 0..self.num_children as usize { + if self.child_keys[i] == key_byte { + return Some(self.child_ptrs[i]); + } + } + None + } + + fn find_next_child(&self, min_key: u8) -> Option<(u8, NodePtr)> { + let mut found: Option<(usize, u8)> = None; + for i in 0..self.num_children as usize { + let this_key = self.child_keys[i]; + if this_key >= min_key { + if let Some((_, found_key)) = found { + if this_key < found_key { + found = Some((i, this_key)); + } + } else { + found = Some((i, this_key)); + } + } + } + if let Some((found_idx, found_key)) = found { + Some((found_key, self.child_ptrs[found_idx])) + } else { + None + } + } + + fn replace_child(&mut self, key_byte: u8, replacement: NodePtr) { + for i in 0..self.num_children as usize { + if self.child_keys[i] == key_byte { + self.child_ptrs[i] = replacement; + return; + } + } + panic!("could not re-find parent with key {key_byte}"); + } + + fn delete_child(&mut self, key_byte: u8) { + for i in 0..self.num_children as usize { + if self.child_keys[i] == key_byte { + self.num_children -= 1; + for j in i..self.num_children as usize { + self.child_keys[j] = self.child_keys[j + 1]; + self.child_ptrs[j] = self.child_ptrs[j + 1]; + } + return; + } + } + panic!("could not re-find parent with key {key_byte}"); + } + + fn is_full(&self) -> bool { + self.num_children == 16 + } + + fn can_shrink(&self) -> bool { + self.num_children <= 4 + } + + fn insert_child(&mut self, key_byte: u8, child: NodePtr) { + assert!(self.num_children < 16); + + let idx = self.num_children as usize; + self.child_keys[idx] = key_byte; + self.child_ptrs[idx] = child; + self.num_children += 1; + } + + fn grow(&self, allocator: &impl ArtAllocator) -> Result, OutOfMemoryError> { + let ptr: *mut NodeInternal48 = allocator.alloc_node_internal48().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + let mut init = NodeInternal48 { + tag: NodeTag::Internal48, + lock_and_version: AtomicLockAndVersion::new(), + + prefix: self.prefix, + prefix_len: self.prefix_len, + num_children: self.num_children, + + child_indexes: [INVALID_CHILD_INDEX; 256], + child_ptrs: [const { NodePtr::null() }; 48], + }; + for i in 0..self.num_children as usize { + let idx = self.child_keys[i] as usize; + init.child_indexes[idx] = i as u8; + init.child_ptrs[i] = self.child_ptrs[i]; + } + init.validate(); + unsafe { ptr.write(init) }; + Ok(ptr.into()) + } + + fn shrink(&self, allocator: &impl ArtAllocator) -> Result, OutOfMemoryError> { + assert!(self.num_children <= 4); + let ptr: *mut NodeInternal4 = allocator.alloc_node_internal4().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + let mut init = NodeInternal4 { + tag: NodeTag::Internal4, + lock_and_version: AtomicLockAndVersion::new(), + + prefix: self.prefix, + prefix_len: self.prefix_len, + num_children: self.num_children, + + child_keys: [0; 4], + child_ptrs: [const { NodePtr::null() }; 4], + }; + for i in 0..self.num_children as usize { + init.child_keys[i] = self.child_keys[i]; + init.child_ptrs[i] = self.child_ptrs[i]; + } + unsafe { ptr.write(init) }; + Ok(ptr.into()) + } +} + +impl NodeInternal48 { + fn validate(&self) { + let mut shadow_indexes = std::collections::HashSet::new(); + let mut count = 0; + for i in 0..256 { + let idx = 
self.child_indexes[i]; + if idx != INVALID_CHILD_INDEX { + assert!( + idx < self.num_children, + "i {} idx {}, num_children {}", + i, + idx, + self.num_children + ); + assert!(!shadow_indexes.contains(&idx)); + shadow_indexes.insert(idx); + count += 1; + } + } + assert_eq!(count, self.num_children); + } + + fn prepend_prefix(&mut self, prefix: &[u8], prefix_byte: u8) { + assert!(1 + prefix.len() + self.prefix_len as usize <= MAX_PREFIX_LEN); + let mut new = Vec::with_capacity(MAX_PREFIX_LEN); + new.extend_from_slice(prefix); + new.push(prefix_byte); + new.extend_from_slice(&self.prefix[0..self.prefix_len as usize]); + self.prefix[0..new.len()].copy_from_slice(&new); + self.prefix_len = new.len() as u8; + } + + fn get_prefix(&self) -> &[u8] { + &self.prefix[0..self.prefix_len as usize] + } + + fn truncate_prefix(&mut self, new_prefix_len: usize) { + assert!(new_prefix_len < self.prefix_len as usize); + let prefix = &mut self.prefix; + let offset = self.prefix_len as usize - new_prefix_len; + for i in 0..new_prefix_len { + prefix[i] = prefix[i + offset]; + } + self.prefix_len = new_prefix_len as u8; + } + + fn find_child(&self, key_byte: u8) -> Option> { + let idx = self.child_indexes[key_byte as usize]; + if idx != INVALID_CHILD_INDEX { + Some(self.child_ptrs[idx as usize]) + } else { + None + } + } + + fn find_next_child(&self, min_key: u8) -> Option<(u8, NodePtr)> { + for key in min_key..=u8::MAX { + let idx = self.child_indexes[key as usize]; + if idx != INVALID_CHILD_INDEX { + return Some((key, self.child_ptrs[idx as usize])); + } + } + None + } + + fn replace_child(&mut self, key_byte: u8, replacement: NodePtr) { + let idx = self.child_indexes[key_byte as usize]; + if idx == INVALID_CHILD_INDEX { + panic!("could not re-find parent with key {key_byte}"); + } + self.child_ptrs[idx as usize] = replacement; + self.validate(); + } + + fn delete_child(&mut self, key_byte: u8) { + let idx = self.child_indexes[key_byte as usize] as usize; + if idx == INVALID_CHILD_INDEX as usize { + panic!("could not re-find parent with key {key_byte}"); + } + + // Compact the child_ptrs array + let removed_idx = (self.num_children - 1) as usize; + if idx != removed_idx { + for i in 0..=u8::MAX as usize { + if self.child_indexes[i] as usize == removed_idx { + self.child_indexes[i] = idx as u8; + self.child_ptrs[idx] = self.child_ptrs[removed_idx]; + + self.child_indexes[key_byte as usize] = INVALID_CHILD_INDEX; + self.num_children -= 1; + self.validate(); + return; + } + } + panic!("could not re-find last index {removed_idx} on Internal48 node"); + } else { + self.child_indexes[key_byte as usize] = INVALID_CHILD_INDEX; + self.num_children -= 1; + } + } + + fn is_full(&self) -> bool { + self.num_children == 48 + } + + fn can_shrink(&self) -> bool { + self.num_children <= 16 + } + + fn insert_child(&mut self, key_byte: u8, child: NodePtr) { + assert!(self.num_children < 48); + assert!(self.child_indexes[key_byte as usize] == INVALID_CHILD_INDEX); + let idx = self.num_children; + self.child_indexes[key_byte as usize] = idx; + self.child_ptrs[idx as usize] = child; + self.num_children += 1; + self.validate(); + } + + fn grow(&self, allocator: &impl ArtAllocator) -> Result, OutOfMemoryError> { + let ptr: *mut NodeInternal256 = allocator.alloc_node_internal256().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + let mut init = NodeInternal256 { + tag: NodeTag::Internal256, + lock_and_version: AtomicLockAndVersion::new(), + + prefix: self.prefix, + prefix_len: self.prefix_len, + num_children: 
self.num_children as u16, + + child_ptrs: [const { NodePtr::null() }; 256], + }; + for i in 0..256 { + let idx = self.child_indexes[i]; + if idx != INVALID_CHILD_INDEX { + init.child_ptrs[i] = self.child_ptrs[idx as usize]; + } + } + unsafe { ptr.write(init) }; + Ok(ptr.into()) + } + + fn shrink(&self, allocator: &impl ArtAllocator) -> Result, OutOfMemoryError> { + assert!(self.num_children <= 16); + let ptr: *mut NodeInternal16 = allocator.alloc_node_internal16().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + let mut init = NodeInternal16 { + tag: NodeTag::Internal16, + lock_and_version: AtomicLockAndVersion::new(), + + prefix: self.prefix, + prefix_len: self.prefix_len, + num_children: self.num_children, + + child_keys: [0; 16], + child_ptrs: [const { NodePtr::null() }; 16], + }; + let mut j = 0; + for i in 0..256 { + let idx = self.child_indexes[i]; + if idx != INVALID_CHILD_INDEX { + init.child_keys[j] = i as u8; + init.child_ptrs[j] = self.child_ptrs[idx as usize]; + j += 1; + } + } + assert_eq!(j, self.num_children as usize); + unsafe { ptr.write(init) }; + Ok(ptr.into()) + } +} + +impl NodeInternal256 { + fn get_prefix(&self) -> &[u8] { + &self.prefix[0..self.prefix_len as usize] + } + + fn prepend_prefix(&mut self, prefix: &[u8], prefix_byte: u8) { + assert!(1 + prefix.len() + self.prefix_len as usize <= MAX_PREFIX_LEN); + let mut new = Vec::with_capacity(MAX_PREFIX_LEN); + new.extend_from_slice(prefix); + new.push(prefix_byte); + new.extend_from_slice(&self.prefix[0..self.prefix_len as usize]); + self.prefix[0..new.len()].copy_from_slice(&new); + self.prefix_len = new.len() as u8; + } + + fn truncate_prefix(&mut self, new_prefix_len: usize) { + assert!(new_prefix_len < self.prefix_len as usize); + let prefix = &mut self.prefix; + let offset = self.prefix_len as usize - new_prefix_len; + for i in 0..new_prefix_len { + prefix[i] = prefix[i + offset]; + } + self.prefix_len = new_prefix_len as u8; + } + + fn find_child(&self, key_byte: u8) -> Option> { + let idx = key_byte as usize; + if !self.child_ptrs[idx].is_null() { + Some(self.child_ptrs[idx]) + } else { + None + } + } + + fn find_next_child(&self, min_key: u8) -> Option<(u8, NodePtr)> { + for key in min_key..=u8::MAX { + if !self.child_ptrs[key as usize].is_null() { + return Some((key, self.child_ptrs[key as usize])); + } + } + None + } + + fn replace_child(&mut self, key_byte: u8, replacement: NodePtr) { + let idx = key_byte as usize; + if !self.child_ptrs[idx].is_null() { + self.child_ptrs[idx] = replacement + } else { + panic!("could not re-find parent with key {key_byte}"); + } + } + + fn delete_child(&mut self, key_byte: u8) { + let idx = key_byte as usize; + if self.child_ptrs[idx].is_null() { + panic!("could not re-find parent with key {key_byte}"); + } + self.num_children -= 1; + self.child_ptrs[idx] = NodePtr::null(); + } + + fn is_full(&self) -> bool { + self.num_children == 256 + } + + fn can_shrink(&self) -> bool { + self.num_children <= 48 + } + + fn insert_child(&mut self, key_byte: u8, child: NodePtr) { + assert!(self.num_children < 256); + assert!(self.child_ptrs[key_byte as usize].is_null()); + self.child_ptrs[key_byte as usize] = child; + self.num_children += 1; + } + + fn shrink(&self, allocator: &impl ArtAllocator) -> Result, OutOfMemoryError> { + assert!(self.num_children <= 48); + let ptr: *mut NodeInternal48 = allocator.alloc_node_internal48().cast(); + if ptr.is_null() { + return Err(OutOfMemoryError()); + } + let mut init = NodeInternal48 { + tag: NodeTag::Internal48, + 
lock_and_version: AtomicLockAndVersion::new(), + + prefix: self.prefix, + prefix_len: self.prefix_len, + num_children: self.num_children as u8, + + child_indexes: [INVALID_CHILD_INDEX; 256], + child_ptrs: [const { NodePtr::null() }; 48], + }; + let mut j = 0; + for i in 0..256 { + if !self.child_ptrs[i].is_null() { + init.child_indexes[i] = j; + init.child_ptrs[j as usize] = self.child_ptrs[i]; + j += 1; + } + } + assert_eq!(j as u16, self.num_children); + unsafe { ptr.write(init) }; + Ok(ptr.into()) + } +} + +impl NodeLeaf { + fn get_prefix(&self) -> &[u8] { + &self.prefix[0..self.prefix_len as usize] + } + + fn prepend_prefix(&mut self, prefix: &[u8], prefix_byte: u8) { + assert!(1 + prefix.len() + self.prefix_len as usize <= MAX_PREFIX_LEN); + let mut new = Vec::with_capacity(MAX_PREFIX_LEN); + new.extend_from_slice(prefix); + new.push(prefix_byte); + new.extend_from_slice(&self.prefix[0..self.prefix_len as usize]); + self.prefix[0..new.len()].copy_from_slice(&new); + self.prefix_len = new.len() as u8; + } + + fn truncate_prefix(&mut self, new_prefix_len: usize) { + assert!(new_prefix_len < self.prefix_len as usize); + let prefix = &mut self.prefix; + let offset = self.prefix_len as usize - new_prefix_len; + for i in 0..new_prefix_len { + prefix[i] = prefix[i + offset]; + } + self.prefix_len = new_prefix_len as u8; + } + + fn get_leaf_value<'a: 'b, 'b>(&'a self) -> &'b V { + &self.value + } + + fn get_leaf_value_mut<'a: 'b, 'b>(&'a mut self) -> &'b mut V { + &mut self.value + } +} + +impl NodeInternal256 { + pub(crate) fn new() -> NodeInternal256 { + NodeInternal256 { + tag: NodeTag::Internal256, + lock_and_version: AtomicLockAndVersion::new(), + + prefix: [0; MAX_PREFIX_LEN], + prefix_len: 0, + num_children: 0, + + child_ptrs: [const { NodePtr::null() }; 256], + } + } +} + +impl From<*mut NodeInternal4> for NodePtr { + fn from(val: *mut NodeInternal4) -> NodePtr { + NodePtr { + ptr: val.cast(), + phantom_value: PhantomData, + } + } +} +impl From<*mut NodeInternal16> for NodePtr { + fn from(val: *mut NodeInternal16) -> NodePtr { + NodePtr { + ptr: val.cast(), + phantom_value: PhantomData, + } + } +} + +impl From<*mut NodeInternal48> for NodePtr { + fn from(val: *mut NodeInternal48) -> NodePtr { + NodePtr { + ptr: val.cast(), + phantom_value: PhantomData, + } + } +} + +impl From<*mut NodeInternal256> for NodePtr { + fn from(val: *mut NodeInternal256) -> NodePtr { + NodePtr { + ptr: val.cast(), + phantom_value: PhantomData, + } + } +} + +impl From<*mut NodeLeaf> for NodePtr { + fn from(val: *mut NodeLeaf) -> NodePtr { + NodePtr { + ptr: val.cast(), + phantom_value: PhantomData, + } + } +} diff --git a/libs/neonart/src/algorithm/node_ref.rs b/libs/neonart/src/algorithm/node_ref.rs new file mode 100644 index 0000000000..5403aaabdf --- /dev/null +++ b/libs/neonart/src/algorithm/node_ref.rs @@ -0,0 +1,349 @@ +use std::fmt::Debug; +use std::marker::PhantomData; + +use super::node_ptr; +use super::node_ptr::NodePtr; +use crate::EpochPin; +use crate::Value; +use crate::algorithm::lock_and_version::AtomicLockAndVersion; +use crate::algorithm::lock_and_version::ConcurrentUpdateError; +use crate::allocator::ArtAllocator; +use crate::allocator::OutOfMemoryError; + +pub struct NodeRef<'e, V> { + ptr: NodePtr, + + phantom: PhantomData<&'e EpochPin<'e>>, +} + +impl<'e, V> Debug for NodeRef<'e, V> { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(fmt, "{:?}", self.ptr) + } +} + +impl<'e, V: Value> NodeRef<'e, V> { + pub(crate) fn from_root_ptr(root_ptr: 
NodePtr) -> NodeRef<'e, V> { + NodeRef { + ptr: root_ptr, + phantom: PhantomData, + } + } + + pub(crate) fn read_lock_or_restart( + &self, + ) -> Result, ConcurrentUpdateError> { + let version = self.lockword().read_lock_or_restart()?; + Ok(ReadLockedNodeRef { + ptr: self.ptr, + version, + phantom: self.phantom, + }) + } + + pub(crate) fn write_lock_or_restart( + &self, + ) -> Result, ConcurrentUpdateError> { + self.lockword().write_lock_or_restart()?; + Ok(WriteLockedNodeRef { + ptr: self.ptr, + phantom: self.phantom, + }) + } + + fn lockword(&self) -> &AtomicLockAndVersion { + self.ptr.lockword() + } +} + +/// A reference to a node that has been optimistically read-locked. The functions re-check +/// the version after each read. +pub struct ReadLockedNodeRef<'e, V> { + ptr: NodePtr, + version: u64, + + phantom: PhantomData<&'e EpochPin<'e>>, +} + +impl<'e, V: Value> ReadLockedNodeRef<'e, V> { + pub(crate) fn is_leaf(&self) -> bool { + self.ptr.is_leaf() + } + + pub(crate) fn is_full(&self) -> bool { + self.ptr.is_full() + } + + pub(crate) fn get_prefix(&self) -> &[u8] { + self.ptr.get_prefix() + } + + /// Note: because we're only holding a read lock, the prefix can change concurrently. + /// You must be prepared to restart, if read_unlock() returns error later. + /// + /// Returns the length of the prefix, or None if it's not a match + pub(crate) fn prefix_matches(&self, key: &[u8]) -> Option { + self.ptr.prefix_matches(key) + } + + pub(crate) fn find_child_or_restart( + &self, + key_byte: u8, + ) -> Result>, ConcurrentUpdateError> { + let child_or_value = self.ptr.find_child(key_byte); + self.ptr.lockword().check_or_restart(self.version)?; + + match child_or_value { + None => Ok(None), + Some(child_ptr) => Ok(Some(NodeRef { + ptr: child_ptr, + phantom: self.phantom, + })), + } + } + + pub(crate) fn find_next_child_or_restart( + &self, + min_key_byte: u8, + ) -> Result)>, ConcurrentUpdateError> { + let child_or_value = self.ptr.find_next_child(min_key_byte); + self.ptr.lockword().check_or_restart(self.version)?; + + match child_or_value { + None => Ok(None), + Some((k, child_ptr)) => Ok(Some(( + k, + NodeRef { + ptr: child_ptr, + phantom: self.phantom, + }, + ))), + } + } + + pub(crate) fn get_leaf_value_ptr(&self) -> Result<*const V, ConcurrentUpdateError> { + let result = self.ptr.get_leaf_value(); + self.ptr.lockword().check_or_restart(self.version)?; + + // Extend the lifetime. + let result = std::ptr::from_ref(result); + + Ok(result) + } + + pub(crate) fn upgrade_to_write_lock_or_restart( + self, + ) -> Result, ConcurrentUpdateError> { + self.ptr + .lockword() + .upgrade_to_write_lock_or_restart(self.version)?; + + Ok(WriteLockedNodeRef { + ptr: self.ptr, + phantom: self.phantom, + }) + } + + pub(crate) fn read_unlock_or_restart(self) -> Result<(), ConcurrentUpdateError> { + self.ptr.lockword().check_or_restart(self.version)?; + Ok(()) + } + + pub(crate) fn check_or_restart(&self) -> Result<(), ConcurrentUpdateError> { + self.ptr.lockword().check_or_restart(self.version)?; + Ok(()) + } +} + +/// A reference to a node that has been optimistically read-locked. The functions re-check +/// the version after each read. 
+pub struct WriteLockedNodeRef<'e, V> { + ptr: NodePtr, + phantom: PhantomData<&'e EpochPin<'e>>, +} + +impl<'e, V: Value> WriteLockedNodeRef<'e, V> { + pub(crate) fn can_shrink(&self) -> bool { + self.ptr.can_shrink() + } + + pub(crate) fn num_children(&self) -> usize { + self.ptr.num_children() + } + + pub(crate) fn write_unlock(mut self) { + self.ptr.lockword().write_unlock(); + self.ptr = NodePtr::null(); + } + + pub(crate) fn write_unlock_obsolete(mut self) { + self.ptr.lockword().write_unlock_obsolete(); + self.ptr = NodePtr::null(); + } + + pub(crate) fn get_prefix(&self) -> &[u8] { + self.ptr.get_prefix() + } + + pub(crate) fn truncate_prefix(&mut self, new_prefix_len: usize) { + self.ptr.truncate_prefix(new_prefix_len) + } + + pub(crate) fn prepend_prefix(&mut self, prefix: &[u8], prefix_byte: u8) { + self.ptr.prepend_prefix(prefix, prefix_byte) + } + + pub(crate) fn insert_child(&mut self, key_byte: u8, child: NodePtr) { + self.ptr.insert_child(key_byte, child) + } + + pub(crate) fn get_leaf_value_mut(&mut self) -> &mut V { + self.ptr.get_leaf_value_mut() + } + + pub(crate) fn grow<'a, A>( + &self, + allocator: &'a A, + ) -> Result, OutOfMemoryError> + where + A: ArtAllocator, + { + let new_node = self.ptr.grow(allocator)?; + Ok(NewNodeRef { + ptr: new_node, + allocator, + extra_nodes: Vec::new(), + }) + } + + pub(crate) fn shrink<'a, A>( + &self, + allocator: &'a A, + ) -> Result, OutOfMemoryError> + where + A: ArtAllocator, + { + let new_node = self.ptr.shrink(allocator)?; + Ok(NewNodeRef { + ptr: new_node, + allocator, + extra_nodes: Vec::new(), + }) + } + + pub(crate) fn as_ptr(&self) -> NodePtr { + self.ptr + } + + pub(crate) fn replace_child(&mut self, key_byte: u8, replacement: NodePtr) { + self.ptr.replace_child(key_byte, replacement); + } + + pub(crate) fn delete_child(&mut self, key_byte: u8) { + self.ptr.delete_child(key_byte); + } + + pub(crate) fn find_remaining_child(&self) -> (u8, NodeRef<'e, V>) { + assert_eq!(self.num_children(), 1); + let child_or_value = self.ptr.find_next_child(0); + + match child_or_value { + None => panic!("could not find only child in node"), + Some((k, child_ptr)) => ( + k, + NodeRef { + ptr: child_ptr, + phantom: self.phantom, + }, + ), + } + } +} + +impl<'e, V> Drop for WriteLockedNodeRef<'e, V> { + fn drop(&mut self) { + if !self.ptr.is_null() { + self.ptr.lockword().write_unlock(); + } + } +} + +pub(crate) struct NewNodeRef<'a, V, A> +where + V: Value, + A: ArtAllocator, +{ + ptr: NodePtr, + allocator: &'a A, + + extra_nodes: Vec>, +} + +impl<'a, V, A> NewNodeRef<'a, V, A> +where + V: Value, + A: ArtAllocator, +{ + pub(crate) fn insert_old_child(&mut self, key_byte: u8, child: &WriteLockedNodeRef) { + self.ptr.insert_child(key_byte, child.as_ptr()) + } + + pub(crate) fn into_ptr(mut self) -> NodePtr { + let ptr = self.ptr; + self.ptr = NodePtr::null(); + ptr + } + + pub(crate) fn insert_new_child(&mut self, key_byte: u8, child: NewNodeRef<'a, V, A>) { + let child_ptr = child.into_ptr(); + self.ptr.insert_child(key_byte, child_ptr); + self.extra_nodes.push(child_ptr); + } +} + +impl<'a, V, A> Drop for NewNodeRef<'a, V, A> +where + V: Value, + A: ArtAllocator, +{ + /// This drop implementation deallocates the newly allocated node, if into_ptr() was not called. 
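+    /// Direct children attached with insert_new_child() (tracked in 'extra_nodes')
+    /// are deallocated as well.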
+ fn drop(&mut self) { + if !self.ptr.is_null() { + self.ptr.deallocate(self.allocator); + for p in self.extra_nodes.iter() { + p.deallocate(self.allocator); + } + } + } +} + +pub(crate) fn new_internal<'a, V, A>( + prefix: &[u8], + allocator: &'a A, +) -> Result, OutOfMemoryError> +where + V: Value, + A: ArtAllocator, +{ + Ok(NewNodeRef { + ptr: node_ptr::new_internal(prefix, allocator)?, + allocator, + extra_nodes: Vec::new(), + }) +} + +pub(crate) fn new_leaf<'a, V, A>( + prefix: &[u8], + value: V, + allocator: &'a A, +) -> Result, OutOfMemoryError> +where + V: Value, + A: ArtAllocator, +{ + Ok(NewNodeRef { + ptr: node_ptr::new_leaf(prefix, value, allocator)?, + allocator, + extra_nodes: Vec::new(), + }) +} diff --git a/libs/neonart/src/allocator.rs b/libs/neonart/src/allocator.rs new file mode 100644 index 0000000000..f95e251458 --- /dev/null +++ b/libs/neonart/src/allocator.rs @@ -0,0 +1,156 @@ +pub mod block; +mod multislab; +mod slab; +pub mod r#static; + +use std::alloc::Layout; +use std::marker::PhantomData; +use std::mem::MaybeUninit; +use std::sync::atomic::Ordering; + +use crate::allocator::multislab::MultiSlabAllocator; +use crate::allocator::r#static::alloc_from_slice; + +use spin; + +use crate::Tree; +pub use crate::algorithm::node_ptr::{ + NodeInternal4, NodeInternal16, NodeInternal48, NodeInternal256, NodeLeaf, +}; + +#[derive(Debug)] +pub struct OutOfMemoryError(); + +pub trait ArtAllocator { + fn alloc_tree(&self) -> *mut Tree; + + fn alloc_node_internal4(&self) -> *mut NodeInternal4; + fn alloc_node_internal16(&self) -> *mut NodeInternal16; + fn alloc_node_internal48(&self) -> *mut NodeInternal48; + fn alloc_node_internal256(&self) -> *mut NodeInternal256; + fn alloc_node_leaf(&self) -> *mut NodeLeaf; + + fn dealloc_node_internal4(&self, ptr: *mut NodeInternal4); + fn dealloc_node_internal16(&self, ptr: *mut NodeInternal16); + fn dealloc_node_internal48(&self, ptr: *mut NodeInternal48); + fn dealloc_node_internal256(&self, ptr: *mut NodeInternal256); + fn dealloc_node_leaf(&self, ptr: *mut NodeLeaf); +} + +pub struct ArtMultiSlabAllocator<'t, V> +where + V: crate::Value, +{ + tree_area: spin::Mutex>>>, + + pub(crate) inner: MultiSlabAllocator<'t, 5>, + + phantom_val: PhantomData, +} + +impl<'t, V: crate::Value> ArtMultiSlabAllocator<'t, V> { + const LAYOUTS: [Layout; 5] = [ + Layout::new::>(), + Layout::new::>(), + Layout::new::>(), + Layout::new::>(), + Layout::new::>(), + ]; + + pub fn new(area: &'t mut [MaybeUninit]) -> &'t mut ArtMultiSlabAllocator<'t, V> { + let (allocator_area, remain) = alloc_from_slice::>(area); + let (tree_area, remain) = alloc_from_slice::>(remain); + + allocator_area.write(ArtMultiSlabAllocator { + tree_area: spin::Mutex::new(Some(tree_area)), + inner: MultiSlabAllocator::new(remain, &Self::LAYOUTS), + phantom_val: PhantomData, + }) + } +} + +impl<'t, V: crate::Value> ArtAllocator for ArtMultiSlabAllocator<'t, V> { + fn alloc_tree(&self) -> *mut Tree { + let mut t = self.tree_area.lock(); + if let Some(tree_area) = t.take() { + return tree_area.as_mut_ptr().cast(); + } + panic!("cannot allocate more than one tree"); + } + + fn alloc_node_internal4(&self) -> *mut NodeInternal4 { + self.inner.alloc_slab(0).cast() + } + fn alloc_node_internal16(&self) -> *mut NodeInternal16 { + self.inner.alloc_slab(1).cast() + } + fn alloc_node_internal48(&self) -> *mut NodeInternal48 { + self.inner.alloc_slab(2).cast() + } + fn alloc_node_internal256(&self) -> *mut NodeInternal256 { + self.inner.alloc_slab(3).cast() + } + fn alloc_node_leaf(&self) -> *mut 
NodeLeaf { + self.inner.alloc_slab(4).cast() + } + + fn dealloc_node_internal4(&self, ptr: *mut NodeInternal4) { + self.inner.dealloc_slab(0, ptr.cast()) + } + + fn dealloc_node_internal16(&self, ptr: *mut NodeInternal16) { + self.inner.dealloc_slab(1, ptr.cast()) + } + fn dealloc_node_internal48(&self, ptr: *mut NodeInternal48) { + self.inner.dealloc_slab(2, ptr.cast()) + } + fn dealloc_node_internal256(&self, ptr: *mut NodeInternal256) { + self.inner.dealloc_slab(3, ptr.cast()) + } + fn dealloc_node_leaf(&self, ptr: *mut NodeLeaf) { + self.inner.dealloc_slab(4, ptr.cast()) + } +} + +impl<'t, V: crate::Value> ArtMultiSlabAllocator<'t, V> { + pub(crate) fn get_statistics(&self) -> ArtMultiSlabStats { + ArtMultiSlabStats { + num_internal4: self.inner.slab_descs[0] + .num_allocated + .load(Ordering::Relaxed), + num_internal16: self.inner.slab_descs[1] + .num_allocated + .load(Ordering::Relaxed), + num_internal48: self.inner.slab_descs[2] + .num_allocated + .load(Ordering::Relaxed), + num_internal256: self.inner.slab_descs[3] + .num_allocated + .load(Ordering::Relaxed), + num_leaf: self.inner.slab_descs[4] + .num_allocated + .load(Ordering::Relaxed), + + num_blocks_internal4: self.inner.slab_descs[0].num_blocks.load(Ordering::Relaxed), + num_blocks_internal16: self.inner.slab_descs[1].num_blocks.load(Ordering::Relaxed), + num_blocks_internal48: self.inner.slab_descs[2].num_blocks.load(Ordering::Relaxed), + num_blocks_internal256: self.inner.slab_descs[3].num_blocks.load(Ordering::Relaxed), + num_blocks_leaf: self.inner.slab_descs[4].num_blocks.load(Ordering::Relaxed), + } + } +} + +#[derive(Clone, Debug)] +pub struct ArtMultiSlabStats { + pub num_internal4: u64, + pub num_internal16: u64, + pub num_internal48: u64, + pub num_internal256: u64, + pub num_leaf: u64, + + pub num_blocks_internal4: u64, + pub num_blocks_internal16: u64, + pub num_blocks_internal48: u64, + pub num_blocks_internal256: u64, + pub num_blocks_leaf: u64, +} diff --git a/libs/neonart/src/allocator/block.rs b/libs/neonart/src/allocator/block.rs new file mode 100644 index 0000000000..9c1bb6e176 --- /dev/null +++ b/libs/neonart/src/allocator/block.rs @@ -0,0 +1,191 @@ +//! 
Simple allocator of fixed-size blocks + +use std::mem::MaybeUninit; +use std::sync::atomic::{AtomicU64, Ordering}; + +use spin; + +pub const BLOCK_SIZE: usize = 16 * 1024; + +const INVALID_BLOCK: u64 = u64::MAX; + +pub(crate) struct BlockAllocator<'t> { + blocks_ptr: &'t [MaybeUninit], + num_blocks: u64, + num_initialized: AtomicU64, + + freelist_head: spin::Mutex, +} + +struct FreeListBlock { + inner: spin::Mutex, +} + +struct FreeListBlockInner { + next: u64, + + num_free_blocks: u64, + free_blocks: [u64; 100], // FIXME: fill the rest of the block +} + +impl<'t> BlockAllocator<'t> { + pub(crate) fn new(area: &'t mut [MaybeUninit]) -> Self { + // Use all the space for the blocks + let padding = area.as_ptr().align_offset(BLOCK_SIZE); + let remain = &mut area[padding..]; + + let num_blocks = (remain.len() / BLOCK_SIZE) as u64; + + BlockAllocator { + blocks_ptr: remain, + num_blocks, + num_initialized: AtomicU64::new(0), + freelist_head: spin::Mutex::new(INVALID_BLOCK), + } + } + + /// safety: you must hold a lock on the pointer to this block, otherwise it might get + /// reused for another kind of block + fn read_freelist_block(&self, blkno: u64) -> &FreeListBlock { + let ptr: *const FreeListBlock = self.get_block_ptr(blkno).cast(); + unsafe { ptr.as_ref().unwrap() } + } + + fn get_block_ptr(&self, blkno: u64) -> *mut u8 { + assert!(blkno < self.num_blocks); + unsafe { + self.blocks_ptr + .as_ptr() + .byte_offset(blkno as isize * BLOCK_SIZE as isize) + } + .cast_mut() + .cast() + } + + #[allow(clippy::mut_from_ref)] + pub(crate) fn alloc_block(&self) -> &mut [MaybeUninit] { + // FIXME: handle OOM + let blkno = self.alloc_block_internal(); + if blkno == INVALID_BLOCK { + panic!("out of memory"); + } + + let ptr: *mut MaybeUninit = self.get_block_ptr(blkno).cast(); + unsafe { std::slice::from_raw_parts_mut(ptr, BLOCK_SIZE) } + } + + fn alloc_block_internal(&self) -> u64 { + // check the free list. + { + let mut freelist_head = self.freelist_head.lock(); + if *freelist_head != INVALID_BLOCK { + let freelist_block = self.read_freelist_block(*freelist_head); + + // acquire lock on the freelist block before releasing the lock on the parent (i.e. lock coupling) + let mut g = freelist_block.inner.lock(); + + if g.num_free_blocks > 0 { + g.num_free_blocks -= 1; + let result = g.free_blocks[g.num_free_blocks as usize]; + return result; + } else { + // consume the freelist block itself + let result = *freelist_head; + *freelist_head = g.next; + // This freelist block is now unlinked and can be repurposed + drop(g); + return result; + } + } + } + + // If there are some blocks left that we've never used, pick next such block + let mut next_uninitialized = self.num_initialized.load(Ordering::Relaxed); + while next_uninitialized < self.num_blocks { + match self.num_initialized.compare_exchange( + next_uninitialized, + next_uninitialized + 1, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + Ok(_) => { + return next_uninitialized; + } + Err(old) => { + next_uninitialized = old; + continue; + } + } + } + + // out of blocks + INVALID_BLOCK + } + + // TODO: this is currently unused. 
The slab allocator never releases blocks + #[allow(dead_code)] + pub(crate) fn release_block(&self, block_ptr: *mut u8) { + let blockno = unsafe { block_ptr.byte_offset_from(self.blocks_ptr) / BLOCK_SIZE as isize }; + self.release_block_internal(blockno as u64); + } + + fn release_block_internal(&self, blockno: u64) { + let mut freelist_head = self.freelist_head.lock(); + if *freelist_head != INVALID_BLOCK { + let freelist_block = self.read_freelist_block(*freelist_head); + + // acquire lock on the freelist block before releasing the lock on the parent (i.e. lock coupling) + let mut g = freelist_block.inner.lock(); + + let num_free_blocks = g.num_free_blocks; + if num_free_blocks < g.free_blocks.len() as u64 { + g.free_blocks[num_free_blocks as usize] = blockno; + g.num_free_blocks += 1; + return; + } + } + + // Convert the block into a new freelist block + let block_ptr: *mut FreeListBlock = self.get_block_ptr(blockno).cast(); + let init = FreeListBlock { + inner: spin::Mutex::new(FreeListBlockInner { + next: *freelist_head, + num_free_blocks: 0, + free_blocks: [INVALID_BLOCK; 100], + }), + }; + unsafe { (*block_ptr) = init }; + *freelist_head = blockno; + } + + // for debugging + pub(crate) fn get_statistics(&self) -> BlockAllocatorStats { + let mut num_free_blocks = 0; + + let mut _prev_lock = None; + let head_lock = self.freelist_head.lock(); + let mut next_blk = *head_lock; + let mut _head_lock = Some(head_lock); + while next_blk != INVALID_BLOCK { + let freelist_block = self.read_freelist_block(next_blk); + let lock = freelist_block.inner.lock(); + num_free_blocks += lock.num_free_blocks; + next_blk = lock.next; + _prev_lock = Some(lock); // hold the lock until we've read the next block + _head_lock = None; + } + + BlockAllocatorStats { + num_blocks: self.num_blocks, + num_initialized: self.num_initialized.load(Ordering::Relaxed), + num_free_blocks, + } + } +} + +#[derive(Clone, Debug)] +pub struct BlockAllocatorStats { + pub num_blocks: u64, + pub num_initialized: u64, + pub num_free_blocks: u64, +} diff --git a/libs/neonart/src/allocator/multislab.rs b/libs/neonart/src/allocator/multislab.rs new file mode 100644 index 0000000000..a75b411ec2 --- /dev/null +++ b/libs/neonart/src/allocator/multislab.rs @@ -0,0 +1,33 @@ +use std::alloc::Layout; +use std::mem::MaybeUninit; + +use crate::allocator::block::BlockAllocator; +use crate::allocator::slab::SlabDesc; + +pub struct MultiSlabAllocator<'t, const N: usize> { + pub(crate) block_allocator: BlockAllocator<'t>, + + pub(crate) slab_descs: [SlabDesc; N], +} + +impl<'t, const N: usize> MultiSlabAllocator<'t, N> { + pub(crate) fn new( + area: &'t mut [MaybeUninit], + layouts: &[Layout; N], + ) -> MultiSlabAllocator<'t, N> { + let block_allocator = BlockAllocator::new(area); + MultiSlabAllocator { + block_allocator, + + slab_descs: std::array::from_fn(|i| SlabDesc::new(&layouts[i])), + } + } + + pub(crate) fn alloc_slab(&self, slab_idx: usize) -> *mut u8 { + self.slab_descs[slab_idx].alloc_chunk(&self.block_allocator) + } + + pub(crate) fn dealloc_slab(&self, slab_idx: usize, ptr: *mut u8) { + self.slab_descs[slab_idx].dealloc_chunk(ptr, &self.block_allocator) + } +} diff --git a/libs/neonart/src/allocator/slab.rs b/libs/neonart/src/allocator/slab.rs new file mode 100644 index 0000000000..5471d24118 --- /dev/null +++ b/libs/neonart/src/allocator/slab.rs @@ -0,0 +1,433 @@ +//! A slab allocator that carves out fixed-size chunks from larger blocks. +//! +//! 
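+//! Each SlabDesc serves chunks of one fixed Layout (the MultiSlabAllocator in
+//! this crate keeps one SlabDesc per node type). Blocks are obtained from the
+//! BlockAllocator; each block is tracked by a SlabBlockHeader that carries its
+//! own free-chunk list, and the descriptor keeps those headers on two intrusive
+//! lists. Roughly (informal sketch; the definitions below are authoritative):
+//!
+//! ```text
+//! SlabDesc --+-- nonfull_blocks: blocks that still have free chunks
+//!            +-- full_blocks:    blocks with every chunk handed out
+//! ```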
+ +use std::alloc::Layout; +use std::mem::MaybeUninit; +use std::ops::Deref; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; + +use spin; + +use super::alloc_from_slice; +use super::block::BlockAllocator; + +use crate::allocator::block::BLOCK_SIZE; + +pub(crate) struct SlabDesc { + pub(crate) layout: Layout, + + block_lists: spin::RwLock, + + pub(crate) num_blocks: AtomicU64, + pub(crate) num_allocated: AtomicU64, +} + +// FIXME: Not sure if SlabDesc is really Sync or Send. It probably is when it's empty, but +// 'block_lists' contains pointers when it's not empty. In the current use as part of the +// the art tree, SlabDescs are only moved during initialization. +unsafe impl Sync for SlabDesc {} +unsafe impl Send for SlabDesc {} + +#[derive(Default, Debug)] +struct BlockLists { + full_blocks: BlockList, + nonfull_blocks: BlockList, +} + +impl BlockLists { + // Unlink a node. It must be in either one of the two lists. + unsafe fn unlink(&mut self, elem: *mut SlabBlockHeader) { + let list = unsafe { + if (*elem).next.is_null() { + if self.full_blocks.tail == elem { + Some(&mut self.full_blocks) + } else { + Some(&mut self.nonfull_blocks) + } + } else if (*elem).prev.is_null() { + if self.full_blocks.head == elem { + Some(&mut self.full_blocks) + } else { + Some(&mut self.nonfull_blocks) + } + } else { + None + } + }; + unsafe { unlink_slab_block(list, elem) }; + } +} + +unsafe fn unlink_slab_block(mut list: Option<&mut BlockList>, elem: *mut SlabBlockHeader) { + unsafe { + if (*elem).next.is_null() { + assert_eq!(list.as_ref().unwrap().tail, elem); + list.as_mut().unwrap().tail = (*elem).prev; + } else { + assert_eq!((*(*elem).next).prev, elem); + (*(*elem).next).prev = (*elem).prev; + } + if (*elem).prev.is_null() { + assert_eq!(list.as_ref().unwrap().head, elem); + list.as_mut().unwrap().head = (*elem).next; + } else { + assert_eq!((*(*elem).prev).next, elem); + (*(*elem).prev).next = (*elem).next; + } + } +} + +#[derive(Debug)] +struct BlockList { + head: *mut SlabBlockHeader, + tail: *mut SlabBlockHeader, +} + +impl Default for BlockList { + fn default() -> Self { + BlockList { + head: std::ptr::null_mut(), + tail: std::ptr::null_mut(), + } + } +} + +impl BlockList { + unsafe fn push_head(&mut self, elem: *mut SlabBlockHeader) { + unsafe { + if self.is_empty() { + self.tail = elem; + (*elem).next = std::ptr::null_mut(); + } else { + (*elem).next = self.head; + (*self.head).prev = elem; + } + (*elem).prev = std::ptr::null_mut(); + self.head = elem; + } + } + + fn is_empty(&self) -> bool { + self.head.is_null() + } + + unsafe fn unlink(&mut self, elem: *mut SlabBlockHeader) { + unsafe { unlink_slab_block(Some(self), elem) } + } + + #[cfg(test)] + fn dump(&self) { + let mut next = self.head; + + while !next.is_null() { + let n = unsafe { next.as_ref() }.unwrap(); + eprintln!( + " blk {:?} (free {}/{})", + next, + n.num_free_chunks.load(Ordering::Relaxed), + n.num_chunks + ); + next = n.next; + } + } +} + +impl SlabDesc { + pub(crate) fn new(layout: &Layout) -> SlabDesc { + SlabDesc { + layout: *layout, + block_lists: spin::RwLock::new(BlockLists::default()), + num_allocated: AtomicU64::new(0), + num_blocks: AtomicU64::new(0), + } + } +} + +#[derive(Debug)] +struct SlabBlockHeader { + free_chunks_head: spin::Mutex<*mut FreeChunk>, + num_free_chunks: AtomicU32, + num_chunks: u32, // this is really a constant for a given Layout + + // these fields are protected by the lock on the BlockLists + prev: *mut SlabBlockHeader, + next: *mut SlabBlockHeader, +} + +struct FreeChunk { + next: 
*mut FreeChunk, +} + +enum ReadOrWriteGuard<'a, T> { + Read(spin::RwLockReadGuard<'a, T>), + Write(spin::RwLockWriteGuard<'a, T>), +} + +impl<'a, T> Deref for ReadOrWriteGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &::Target { + match self { + ReadOrWriteGuard::Read(g) => g.deref(), + ReadOrWriteGuard::Write(g) => g.deref(), + } + } +} + +impl SlabDesc { + pub fn alloc_chunk(&self, block_allocator: &BlockAllocator) -> *mut u8 { + // Are there any free chunks? + let mut acquire_write = false; + 'outer: loop { + let mut block_lists_guard = if acquire_write { + ReadOrWriteGuard::Write(self.block_lists.write()) + } else { + ReadOrWriteGuard::Read(self.block_lists.read()) + }; + 'inner: loop { + let block_ptr = block_lists_guard.nonfull_blocks.head; + if block_ptr.is_null() { + break 'outer; + } + unsafe { + let mut free_chunks_head = (*block_ptr).free_chunks_head.lock(); + if !(*free_chunks_head).is_null() { + let result = *free_chunks_head; + (*free_chunks_head) = (*result).next; + let _old = (*block_ptr).num_free_chunks.fetch_sub(1, Ordering::Relaxed); + + self.num_allocated.fetch_add(1, Ordering::Relaxed); + return result.cast(); + } + } + + // The block at the head of the list was full. Grab write lock and retry + match block_lists_guard { + ReadOrWriteGuard::Read(_) => { + acquire_write = true; + continue 'outer; + } + ReadOrWriteGuard::Write(ref mut g) => { + // move the node to the list of full blocks + unsafe { + g.nonfull_blocks.unlink(block_ptr); + g.full_blocks.push_head(block_ptr); + }; + continue 'inner; + } + } + } + } + + // no free chunks. Allocate a new block (and the chunk from that) + let (new_block, new_chunk) = self.alloc_block_and_chunk(block_allocator); + self.num_blocks.fetch_add(1, Ordering::Relaxed); + + // Add the block to the list in the SlabDesc + unsafe { + let mut block_lists_guard = self.block_lists.write(); + block_lists_guard.nonfull_blocks.push_head(new_block); + } + self.num_allocated.fetch_add(1, Ordering::Relaxed); + new_chunk + } + + pub fn dealloc_chunk(&self, chunk_ptr: *mut u8, _block_allocator: &BlockAllocator) { + // Find the block it belongs to. You can find the block from the address. (And knowing the + // layout, you could calculate the chunk number too.) + let block_ptr: *mut SlabBlockHeader = { + let block_addr = (chunk_ptr.addr() / BLOCK_SIZE) * BLOCK_SIZE; + chunk_ptr.with_addr(block_addr).cast() + }; + let chunk_ptr: *mut FreeChunk = chunk_ptr.cast(); + + // Mark the chunk as free in 'freechunks' list + let num_chunks; + let num_free_chunks; + unsafe { + let mut free_chunks_head = (*block_ptr).free_chunks_head.lock(); + (*chunk_ptr).next = *free_chunks_head; + *free_chunks_head = chunk_ptr; + + num_free_chunks = (*block_ptr).num_free_chunks.fetch_add(1, Ordering::Relaxed) + 1; + num_chunks = (*block_ptr).num_chunks; + } + + if num_free_chunks == 1 { + // If the block was full previously, add it to the nonfull blocks list. Note that + // we're not holding the lock anymore, so it can immediately become full again. + // That's harmless, it will be moved back to the full list again when a call + // to alloc_chunk() sees it. + let mut block_lists = self.block_lists.write(); + unsafe { + block_lists.unlink(block_ptr); + block_lists.nonfull_blocks.push_head(block_ptr); + }; + } else if num_free_chunks == num_chunks { + // If the block became completely empty, move it to the free list + // TODO + // FIXME: we're still holding the spinlock. It's not exactly safe to return it to + // the free blocks list, is it? 
Defer it as garbage to wait out concurrent updates? + //block_allocator.release_block() + } + + // update stats + self.num_allocated.fetch_sub(1, Ordering::Relaxed); + } + + fn alloc_block_and_chunk( + &self, + block_allocator: &BlockAllocator, + ) -> (*mut SlabBlockHeader, *mut u8) { + // fixme: handle OOM + let block_slice: &mut [MaybeUninit] = block_allocator.alloc_block(); + let (block_header, remain) = alloc_from_slice::(block_slice); + + let padding = remain.as_ptr().align_offset(self.layout.align()); + + let num_chunks = (remain.len() - padding) / self.layout.size(); + + let first_chunk_ptr: *mut FreeChunk = remain[padding..].as_mut_ptr().cast(); + + unsafe { + let mut chunk_ptr = first_chunk_ptr; + for _ in 0..num_chunks - 1 { + let next_chunk_ptr = chunk_ptr.byte_add(self.layout.size()); + (*chunk_ptr).next = next_chunk_ptr; + chunk_ptr = next_chunk_ptr; + } + (*chunk_ptr).next = std::ptr::null_mut(); + + let result_chunk = first_chunk_ptr; + + let block_header = block_header.write(SlabBlockHeader { + free_chunks_head: spin::Mutex::new((*first_chunk_ptr).next), + prev: std::ptr::null_mut(), + next: std::ptr::null_mut(), + num_chunks: num_chunks as u32, + num_free_chunks: AtomicU32::new(num_chunks as u32 - 1), + }); + + (block_header, result_chunk.cast()) + } + } + + #[cfg(test)] + fn dump(&self) { + eprintln!( + "slab dump ({} blocks, {} allocated chunks)", + self.num_blocks.load(Ordering::Relaxed), + self.num_allocated.load(Ordering::Relaxed) + ); + let lists = self.block_lists.read(); + + eprintln!("nonfull blocks:"); + lists.nonfull_blocks.dump(); + eprintln!("full blocks:"); + lists.full_blocks.dump(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use rand::Rng; + use rand_distr::Zipf; + + struct TestObject { + val: usize, + _dummy: [u8; BLOCK_SIZE / 4], + } + + struct TestObjectSlab<'a>(SlabDesc, BlockAllocator<'a>); + impl<'a> TestObjectSlab<'a> { + fn new(block_allocator: BlockAllocator) -> TestObjectSlab { + TestObjectSlab(SlabDesc::new(&Layout::new::()), block_allocator) + } + + fn alloc(&self, val: usize) -> *mut TestObject { + let obj: *mut TestObject = self.0.alloc_chunk(&self.1).cast(); + unsafe { (*obj).val = val }; + obj + } + + fn dealloc(&self, obj: *mut TestObject) { + self.0.dealloc_chunk(obj.cast(), &self.1) + } + } + + #[test] + fn test_slab_alloc() { + const MEM_SIZE: usize = 100000000; + let mut area = Box::new_uninit_slice(MEM_SIZE); + let block_allocator = BlockAllocator::new(&mut area); + + let slab = TestObjectSlab::new(block_allocator); + + let mut all: Vec<*mut TestObject> = Vec::new(); + for i in 0..11 { + all.push(slab.alloc(i)); + } + #[allow(clippy::needless_range_loop)] + for i in 0..11 { + assert!(unsafe { (*all[i]).val == i }); + } + + let distribution = Zipf::new(10.0, 1.1).unwrap(); + let mut rng = rand::rng(); + for _ in 0..100000 { + slab.0.dump(); + let idx = rng.sample(distribution) as usize; + let ptr: *mut TestObject = all[idx]; + if !ptr.is_null() { + assert_eq!(unsafe { (*ptr).val }, idx); + slab.dealloc(ptr); + all[idx] = std::ptr::null_mut(); + } else { + all[idx] = slab.alloc(idx); + } + } + } + + fn new_test_blk(i: u32) -> *mut SlabBlockHeader { + Box::into_raw(Box::new(SlabBlockHeader { + free_chunks_head: spin::Mutex::new(std::ptr::null_mut()), + num_free_chunks: AtomicU32::new(0), + num_chunks: i, + prev: std::ptr::null_mut(), + next: std::ptr::null_mut(), + })) + } + + #[test] + fn test_block_linked_list() { + // note: these are leaked, but that's OK for tests + let a = new_test_blk(0); + let b = new_test_blk(1); + + 
let mut list = BlockList::default(); + assert!(list.is_empty()); + + unsafe { + list.push_head(a); + assert!(!list.is_empty()); + list.unlink(a); + } + assert!(list.is_empty()); + + unsafe { + list.push_head(b); + list.push_head(a); + assert_eq!(list.head, a); + assert_eq!((*a).next, b); + assert_eq!((*b).prev, a); + assert_eq!(list.tail, b); + + list.unlink(a); + list.unlink(b); + assert!(list.is_empty()); + } + } +} diff --git a/libs/neonart/src/allocator/static.rs b/libs/neonart/src/allocator/static.rs new file mode 100644 index 0000000000..ab1683c411 --- /dev/null +++ b/libs/neonart/src/allocator/static.rs @@ -0,0 +1,44 @@ +use std::mem::MaybeUninit; + +pub fn alloc_from_slice( + area: &mut [MaybeUninit], +) -> (&mut MaybeUninit, &mut [MaybeUninit]) { + let layout = std::alloc::Layout::new::(); + + let area_start = area.as_mut_ptr(); + + // pad to satisfy alignment requirements + let padding = area_start.align_offset(layout.align()); + if padding + layout.size() > area.len() { + panic!("out of memory"); + } + let area = &mut area[padding..]; + let (result_area, remain) = area.split_at_mut(layout.size()); + + let result_ptr: *mut MaybeUninit = result_area.as_mut_ptr().cast(); + let result = unsafe { result_ptr.as_mut().unwrap() }; + + (result, remain) +} + +pub fn alloc_array_from_slice( + area: &mut [MaybeUninit], + len: usize, +) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { + let layout = std::alloc::Layout::new::(); + + let area_start = area.as_mut_ptr(); + + // pad to satisfy alignment requirements + let padding = area_start.align_offset(layout.align()); + if padding + layout.size() * len > area.len() { + panic!("out of memory"); + } + let area = &mut area[padding..]; + let (result_area, remain) = area.split_at_mut(layout.size() * len); + + let result_ptr: *mut MaybeUninit = result_area.as_mut_ptr().cast(); + let result = unsafe { std::slice::from_raw_parts_mut(result_ptr.as_mut().unwrap(), len) }; + + (result, remain) +} diff --git a/libs/neonart/src/epoch.rs b/libs/neonart/src/epoch.rs new file mode 100644 index 0000000000..a1a112bd17 --- /dev/null +++ b/libs/neonart/src/epoch.rs @@ -0,0 +1,142 @@ +//! This is similar to crossbeam_epoch crate, but works in shared memory + +use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; + +use crossbeam_utils::CachePadded; + +const NUM_SLOTS: usize = 1000; + +/// This is the struct that is stored in shmem +/// +/// bit 0: is it pinned or not? +/// rest of the bits are the epoch counter. +pub struct EpochShared { + global_epoch: AtomicU64, + participants: [CachePadded; NUM_SLOTS], + + broadcast_lock: spin::Mutex<()>, +} + +impl EpochShared { + pub fn new() -> EpochShared { + EpochShared { + global_epoch: AtomicU64::new(2), + participants: [const { CachePadded::new(AtomicU64::new(2)) }; NUM_SLOTS], + broadcast_lock: spin::Mutex::new(()), + } + } + + pub fn register(&self) -> LocalHandle { + LocalHandle { + global: self, + last_slot: AtomicUsize::new(0), // todo: choose more intelligently + } + } + + fn release_pin(&self, slot: usize, _epoch: u64) { + let global_epoch = self.global_epoch.load(Ordering::Relaxed); + self.participants[slot].store(global_epoch, Ordering::Relaxed); + } + + fn pin_internal(&self, slot_hint: usize) -> (usize, u64) { + // pick a slot + let mut slot = slot_hint; + let epoch = loop { + let old = self.participants[slot].fetch_or(1, Ordering::Relaxed); + if old & 1 == 0 { + // Got this slot + break old; + } + + // the slot was busy by another thread / process. 
try a different slot + slot += 1; + if slot == NUM_SLOTS { + slot = 0; + } + continue; + }; + (slot, epoch) + } + + pub(crate) fn advance(&self) -> u64 { + // Advance the global epoch + let old_epoch = self.global_epoch.fetch_add(2, Ordering::Relaxed); + // Anyone that release their pin after this will update their slot. + old_epoch + 2 + } + + pub(crate) fn broadcast(&self) { + let Some(_guard) = self.broadcast_lock.try_lock() else { + return; + }; + + let epoch = self.global_epoch.load(Ordering::Relaxed); + let old_epoch = epoch.wrapping_sub(2); + + // Update all free slots. + for i in 0..NUM_SLOTS { + // TODO: check result, as a sanity check. It should either be the old epoch, or pinned + let _ = self.participants[i].compare_exchange( + old_epoch, + epoch, + Ordering::Relaxed, + Ordering::Relaxed, + ); + } + + // FIXME: memory fence here, since we used Relaxed? + } + + pub(crate) fn get_oldest(&self) -> u64 { + // Read all slots. + let now = self.global_epoch.load(Ordering::Relaxed); + let mut oldest = now; + for i in 0..NUM_SLOTS { + let this_epoch = self.participants[i].load(Ordering::Relaxed); + let delta = now.wrapping_sub(this_epoch); + if delta > u64::MAX / 2 { + // this is very recent + } else if delta > now.wrapping_sub(oldest) { + oldest = this_epoch; + } + } + oldest + } + + pub(crate) fn get_current(&self) -> u64 { + self.global_epoch.load(Ordering::Relaxed) + } +} + +pub(crate) struct EpochPin<'e> { + slot: usize, + pub(crate) epoch: u64, + + handle: &'e LocalHandle<'e>, +} + +impl<'e> Drop for EpochPin<'e> { + fn drop(&mut self) { + self.handle.global.release_pin(self.slot, self.epoch); + } +} + +pub struct LocalHandle<'g> { + global: &'g EpochShared, + + last_slot: AtomicUsize, +} + +impl<'g> LocalHandle<'g> { + pub fn pin(&self) -> EpochPin { + let (slot, epoch) = self + .global + .pin_internal(self.last_slot.load(Ordering::Relaxed)); + self.last_slot.store(slot, Ordering::Relaxed); + EpochPin { + handle: self, + epoch, + slot, + } + } +} diff --git a/libs/neonart/src/lib.rs b/libs/neonart/src/lib.rs new file mode 100644 index 0000000000..5c1c36a91e --- /dev/null +++ b/libs/neonart/src/lib.rs @@ -0,0 +1,583 @@ +//! Adaptive Radix Tree (ART) implementation, with Optimistic Lock Coupling. +//! +//! The data structure is described in these two papers: +//! +//! [1] Leis, V. & Kemper, Alfons & Neumann, Thomas. (2013). +//! The adaptive radix tree: ARTful indexing for main-memory databases. +//! Proceedings - International Conference on Data Engineering. 38-49. 10.1109/ICDE.2013.6544812. +//! https://db.in.tum.de/~leis/papers/ART.pdf +//! +//! [2] Leis, Viktor & Scheibner, Florian & Kemper, Alfons & Neumann, Thomas. (2016). +//! The ART of practical synchronization. +//! 1-8. 10.1145/2933349.2933352. +//! https://db.in.tum.de/~leis/papers/artsync.pdf +//! +//! [1] describes the base data structure, and [2] describes the Optimistic Lock Coupling that we +//! use. +//! +//! The papers mention a few different variants. We have made the following choices in this +//! implementation: +//! +//! - All keys have the same length +//! +//! - Single-value leaves. +//! +//! - For collapsing inner nodes, we use the Pessimistic approach, where each inner node stores a +//! variable length "prefix", which stores the keys of all the one-way nodes which have been +//! removed. However, similar to the "hybrid" approach described in the paper, each node only has +//! space for a constant-size prefix of 8 bytes. If a node would have a longer prefix, then we +//! 
create one-way nodes to store them. (There was no particular reason for this choice,
+//! the "hybrid" approach described in the paper might be better.)
+//!
+//! - For concurrency, we use Optimistic Lock Coupling. The paper [2] also describes another method,
+//! ROWEX, which generally performs better when there is contention, but that is not important
+//! for us, and Optimistic Lock Coupling is simpler to implement.
+//!
+//! ## Requirements
+//!
+//! This data structure is currently used for the integrated LFC, relsize and last-written LSN cache
+//! in the compute communicator, part of the 'neon' Postgres extension. We have some unique
+//! requirements, which is why we had to write our own. Namely:
+//!
+//! - The data structure has to live in a fixed-size shared memory segment. That rules out any
+//! built-in Rust collections and most crates. (Except possibly with the 'allocator_api' Rust
+//! feature, which is still nightly-only and experimental as of this writing).
+//!
+//! - The data structure is accessed from multiple processes. Only one process updates the data
+//! structure, but other processes perform reads. That rules out using built-in Rust locking
+//! primitives like Mutex and RwLock, and most crates too.
+//!
+//! - Within the one process with write-access, multiple threads can perform updates concurrently.
+//! That rules out using PostgreSQL LWLocks for the locking.
+//!
+//! The implementation is generic, and doesn't depend on any PostgreSQL specifics, but it has been
+//! written with that usage and the above constraints in mind. Some noteworthy assumptions:
+//!
+//! - Contention is assumed to be rare. In the integrated cache in PostgreSQL, there's higher-level
+//! locking in the PostgreSQL buffer manager, which ensures that two backends should not try to
+//! read / write the same page at the same time. (Prefetching can conflict with actual reads,
+//! however.)
+//!
+//! - The keys in the integrated cache are 17 bytes long.
+//!
+//! ## Usage
+//!
+//! Because this is designed to be used as a Postgres shared memory data structure, initialization
+//! happens in three stages:
+//!
+//! 0. A fixed area of shared memory is allocated at postmaster startup.
+//!
+//! 1. TreeInitStruct::new() is called to initialize it, still in the postmaster process, before any
+//! other process or thread is running. It returns a TreeInitStruct, which is inherited by all
+//! the processes through fork().
+//!
+//! 2. One process may have write-access to the struct, by calling
+//! [TreeInitStruct::attach_writer]. (That process is the communicator process.)
+//!
+//! 3. Other processes get read-access to the struct, by calling [TreeInitStruct::attach_reader].
+//!
+//! "Write access" means that you can insert / update / delete values in the tree.
+//!
+//! NOTE: The Values stored in the tree are sometimes moved, when a leaf node fills up and a new
+//! larger node needs to be allocated. The versioning and epoch-based allocator ensure that the data
+//! structure stays consistent, but if the Value has interior mutability, like atomic fields,
+//! updates to such fields might be lost if the leaf node is concurrently moved! If that becomes a
+//! problem, the version check could be passed up to the caller, so that the caller could detect the
+//! lost updates and retry the operation.
+//!
+//! ## Implementation
+//!
+//! node_ptr.rs: Provides low-level implementations of the four different node types (eight actually,
+//! since there is an Internal and Leaf variant of each)
+//!
+//! 
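To make the staged setup under "Usage" concrete, here is a hedged sketch. MyKey, MyValue, my_key and my_value are placeholders assumed to implement Key and Value; the stages would normally be split across processes via fork(), and error handling is elided:

    // Hypothetical key/value types; a real caller must implement Key (fixed
    // KEY_LEN) and Value for them.
    let mut area = Box::new_uninit_slice(1024 * 1024);
    let allocator = ArtMultiSlabAllocator::new(&mut area);

    // Stage 1: initialize in the postmaster, before any other process or
    // thread is running.
    let init = TreeInitStruct::<MyKey, MyValue, _>::new(allocator);

    // Stage 2: exactly one process attaches as the writer (attach_writer
    // panics if a writer is already attached). insert() returns
    // Result<bool, OutOfMemoryError>, true if the key was newly inserted.
    let writer = init.attach_writer();
    let _inserted = writer.start_write().insert(&my_key, my_value);

    // Stage 3: other processes would call attach_reader() on their own copy
    // of the TreeInitStruct and read through start_read():
    // let reader = init.attach_reader();
    // assert!(reader.start_read().get(&my_key).is_some());
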
lock_and_version.rs: Provides an abstraction for the combined lock and version counter on each +//! node. +//! +//! node_ref.rs: The code in node_ptr.rs deals with raw pointers. node_ref.rs provides more type-safe +//! abstractions on top. +//! +//! algorithm.rs: Contains the functions to implement lookups and updates in the tree +//! +//! allocator.rs: Provides a facility to allocate memory for the tree nodes. (We must provide our +//! own abstraction for that because we need the data structure to live in a pre-allocated shared +//! memory segment). +//! +//! epoch.rs: The data structure requires that when a node is removed from the tree, it is not +//! immediately deallocated, but stays around for as long as concurrent readers might still have +//! pointers to them. This is enforced by an epoch system. This is similar to +//! e.g. crossbeam_epoch, but we couldn't use that either because it has to work across processes +//! communicating over the shared memory segment. +//! +//! ## See also +//! +//! There are some existing Rust ART implementations out there, but none of them filled all +//! the requirements: +//! +//! - https://github.com/XiangpengHao/congee +//! - https://github.com/declanvk/blart +//! +//! ## TODO +//! +//! - Removing values has not been implemented + +mod algorithm; +pub mod allocator; +mod epoch; + +use algorithm::RootPtr; +use algorithm::node_ptr::NodePtr; + +use std::collections::VecDeque; +use std::fmt::Debug; +use std::marker::PhantomData; +use std::ptr::NonNull; +use std::sync::atomic::{AtomicBool, Ordering}; + +use crate::epoch::EpochPin; + +#[cfg(test)] +mod tests; + +use allocator::ArtAllocator; +pub use allocator::ArtMultiSlabAllocator; +pub use allocator::OutOfMemoryError; + +/// Fixed-length key type. +/// +pub trait Key: Debug { + const KEY_LEN: usize; + + fn as_bytes(&self) -> &[u8]; +} + +/// Values stored in the tree +/// +/// Values need to be Cloneable, because when a node "grows", the value is copied to a new node and +/// the old sticks around until all readers that might see the old value are gone. +// fixme obsolete, no longer needs Clone +pub trait Value {} + +const MAX_GARBAGE: usize = 1024; + +/// The root of the tree, plus other tree-wide data. This is stored in the shared memory. +pub struct Tree { + /// For simplicity, so that we never need to grow or shrink the root, the root node is always an + /// Internal256 node. Also, it never has a prefix (that's actually a bit wasteful, incurring one + /// indirection to every lookup) + root: RootPtr, + + writer_attached: AtomicBool, + + epoch: epoch::EpochShared, +} + +unsafe impl Sync for Tree {} +unsafe impl Send for Tree {} + +struct GarbageQueue(VecDeque<(NodePtr, u64)>); + +unsafe impl Sync for GarbageQueue {} +unsafe impl Send for GarbageQueue {} + +impl GarbageQueue { + fn new() -> GarbageQueue { + GarbageQueue(VecDeque::with_capacity(MAX_GARBAGE)) + } + + fn remember_obsolete_node(&mut self, ptr: NodePtr, epoch: u64) { + self.0.push_front((ptr, epoch)); + } + + fn next_obsolete(&mut self, cutoff_epoch: u64) -> Option> { + if let Some(back) = self.0.back() { + if back.1 < cutoff_epoch { + return Some(self.0.pop_back().unwrap().0); + } + } + None + } +} + +/// Struct created at postmaster startup +pub struct TreeInitStruct<'t, K: Key, V: Value, A: ArtAllocator> { + tree: &'t Tree, + + allocator: &'t A, + + phantom_key: PhantomData, +} + +/// The worker process has a reference to this. 
The write operations are only safe +/// from the worker process +pub struct TreeWriteAccess<'t, K: Key, V: Value, A: ArtAllocator> +where + K: Key, + V: Value, +{ + tree: &'t Tree, + + pub allocator: &'t A, + + epoch_handle: epoch::LocalHandle<'t>, + + phantom_key: PhantomData, + + /// Obsolete nodes that cannot be recycled until their epoch expires. + garbage: spin::Mutex>, +} + +/// The backends have a reference to this. It cannot be used to modify the tree +pub struct TreeReadAccess<'t, K: Key, V: Value> +where + K: Key, + V: Value, +{ + tree: &'t Tree, + + epoch_handle: epoch::LocalHandle<'t>, + + phantom_key: PhantomData, +} + +impl<'t, K: Key, V: Value, A: ArtAllocator> TreeInitStruct<'t, K, V, A> { + pub fn new(allocator: &'t A) -> TreeInitStruct<'t, K, V, A> { + let tree_ptr = allocator.alloc_tree(); + let tree_ptr = NonNull::new(tree_ptr).expect("out of memory"); + let init = Tree { + root: algorithm::new_root(allocator).expect("out of memory"), + writer_attached: AtomicBool::new(false), + epoch: epoch::EpochShared::new(), + }; + unsafe { tree_ptr.write(init) }; + + TreeInitStruct { + tree: unsafe { tree_ptr.as_ref() }, + allocator, + phantom_key: PhantomData, + } + } + + pub fn attach_writer(self) -> TreeWriteAccess<'t, K, V, A> { + let previously_attached = self.tree.writer_attached.swap(true, Ordering::Relaxed); + if previously_attached { + panic!("writer already attached"); + } + TreeWriteAccess { + tree: self.tree, + allocator: self.allocator, + phantom_key: PhantomData, + epoch_handle: self.tree.epoch.register(), + garbage: spin::Mutex::new(GarbageQueue::new()), + } + } + + pub fn attach_reader(self) -> TreeReadAccess<'t, K, V> { + TreeReadAccess { + tree: self.tree, + phantom_key: PhantomData, + epoch_handle: self.tree.epoch.register(), + } + } +} + +impl<'t, K: Key, V: Value, A: ArtAllocator> TreeWriteAccess<'t, K, V, A> { + pub fn start_write<'g>(&'t self) -> TreeWriteGuard<'g, K, V, A> + where + 't: 'g, + { + TreeWriteGuard { + tree_writer: self, + epoch_pin: self.epoch_handle.pin(), + phantom_key: PhantomData, + created_garbage: false, + } + } + + pub fn start_read(&'t self) -> TreeReadGuard<'t, K, V> { + TreeReadGuard { + tree: self.tree, + epoch_pin: self.epoch_handle.pin(), + phantom_key: PhantomData, + } + } +} + +impl<'t, K: Key, V: Value> TreeReadAccess<'t, K, V> { + pub fn start_read(&'t self) -> TreeReadGuard<'t, K, V> { + TreeReadGuard { + tree: self.tree, + epoch_pin: self.epoch_handle.pin(), + phantom_key: PhantomData, + } + } +} + +pub struct TreeReadGuard<'e, K, V> +where + K: Key, + V: Value, +{ + tree: &'e Tree, + + epoch_pin: EpochPin<'e>, + phantom_key: PhantomData, +} + +impl<'e, K: Key, V: Value> TreeReadGuard<'e, K, V> { + pub fn get(&'e self, key: &K) -> Option<&'e V> { + algorithm::search(key, self.tree.root, &self.epoch_pin) + } +} + +pub struct TreeWriteGuard<'e, K, V, A> +where + K: Key, + V: Value, + A: ArtAllocator, +{ + tree_writer: &'e TreeWriteAccess<'e, K, V, A>, + + epoch_pin: EpochPin<'e>, + phantom_key: PhantomData, + + created_garbage: bool, +} + +pub enum UpdateAction { + Nothing, + Insert(V), + Remove, +} + +impl<'e, K: Key, V: Value, A: ArtAllocator> TreeWriteGuard<'e, K, V, A> { + /// Get a value + pub fn get(&'e mut self, key: &K) -> Option<&'e V> { + algorithm::search(key, self.tree_writer.tree.root, &self.epoch_pin) + } + + /// Insert a value + pub fn insert(self, key: &K, value: V) -> Result { + let mut success = None; + + self.update_with_fn(key, |existing| { + if existing.is_some() { + success = Some(false); + 
UpdateAction::Nothing + } else { + success = Some(true); + UpdateAction::Insert(value) + } + })?; + Ok(success.expect("value_fn not called")) + } + + /// Remove value. Returns true if it existed + pub fn remove(self, key: &K) -> bool { + let mut result = false; + // FIXME: It's not clear if OOM is expected while removing. It seems + // not nice, but shrinking a node can OOM. Then again, we could opt + // to not shrink a node if we cannot allocate, to live a little longer. + self.update_with_fn(key, |existing| match existing { + Some(_) => { + result = true; + UpdateAction::Remove + } + None => UpdateAction::Nothing, + }) + .expect("out of memory while removing"); + result + } + + /// Try to remove value and return the old value. + pub fn remove_and_return(self, key: &K) -> Option + where + V: Clone, + { + let mut old = None; + self.update_with_fn(key, |existing| { + old = existing.cloned(); + UpdateAction::Remove + }) + .expect("out of memory while removing"); + old + } + + /// Update key using the given function. All the other modifying operations are based on this. + /// + /// The function is passed a reference to the existing value, if any. If the function + /// returns None, the value is removed from the tree (or if there was no existing value, + /// does nothing). If the function returns Some, the existing value is replaced, of if there + /// was no existing value, it is inserted. FIXME: update comment + pub fn update_with_fn(mut self, key: &K, value_fn: F) -> Result<(), OutOfMemoryError> + where + F: FnOnce(Option<&V>) -> UpdateAction, + { + algorithm::update_fn(key, value_fn, self.tree_writer.tree.root, &mut self)?; + + if self.created_garbage { + let _ = self.collect_garbage(); + } + Ok(()) + } + + fn remember_obsolete_node(&mut self, ptr: NodePtr) { + self.tree_writer + .garbage + .lock() + .remember_obsolete_node(ptr, self.epoch_pin.epoch); + self.created_garbage = true; + } + + // returns number of nodes recycled + fn collect_garbage(&self) -> usize { + self.tree_writer.tree.epoch.advance(); + self.tree_writer.tree.epoch.broadcast(); + + let cutoff_epoch = self.tree_writer.tree.epoch.get_oldest(); + + let mut result = 0; + let mut garbage_queue = self.tree_writer.garbage.lock(); + while let Some(ptr) = garbage_queue.next_obsolete(cutoff_epoch) { + ptr.deallocate(self.tree_writer.allocator); + result += 1; + } + result + } +} + +pub struct TreeIterator +where + K: Key + for<'a> From<&'a [u8]>, +{ + done: bool, + pub next_key: Vec, + max_key: Option>, + + phantom_key: PhantomData, +} + +impl TreeIterator +where + K: Key + for<'a> From<&'a [u8]>, +{ + pub fn new_wrapping() -> TreeIterator { + TreeIterator { + done: false, + next_key: vec![0; K::KEY_LEN], + max_key: None, + phantom_key: PhantomData, + } + } + + pub fn new(range: &std::ops::Range) -> TreeIterator { + let result = TreeIterator { + done: false, + next_key: Vec::from(range.start.as_bytes()), + max_key: Some(Vec::from(range.end.as_bytes())), + phantom_key: PhantomData, + }; + assert_eq!(result.next_key.len(), K::KEY_LEN); + assert_eq!(result.max_key.as_ref().unwrap().len(), K::KEY_LEN); + + result + } + + pub fn next<'g, V>(&mut self, read_guard: &'g TreeReadGuard<'g, K, V>) -> Option<(K, &'g V)> + where + V: Value, + { + if self.done { + return None; + } + + let mut wrapped_around = false; + loop { + assert_eq!(self.next_key.len(), K::KEY_LEN); + if let Some((k, v)) = + algorithm::iter_next(&self.next_key, read_guard.tree.root, &read_guard.epoch_pin) + { + assert_eq!(k.len(), K::KEY_LEN); + 
assert_eq!(self.next_key.len(), K::KEY_LEN); + + // Check if we reached the end of the range + if let Some(max_key) = &self.max_key { + if k.as_slice() >= max_key.as_slice() { + self.done = true; + break None; + } + } + + // increment the key + self.next_key = k.clone(); + increment_key(self.next_key.as_mut_slice()); + let k = k.as_slice().into(); + + break Some((k, v)); + } else { + if self.max_key.is_some() { + self.done = true; + } else { + // Start from beginning + if !wrapped_around { + for i in 0..K::KEY_LEN { + self.next_key[i] = 0; + } + wrapped_around = true; + continue; + } else { + // The tree is completely empty + // FIXME: perhaps we should remember the starting point instead. + // Currently this will scan some ranges twice. + break None; + } + } + break None; + } + } + } +} + +fn increment_key(key: &mut [u8]) -> bool { + for i in (0..key.len()).rev() { + let (byte, overflow) = key[i].overflowing_add(1); + key[i] = byte; + if !overflow { + return false; + } + } + true +} + +// Debugging functions +impl<'e, K: Key, V: Value + Debug, A: ArtAllocator> TreeWriteGuard<'e, K, V, A> { + pub fn dump(&mut self, dst: &mut dyn std::io::Write) { + algorithm::dump_tree(self.tree_writer.tree.root, &self.epoch_pin, dst) + } +} +impl<'e, K: Key, V: Value + Debug> TreeReadGuard<'e, K, V> { + pub fn dump(&mut self, dst: &mut dyn std::io::Write) { + algorithm::dump_tree(self.tree.root, &self.epoch_pin, dst) + } +} +impl<'e, K: Key, V: Value> TreeWriteAccess<'e, K, V, ArtMultiSlabAllocator<'e, V>> { + pub fn get_statistics(&self) -> ArtTreeStatistics { + self.allocator.get_statistics(); + ArtTreeStatistics { + blocks: self.allocator.inner.block_allocator.get_statistics(), + slabs: self.allocator.get_statistics(), + epoch: self.tree.epoch.get_current(), + oldest_epoch: self.tree.epoch.get_oldest(), + num_garbage: self.garbage.lock().0.len() as u64, + } + } +} + +#[derive(Clone, Debug)] +pub struct ArtTreeStatistics { + pub blocks: allocator::block::BlockAllocatorStats, + pub slabs: allocator::ArtMultiSlabStats, + + pub epoch: u64, + pub oldest_epoch: u64, + pub num_garbage: u64, +} diff --git a/libs/neonart/src/tests.rs b/libs/neonart/src/tests.rs new file mode 100644 index 0000000000..2afe41517c --- /dev/null +++ b/libs/neonart/src/tests.rs @@ -0,0 +1,236 @@ +use std::collections::BTreeMap; +use std::collections::HashSet; +use std::fmt::{Debug, Formatter}; +use std::sync::atomic::{AtomicUsize, Ordering}; + +use crate::ArtAllocator; +use crate::ArtMultiSlabAllocator; +use crate::TreeInitStruct; +use crate::TreeIterator; +use crate::TreeWriteAccess; +use crate::UpdateAction; + +use crate::{Key, Value}; + +use rand::Rng; +use rand::seq::SliceRandom; +use rand_distr::Zipf; + +const TEST_KEY_LEN: usize = 16; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +struct TestKey([u8; TEST_KEY_LEN]); + +impl TestKey { + const MIN: TestKey = TestKey([0; TEST_KEY_LEN]); + const MAX: TestKey = TestKey([u8::MAX; TEST_KEY_LEN]); +} + +impl Key for TestKey { + const KEY_LEN: usize = TEST_KEY_LEN; + fn as_bytes(&self) -> &[u8] { + &self.0 + } +} + +impl From<&TestKey> for u128 { + fn from(val: &TestKey) -> u128 { + u128::from_be_bytes(val.0) + } +} + +impl From for TestKey { + fn from(val: u128) -> TestKey { + TestKey(val.to_be_bytes()) + } +} + +impl<'a> From<&'a [u8]> for TestKey { + fn from(bytes: &'a [u8]) -> TestKey { + TestKey(bytes.try_into().unwrap()) + } +} + +impl Value for usize {} + +fn test_inserts + Copy>(keys: &[K]) { + const MEM_SIZE: usize = 10000000; + let mut area = 
Box::new_uninit_slice(MEM_SIZE); + + let allocator = ArtMultiSlabAllocator::new(&mut area); + + let init_struct = TreeInitStruct::::new(allocator); + let tree_writer = init_struct.attach_writer(); + + for (idx, k) in keys.iter().enumerate() { + let w = tree_writer.start_write(); + let res = w.insert(&(*k).into(), idx); + assert!(res.is_ok()); + } + + for (idx, k) in keys.iter().enumerate() { + let r = tree_writer.start_read(); + let value = r.get(&(*k).into()); + assert_eq!(value, Some(idx).as_ref()); + } + + eprintln!("stats: {:?}", tree_writer.get_statistics()); +} + +#[test] +fn dense() { + // This exercises splitting a node with prefix + let keys: &[u128] = &[0, 1, 2, 3, 256]; + test_inserts(keys); + + // Dense keys + let mut keys: Vec = (0..10000).collect(); + test_inserts(&keys); + + // Do the same in random orders + for _ in 1..10 { + keys.shuffle(&mut rand::rng()); + test_inserts(&keys); + } +} + +#[test] +fn sparse() { + // sparse keys + let mut keys: Vec = Vec::new(); + let mut used_keys = HashSet::new(); + for _ in 0..10000 { + loop { + let key = rand::random::(); + if used_keys.contains(&key) { + continue; + } + used_keys.insert(key); + keys.push(key.into()); + break; + } + } + test_inserts(&keys); +} + +struct TestValue(AtomicUsize); + +impl TestValue { + fn new(val: usize) -> TestValue { + TestValue(AtomicUsize::new(val)) + } + + fn load(&self) -> usize { + self.0.load(Ordering::Relaxed) + } +} + +impl Value for TestValue {} + +impl Clone for TestValue { + fn clone(&self) -> TestValue { + TestValue::new(self.load()) + } +} + +impl Debug for TestValue { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(fmt, "{:?}", self.load()) + } +} + +#[derive(Clone, Debug)] +struct TestOp(TestKey, Option); + +fn apply_op>( + op: &TestOp, + tree: &TreeWriteAccess, + shadow: &mut BTreeMap, +) { + eprintln!("applying op: {op:?}"); + + // apply the change to the shadow tree first + let shadow_existing = if let Some(v) = op.1 { + shadow.insert(op.0, v) + } else { + shadow.remove(&op.0) + }; + + // apply to Art tree + let w = tree.start_write(); + w.update_with_fn(&op.0, |existing| { + assert_eq!(existing.map(TestValue::load), shadow_existing); + + match (existing, op.1) { + (None, None) => UpdateAction::Nothing, + (None, Some(new_val)) => UpdateAction::Insert(TestValue::new(new_val)), + (Some(_old_val), None) => UpdateAction::Remove, + (Some(old_val), Some(new_val)) => { + old_val.0.store(new_val, Ordering::Relaxed); + UpdateAction::Nothing + } + } + }) + .expect("out of memory"); +} + +fn test_iter>( + tree: &TreeWriteAccess, + shadow: &BTreeMap, +) { + let mut shadow_iter = shadow.iter(); + let mut iter = TreeIterator::new(&(TestKey::MIN..TestKey::MAX)); + + loop { + let shadow_item = shadow_iter.next().map(|(k, v)| (*k, *v)); + let r = tree.start_read(); + let item = iter.next(&r); + + if shadow_item != item.map(|(k, v)| (k, v.load())) { + eprintln!("FAIL: iterator returned {item:?}, expected {shadow_item:?}"); + tree.start_read().dump(&mut std::io::stderr()); + + eprintln!("SHADOW:"); + for si in shadow { + eprintln!("key: {:?}, val: {}", si.0, si.1); + } + panic!("FAIL: iterator returned {item:?}, expected {shadow_item:?}"); + } + if item.is_none() { + break; + } + } +} + +#[test] +fn random_ops() { + const MEM_SIZE: usize = 10000000; + let mut area = Box::new_uninit_slice(MEM_SIZE); + + let allocator = ArtMultiSlabAllocator::new(&mut area); + + let init_struct = TreeInitStruct::::new(allocator); + let tree_writer = init_struct.attach_writer(); + + let mut 
shadow: std::collections::BTreeMap = BTreeMap::new(); + + let distribution = Zipf::new(u128::MAX as f64, 1.1).unwrap(); + let mut rng = rand::rng(); + for i in 0..100000 { + let mut key: TestKey = (rng.sample(distribution) as u128).into(); + + if rng.random_bool(0.10) { + key = TestKey::from(u128::from(&key) | 0xffffffff); + } + + let op = TestOp(key, if rng.random_bool(0.75) { Some(i) } else { None }); + + apply_op(&op, &tree_writer, &mut shadow); + + if i % 1000 == 0 { + eprintln!("{i} ops processed"); + eprintln!("stats: {:?}", tree_writer.get_statistics()); + test_iter(&tree_writer, &shadow); + } + } +} diff --git a/libs/pageserver_api/Cargo.toml b/libs/pageserver_api/Cargo.toml index 25f29b8ecd..7accbdabca 100644 --- a/libs/pageserver_api/Cargo.toml +++ b/libs/pageserver_api/Cargo.toml @@ -17,7 +17,9 @@ anyhow.workspace = true bytes.workspace = true byteorder.workspace = true utils.workspace = true -postgres_ffi.workspace = true +postgres_ffi_types.workspace = true +postgres_versioninfo.workspace = true +posthog_client_lite.workspace = true enum-map.workspace = true strum.workspace = true strum_macros.workspace = true @@ -28,12 +30,13 @@ humantime-serde.workspace = true chrono = { workspace = true, features = ["serde"] } itertools.workspace = true storage_broker.workspace = true -camino = {workspace = true, features = ["serde1"]} +camino = { workspace = true, features = ["serde1"] } remote_storage.workspace = true postgres_backend.workspace = true -nix = {workspace = true, optional = true} +nix = { workspace = true, optional = true } reqwest.workspace = true rand.workspace = true +tracing.workspace = true tracing-utils.workspace = true once_cell.workspace = true diff --git a/libs/pageserver_api/src/config.rs b/libs/pageserver_api/src/config.rs index 30b0612082..00d6b61399 100644 --- a/libs/pageserver_api/src/config.rs +++ b/libs/pageserver_api/src/config.rs @@ -4,6 +4,7 @@ use camino::Utf8PathBuf; mod tests; use const_format::formatcp; +use posthog_client_lite::PostHogClientConfig; pub const DEFAULT_PG_LISTEN_PORT: u16 = 64000; pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}"); pub const DEFAULT_HTTP_LISTEN_PORT: u16 = 9898; @@ -12,6 +13,7 @@ pub const DEFAULT_HTTP_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_HTTP_LI pub const DEFAULT_GRPC_LISTEN_PORT: u16 = 51051; // storage-broker already uses 50051 use std::collections::HashMap; +use std::fmt::Display; use std::num::{NonZeroU64, NonZeroUsize}; use std::str::FromStr; use std::time::Duration; @@ -24,16 +26,17 @@ use utils::logging::LogFormat; use crate::models::{ImageCompressionAlgorithm, LsnLease}; // Certain metadata (e.g. externally-addressable name, AZ) is delivered -// as a separate structure. This information is not neeed by the pageserver +// as a separate structure. This information is not needed by the pageserver // itself, it is only used for registering the pageserver with the control // plane and/or storage controller. -// #[derive(PartialEq, Eq, Debug, serde::Serialize, serde::Deserialize)] pub struct NodeMetadata { #[serde(rename = "host")] pub postgres_host: String, #[serde(rename = "port")] pub postgres_port: u16, + pub grpc_host: Option, + pub grpc_port: Option, pub http_host: String, pub http_port: u16, pub https_port: Option, @@ -44,19 +47,81 @@ pub struct NodeMetadata { pub other: HashMap, } -/// PostHog integration config. 
+impl Display for NodeMetadata { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "postgresql://{}:{} ", + self.postgres_host, self.postgres_port + )?; + if let Some(grpc_host) = &self.grpc_host { + let grpc_port = self.grpc_port.unwrap_or_default(); + write!(f, "grpc://{grpc_host}:{grpc_port} ")?; + } + write!(f, "http://{}:{} ", self.http_host, self.http_port)?; + write!(f, "other:{:?}", self.other)?; + Ok(()) + } +} + +/// PostHog integration config. This is used in pageserver, storcon, and neon_local. +/// Ensure backward compatibility when adding new fields. #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct PostHogConfig { /// PostHog project ID - pub project_id: String, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub project_id: Option, /// Server-side (private) API key - pub server_api_key: String, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub server_api_key: Option, /// Client-side (public) API key - pub client_api_key: String, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub client_api_key: Option, /// Private API URL - pub private_api_url: String, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub private_api_url: Option, /// Public API URL - pub public_api_url: String, + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + pub public_api_url: Option, + /// Refresh interval for the feature flag spec. + /// The storcon will push the feature flag spec to the pageserver. If the pageserver does not receive + /// the spec for `refresh_interval`, it will fetch the spec from the PostHog API. + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "humantime_serde")] + pub refresh_interval: Option, +} + +impl PostHogConfig { + pub fn try_into_posthog_config(self) -> Result { + let Some(project_id) = self.project_id else { + return Err("project_id is required"); + }; + let Some(server_api_key) = self.server_api_key else { + return Err("server_api_key is required"); + }; + let Some(client_api_key) = self.client_api_key else { + return Err("client_api_key is required"); + }; + let Some(private_api_url) = self.private_api_url else { + return Err("private_api_url is required"); + }; + let Some(public_api_url) = self.public_api_url else { + return Err("public_api_url is required"); + }; + Ok(PostHogClientConfig { + project_id, + server_api_key, + client_api_key, + private_api_url, + public_api_url, + }) + } } /// `pageserver.toml` @@ -337,17 +402,26 @@ pub struct TimelineImportConfig { pub struct BasebackupCacheConfig { #[serde(with = "humantime_serde")] pub cleanup_period: Duration, - // FIXME: Support max_size_bytes. - // pub max_size_bytes: usize, - pub max_size_entries: i64, + /// Maximum total size of basebackup cache entries on disk in bytes. + /// The cache may slightly exceed this limit because we do not know + /// the exact size of the cache entry untill it's written to disk. + pub max_total_size_bytes: u64, + // TODO(diko): support max_entry_size_bytes. + // pub max_entry_size_bytes: u64, + pub max_size_entries: usize, + /// Size of the channel used to send prepare requests to the basebackup cache worker. + /// If exceeded, new prepare requests will be dropped. 
+ pub prepare_channel_size: usize, } impl Default for BasebackupCacheConfig { fn default() -> Self { Self { cleanup_period: Duration::from_secs(60), - // max_size_bytes: 1024 * 1024 * 1024, // 1 GiB - max_size_entries: 1000, + max_total_size_bytes: 1024 * 1024 * 1024, // 1 GiB + // max_entry_size_bytes: 16 * 1024 * 1024, // 16 MiB + max_size_entries: 10000, + prepare_channel_size: 100, } } } @@ -792,7 +866,7 @@ pub mod tenant_conf_defaults { // By default ingest enough WAL for two new L0 layers before checking if new image // image layers should be created. pub const DEFAULT_IMAGE_LAYER_CREATION_CHECK_THRESHOLD: u8 = 2; - pub const DEFAULT_GC_COMPACTION_ENABLED: bool = false; + pub const DEFAULT_GC_COMPACTION_ENABLED: bool = true; pub const DEFAULT_GC_COMPACTION_VERIFICATION: bool = true; pub const DEFAULT_GC_COMPACTION_INITIAL_THRESHOLD_KB: u64 = 5 * 1024 * 1024; // 5GB pub const DEFAULT_GC_COMPACTION_RATIO_PERCENT: u64 = 100; diff --git a/libs/pageserver_api/src/config/tests.rs b/libs/pageserver_api/src/config/tests.rs index 9e61873273..7137df969a 100644 --- a/libs/pageserver_api/src/config/tests.rs +++ b/libs/pageserver_api/src/config/tests.rs @@ -14,6 +14,8 @@ fn test_node_metadata_v1_backward_compatibilty() { NodeMetadata { postgres_host: "localhost".to_string(), postgres_port: 23, + grpc_host: None, + grpc_port: None, http_host: "localhost".to_string(), http_port: 42, https_port: None, @@ -37,6 +39,35 @@ fn test_node_metadata_v2_backward_compatibilty() { NodeMetadata { postgres_host: "localhost".to_string(), postgres_port: 23, + grpc_host: None, + grpc_port: None, + http_host: "localhost".to_string(), + http_port: 42, + https_port: Some(123), + other: HashMap::new(), + } + ) +} + +#[test] +fn test_node_metadata_v3_backward_compatibilty() { + let v3 = serde_json::to_vec(&serde_json::json!({ + "host": "localhost", + "port": 23, + "grpc_host": "localhost", + "grpc_port": 51, + "http_host": "localhost", + "http_port": 42, + "https_port": 123, + })); + + assert_eq!( + serde_json::from_slice::(&v3.unwrap()).unwrap(), + NodeMetadata { + postgres_host: "localhost".to_string(), + postgres_port: 23, + grpc_host: Some("localhost".to_string()), + grpc_port: Some(51), http_host: "localhost".to_string(), http_port: 42, https_port: Some(123), diff --git a/libs/pageserver_api/src/controller_api.rs b/libs/pageserver_api/src/controller_api.rs index c5b49edba0..a8c7083b17 100644 --- a/libs/pageserver_api/src/controller_api.rs +++ b/libs/pageserver_api/src/controller_api.rs @@ -52,6 +52,8 @@ pub struct NodeRegisterRequest { pub listen_pg_addr: String, pub listen_pg_port: u16, + pub listen_grpc_addr: Option, + pub listen_grpc_port: Option, pub listen_http_addr: String, pub listen_http_port: u16, @@ -101,6 +103,8 @@ pub struct TenantLocateResponseShard { pub listen_pg_addr: String, pub listen_pg_port: u16, + pub listen_grpc_addr: Option, + pub listen_grpc_port: Option, pub listen_http_addr: String, pub listen_http_port: u16, @@ -152,6 +156,8 @@ pub struct NodeDescribeResponse { pub listen_pg_addr: String, pub listen_pg_port: u16, + pub listen_grpc_addr: Option, + pub listen_grpc_port: Option, } #[derive(Serialize, Deserialize, Debug)] @@ -344,6 +350,35 @@ impl Default for ShardSchedulingPolicy { } } +#[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)] +pub enum NodeLifecycle { + Active, + Deleted, +} + +impl FromStr for NodeLifecycle { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + match s { + "active" => Ok(Self::Active), + "deleted" => Ok(Self::Deleted), + _ => 
Err(anyhow::anyhow!("Unknown node lifecycle '{s}'")), + } + } +} + +impl From for String { + fn from(value: NodeLifecycle) -> String { + use NodeLifecycle::*; + match value { + Active => "active", + Deleted => "deleted", + } + .to_string() + } +} + #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)] pub enum NodeSchedulingPolicy { Active, @@ -351,6 +386,7 @@ pub enum NodeSchedulingPolicy { Pause, PauseForRestart, Draining, + Deleting, } impl FromStr for NodeSchedulingPolicy { @@ -363,6 +399,7 @@ impl FromStr for NodeSchedulingPolicy { "pause" => Ok(Self::Pause), "pause_for_restart" => Ok(Self::PauseForRestart), "draining" => Ok(Self::Draining), + "deleting" => Ok(Self::Deleting), _ => Err(anyhow::anyhow!("Unknown scheduling state '{s}'")), } } @@ -377,6 +414,7 @@ impl From for String { Pause => "pause", PauseForRestart => "pause_for_restart", Draining => "draining", + Deleting => "deleting", } .to_string() } @@ -385,6 +423,7 @@ impl From for String { #[derive(Serialize, Deserialize, Clone, Copy, Eq, PartialEq, Debug)] pub enum SkSchedulingPolicy { Active, + Activating, Pause, Decomissioned, } @@ -395,6 +434,7 @@ impl FromStr for SkSchedulingPolicy { fn from_str(s: &str) -> Result { Ok(match s { "active" => Self::Active, + "activating" => Self::Activating, "pause" => Self::Pause, "decomissioned" => Self::Decomissioned, _ => { @@ -411,6 +451,7 @@ impl From for String { use SkSchedulingPolicy::*; match value { Active => "active", + Activating => "activating", Pause => "pause", Decomissioned => "decomissioned", } @@ -511,6 +552,11 @@ pub struct TimelineImportRequest { pub sk_set: Vec, } +#[derive(serde::Serialize, serde::Deserialize, Clone)] +pub struct TimelineSafekeeperMigrateRequest { + pub new_sk_set: Vec, +} + #[cfg(test)] mod test { use serde_json; @@ -542,8 +588,7 @@ mod test { let err = serde_json::from_value::(create_request).unwrap_err(); assert!( err.to_string().contains("unknown field `unknown_field`"), - "expect unknown field `unknown_field` error, got: {}", - err + "expect unknown field `unknown_field` error, got: {err}" ); } diff --git a/libs/pageserver_api/src/key.rs b/libs/pageserver_api/src/key.rs index c14975167b..102bbee879 100644 --- a/libs/pageserver_api/src/key.rs +++ b/libs/pageserver_api/src/key.rs @@ -4,8 +4,8 @@ use std::ops::Range; use anyhow::{Result, bail}; use byteorder::{BE, ByteOrder}; use bytes::Bytes; -use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; -use postgres_ffi::{Oid, RepOriginId}; +use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; +use postgres_ffi_types::{Oid, RepOriginId}; use serde::{Deserialize, Serialize}; use utils::const_assert; @@ -194,7 +194,7 @@ impl Key { /// will be rejected on the write path. #[allow(dead_code)] pub fn is_valid_key_on_write_path_strong(&self) -> bool { - use postgres_ffi::pg_constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID}; + use postgres_ffi_types::constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID}; if !self.is_i128_representable() { return false; } diff --git a/libs/pageserver_api/src/keyspace.rs b/libs/pageserver_api/src/keyspace.rs index 79e3ef553b..10a242e13b 100644 --- a/libs/pageserver_api/src/keyspace.rs +++ b/libs/pageserver_api/src/keyspace.rs @@ -1,7 +1,6 @@ use std::ops::Range; use itertools::Itertools; -use postgres_ffi::BLCKSZ; use crate::key::Key; use crate::shard::{ShardCount, ShardIdentity}; @@ -269,9 +268,13 @@ impl KeySpace { /// Partition a key space into roughly chunks of roughly 'target_size' bytes /// in each partition. 
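A hedged sketch of a call site for the changed partition() signature below: the caller now passes the block size explicitly (8192 bytes for a standard Postgres BLCKSZ) where the old code assumed postgres_ffi::BLCKSZ internally; the variable names are illustrative.

    // Illustrative: `keyspace` and `shard_identity` are assumed to be in scope.
    let target_size: u64 = 128 * 1024 * 1024; // aim for ~128 MiB per partition
    let partitioning = keyspace.partition(&shard_identity, target_size, 8192);
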
/// - pub fn partition(&self, shard_identity: &ShardIdentity, target_size: u64) -> KeyPartitioning { - // Assume that each value is 8k in size. - let target_nblocks = (target_size / BLCKSZ as u64) as u32; + pub fn partition( + &self, + shard_identity: &ShardIdentity, + target_size: u64, + block_size: u64, + ) -> KeyPartitioning { + let target_nblocks = (target_size / block_size) as u32; let mut parts = Vec::new(); let mut current_part = Vec::new(); @@ -331,8 +334,7 @@ impl KeySpace { std::cmp::max(range.start, prev.start) < std::cmp::min(range.end, prev.end); assert!( !overlap, - "Attempt to merge ovelapping keyspaces: {:?} overlaps {:?}", - prev, range + "Attempt to merge ovelapping keyspaces: {prev:?} overlaps {range:?}" ); } @@ -1101,7 +1103,7 @@ mod tests { // total range contains at least one shard-local page let all_nonzero = fragments.iter().all(|f| f.0 > 0); if !all_nonzero { - eprintln!("Found a zero-length fragment: {:?}", fragments); + eprintln!("Found a zero-length fragment: {fragments:?}"); } assert!(all_nonzero); } else { diff --git a/libs/pageserver_api/src/lib.rs b/libs/pageserver_api/src/lib.rs index ff705e79cd..52aed7a2c2 100644 --- a/libs/pageserver_api/src/lib.rs +++ b/libs/pageserver_api/src/lib.rs @@ -5,11 +5,10 @@ pub mod controller_api; pub mod key; pub mod keyspace; pub mod models; -pub mod record; +pub mod pagestream_api; pub mod reltag; pub mod shard; /// Public API types pub mod upcall_api; -pub mod value; pub mod config; diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 881f24b86c..16545364c1 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -5,16 +5,13 @@ pub mod utilization; use core::ops::Range; use std::collections::HashMap; use std::fmt::Display; -use std::io::{BufRead, Read}; use std::num::{NonZeroU32, NonZeroU64, NonZeroUsize}; use std::str::FromStr; use std::time::{Duration, SystemTime}; -use byteorder::{BigEndian, ReadBytesExt}; -use bytes::{Buf, BufMut, Bytes, BytesMut}; #[cfg(feature = "testing")] use camino::Utf8PathBuf; -use postgres_ffi::BLCKSZ; +use postgres_versioninfo::PgMajorVersion; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::serde_as; pub use utilization::PageserverUtilization; @@ -24,8 +21,9 @@ use utils::{completion, serde_system_time}; use crate::config::Ratio; use crate::key::{CompactKey, Key}; -use crate::reltag::RelTag; -use crate::shard::{DEFAULT_STRIPE_SIZE, ShardCount, ShardStripeSize, TenantShardId}; +use crate::shard::{ + DEFAULT_STRIPE_SIZE, ShardCount, ShardIdentity, ShardStripeSize, TenantShardId, +}; /// The state of a tenant in this pageserver. /// @@ -403,7 +401,7 @@ pub enum TimelineCreateRequestMode { // inherits the ancestor's pg_version. Earlier code wasn't // using a flattened enum, so, it was an accepted field, and // we continue to accept it by having it here. - pg_version: Option, + pg_version: Option, #[serde(default, skip_serializing_if = "std::ops::Not::not")] read_only: bool, }, @@ -415,7 +413,7 @@ pub enum TimelineCreateRequestMode { Bootstrap { #[serde(default)] existing_initdb_timeline_id: Option, - pg_version: Option, + pg_version: Option, }, } @@ -479,7 +477,7 @@ pub struct TenantShardSplitResponse { } /// Parameters that apply to all shards in a tenant. Used during tenant creation. 
-#[derive(Serialize, Deserialize, Debug)] +#[derive(Clone, Copy, Serialize, Deserialize, Debug)] #[serde(deny_unknown_fields)] pub struct ShardParameters { pub count: ShardCount, @@ -501,6 +499,15 @@ impl Default for ShardParameters { } } +impl From for ShardParameters { + fn from(identity: ShardIdentity) -> Self { + Self { + count: identity.count, + stripe_size: identity.stripe_size, + } + } +} + #[derive(Debug, Default, Clone, Eq, PartialEq)] pub enum FieldPatch { Upsert(T), @@ -1187,7 +1194,7 @@ impl Display for ImageCompressionAlgorithm { ImageCompressionAlgorithm::Disabled => write!(f, "disabled"), ImageCompressionAlgorithm::Zstd { level } => { if let Some(level) = level { - write!(f, "zstd({})", level) + write!(f, "zstd({level})") } else { write!(f, "zstd") } @@ -1578,7 +1585,7 @@ pub struct TimelineInfo { pub last_received_msg_lsn: Option, /// the timestamp (in microseconds) of the last received message pub last_received_msg_ts: Option, - pub pg_version: u32, + pub pg_version: PgMajorVersion, pub state: TimelineState, @@ -1907,219 +1914,6 @@ pub struct ScanDisposableKeysResponse { pub not_disposable_count: usize, } -// Wrapped in libpq CopyData -#[derive(PartialEq, Eq, Debug)] -pub enum PagestreamFeMessage { - Exists(PagestreamExistsRequest), - Nblocks(PagestreamNblocksRequest), - GetPage(PagestreamGetPageRequest), - DbSize(PagestreamDbSizeRequest), - GetSlruSegment(PagestreamGetSlruSegmentRequest), - #[cfg(feature = "testing")] - Test(PagestreamTestRequest), -} - -// Wrapped in libpq CopyData -#[derive(Debug, strum_macros::EnumProperty)] -pub enum PagestreamBeMessage { - Exists(PagestreamExistsResponse), - Nblocks(PagestreamNblocksResponse), - GetPage(PagestreamGetPageResponse), - Error(PagestreamErrorResponse), - DbSize(PagestreamDbSizeResponse), - GetSlruSegment(PagestreamGetSlruSegmentResponse), - #[cfg(feature = "testing")] - Test(PagestreamTestResponse), -} - -// Keep in sync with `pagestore_client.h` -#[repr(u8)] -enum PagestreamFeMessageTag { - Exists = 0, - Nblocks = 1, - GetPage = 2, - DbSize = 3, - GetSlruSegment = 4, - /* future tags above this line */ - /// For testing purposes, not available in production. - #[cfg(feature = "testing")] - Test = 99, -} - -// Keep in sync with `pagestore_client.h` -#[repr(u8)] -enum PagestreamBeMessageTag { - Exists = 100, - Nblocks = 101, - GetPage = 102, - Error = 103, - DbSize = 104, - GetSlruSegment = 105, - /* future tags above this line */ - /// For testing purposes, not available in production. 
- #[cfg(feature = "testing")] - Test = 199, -} - -impl TryFrom for PagestreamFeMessageTag { - type Error = u8; - fn try_from(value: u8) -> Result { - match value { - 0 => Ok(PagestreamFeMessageTag::Exists), - 1 => Ok(PagestreamFeMessageTag::Nblocks), - 2 => Ok(PagestreamFeMessageTag::GetPage), - 3 => Ok(PagestreamFeMessageTag::DbSize), - 4 => Ok(PagestreamFeMessageTag::GetSlruSegment), - #[cfg(feature = "testing")] - 99 => Ok(PagestreamFeMessageTag::Test), - _ => Err(value), - } - } -} - -impl TryFrom for PagestreamBeMessageTag { - type Error = u8; - fn try_from(value: u8) -> Result { - match value { - 100 => Ok(PagestreamBeMessageTag::Exists), - 101 => Ok(PagestreamBeMessageTag::Nblocks), - 102 => Ok(PagestreamBeMessageTag::GetPage), - 103 => Ok(PagestreamBeMessageTag::Error), - 104 => Ok(PagestreamBeMessageTag::DbSize), - 105 => Ok(PagestreamBeMessageTag::GetSlruSegment), - #[cfg(feature = "testing")] - 199 => Ok(PagestreamBeMessageTag::Test), - _ => Err(value), - } - } -} - -// A GetPage request contains two LSN values: -// -// request_lsn: Get the page version at this point in time. Lsn::Max is a special value that means -// "get the latest version present". It's used by the primary server, which knows that no one else -// is writing WAL. 'not_modified_since' must be set to a proper value even if request_lsn is -// Lsn::Max. Standby servers use the current replay LSN as the request LSN. -// -// not_modified_since: Hint to the pageserver that the client knows that the page has not been -// modified between 'not_modified_since' and the request LSN. It's always correct to set -// 'not_modified_since equal' to 'request_lsn' (unless Lsn::Max is used as the 'request_lsn'), but -// passing an earlier LSN can speed up the request, by allowing the pageserver to process the -// request without waiting for 'request_lsn' to arrive. -// -// The now-defunct V1 interface contained only one LSN, and a boolean 'latest' flag. The V1 interface was -// sufficient for the primary; the 'lsn' was equivalent to the 'not_modified_since' value, and -// 'latest' was set to true. The V2 interface was added because there was no correct way for a -// standby to request a page at a particular non-latest LSN, and also include the -// 'not_modified_since' hint. That led to an awkward choice of either using an old LSN in the -// request, if the standby knows that the page hasn't been modified since, and risk getting an error -// if that LSN has fallen behind the GC horizon, or requesting the current replay LSN, which could -// require the pageserver unnecessarily to wait for the WAL to arrive up to that point. The new V2 -// interface allows sending both LSNs, and let the pageserver do the right thing. There was no -// difference in the responses between V1 and V2. -// -// V3 version of protocol adds request ID to all requests. This request ID is also included in response -// as well as other fields from requests, which allows to verify that we receive response for our request. -// We copy fields from request to response to make checking more reliable: request ID is formed from process ID -// and local counter, so in principle there can be duplicated requests IDs if process PID is reused. 
-// -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum PagestreamProtocolVersion { - V2, - V3, -} - -pub type RequestId = u64; - -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] -pub struct PagestreamRequest { - pub reqid: RequestId, - pub request_lsn: Lsn, - pub not_modified_since: Lsn, -} - -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub struct PagestreamExistsRequest { - pub hdr: PagestreamRequest, - pub rel: RelTag, -} - -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub struct PagestreamNblocksRequest { - pub hdr: PagestreamRequest, - pub rel: RelTag, -} - -#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] -pub struct PagestreamGetPageRequest { - pub hdr: PagestreamRequest, - pub rel: RelTag, - pub blkno: u32, -} - -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub struct PagestreamDbSizeRequest { - pub hdr: PagestreamRequest, - pub dbnode: u32, -} - -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub struct PagestreamGetSlruSegmentRequest { - pub hdr: PagestreamRequest, - pub kind: u8, - pub segno: u32, -} - -#[derive(Debug)] -pub struct PagestreamExistsResponse { - pub req: PagestreamExistsRequest, - pub exists: bool, -} - -#[derive(Debug)] -pub struct PagestreamNblocksResponse { - pub req: PagestreamNblocksRequest, - pub n_blocks: u32, -} - -#[derive(Debug)] -pub struct PagestreamGetPageResponse { - pub req: PagestreamGetPageRequest, - pub page: Bytes, -} - -#[derive(Debug)] -pub struct PagestreamGetSlruSegmentResponse { - pub req: PagestreamGetSlruSegmentRequest, - pub segment: Bytes, -} - -#[derive(Debug)] -pub struct PagestreamErrorResponse { - pub req: PagestreamRequest, - pub message: String, -} - -#[derive(Debug)] -pub struct PagestreamDbSizeResponse { - pub req: PagestreamDbSizeRequest, - pub db_size: i64, -} - -#[cfg(feature = "testing")] -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct PagestreamTestRequest { - pub hdr: PagestreamRequest, - pub batch_key: u64, - pub message: String, -} - -#[cfg(feature = "testing")] -#[derive(Debug)] -pub struct PagestreamTestResponse { - pub req: PagestreamTestRequest, -} - // This is a cut-down version of TenantHistorySize from the pageserver crate, omitting fields // that require pageserver-internal types. It is sufficient to get the total size. #[derive(Serialize, Deserialize, Debug)] @@ -2131,506 +1925,6 @@ pub struct TenantHistorySize { pub size: Option, } -impl PagestreamFeMessage { - /// Serialize a compute -> pageserver message. This is currently only used in testing - /// tools. Always uses protocol version 3. 
- pub fn serialize(&self) -> Bytes { - let mut bytes = BytesMut::new(); - - match self { - Self::Exists(req) => { - bytes.put_u8(PagestreamFeMessageTag::Exists as u8); - bytes.put_u64(req.hdr.reqid); - bytes.put_u64(req.hdr.request_lsn.0); - bytes.put_u64(req.hdr.not_modified_since.0); - bytes.put_u32(req.rel.spcnode); - bytes.put_u32(req.rel.dbnode); - bytes.put_u32(req.rel.relnode); - bytes.put_u8(req.rel.forknum); - } - - Self::Nblocks(req) => { - bytes.put_u8(PagestreamFeMessageTag::Nblocks as u8); - bytes.put_u64(req.hdr.reqid); - bytes.put_u64(req.hdr.request_lsn.0); - bytes.put_u64(req.hdr.not_modified_since.0); - bytes.put_u32(req.rel.spcnode); - bytes.put_u32(req.rel.dbnode); - bytes.put_u32(req.rel.relnode); - bytes.put_u8(req.rel.forknum); - } - - Self::GetPage(req) => { - bytes.put_u8(PagestreamFeMessageTag::GetPage as u8); - bytes.put_u64(req.hdr.reqid); - bytes.put_u64(req.hdr.request_lsn.0); - bytes.put_u64(req.hdr.not_modified_since.0); - bytes.put_u32(req.rel.spcnode); - bytes.put_u32(req.rel.dbnode); - bytes.put_u32(req.rel.relnode); - bytes.put_u8(req.rel.forknum); - bytes.put_u32(req.blkno); - } - - Self::DbSize(req) => { - bytes.put_u8(PagestreamFeMessageTag::DbSize as u8); - bytes.put_u64(req.hdr.reqid); - bytes.put_u64(req.hdr.request_lsn.0); - bytes.put_u64(req.hdr.not_modified_since.0); - bytes.put_u32(req.dbnode); - } - - Self::GetSlruSegment(req) => { - bytes.put_u8(PagestreamFeMessageTag::GetSlruSegment as u8); - bytes.put_u64(req.hdr.reqid); - bytes.put_u64(req.hdr.request_lsn.0); - bytes.put_u64(req.hdr.not_modified_since.0); - bytes.put_u8(req.kind); - bytes.put_u32(req.segno); - } - #[cfg(feature = "testing")] - Self::Test(req) => { - bytes.put_u8(PagestreamFeMessageTag::Test as u8); - bytes.put_u64(req.hdr.reqid); - bytes.put_u64(req.hdr.request_lsn.0); - bytes.put_u64(req.hdr.not_modified_since.0); - bytes.put_u64(req.batch_key); - let message = req.message.as_bytes(); - bytes.put_u64(message.len() as u64); - bytes.put_slice(message); - } - } - - bytes.into() - } - - pub fn parse( - body: &mut R, - protocol_version: PagestreamProtocolVersion, - ) -> anyhow::Result { - // these correspond to the NeonMessageTag enum in pagestore_client.h - // - // TODO: consider using protobuf or serde bincode for less error prone - // serialization. - let msg_tag = body.read_u8()?; - let (reqid, request_lsn, not_modified_since) = match protocol_version { - PagestreamProtocolVersion::V2 => ( - 0, - Lsn::from(body.read_u64::()?), - Lsn::from(body.read_u64::()?), - ), - PagestreamProtocolVersion::V3 => ( - body.read_u64::()?, - Lsn::from(body.read_u64::()?), - Lsn::from(body.read_u64::()?), - ), - }; - - match PagestreamFeMessageTag::try_from(msg_tag) - .map_err(|tag: u8| anyhow::anyhow!("invalid tag {tag}"))? 
- { - PagestreamFeMessageTag::Exists => { - Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - rel: RelTag { - spcnode: body.read_u32::()?, - dbnode: body.read_u32::()?, - relnode: body.read_u32::()?, - forknum: body.read_u8()?, - }, - })) - } - PagestreamFeMessageTag::Nblocks => { - Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - rel: RelTag { - spcnode: body.read_u32::()?, - dbnode: body.read_u32::()?, - relnode: body.read_u32::()?, - forknum: body.read_u8()?, - }, - })) - } - PagestreamFeMessageTag::GetPage => { - Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - rel: RelTag { - spcnode: body.read_u32::()?, - dbnode: body.read_u32::()?, - relnode: body.read_u32::()?, - forknum: body.read_u8()?, - }, - blkno: body.read_u32::()?, - })) - } - PagestreamFeMessageTag::DbSize => { - Ok(PagestreamFeMessage::DbSize(PagestreamDbSizeRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - dbnode: body.read_u32::()?, - })) - } - PagestreamFeMessageTag::GetSlruSegment => Ok(PagestreamFeMessage::GetSlruSegment( - PagestreamGetSlruSegmentRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - kind: body.read_u8()?, - segno: body.read_u32::()?, - }, - )), - #[cfg(feature = "testing")] - PagestreamFeMessageTag::Test => Ok(PagestreamFeMessage::Test(PagestreamTestRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - batch_key: body.read_u64::()?, - message: { - let len = body.read_u64::()?; - let mut buf = vec![0; len as usize]; - body.read_exact(&mut buf)?; - String::from_utf8(buf)? 
- }, - })), - } - } -} - -impl PagestreamBeMessage { - pub fn serialize(&self, protocol_version: PagestreamProtocolVersion) -> Bytes { - let mut bytes = BytesMut::new(); - - use PagestreamBeMessageTag as Tag; - match protocol_version { - PagestreamProtocolVersion::V2 => { - match self { - Self::Exists(resp) => { - bytes.put_u8(Tag::Exists as u8); - bytes.put_u8(resp.exists as u8); - } - - Self::Nblocks(resp) => { - bytes.put_u8(Tag::Nblocks as u8); - bytes.put_u32(resp.n_blocks); - } - - Self::GetPage(resp) => { - bytes.put_u8(Tag::GetPage as u8); - bytes.put(&resp.page[..]) - } - - Self::Error(resp) => { - bytes.put_u8(Tag::Error as u8); - bytes.put(resp.message.as_bytes()); - bytes.put_u8(0); // null terminator - } - Self::DbSize(resp) => { - bytes.put_u8(Tag::DbSize as u8); - bytes.put_i64(resp.db_size); - } - - Self::GetSlruSegment(resp) => { - bytes.put_u8(Tag::GetSlruSegment as u8); - bytes.put_u32((resp.segment.len() / BLCKSZ as usize) as u32); - bytes.put(&resp.segment[..]); - } - - #[cfg(feature = "testing")] - Self::Test(resp) => { - bytes.put_u8(Tag::Test as u8); - bytes.put_u64(resp.req.batch_key); - let message = resp.req.message.as_bytes(); - bytes.put_u64(message.len() as u64); - bytes.put_slice(message); - } - } - } - PagestreamProtocolVersion::V3 => { - match self { - Self::Exists(resp) => { - bytes.put_u8(Tag::Exists as u8); - bytes.put_u64(resp.req.hdr.reqid); - bytes.put_u64(resp.req.hdr.request_lsn.0); - bytes.put_u64(resp.req.hdr.not_modified_since.0); - bytes.put_u32(resp.req.rel.spcnode); - bytes.put_u32(resp.req.rel.dbnode); - bytes.put_u32(resp.req.rel.relnode); - bytes.put_u8(resp.req.rel.forknum); - bytes.put_u8(resp.exists as u8); - } - - Self::Nblocks(resp) => { - bytes.put_u8(Tag::Nblocks as u8); - bytes.put_u64(resp.req.hdr.reqid); - bytes.put_u64(resp.req.hdr.request_lsn.0); - bytes.put_u64(resp.req.hdr.not_modified_since.0); - bytes.put_u32(resp.req.rel.spcnode); - bytes.put_u32(resp.req.rel.dbnode); - bytes.put_u32(resp.req.rel.relnode); - bytes.put_u8(resp.req.rel.forknum); - bytes.put_u32(resp.n_blocks); - } - - Self::GetPage(resp) => { - bytes.put_u8(Tag::GetPage as u8); - bytes.put_u64(resp.req.hdr.reqid); - bytes.put_u64(resp.req.hdr.request_lsn.0); - bytes.put_u64(resp.req.hdr.not_modified_since.0); - bytes.put_u32(resp.req.rel.spcnode); - bytes.put_u32(resp.req.rel.dbnode); - bytes.put_u32(resp.req.rel.relnode); - bytes.put_u8(resp.req.rel.forknum); - bytes.put_u32(resp.req.blkno); - bytes.put(&resp.page[..]) - } - - Self::Error(resp) => { - bytes.put_u8(Tag::Error as u8); - bytes.put_u64(resp.req.reqid); - bytes.put_u64(resp.req.request_lsn.0); - bytes.put_u64(resp.req.not_modified_since.0); - bytes.put(resp.message.as_bytes()); - bytes.put_u8(0); // null terminator - } - Self::DbSize(resp) => { - bytes.put_u8(Tag::DbSize as u8); - bytes.put_u64(resp.req.hdr.reqid); - bytes.put_u64(resp.req.hdr.request_lsn.0); - bytes.put_u64(resp.req.hdr.not_modified_since.0); - bytes.put_u32(resp.req.dbnode); - bytes.put_i64(resp.db_size); - } - - Self::GetSlruSegment(resp) => { - bytes.put_u8(Tag::GetSlruSegment as u8); - bytes.put_u64(resp.req.hdr.reqid); - bytes.put_u64(resp.req.hdr.request_lsn.0); - bytes.put_u64(resp.req.hdr.not_modified_since.0); - bytes.put_u8(resp.req.kind); - bytes.put_u32(resp.req.segno); - bytes.put_u32((resp.segment.len() / BLCKSZ as usize) as u32); - bytes.put(&resp.segment[..]); - } - - #[cfg(feature = "testing")] - Self::Test(resp) => { - bytes.put_u8(Tag::Test as u8); - bytes.put_u64(resp.req.hdr.reqid); - 
bytes.put_u64(resp.req.hdr.request_lsn.0); - bytes.put_u64(resp.req.hdr.not_modified_since.0); - bytes.put_u64(resp.req.batch_key); - let message = resp.req.message.as_bytes(); - bytes.put_u64(message.len() as u64); - bytes.put_slice(message); - } - } - } - } - bytes.into() - } - - pub fn deserialize(buf: Bytes) -> anyhow::Result { - let mut buf = buf.reader(); - let msg_tag = buf.read_u8()?; - - use PagestreamBeMessageTag as Tag; - let ok = - match Tag::try_from(msg_tag).map_err(|tag: u8| anyhow::anyhow!("invalid tag {tag}"))? { - Tag::Exists => { - let reqid = buf.read_u64::()?; - let request_lsn = Lsn(buf.read_u64::()?); - let not_modified_since = Lsn(buf.read_u64::()?); - let rel = RelTag { - spcnode: buf.read_u32::()?, - dbnode: buf.read_u32::()?, - relnode: buf.read_u32::()?, - forknum: buf.read_u8()?, - }; - let exists = buf.read_u8()? != 0; - Self::Exists(PagestreamExistsResponse { - req: PagestreamExistsRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - rel, - }, - exists, - }) - } - Tag::Nblocks => { - let reqid = buf.read_u64::()?; - let request_lsn = Lsn(buf.read_u64::()?); - let not_modified_since = Lsn(buf.read_u64::()?); - let rel = RelTag { - spcnode: buf.read_u32::()?, - dbnode: buf.read_u32::()?, - relnode: buf.read_u32::()?, - forknum: buf.read_u8()?, - }; - let n_blocks = buf.read_u32::()?; - Self::Nblocks(PagestreamNblocksResponse { - req: PagestreamNblocksRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - rel, - }, - n_blocks, - }) - } - Tag::GetPage => { - let reqid = buf.read_u64::()?; - let request_lsn = Lsn(buf.read_u64::()?); - let not_modified_since = Lsn(buf.read_u64::()?); - let rel = RelTag { - spcnode: buf.read_u32::()?, - dbnode: buf.read_u32::()?, - relnode: buf.read_u32::()?, - forknum: buf.read_u8()?, - }; - let blkno = buf.read_u32::()?; - let mut page = vec![0; 8192]; // TODO: use MaybeUninit - buf.read_exact(&mut page)?; - Self::GetPage(PagestreamGetPageResponse { - req: PagestreamGetPageRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - rel, - blkno, - }, - page: page.into(), - }) - } - Tag::Error => { - let reqid = buf.read_u64::()?; - let request_lsn = Lsn(buf.read_u64::()?); - let not_modified_since = Lsn(buf.read_u64::()?); - let mut msg = Vec::new(); - buf.read_until(0, &mut msg)?; - let cstring = std::ffi::CString::from_vec_with_nul(msg)?; - let rust_str = cstring.to_str()?; - Self::Error(PagestreamErrorResponse { - req: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - message: rust_str.to_owned(), - }) - } - Tag::DbSize => { - let reqid = buf.read_u64::()?; - let request_lsn = Lsn(buf.read_u64::()?); - let not_modified_since = Lsn(buf.read_u64::()?); - let dbnode = buf.read_u32::()?; - let db_size = buf.read_i64::()?; - Self::DbSize(PagestreamDbSizeResponse { - req: PagestreamDbSizeRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - dbnode, - }, - db_size, - }) - } - Tag::GetSlruSegment => { - let reqid = buf.read_u64::()?; - let request_lsn = Lsn(buf.read_u64::()?); - let not_modified_since = Lsn(buf.read_u64::()?); - let kind = buf.read_u8()?; - let segno = buf.read_u32::()?; - let n_blocks = buf.read_u32::()?; - let mut segment = vec![0; n_blocks as usize * BLCKSZ as usize]; - buf.read_exact(&mut segment)?; - Self::GetSlruSegment(PagestreamGetSlruSegmentResponse { - req: PagestreamGetSlruSegmentRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - 
not_modified_since, - }, - kind, - segno, - }, - segment: segment.into(), - }) - } - #[cfg(feature = "testing")] - Tag::Test => { - let reqid = buf.read_u64::()?; - let request_lsn = Lsn(buf.read_u64::()?); - let not_modified_since = Lsn(buf.read_u64::()?); - let batch_key = buf.read_u64::()?; - let len = buf.read_u64::()?; - let mut msg = vec![0; len as usize]; - buf.read_exact(&mut msg)?; - let message = String::from_utf8(msg)?; - Self::Test(PagestreamTestResponse { - req: PagestreamTestRequest { - hdr: PagestreamRequest { - reqid, - request_lsn, - not_modified_since, - }, - batch_key, - message, - }, - }) - } - }; - let remaining = buf.into_inner(); - if !remaining.is_empty() { - anyhow::bail!( - "remaining bytes in msg with tag={msg_tag}: {}", - remaining.len() - ); - } - Ok(ok) - } - - pub fn kind(&self) -> &'static str { - match self { - Self::Exists(_) => "Exists", - Self::Nblocks(_) => "Nblocks", - Self::GetPage(_) => "GetPage", - Self::Error(_) => "Error", - Self::DbSize(_) => "DbSize", - Self::GetSlruSegment(_) => "GetSlruSegment", - #[cfg(feature = "testing")] - Self::Test(_) => "Test", - } - } -} - #[derive(Debug, Serialize, Deserialize)] pub struct PageTraceEvent { pub key: CompactKey, @@ -2656,68 +1950,6 @@ mod tests { use super::*; - #[test] - fn test_pagestream() { - // Test serialization/deserialization of PagestreamFeMessage - let messages = vec![ - PagestreamFeMessage::Exists(PagestreamExistsRequest { - hdr: PagestreamRequest { - reqid: 0, - request_lsn: Lsn(4), - not_modified_since: Lsn(3), - }, - rel: RelTag { - forknum: 1, - spcnode: 2, - dbnode: 3, - relnode: 4, - }, - }), - PagestreamFeMessage::Nblocks(PagestreamNblocksRequest { - hdr: PagestreamRequest { - reqid: 0, - request_lsn: Lsn(4), - not_modified_since: Lsn(4), - }, - rel: RelTag { - forknum: 1, - spcnode: 2, - dbnode: 3, - relnode: 4, - }, - }), - PagestreamFeMessage::GetPage(PagestreamGetPageRequest { - hdr: PagestreamRequest { - reqid: 0, - request_lsn: Lsn(4), - not_modified_since: Lsn(3), - }, - rel: RelTag { - forknum: 1, - spcnode: 2, - dbnode: 3, - relnode: 4, - }, - blkno: 7, - }), - PagestreamFeMessage::DbSize(PagestreamDbSizeRequest { - hdr: PagestreamRequest { - reqid: 0, - request_lsn: Lsn(4), - not_modified_since: Lsn(3), - }, - dbnode: 7, - }), - ]; - for msg in messages { - let bytes = msg.serialize(); - let reconstructed = - PagestreamFeMessage::parse(&mut bytes.reader(), PagestreamProtocolVersion::V3) - .unwrap(); - assert!(msg == reconstructed); - } - } - #[test] fn test_tenantinfo_serde() { // Test serialization/deserialization of TenantInfo @@ -2791,8 +2023,7 @@ mod tests { let err = serde_json::from_value::(config_request).unwrap_err(); assert!( err.to_string().contains("unknown field `unknown_field`"), - "expect unknown field `unknown_field` error, got: {}", - err + "expect unknown field `unknown_field` error, got: {err}" ); } diff --git a/libs/pageserver_api/src/pagestream_api.rs b/libs/pageserver_api/src/pagestream_api.rs new file mode 100644 index 0000000000..862da8268a --- /dev/null +++ b/libs/pageserver_api/src/pagestream_api.rs @@ -0,0 +1,798 @@ +//! Rust definitions of the libpq-based pagestream API +//! +//! See also the C implementation of the same API in pgxn/neon/pagestore_client.h + +use std::io::{BufRead, Read}; + +use crate::reltag::RelTag; + +use byteorder::{BigEndian, ReadBytesExt}; +use bytes::{Buf, BufMut, Bytes, BytesMut}; +use utils::lsn::Lsn; + +/// Block size. +/// +/// XXX: We assume 8k block size in the SLRU fetch API. 
It's not great to hardcode +/// that in the protocol, because Postgres supports different block sizes as a compile +/// time option. +const BLCKSZ: usize = 8192; + +// Wrapped in libpq CopyData +#[derive(PartialEq, Eq, Debug)] +pub enum PagestreamFeMessage { + Exists(PagestreamExistsRequest), + Nblocks(PagestreamNblocksRequest), + GetPage(PagestreamGetPageRequest), + DbSize(PagestreamDbSizeRequest), + GetSlruSegment(PagestreamGetSlruSegmentRequest), + #[cfg(feature = "testing")] + Test(PagestreamTestRequest), +} + +// Wrapped in libpq CopyData +#[derive(Debug, strum_macros::EnumProperty)] +pub enum PagestreamBeMessage { + Exists(PagestreamExistsResponse), + Nblocks(PagestreamNblocksResponse), + GetPage(PagestreamGetPageResponse), + Error(PagestreamErrorResponse), + DbSize(PagestreamDbSizeResponse), + GetSlruSegment(PagestreamGetSlruSegmentResponse), + #[cfg(feature = "testing")] + Test(PagestreamTestResponse), +} + +// Keep in sync with `pagestore_client.h` +#[repr(u8)] +enum PagestreamFeMessageTag { + Exists = 0, + Nblocks = 1, + GetPage = 2, + DbSize = 3, + GetSlruSegment = 4, + /* future tags above this line */ + /// For testing purposes, not available in production. + #[cfg(feature = "testing")] + Test = 99, +} + +// Keep in sync with `pagestore_client.h` +#[repr(u8)] +enum PagestreamBeMessageTag { + Exists = 100, + Nblocks = 101, + GetPage = 102, + Error = 103, + DbSize = 104, + GetSlruSegment = 105, + /* future tags above this line */ + /// For testing purposes, not available in production. + #[cfg(feature = "testing")] + Test = 199, +} + +impl TryFrom for PagestreamFeMessageTag { + type Error = u8; + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(PagestreamFeMessageTag::Exists), + 1 => Ok(PagestreamFeMessageTag::Nblocks), + 2 => Ok(PagestreamFeMessageTag::GetPage), + 3 => Ok(PagestreamFeMessageTag::DbSize), + 4 => Ok(PagestreamFeMessageTag::GetSlruSegment), + #[cfg(feature = "testing")] + 99 => Ok(PagestreamFeMessageTag::Test), + _ => Err(value), + } + } +} + +impl TryFrom for PagestreamBeMessageTag { + type Error = u8; + fn try_from(value: u8) -> Result { + match value { + 100 => Ok(PagestreamBeMessageTag::Exists), + 101 => Ok(PagestreamBeMessageTag::Nblocks), + 102 => Ok(PagestreamBeMessageTag::GetPage), + 103 => Ok(PagestreamBeMessageTag::Error), + 104 => Ok(PagestreamBeMessageTag::DbSize), + 105 => Ok(PagestreamBeMessageTag::GetSlruSegment), + #[cfg(feature = "testing")] + 199 => Ok(PagestreamBeMessageTag::Test), + _ => Err(value), + } + } +} + +// A GetPage request contains two LSN values: +// +// request_lsn: Get the page version at this point in time. Lsn::Max is a special value that means +// "get the latest version present". It's used by the primary server, which knows that no one else +// is writing WAL. 'not_modified_since' must be set to a proper value even if request_lsn is +// Lsn::Max. Standby servers use the current replay LSN as the request LSN. +// +// not_modified_since: Hint to the pageserver that the client knows that the page has not been +// modified between 'not_modified_since' and the request LSN. It's always correct to set +// 'not_modified_since equal' to 'request_lsn' (unless Lsn::Max is used as the 'request_lsn'), but +// passing an earlier LSN can speed up the request, by allowing the pageserver to process the +// request without waiting for 'request_lsn' to arrive. +// +// The now-defunct V1 interface contained only one LSN, and a boolean 'latest' flag. 
The V1 interface was +// sufficient for the primary; the 'lsn' was equivalent to the 'not_modified_since' value, and +// 'latest' was set to true. The V2 interface was added because there was no correct way for a +// standby to request a page at a particular non-latest LSN, and also include the +// 'not_modified_since' hint. That led to an awkward choice of either using an old LSN in the +// request, if the standby knows that the page hasn't been modified since, and risk getting an error +// if that LSN has fallen behind the GC horizon, or requesting the current replay LSN, which could +// require the pageserver unnecessarily to wait for the WAL to arrive up to that point. The new V2 +// interface allows sending both LSNs, and let the pageserver do the right thing. There was no +// difference in the responses between V1 and V2. +// +// V3 version of protocol adds request ID to all requests. This request ID is also included in response +// as well as other fields from requests, which allows to verify that we receive response for our request. +// We copy fields from request to response to make checking more reliable: request ID is formed from process ID +// and local counter, so in principle there can be duplicated requests IDs if process PID is reused. +// +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum PagestreamProtocolVersion { + V2, + V3, +} + +pub type RequestId = u64; + +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] +pub struct PagestreamRequest { + pub reqid: RequestId, + pub request_lsn: Lsn, + pub not_modified_since: Lsn, +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub struct PagestreamExistsRequest { + pub hdr: PagestreamRequest, + pub rel: RelTag, +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub struct PagestreamNblocksRequest { + pub hdr: PagestreamRequest, + pub rel: RelTag, +} + +#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)] +pub struct PagestreamGetPageRequest { + pub hdr: PagestreamRequest, + pub rel: RelTag, + pub blkno: u32, +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub struct PagestreamDbSizeRequest { + pub hdr: PagestreamRequest, + pub dbnode: u32, +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub struct PagestreamGetSlruSegmentRequest { + pub hdr: PagestreamRequest, + pub kind: u8, + pub segno: u32, +} + +#[derive(Debug)] +pub struct PagestreamExistsResponse { + pub req: PagestreamExistsRequest, + pub exists: bool, +} + +#[derive(Debug)] +pub struct PagestreamNblocksResponse { + pub req: PagestreamNblocksRequest, + pub n_blocks: u32, +} + +#[derive(Debug)] +pub struct PagestreamGetPageResponse { + pub req: PagestreamGetPageRequest, + pub page: Bytes, +} + +#[derive(Debug)] +pub struct PagestreamGetSlruSegmentResponse { + pub req: PagestreamGetSlruSegmentRequest, + pub segment: Bytes, +} + +#[derive(Debug)] +pub struct PagestreamErrorResponse { + pub req: PagestreamRequest, + pub message: String, +} + +#[derive(Debug)] +pub struct PagestreamDbSizeResponse { + pub req: PagestreamDbSizeRequest, + pub db_size: i64, +} + +#[cfg(feature = "testing")] +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct PagestreamTestRequest { + pub hdr: PagestreamRequest, + pub batch_key: u64, + pub message: String, +} + +#[cfg(feature = "testing")] +#[derive(Debug)] +pub struct PagestreamTestResponse { + pub req: PagestreamTestRequest, +} + +impl PagestreamFeMessage { + /// Serialize a compute -> pageserver message. This is currently only used in testing + /// tools. Always uses protocol version 3. 
+ pub fn serialize(&self) -> Bytes { + let mut bytes = BytesMut::new(); + + match self { + Self::Exists(req) => { + bytes.put_u8(PagestreamFeMessageTag::Exists as u8); + bytes.put_u64(req.hdr.reqid); + bytes.put_u64(req.hdr.request_lsn.0); + bytes.put_u64(req.hdr.not_modified_since.0); + bytes.put_u32(req.rel.spcnode); + bytes.put_u32(req.rel.dbnode); + bytes.put_u32(req.rel.relnode); + bytes.put_u8(req.rel.forknum); + } + + Self::Nblocks(req) => { + bytes.put_u8(PagestreamFeMessageTag::Nblocks as u8); + bytes.put_u64(req.hdr.reqid); + bytes.put_u64(req.hdr.request_lsn.0); + bytes.put_u64(req.hdr.not_modified_since.0); + bytes.put_u32(req.rel.spcnode); + bytes.put_u32(req.rel.dbnode); + bytes.put_u32(req.rel.relnode); + bytes.put_u8(req.rel.forknum); + } + + Self::GetPage(req) => { + bytes.put_u8(PagestreamFeMessageTag::GetPage as u8); + bytes.put_u64(req.hdr.reqid); + bytes.put_u64(req.hdr.request_lsn.0); + bytes.put_u64(req.hdr.not_modified_since.0); + bytes.put_u32(req.rel.spcnode); + bytes.put_u32(req.rel.dbnode); + bytes.put_u32(req.rel.relnode); + bytes.put_u8(req.rel.forknum); + bytes.put_u32(req.blkno); + } + + Self::DbSize(req) => { + bytes.put_u8(PagestreamFeMessageTag::DbSize as u8); + bytes.put_u64(req.hdr.reqid); + bytes.put_u64(req.hdr.request_lsn.0); + bytes.put_u64(req.hdr.not_modified_since.0); + bytes.put_u32(req.dbnode); + } + + Self::GetSlruSegment(req) => { + bytes.put_u8(PagestreamFeMessageTag::GetSlruSegment as u8); + bytes.put_u64(req.hdr.reqid); + bytes.put_u64(req.hdr.request_lsn.0); + bytes.put_u64(req.hdr.not_modified_since.0); + bytes.put_u8(req.kind); + bytes.put_u32(req.segno); + } + #[cfg(feature = "testing")] + Self::Test(req) => { + bytes.put_u8(PagestreamFeMessageTag::Test as u8); + bytes.put_u64(req.hdr.reqid); + bytes.put_u64(req.hdr.request_lsn.0); + bytes.put_u64(req.hdr.not_modified_since.0); + bytes.put_u64(req.batch_key); + let message = req.message.as_bytes(); + bytes.put_u64(message.len() as u64); + bytes.put_slice(message); + } + } + + bytes.into() + } + + pub fn parse( + body: &mut R, + protocol_version: PagestreamProtocolVersion, + ) -> anyhow::Result { + // these correspond to the NeonMessageTag enum in pagestore_client.h + // + // TODO: consider using protobuf or serde bincode for less error prone + // serialization. + let msg_tag = body.read_u8()?; + let (reqid, request_lsn, not_modified_since) = match protocol_version { + PagestreamProtocolVersion::V2 => ( + 0, + Lsn::from(body.read_u64::()?), + Lsn::from(body.read_u64::()?), + ), + PagestreamProtocolVersion::V3 => ( + body.read_u64::()?, + Lsn::from(body.read_u64::()?), + Lsn::from(body.read_u64::()?), + ), + }; + + match PagestreamFeMessageTag::try_from(msg_tag) + .map_err(|tag: u8| anyhow::anyhow!("invalid tag {tag}"))? 
+ { + PagestreamFeMessageTag::Exists => { + Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + rel: RelTag { + spcnode: body.read_u32::()?, + dbnode: body.read_u32::()?, + relnode: body.read_u32::()?, + forknum: body.read_u8()?, + }, + })) + } + PagestreamFeMessageTag::Nblocks => { + Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + rel: RelTag { + spcnode: body.read_u32::()?, + dbnode: body.read_u32::()?, + relnode: body.read_u32::()?, + forknum: body.read_u8()?, + }, + })) + } + PagestreamFeMessageTag::GetPage => { + Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + rel: RelTag { + spcnode: body.read_u32::()?, + dbnode: body.read_u32::()?, + relnode: body.read_u32::()?, + forknum: body.read_u8()?, + }, + blkno: body.read_u32::()?, + })) + } + PagestreamFeMessageTag::DbSize => { + Ok(PagestreamFeMessage::DbSize(PagestreamDbSizeRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + dbnode: body.read_u32::()?, + })) + } + PagestreamFeMessageTag::GetSlruSegment => Ok(PagestreamFeMessage::GetSlruSegment( + PagestreamGetSlruSegmentRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + kind: body.read_u8()?, + segno: body.read_u32::()?, + }, + )), + #[cfg(feature = "testing")] + PagestreamFeMessageTag::Test => Ok(PagestreamFeMessage::Test(PagestreamTestRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + batch_key: body.read_u64::()?, + message: { + let len = body.read_u64::()?; + let mut buf = vec![0; len as usize]; + body.read_exact(&mut buf)?; + String::from_utf8(buf)? 
+ }, + })), + } + } +} + +impl PagestreamBeMessage { + pub fn serialize(&self, protocol_version: PagestreamProtocolVersion) -> Bytes { + let mut bytes = BytesMut::new(); + + use PagestreamBeMessageTag as Tag; + match protocol_version { + PagestreamProtocolVersion::V2 => { + match self { + Self::Exists(resp) => { + bytes.put_u8(Tag::Exists as u8); + bytes.put_u8(resp.exists as u8); + } + + Self::Nblocks(resp) => { + bytes.put_u8(Tag::Nblocks as u8); + bytes.put_u32(resp.n_blocks); + } + + Self::GetPage(resp) => { + bytes.put_u8(Tag::GetPage as u8); + bytes.put(&resp.page[..]) + } + + Self::Error(resp) => { + bytes.put_u8(Tag::Error as u8); + bytes.put(resp.message.as_bytes()); + bytes.put_u8(0); // null terminator + } + Self::DbSize(resp) => { + bytes.put_u8(Tag::DbSize as u8); + bytes.put_i64(resp.db_size); + } + + Self::GetSlruSegment(resp) => { + bytes.put_u8(Tag::GetSlruSegment as u8); + bytes.put_u32((resp.segment.len() / BLCKSZ) as u32); + bytes.put(&resp.segment[..]); + } + + #[cfg(feature = "testing")] + Self::Test(resp) => { + bytes.put_u8(Tag::Test as u8); + bytes.put_u64(resp.req.batch_key); + let message = resp.req.message.as_bytes(); + bytes.put_u64(message.len() as u64); + bytes.put_slice(message); + } + } + } + PagestreamProtocolVersion::V3 => { + match self { + Self::Exists(resp) => { + bytes.put_u8(Tag::Exists as u8); + bytes.put_u64(resp.req.hdr.reqid); + bytes.put_u64(resp.req.hdr.request_lsn.0); + bytes.put_u64(resp.req.hdr.not_modified_since.0); + bytes.put_u32(resp.req.rel.spcnode); + bytes.put_u32(resp.req.rel.dbnode); + bytes.put_u32(resp.req.rel.relnode); + bytes.put_u8(resp.req.rel.forknum); + bytes.put_u8(resp.exists as u8); + } + + Self::Nblocks(resp) => { + bytes.put_u8(Tag::Nblocks as u8); + bytes.put_u64(resp.req.hdr.reqid); + bytes.put_u64(resp.req.hdr.request_lsn.0); + bytes.put_u64(resp.req.hdr.not_modified_since.0); + bytes.put_u32(resp.req.rel.spcnode); + bytes.put_u32(resp.req.rel.dbnode); + bytes.put_u32(resp.req.rel.relnode); + bytes.put_u8(resp.req.rel.forknum); + bytes.put_u32(resp.n_blocks); + } + + Self::GetPage(resp) => { + bytes.put_u8(Tag::GetPage as u8); + bytes.put_u64(resp.req.hdr.reqid); + bytes.put_u64(resp.req.hdr.request_lsn.0); + bytes.put_u64(resp.req.hdr.not_modified_since.0); + bytes.put_u32(resp.req.rel.spcnode); + bytes.put_u32(resp.req.rel.dbnode); + bytes.put_u32(resp.req.rel.relnode); + bytes.put_u8(resp.req.rel.forknum); + bytes.put_u32(resp.req.blkno); + bytes.put(&resp.page[..]) + } + + Self::Error(resp) => { + bytes.put_u8(Tag::Error as u8); + bytes.put_u64(resp.req.reqid); + bytes.put_u64(resp.req.request_lsn.0); + bytes.put_u64(resp.req.not_modified_since.0); + bytes.put(resp.message.as_bytes()); + bytes.put_u8(0); // null terminator + } + Self::DbSize(resp) => { + bytes.put_u8(Tag::DbSize as u8); + bytes.put_u64(resp.req.hdr.reqid); + bytes.put_u64(resp.req.hdr.request_lsn.0); + bytes.put_u64(resp.req.hdr.not_modified_since.0); + bytes.put_u32(resp.req.dbnode); + bytes.put_i64(resp.db_size); + } + + Self::GetSlruSegment(resp) => { + bytes.put_u8(Tag::GetSlruSegment as u8); + bytes.put_u64(resp.req.hdr.reqid); + bytes.put_u64(resp.req.hdr.request_lsn.0); + bytes.put_u64(resp.req.hdr.not_modified_since.0); + bytes.put_u8(resp.req.kind); + bytes.put_u32(resp.req.segno); + bytes.put_u32((resp.segment.len() / BLCKSZ) as u32); + bytes.put(&resp.segment[..]); + } + + #[cfg(feature = "testing")] + Self::Test(resp) => { + bytes.put_u8(Tag::Test as u8); + bytes.put_u64(resp.req.hdr.reqid); + 
bytes.put_u64(resp.req.hdr.request_lsn.0); + bytes.put_u64(resp.req.hdr.not_modified_since.0); + bytes.put_u64(resp.req.batch_key); + let message = resp.req.message.as_bytes(); + bytes.put_u64(message.len() as u64); + bytes.put_slice(message); + } + } + } + } + bytes.into() + } + + pub fn deserialize(buf: Bytes) -> anyhow::Result { + let mut buf = buf.reader(); + let msg_tag = buf.read_u8()?; + + use PagestreamBeMessageTag as Tag; + let ok = + match Tag::try_from(msg_tag).map_err(|tag: u8| anyhow::anyhow!("invalid tag {tag}"))? { + Tag::Exists => { + let reqid = buf.read_u64::()?; + let request_lsn = Lsn(buf.read_u64::()?); + let not_modified_since = Lsn(buf.read_u64::()?); + let rel = RelTag { + spcnode: buf.read_u32::()?, + dbnode: buf.read_u32::()?, + relnode: buf.read_u32::()?, + forknum: buf.read_u8()?, + }; + let exists = buf.read_u8()? != 0; + Self::Exists(PagestreamExistsResponse { + req: PagestreamExistsRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + rel, + }, + exists, + }) + } + Tag::Nblocks => { + let reqid = buf.read_u64::()?; + let request_lsn = Lsn(buf.read_u64::()?); + let not_modified_since = Lsn(buf.read_u64::()?); + let rel = RelTag { + spcnode: buf.read_u32::()?, + dbnode: buf.read_u32::()?, + relnode: buf.read_u32::()?, + forknum: buf.read_u8()?, + }; + let n_blocks = buf.read_u32::()?; + Self::Nblocks(PagestreamNblocksResponse { + req: PagestreamNblocksRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + rel, + }, + n_blocks, + }) + } + Tag::GetPage => { + let reqid = buf.read_u64::()?; + let request_lsn = Lsn(buf.read_u64::()?); + let not_modified_since = Lsn(buf.read_u64::()?); + let rel = RelTag { + spcnode: buf.read_u32::()?, + dbnode: buf.read_u32::()?, + relnode: buf.read_u32::()?, + forknum: buf.read_u8()?, + }; + let blkno = buf.read_u32::()?; + let mut page = vec![0; 8192]; // TODO: use MaybeUninit + buf.read_exact(&mut page)?; + Self::GetPage(PagestreamGetPageResponse { + req: PagestreamGetPageRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + rel, + blkno, + }, + page: page.into(), + }) + } + Tag::Error => { + let reqid = buf.read_u64::()?; + let request_lsn = Lsn(buf.read_u64::()?); + let not_modified_since = Lsn(buf.read_u64::()?); + let mut msg = Vec::new(); + buf.read_until(0, &mut msg)?; + let cstring = std::ffi::CString::from_vec_with_nul(msg)?; + let rust_str = cstring.to_str()?; + Self::Error(PagestreamErrorResponse { + req: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + message: rust_str.to_owned(), + }) + } + Tag::DbSize => { + let reqid = buf.read_u64::()?; + let request_lsn = Lsn(buf.read_u64::()?); + let not_modified_since = Lsn(buf.read_u64::()?); + let dbnode = buf.read_u32::()?; + let db_size = buf.read_i64::()?; + Self::DbSize(PagestreamDbSizeResponse { + req: PagestreamDbSizeRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + dbnode, + }, + db_size, + }) + } + Tag::GetSlruSegment => { + let reqid = buf.read_u64::()?; + let request_lsn = Lsn(buf.read_u64::()?); + let not_modified_since = Lsn(buf.read_u64::()?); + let kind = buf.read_u8()?; + let segno = buf.read_u32::()?; + let n_blocks = buf.read_u32::()?; + let mut segment = vec![0; n_blocks as usize * BLCKSZ]; + buf.read_exact(&mut segment)?; + Self::GetSlruSegment(PagestreamGetSlruSegmentResponse { + req: PagestreamGetSlruSegmentRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + 
not_modified_since, + }, + kind, + segno, + }, + segment: segment.into(), + }) + } + #[cfg(feature = "testing")] + Tag::Test => { + let reqid = buf.read_u64::()?; + let request_lsn = Lsn(buf.read_u64::()?); + let not_modified_since = Lsn(buf.read_u64::()?); + let batch_key = buf.read_u64::()?; + let len = buf.read_u64::()?; + let mut msg = vec![0; len as usize]; + buf.read_exact(&mut msg)?; + let message = String::from_utf8(msg)?; + Self::Test(PagestreamTestResponse { + req: PagestreamTestRequest { + hdr: PagestreamRequest { + reqid, + request_lsn, + not_modified_since, + }, + batch_key, + message, + }, + }) + } + }; + let remaining = buf.into_inner(); + if !remaining.is_empty() { + anyhow::bail!( + "remaining bytes in msg with tag={msg_tag}: {}", + remaining.len() + ); + } + Ok(ok) + } + + pub fn kind(&self) -> &'static str { + match self { + Self::Exists(_) => "Exists", + Self::Nblocks(_) => "Nblocks", + Self::GetPage(_) => "GetPage", + Self::Error(_) => "Error", + Self::DbSize(_) => "DbSize", + Self::GetSlruSegment(_) => "GetSlruSegment", + #[cfg(feature = "testing")] + Self::Test(_) => "Test", + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_pagestream() { + // Test serialization/deserialization of PagestreamFeMessage + let messages = vec![ + PagestreamFeMessage::Exists(PagestreamExistsRequest { + hdr: PagestreamRequest { + reqid: 0, + request_lsn: Lsn(4), + not_modified_since: Lsn(3), + }, + rel: RelTag { + forknum: 1, + spcnode: 2, + dbnode: 3, + relnode: 4, + }, + }), + PagestreamFeMessage::Nblocks(PagestreamNblocksRequest { + hdr: PagestreamRequest { + reqid: 0, + request_lsn: Lsn(4), + not_modified_since: Lsn(4), + }, + rel: RelTag { + forknum: 1, + spcnode: 2, + dbnode: 3, + relnode: 4, + }, + }), + PagestreamFeMessage::GetPage(PagestreamGetPageRequest { + hdr: PagestreamRequest { + reqid: 0, + request_lsn: Lsn(4), + not_modified_since: Lsn(3), + }, + rel: RelTag { + forknum: 1, + spcnode: 2, + dbnode: 3, + relnode: 4, + }, + blkno: 7, + }), + PagestreamFeMessage::DbSize(PagestreamDbSizeRequest { + hdr: PagestreamRequest { + reqid: 0, + request_lsn: Lsn(4), + not_modified_since: Lsn(3), + }, + dbnode: 7, + }), + ]; + for msg in messages { + let bytes = msg.serialize(); + let reconstructed = + PagestreamFeMessage::parse(&mut bytes.reader(), PagestreamProtocolVersion::V3) + .unwrap(); + assert!(msg == reconstructed); + } + } +} diff --git a/libs/pageserver_api/src/reltag.rs b/libs/pageserver_api/src/reltag.rs index e0dd4fdfe8..d0e37dffae 100644 --- a/libs/pageserver_api/src/reltag.rs +++ b/libs/pageserver_api/src/reltag.rs @@ -1,9 +1,9 @@ use std::cmp::Ordering; use std::fmt; -use postgres_ffi::Oid; -use postgres_ffi::pg_constants::GLOBALTABLESPACE_OID; -use postgres_ffi::relfile_utils::{MAIN_FORKNUM, forkname_to_number, forknumber_to_name}; +use postgres_ffi_types::Oid; +use postgres_ffi_types::constants::GLOBALTABLESPACE_OID; +use postgres_ffi_types::forknum::{MAIN_FORKNUM, forkname_to_number, forknumber_to_name}; use serde::{Deserialize, Serialize}; /// diff --git a/libs/pageserver_api/src/shard.rs b/libs/pageserver_api/src/shard.rs index feb59f5070..d6f4cd5e66 100644 --- a/libs/pageserver_api/src/shard.rs +++ b/libs/pageserver_api/src/shard.rs @@ -35,8 +35,9 @@ use std::hash::{Hash, Hasher}; #[doc(inline)] pub use ::utils::shard::*; -use postgres_ffi::relfile_utils::INIT_FORKNUM; +use postgres_ffi_types::forknum::INIT_FORKNUM; use serde::{Deserialize, Serialize}; +use utils::critical; use crate::key::Key; use crate::models::ShardParameters; @@ 
-179,7 +180,7 @@ impl ShardIdentity { /// For use when creating ShardIdentity instances for new shards, where a creation request /// specifies the ShardParameters that apply to all shards. - pub fn from_params(number: ShardNumber, params: &ShardParameters) -> Self { + pub fn from_params(number: ShardNumber, params: ShardParameters) -> Self { Self { number, count: params.count, @@ -188,6 +189,17 @@ impl ShardIdentity { } } + /// Asserts that the given shard identities are equal. Changes to shard parameters will likely + /// result in data corruption. + pub fn assert_equal(&self, other: ShardIdentity) { + if self != &other { + // TODO: for now, we're conservative and just log errors in production. Turn this into a + // real assertion when we're confident it doesn't misfire, and also reject requests that + // attempt to change it with an error response. + critical!("shard identity mismatch: {self:?} != {other:?}"); + } + } + fn is_broken(&self) -> bool { self.layout == LAYOUT_BROKEN } @@ -320,7 +332,11 @@ fn hash_combine(mut a: u32, mut b: u32) -> u32 { /// /// The mapping of key to shard is not stable across changes to ShardCount: this is intentional /// and will be handled at higher levels when shards are split. -fn key_to_shard_number(count: ShardCount, stripe_size: ShardStripeSize, key: &Key) -> ShardNumber { +pub fn key_to_shard_number( + count: ShardCount, + stripe_size: ShardStripeSize, + key: &Key, +) -> ShardNumber { // Fast path for un-sharded tenants or broadcast keys if count < ShardCount(2) || key_is_shard0(key) { return ShardNumber(0); diff --git a/libs/pageserver_api/src/upcall_api.rs b/libs/pageserver_api/src/upcall_api.rs index 4dce5f7817..07cada2eb1 100644 --- a/libs/pageserver_api/src/upcall_api.rs +++ b/libs/pageserver_api/src/upcall_api.rs @@ -9,7 +9,7 @@ use utils::id::{NodeId, TimelineId}; use crate::controller_api::NodeRegisterRequest; use crate::models::{LocationConfigMode, ShardImportStatus}; -use crate::shard::TenantShardId; +use crate::shard::{ShardStripeSize, TenantShardId}; /// Upcall message sent by the pageserver to the configured `control_plane_api` on /// startup. 
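// Hypothetical caller sketch of the sharding helpers touched above; apart from the function
// names, everything comes from this patch (the public ShardParameters fields, the now-public
// key_to_shard_number(), from_params() taking ShardParameters by value, and assert_equal()).
fn route_key(params: ShardParameters, key: &Key) -> ShardNumber {
    // Placement can now be computed outside shard.rs from the tenant-wide parameters alone.
    key_to_shard_number(params.count, params.stripe_size, key)
}

fn verify_shard_identity(local: &ShardIdentity, number: ShardNumber, params: ShardParameters) {
    // Rebuild the identity from the tenant-wide parameters and compare; for now,
    // assert_equal() only logs via critical!() on mismatch rather than panicking.
    local.assert_equal(ShardIdentity::from_params(number, params));
}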
@@ -23,19 +23,13 @@ pub struct ReAttachRequest { pub register: Option, } -fn default_mode() -> LocationConfigMode { - LocationConfigMode::AttachedSingle -} - #[derive(Serialize, Deserialize, Debug)] pub struct ReAttachResponseTenant { pub id: TenantShardId, /// Mandatory if LocationConfigMode is None or set to an Attached* mode pub r#gen: Option, - - /// Default value only for backward compat: this field should be set - #[serde(default = "default_mode")] pub mode: LocationConfigMode, + pub stripe_size: ShardStripeSize, } #[derive(Serialize, Deserialize)] pub struct ReAttachResponse { diff --git a/libs/postgres_backend/src/lib.rs b/libs/postgres_backend/src/lib.rs index 714d8ac403..851d824291 100644 --- a/libs/postgres_backend/src/lib.rs +++ b/libs/postgres_backend/src/lib.rs @@ -78,7 +78,13 @@ pub fn is_expected_io_error(e: &io::Error) -> bool { use io::ErrorKind::*; matches!( e.kind(), - BrokenPipe | ConnectionRefused | ConnectionAborted | ConnectionReset | TimedOut + HostUnreachable + | NetworkUnreachable + | BrokenPipe + | ConnectionRefused + | ConnectionAborted + | ConnectionReset + | TimedOut, ) } @@ -939,7 +945,7 @@ impl PostgresBackendReader { FeMessage::CopyFail => Err(CopyStreamHandlerEnd::CopyFail), FeMessage::Terminate => Err(CopyStreamHandlerEnd::Terminate), _ => Err(CopyStreamHandlerEnd::from(ConnectionError::Protocol( - ProtocolError::Protocol(format!("unexpected message in COPY stream {:?}", msg)), + ProtocolError::Protocol(format!("unexpected message in COPY stream {msg:?}")), ))), }, None => Err(CopyStreamHandlerEnd::EOF), diff --git a/libs/postgres_backend/tests/simple_select.rs b/libs/postgres_backend/tests/simple_select.rs index 75ca123014..23e17799bd 100644 --- a/libs/postgres_backend/tests/simple_select.rs +++ b/libs/postgres_backend/tests/simple_select.rs @@ -61,7 +61,7 @@ async fn simple_select() { // so spawn it off to run on its own. tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); @@ -137,7 +137,7 @@ async fn simple_select_ssl() { // so spawn it off to run on its own. 
tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); diff --git a/libs/postgres_connection/src/lib.rs b/libs/postgres_connection/src/lib.rs index cd981b3729..2388303329 100644 --- a/libs/postgres_connection/src/lib.rs +++ b/libs/postgres_connection/src/lib.rs @@ -223,7 +223,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "stub.host.example:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: None }" ); } @@ -239,7 +239,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "[::1]:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Ipv6(::1), port: 123, password: None }" ); } @@ -252,7 +252,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "stub.host.example:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: Some(REDACTED-STRING) }" ); } diff --git a/libs/postgres_ffi/Cargo.toml b/libs/postgres_ffi/Cargo.toml index b7a376841d..d4fec6cbe9 100644 --- a/libs/postgres_ffi/Cargo.toml +++ b/libs/postgres_ffi/Cargo.toml @@ -16,8 +16,10 @@ memoffset.workspace = true pprof.workspace = true thiserror.workspace = true serde.workspace = true +postgres_ffi_types.workspace = true utils.workspace = true tracing.workspace = true +postgres_versioninfo.workspace = true [dev-dependencies] env_logger.workspace = true diff --git a/libs/postgres_ffi/benches/waldecoder.rs b/libs/postgres_ffi/benches/waldecoder.rs index 2e1d62e452..b2a884c7db 100644 --- a/libs/postgres_ffi/benches/waldecoder.rs +++ b/libs/postgres_ffi/benches/waldecoder.rs @@ -4,6 +4,7 @@ use criterion::{Bencher, Criterion, criterion_group, criterion_main}; use postgres_ffi::v17::wal_generator::LogicalMessageGenerator; use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler; use postgres_ffi::waldecoder::WalStreamDecoder; +use postgres_versioninfo::PgMajorVersion; use pprof::criterion::{Output, PProfProfiler}; use utils::lsn::Lsn; @@ -32,7 +33,7 @@ fn bench_complete_record(c: &mut Criterion) { let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX); let value = vec![1; value_size]; - let mut decoder = WalStreamDecoder::new(Lsn(0), 170000); + let mut decoder = WalStreamDecoder::new(Lsn(0), PgMajorVersion::PG17); let msg = LogicalMessageGenerator::new(PREFIX, &value) .next() .unwrap() diff --git a/libs/postgres_ffi/src/lib.rs b/libs/postgres_ffi/src/lib.rs index 05d8de4c7a..9297ac46c9 100644 --- a/libs/postgres_ffi/src/lib.rs +++ b/libs/postgres_ffi/src/lib.rs @@ -14,6 +14,8 @@ use bytes::Bytes; use utils::bin_ser::SerializeError; use utils::lsn::Lsn; +pub use postgres_versioninfo::PgMajorVersion; + macro_rules! postgres_ffi { ($version:ident) => { #[path = "."] @@ -91,21 +93,22 @@ macro_rules! 
dispatch_pgversion { $version => $code, default = $invalid_pgver_handling, pgversions = [ - 14 : v14, - 15 : v15, - 16 : v16, - 17 : v17, + $crate::PgMajorVersion::PG14 => v14, + $crate::PgMajorVersion::PG15 => v15, + $crate::PgMajorVersion::PG16 => v16, + $crate::PgMajorVersion::PG17 => v17, ] ) }; ($pgversion:expr => $code:expr, default = $default:expr, - pgversions = [$($sv:literal : $vsv:ident),+ $(,)?]) => { - match ($pgversion) { + pgversions = [$($sv:pat => $vsv:ident),+ $(,)?]) => { + match ($pgversion.clone().into()) { $($sv => { use $crate::$vsv as pgv; $code },)+ + #[allow(unreachable_patterns)] _ => { $default } @@ -179,9 +182,9 @@ macro_rules! enum_pgversion { $($variant ( $crate::$md::$t )),+ } impl self::$name { - pub fn pg_version(&self) -> u32 { + pub fn pg_version(&self) -> PgMajorVersion { enum_pgversion_dispatch!(self, $name, _ign, { - pgv::bindings::PG_MAJORVERSION_NUM + pgv::bindings::MY_PGVERSION }) } } @@ -195,15 +198,15 @@ macro_rules! enum_pgversion { }; {name = $name:ident, path = $p:ident, - typ = $t:ident, + $(typ = $t:ident,)? pgversions = [$($variant:ident : $md:ident),+ $(,)?]} => { pub enum $name { - $($variant ($crate::$md::$p::$t)),+ + $($variant $(($crate::$md::$p::$t))?),+ } impl $name { - pub fn pg_version(&self) -> u32 { + pub fn pg_version(&self) -> PgMajorVersion { enum_pgversion_dispatch!(self, $name, _ign, { - pgv::bindings::PG_MAJORVERSION_NUM + pgv::bindings::MY_PGVERSION }) } } @@ -249,22 +252,21 @@ pub use v14::xlog_utils::{ try_from_pg_timestamp, }; -pub fn bkpimage_is_compressed(bimg_info: u8, version: u32) -> bool { +pub fn bkpimage_is_compressed(bimg_info: u8, version: PgMajorVersion) -> bool { dispatch_pgversion!(version, pgv::bindings::bkpimg_is_compressed(bimg_info)) } pub fn generate_wal_segment( segno: u64, system_id: u64, - pg_version: u32, + pg_version: PgMajorVersion, lsn: Lsn, ) -> Result { assert_eq!(segno, lsn.segment_number(WAL_SEGMENT_SIZE)); dispatch_pgversion!( pg_version, - pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn), - Err(SerializeError::BadInput) + pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn) ) } @@ -272,7 +274,7 @@ pub fn generate_pg_control( pg_control_bytes: &[u8], checkpoint_bytes: &[u8], lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<(Bytes, u64, bool)> { dispatch_pgversion!( pg_version, @@ -352,6 +354,7 @@ pub fn fsm_logical_to_physical(addr: BlockNumber) -> BlockNumber { pub mod waldecoder { use std::num::NonZeroU32; + use crate::PgMajorVersion; use bytes::{Buf, Bytes, BytesMut}; use thiserror::Error; use utils::lsn::Lsn; @@ -369,7 +372,7 @@ pub mod waldecoder { pub struct WalStreamDecoder { pub lsn: Lsn, - pub pg_version: u32, + pub pg_version: PgMajorVersion, pub inputbuf: BytesMut, pub state: State, } @@ -382,7 +385,7 @@ pub mod waldecoder { } impl WalStreamDecoder { - pub fn new(lsn: Lsn, pg_version: u32) -> WalStreamDecoder { + pub fn new(lsn: Lsn, pg_version: PgMajorVersion) -> WalStreamDecoder { WalStreamDecoder { lsn, pg_version, diff --git a/libs/postgres_ffi/src/pg_constants.rs b/libs/postgres_ffi/src/pg_constants.rs index b0bdd8a8da..f61b9a71c2 100644 --- a/libs/postgres_ffi/src/pg_constants.rs +++ b/libs/postgres_ffi/src/pg_constants.rs @@ -11,11 +11,7 @@ use crate::{BLCKSZ, PageHeaderData}; -// -// From pg_tablespace_d.h -// -pub const DEFAULTTABLESPACE_OID: u32 = 1663; -pub const GLOBALTABLESPACE_OID: u32 = 1664; +// Note: There are a few more widely-used constants in the postgres_ffi_types::constants crate. 
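// Illustrative caller sketch (names are hypothetical) of the PgMajorVersion plumbing above:
// the major version now travels as postgres_versioninfo::PgMajorVersion (re-exported from
// postgres_ffi) instead of a bare u32, so version dispatch can be exhaustive instead of
// falling back to a runtime error arm.
use postgres_ffi::PgMajorVersion;
use postgres_ffi::waldecoder::WalStreamDecoder;
use utils::lsn::Lsn;

fn decoder_for(pg_version: PgMajorVersion, start: Lsn) -> WalStreamDecoder {
    // WalStreamDecoder::new() now takes the enum, matching the benchmark change above,
    // where PgMajorVersion::PG17 replaces the old `170000` magic number.
    WalStreamDecoder::new(start, pg_version)
}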
// From storage_xlog.h pub const XLOG_SMGR_CREATE: u8 = 0x10; diff --git a/libs/postgres_ffi/src/pg_constants_v14.rs b/libs/postgres_ffi/src/pg_constants_v14.rs index fe01a5df7c..fd393995db 100644 --- a/libs/postgres_ffi/src/pg_constants_v14.rs +++ b/libs/postgres_ffi/src/pg_constants_v14.rs @@ -1,3 +1,7 @@ +use crate::PgMajorVersion; + +pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG14; + pub const XLOG_DBASE_CREATE: u8 = 0x00; pub const XLOG_DBASE_DROP: u8 = 0x10; diff --git a/libs/postgres_ffi/src/pg_constants_v15.rs b/libs/postgres_ffi/src/pg_constants_v15.rs index 3cd1b7aec5..6c1e2c13de 100644 --- a/libs/postgres_ffi/src/pg_constants_v15.rs +++ b/libs/postgres_ffi/src/pg_constants_v15.rs @@ -1,3 +1,7 @@ +use crate::PgMajorVersion; + +pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG15; + pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; diff --git a/libs/postgres_ffi/src/pg_constants_v16.rs b/libs/postgres_ffi/src/pg_constants_v16.rs index 31bd5b68fd..d84db502f3 100644 --- a/libs/postgres_ffi/src/pg_constants_v16.rs +++ b/libs/postgres_ffi/src/pg_constants_v16.rs @@ -1,3 +1,7 @@ +use crate::PgMajorVersion; + +pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG16; + pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; diff --git a/libs/postgres_ffi/src/pg_constants_v17.rs b/libs/postgres_ffi/src/pg_constants_v17.rs index 2132938680..14d4b3d42f 100644 --- a/libs/postgres_ffi/src/pg_constants_v17.rs +++ b/libs/postgres_ffi/src/pg_constants_v17.rs @@ -1,3 +1,7 @@ +use crate::PgMajorVersion; + +pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG17; + pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; diff --git a/libs/postgres_ffi/src/relfile_utils.rs b/libs/postgres_ffi/src/relfile_utils.rs index aa0e625b47..38f94b7221 100644 --- a/libs/postgres_ffi/src/relfile_utils.rs +++ b/libs/postgres_ffi/src/relfile_utils.rs @@ -4,50 +4,7 @@ use once_cell::sync::OnceCell; use regex::Regex; -// -// Fork numbers, from relpath.h -// -pub const MAIN_FORKNUM: u8 = 0; -pub const FSM_FORKNUM: u8 = 1; -pub const VISIBILITYMAP_FORKNUM: u8 = 2; -pub const INIT_FORKNUM: u8 = 3; - -#[derive(Debug, Clone, thiserror::Error, PartialEq, Eq)] -pub enum FilePathError { - #[error("invalid relation fork name")] - InvalidForkName, - #[error("invalid relation data file name")] - InvalidFileName, -} - -impl From for FilePathError { - fn from(_e: core::num::ParseIntError) -> Self { - FilePathError::InvalidFileName - } -} - -/// Convert Postgres relation file's fork suffix to fork number. -pub fn forkname_to_number(forkname: Option<&str>) -> Result { - match forkname { - // "main" is not in filenames, it's implicit if the fork name is not present - None => Ok(MAIN_FORKNUM), - Some("fsm") => Ok(FSM_FORKNUM), - Some("vm") => Ok(VISIBILITYMAP_FORKNUM), - Some("init") => Ok(INIT_FORKNUM), - Some(_) => Err(FilePathError::InvalidForkName), - } -} - -/// Convert Postgres fork number to the right suffix of the relation data file. -pub fn forknumber_to_name(forknum: u8) -> Option<&'static str> { - match forknum { - MAIN_FORKNUM => None, - FSM_FORKNUM => Some("fsm"), - VISIBILITYMAP_FORKNUM => Some("vm"), - INIT_FORKNUM => Some("init"), - _ => Some("UNKNOWN FORKNUM"), - } -} +use postgres_ffi_types::forknum::*; /// Parse a filename of a relation file. Returns (relfilenode, forknum, segno) tuple. 
/// @@ -75,7 +32,9 @@ pub fn parse_relfilename(fname: &str) -> Result<(u32, u8, u32), FilePathError> { .ok_or(FilePathError::InvalidFileName)?; let relnode_str = caps.name("relnode").unwrap().as_str(); - let relnode = relnode_str.parse::()?; + let relnode = relnode_str + .parse::() + .map_err(|_e| FilePathError::InvalidFileName)?; let forkname = caps.name("forkname").map(|f| f.as_str()); let forknum = forkname_to_number(forkname)?; @@ -84,7 +43,11 @@ pub fn parse_relfilename(fname: &str) -> Result<(u32, u8, u32), FilePathError> { let segno = if segno_match.is_none() { 0 } else { - segno_match.unwrap().as_str().parse::()? + segno_match + .unwrap() + .as_str() + .parse::() + .map_err(|_e| FilePathError::InvalidFileName)? }; Ok((relnode, forknum, segno)) diff --git a/libs/postgres_ffi/src/waldecoder_handler.rs b/libs/postgres_ffi/src/waldecoder_handler.rs index b4d50375bd..9cd40645ec 100644 --- a/libs/postgres_ffi/src/waldecoder_handler.rs +++ b/libs/postgres_ffi/src/waldecoder_handler.rs @@ -114,7 +114,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let hdr = XLogLongPageHeaderData::from_bytes(&mut self.inputbuf).map_err( |e| WalDecodeError { - msg: format!("long header deserialization failed {}", e), + msg: format!("long header deserialization failed {e}"), lsn: self.lsn, }, )?; @@ -130,7 +130,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let hdr = XLogPageHeaderData::from_bytes(&mut self.inputbuf).map_err(|e| { WalDecodeError { - msg: format!("header deserialization failed {}", e), + msg: format!("header deserialization failed {e}"), lsn: self.lsn, } })?; @@ -155,7 +155,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let xl_tot_len = (&self.inputbuf[0..4]).get_u32_le(); if (xl_tot_len as usize) < XLOG_SIZE_OF_XLOG_RECORD { return Err(WalDecodeError { - msg: format!("invalid xl_tot_len {}", xl_tot_len), + msg: format!("invalid xl_tot_len {xl_tot_len}"), lsn: self.lsn, }); } @@ -218,7 +218,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let xlogrec = XLogRecord::from_slice(&recordbuf[0..XLOG_SIZE_OF_XLOG_RECORD]).map_err(|e| { WalDecodeError { - msg: format!("xlog record deserialization failed {}", e), + msg: format!("xlog record deserialization failed {e}"), lsn: self.lsn, } })?; diff --git a/libs/postgres_ffi/src/walrecord.rs b/libs/postgres_ffi/src/walrecord.rs index 1ccf4590a9..d593123dc0 100644 --- a/libs/postgres_ffi/src/walrecord.rs +++ b/libs/postgres_ffi/src/walrecord.rs @@ -9,8 +9,8 @@ use utils::bin_ser::DeserializeError; use utils::lsn::Lsn; use crate::{ - BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, RepOriginId, - TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants, + BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, PgMajorVersion, + RepOriginId, TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants, }; #[repr(C)] @@ -199,20 +199,17 @@ impl DecodedWALRecord { /// Check if this WAL record represents a legacy "copy" database creation, which populates new relations /// by reading other existing relations' data blocks. This is more complex to apply than new-style database /// creations which simply include all the desired blocks in the WAL, so we need a helper function to detect this case. 
- pub fn is_dbase_create_copy(&self, pg_version: u32) -> bool { + pub fn is_dbase_create_copy(&self, pg_version: PgMajorVersion) -> bool { if self.xl_rmid == pg_constants::RM_DBASE_ID { let info = self.xl_info & pg_constants::XLR_RMGR_INFO_MASK; match pg_version { - 14 => { + PgMajorVersion::PG14 => { // Postgres 14 database creations are always the legacy kind info == crate::v14::bindings::XLOG_DBASE_CREATE } - 15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY, - 16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY, - 17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY, - _ => { - panic!("Unsupported postgres version {pg_version}") - } + PgMajorVersion::PG15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY, + PgMajorVersion::PG16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY, + PgMajorVersion::PG17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY, } } else { false @@ -248,7 +245,7 @@ impl DecodedWALRecord { pub fn decode_wal_record( record: Bytes, decoded: &mut DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<()> { let mut rnode_spcnode: u32 = 0; let mut rnode_dbnode: u32 = 0; @@ -1106,9 +1103,9 @@ pub struct XlClogTruncate { } impl XlClogTruncate { - pub fn decode(buf: &mut Bytes, pg_version: u32) -> XlClogTruncate { + pub fn decode(buf: &mut Bytes, pg_version: PgMajorVersion) -> XlClogTruncate { XlClogTruncate { - pageno: if pg_version < 17 { + pageno: if pg_version < PgMajorVersion::PG17 { buf.get_u32_le() } else { buf.get_u64_le() as u32 @@ -1199,7 +1196,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "HEAP2 MULTI_INSERT", pg_constants::XLOG_HEAP2_VISIBLE => "HEAP2 VISIBLE", _ => { - unknown_str = format!("HEAP2 UNKNOWN_0x{:02x}", info); + unknown_str = format!("HEAP2 UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1212,7 +1209,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "HEAP UPDATE", pg_constants::XLOG_HEAP_HOT_UPDATE => "HEAP HOT_UPDATE", _ => { - unknown_str = format!("HEAP2 UNKNOWN_0x{:02x}", info); + unknown_str = format!("HEAP2 UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1223,7 +1220,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "XLOG FPI", pg_constants::XLOG_FPI_FOR_HINT => "XLOG FPI_FOR_HINT", _ => { - unknown_str = format!("XLOG UNKNOWN_0x{:02x}", info); + unknown_str = format!("XLOG UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1231,7 +1228,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result { let info = xlogrec.xl_info & pg_constants::XLR_RMGR_INFO_MASK; - unknown_str = format!("UNKNOWN_RM_{} INFO_0x{:02x}", rmid, info); + unknown_str = format!("UNKNOWN_RM_{rmid} INFO_0x{info:02x}"); &unknown_str } }; diff --git a/libs/postgres_ffi/src/xlog_utils.rs b/libs/postgres_ffi/src/xlog_utils.rs index 14fb1f2a1f..f7b6296053 100644 --- a/libs/postgres_ffi/src/xlog_utils.rs +++ b/libs/postgres_ffi/src/xlog_utils.rs @@ -11,9 +11,9 @@ use super::super::waldecoder::WalStreamDecoder; use super::bindings::{ CheckPoint, ControlFileData, DBState_DB_SHUTDOWNED, FullTransactionId, TimeLineID, TimestampTz, XLogLongPageHeaderData, XLogPageHeaderData, XLogRecPtr, XLogRecord, XLogSegNo, XLOG_PAGE_MAGIC, + MY_PGVERSION }; use super::wal_generator::LogicalMessageGenerator; -use super::PG_MAJORVERSION; use crate::pg_constants; use crate::PG_TLI; use crate::{uint32, uint64, Oid}; @@ -233,7 +233,7 @@ pub fn find_end_of_wal( let mut result = start_lsn; let mut curr_lsn = start_lsn; let mut buf = [0u8; XLOG_BLCKSZ]; - 
let pg_version = PG_MAJORVERSION[1..3].parse::().unwrap(); + let pg_version = MY_PGVERSION; debug!("find_end_of_wal PG_VERSION: {}", pg_version); let mut decoder = WalStreamDecoder::new(start_lsn, pg_version); diff --git a/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs b/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs index 6151ce34ac..44bc4dfa95 100644 --- a/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs +++ b/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs @@ -4,6 +4,7 @@ use std::str::FromStr; use anyhow::*; use clap::{Arg, ArgMatches, Command, value_parser}; use postgres::Client; +use postgres_ffi::PgMajorVersion; use wal_craft::*; fn main() -> Result<()> { @@ -48,7 +49,7 @@ fn main() -> Result<()> { Some(("with-initdb", arg_matches)) => { let cfg = Conf { pg_version: *arg_matches - .get_one::("pg-version") + .get_one::("pg-version") .context("'pg-version' is required")?, pg_distrib_dir: arg_matches .get_one::("pg-distrib-dir") diff --git a/libs/postgres_ffi/wal_craft/src/lib.rs b/libs/postgres_ffi/wal_craft/src/lib.rs index ca9530faef..ef9e854297 100644 --- a/libs/postgres_ffi/wal_craft/src/lib.rs +++ b/libs/postgres_ffi/wal_craft/src/lib.rs @@ -9,8 +9,8 @@ use log::*; use postgres::Client; use postgres::types::PgLsn; use postgres_ffi::{ - WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, - XLOG_SIZE_OF_XLOG_SHORT_PHD, + PgMajorVersion, WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD, + XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD, }; macro_rules! xlog_utils_test { @@ -29,7 +29,7 @@ macro_rules! xlog_utils_test { postgres_ffi::for_all_postgres_versions! { xlog_utils_test } pub struct Conf { - pub pg_version: u32, + pub pg_version: PgMajorVersion, pub pg_distrib_dir: PathBuf, pub datadir: PathBuf, } @@ -52,11 +52,7 @@ impl Conf { pub fn pg_distrib_dir(&self) -> anyhow::Result { let path = self.pg_distrib_dir.clone(); - #[allow(clippy::manual_range_patterns)] - match self.pg_version { - 14 | 15 | 16 | 17 => Ok(path.join(format!("v{}", self.pg_version))), - _ => bail!("Unsupported postgres version: {}", self.pg_version), - } + Ok(path.join(self.pg_version.v_str())) } fn pg_bin_dir(&self) -> anyhow::Result { diff --git a/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs b/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs index 4a33dbe25b..366aa7dbef 100644 --- a/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs +++ b/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs @@ -24,7 +24,7 @@ fn init_logging() { fn test_end_of_wal(test_name: &str) { use crate::*; - let pg_version = PG_MAJORVERSION[1..3].parse::().unwrap(); + let pg_version = MY_PGVERSION; // Craft some WAL let top_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) @@ -34,7 +34,7 @@ fn test_end_of_wal(test_name: &str) { let cfg = Conf { pg_version, pg_distrib_dir: top_path.join("pg_install"), - datadir: top_path.join(format!("test_output/{}-{PG_MAJORVERSION}", test_name)), + datadir: top_path.join(format!("test_output/{test_name}-{PG_MAJORVERSION}")), }; if cfg.datadir.exists() { fs::remove_dir_all(&cfg.datadir).unwrap(); diff --git a/libs/postgres_ffi_types/Cargo.toml b/libs/postgres_ffi_types/Cargo.toml new file mode 100644 index 0000000000..50c6fc7874 --- /dev/null +++ b/libs/postgres_ffi_types/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "postgres_ffi_types" +version = "0.1.0" +edition.workspace = true +license.workspace = true + +[dependencies] +thiserror.workspace = true +workspace_hack = { version = "0.1", path = "../../workspace_hack" } + +[dev-dependencies] 
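Minimal usage sketch (not part of the patch) for the fork-number helpers that move verbatim from relfile_utils into the new postgres_ffi_types::forknum module added just below; only the import path changes for callers.

    use postgres_ffi_types::forknum::{
        FSM_FORKNUM, FilePathError, MAIN_FORKNUM, forkname_to_number, forknumber_to_name,
    };

    #[test]
    fn fork_name_round_trip() -> Result<(), FilePathError> {
        // The main fork has no suffix in data file names, so it maps from `None`.
        assert_eq!(forkname_to_number(None)?, MAIN_FORKNUM);
        assert_eq!(forkname_to_number(Some("fsm"))?, FSM_FORKNUM);
        // And back again: only the main fork renders as `None`.
        assert_eq!(forknumber_to_name(FSM_FORKNUM), Some("fsm"));
        assert_eq!(forknumber_to_name(MAIN_FORKNUM), None);
        Ok(())
    }
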
diff --git a/libs/postgres_ffi_types/src/constants.rs b/libs/postgres_ffi_types/src/constants.rs new file mode 100644 index 0000000000..c1a004c5ab --- /dev/null +++ b/libs/postgres_ffi_types/src/constants.rs @@ -0,0 +1,8 @@ +//! Misc constants, copied from PostgreSQL headers. +//! +//! Any constants included here must be the same in all PostgreSQL versions and unlikely to change +//! in the future either! + +// From pg_tablespace_d.h +pub const DEFAULTTABLESPACE_OID: u32 = 1663; +pub const GLOBALTABLESPACE_OID: u32 = 1664; diff --git a/libs/postgres_ffi_types/src/forknum.rs b/libs/postgres_ffi_types/src/forknum.rs new file mode 100644 index 0000000000..9b225d8ce5 --- /dev/null +++ b/libs/postgres_ffi_types/src/forknum.rs @@ -0,0 +1,36 @@ +// Fork numbers, from relpath.h +pub const MAIN_FORKNUM: u8 = 0; +pub const FSM_FORKNUM: u8 = 1; +pub const VISIBILITYMAP_FORKNUM: u8 = 2; +pub const INIT_FORKNUM: u8 = 3; + +#[derive(Debug, Clone, thiserror::Error, PartialEq, Eq)] +pub enum FilePathError { + #[error("invalid relation fork name")] + InvalidForkName, + #[error("invalid relation data file name")] + InvalidFileName, +} + +/// Convert Postgres relation file's fork suffix to fork number. +pub fn forkname_to_number(forkname: Option<&str>) -> Result { + match forkname { + // "main" is not in filenames, it's implicit if the fork name is not present + None => Ok(MAIN_FORKNUM), + Some("fsm") => Ok(FSM_FORKNUM), + Some("vm") => Ok(VISIBILITYMAP_FORKNUM), + Some("init") => Ok(INIT_FORKNUM), + Some(_) => Err(FilePathError::InvalidForkName), + } +} + +/// Convert Postgres fork number to the right suffix of the relation data file. +pub fn forknumber_to_name(forknum: u8) -> Option<&'static str> { + match forknum { + MAIN_FORKNUM => None, + FSM_FORKNUM => Some("fsm"), + VISIBILITYMAP_FORKNUM => Some("vm"), + INIT_FORKNUM => Some("init"), + _ => Some("UNKNOWN FORKNUM"), + } +} diff --git a/libs/postgres_ffi_types/src/lib.rs b/libs/postgres_ffi_types/src/lib.rs new file mode 100644 index 0000000000..84ef499b9f --- /dev/null +++ b/libs/postgres_ffi_types/src/lib.rs @@ -0,0 +1,13 @@ +//! This package contains some PostgreSQL constants and datatypes that are the same in all versions +//! of PostgreSQL and unlikely to change in the future either. These could be derived from the +//! PostgreSQL headers with 'bindgen', but in order to avoid proliferating the dependency to bindgen +//! and the PostgreSQL C headers to all services, we prefer to have this small stand-alone crate for +//! them instead. +//! +//! Be mindful in what you add here, as these types are deeply ingrained in the APIs. 
+ +pub mod constants; +pub mod forknum; + +pub type Oid = u32; +pub type RepOriginId = u16; diff --git a/libs/postgres_initdb/Cargo.toml b/libs/postgres_initdb/Cargo.toml index 1605279bce..5b3b0cd936 100644 --- a/libs/postgres_initdb/Cargo.toml +++ b/libs/postgres_initdb/Cargo.toml @@ -9,4 +9,5 @@ anyhow.workspace = true tokio.workspace = true camino.workspace = true thiserror.workspace = true +postgres_versioninfo.workspace = true workspace_hack = { version = "0.1", path = "../../workspace_hack" } diff --git a/libs/postgres_initdb/src/lib.rs b/libs/postgres_initdb/src/lib.rs index ed54696861..a0c6ebef81 100644 --- a/libs/postgres_initdb/src/lib.rs +++ b/libs/postgres_initdb/src/lib.rs @@ -7,12 +7,13 @@ use std::fmt; use camino::Utf8Path; +use postgres_versioninfo::PgMajorVersion; pub struct RunInitdbArgs<'a> { pub superuser: &'a str, pub locale: &'a str, pub initdb_bin: &'a Utf8Path, - pub pg_version: u32, + pub pg_version: PgMajorVersion, pub library_search_path: &'a Utf8Path, pub pgdata: &'a Utf8Path, } @@ -31,15 +32,15 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Error::Spawn(e) => write!(f, "Error spawning command: {:?}", e), + Error::Spawn(e) => write!(f, "Error spawning command: {e:?}"), Error::Failed { status, stderr } => write!( f, "Command failed with status {:?}: {}", status, String::from_utf8_lossy(stderr) ), - Error::WaitOutput(e) => write!(f, "Error waiting for command output: {:?}", e), - Error::Other(e) => write!(f, "Error: {:?}", e), + Error::WaitOutput(e) => write!(f, "Error waiting for command output: {e:?}"), + Error::Other(e) => write!(f, "Error: {e:?}"), } } } @@ -79,12 +80,16 @@ pub async fn do_run_initdb(args: RunInitdbArgs<'_>) -> Result<(), Error> { .stderr(std::process::Stdio::piped()); // Before version 14, only the libc provide was available. - if pg_version > 14 { + if pg_version > PgMajorVersion::PG14 { // Version 17 brought with it a builtin locale provider which only provides // C and C.UTF-8. While being safer for collation purposes since it is // guaranteed to be consistent throughout a major release, it is also more // performant. - let locale_provider = if pg_version >= 17 { "builtin" } else { "libc" }; + let locale_provider = if pg_version >= PgMajorVersion::PG17 { + "builtin" + } else { + "libc" + }; initdb_command.args(["--locale-provider", locale_provider]); } diff --git a/libs/postgres_versioninfo/Cargo.toml b/libs/postgres_versioninfo/Cargo.toml new file mode 100644 index 0000000000..cc59f9698d --- /dev/null +++ b/libs/postgres_versioninfo/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "postgres_versioninfo" +version = "0.1.0" +edition = "2024" +license.workspace = true + +[dependencies] +anyhow.workspace = true +thiserror.workspace = true +serde.workspace = true +serde_repr.workspace = true +workspace_hack = { version = "0.1", path = "../../workspace_hack" } diff --git a/libs/postgres_versioninfo/src/lib.rs b/libs/postgres_versioninfo/src/lib.rs new file mode 100644 index 0000000000..286507b654 --- /dev/null +++ b/libs/postgres_versioninfo/src/lib.rs @@ -0,0 +1,175 @@ +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde_repr::{Deserialize_repr, Serialize_repr}; +use std::fmt::{Display, Formatter}; +use std::str::FromStr; + +/// An enum with one variant for each major version of PostgreSQL that we support. 
+/// +#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Deserialize_repr, Serialize_repr)] +#[repr(u32)] +pub enum PgMajorVersion { + PG14 = 14, + PG15 = 15, + PG16 = 16, + PG17 = 17, + // !!! When you add a new PgMajorVersion, don't forget to update PgMajorVersion::ALL +} + +/// A full PostgreSQL version ID, in MMmmbb numerical format (Major/minor/bugfix) +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +#[repr(transparent)] +pub struct PgVersionId(u32); + +impl PgVersionId { + pub const UNKNOWN: PgVersionId = PgVersionId(0); + + pub fn from_full_pg_version(version: u32) -> PgVersionId { + match version { + 0 => PgVersionId(version), // unknown version + 140000..180000 => PgVersionId(version), + _ => panic!("Invalid full PostgreSQL version ID {version}"), + } + } +} + +impl Display for PgVersionId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + u32::fmt(&self.0, f) + } +} + +impl Serialize for PgVersionId { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + u32::serialize(&self.0, serializer) + } +} + +impl<'de> Deserialize<'de> for PgVersionId { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + u32::deserialize(deserializer).map(PgVersionId) + } + + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + u32::deserialize_in_place(deserializer, &mut place.0) + } +} + +impl PgMajorVersion { + /// Get the numerical representation of the represented Major Version + pub const fn major_version_num(&self) -> u32 { + match self { + PgMajorVersion::PG14 => 14, + PgMajorVersion::PG15 => 15, + PgMajorVersion::PG16 => 16, + PgMajorVersion::PG17 => 17, + } + } + + /// Get the contents of this version's PG_VERSION file. + /// + /// The PG_VERSION file is used to determine the PostgreSQL version that currently + /// owns the data in a PostgreSQL data directory. + pub fn versionfile_string(&self) -> &'static str { + match self { + PgMajorVersion::PG14 => "14", + PgMajorVersion::PG15 => "15", + PgMajorVersion::PG16 => "16\x0A", + PgMajorVersion::PG17 => "17\x0A", + } + } + + /// Get the v{version} string of this major PostgreSQL version. + /// + /// Because this was hand-coded in various places, this was moved into a shared + /// implementation. + pub fn v_str(&self) -> String { + match self { + PgMajorVersion::PG14 => "v14", + PgMajorVersion::PG15 => "v15", + PgMajorVersion::PG16 => "v16", + PgMajorVersion::PG17 => "v17", + } + .to_string() + } + + /// All currently supported major versions of PostgreSQL. 
+ pub const ALL: &'static [PgMajorVersion] = &[ + PgMajorVersion::PG14, + PgMajorVersion::PG15, + PgMajorVersion::PG16, + PgMajorVersion::PG17, + ]; +} + +impl Display for PgMajorVersion { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + PgMajorVersion::PG14 => "PgMajorVersion::PG14", + PgMajorVersion::PG15 => "PgMajorVersion::PG15", + PgMajorVersion::PG16 => "PgMajorVersion::PG16", + PgMajorVersion::PG17 => "PgMajorVersion::PG17", + }) + } +} + +#[derive(Debug, thiserror::Error)] +#[allow(dead_code)] +pub struct InvalidPgVersion(u32); + +impl Display for InvalidPgVersion { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "InvalidPgVersion({})", self.0) + } +} + +impl TryFrom for PgMajorVersion { + type Error = InvalidPgVersion; + + fn try_from(value: PgVersionId) -> Result { + Ok(match value.0 / 10000 { + 14 => PgMajorVersion::PG14, + 15 => PgMajorVersion::PG15, + 16 => PgMajorVersion::PG16, + 17 => PgMajorVersion::PG17, + _ => return Err(InvalidPgVersion(value.0)), + }) + } +} + +impl From for PgVersionId { + fn from(value: PgMajorVersion) -> Self { + PgVersionId((value as u32) * 10000) + } +} + +#[derive(Debug, PartialEq, Eq, thiserror::Error)] +pub struct PgMajorVersionParseError(String); + +impl Display for PgMajorVersionParseError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "PgMajorVersionParseError({})", self.0) + } +} + +impl FromStr for PgMajorVersion { + type Err = PgMajorVersionParseError; + + fn from_str(s: &str) -> Result { + Ok(match s { + "14" => PgMajorVersion::PG14, + "15" => PgMajorVersion::PG15, + "16" => PgMajorVersion::PG16, + "17" => PgMajorVersion::PG17, + _ => return Err(PgMajorVersionParseError(s.to_string())), + }) + } +} diff --git a/libs/posthog_client_lite/src/background_loop.rs b/libs/posthog_client_lite/src/background_loop.rs index a404c76da9..08cb0d2264 100644 --- a/libs/posthog_client_lite/src/background_loop.rs +++ b/libs/posthog_client_lite/src/background_loop.rs @@ -1,17 +1,22 @@ //! A background loop that fetches feature flags from PostHog and updates the feature store. -use std::{sync::Arc, time::Duration}; +use std::{ + sync::Arc, + time::{Duration, SystemTime}, +}; use arc_swap::ArcSwap; use tokio_util::sync::CancellationToken; use tracing::{Instrument, info_span}; -use crate::{CaptureEvent, FeatureStore, PostHogClient, PostHogClientConfig}; +use crate::{ + CaptureEvent, FeatureStore, LocalEvaluationResponse, PostHogClient, PostHogClientConfig, +}; /// A background loop that fetches feature flags from PostHog and updates the feature store. pub struct FeatureResolverBackgroundLoop { posthog_client: PostHogClient, - feature_store: ArcSwap, + feature_store: ArcSwap<(SystemTime, Arc)>, cancel: CancellationToken, } @@ -19,11 +24,35 @@ impl FeatureResolverBackgroundLoop { pub fn new(config: PostHogClientConfig, shutdown_pageserver: CancellationToken) -> Self { Self { posthog_client: PostHogClient::new(config), - feature_store: ArcSwap::new(Arc::new(FeatureStore::new())), + feature_store: ArcSwap::new(Arc::new(( + SystemTime::UNIX_EPOCH, + Arc::new(FeatureStore::new()), + ))), cancel: shutdown_pageserver, } } + /// Update the feature store with a new feature flag spec bypassing the normal refresh loop. 
+ pub fn update(&self, spec: String) -> anyhow::Result<()> { + let resp: LocalEvaluationResponse = serde_json::from_str(&spec)?; + self.update_feature_store_nofail(resp, "http_propagate"); + Ok(()) + } + + fn update_feature_store_nofail(&self, resp: LocalEvaluationResponse, source: &'static str) { + let project_id = self.posthog_client.config.project_id.parse::().ok(); + match FeatureStore::new_with_flags(resp.flags, project_id) { + Ok(feature_store) => { + self.feature_store + .store(Arc::new((SystemTime::now(), Arc::new(feature_store)))); + tracing::info!("Feature flag updated from {}", source); + } + Err(e) => { + tracing::warn!("Cannot process feature flag spec from {}: {}", source, e); + } + } + } + pub fn spawn( self: Arc, handle: &tokio::runtime::Handle, @@ -36,7 +65,10 @@ impl FeatureResolverBackgroundLoop { // Main loop of updating the feature flags. handle.spawn( async move { - tracing::info!("Starting PostHog feature resolver"); + tracing::info!( + "Starting PostHog feature resolver with refresh period: {:?}", + refresh_period + ); let mut ticker = tokio::time::interval(refresh_period); ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); loop { @@ -44,6 +76,17 @@ impl FeatureResolverBackgroundLoop { _ = ticker.tick() => {} _ = cancel.cancelled() => break } + { + let last_update = this.feature_store.load().0; + if let Ok(elapsed) = last_update.elapsed() { + if elapsed < refresh_period { + tracing::debug!( + "Skipping feature flag refresh because it's too soon" + ); + continue; + } + } + } let resp = match this .posthog_client .get_feature_flags_local_evaluation() @@ -55,9 +98,7 @@ impl FeatureResolverBackgroundLoop { continue; } }; - let feature_store = FeatureStore::new_with_flags(resp.flags); - this.feature_store.store(Arc::new(feature_store)); - tracing::info!("Feature flag updated"); + this.update_feature_store_nofail(resp, "refresh_loop"); } tracing::info!("PostHog feature resolver stopped"); } @@ -82,6 +123,6 @@ impl FeatureResolverBackgroundLoop { } pub fn feature_store(&self) -> Arc { - self.feature_store.load_full() + self.feature_store.load().1.clone() } } diff --git a/libs/posthog_client_lite/src/lib.rs b/libs/posthog_client_lite/src/lib.rs index f607b1be0a..d042ee2410 100644 --- a/libs/posthog_client_lite/src/lib.rs +++ b/libs/posthog_client_lite/src/lib.rs @@ -39,6 +39,9 @@ pub struct LocalEvaluationResponse { #[derive(Deserialize)] pub struct LocalEvaluationFlag { + #[allow(dead_code)] + id: u64, + team_id: u64, key: String, filters: LocalEvaluationFlagFilters, active: bool, @@ -107,17 +110,32 @@ impl FeatureStore { } } - pub fn new_with_flags(flags: Vec) -> Self { + pub fn new_with_flags( + flags: Vec, + project_id: Option, + ) -> Result { let mut store = Self::new(); - store.set_flags(flags); - store + store.set_flags(flags, project_id)?; + Ok(store) } - pub fn set_flags(&mut self, flags: Vec) { + pub fn set_flags( + &mut self, + flags: Vec, + project_id: Option, + ) -> Result<(), &'static str> { self.flags.clear(); for flag in flags { + if let Some(project_id) = project_id { + if flag.team_id != project_id { + return Err( + "Retrieved a spec with different project id, wrong config? Discarding the feature flags.", + ); + } + } self.flags.insert(flag.key.clone(), flag); } + Ok(()) } /// Generate a consistent hash for a user ID (e.g., tenant ID). 
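A sketch of the new FeatureStore construction path (not part of the patch): new_with_flags now takes an optional project id and returns an error when a flag's team_id does not match it. This assumes FeatureStore and LocalEvaluationResponse are reachable from the crate root and that the caller has serde_json and anyhow available; store_from_spec is a hypothetical helper name.

    use posthog_client_lite::{FeatureStore, LocalEvaluationResponse};

    // Hypothetical helper: parse a raw local-evaluation spec and build a store,
    // letting new_with_flags reject specs whose team_id does not match project_id.
    fn store_from_spec(spec_json: &str, project_id: Option<u64>) -> anyhow::Result<FeatureStore> {
        let resp: LocalEvaluationResponse = serde_json::from_str(spec_json)?;
        FeatureStore::new_with_flags(resp.flags, project_id).map_err(anyhow::Error::msg)
    }
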
@@ -150,15 +168,13 @@ impl FeatureStore { let PostHogFlagFilterPropertyValue::String(provided) = provided else { // Left should be a string return Err(PostHogEvaluationError::Internal(format!( - "The left side of the condition is not a string: {:?}", - provided + "The left side of the condition is not a string: {provided:?}" ))); }; let PostHogFlagFilterPropertyValue::List(requested) = requested else { // Right should be a list of string return Err(PostHogEvaluationError::Internal(format!( - "The right side of the condition is not a list: {:?}", - requested + "The right side of the condition is not a list: {requested:?}" ))); }; Ok(requested.contains(provided)) @@ -167,14 +183,12 @@ impl FeatureStore { let PostHogFlagFilterPropertyValue::String(requested) = requested else { // Right should be a string return Err(PostHogEvaluationError::Internal(format!( - "The right side of the condition is not a string: {:?}", - requested + "The right side of the condition is not a string: {requested:?}" ))); }; let Ok(requested) = requested.parse::() else { return Err(PostHogEvaluationError::Internal(format!( - "Can not parse the right side of the condition as a number: {:?}", - requested + "Can not parse the right side of the condition as a number: {requested:?}" ))); }; // Left can either be a number or a string @@ -183,16 +197,14 @@ impl FeatureStore { PostHogFlagFilterPropertyValue::String(provided) => { let Ok(provided) = provided.parse::() else { return Err(PostHogEvaluationError::Internal(format!( - "Can not parse the left side of the condition as a number: {:?}", - provided + "Can not parse the left side of the condition as a number: {provided:?}" ))); }; provided } _ => { return Err(PostHogEvaluationError::Internal(format!( - "The left side of the condition is not a number or a string: {:?}", - provided + "The left side of the condition is not a number or a string: {provided:?}" ))); } }; @@ -200,14 +212,12 @@ impl FeatureStore { "lt" => Ok(provided < requested), "gt" => Ok(provided > requested), op => Err(PostHogEvaluationError::Internal(format!( - "Unsupported operator: {}", - op + "Unsupported operator: {op}" ))), } } _ => Err(PostHogEvaluationError::Internal(format!( - "Unsupported operator: {}", - operator + "Unsupported operator: {operator}" ))), } } @@ -355,8 +365,7 @@ impl FeatureStore { if let Some(flag_config) = self.flags.get(flag_key) { if !flag_config.active { return Err(PostHogEvaluationError::NotAvailable(format!( - "The feature flag is not active: {}", - flag_key + "The feature flag is not active: {flag_key}" ))); } let Some(ref multivariate) = flag_config.filters.multivariate else { @@ -383,8 +392,7 @@ impl FeatureStore { // This should not happen because the rollout percentage always adds up to 100, but just in case that PostHog // returned invalid spec, we return an error. 
return Err(PostHogEvaluationError::Internal(format!( - "Rollout percentage does not add up to 100: {}", - flag_key + "Rollout percentage does not add up to 100: {flag_key}" ))); } GroupEvaluationResult::Unmatched => continue, @@ -395,8 +403,7 @@ impl FeatureStore { } else { // The feature flag is not available yet Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } @@ -422,8 +429,7 @@ impl FeatureStore { if let Some(flag_config) = self.flags.get(flag_key) { if !flag_config.active { return Err(PostHogEvaluationError::NotAvailable(format!( - "The feature flag is not active: {}", - flag_key + "The feature flag is not active: {flag_key}" ))); } if flag_config.filters.multivariate.is_some() { @@ -438,8 +444,7 @@ impl FeatureStore { match self.evaluate_group(group, hash_on_global_rollout_percentage, properties)? { GroupEvaluationResult::MatchedAndOverride(_) => { return Err(PostHogEvaluationError::Internal(format!( - "Boolean flag cannot have overrides: {}", - flag_key + "Boolean flag cannot have overrides: {flag_key}" ))); } GroupEvaluationResult::MatchedAndEvaluate => { @@ -453,8 +458,7 @@ impl FeatureStore { } else { // The feature flag is not available yet Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } @@ -465,8 +469,7 @@ impl FeatureStore { Ok(flag_config.filters.multivariate.is_none()) } else { Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } @@ -534,23 +537,33 @@ impl PostHogClient { }) } - /// Fetch the feature flag specs from the server. - /// - /// This is unfortunately an undocumented API at: - /// - - /// - - /// - /// The handling logic in [`FeatureStore`] mostly follows the Python API implementation. - /// See `_compute_flag_locally` in - pub async fn get_feature_flags_local_evaluation( - &self, - ) -> anyhow::Result { + /// Check if the server API key is a feature flag secure API key. This key can only be + /// used to fetch the feature flag specs and can only be used on a undocumented API + /// endpoint. + fn is_feature_flag_secure_api_key(&self) -> bool { + self.config.server_api_key.starts_with("phs_") + } + + /// Get the raw JSON spec, same as `get_feature_flags_local_evaluation` but without parsing. + pub async fn get_feature_flags_local_evaluation_raw(&self) -> anyhow::Result { // BASE_URL/api/projects/:project_id/feature_flags/local_evaluation // with bearer token of self.server_api_key - let url = format!( - "{}/api/projects/{}/feature_flags/local_evaluation", - self.config.private_api_url, self.config.project_id - ); + // OR + // BASE_URL/api/feature_flag/local_evaluation/ + // with bearer token of feature flag specific self.server_api_key + let url = if self.is_feature_flag_secure_api_key() { + // The new feature local evaluation secure API token + format!( + "{}/api/feature_flag/local_evaluation", + self.config.private_api_url + ) + } else { + // The old personal API token + format!( + "{}/api/projects/{}/feature_flags/local_evaluation", + self.config.private_api_url, self.config.project_id + ) + }; let response = self .client .get(url) @@ -566,7 +579,22 @@ impl PostHogClient { body )); } - Ok(serde_json::from_str(&body)?) + Ok(body) + } + + /// Fetch the feature flag specs from the server. 
+ /// + /// This is unfortunately an undocumented API at: + /// - + /// - + /// + /// The handling logic in [`FeatureStore`] mostly follows the Python API implementation. + /// See `_compute_flag_locally` in + pub async fn get_feature_flags_local_evaluation( + &self, + ) -> Result { + let raw = self.get_feature_flags_local_evaluation_raw().await?; + Ok(serde_json::from_str(&raw)?) } /// Capture an event. This will only be used to report the feature flag usage back to PostHog, though @@ -803,7 +831,7 @@ mod tests { fn evaluate_multivariate() { let mut store = FeatureStore::new(); let response: LocalEvaluationResponse = serde_json::from_str(data()).unwrap(); - store.set_flags(response.flags); + store.set_flags(response.flags, None).unwrap(); // This lacks the required properties and cannot be evaluated. let variant = @@ -873,7 +901,7 @@ mod tests { let mut store = FeatureStore::new(); let response: LocalEvaluationResponse = serde_json::from_str(data()).unwrap(); - store.set_flags(response.flags); + store.set_flags(response.flags, None).unwrap(); // This lacks the required properties and cannot be evaluated. let variant = store.evaluate_boolean_inner("boolean-flag", 1.00, &HashMap::new()); @@ -929,7 +957,7 @@ mod tests { let mut store = FeatureStore::new(); let response: LocalEvaluationResponse = serde_json::from_str(data()).unwrap(); - store.set_flags(response.flags); + store.set_flags(response.flags, None).unwrap(); // This lacks the required properties and cannot be evaluated. let variant = diff --git a/libs/pq_proto/src/lib.rs b/libs/pq_proto/src/lib.rs index e7afc64564..482dd9a298 100644 --- a/libs/pq_proto/src/lib.rs +++ b/libs/pq_proto/src/lib.rs @@ -198,7 +198,7 @@ impl fmt::Display for CancelKeyData { // This format is more compact and might work better for logs. 
f.debug_tuple("CancelKeyData") - .field(&format_args!("{:x}", id)) + .field(&format_args!("{id:x}")) .finish() } } @@ -291,8 +291,7 @@ impl FeMessage { let len = (&buf[1..5]).read_u32::().unwrap(); if len < 4 { return Err(ProtocolError::Protocol(format!( - "invalid message length {}", - len + "invalid message length {len}" ))); } @@ -367,8 +366,7 @@ impl FeStartupPacket { #[allow(clippy::manual_range_contains)] if len < 8 || len > MAX_STARTUP_PACKET_LENGTH { return Err(ProtocolError::Protocol(format!( - "invalid startup packet message length {}", - len + "invalid startup packet message length {len}" ))); } diff --git a/libs/proxy/postgres-protocol2/Cargo.toml b/libs/proxy/postgres-protocol2/Cargo.toml index 7ebb05eec1..9c8f8f3531 100644 --- a/libs/proxy/postgres-protocol2/Cargo.toml +++ b/libs/proxy/postgres-protocol2/Cargo.toml @@ -5,7 +5,7 @@ edition = "2024" license = "MIT/Apache-2.0" [dependencies] -base64 = "0.20" +base64.workspace = true byteorder.workspace = true bytes.workspace = true fallible-iterator.workspace = true diff --git a/libs/proxy/postgres-protocol2/src/authentication/sasl.rs b/libs/proxy/postgres-protocol2/src/authentication/sasl.rs index 2daf9a80d4..274c81c500 100644 --- a/libs/proxy/postgres-protocol2/src/authentication/sasl.rs +++ b/libs/proxy/postgres-protocol2/src/authentication/sasl.rs @@ -3,6 +3,8 @@ use std::fmt::Write; use std::{io, iter, mem, str}; +use base64::Engine as _; +use base64::prelude::BASE64_STANDARD; use hmac::{Hmac, Mac}; use rand::{self, Rng}; use sha2::digest::FixedOutput; @@ -50,7 +52,7 @@ pub(crate) async fn hi(str: &[u8], salt: &[u8], iterations: u32) -> [u8; 32] { } // yield every ~250us // hopefully reduces tail latencies - if i % 1024 == 0 { + if i.is_multiple_of(1024) { yield_now().await } } @@ -226,7 +228,7 @@ impl ScramSha256 { let (client_key, server_key) = match password { Credentials::Password(password) => { - let salt = match base64::decode(parsed.salt) { + let salt = match BASE64_STANDARD.decode(parsed.salt) { Ok(salt) => salt, Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; @@ -255,7 +257,7 @@ impl ScramSha256 { let mut cbind_input = vec![]; cbind_input.extend(channel_binding.gs2_header().as_bytes()); cbind_input.extend(channel_binding.cbind_data()); - let cbind_input = base64::encode(&cbind_input); + let cbind_input = BASE64_STANDARD.encode(&cbind_input); self.message.clear(); write!(&mut self.message, "c={},r={}", cbind_input, parsed.nonce).unwrap(); @@ -272,7 +274,12 @@ impl ScramSha256 { *proof ^= signature; } - write!(&mut self.message, ",p={}", base64::encode(client_proof)).unwrap(); + write!( + &mut self.message, + ",p={}", + BASE64_STANDARD.encode(client_proof) + ) + .unwrap(); self.state = State::Finish { server_key, @@ -301,12 +308,12 @@ impl ScramSha256 { let verifier = match parsed { ServerFinalMessage::Error(e) => { - return Err(io::Error::other(format!("SCRAM error: {}", e))); + return Err(io::Error::other(format!("SCRAM error: {e}"))); } ServerFinalMessage::Verifier(verifier) => verifier, }; - let verifier = match base64::decode(verifier) { + let verifier = match BASE64_STANDARD.decode(verifier) { Ok(verifier) => verifier, Err(e) => return Err(io::Error::new(io::ErrorKind::InvalidInput, e)), }; @@ -336,10 +343,8 @@ impl<'a> Parser<'a> { match self.it.next() { Some((_, c)) if c == target => Ok(()), Some((i, c)) => { - let m = format!( - "unexpected character at byte {}: expected `{}` but got `{}", - i, target, c - ); + let m = + format!("unexpected character at byte {i}: expected `{target}` 
but got `{c}"); Err(io::Error::new(io::ErrorKind::InvalidInput, m)) } None => Err(io::Error::new( @@ -405,7 +410,7 @@ impl<'a> Parser<'a> { match self.it.peek() { Some(&(i, _)) => Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unexpected trailing data at byte {}", i), + format!("unexpected trailing data at byte {i}"), )), None => Ok(()), } diff --git a/libs/proxy/postgres-protocol2/src/message/backend.rs b/libs/proxy/postgres-protocol2/src/message/backend.rs index d7eaef9509..3fc9a9335c 100644 --- a/libs/proxy/postgres-protocol2/src/message/backend.rs +++ b/libs/proxy/postgres-protocol2/src/message/backend.rs @@ -211,7 +211,7 @@ impl Message { tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unknown authentication tag `{}`", tag), + format!("unknown authentication tag `{tag}`"), )); } }, @@ -238,7 +238,7 @@ impl Message { tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unknown message tag `{}`", tag), + format!("unknown message tag `{tag}`"), )); } }; diff --git a/libs/proxy/postgres-protocol2/src/password/mod.rs b/libs/proxy/postgres-protocol2/src/password/mod.rs index 4cd9bfb060..e00ca1e34c 100644 --- a/libs/proxy/postgres-protocol2/src/password/mod.rs +++ b/libs/proxy/postgres-protocol2/src/password/mod.rs @@ -6,6 +6,8 @@ //! side. This is good because it ensures the cleartext password won't //! end up in logs pg_stat displays, etc. +use base64::Engine as _; +use base64::prelude::BASE64_STANDARD; use hmac::{Hmac, Mac}; use rand::RngCore; use sha2::digest::FixedOutput; @@ -83,8 +85,8 @@ pub(crate) async fn scram_sha_256_salt( format!( "SCRAM-SHA-256${}:{}${}:{}", SCRAM_DEFAULT_ITERATIONS, - base64::encode(salt), - base64::encode(stored_key), - base64::encode(server_key) + BASE64_STANDARD.encode(salt), + BASE64_STANDARD.encode(stored_key), + BASE64_STANDARD.encode(server_key) ) } diff --git a/libs/proxy/postgres-types2/src/lib.rs b/libs/proxy/postgres-types2/src/lib.rs index 7c9874bda3..c98c45636b 100644 --- a/libs/proxy/postgres-types2/src/lib.rs +++ b/libs/proxy/postgres-types2/src/lib.rs @@ -46,7 +46,7 @@ impl fmt::Display for Type { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self.schema() { "public" | "pg_catalog" => {} - schema => write!(fmt, "{}.", schema)?, + schema => write!(fmt, "{schema}.")?, } fmt.write_str(self.name()) } diff --git a/libs/proxy/tokio-postgres2/src/cancel_query.rs b/libs/proxy/tokio-postgres2/src/cancel_query.rs index 0bdad0b554..94fbf333ed 100644 --- a/libs/proxy/tokio-postgres2/src/cancel_query.rs +++ b/libs/proxy/tokio-postgres2/src/cancel_query.rs @@ -1,5 +1,3 @@ -use std::io; - use tokio::net::TcpStream; use crate::client::SocketConfig; @@ -8,25 +6,15 @@ use crate::tls::MakeTlsConnect; use crate::{Error, cancel_query_raw, connect_socket}; pub(crate) async fn cancel_query( - config: Option, + config: SocketConfig, ssl_mode: SslMode, - mut tls: T, + tls: T, process_id: i32, secret_key: i32, ) -> Result<(), Error> where T: MakeTlsConnect, { - let config = match config { - Some(config) => config, - None => { - return Err(Error::connect(io::Error::new( - io::ErrorKind::InvalidInput, - "unknown host", - ))); - } - }; - let hostname = match &config.host { Host::Tcp(host) => &**host, }; diff --git a/libs/proxy/tokio-postgres2/src/cancel_token.rs b/libs/proxy/tokio-postgres2/src/cancel_token.rs index f6526395ee..c5566b4ad9 100644 --- a/libs/proxy/tokio-postgres2/src/cancel_token.rs +++ b/libs/proxy/tokio-postgres2/src/cancel_token.rs @@ -7,11 +7,16 @@ use 
crate::config::SslMode; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Error, cancel_query, cancel_query_raw}; -/// The capability to request cancellation of in-progress queries on a -/// connection. -#[derive(Clone, Serialize, Deserialize)] +/// A cancellation token that allows easy cancellation of a query. +#[derive(Clone)] pub struct CancelToken { - pub socket_config: Option, + pub socket_config: SocketConfig, + pub raw: RawCancelToken, +} + +/// A raw cancellation token that allows cancellation of a query, given a fresh connection to postgres. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RawCancelToken { pub ssl_mode: SslMode, pub process_id: i32, pub secret_key: i32, @@ -36,14 +41,16 @@ impl CancelToken { { cancel_query::cancel_query( self.socket_config.clone(), - self.ssl_mode, + self.raw.ssl_mode, tls, - self.process_id, - self.secret_key, + self.raw.process_id, + self.raw.secret_key, ) .await } +} +impl RawCancelToken { /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new /// connection itself. pub async fn cancel_query_raw(&self, stream: S, tls: T) -> Result<(), Error> diff --git a/libs/proxy/tokio-postgres2/src/client.rs b/libs/proxy/tokio-postgres2/src/client.rs index a7edfc076a..828884ffd8 100644 --- a/libs/proxy/tokio-postgres2/src/client.rs +++ b/libs/proxy/tokio-postgres2/src/client.rs @@ -12,6 +12,7 @@ use postgres_protocol2::message::frontend; use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; +use crate::cancel_token::RawCancelToken; use crate::codec::{BackendMessages, FrontendMessage}; use crate::config::{Host, SslMode}; use crate::query::RowStream; @@ -89,7 +90,7 @@ pub struct InnerClient { } impl InnerClient { - pub fn start(&mut self) -> Result { + pub fn start(&mut self) -> Result, Error> { self.responses.waiting += 1; Ok(PartialQuery(Some(self))) } @@ -226,7 +227,7 @@ impl Client { &mut self, statement: &str, params: I, - ) -> Result + ) -> Result, Error> where S: AsRef, I: IntoIterator>, @@ -261,7 +262,7 @@ impl Client { pub(crate) async fn simple_query_raw( &mut self, query: &str, - ) -> Result { + ) -> Result, Error> { simple_query::simple_query(self.inner_mut(), query).await } @@ -331,10 +332,12 @@ impl Client { /// connection associated with this client. pub fn cancel_token(&self) -> CancelToken { CancelToken { - socket_config: Some(self.socket_config.clone()), - ssl_mode: self.ssl_mode, - process_id: self.process_id, - secret_key: self.secret_key, + socket_config: self.socket_config.clone(), + raw: RawCancelToken { + ssl_mode: self.ssl_mode, + process_id: self.process_id, + secret_key: self.secret_key, + }, } } diff --git a/libs/proxy/tokio-postgres2/src/config.rs b/libs/proxy/tokio-postgres2/src/config.rs index 978d348741..961cbc923e 100644 --- a/libs/proxy/tokio-postgres2/src/config.rs +++ b/libs/proxy/tokio-postgres2/src/config.rs @@ -12,12 +12,13 @@ use tokio::net::TcpStream; use crate::connect::connect; use crate::connect_raw::{RawConnection, connect_raw}; -use crate::tls::{MakeTlsConnect, TlsConnect}; +use crate::connect_tls::connect_tls; +use crate::maybe_tls_stream::MaybeTlsStream; +use crate::tls::{MakeTlsConnect, TlsConnect, TlsStream}; use crate::{Client, Connection, Error}; /// TLS configuration. #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[non_exhaustive] pub enum SslMode { /// Do not use TLS. Disable, @@ -231,7 +232,7 @@ impl Config { /// Requires the `runtime` Cargo feature (enabled by default). 
pub async fn connect( &self, - tls: T, + tls: &T, ) -> Result<(Client, Connection), Error> where T: MakeTlsConnect, @@ -239,7 +240,7 @@ impl Config { connect(tls, self).await } - pub async fn connect_raw( + pub async fn tls_and_authenticate( &self, stream: S, tls: T, @@ -248,7 +249,19 @@ impl Config { S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - connect_raw(stream, tls, self).await + let stream = connect_tls(stream, self.ssl_mode, tls).await?; + connect_raw(stream, self).await + } + + pub async fn authenticate( + &self, + stream: MaybeTlsStream, + ) -> Result, Error> + where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsStream + Unpin, + { + connect_raw(stream, self).await } } diff --git a/libs/proxy/tokio-postgres2/src/connect.rs b/libs/proxy/tokio-postgres2/src/connect.rs index 39a0a87c74..4a07eccf9a 100644 --- a/libs/proxy/tokio-postgres2/src/connect.rs +++ b/libs/proxy/tokio-postgres2/src/connect.rs @@ -9,11 +9,12 @@ use crate::codec::BackendMessage; use crate::config::Host; use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; +use crate::connect_tls::connect_tls; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, RawConnection}; pub async fn connect( - mut tls: T, + tls: &T, config: &Config, ) -> Result<(Client, Connection), Error> where @@ -44,13 +45,14 @@ where T: TlsConnect, { let socket = connect_socket(host_addr, host, port, config.connect_timeout).await?; + let stream = connect_tls(socket, config.ssl_mode, tls).await?; let RawConnection { stream, parameters, delayed_notice, process_id, secret_key, - } = connect_raw(socket, tls, config).await?; + } = connect_raw(stream, config).await?; let socket_config = SocketConfig { host_addr, diff --git a/libs/proxy/tokio-postgres2/src/connect_raw.rs b/libs/proxy/tokio-postgres2/src/connect_raw.rs index 20dc538cf2..b89a600a2e 100644 --- a/libs/proxy/tokio-postgres2/src/connect_raw.rs +++ b/libs/proxy/tokio-postgres2/src/connect_raw.rs @@ -16,9 +16,8 @@ use tokio_util::codec::Framed; use crate::Error; use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; use crate::config::{self, AuthKeys, Config}; -use crate::connect_tls::connect_tls; use crate::maybe_tls_stream::MaybeTlsStream; -use crate::tls::{TlsConnect, TlsStream}; +use crate::tls::TlsStream; pub struct StartupStream { inner: Framed, PostgresCodec>, @@ -87,16 +86,13 @@ pub struct RawConnection { } pub async fn connect_raw( - stream: S, - tls: T, + stream: MaybeTlsStream, config: &Config, -) -> Result, Error> +) -> Result, Error> where S: AsyncRead + AsyncWrite + Unpin, - T: TlsConnect, + T: TlsStream + Unpin, { - let stream = connect_tls(stream, config.ssl_mode, tls).await?; - let mut stream = StartupStream { inner: Framed::new(stream, PostgresCodec), buf: BackendMessages::empty(), diff --git a/libs/proxy/tokio-postgres2/src/error/mod.rs b/libs/proxy/tokio-postgres2/src/error/mod.rs index 8149bceeb9..5309bce17e 100644 --- a/libs/proxy/tokio-postgres2/src/error/mod.rs +++ b/libs/proxy/tokio-postgres2/src/error/mod.rs @@ -332,10 +332,10 @@ impl fmt::Display for DbError { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{}: {}", self.severity, self.message)?; if let Some(detail) = &self.detail { - write!(fmt, "\nDETAIL: {}", detail)?; + write!(fmt, "\nDETAIL: {detail}")?; } if let Some(hint) = &self.hint { - write!(fmt, "\nHINT: {}", hint)?; + write!(fmt, "\nHINT: {hint}")?; } Ok(()) } @@ -398,9 +398,9 @@ impl fmt::Display for Error { Kind::Io => 
fmt.write_str("error communicating with the server")?, Kind::UnexpectedMessage => fmt.write_str("unexpected message from server")?, Kind::Tls => fmt.write_str("error performing TLS handshake")?, - Kind::ToSql(idx) => write!(fmt, "error serializing parameter {}", idx)?, - Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, - Kind::Column(column) => write!(fmt, "invalid column `{}`", column)?, + Kind::ToSql(idx) => write!(fmt, "error serializing parameter {idx}")?, + Kind::FromSql(idx) => write!(fmt, "error deserializing column {idx}")?, + Kind::Column(column) => write!(fmt, "invalid column `{column}`")?, Kind::Closed => fmt.write_str("connection closed")?, Kind::Db => fmt.write_str("db error")?, Kind::Parse => fmt.write_str("error parsing response from server")?, @@ -411,7 +411,7 @@ impl fmt::Display for Error { Kind::Timeout => fmt.write_str("timeout waiting for server")?, }; if let Some(ref cause) = self.0.cause { - write!(fmt, ": {}", cause)?; + write!(fmt, ": {cause}")?; } Ok(()) } diff --git a/libs/proxy/tokio-postgres2/src/generic_client.rs b/libs/proxy/tokio-postgres2/src/generic_client.rs index eeefb45d26..4c5fc623c5 100644 --- a/libs/proxy/tokio-postgres2/src/generic_client.rs +++ b/libs/proxy/tokio-postgres2/src/generic_client.rs @@ -12,7 +12,11 @@ mod private { /// This trait is "sealed", and cannot be implemented outside of this crate. pub trait GenericClient: private::Sealed { /// Like `Client::query_raw_txt`. - async fn query_raw_txt(&mut self, statement: &str, params: I) -> Result + async fn query_raw_txt( + &mut self, + statement: &str, + params: I, + ) -> Result, Error> where S: AsRef + Sync + Send, I: IntoIterator> + Sync + Send, @@ -22,7 +26,11 @@ pub trait GenericClient: private::Sealed { impl private::Sealed for Client {} impl GenericClient for Client { - async fn query_raw_txt(&mut self, statement: &str, params: I) -> Result + async fn query_raw_txt( + &mut self, + statement: &str, + params: I, + ) -> Result, Error> where S: AsRef + Sync + Send, I: IntoIterator> + Sync + Send, @@ -35,7 +43,11 @@ impl GenericClient for Client { impl private::Sealed for Transaction<'_> {} impl GenericClient for Transaction<'_> { - async fn query_raw_txt(&mut self, statement: &str, params: I) -> Result + async fn query_raw_txt( + &mut self, + statement: &str, + params: I, + ) -> Result, Error> where S: AsRef + Sync + Send, I: IntoIterator> + Sync + Send, diff --git a/libs/proxy/tokio-postgres2/src/lib.rs b/libs/proxy/tokio-postgres2/src/lib.rs index 9556070ed5..791c93b972 100644 --- a/libs/proxy/tokio-postgres2/src/lib.rs +++ b/libs/proxy/tokio-postgres2/src/lib.rs @@ -3,7 +3,7 @@ use postgres_protocol2::message::backend::ReadyForQueryBody; -pub use crate::cancel_token::CancelToken; +pub use crate::cancel_token::{CancelToken, RawCancelToken}; pub use crate::client::{Client, SocketConfig}; pub use crate::config::Config; pub use crate::connect_raw::RawConnection; diff --git a/libs/proxy/tokio-postgres2/src/row.rs b/libs/proxy/tokio-postgres2/src/row.rs index 5fc955eef4..36d578558f 100644 --- a/libs/proxy/tokio-postgres2/src/row.rs +++ b/libs/proxy/tokio-postgres2/src/row.rs @@ -156,7 +156,7 @@ impl Row { { match self.get_inner(&idx) { Ok(ok) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), + Err(err) => panic!("error retrieving column {idx}: {err}"), } } @@ -274,7 +274,7 @@ impl SimpleQueryRow { { match self.get_inner(&idx) { Ok(ok) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), + Err(err) => panic!("error retrieving 
column {idx}: {err}"), } } diff --git a/libs/proxy/tokio-postgres2/src/tls.rs b/libs/proxy/tokio-postgres2/src/tls.rs index 41b51368ff..f9cbcf4991 100644 --- a/libs/proxy/tokio-postgres2/src/tls.rs +++ b/libs/proxy/tokio-postgres2/src/tls.rs @@ -47,7 +47,7 @@ pub trait MakeTlsConnect { /// Creates a new `TlsConnect`or. /// /// The domain name is provided for certificate verification and SNI. - fn make_tls_connect(&mut self, domain: &str) -> Result; + fn make_tls_connect(&self, domain: &str) -> Result; } /// An asynchronous function wrapping a stream in a TLS session. @@ -85,7 +85,7 @@ impl MakeTlsConnect for NoTls { type TlsConnect = NoTls; type Error = NoTlsError; - fn make_tls_connect(&mut self, _: &str) -> Result { + fn make_tls_connect(&self, _: &str) -> Result { Ok(NoTls) } } diff --git a/libs/proxy/tokio-postgres2/src/transaction.rs b/libs/proxy/tokio-postgres2/src/transaction.rs index 12fe0737d4..0e37d2aad7 100644 --- a/libs/proxy/tokio-postgres2/src/transaction.rs +++ b/libs/proxy/tokio-postgres2/src/transaction.rs @@ -47,7 +47,7 @@ impl<'a> Transaction<'a> { &mut self, statement: &str, params: I, - ) -> Result + ) -> Result, Error> where S: AsRef, I: IntoIterator>, diff --git a/libs/remote_storage/src/azure_blob.rs b/libs/remote_storage/src/azure_blob.rs index 5363e935e3..e9c24ac723 100644 --- a/libs/remote_storage/src/azure_blob.rs +++ b/libs/remote_storage/src/azure_blob.rs @@ -10,7 +10,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime}; use std::{env, io}; -use anyhow::{Context, Result}; +use anyhow::{Context, Result, anyhow}; use azure_core::request_options::{IfMatchCondition, MaxResults, Metadata, Range}; use azure_core::{Continuable, HttpClient, RetryOptions, TransportOptions}; use azure_storage::StorageCredentials; @@ -37,6 +37,7 @@ use crate::metrics::{AttemptOutcome, RequestKind, start_measuring_requests}; use crate::{ ConcurrencyLimiter, Download, DownloadError, DownloadKind, DownloadOpts, Listing, ListingMode, ListingObject, RemotePath, RemoteStorage, StorageMetadata, TimeTravelError, TimeoutOrCancel, + Version, VersionKind, }; pub struct AzureBlobStorage { @@ -405,6 +406,39 @@ impl AzureBlobStorage { pub fn container_name(&self) -> &str { &self.container_name } + + async fn list_versions_with_permit( + &self, + _permit: &tokio::sync::SemaphorePermit<'_>, + prefix: Option<&RemotePath>, + mode: ListingMode, + max_keys: Option, + cancel: &CancellationToken, + ) -> Result { + let customize_builder = |mut builder: ListBlobsBuilder| { + builder = builder.include_versions(true); + // We do not return this info back to `VersionListing` yet. 
+ builder = builder.include_deleted(true); + builder + }; + let kind = RequestKind::ListVersions; + + let mut stream = std::pin::pin!(self.list_streaming_for_fn( + prefix, + mode, + max_keys, + cancel, + kind, + customize_builder + )); + let mut combined: crate::VersionListing = + stream.next().await.expect("At least one item required")?; + while let Some(list) = stream.next().await { + let list = list?; + combined.versions.extend(list.versions.into_iter()); + } + Ok(combined) + } } trait ListingCollector { @@ -488,27 +522,10 @@ impl RemoteStorage for AzureBlobStorage { max_keys: Option, cancel: &CancellationToken, ) -> std::result::Result { - let customize_builder = |mut builder: ListBlobsBuilder| { - builder = builder.include_versions(true); - builder - }; let kind = RequestKind::ListVersions; - - let mut stream = std::pin::pin!(self.list_streaming_for_fn( - prefix, - mode, - max_keys, - cancel, - kind, - customize_builder - )); - let mut combined: crate::VersionListing = - stream.next().await.expect("At least one item required")?; - while let Some(list) = stream.next().await { - let list = list?; - combined.versions.extend(list.versions.into_iter()); - } - Ok(combined) + let permit = self.permit(kind, cancel).await?; + self.list_versions_with_permit(&permit, prefix, mode, max_keys, cancel) + .await } async fn head_object( @@ -803,14 +820,159 @@ impl RemoteStorage for AzureBlobStorage { async fn time_travel_recover( &self, - _prefix: Option<&RemotePath>, - _timestamp: SystemTime, - _done_if_after: SystemTime, - _cancel: &CancellationToken, + prefix: Option<&RemotePath>, + timestamp: SystemTime, + done_if_after: SystemTime, + cancel: &CancellationToken, + _complexity_limit: Option, ) -> Result<(), TimeTravelError> { - // TODO use Azure point in time recovery feature for this - // https://learn.microsoft.com/en-us/azure/storage/blobs/point-in-time-restore-overview - Err(TimeTravelError::Unimplemented) + let msg = "PLEASE NOTE: Azure Blob storage time-travel recovery may not work as expected " + .to_string() + + "for some specific files. If a file gets deleted but then overwritten and we want to recover " + + "to the time during the file was not present, this functionality will recover the file. Only " + + "use the functionality for services that can tolerate this. For example, recovering a state of the " + + "pageserver tenants."; + tracing::error!("{}", msg); + + let kind = RequestKind::TimeTravel; + let permit = self.permit(kind, cancel).await?; + + let mode = ListingMode::NoDelimiter; + let version_listing = self + .list_versions_with_permit(&permit, prefix, mode, None, cancel) + .await + .map_err(|err| match err { + DownloadError::Other(e) => TimeTravelError::Other(e), + DownloadError::Cancelled => TimeTravelError::Cancelled, + other => TimeTravelError::Other(other.into()), + })?; + let versions_and_deletes = version_listing.versions; + + tracing::info!( + "Built list for time travel with {} versions and deletions", + versions_and_deletes.len() + ); + + // Work on the list of references instead of the objects directly, + // otherwise we get lifetime errors in the sort_by_key call below. + let mut versions_and_deletes = versions_and_deletes.iter().collect::>(); + + versions_and_deletes.sort_by_key(|vd| (&vd.key, &vd.last_modified)); + + let mut vds_for_key = HashMap::<_, Vec<_>>::new(); + + for vd in &versions_and_deletes { + let Version { key, .. 
} = &vd; + let version_id = vd.version_id().map(|v| v.0.as_str()); + if version_id == Some("null") { + return Err(TimeTravelError::Other(anyhow!( + "Received ListVersions response for key={key} with version_id='null', \ + indicating either disabled versioning, or legacy objects with null version id values" + ))); + } + tracing::trace!("Parsing version key={key} kind={:?}", vd.kind); + + vds_for_key.entry(key).or_default().push(vd); + } + + let warn_threshold = 3; + let max_retries = 10; + let is_permanent = |e: &_| matches!(e, TimeTravelError::Cancelled); + + for (key, versions) in vds_for_key { + let last_vd = versions.last().unwrap(); + let key = self.relative_path_to_name(key); + if last_vd.last_modified > done_if_after { + tracing::debug!("Key {key} has version later than done_if_after, skipping"); + continue; + } + // the version we want to restore to. + let version_to_restore_to = + match versions.binary_search_by_key(×tamp, |tpl| tpl.last_modified) { + Ok(v) => v, + Err(e) => e, + }; + if version_to_restore_to == versions.len() { + tracing::debug!("Key {key} has no changes since timestamp, skipping"); + continue; + } + let mut do_delete = false; + if version_to_restore_to == 0 { + // All versions more recent, so the key didn't exist at the specified time point. + tracing::debug!( + "All {} versions more recent for {key}, deleting", + versions.len() + ); + do_delete = true; + } else { + match &versions[version_to_restore_to - 1] { + Version { + kind: VersionKind::Version(version_id), + .. + } => { + let source_url = format!( + "{}/{}?versionid={}", + self.client + .url() + .map_err(|e| TimeTravelError::Other(anyhow!("{e}")))?, + key, + version_id.0 + ); + tracing::debug!( + "Promoting old version {} for {key} at {}...", + version_id.0, + source_url + ); + backoff::retry( + || async { + let blob_client = self.client.blob_client(key.clone()); + let op = blob_client.copy(Url::from_str(&source_url).unwrap()); + tokio::select! { + res = op => res.map_err(|e| TimeTravelError::Other(e.into())), + _ = cancel.cancelled() => Err(TimeTravelError::Cancelled), + } + }, + is_permanent, + warn_threshold, + max_retries, + "copying object version for time_travel_recover", + cancel, + ) + .await + .ok_or_else(|| TimeTravelError::Cancelled) + .and_then(|x| x)?; + tracing::info!(?version_id, %key, "Copied old version in Azure blob storage"); + } + Version { + kind: VersionKind::DeletionMarker, + .. 
+ } => { + do_delete = true; + } + } + }; + if do_delete { + if matches!(last_vd.kind, VersionKind::DeletionMarker) { + // Key has since been deleted (but there was some history), no need to do anything + tracing::debug!("Key {key} already deleted, skipping."); + } else { + tracing::debug!("Deleting {key}..."); + + self.delete(&RemotePath::from_string(&key).unwrap(), cancel) + .await + .map_err(|e| { + // delete_oid0 will use TimeoutOrCancel + if TimeoutOrCancel::caused_by_cancel(&e) { + TimeTravelError::Cancelled + } else { + TimeTravelError::Other(e) + } + })?; + } + } + } + + Ok(()) } } diff --git a/libs/remote_storage/src/config.rs b/libs/remote_storage/src/config.rs index 52978be5b4..5bc1f678ae 100644 --- a/libs/remote_storage/src/config.rs +++ b/libs/remote_storage/src/config.rs @@ -87,6 +87,28 @@ pub enum RemoteStorageKind { AzureContainer(AzureConfig), } +#[derive(Deserialize)] +#[serde(tag = "type")] +/// Version of RemoteStorageKind which deserializes with type: LocalFs | AwsS3 | AzureContainer +/// Needed for endpoint storage service +pub enum TypedRemoteStorageKind { + LocalFs { local_path: Utf8PathBuf }, + AwsS3(S3Config), + AzureContainer(AzureConfig), +} + +impl From for RemoteStorageKind { + fn from(value: TypedRemoteStorageKind) -> Self { + match value { + TypedRemoteStorageKind::LocalFs { local_path } => { + RemoteStorageKind::LocalFs { local_path } + } + TypedRemoteStorageKind::AwsS3(v) => RemoteStorageKind::AwsS3(v), + TypedRemoteStorageKind::AzureContainer(v) => RemoteStorageKind::AzureContainer(v), + } + } +} + /// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write). #[derive(Clone, PartialEq, Eq, Deserialize, Serialize)] pub struct S3Config { diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index b265d37a62..ed416b2811 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -31,6 +31,7 @@ use anyhow::Context; pub use azure_core::Etag; use bytes::Bytes; use camino::{Utf8Path, Utf8PathBuf}; +pub use config::TypedRemoteStorageKind; pub use error::{DownloadError, TimeTravelError, TimeoutOrCancel}; use futures::StreamExt; use futures::stream::Stream; @@ -440,6 +441,7 @@ pub trait RemoteStorage: Send + Sync + 'static { timestamp: SystemTime, done_if_after: SystemTime, cancel: &CancellationToken, + complexity_limit: Option, ) -> Result<(), TimeTravelError>; } @@ -651,22 +653,23 @@ impl GenericRemoteStorage> { timestamp: SystemTime, done_if_after: SystemTime, cancel: &CancellationToken, + complexity_limit: Option, ) -> Result<(), TimeTravelError> { match self { Self::LocalFs(s) => { - s.time_travel_recover(prefix, timestamp, done_if_after, cancel) + s.time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit) .await } Self::AwsS3(s) => { - s.time_travel_recover(prefix, timestamp, done_if_after, cancel) + s.time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit) .await } Self::AzureBlob(s) => { - s.time_travel_recover(prefix, timestamp, done_if_after, cancel) + s.time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit) .await } Self::Unreliable(s) => { - s.time_travel_recover(prefix, timestamp, done_if_after, cancel) + s.time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit) .await } } @@ -674,6 +677,15 @@ impl GenericRemoteStorage> { } impl GenericRemoteStorage { + pub async fn from_storage_kind(kind: TypedRemoteStorageKind) -> anyhow::Result { + Self::from_config(&RemoteStorageConfig 
{ + storage: kind.into(), + timeout: RemoteStorageConfig::DEFAULT_TIMEOUT, + small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT, + }) + .await + } + pub async fn from_config(storage_config: &RemoteStorageConfig) -> anyhow::Result { let timeout = storage_config.timeout; diff --git a/libs/remote_storage/src/local_fs.rs b/libs/remote_storage/src/local_fs.rs index 6607b55f1a..30690b1bdb 100644 --- a/libs/remote_storage/src/local_fs.rs +++ b/libs/remote_storage/src/local_fs.rs @@ -400,7 +400,7 @@ impl RemoteStorage for LocalFs { key }; - let relative_key = format!("{}", relative_key); + let relative_key = format!("{relative_key}"); if relative_key.contains(REMOTE_STORAGE_PREFIX_SEPARATOR) { let first_part = relative_key .split(REMOTE_STORAGE_PREFIX_SEPARATOR) @@ -594,13 +594,9 @@ impl RemoteStorage for LocalFs { let from_path = from.with_base(&self.storage_root); let to_path = to.with_base(&self.storage_root); create_target_directory(&to_path).await?; - fs::copy(&from_path, &to_path).await.with_context(|| { - format!( - "Failed to copy file from '{from_path}' to '{to_path}'", - from_path = from_path, - to_path = to_path - ) - })?; + fs::copy(&from_path, &to_path) + .await + .with_context(|| format!("Failed to copy file from '{from_path}' to '{to_path}'"))?; Ok(()) } @@ -610,6 +606,7 @@ impl RemoteStorage for LocalFs { _timestamp: SystemTime, _done_if_after: SystemTime, _cancel: &CancellationToken, + _complexity_limit: Option, ) -> Result<(), TimeTravelError> { Err(TimeTravelError::Unimplemented) } @@ -1182,7 +1179,7 @@ mod fs_tests { .write(true) .create_new(true) .open(path)?; - write!(file_for_writing, "{}", contents)?; + write!(file_for_writing, "{contents}")?; drop(file_for_writing); let file_size = path.metadata()?.len() as usize; Ok(( diff --git a/libs/remote_storage/src/s3_bucket.rs b/libs/remote_storage/src/s3_bucket.rs index d98ff552ee..8a2e5bd10e 100644 --- a/libs/remote_storage/src/s3_bucket.rs +++ b/libs/remote_storage/src/s3_bucket.rs @@ -981,22 +981,16 @@ impl RemoteStorage for S3Bucket { timestamp: SystemTime, done_if_after: SystemTime, cancel: &CancellationToken, + complexity_limit: Option, ) -> Result<(), TimeTravelError> { let kind = RequestKind::TimeTravel; let permit = self.permit(kind, cancel).await?; tracing::trace!("Target time: {timestamp:?}, done_if_after {done_if_after:?}"); - // Limit the number of versions deletions, mostly so that we don't - // keep requesting forever if the list is too long, as we'd put the - // list in RAM. - // Building a list of 100k entries that reaches the limit roughly takes - // 40 seconds, and roughly corresponds to tenants of 2 TiB physical size. - const COMPLEXITY_LIMIT: Option = NonZeroU32::new(100_000); - let mode = ListingMode::NoDelimiter; let version_listing = self - .list_versions_with_permit(&permit, prefix, mode, COMPLEXITY_LIMIT, cancel) + .list_versions_with_permit(&permit, prefix, mode, complexity_limit, cancel) .await .map_err(|err| match err { DownloadError::Other(e) => TimeTravelError::Other(e), @@ -1022,6 +1016,7 @@ impl RemoteStorage for S3Bucket { let Version { key, .. 
} = &vd; let version_id = vd.version_id().map(|v| v.0.as_str()); if version_id == Some("null") { + // TODO: check the behavior of using the SDK on a non-versioned container return Err(TimeTravelError::Other(anyhow!( "Received ListVersions response for key={key} with version_id='null', \ indicating either disabled versioning, or legacy objects with null version id values" diff --git a/libs/remote_storage/src/simulate_failures.rs b/libs/remote_storage/src/simulate_failures.rs index 894cf600be..f9856a5856 100644 --- a/libs/remote_storage/src/simulate_failures.rs +++ b/libs/remote_storage/src/simulate_failures.rs @@ -240,11 +240,12 @@ impl RemoteStorage for UnreliableWrapper { timestamp: SystemTime, done_if_after: SystemTime, cancel: &CancellationToken, + complexity_limit: Option, ) -> Result<(), TimeTravelError> { self.attempt(RemoteOp::TimeTravelRecover(prefix.map(|p| p.to_owned()))) .map_err(TimeTravelError::Other)?; self.inner - .time_travel_recover(prefix, timestamp, done_if_after, cancel) + .time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit) .await } } diff --git a/libs/remote_storage/tests/test_real_s3.rs b/libs/remote_storage/tests/test_real_s3.rs index d38e13fd05..6b893edf75 100644 --- a/libs/remote_storage/tests/test_real_s3.rs +++ b/libs/remote_storage/tests/test_real_s3.rs @@ -157,7 +157,7 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow: // No changes after recovery to t2 (no-op) let t_final = time_point().await; ctx.client - .time_travel_recover(None, t2, t_final, &cancel) + .time_travel_recover(None, t2, t_final, &cancel, None) .await?; let t2_files_recovered = list_files(&ctx.client, &cancel).await?; println!("after recovery to t2: {t2_files_recovered:?}"); @@ -173,7 +173,7 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow: // after recovery to t1: path1 is back, path2 has the old content let t_final = time_point().await; ctx.client - .time_travel_recover(None, t1, t_final, &cancel) + .time_travel_recover(None, t1, t_final, &cancel, None) .await?; let t1_files_recovered = list_files(&ctx.client, &cancel).await?; println!("after recovery to t1: {t1_files_recovered:?}"); @@ -189,7 +189,7 @@ async fn s3_time_travel_recovery_works(ctx: &mut MaybeEnabledStorage) -> anyhow: // after recovery to t0: everything is gone except for path1 let t_final = time_point().await; ctx.client - .time_travel_recover(None, t0, t_final, &cancel) + .time_travel_recover(None, t0, t_final, &cancel, None) .await?; let t0_files_recovered = list_files(&ctx.client, &cancel).await?; println!("after recovery to t0: {t0_files_recovered:?}"); diff --git a/libs/safekeeper_api/Cargo.toml b/libs/safekeeper_api/Cargo.toml index d9d080e8fe..928e583b0b 100644 --- a/libs/safekeeper_api/Cargo.toml +++ b/libs/safekeeper_api/Cargo.toml @@ -10,6 +10,7 @@ const_format.workspace = true serde.workspace = true serde_json.workspace = true postgres_ffi.workspace = true +postgres_versioninfo.workspace = true pq_proto.workspace = true tokio.workspace = true utils.workspace = true diff --git a/libs/safekeeper_api/src/lib.rs b/libs/safekeeper_api/src/lib.rs index fa86523ad7..ba0bfee971 100644 --- a/libs/safekeeper_api/src/lib.rs +++ b/libs/safekeeper_api/src/lib.rs @@ -8,6 +8,8 @@ pub mod membership; /// Public API types pub mod models; +pub use postgres_versioninfo::{PgMajorVersion, PgVersionId}; + /// Consensus logical timestamp. Note: it is a part of sk control file. 
pub type Term = u64; /// With this term timeline is created initially. It @@ -20,7 +22,7 @@ pub const INITIAL_TERM: Term = 0; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ServerInfo { /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: SystemId, pub wal_seg_size: u32, } diff --git a/libs/safekeeper_api/src/membership.rs b/libs/safekeeper_api/src/membership.rs index 3d4d17096e..1751c54f6a 100644 --- a/libs/safekeeper_api/src/membership.rs +++ b/libs/safekeeper_api/src/membership.rs @@ -193,10 +193,10 @@ mod tests { }) .unwrap(); - println!("members: {}", members); + println!("members: {members}"); let j = serde_json::to_string(&members).expect("failed to serialize"); - println!("members json: {}", j); + println!("members json: {j}"); assert_eq!( j, r#"[{"id":42,"host":"lala.org","pg_port":5432},{"id":43,"host":"bubu.org","pg_port":5432}]"# diff --git a/libs/safekeeper_api/src/models.rs b/libs/safekeeper_api/src/models.rs index 8658dc4011..1774489c1c 100644 --- a/libs/safekeeper_api/src/models.rs +++ b/libs/safekeeper_api/src/models.rs @@ -4,6 +4,7 @@ use std::net::SocketAddr; use pageserver_api::shard::ShardIdentity; use postgres_ffi::TimestampTz; +use postgres_versioninfo::PgVersionId; use serde::{Deserialize, Serialize}; use tokio::time::Instant; use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId}; @@ -13,7 +14,7 @@ use utils::pageserver_feedback::PageserverFeedback; use crate::membership::Configuration; use crate::{ServerInfo, Term}; -#[derive(Debug, Serialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct SafekeeperStatus { pub id: NodeId, } @@ -23,8 +24,7 @@ pub struct TimelineCreateRequest { pub tenant_id: TenantId, pub timeline_id: TimelineId, pub mconf: Configuration, - /// In the PG_VERSION_NUM macro format, like 140017. - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: Option, // By default WAL_SEGMENT_SIZE pub wal_seg_size: Option, @@ -210,7 +210,7 @@ pub struct TimelineStatus { } /// Request to switch membership configuration. -#[derive(Serialize, Deserialize)] +#[derive(Clone, Serialize, Deserialize)] #[serde(transparent)] pub struct TimelineMembershipSwitchRequest { pub mconf: Configuration, @@ -221,6 +221,8 @@ pub struct TimelineMembershipSwitchRequest { pub struct TimelineMembershipSwitchResponse { pub previous_conf: Configuration, pub current_conf: Configuration, + pub term: Term, + pub flush_lsn: Lsn, } #[derive(Clone, Copy, Serialize, Deserialize)] diff --git a/libs/utils/src/error.rs b/libs/utils/src/error.rs index 7ce203e918..6fa86916c1 100644 --- a/libs/utils/src/error.rs +++ b/libs/utils/src/error.rs @@ -41,7 +41,7 @@ pub fn report_compact_sources(e: &E) -> impl std::fmt::Dis // why is E a generic parameter here? 
hope that rustc will see through a default // Error::source implementation and leave the following out if there cannot be any // sources: - Sources(self.0.source()).try_for_each(|src| write!(f, ": {}", src)) + Sources(self.0.source()).try_for_each(|src| write!(f, ": {src}")) } } diff --git a/libs/utils/src/generation.rs b/libs/utils/src/generation.rs index b5e4a4644a..8a3bef914a 100644 --- a/libs/utils/src/generation.rs +++ b/libs/utils/src/generation.rs @@ -135,7 +135,7 @@ impl Debug for Generation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Valid(v) => { - write!(f, "{:08x}", v) + write!(f, "{v:08x}") } Self::None => { write!(f, "") diff --git a/libs/utils/src/id.rs b/libs/utils/src/id.rs index 68cb1f0209..e3037aec21 100644 --- a/libs/utils/src/id.rs +++ b/libs/utils/src/id.rs @@ -280,7 +280,7 @@ impl TryFrom> for TimelineId { value .unwrap_or_default() .parse::() - .with_context(|| format!("Could not parse timeline id from {:?}", value)) + .with_context(|| format!("Could not parse timeline id from {value:?}")) } } diff --git a/libs/utils/src/logging.rs b/libs/utils/src/logging.rs index 0ac8201795..5828a400a0 100644 --- a/libs/utils/src/logging.rs +++ b/libs/utils/src/logging.rs @@ -24,12 +24,28 @@ macro_rules! critical { if cfg!(debug_assertions) { panic!($($arg)*); } + // Increment both metrics $crate::logging::TRACING_EVENT_COUNT_METRIC.inc_critical(); let backtrace = std::backtrace::Backtrace::capture(); tracing::error!("CRITICAL: {}\n{backtrace}", format!($($arg)*)); }}; } +#[macro_export] +macro_rules! critical_timeline { + ($tenant_shard_id:expr, $timeline_id:expr, $($arg:tt)*) => {{ + if cfg!(debug_assertions) { + panic!($($arg)*); + } + // Increment both metrics + $crate::logging::TRACING_EVENT_COUNT_METRIC.inc_critical(); + $crate::logging::HADRON_CRITICAL_STORAGE_EVENT_COUNT_METRIC.inc(&$tenant_shard_id.to_string(), &$timeline_id.to_string()); + let backtrace = std::backtrace::Backtrace::capture(); + tracing::error!("CRITICAL: [tenant_shard_id: {}, timeline_id: {}] {}\n{backtrace}", + $tenant_shard_id, $timeline_id, format!($($arg)*)); + }}; +} + #[derive(EnumString, strum_macros::Display, VariantNames, Eq, PartialEq, Debug, Clone, Copy)] #[strum(serialize_all = "snake_case")] pub enum LogFormat { @@ -61,6 +77,36 @@ pub struct TracingEventCountMetric { trace: IntCounter, } +// Begin Hadron: Add a HadronCriticalStorageEventCountMetric metric that is sliced by tenant_id and timeline_id +pub struct HadronCriticalStorageEventCountMetric { + critical: IntCounterVec, +} + +pub static HADRON_CRITICAL_STORAGE_EVENT_COUNT_METRIC: Lazy = + Lazy::new(|| { + let vec = metrics::register_int_counter_vec!( + "hadron_critical_storage_event_count", + "Number of critical storage events, by tenant_id and timeline_id", + &["tenant_shard_id", "timeline_id"] + ) + .expect("failed to define metric"); + HadronCriticalStorageEventCountMetric::new(vec) + }); + +impl HadronCriticalStorageEventCountMetric { + fn new(vec: IntCounterVec) -> Self { + Self { critical: vec } + } + + // Allow public access from `critical!` macro. 
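+ // Specifically, it is the `critical_timeline!` macro above that calls this, passing the
+ // tenant shard id and timeline id as label values.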
+ pub fn inc(&self, tenant_shard_id: &str, timeline_id: &str) { + self.critical + .with_label_values(&[tenant_shard_id, timeline_id]) + .inc(); + } +} +// End Hadron + pub static TRACING_EVENT_COUNT_METRIC: Lazy = Lazy::new(|| { let vec = metrics::register_int_counter_vec!( "libmetrics_tracing_event_count", diff --git a/libs/utils/src/postgres_client.rs b/libs/utils/src/postgres_client.rs index 4167839e28..7596fefe38 100644 --- a/libs/utils/src/postgres_client.rs +++ b/libs/utils/src/postgres_client.rs @@ -89,7 +89,7 @@ pub fn wal_stream_connection_config( .set_password(args.auth_token.map(|s| s.to_owned())); if let Some(availability_zone) = args.availability_zone { - connstr = connstr.extend_options([format!("availability_zone={}", availability_zone)]); + connstr = connstr.extend_options([format!("availability_zone={availability_zone}")]); } Ok(connstr) diff --git a/libs/utils/src/shard.rs b/libs/utils/src/shard.rs index c8c410a725..633a57c97f 100644 --- a/libs/utils/src/shard.rs +++ b/libs/utils/src/shard.rs @@ -49,6 +49,12 @@ pub struct TenantShardId { pub shard_count: ShardCount, } +impl std::fmt::Display for ShardCount { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + impl ShardCount { pub const MAX: Self = Self(u8::MAX); pub const MIN: Self = Self(0); @@ -196,7 +202,7 @@ impl std::fmt::Display for TenantShardId { impl std::fmt::Debug for TenantShardId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Debug is the same as Display: the compact hex representation - write!(f, "{}", self) + write!(f, "{self}") } } @@ -284,7 +290,7 @@ impl std::fmt::Display for ShardIndex { impl std::fmt::Debug for ShardIndex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Debug is the same as Display: the compact hex representation - write!(f, "{}", self) + write!(f, "{self}") } } diff --git a/libs/utils/src/signals.rs b/libs/utils/src/signals.rs index 426bb65916..bdaa3cb665 100644 --- a/libs/utils/src/signals.rs +++ b/libs/utils/src/signals.rs @@ -29,7 +29,7 @@ impl ShutdownSignals { SIGINT => Signal::Interrupt, SIGTERM => Signal::Terminate, SIGQUIT => Signal::Quit, - other => panic!("unknown signal: {}", other), + other => panic!("unknown signal: {other}"), }; handler(signal)?; diff --git a/libs/utils/src/sync/gate.rs b/libs/utils/src/sync/gate.rs index 93460785bf..862b2cff9e 100644 --- a/libs/utils/src/sync/gate.rs +++ b/libs/utils/src/sync/gate.rs @@ -86,6 +86,14 @@ pub enum GateError { GateClosed, } +impl GateError { + pub fn is_cancel(&self) -> bool { + match self { + GateError::GateClosed => true, + } + } +} + impl Default for Gate { fn default() -> Self { Self { diff --git a/libs/vm_monitor/src/dispatcher.rs b/libs/vm_monitor/src/dispatcher.rs index 7b7201ab77..7bd6adc2f8 100644 --- a/libs/vm_monitor/src/dispatcher.rs +++ b/libs/vm_monitor/src/dispatcher.rs @@ -90,8 +90,7 @@ impl Dispatcher { Err(e) => { sink.send(Message::Text(Utf8Bytes::from( serde_json::to_string(&ProtocolResponse::Error(format!( - "Received protocol version range {} which does not overlap with {}", - agent_range, monitor_range + "Received protocol version range {agent_range} which does not overlap with {monitor_range}" ))) .unwrap(), ))) diff --git a/libs/vm_monitor/src/filecache.rs b/libs/vm_monitor/src/filecache.rs index bc42347e5a..55bbdea169 100644 --- a/libs/vm_monitor/src/filecache.rs +++ b/libs/vm_monitor/src/filecache.rs @@ -285,7 +285,7 @@ impl FileCacheState { // why we're constructing the query here. 
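// (ALTER SYSTEM is a utility statement and cannot take bind parameters, which is
// presumably why the new size is interpolated into the SQL text here.)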
self.client .query( - &format!("ALTER SYSTEM SET neon.file_cache_size_limit = {};", num_mb), + &format!("ALTER SYSTEM SET neon.file_cache_size_limit = {num_mb};"), &[], ) .await diff --git a/libs/wal_decoder/Cargo.toml b/libs/wal_decoder/Cargo.toml index cb0ef4b00d..600ef091f5 100644 --- a/libs/wal_decoder/Cargo.toml +++ b/libs/wal_decoder/Cargo.toml @@ -14,6 +14,7 @@ bytes.workspace = true pageserver_api.workspace = true prost.workspace = true postgres_ffi.workspace = true +postgres_ffi_types.workspace = true serde.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["io-util"] } diff --git a/libs/wal_decoder/benches/bench_interpret_wal.rs b/libs/wal_decoder/benches/bench_interpret_wal.rs index ed6ba4d267..e3956eca05 100644 --- a/libs/wal_decoder/benches/bench_interpret_wal.rs +++ b/libs/wal_decoder/benches/bench_interpret_wal.rs @@ -10,7 +10,7 @@ use futures::StreamExt; use futures::stream::FuturesUnordered; use pageserver_api::shard::{ShardIdentity, ShardStripeSize}; use postgres_ffi::waldecoder::WalStreamDecoder; -use postgres_ffi::{MAX_SEND_SIZE, WAL_SEGMENT_SIZE}; +use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, WAL_SEGMENT_SIZE}; use pprof::criterion::{Output, PProfProfiler}; use remote_storage::{ DownloadOpts, GenericRemoteStorage, ListingMode, RemoteStorageConfig, RemoteStorageKind, @@ -64,7 +64,7 @@ async fn download_bench_data( let temp_dir_parent: Utf8PathBuf = env::current_dir().unwrap().try_into()?; let temp_dir = camino_tempfile::tempdir_in(temp_dir_parent)?; - eprintln!("Downloading benchmark data to {:?}", temp_dir); + eprintln!("Downloading benchmark data to {temp_dir:?}"); let listing = client .list(None, ListingMode::NoDelimiter, None, cancel) @@ -115,12 +115,12 @@ struct BenchmarkData { #[derive(Deserialize)] struct BenchmarkMetadata { - pg_version: u32, + pg_version: PgMajorVersion, start_lsn: Lsn, } async fn load_bench_data(path: &Utf8Path, input_size: usize) -> anyhow::Result { - eprintln!("Loading benchmark data from {:?}", path); + eprintln!("Loading benchmark data from {path:?}"); let mut entries = tokio::fs::read_dir(path).await?; let mut ordered_segment_paths = Vec::new(); diff --git a/libs/wal_decoder/build.rs b/libs/wal_decoder/build.rs index d5b7ad02ad..e8acb52256 100644 --- a/libs/wal_decoder/build.rs +++ b/libs/wal_decoder/build.rs @@ -6,6 +6,6 @@ fn main() -> Result<(), Box> { // the build then. Anyway, per cargo docs build script shouldn't output to // anywhere but $OUT_DIR. 
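// (tonic_build::compile_protos writes the generated Rust for interpreted_wal.proto into
// $OUT_DIR by default, so this build script stays within that rule.)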
tonic_build::compile_protos("proto/interpreted_wal.proto") - .unwrap_or_else(|e| panic!("failed to compile protos {:?}", e)); + .unwrap_or_else(|e| panic!("failed to compile protos {e:?}")); Ok(()) } diff --git a/libs/wal_decoder/src/decoder.rs b/libs/wal_decoder/src/decoder.rs index cb0835e894..0843eb35bf 100644 --- a/libs/wal_decoder/src/decoder.rs +++ b/libs/wal_decoder/src/decoder.rs @@ -7,9 +7,9 @@ use bytes::{Buf, Bytes}; use pageserver_api::key::rel_block_to_key; use pageserver_api::reltag::{RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; -use postgres_ffi::pg_constants; -use postgres_ffi::relfile_utils::VISIBILITYMAP_FORKNUM; use postgres_ffi::walrecord::*; +use postgres_ffi::{PgMajorVersion, pg_constants}; +use postgres_ffi_types::forknum::VISIBILITYMAP_FORKNUM; use utils::lsn::Lsn; use crate::models::*; @@ -24,7 +24,7 @@ impl InterpretedWalRecord { buf: Bytes, shards: &[ShardIdentity], next_record_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { let mut decoded = DecodedWALRecord::default(); decode_wal_record(buf, &mut decoded, pg_version)?; @@ -78,7 +78,7 @@ impl MetadataRecord { decoded: &DecodedWALRecord, shard_records: &mut HashMap, next_record_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<()> { // Note: this doesn't actually copy the bytes since // the [`Bytes`] type implements it via a level of indirection. @@ -193,7 +193,7 @@ impl MetadataRecord { fn decode_heapam_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { // Handle VM bit updates that are implicitly part of heap records. @@ -205,7 +205,7 @@ impl MetadataRecord { let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS; match pg_version { - 14 => { + PgMajorVersion::PG14 => { if decoded.xl_rmid == pg_constants::RM_HEAP_ID { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; @@ -272,7 +272,7 @@ impl MetadataRecord { anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); } } - 15 => { + PgMajorVersion::PG15 => { if decoded.xl_rmid == pg_constants::RM_HEAP_ID { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; @@ -339,7 +339,7 @@ impl MetadataRecord { anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); } } - 16 => { + PgMajorVersion::PG16 => { if decoded.xl_rmid == pg_constants::RM_HEAP_ID { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; @@ -406,7 +406,7 @@ impl MetadataRecord { anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); } } - 17 => { + PgMajorVersion::PG17 => { if decoded.xl_rmid == pg_constants::RM_HEAP_ID { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; @@ -473,7 +473,6 @@ impl MetadataRecord { anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); } } - _ => {} } if new_heap_blkno.is_some() || old_heap_blkno.is_some() { @@ -500,7 +499,7 @@ impl MetadataRecord { fn decode_neonmgr_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { // Handle VM bit updates that are implicitly part of heap records. 
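For context on the repeated `u32` -> `PgMajorVersion` changes in this file: matching on the enum is what lets catch-all arms like the `_ => {}` removed above go away, because the compiler now checks that every supported major version is handled. A minimal sketch of that benefit, not part of the patch (the helper name is made up):

use postgres_ffi::PgMajorVersion;

// Hypothetical helper: with an exhaustive match, adding a future PG18 variant to
// PgMajorVersion turns every decoder arm that forgot it into a compile error,
// instead of silently falling through a `_ => {}` arm as the old u32 code could.
fn major_version_label(pg_version: PgMajorVersion) -> &'static str {
    match pg_version {
        PgMajorVersion::PG14 => "v14",
        PgMajorVersion::PG15 => "v15",
        PgMajorVersion::PG16 => "v16",
        PgMajorVersion::PG17 => "v17",
    }
}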
@@ -514,7 +513,7 @@ impl MetadataRecord { assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID); match pg_version { - 16 | 17 => { + PgMajorVersion::PG16 | PgMajorVersion::PG17 => { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; match info { @@ -574,7 +573,7 @@ impl MetadataRecord { info => anyhow::bail!("Unknown WAL record type for Neon RMGR: {}", info), } } - _ => anyhow::bail!( + PgMajorVersion::PG15 | PgMajorVersion::PG14 => anyhow::bail!( "Neon RMGR has no known compatibility with PostgreSQL version {}", pg_version ), @@ -629,116 +628,121 @@ impl MetadataRecord { fn decode_dbase_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { // TODO: Refactor this to avoid the duplication between postgres versions. let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK; tracing::debug!(%info, %pg_version, "handle RM_DBASE_ID"); - if pg_version == 14 { - if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE { - let createdb = XlCreateDatabase::decode(buf); - tracing::debug!("XLOG_DBASE_CREATE v14"); + match pg_version { + PgMajorVersion::PG14 => { + if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE { + let createdb = XlCreateDatabase::decode(buf); + tracing::debug!("XLOG_DBASE_CREATE v14"); - let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { - db_id: createdb.db_id, - tablespace_id: createdb.tablespace_id, - src_db_id: createdb.src_db_id, - src_tablespace_id: createdb.src_tablespace_id, - })); + let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { + db_id: createdb.db_id, + tablespace_id: createdb.tablespace_id, + src_db_id: createdb.src_db_id, + src_tablespace_id: createdb.src_tablespace_id, + })); - return Ok(Some(record)); - } else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP { - let dropdb = XlDropDatabase::decode(buf); + return Ok(Some(record)); + } else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP { + let dropdb = XlDropDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { - db_id: dropdb.db_id, - tablespace_ids: dropdb.tablespace_ids, - })); + let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { + db_id: dropdb.db_id, + tablespace_ids: dropdb.tablespace_ids, + })); - return Ok(Some(record)); + return Ok(Some(record)); + } } - } else if pg_version == 15 { - if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG { - tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); - } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY { - // The XLOG record was renamed between v14 and v15, - // but the record format is the same. - // So we can reuse XlCreateDatabase here. - tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); + PgMajorVersion::PG15 => { + if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG { + tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); + } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY { + // The XLOG record was renamed between v14 and v15, + // but the record format is the same. + // So we can reuse XlCreateDatabase here. 
+ tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); - let createdb = XlCreateDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { - db_id: createdb.db_id, - tablespace_id: createdb.tablespace_id, - src_db_id: createdb.src_db_id, - src_tablespace_id: createdb.src_tablespace_id, - })); + let createdb = XlCreateDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { + db_id: createdb.db_id, + tablespace_id: createdb.tablespace_id, + src_db_id: createdb.src_db_id, + src_tablespace_id: createdb.src_tablespace_id, + })); - return Ok(Some(record)); - } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP { - let dropdb = XlDropDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { - db_id: dropdb.db_id, - tablespace_ids: dropdb.tablespace_ids, - })); + return Ok(Some(record)); + } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP { + let dropdb = XlDropDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { + db_id: dropdb.db_id, + tablespace_ids: dropdb.tablespace_ids, + })); - return Ok(Some(record)); + return Ok(Some(record)); + } } - } else if pg_version == 16 { - if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG { - tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); - } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY { - // The XLOG record was renamed between v14 and v15, - // but the record format is the same. - // So we can reuse XlCreateDatabase here. - tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); + PgMajorVersion::PG16 => { + if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG { + tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); + } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY { + // The XLOG record was renamed between v14 and v15, + // but the record format is the same. + // So we can reuse XlCreateDatabase here. + tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); - let createdb = XlCreateDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { - db_id: createdb.db_id, - tablespace_id: createdb.tablespace_id, - src_db_id: createdb.src_db_id, - src_tablespace_id: createdb.src_tablespace_id, - })); + let createdb = XlCreateDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { + db_id: createdb.db_id, + tablespace_id: createdb.tablespace_id, + src_db_id: createdb.src_db_id, + src_tablespace_id: createdb.src_tablespace_id, + })); - return Ok(Some(record)); - } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP { - let dropdb = XlDropDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { - db_id: dropdb.db_id, - tablespace_ids: dropdb.tablespace_ids, - })); + return Ok(Some(record)); + } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP { + let dropdb = XlDropDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { + db_id: dropdb.db_id, + tablespace_ids: dropdb.tablespace_ids, + })); - return Ok(Some(record)); + return Ok(Some(record)); + } } - } else if pg_version == 17 { - if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_WAL_LOG { - tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); - } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY { - // The XLOG record was renamed between v14 and v15, - // but the record format is the same. 
- // So we can reuse XlCreateDatabase here. - tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); + PgMajorVersion::PG17 => { + if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_WAL_LOG { + tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); + } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY { + // The XLOG record was renamed between v14 and v15, + // but the record format is the same. + // So we can reuse XlCreateDatabase here. + tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); - let createdb = XlCreateDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { - db_id: createdb.db_id, - tablespace_id: createdb.tablespace_id, - src_db_id: createdb.src_db_id, - src_tablespace_id: createdb.src_tablespace_id, - })); + let createdb = XlCreateDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { + db_id: createdb.db_id, + tablespace_id: createdb.tablespace_id, + src_db_id: createdb.src_db_id, + src_tablespace_id: createdb.src_tablespace_id, + })); - return Ok(Some(record)); - } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_DROP { - let dropdb = XlDropDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { - db_id: dropdb.db_id, - tablespace_ids: dropdb.tablespace_ids, - })); + return Ok(Some(record)); + } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_DROP { + let dropdb = XlDropDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { + db_id: dropdb.db_id, + tablespace_ids: dropdb.tablespace_ids, + })); - return Ok(Some(record)); + return Ok(Some(record)); + } } } @@ -748,12 +752,12 @@ impl MetadataRecord { fn decode_clog_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK; if info == pg_constants::CLOG_ZEROPAGE { - let pageno = if pg_version < 17 { + let pageno = if pg_version < PgMajorVersion::PG17 { buf.get_u32_le() } else { buf.get_u64_le() as u32 @@ -765,7 +769,7 @@ impl MetadataRecord { ClogZeroPage { segno, rpageno }, )))) } else { - assert!(info == pg_constants::CLOG_TRUNCATE); + assert_eq!(info, pg_constants::CLOG_TRUNCATE); let xlrec = XlClogTruncate::decode(buf, pg_version); Ok(Some(MetadataRecord::Clog(ClogRecord::Truncate( @@ -838,14 +842,14 @@ impl MetadataRecord { fn decode_multixact_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK; if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE || info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE { - let pageno = if pg_version < 17 { + let pageno = if pg_version < PgMajorVersion::PG17 { buf.get_u32_le() } else { buf.get_u64_le() as u32 diff --git a/libs/wal_decoder/src/models.rs b/libs/wal_decoder/src/models.rs index 7e1934c6c3..94a00c0e53 100644 --- a/libs/wal_decoder/src/models.rs +++ b/libs/wal_decoder/src/models.rs @@ -25,6 +25,9 @@ //! | //! 
|--> write to KV store within the pageserver +pub mod record; +pub mod value; + use bytes::Bytes; use pageserver_api::reltag::{RelTag, SlruKind}; use postgres_ffi::walrecord::{ diff --git a/libs/pageserver_api/src/record.rs b/libs/wal_decoder/src/models/record.rs similarity index 99% rename from libs/pageserver_api/src/record.rs rename to libs/wal_decoder/src/models/record.rs index 73516c5220..51659ed904 100644 --- a/libs/pageserver_api/src/record.rs +++ b/libs/wal_decoder/src/models/record.rs @@ -128,6 +128,6 @@ pub fn describe_wal_record(rec: &NeonWalRecord) -> Result Ok(format!("{:?}", rec)), + _ => Ok(format!("{rec:?}")), } } diff --git a/libs/pageserver_api/src/value.rs b/libs/wal_decoder/src/models/value.rs similarity index 99% rename from libs/pageserver_api/src/value.rs rename to libs/wal_decoder/src/models/value.rs index e9000939c3..3b4f896a45 100644 --- a/libs/pageserver_api/src/value.rs +++ b/libs/wal_decoder/src/models/value.rs @@ -10,7 +10,7 @@ use bytes::Bytes; use serde::{Deserialize, Serialize}; -use crate::record::NeonWalRecord; +use crate::models::record::NeonWalRecord; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub enum Value { diff --git a/libs/wal_decoder/src/serialized_batch.rs b/libs/wal_decoder/src/serialized_batch.rs index b451d6d8e0..ab38ff3d73 100644 --- a/libs/wal_decoder/src/serialized_batch.rs +++ b/libs/wal_decoder/src/serialized_batch.rs @@ -1,4 +1,4 @@ -//! This module implements batch type for serialized [`pageserver_api::value::Value`] +//! This module implements batch type for serialized [`crate::models::value::Value`] //! instances. Each batch contains a raw buffer (serialized values) //! and a list of metadata for each (key, LSN) tuple present in the batch. //! @@ -10,17 +10,17 @@ use std::collections::{BTreeSet, HashMap}; use bytes::{Bytes, BytesMut}; use pageserver_api::key::{CompactKey, Key, rel_block_to_key}; use pageserver_api::keyspace::KeySpace; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::RelTag; use pageserver_api::shard::ShardIdentity; -use pageserver_api::value::Value; use postgres_ffi::walrecord::{DecodedBkpBlock, DecodedWALRecord}; -use postgres_ffi::{BLCKSZ, page_is_new, page_set_lsn, pg_constants}; +use postgres_ffi::{BLCKSZ, PgMajorVersion, page_is_new, page_set_lsn, pg_constants}; use serde::{Deserialize, Serialize}; use utils::bin_ser::BeSer; use utils::lsn::Lsn; use crate::models::InterpretedWalRecord; +use crate::models::record::NeonWalRecord; +use crate::models::value::Value; static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; BLCKSZ as usize]); @@ -139,7 +139,7 @@ impl SerializedValueBatch { decoded: DecodedWALRecord, shard_records: &mut HashMap, next_record_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<()> { // First determine how big the buffers need to be and allocate it up-front. // This duplicates some of the work below, but it's empirically much faster. 
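The comment above describes a size-then-fill approach: compute the exact serialized size first, then allocate once and write. A minimal sketch of that pattern, not part of the patch and with made-up names:

use bytes::BytesMut;

// Two passes: the first computes the total length, the second writes into a buffer
// allocated up-front, so it never reallocates while being filled.
fn concat_presized(parts: &[&[u8]]) -> BytesMut {
    let total: usize = parts.iter().map(|p| p.len()).sum();
    let mut buf = BytesMut::with_capacity(total);
    for p in parts {
        buf.extend_from_slice(p);
    }
    buf
}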
@@ -267,7 +267,7 @@ impl SerializedValueBatch { fn estimate_buffer_size( decoded: &DecodedWALRecord, shard: &ShardIdentity, - pg_version: u32, + pg_version: PgMajorVersion, ) -> usize { let mut estimate: usize = 0; @@ -303,7 +303,11 @@ impl SerializedValueBatch { estimate } - fn block_is_image(decoded: &DecodedWALRecord, blk: &DecodedBkpBlock, pg_version: u32) -> bool { + fn block_is_image( + decoded: &DecodedWALRecord, + blk: &DecodedBkpBlock, + pg_version: PgMajorVersion, + ) -> bool { blk.apply_image && blk.has_image && decoded.xl_rmid == pg_constants::RM_XLOG_ID diff --git a/libs/walproposer/build.rs b/libs/walproposer/build.rs index 530ceb1327..b13c8b32b4 100644 --- a/libs/walproposer/build.rs +++ b/libs/walproposer/build.rs @@ -13,22 +13,24 @@ fn main() -> anyhow::Result<()> { // Tell cargo to invalidate the built crate whenever the wrapper changes println!("cargo:rerun-if-changed=bindgen_deps.h"); + let root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../.."); + // Finding the location of built libraries and Postgres C headers: // - if POSTGRES_INSTALL_DIR is set look into it, otherwise look into `/pg_install` // - if there's a `bin/pg_config` file use it for getting include server, otherwise use `/pg_install/{PG_MAJORVERSION}/include/postgresql/server` let pg_install_dir = if let Some(postgres_install_dir) = env::var_os("POSTGRES_INSTALL_DIR") { postgres_install_dir.into() } else { - PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../pg_install") + root_path.join("pg_install") }; let pg_install_abs = std::fs::canonicalize(pg_install_dir)?; - let walproposer_lib_dir = pg_install_abs.join("build/walproposer-lib"); + let walproposer_lib_dir = root_path.join("build/walproposer-lib"); let walproposer_lib_search_str = walproposer_lib_dir .to_str() .ok_or(anyhow!("Bad non-UTF path"))?; - let pgxn_neon = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../pgxn/neon"); + let pgxn_neon = root_path.join("pgxn/neon"); let pgxn_neon = std::fs::canonicalize(pgxn_neon)?; let pgxn_neon = pgxn_neon.to_str().ok_or(anyhow!("Bad non-UTF path"))?; diff --git a/libs/walproposer/src/api_bindings.rs b/libs/walproposer/src/api_bindings.rs index 4d6cbae9a9..7c6abf252e 100644 --- a/libs/walproposer/src/api_bindings.rs +++ b/libs/walproposer/src/api_bindings.rs @@ -311,7 +311,7 @@ extern "C" fn get_redo_start_lsn(wp: *mut WalProposer) -> XLogRecPtr { } } -extern "C-unwind" fn finish_sync_safekeepers(wp: *mut WalProposer, lsn: XLogRecPtr) { +unsafe extern "C-unwind" fn finish_sync_safekeepers(wp: *mut WalProposer, lsn: XLogRecPtr) -> ! { unsafe { let callback_data = (*(*wp).config).callback_data; let api = callback_data as *mut Box; @@ -376,7 +376,7 @@ impl Level { FATAL => Level::Fatal, PANIC => Level::Panic, WPEVENT => Level::WPEvent, - _ => panic!("unknown log level {}", elevel), + _ => panic!("unknown log level {elevel}"), } } } @@ -446,7 +446,7 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState { impl std::fmt::Display for Level { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } diff --git a/libs/walproposer/src/walproposer.rs b/libs/walproposer/src/walproposer.rs index e95494297c..93bb0d5eb0 100644 --- a/libs/walproposer/src/walproposer.rs +++ b/libs/walproposer/src/walproposer.rs @@ -144,7 +144,7 @@ pub trait ApiImpl { todo!() } - fn finish_sync_safekeepers(&self, _lsn: u64) { + fn finish_sync_safekeepers(&self, _lsn: u64) -> ! 
{ todo!() } @@ -380,7 +380,7 @@ mod tests { } fn conn_send_query(&self, _: &mut crate::bindings::Safekeeper, query: &str) -> bool { - println!("conn_send_query: {}", query); + println!("conn_send_query: {query}"); true } @@ -399,13 +399,13 @@ mod tests { ) -> crate::bindings::PGAsyncReadResult { println!("conn_async_read"); let reply = self.next_safekeeper_reply(); - println!("conn_async_read result: {:?}", reply); + println!("conn_async_read result: {reply:?}"); vec.extend_from_slice(reply); crate::bindings::PGAsyncReadResult_PG_ASYNC_READ_SUCCESS } fn conn_blocking_write(&self, _: &mut crate::bindings::Safekeeper, buf: &[u8]) -> bool { - println!("conn_blocking_write: {:?}", buf); + println!("conn_blocking_write: {buf:?}"); self.check_walproposer_msg(buf); true } @@ -456,10 +456,7 @@ mod tests { timeout_millis: i64, ) -> super::WaitResult { let data = self.wait_events.get(); - println!( - "wait_event_set, timeout_millis={}, res={:?}", - timeout_millis, data - ); + println!("wait_event_set, timeout_millis={timeout_millis}, res={data:?}"); super::WaitResult::Network(data.sk, data.event_mask) } @@ -469,13 +466,13 @@ mod tests { true } - fn finish_sync_safekeepers(&self, lsn: u64) { + fn finish_sync_safekeepers(&self, lsn: u64) -> ! { self.sync_channel.send(lsn).unwrap(); panic!("sync safekeepers finished at lsn={}", lsn); } fn log_internal(&self, _wp: &mut crate::bindings::WalProposer, level: Level, msg: &str) { - println!("wp_log[{}] {}", level, msg); + println!("wp_log[{level}] {msg}"); } fn after_election(&self, _wp: &mut crate::bindings::WalProposer) { diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml index 9591c729e8..db932d126e 100644 --- a/pageserver/Cargo.toml +++ b/pageserver/Cargo.toml @@ -12,6 +12,9 @@ testing = ["fail/failpoints", "pageserver_api/testing", "wal_decoder/testing", " fuzz-read-path = ["testing"] +# Enables benchmarking only APIs +benchmarking = [] + [dependencies] anyhow.workspace = true arc-swap.workspace = true @@ -51,17 +54,20 @@ pageserver_api.workspace = true pageserver_client.workspace = true # for ResponseErrorMessageExt TOOD refactor that pageserver_compaction.workspace = true pageserver_page_api.workspace = true +peekable.workspace = true pem.workspace = true pin-project-lite.workspace = true postgres_backend.workspace = true postgres_connection.workspace = true postgres_ffi.workspace = true +postgres_ffi_types.workspace = true postgres_initdb.workspace = true postgres-protocol.workspace = true postgres-types.workspace = true posthog_client_lite.workspace = true pprof.workspace = true pq_proto.workspace = true +prost.workspace = true rand.workspace = true range-set-blaze = { version = "0.1.16", features = ["alloc"] } regex.workspace = true @@ -126,6 +132,7 @@ harness = false [[bench]] name = "bench_ingest" harness = false +required-features = ["benchmarking"] [[bench]] name = "upload_queue" diff --git a/pageserver/benches/bench_ingest.rs b/pageserver/benches/bench_ingest.rs index eaadfe14ae..438c6e235e 100644 --- a/pageserver/benches/bench_ingest.rs +++ b/pageserver/benches/bench_ingest.rs @@ -1,23 +1,30 @@ use std::env; use std::num::NonZeroUsize; +use std::sync::Arc; use bytes::Bytes; use camino::Utf8PathBuf; use criterion::{Criterion, criterion_group, criterion_main}; +use futures::stream::FuturesUnordered; use pageserver::config::PageServerConf; use pageserver::context::{DownloadBehavior, RequestContext}; +use pageserver::keyspace::KeySpace; use pageserver::l0_flush::{L0FlushConfig, L0FlushGlobalState}; use pageserver::task_mgr::TaskKind; -use 
pageserver::tenant::storage_layer::InMemoryLayer; +use pageserver::tenant::storage_layer::IoConcurrency; +use pageserver::tenant::storage_layer::{InMemoryLayer, ValuesReconstructState}; use pageserver::{page_cache, virtual_file}; +use pageserver_api::config::GetVectoredConcurrentIo; use pageserver_api::key::Key; use pageserver_api::models::virtual_file::IoMode; use pageserver_api::shard::TenantShardId; -use pageserver_api::value::Value; -use strum::IntoEnumIterator; +use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; use utils::bin_ser::BeSer; use utils::id::{TenantId, TimelineId}; +use utils::lsn::Lsn; +use utils::sync::gate::Gate; +use wal_decoder::models::value::Value; use wal_decoder::serialized_batch::SerializedValueBatch; // A very cheap hash for generating non-sequential keys. @@ -30,7 +37,7 @@ fn murmurhash32(mut h: u32) -> u32 { h } -#[derive(serde::Serialize, Clone, Copy, Debug)] +#[derive(serde::Serialize, Clone, Copy, Debug, PartialEq)] enum KeyLayout { /// Sequential unique keys Sequential, @@ -40,19 +47,30 @@ enum KeyLayout { RandomReuse(u32), } -#[derive(serde::Serialize, Clone, Copy, Debug)] +#[derive(serde::Serialize, Clone, Copy, Debug, PartialEq)] enum WriteDelta { Yes, No, } +#[derive(serde::Serialize, Clone, Copy, Debug, PartialEq)] +enum ConcurrentReads { + Yes, + No, +} + async fn ingest( conf: &'static PageServerConf, put_size: usize, put_count: usize, key_layout: KeyLayout, write_delta: WriteDelta, + concurrent_reads: ConcurrentReads, ) -> anyhow::Result<()> { + if concurrent_reads == ConcurrentReads::Yes { + assert_eq!(key_layout, KeyLayout::Sequential); + } + let mut lsn = utils::lsn::Lsn(1000); let mut key = Key::from_i128(0x0); @@ -68,16 +86,18 @@ async fn ingest( let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); - let layer = InMemoryLayer::create( - conf, - timeline_id, - tenant_shard_id, - lsn, - &gate, - &cancel, - &ctx, - ) - .await?; + let layer = Arc::new( + InMemoryLayer::create( + conf, + timeline_id, + tenant_shard_id, + lsn, + &gate, + &cancel, + &ctx, + ) + .await?, + ); let data = Value::Image(Bytes::from(vec![0u8; put_size])); let data_ser_size = data.serialized_size().unwrap() as usize; @@ -86,6 +106,61 @@ async fn ingest( pageserver::context::DownloadBehavior::Download, ); + const READ_BATCH_SIZE: u32 = 32; + let (tx, mut rx) = tokio::sync::watch::channel::>(None); + let reader_cancel = CancellationToken::new(); + let reader_handle = if concurrent_reads == ConcurrentReads::Yes { + Some(tokio::task::spawn({ + let cancel = reader_cancel.clone(); + let layer = layer.clone(); + let ctx = ctx.attached_child(); + async move { + let gate = Gate::default(); + let gate_guard = gate.enter().unwrap(); + let io_concurrency = IoConcurrency::spawn_from_conf( + GetVectoredConcurrentIo::SidecarTask, + gate_guard, + ); + + rx.wait_for(|key| key.is_some()).await.unwrap(); + + while !cancel.is_cancelled() { + let key = match *rx.borrow() { + Some(some) => some, + None => unreachable!(), + }; + + let mut start_key = key; + start_key.field6 = key.field6.saturating_sub(READ_BATCH_SIZE); + let key_range = start_key..key.next(); + + let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone()); + + layer + .get_values_reconstruct_data( + KeySpace::single(key_range), + Lsn(1)..Lsn(u64::MAX), + &mut reconstruct_state, + &ctx, + ) + .await + .unwrap(); + + let mut collect_futs = std::mem::take(&mut reconstruct_state.keys) + .into_values() + .map(|state| state.sink_pending_ios()) + .collect::>(); + 
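+ // Draining these futures waits for this batch's pending sidecar IOs to complete;
+ // the benchmark only needs completion, not the reconstructed values.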
while collect_futs.next().await.is_some() {} + } + + drop(io_concurrency); + gate.close().await; + } + })) + } else { + None + }; + const BATCH_SIZE: usize = 16; let mut batch = Vec::new(); @@ -113,19 +188,27 @@ async fn ingest( batch.push((key.to_compact(), lsn, data_ser_size, data.clone())); if batch.len() >= BATCH_SIZE { + let last_key = Key::from_compact(batch.last().unwrap().0); + let this_batch = std::mem::take(&mut batch); let serialized = SerializedValueBatch::from_values(this_batch); layer.put_batch(serialized, &ctx).await?; + + tx.send(Some(last_key)).unwrap(); } } if !batch.is_empty() { + let last_key = Key::from_compact(batch.last().unwrap().0); + let this_batch = std::mem::take(&mut batch); let serialized = SerializedValueBatch::from_values(this_batch); layer.put_batch(serialized, &ctx).await?; + + tx.send(Some(last_key)).unwrap(); } layer.freeze(lsn + 1).await; - if matches!(write_delta, WriteDelta::Yes) { + if write_delta == WriteDelta::Yes { let l0_flush_state = L0FlushGlobalState::new(L0FlushConfig::Direct { max_concurrency: NonZeroUsize::new(1).unwrap(), }); @@ -136,6 +219,11 @@ async fn ingest( tokio::fs::remove_file(path).await?; } + reader_cancel.cancel(); + if let Some(handle) = reader_handle { + handle.await.unwrap(); + } + Ok(()) } @@ -147,6 +235,7 @@ fn ingest_main( put_count: usize, key_layout: KeyLayout, write_delta: WriteDelta, + concurrent_reads: ConcurrentReads, ) { pageserver::virtual_file::set_io_mode(io_mode); @@ -156,7 +245,15 @@ fn ingest_main( .unwrap(); runtime.block_on(async move { - let r = ingest(conf, put_size, put_count, key_layout, write_delta).await; + let r = ingest( + conf, + put_size, + put_count, + key_layout, + write_delta, + concurrent_reads, + ) + .await; if let Err(e) = r { panic!("{e:?}"); } @@ -195,6 +292,7 @@ fn criterion_benchmark(c: &mut Criterion) { key_size: usize, key_layout: KeyLayout, write_delta: WriteDelta, + concurrent_reads: ConcurrentReads, } #[derive(Clone)] struct HandPickedParameters { @@ -245,7 +343,7 @@ fn criterion_benchmark(c: &mut Criterion) { ]; let exploded_parameters = { let mut out = Vec::new(); - for io_mode in IoMode::iter() { + for concurrent_reads in [ConcurrentReads::Yes, ConcurrentReads::No] { for param in expect.clone() { let HandPickedParameters { volume_mib, @@ -253,12 +351,18 @@ fn criterion_benchmark(c: &mut Criterion) { key_layout, write_delta, } = param; + + if key_layout != KeyLayout::Sequential && concurrent_reads == ConcurrentReads::Yes { + continue; + } + out.push(ExplodedParameters { - io_mode, + io_mode: IoMode::DirectRw, volume_mib, key_size, key_layout, write_delta, + concurrent_reads, }); } } @@ -272,9 +376,10 @@ fn criterion_benchmark(c: &mut Criterion) { key_size, key_layout, write_delta, + concurrent_reads, } = self; format!( - "io_mode={io_mode:?} volume_mib={volume_mib:?} key_size_bytes={key_size:?} key_layout={key_layout:?} write_delta={write_delta:?}" + "io_mode={io_mode:?} volume_mib={volume_mib:?} key_size_bytes={key_size:?} key_layout={key_layout:?} write_delta={write_delta:?} concurrent_reads={concurrent_reads:?}" ) } } @@ -287,12 +392,23 @@ fn criterion_benchmark(c: &mut Criterion) { key_size, key_layout, write_delta, + concurrent_reads, } = params; let put_count = volume_mib * 1024 * 1024 / key_size; group.throughput(criterion::Throughput::Bytes((key_size * put_count) as u64)); group.sample_size(10); group.bench_function(id, |b| { - b.iter(|| ingest_main(conf, io_mode, key_size, put_count, key_layout, write_delta)) + b.iter(|| { + ingest_main( + conf, + io_mode, + key_size, + 
put_count, + key_layout, + write_delta, + concurrent_reads, + ) + }) }); } } diff --git a/pageserver/benches/bench_walredo.rs b/pageserver/benches/bench_walredo.rs index 215682d90c..efb970f705 100644 --- a/pageserver/benches/bench_walredo.rs +++ b/pageserver/benches/bench_walredo.rs @@ -67,12 +67,13 @@ use once_cell::sync::Lazy; use pageserver::config::PageServerConf; use pageserver::walredo::{PostgresRedoManager, RedoAttemptType}; use pageserver_api::key::Key; -use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::TenantShardId; +use postgres_ffi::{BLCKSZ, PgMajorVersion}; use tokio::sync::Barrier; use tokio::task::JoinSet; use utils::id::TenantId; use utils::lsn::Lsn; +use wal_decoder::models::record::NeonWalRecord; fn bench(c: &mut Criterion) { macro_rules! bench_group { @@ -94,7 +95,7 @@ fn bench(c: &mut Criterion) { // // benchmark the protocol implementation // - let pg_version = 14; + let pg_version = PgMajorVersion::PG14; bench_group!( "ping", Arc::new(move |mgr: Arc| async move { @@ -107,7 +108,7 @@ fn bench(c: &mut Criterion) { let make_redo_work = |req: &'static Request| { Arc::new(move |mgr: Arc| async move { let page = req.execute(&mgr).await.unwrap(); - assert_eq!(page.remaining(), 8192); + assert_eq!(page.remaining(), BLCKSZ as usize); }) }; bench_group!("short", { @@ -208,7 +209,7 @@ struct Request { lsn: Lsn, base_img: Option<(Lsn, Bytes)>, records: Vec<(Lsn, NeonWalRecord)>, - pg_version: u32, + pg_version: PgMajorVersion, } impl Request { @@ -267,7 +268,7 @@ impl Request { pg_record(false, b"\xbc\0\0\0\0\0\0\0h?m\x01\0\0\0\0p\n\0\09\x08\xa3\xea\0 \x8c\0\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x02\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\x05\0\0\0\0@zD\x05\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\x02\0"), ), ], - pg_version: 14, + pg_version: PgMajorVersion::PG14, } } @@ -516,7 +517,7 @@ impl Request { (lsn!("0/16B8000"), pg_record(false, b"C\0\0\0\0\x04\0\0p\x7fk\x01\0\0\0\0\0\n\0\0\\\xc4:?\0 \x12\0\x7f\x06\0\0\xd22\0\0\0@\0\0\0\0\0\0\xff\x03\x01\0\0\x08\x01\0\0\0\x18\0\xe1\0\0\0\0\0\0\0\xe2\0\0")), (lsn!("0/16CBD68"), pg_record(false, b"@ \0\0\0\0\0\0\xc0|l\x01\0\0\0\0@\t\0\0\xdf\xb0\x1a`\0\x12\0\0\0 \0\0\x04\x7f\x06\0\0\xd22\0\0\0@\0\0\0\0\0\0\x01\x80\0\0\0\0\0\0\xff\x05\0\0\0\0\0\0\0\0\0\0\0\0\x18\0\0 \0 \x04 
\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\x01")), ], - pg_version: 14, + pg_version: PgMajorVersion::PG14, } } } diff --git a/pageserver/client/Cargo.toml b/pageserver/client/Cargo.toml index 970a437a42..47e2a6ddae 100644 --- a/pageserver/client/Cargo.toml +++ b/pageserver/client/Cargo.toml @@ -18,6 +18,7 @@ workspace_hack = { version = "0.1", path = "../../workspace_hack" } tokio-postgres.workspace = true tokio-stream.workspace = true tokio.workspace = true +postgres_versioninfo.workspace = true futures.workspace = true tokio-util.workspace = true anyhow.workspace = true diff --git a/pageserver/client/src/mgmt_api.rs b/pageserver/client/src/mgmt_api.rs index 219e63c9d4..af4be23b9b 100644 --- a/pageserver/client/src/mgmt_api.rs +++ b/pageserver/client/src/mgmt_api.rs @@ -7,6 +7,7 @@ use detach_ancestor::AncestorDetached; use http_utils::error::HttpErrorBody; use pageserver_api::models::*; use pageserver_api::shard::TenantShardId; +use postgres_versioninfo::PgMajorVersion; pub use reqwest::Body as ReqwestBody; use reqwest::{IntoUrl, Method, StatusCode, Url}; use utils::id::{TenantId, TimelineId}; @@ -508,11 +509,11 @@ impl Client { .expect("Cannot build URL"); path.query_pairs_mut() - .append_pair("recurse", &format!("{}", recurse)); + .append_pair("recurse", &format!("{recurse}")); if let Some(concurrency) = concurrency { path.query_pairs_mut() - .append_pair("concurrency", &format!("{}", concurrency)); + .append_pair("concurrency", 
&format!("{concurrency}")); } self.request(Method::POST, path, ()).await.map(|_| ()) @@ -745,9 +746,11 @@ impl Client { timeline_id: TimelineId, base_lsn: Lsn, end_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, basebackup_tarball: ReqwestBody, ) -> Result<()> { + let pg_version = pg_version.major_version_num(); + let uri = format!( "{}/v1/tenant/{tenant_id}/timeline/{timeline_id}/import_basebackup?base_lsn={base_lsn}&end_lsn={end_lsn}&pg_version={pg_version}", self.mgmt_api_endpoint, @@ -841,4 +844,13 @@ impl Client { .await .map_err(Error::ReceiveBody) } + + pub async fn update_feature_flag_spec(&self, spec: String) -> Result<()> { + let uri = format!("{}/v1/feature_flag_spec", self.mgmt_api_endpoint); + self.request(Method::POST, uri, spec) + .await? + .json() + .await + .map_err(Error::ReceiveBody) + } } diff --git a/pageserver/client/src/page_service.rs b/pageserver/client/src/page_service.rs index ef35ac2f48..085c0e6543 100644 --- a/pageserver/client/src/page_service.rs +++ b/pageserver/client/src/page_service.rs @@ -2,7 +2,7 @@ use std::sync::{Arc, Mutex}; use futures::stream::{SplitSink, SplitStream}; use futures::{SinkExt, StreamExt}; -use pageserver_api::models::{ +use pageserver_api::pagestream_api::{ PagestreamBeMessage, PagestreamFeMessage, PagestreamGetPageRequest, PagestreamGetPageResponse, }; use pageserver_api::reltag::RelTag; diff --git a/pageserver/client_grpc/Cargo.toml b/pageserver/client_grpc/Cargo.toml new file mode 100644 index 0000000000..681b3d3bc3 --- /dev/null +++ b/pageserver/client_grpc/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "pageserver_client_grpc" +version = "0.1.0" +edition = "2024" + +[features] +testing = ["pageserver_api/testing"] + +[dependencies] +anyhow.workspace = true +arc-swap.workspace = true +bytes.workspace = true +futures.workspace = true +http.workspace = true +thiserror.workspace = true +tonic.workspace = true +tracing.workspace = true +tokio = { version = "1.43.1", features = [ + "full", + "macros", + "net", + "io-util", + "rt", + "rt-multi-thread", +] } +uuid = { version = "1", features = ["v4"] } +tower = { version = "0.4", features = ["timeout", "util"] } +rand = "0.8" +tokio-util = { version = "0.7", features = ["compat"] } +hyper-util = "0.1.9" +hyper = "1.6.0" +metrics.workspace = true +priority-queue = "2.3.1" +scopeguard.workspace = true +async-trait = { version = "0.1" } +tokio-stream = "0.1" +dashmap = "5" +chrono = { version = "0.4", features = ["serde"] } +compute_api.workspace = true + + +pageserver_page_api.workspace = true +pageserver_api.workspace = true +utils.workspace = true diff --git a/pageserver/client_grpc/src/client.rs b/pageserver/client_grpc/src/client.rs new file mode 100644 index 0000000000..4f3f606935 --- /dev/null +++ b/pageserver/client_grpc/src/client.rs @@ -0,0 +1,471 @@ +use std::collections::HashMap; +use std::num::NonZero; +use std::sync::Arc; + +use anyhow::anyhow; +use arc_swap::ArcSwap; +use futures::stream::FuturesUnordered; +use futures::{FutureExt as _, StreamExt as _}; +use tracing::instrument; + +use crate::pool::{ChannelPool, ClientGuard, ClientPool, StreamGuard, StreamPool}; +use crate::retry::Retry; +use crate::split::GetPageSplitter; +use compute_api::spec::PageserverProtocol; +use pageserver_api::shard::ShardStripeSize; +use pageserver_page_api as page_api; +use utils::id::{TenantId, TimelineId}; +use utils::shard::{ShardCount, ShardIndex, ShardNumber}; + +/// Max number of concurrent clients per channel (i.e. TCP connection). New channels will be spun up +/// when full. 
+/// +/// TODO: tune all of these constants, and consider making them configurable. +/// TODO: consider separate limits for unary and streaming clients, so we don't fill up channels +/// with only streams. +const MAX_CLIENTS_PER_CHANNEL: NonZero = NonZero::new(16).unwrap(); + +/// Max number of concurrent unary request clients per shard. +const MAX_UNARY_CLIENTS: NonZero = NonZero::new(64).unwrap(); + +/// Max number of concurrent GetPage streams per shard. The max number of concurrent GetPage +/// requests is given by `MAX_STREAMS * MAX_STREAM_QUEUE_DEPTH`. +const MAX_STREAMS: NonZero = NonZero::new(64).unwrap(); + +/// Max number of pipelined requests per stream. +const MAX_STREAM_QUEUE_DEPTH: NonZero = NonZero::new(2).unwrap(); + +/// Max number of concurrent bulk GetPage streams per shard, used e.g. for prefetches. Because these +/// are more throughput-oriented, we have a smaller limit but higher queue depth. +const MAX_BULK_STREAMS: NonZero = NonZero::new(16).unwrap(); + +/// Max number of pipelined requests per bulk stream. These are more throughput-oriented and thus +/// get a larger queue depth. +const MAX_BULK_STREAM_QUEUE_DEPTH: NonZero = NonZero::new(4).unwrap(); + +/// A rich Pageserver gRPC client for a single tenant timeline. This client is more capable than the +/// basic `page_api::Client` gRPC client, and supports: +/// +/// * Sharded tenants across multiple Pageservers. +/// * Pooling of connections, clients, and streams for efficient resource use. +/// * Concurrent use by many callers. +/// * Internal handling of GetPage bidirectional streams, with pipelining and error handling. +/// * Automatic retries. +/// * Observability. +/// +/// TODO: this client does not support base backups or LSN leases, as these are only used by +/// compute_ctl. Consider adding this, but LSN leases need concurrent requests on all shards. +pub struct PageserverClient { + /// The tenant ID. + tenant_id: TenantId, + /// The timeline ID. + timeline_id: TimelineId, + /// The JWT auth token for this tenant, if any. + auth_token: Option, + /// The shards for this tenant. + shards: ArcSwap, + /// The retry configuration. + retry: Retry, +} + +impl PageserverClient { + /// Creates a new Pageserver client for a given tenant and timeline. Uses the Pageservers given + /// in the shard spec, which must be complete and must use gRPC URLs. + pub fn new( + tenant_id: TenantId, + timeline_id: TimelineId, + shard_spec: ShardSpec, + auth_token: Option, + ) -> anyhow::Result { + let shards = Shards::new(tenant_id, timeline_id, shard_spec, auth_token.clone())?; + Ok(Self { + tenant_id, + timeline_id, + auth_token, + shards: ArcSwap::new(Arc::new(shards)), + retry: Retry, + }) + } + + /// Updates the shards from the given shard spec. In-flight requests will complete using the + /// existing shards, but may retry with the new shards if they fail. + /// + /// TODO: verify that in-flight requests are allowed to complete, and that the old pools are + /// properly spun down and dropped afterwards. + pub fn update_shards(&self, shard_spec: ShardSpec) -> anyhow::Result<()> { + let shards = Shards::new( + self.tenant_id, + self.timeline_id, + shard_spec, + self.auth_token.clone(), + )?; + self.shards.store(Arc::new(shards)); + Ok(()) + } + + /// Returns whether a relation exists. 
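+    ///
+    /// Example (illustrative sketch, not taken from the original code; assumes `urls`,
+    /// `stripe_size`, `tenant_id`, `timeline_id`, and a prepared `req` are in scope):
+    ///
+    /// ```ignore
+    /// let spec = ShardSpec::new(urls, Some(stripe_size))?;
+    /// let client = PageserverClient::new(tenant_id, timeline_id, spec, None)?;
+    /// let exists = client.check_rel_exists(req).await?;
+    /// ```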
+ #[instrument(skip_all, fields(rel=%req.rel, lsn=%req.read_lsn))] + pub async fn check_rel_exists( + &self, + req: page_api::CheckRelExistsRequest, + ) -> tonic::Result { + self.retry + .with(async || { + // Relation metadata is only available on shard 0. + let mut client = self.shards.load_full().get_zero().client().await?; + client.check_rel_exists(req).await + }) + .await + } + + /// Returns the total size of a database, as # of bytes. + #[instrument(skip_all, fields(db_oid=%req.db_oid, lsn=%req.read_lsn))] + pub async fn get_db_size( + &self, + req: page_api::GetDbSizeRequest, + ) -> tonic::Result { + self.retry + .with(async || { + // Relation metadata is only available on shard 0. + let mut client = self.shards.load_full().get_zero().client().await?; + client.get_db_size(req).await + }) + .await + } + + /// Fetches pages. The `request_id` must be unique across all in-flight requests. Automatically + /// splits requests that straddle shard boundaries, and assembles the responses. + /// + /// Unlike `page_api::Client`, this automatically converts `status_code` into `tonic::Status` + /// errors. All responses will have `GetPageStatusCode::Ok`. + #[instrument(skip_all, fields( + req_id = %req.request_id, + class = %req.request_class, + rel = %req.rel, + blkno = %req.block_numbers[0], + blks = %req.block_numbers.len(), + lsn = %req.read_lsn, + ))] + pub async fn get_page( + &self, + req: page_api::GetPageRequest, + ) -> tonic::Result { + // Make sure we have at least one page. + if req.block_numbers.is_empty() { + return Err(tonic::Status::invalid_argument("no block number")); + } + + // The shards may change while we're fetching pages. We execute the request using a stable + // view of the shards (especially important for requests that span shards), but retry the + // top-level (pre-split) request to pick up shard changes. This can lead to unnecessary + // retries and re-splits in some cases where requests span shards, but these are expected to + // be rare. + // + // TODO: the gRPC server and client doesn't yet properly support shard splits. Revisit this + // once we figure out how to handle these. + self.retry + .with(async || Self::get_page_with_shards(req.clone(), &self.shards.load_full()).await) + .await + } + + /// Fetches pages using the given shards. This uses a stable view of the shards, regardless of + /// concurrent shard updates. Does not retry internally, but is retried by `get_page()`. + async fn get_page_with_shards( + req: page_api::GetPageRequest, + shards: &Shards, + ) -> tonic::Result { + // Fast path: request is for a single shard. + if let Some(shard_id) = + GetPageSplitter::is_single_shard(&req, shards.count, shards.stripe_size) + { + return Self::get_page_with_shard(req, shards.get(shard_id)?).await; + } + + // Request spans multiple shards. Split it, dispatch concurrent per-shard requests, and + // reassemble the responses. + let mut splitter = GetPageSplitter::split(req, shards.count, shards.stripe_size); + + let mut shard_requests = FuturesUnordered::new(); + for (shard_id, shard_req) in splitter.drain_requests() { + let future = Self::get_page_with_shard(shard_req, shards.get(shard_id)?) + .map(move |result| result.map(|resp| (shard_id, resp))); + shard_requests.push(future); + } + + while let Some((shard_id, shard_response)) = shard_requests.next().await.transpose()? { + splitter.add_response(shard_id, shard_response); + } + + splitter.assemble_response() + } + + /// Fetches pages on the given shard. Does not retry internally. 
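+    /// Converts per-request `status_code` errors into `tonic::Status`, and verifies that the
+    /// response contains the expected number of pages.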
+ async fn get_page_with_shard( + req: page_api::GetPageRequest, + shard: &Shard, + ) -> tonic::Result { + let expected = req.block_numbers.len(); + let stream = shard.stream(req.request_class.is_bulk()).await; + let resp = stream.send(req).await?; + + // Convert per-request errors into a tonic::Status. + if resp.status_code != page_api::GetPageStatusCode::Ok { + return Err(tonic::Status::new( + resp.status_code.into(), + resp.reason.unwrap_or_else(|| String::from("unknown error")), + )); + } + + // Check that we received the expected number of pages. + let actual = resp.page_images.len(); + if expected != actual { + return Err(tonic::Status::data_loss(format!( + "expected {expected} pages, got {actual}", + ))); + } + + Ok(resp) + } + + /// Returns the size of a relation, as # of blocks. + #[instrument(skip_all, fields(rel=%req.rel, lsn=%req.read_lsn))] + pub async fn get_rel_size( + &self, + req: page_api::GetRelSizeRequest, + ) -> tonic::Result { + self.retry + .with(async || { + // Relation metadata is only available on shard 0. + let mut client = self.shards.load_full().get_zero().client().await?; + client.get_rel_size(req).await + }) + .await + } + + /// Fetches an SLRU segment. + #[instrument(skip_all, fields(kind=%req.kind, segno=%req.segno, lsn=%req.read_lsn))] + pub async fn get_slru_segment( + &self, + req: page_api::GetSlruSegmentRequest, + ) -> tonic::Result { + self.retry + .with(async || { + // SLRU segments are only available on shard 0. + let mut client = self.shards.load_full().get_zero().client().await?; + client.get_slru_segment(req).await + }) + .await + } +} + +/// Shard specification for a PageserverClient. +pub struct ShardSpec { + /// Maps shard indices to gRPC URLs. + /// + /// INVARIANT: every shard 0..count is present, and shard 0 is always present. + /// INVARIANT: every URL is valid and uses grpc:// scheme. + urls: HashMap, + /// The shard count. + /// + /// NB: this is 0 for unsharded tenants, following `ShardIndex::unsharded()` convention. + count: ShardCount, + /// The stripe size for these shards. + stripe_size: ShardStripeSize, +} + +impl ShardSpec { + /// Creates a new shard spec with the given URLs and stripe size. All shards must be given. + /// The stripe size may be omitted for unsharded tenants. + pub fn new( + urls: HashMap, + stripe_size: Option, + ) -> anyhow::Result { + // Compute the shard count. + let count = match urls.len() { + 0 => return Err(anyhow!("no shards provided")), + 1 => ShardCount::new(0), // NB: unsharded tenants use 0, like `ShardIndex::unsharded()` + n if n > u8::MAX as usize => return Err(anyhow!("too many shards: {n}")), + n => ShardCount::new(n as u8), + }; + + // Determine the stripe size. It doesn't matter for unsharded tenants. + if stripe_size.is_none() && !count.is_unsharded() { + return Err(anyhow!("stripe size must be given for sharded tenants")); + } + let stripe_size = stripe_size.unwrap_or_default(); + + // Validate the shard spec. + for (shard_id, url) in &urls { + // The shard index must match the computed shard count, even for unsharded tenants. + if shard_id.shard_count != count { + return Err(anyhow!("invalid shard index {shard_id}, expected {count}")); + } + // The shard index' number and count must be consistent. 
+ if !shard_id.is_unsharded() && shard_id.shard_number.0 >= shard_id.shard_count.0 { + return Err(anyhow!("invalid shard index {shard_id}")); + } + // The above conditions guarantee that we have all shards 0..count: len() matches count, + // shard number < count, and numbers are unique (via hashmap). + + // Validate the URL. + if PageserverProtocol::from_connstring(url)? != PageserverProtocol::Grpc { + return Err(anyhow!("invalid shard URL {url}: must use gRPC")); + } + } + + Ok(Self { + urls, + count, + stripe_size, + }) + } +} + +/// Tracks the tenant's shards. +struct Shards { + /// Shards by shard index. + /// + /// INVARIANT: every shard 0..count is present. + /// INVARIANT: shard 0 is always present. + by_index: HashMap, + /// The shard count. + /// + /// NB: this is 0 for unsharded tenants, following `ShardIndex::unsharded()` convention. + count: ShardCount, + /// The stripe size. Only used for sharded tenants. + stripe_size: ShardStripeSize, +} + +impl Shards { + /// Creates a new set of shards based on a shard spec. + fn new( + tenant_id: TenantId, + timeline_id: TimelineId, + shard_spec: ShardSpec, + auth_token: Option, + ) -> anyhow::Result { + // NB: the shard spec has already been validated when constructed. + let mut shards = HashMap::with_capacity(shard_spec.urls.len()); + for (shard_id, url) in shard_spec.urls { + shards.insert( + shard_id, + Shard::new(url, tenant_id, timeline_id, shard_id, auth_token.clone())?, + ); + } + + Ok(Self { + by_index: shards, + count: shard_spec.count, + stripe_size: shard_spec.stripe_size, + }) + } + + /// Looks up the given shard. + #[allow(clippy::result_large_err)] // TODO: check perf impact + fn get(&self, shard_id: ShardIndex) -> tonic::Result<&Shard> { + self.by_index + .get(&shard_id) + .ok_or_else(|| tonic::Status::not_found(format!("unknown shard {shard_id}"))) + } + + /// Returns shard 0. + fn get_zero(&self) -> &Shard { + self.get(ShardIndex::new(ShardNumber(0), self.count)) + .expect("always present") + } +} + +/// A single shard. Uses dedicated resource pools with the following structure: +/// +/// * Channel pool: unbounded. +/// * Unary client pool: MAX_UNARY_CLIENTS. +/// * Stream client pool: unbounded. +/// * Stream pool: MAX_STREAMS and MAX_STREAM_QUEUE_DEPTH. +/// * Bulk channel pool: unbounded. +/// * Bulk client pool: unbounded. +/// * Bulk stream pool: MAX_BULK_STREAMS and MAX_BULK_STREAM_QUEUE_DEPTH. +struct Shard { + /// Unary gRPC client pool. + client_pool: Arc, + /// GetPage stream pool. + stream_pool: Arc, + /// GetPage stream pool for bulk requests, e.g. prefetches. + bulk_stream_pool: Arc, +} + +impl Shard { + /// Creates a new shard. It has its own dedicated resource pools. + fn new( + url: String, + tenant_id: TenantId, + timeline_id: TimelineId, + shard_id: ShardIndex, + auth_token: Option, + ) -> anyhow::Result { + // Common channel pool for unary and stream requests. Bounded by client/stream pools. + let channel_pool = ChannelPool::new(url.clone(), MAX_CLIENTS_PER_CHANNEL)?; + + // Client pool for unary requests. + let client_pool = ClientPool::new( + channel_pool.clone(), + tenant_id, + timeline_id, + shard_id, + auth_token.clone(), + Some(MAX_UNARY_CLIENTS), + ); + + // GetPage stream pool. Uses a dedicated client pool to avoid starving out unary clients, + // but shares a channel pool with it (as it's unbounded). 
+ let stream_pool = StreamPool::new( + ClientPool::new( + channel_pool.clone(), + tenant_id, + timeline_id, + shard_id, + auth_token.clone(), + None, // unbounded, limited by stream pool + ), + Some(MAX_STREAMS), + MAX_STREAM_QUEUE_DEPTH, + ); + + // Bulk GetPage stream pool, e.g. for prefetches. Uses dedicated channel/client/stream pools + // to avoid head-of-line blocking of latency-sensitive requests. + let bulk_stream_pool = StreamPool::new( + ClientPool::new( + ChannelPool::new(url, MAX_CLIENTS_PER_CHANNEL)?, + tenant_id, + timeline_id, + shard_id, + auth_token, + None, // unbounded, limited by stream pool + ), + Some(MAX_BULK_STREAMS), + MAX_BULK_STREAM_QUEUE_DEPTH, + ); + + Ok(Self { + client_pool, + stream_pool, + bulk_stream_pool, + }) + } + + /// Returns a pooled client for this shard. + async fn client(&self) -> tonic::Result { + self.client_pool + .get() + .await + .map_err(|err| tonic::Status::internal(format!("failed to get client: {err}"))) + } + + /// Returns a pooled stream for this shard. If `bulk` is `true`, uses the dedicated bulk stream + /// pool (e.g. for prefetches). + async fn stream(&self, bulk: bool) -> StreamGuard { + match bulk { + false => self.stream_pool.get().await, + true => self.bulk_stream_pool.get().await, + } + } +} diff --git a/pageserver/client_grpc/src/lib.rs b/pageserver/client_grpc/src/lib.rs new file mode 100644 index 0000000000..14fb3fbd5a --- /dev/null +++ b/pageserver/client_grpc/src/lib.rs @@ -0,0 +1,6 @@ +mod client; +mod pool; +mod retry; +mod split; + +pub use client::{PageserverClient, ShardSpec}; diff --git a/pageserver/client_grpc/src/pool.rs b/pageserver/client_grpc/src/pool.rs new file mode 100644 index 0000000000..0e4bff2f1b --- /dev/null +++ b/pageserver/client_grpc/src/pool.rs @@ -0,0 +1,755 @@ +//! This module provides various Pageserver gRPC client resource pools. +//! +//! These pools are designed to reuse gRPC resources (connections, clients, and streams) across +//! multiple concurrent callers (i.e. Postgres backends). This avoids the resource cost and latency +//! of creating dedicated TCP connections and server tasks for every Postgres backend. +//! +//! Each resource has its own, nested pool. The pools are custom-built for the properties of each +//! resource -- they are different enough that a generic pool isn't suitable. +//! +//! * ChannelPool: manages gRPC channels (TCP connections) to a single Pageserver. Multiple clients +//! can acquire and use the same channel concurrently (via HTTP/2 stream multiplexing), up to a +//! per-channel client limit. Channels may be closed when they are no longer used by any clients. +//! +//! * ClientPool: manages gRPC clients for a single tenant shard. Each client acquires a (shared) +//! channel from the ChannelPool for the client's lifetime. A client can only be acquired by a +//! single caller at a time, and is returned to the pool when dropped. Idle clients may be removed +//! from the pool after some time, to free up the channel. +//! +//! * StreamPool: manages bidirectional gRPC GetPage streams. Each stream acquires a client from the +//! ClientPool for the stream's lifetime. Internal streams are not exposed to callers; instead, it +//! returns a guard that can be used to send a single request, to properly enforce queue depth and +//! route responses. Internally, the pool will reuse or spin up a suitable stream for the request, +//! possibly pipelining multiple requests from multiple callers on the same stream (up to some +//! queue depth). 
Idle streams may be removed from the pool after a while to free up the client. +//! +//! Each channel corresponds to one TCP connection. Each client unary request and each stream +//! corresponds to one HTTP/2 stream and server task. +//! +//! TODO: error handling (including custom error types). +//! TODO: observability. + +use std::collections::{BTreeMap, HashMap}; +use std::num::NonZero; +use std::ops::{Deref, DerefMut}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::{Arc, Mutex, Weak}; +use std::time::{Duration, Instant}; + +use futures::StreamExt as _; +use tokio::sync::mpsc::{Receiver, Sender}; +use tokio::sync::{OwnedSemaphorePermit, Semaphore, mpsc, oneshot}; +use tokio_util::sync::CancellationToken; +use tonic::transport::{Channel, Endpoint}; +use tracing::{error, warn}; + +use pageserver_page_api as page_api; +use utils::id::{TenantId, TimelineId}; +use utils::shard::ShardIndex; + +/// Reap channels/clients/streams that have been idle for this long. +/// +/// TODO: this is per-pool. For nested pools, it can take up to 3x as long for a TCP connection to +/// be reaped. First, we must wait for an idle stream to be reaped, which marks its client as idle. +/// Then, we must wait for the idle client to be reaped, which marks its channel as idle. Then, we +/// must wait for the idle channel to be reaped. Is that a problem? Maybe not, we just have to +/// account for it when setting the reap threshold. Alternatively, we can immediately reap empty +/// channels, and/or stream pool clients. +const REAP_IDLE_THRESHOLD: Duration = match cfg!(any(test, feature = "testing")) { + false => Duration::from_secs(180), + true => Duration::from_secs(1), // exercise reaping in tests +}; + +/// Reap idle resources with this interval. +const REAP_IDLE_INTERVAL: Duration = match cfg!(any(test, feature = "testing")) { + false => Duration::from_secs(10), + true => Duration::from_secs(1), // exercise reaping in tests +}; + +/// A gRPC channel pool, for a single Pageserver. A channel is shared by many clients (via HTTP/2 +/// stream multiplexing), up to `clients_per_channel` -- a new channel will be spun up beyond this. +/// The pool does not limit the number of channels, and instead relies on `ClientPool` or +/// `StreamPool` to limit the number of concurrent clients. +/// +/// The pool is always wrapped in an outer `Arc`, to allow long-lived guards across tasks/threads. +/// +/// TODO: consider prewarming a set of channels, to avoid initial connection latency. +/// TODO: consider adding a circuit breaker for errors and fail fast. +pub struct ChannelPool { + /// Pageserver endpoint to connect to. + endpoint: Endpoint, + /// Max number of clients per channel. Beyond this, a new channel will be created. + max_clients_per_channel: NonZero, + /// Open channels. + channels: Mutex>, + /// Reaps idle channels. + idle_reaper: Reaper, + /// Channel ID generator. + next_channel_id: AtomicUsize, +} + +type ChannelID = usize; + +struct ChannelEntry { + /// The gRPC channel (i.e. TCP connection). Shared by multiple clients. + channel: Channel, + /// Number of clients using this channel. + clients: usize, + /// The channel has been idle (no clients) since this time. None if channel is in use. + /// INVARIANT: Some if clients == 0, otherwise None. + idle_since: Option, +} + +impl ChannelPool { + /// Creates a new channel pool for the given Pageserver endpoint. 
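+    ///
+    /// Illustrative sketch of how the pools in this module nest (the endpoint address and the
+    /// limit constants are assumptions; see `Shard::new` in `client.rs` for the real wiring):
+    ///
+    /// ```ignore
+    /// let channels = ChannelPool::new("grpc://pageserver:51051", MAX_CLIENTS_PER_CHANNEL)?;
+    /// let clients = ClientPool::new(
+    ///     channels.clone(), tenant_id, timeline_id, shard_id, None, Some(MAX_UNARY_CLIENTS));
+    /// let streams = StreamPool::new(
+    ///     ClientPool::new(channels, tenant_id, timeline_id, shard_id, None, None),
+    ///     Some(MAX_STREAMS),
+    ///     MAX_STREAM_QUEUE_DEPTH,
+    /// );
+    /// ```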
+ pub fn new(endpoint: E, max_clients_per_channel: NonZero) -> anyhow::Result> + where + E: TryInto + Send + Sync + 'static, + >::Error: std::error::Error + Send + Sync, + { + let pool = Arc::new(Self { + endpoint: endpoint.try_into()?, + max_clients_per_channel, + channels: Mutex::default(), + idle_reaper: Reaper::new(REAP_IDLE_THRESHOLD, REAP_IDLE_INTERVAL), + next_channel_id: AtomicUsize::default(), + }); + pool.idle_reaper.spawn(&pool); + Ok(pool) + } + + /// Acquires a gRPC channel for a client. Multiple clients may acquire the same channel. + /// + /// This never blocks (except for mutex acquisition). The channel is connected lazily on first + /// use, and the `ChannelPool` does not have a channel limit. Channels will be re-established + /// automatically on failure (TODO: verify). + /// + /// Callers should not clone the returned channel, and must hold onto the returned guard as long + /// as the channel is in use. It is unfortunately not possible to enforce this: the Protobuf + /// client requires an owned `Channel` and we don't have access to the channel's internal + /// refcount. + /// + /// This is not performance-sensitive. It is only called when creating a new client, and clients + /// are pooled and reused by `ClientPool`. The total number of channels will also be small. O(n) + /// performance is therefore okay. + pub fn get(self: &Arc) -> ChannelGuard { + let mut channels = self.channels.lock().unwrap(); + + // Try to find an existing channel with available capacity. We check entries in BTreeMap + // order, to fill up the lower-ordered channels first. The ClientPool also prefers clients + // with lower-ordered channel IDs first. This will cluster clients in lower-ordered + // channels, and free up higher-ordered channels such that they can be reaped. + for (&id, entry) in channels.iter_mut() { + assert!( + entry.clients <= self.max_clients_per_channel.get(), + "channel overflow" + ); + assert_eq!( + entry.idle_since.is_some(), + entry.clients == 0, + "incorrect channel idle state" + ); + if entry.clients < self.max_clients_per_channel.get() { + entry.clients += 1; + entry.idle_since = None; + return ChannelGuard { + pool: Arc::downgrade(self), + id, + channel: Some(entry.channel.clone()), + }; + } + } + + // Create a new channel. We connect lazily on first use, such that we don't block here and + // other clients can join onto the same channel while it's connecting. + let channel = self.endpoint.connect_lazy(); + + let id = self.next_channel_id.fetch_add(1, Ordering::Relaxed); + let entry = ChannelEntry { + channel: channel.clone(), + clients: 1, // account for the guard below + idle_since: None, + }; + channels.insert(id, entry); + + ChannelGuard { + pool: Arc::downgrade(self), + id, + channel: Some(channel), + } + } +} + +impl Reapable for ChannelPool { + /// Reaps channels that have been idle since before the cutoff. + fn reap_idle(&self, cutoff: Instant) { + self.channels.lock().unwrap().retain(|_, entry| { + let Some(idle_since) = entry.idle_since else { + assert_ne!(entry.clients, 0, "empty channel not marked idle"); + return true; + }; + assert_eq!(entry.clients, 0, "idle channel has clients"); + idle_since >= cutoff + }) + } +} + +/// Tracks a channel acquired from the pool. The owned inner channel can be obtained with `take()`, +/// since the gRPC client requires an owned `Channel`. +pub struct ChannelGuard { + pool: Weak, + id: ChannelID, + channel: Option, +} + +impl ChannelGuard { + /// Returns the inner owned channel. Panics if called more than once. 
The caller must hold onto + /// the guard as long as the channel is in use, and should not clone it. + pub fn take(&mut self) -> Channel { + self.channel.take().expect("channel already taken") + } +} + +/// Returns the channel to the pool. +impl Drop for ChannelGuard { + fn drop(&mut self) { + let Some(pool) = self.pool.upgrade() else { + return; // pool was dropped + }; + + let mut channels = pool.channels.lock().unwrap(); + let entry = channels.get_mut(&self.id).expect("unknown channel"); + assert!(entry.idle_since.is_none(), "active channel marked idle"); + assert!(entry.clients > 0, "channel underflow"); + entry.clients -= 1; + if entry.clients == 0 { + entry.idle_since = Some(Instant::now()); // mark channel as idle + } + } +} + +/// A pool of gRPC clients for a single tenant shard. Each client acquires a channel from the inner +/// `ChannelPool`. A client is only given out to single caller at a time. The pool limits the total +/// number of concurrent clients to `max_clients` via semaphore. +/// +/// The pool is always wrapped in an outer `Arc`, to allow long-lived guards across tasks/threads. +pub struct ClientPool { + /// Tenant ID. + tenant_id: TenantId, + /// Timeline ID. + timeline_id: TimelineId, + /// Shard ID. + shard_id: ShardIndex, + /// Authentication token, if any. + auth_token: Option, + /// Channel pool to acquire channels from. + channel_pool: Arc, + /// Limits the max number of concurrent clients for this pool. None if the pool is unbounded. + limiter: Option>, + /// Idle pooled clients. Acquired clients are removed from here and returned on drop. + /// + /// The first client in the map will be acquired next. The map is sorted by client ID, which in + /// turn is sorted by its channel ID, such that we prefer acquiring idle clients from + /// lower-ordered channels. This allows us to free up and reap higher-numbered channels as idle + /// clients are reaped. + idle: Mutex>, + /// Reaps idle clients. + idle_reaper: Reaper, + /// Unique client ID generator. + next_client_id: AtomicUsize, +} + +type ClientID = (ChannelID, usize); + +struct ClientEntry { + /// The pooled gRPC client. + client: page_api::Client, + /// The channel guard for the channel used by the client. + channel_guard: ChannelGuard, + /// The client has been idle since this time. All clients in `ClientPool::idle` are idle by + /// definition, so this is the time when it was added back to the pool. + idle_since: Instant, +} + +impl ClientPool { + /// Creates a new client pool for the given tenant shard. Channels are acquired from the given + /// `ChannelPool`, which must point to a Pageserver that hosts the tenant shard. Allows up to + /// `max_clients` concurrent clients, or unbounded if None. + pub fn new( + channel_pool: Arc, + tenant_id: TenantId, + timeline_id: TimelineId, + shard_id: ShardIndex, + auth_token: Option, + max_clients: Option>, + ) -> Arc { + let pool = Arc::new(Self { + tenant_id, + timeline_id, + shard_id, + auth_token, + channel_pool, + idle: Mutex::default(), + idle_reaper: Reaper::new(REAP_IDLE_THRESHOLD, REAP_IDLE_INTERVAL), + limiter: max_clients.map(|max| Arc::new(Semaphore::new(max.get()))), + next_client_id: AtomicUsize::default(), + }); + pool.idle_reaper.spawn(&pool); + pool + } + + /// Gets a client from the pool, or creates a new one if necessary. Connections are established + /// lazily and do not block, but this call can block if the pool is at `max_clients`. The client + /// is returned to the pool when the guard is dropped. 
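+    ///
+    /// Example (illustrative sketch; `pool` is an `Arc<ClientPool>` and `req` is a request built
+    /// by the caller):
+    ///
+    /// ```ignore
+    /// let mut client = pool.get().await?;
+    /// let resp = client.get_rel_size(req).await?;
+    /// // The client is returned to the pool here, when the guard is dropped.
+    /// ```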
+ /// + /// This is moderately performance-sensitive. It is called for every unary request, but these + /// establish a new gRPC stream per request so they're already expensive. GetPage requests use + /// the `StreamPool` instead. + pub async fn get(self: &Arc) -> anyhow::Result { + // Acquire a permit if the pool is bounded. + let mut permit = None; + if let Some(limiter) = self.limiter.clone() { + permit = Some(limiter.acquire_owned().await.expect("never closed")); + } + + // Fast path: acquire an idle client from the pool. + if let Some((id, entry)) = self.idle.lock().unwrap().pop_first() { + return Ok(ClientGuard { + pool: Arc::downgrade(self), + id, + client: Some(entry.client), + channel_guard: Some(entry.channel_guard), + permit, + }); + } + + // Slow path: construct a new client. + let mut channel_guard = self.channel_pool.get(); + let client = page_api::Client::new( + channel_guard.take(), + self.tenant_id, + self.timeline_id, + self.shard_id, + self.auth_token.clone(), + None, + )?; + + Ok(ClientGuard { + pool: Arc::downgrade(self), + id: ( + channel_guard.id, + self.next_client_id.fetch_add(1, Ordering::Relaxed), + ), + client: Some(client), + channel_guard: Some(channel_guard), + permit, + }) + } +} + +impl Reapable for ClientPool { + /// Reaps clients that have been idle since before the cutoff. + fn reap_idle(&self, cutoff: Instant) { + self.idle + .lock() + .unwrap() + .retain(|_, entry| entry.idle_since >= cutoff) + } +} + +/// A client acquired from the pool. The inner client can be accessed via Deref. The client is +/// returned to the pool when dropped. +pub struct ClientGuard { + pool: Weak, + id: ClientID, + client: Option, // Some until dropped + channel_guard: Option, // Some until dropped + permit: Option, // None if pool is unbounded +} + +impl Deref for ClientGuard { + type Target = page_api::Client; + + fn deref(&self) -> &Self::Target { + self.client.as_ref().expect("not dropped") + } +} + +impl DerefMut for ClientGuard { + fn deref_mut(&mut self) -> &mut Self::Target { + self.client.as_mut().expect("not dropped") + } +} + +/// Returns the client to the pool. +impl Drop for ClientGuard { + fn drop(&mut self) { + let Some(pool) = self.pool.upgrade() else { + return; // pool was dropped + }; + + let entry = ClientEntry { + client: self.client.take().expect("dropped once"), + channel_guard: self.channel_guard.take().expect("dropped once"), + idle_since: Instant::now(), + }; + pool.idle.lock().unwrap().insert(self.id, entry); + + _ = self.permit; // returned on drop, referenced for visibility + } +} + +/// A pool of bidirectional gRPC streams. Currently only used for GetPage streams. Each stream +/// acquires a client from the inner `ClientPool` for the stream's lifetime. +/// +/// Individual streams are not exposed to callers -- instead, the returned guard can be used to send +/// a single request and await the response. Internally, requests are multiplexed across streams and +/// channels. This allows proper queue depth enforcement and response routing. +/// +/// TODO: consider making this generic over request and response types; not currently needed. +pub struct StreamPool { + /// The client pool to acquire clients from. Must be unbounded. + client_pool: Arc, + /// All pooled streams. + /// + /// Incoming requests will be sent over an existing stream with available capacity. If all + /// streams are full, a new one is spun up and added to the pool (up to `max_streams`). Each + /// stream has an associated Tokio task that processes requests and responses. 
+ streams: Mutex>, + /// The max number of concurrent streams, or None if unbounded. + max_streams: Option>, + /// The max number of concurrent requests per stream. + max_queue_depth: NonZero, + /// Limits the max number of concurrent requests, given by `max_streams * max_queue_depth`. + /// None if the pool is unbounded. + limiter: Option>, + /// Reaps idle streams. + idle_reaper: Reaper, + /// Stream ID generator. + next_stream_id: AtomicUsize, +} + +type StreamID = usize; +type RequestSender = Sender<(page_api::GetPageRequest, ResponseSender)>; +type RequestReceiver = Receiver<(page_api::GetPageRequest, ResponseSender)>; +type ResponseSender = oneshot::Sender>; + +struct StreamEntry { + /// Sends caller requests to the stream task. The stream task exits when this is dropped. + sender: RequestSender, + /// Number of in-flight requests on this stream. + queue_depth: usize, + /// The time when this stream went idle (queue_depth == 0). + /// INVARIANT: Some if queue_depth == 0, otherwise None. + idle_since: Option, +} + +impl StreamPool { + /// Creates a new stream pool, using the given client pool. It will send up to `max_queue_depth` + /// concurrent requests on each stream, and use up to `max_streams` concurrent streams. + /// + /// The client pool must be unbounded. The stream pool will enforce its own limits, and because + /// streams are long-lived they can cause persistent starvation if they exhaust the client pool. + /// The stream pool should generally have its own dedicated client pool (but it can share a + /// channel pool with others since these are always unbounded). + pub fn new( + client_pool: Arc, + max_streams: Option>, + max_queue_depth: NonZero, + ) -> Arc { + assert!(client_pool.limiter.is_none(), "bounded client pool"); + let pool = Arc::new(Self { + client_pool, + streams: Mutex::default(), + limiter: max_streams.map(|max_streams| { + Arc::new(Semaphore::new(max_streams.get() * max_queue_depth.get())) + }), + max_streams, + max_queue_depth, + idle_reaper: Reaper::new(REAP_IDLE_THRESHOLD, REAP_IDLE_INTERVAL), + next_stream_id: AtomicUsize::default(), + }); + pool.idle_reaper.spawn(&pool); + pool + } + + /// Acquires an available stream from the pool, or spins up a new stream async if all streams + /// are full. Returns a guard that can be used to send a single request on the stream and await + /// the response, with queue depth quota already acquired. Blocks if the pool is at capacity + /// (i.e. `CLIENT_LIMIT * STREAM_QUEUE_DEPTH` requests in flight). + /// + /// This is very performance-sensitive, as it is on the GetPage hot path. + /// + /// TODO: this must do something more sophisticated for performance. We want: + /// + /// * Cheap, concurrent access in the common case where we can use a pooled stream. + /// * Quick acquisition of pooled streams with available capacity. + /// * Prefer streams that belong to lower-numbered channels, to reap idle channels. + /// * Prefer filling up existing streams' queue depth before spinning up new streams. + /// * Don't hold a lock while spinning up new streams. + /// * Allow concurrent clients to join onto streams while they're spun up. + /// * Allow spinning up multiple streams concurrently, but don't overshoot limits. + /// + /// For now, we just do something simple but inefficient (linear scan under mutex). + pub async fn get(self: &Arc) -> StreamGuard { + // Acquire a permit if the pool is bounded. 
+ let mut permit = None; + if let Some(limiter) = self.limiter.clone() { + permit = Some(limiter.acquire_owned().await.expect("never closed")); + } + let mut streams = self.streams.lock().unwrap(); + + // Look for a pooled stream with available capacity. + for (&id, entry) in streams.iter_mut() { + assert!( + entry.queue_depth <= self.max_queue_depth.get(), + "stream queue overflow" + ); + assert_eq!( + entry.idle_since.is_some(), + entry.queue_depth == 0, + "incorrect stream idle state" + ); + if entry.queue_depth < self.max_queue_depth.get() { + entry.queue_depth += 1; + entry.idle_since = None; + return StreamGuard { + pool: Arc::downgrade(self), + id, + sender: entry.sender.clone(), + permit, + }; + } + } + + // No available stream, spin up a new one. We install the stream entry in the pool first and + // return the guard, while spinning up the stream task async. This allows other callers to + // join onto this stream and also create additional streams concurrently if this fills up. + let id = self.next_stream_id.fetch_add(1, Ordering::Relaxed); + let (req_tx, req_rx) = mpsc::channel(self.max_queue_depth.get()); + let entry = StreamEntry { + sender: req_tx.clone(), + queue_depth: 1, // reserve quota for this caller + idle_since: None, + }; + streams.insert(id, entry); + + if let Some(max_streams) = self.max_streams { + assert!(streams.len() <= max_streams.get(), "stream overflow"); + }; + + let client_pool = self.client_pool.clone(); + let pool = Arc::downgrade(self); + + tokio::spawn(async move { + if let Err(err) = Self::run_stream(client_pool, req_rx).await { + error!("stream failed: {err}"); + } + // Remove stream from pool on exit. Weak reference to avoid holding the pool alive. + if let Some(pool) = pool.upgrade() { + let entry = pool.streams.lock().unwrap().remove(&id); + assert!(entry.is_some(), "unknown stream ID: {id}"); + } + }); + + StreamGuard { + pool: Arc::downgrade(self), + id, + sender: req_tx, + permit, + } + } + + /// Runs a stream task. This acquires a client from the `ClientPool` and establishes a + /// bidirectional GetPage stream, then forwards requests and responses between callers and the + /// stream. It does not track or enforce queue depths -- that's done by `get()` since it must be + /// atomic with pool stream acquisition. + /// + /// The task exits when the request channel is closed, or on a stream error. The caller is + /// responsible for removing the stream from the pool on exit. + async fn run_stream( + client_pool: Arc, + mut caller_rx: RequestReceiver, + ) -> anyhow::Result<()> { + // Acquire a client from the pool and create a stream. + let mut client = client_pool.get().await?; + + let (req_tx, req_rx) = mpsc::channel(1); + let req_stream = tokio_stream::wrappers::ReceiverStream::new(req_rx); + let mut resp_stream = client.get_pages(req_stream).await?; + + // Track caller response channels by request ID. If the task returns early, these response + // channels will be dropped and the waiting callers will receive an error. + let mut callers = HashMap::new(); + + // Process requests and responses. + loop { + tokio::select! { + // Receive requests from callers and send them to the stream. + req = caller_rx.recv() => { + // Shut down if request channel is closed. + let Some((req, resp_tx)) = req else { + return Ok(()); + }; + + // Store the response channel by request ID. + if callers.contains_key(&req.request_id) { + // Error on request ID duplicates. Ignore callers that went away. 
+ _ = resp_tx.send(Err(tonic::Status::invalid_argument( + format!("duplicate request ID: {}", req.request_id), + ))); + continue; + } + callers.insert(req.request_id, resp_tx); + + // Send the request on the stream. Bail out if the send fails. + req_tx.send(req).await.map_err(|_| { + tonic::Status::unavailable("stream closed") + })?; + } + + // Receive responses from the stream and send them to callers. + resp = resp_stream.next() => { + // Shut down if the stream is closed, and bail out on stream errors. + let Some(resp) = resp.transpose()? else { + return Ok(()) + }; + + // Send the response to the caller. Ignore errors if the caller went away. + let Some(resp_tx) = callers.remove(&resp.request_id) else { + warn!("received response for unknown request ID: {}", resp.request_id); + continue; + }; + _ = resp_tx.send(Ok(resp)); + } + } + } + } +} + +impl Reapable for StreamPool { + /// Reaps streams that have been idle since before the cutoff. + fn reap_idle(&self, cutoff: Instant) { + self.streams.lock().unwrap().retain(|_, entry| { + let Some(idle_since) = entry.idle_since else { + assert_ne!(entry.queue_depth, 0, "empty stream not marked idle"); + return true; + }; + assert_eq!(entry.queue_depth, 0, "idle stream has requests"); + idle_since >= cutoff + }); + } +} + +/// A pooled stream reference. Can be used to send a single request, to properly enforce queue +/// depth. Queue depth is already reserved and will be returned on drop. +pub struct StreamGuard { + pool: Weak, + id: StreamID, + sender: RequestSender, + permit: Option, // None if pool is unbounded +} + +impl StreamGuard { + /// Sends a request on the stream and awaits the response. Consumes the guard, since it's only + /// valid for a single request (to enforce queue depth). This also drops the guard on return and + /// returns the queue depth quota to the pool. + /// + /// The `GetPageRequest::request_id` must be unique across in-flight requests. + /// + /// NB: errors are often returned as `GetPageResponse::status_code` instead of `tonic::Status` + /// to avoid tearing down the stream for per-request errors. Callers must check this. + pub async fn send( + self, + req: page_api::GetPageRequest, + ) -> tonic::Result { + let (resp_tx, resp_rx) = oneshot::channel(); + + self.sender + .send((req, resp_tx)) + .await + .map_err(|_| tonic::Status::unavailable("stream closed"))?; + + resp_rx + .await + .map_err(|_| tonic::Status::unavailable("stream closed"))? + } +} + +impl Drop for StreamGuard { + fn drop(&mut self) { + let Some(pool) = self.pool.upgrade() else { + return; // pool was dropped + }; + + // Release the queue depth reservation on drop. This can prematurely decrement it if dropped + // before the response is received, but that's okay. + let mut streams = pool.streams.lock().unwrap(); + let entry = streams.get_mut(&self.id).expect("unknown stream"); + assert!(entry.idle_since.is_none(), "active stream marked idle"); + assert!(entry.queue_depth > 0, "stream queue underflow"); + entry.queue_depth -= 1; + if entry.queue_depth == 0 { + entry.idle_since = Some(Instant::now()); // mark stream as idle + } + + _ = self.permit; // returned on drop, referenced for visibility + } +} + +/// Periodically reaps idle resources from a pool. +struct Reaper { + /// The task check interval. + interval: Duration, + /// The threshold for reaping idle resources. + threshold: Duration, + /// Cancels the reaper task. Cancelled when the reaper is dropped. + cancel: CancellationToken, +} + +impl Reaper { + /// Creates a new reaper. 
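// The intended wiring, as `StreamPool::new()` does above (a sketch; assumes a `Reapable`
// pool type wrapped in an `Arc`, with the remaining fields elided):
//
//     let idle_reaper = Reaper::new(REAP_IDLE_THRESHOLD, REAP_IDLE_INTERVAL);
//     let pool = Arc::new(StreamPool { idle_reaper, /* ... */ });
//     pool.idle_reaper.spawn(&pool); // the background task holds only a Weak pool reference
//     drop(pool); // dropping the pool drops the Reaper, which cancels the reaper task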
+ pub fn new(threshold: Duration, interval: Duration) -> Self { + Self { + cancel: CancellationToken::new(), + threshold, + interval, + } + } + + /// Spawns a task to periodically reap idle resources from the given task pool. The task is + /// cancelled when the reaper is dropped. + pub fn spawn(&self, pool: &Arc) { + // NB: hold a weak pool reference, otherwise the task will prevent dropping the pool. + let pool = Arc::downgrade(pool); + let cancel = self.cancel.clone(); + let (interval, threshold) = (self.interval, self.threshold); + + tokio::spawn(async move { + loop { + tokio::select! { + _ = tokio::time::sleep(interval) => { + let Some(pool) = pool.upgrade() else { + return; // pool was dropped + }; + pool.reap_idle(Instant::now() - threshold); + } + + _ = cancel.cancelled() => return, + } + } + }); + } +} + +impl Drop for Reaper { + fn drop(&mut self) { + self.cancel.cancel(); // cancel reaper task + } +} + +/// A reapable resource pool. +trait Reapable: Send + Sync + 'static { + /// Reaps resources that have been idle since before the given cutoff. + fn reap_idle(&self, cutoff: Instant); +} diff --git a/pageserver/client_grpc/src/retry.rs b/pageserver/client_grpc/src/retry.rs new file mode 100644 index 0000000000..fe26c99289 --- /dev/null +++ b/pageserver/client_grpc/src/retry.rs @@ -0,0 +1,152 @@ +use std::time::Duration; + +use tokio::time::Instant; +use tracing::{error, info, warn}; + +use utils::backoff::exponential_backoff_duration; + +/// A retry handler for Pageserver gRPC requests. +/// +/// This is used instead of backoff::retry for better control and observability. +#[derive(Clone, Copy)] +pub struct Retry; + +impl Retry { + /// The per-request timeout. + // TODO: tune these, and/or make them configurable. Should we retry forever? + const REQUEST_TIMEOUT: Duration = Duration::from_secs(10); + /// The total timeout across all attempts + const TOTAL_TIMEOUT: Duration = Duration::from_secs(60); + /// The initial backoff duration. + const BASE_BACKOFF: Duration = Duration::from_millis(10); + /// The maximum backoff duration. + const MAX_BACKOFF: Duration = Duration::from_secs(10); + /// If true, log successful requests. For debugging. + const LOG_SUCCESS: bool = false; + + /// Runs the given async closure with timeouts and retries (exponential backoff). Logs errors, + /// using the current tracing span for context. + /// + /// Only certain gRPC status codes are retried, see [`Self::should_retry`]. For default + /// timeouts, see [`Self::REQUEST_TIMEOUT`] and [`Self::TOTAL_TIMEOUT`]. + pub async fn with(&self, mut f: F) -> tonic::Result + where + F: FnMut() -> O, + O: Future>, + { + let started = Instant::now(); + let deadline = started + Self::TOTAL_TIMEOUT; + let mut last_error = None; + let mut retries = 0; + loop { + // Set up a future to wait for the backoff (if any) and run the request with a timeout. + let backoff_and_try = async { + // NB: sleep() always sleeps 1ms, even when given a 0 argument. See: + // https://github.com/tokio-rs/tokio/issues/6866 + if let Some(backoff) = Self::backoff_duration(retries) { + tokio::time::sleep(backoff).await; + } + + let request_started = Instant::now(); + tokio::time::timeout(Self::REQUEST_TIMEOUT, f()) + .await + .map_err(|_| { + tonic::Status::deadline_exceeded(format!( + "request timed out after {:.3}s", + request_started.elapsed().as_secs_f64() + )) + })? + }; + + // Wait for the backoff and request, or bail out if the total timeout is exceeded. + let result = tokio::select! 
{
+                result = backoff_and_try => result,
+
+                _ = tokio::time::sleep_until(deadline) => {
+                    let last_error = last_error.unwrap_or_else(|| {
+                        tonic::Status::deadline_exceeded(format!(
+                            "request timed out after {:.3}s",
+                            started.elapsed().as_secs_f64()
+                        ))
+                    });
+                    error!(
+                        "giving up after {:.3}s and {retries} retries, last error {:?}: {}",
+                        started.elapsed().as_secs_f64(), last_error.code(), last_error.message(),
+                    );
+                    return Err(last_error);
+                }
+            };
+
+            match result {
+                // Success, return the result.
+                Ok(result) => {
+                    if retries > 0 || Self::LOG_SUCCESS {
+                        info!(
+                            "request succeeded after {retries} retries in {:.3}s",
+                            started.elapsed().as_secs_f64(),
+                        );
+                    }
+
+                    return Ok(result);
+                }
+
+                // Error, retry or bail out.
+                Err(status) => {
+                    let (code, message) = (status.code(), status.message());
+                    let attempt = retries + 1;
+
+                    if !Self::should_retry(code) {
+                        // NB: include the attempt here too. This isn't necessarily the first
+                        // attempt, because the error may change between attempts.
+                        error!(
+                            "request failed with {code:?}: {message}, not retrying (attempt {attempt})"
+                        );
+                        return Err(status);
+                    }
+
+                    warn!("request failed with {code:?}: {message}, retrying (attempt {attempt})");
+
+                    retries += 1;
+                    last_error = Some(status);
+                }
+            }
+        }
+    }
+
+    /// Returns the backoff duration for the given retry attempt, or None for no backoff.
+    fn backoff_duration(retry: usize) -> Option<Duration> {
+        let backoff = exponential_backoff_duration(
+            retry as u32,
+            Self::BASE_BACKOFF.as_secs_f64(),
+            Self::MAX_BACKOFF.as_secs_f64(),
+        );
+        (!backoff.is_zero()).then_some(backoff)
+    }
+
+    /// Returns true if the given status code should be retried.
+    fn should_retry(code: tonic::Code) -> bool {
+        match code {
+            tonic::Code::Ok => panic!("unexpected Ok status code"),
+
+            // These codes are transient, so retry them.
+            tonic::Code::Aborted => true,
+            tonic::Code::Cancelled => true,
+            tonic::Code::DeadlineExceeded => true, // maybe transient slowness
+            tonic::Code::Internal => true, // maybe transient failure?
+            tonic::Code::ResourceExhausted => true,
+            tonic::Code::Unavailable => true,
+
+            // The following codes will likely continue to fail, so don't retry.
+            tonic::Code::AlreadyExists => false,
+            tonic::Code::DataLoss => false,
+            tonic::Code::FailedPrecondition => false,
+            tonic::Code::InvalidArgument => false,
+            tonic::Code::NotFound => false,
+            tonic::Code::OutOfRange => false,
+            tonic::Code::PermissionDenied => false,
+            tonic::Code::Unauthenticated => false,
+            tonic::Code::Unimplemented => false,
+            tonic::Code::Unknown => false,
+        }
+    }
+}
diff --git a/pageserver/client_grpc/src/split.rs b/pageserver/client_grpc/src/split.rs
new file mode 100644
index 0000000000..894aaa992c
--- /dev/null
+++ b/pageserver/client_grpc/src/split.rs
@@ -0,0 +1,168 @@
+use std::collections::HashMap;
+
+use bytes::Bytes;
+
+use pageserver_api::key::rel_block_to_key;
+use pageserver_api::shard::{ShardStripeSize, key_to_shard_number};
+use pageserver_page_api as page_api;
+use utils::shard::{ShardCount, ShardIndex};
+
+/// Splits GetPageRequests that straddle shard boundaries and assembles the responses.
+/// TODO: add tests for this.
+pub struct GetPageSplitter {
+    /// The original request ID. Used for all shard requests.
+    request_id: page_api::RequestID,
+    /// Split requests by shard index.
+    requests: HashMap<ShardIndex, page_api::GetPageRequest>,
+    /// Maps the offset in `GetPageRequest::block_numbers` to the owning shard. Used to assemble
+    /// the response pages in the same order as the original request.
+ block_shards: Vec, + /// Page responses by shard index. Will be assembled into a single response. + responses: HashMap>, +} + +impl GetPageSplitter { + /// Checks if the given request only touches a single shard, and returns the shard ID. This is + /// the common case, so we check first in order to avoid unnecessary allocations and overhead. + /// The caller must ensure that the request has at least one block number, or this will panic. + pub fn is_single_shard( + req: &page_api::GetPageRequest, + count: ShardCount, + stripe_size: ShardStripeSize, + ) -> Option { + // Fast path: unsharded tenant. + if count.is_unsharded() { + return Some(ShardIndex::unsharded()); + } + + // Find the base shard index for the first page, and compare with the rest. + let key = rel_block_to_key(req.rel, *req.block_numbers.first().expect("no pages")); + let shard_number = key_to_shard_number(count, stripe_size, &key); + + req.block_numbers + .iter() + .skip(1) // computed above + .all(|&blkno| { + let key = rel_block_to_key(req.rel, blkno); + key_to_shard_number(count, stripe_size, &key) == shard_number + }) + .then_some(ShardIndex::new(shard_number, count)) + } + + /// Splits the given request. + pub fn split( + req: page_api::GetPageRequest, + count: ShardCount, + stripe_size: ShardStripeSize, + ) -> Self { + // The caller should make sure we don't split requests unnecessarily. + debug_assert!( + Self::is_single_shard(&req, count, stripe_size).is_none(), + "unnecessary request split" + ); + + // Split the requests by shard index. + let mut requests = HashMap::with_capacity(2); // common case + let mut block_shards = Vec::with_capacity(req.block_numbers.len()); + for blkno in req.block_numbers { + let key = rel_block_to_key(req.rel, blkno); + let shard_number = key_to_shard_number(count, stripe_size, &key); + let shard_id = ShardIndex::new(shard_number, count); + + let shard_req = requests + .entry(shard_id) + .or_insert_with(|| page_api::GetPageRequest { + request_id: req.request_id, + request_class: req.request_class, + rel: req.rel, + read_lsn: req.read_lsn, + block_numbers: Vec::new(), + }); + shard_req.block_numbers.push(blkno); + block_shards.push(shard_id); + } + + Self { + request_id: req.request_id, + responses: HashMap::with_capacity(requests.len()), + requests, + block_shards, + } + } + + /// Drains the per-shard requests, moving them out of the hashmap to avoid extra allocations. + pub fn drain_requests( + &mut self, + ) -> impl Iterator { + self.requests.drain() + } + + /// Adds a response from the given shard. The response must match the request ID and have an OK + /// status code. A response must not already exist for the given shard ID. + pub fn add_response(&mut self, shard_id: ShardIndex, response: page_api::GetPageResponse) { + // NB: this is called below a `Retry::with()`, so unrecoverable errors should not use a + // retryable status code (e.g. `Internal`). + + // The caller should already have converted status codes into tonic::Status. + assert_eq!( + response.status_code, + page_api::GetPageStatusCode::Ok, + "non-OK response" + ); + + // The stream pool ensures the response matches the request ID. + assert_eq!(response.request_id, self.request_id, "response ID mismatch"); + + // Add the response data to the map. + let old = self.responses.insert(shard_id, response.page_images); + + // We only dispatch one request per shard. + assert!(old.is_none(), "duplicate response for shard {shard_id}"); + } + + /// Assembles the shard responses into a single response. 
Responses must be present for all + /// relevant shards, and the total number of pages must match the original request. + #[allow(clippy::result_large_err)] + pub fn assemble_response(self) -> tonic::Result { + // NB: this is called below a `Retry::with()`, so unrecoverable errors should not use a + // retryable status code (e.g. `Internal`). + + let mut response = page_api::GetPageResponse { + request_id: self.request_id, + status_code: page_api::GetPageStatusCode::Ok, + reason: None, + page_images: Vec::with_capacity(self.block_shards.len()), + }; + + // Set up per-shard page iterators we can pull from. + let mut shard_responses = HashMap::with_capacity(self.responses.len()); + for (shard_id, responses) in self.responses { + shard_responses.insert(shard_id, responses.into_iter()); + } + + // Reassemble the responses in the same order as the original request. + for shard_id in &self.block_shards { + let page = shard_responses + .get_mut(shard_id) + .ok_or_else(|| { + tonic::Status::data_loss(format!("missing response for shard {shard_id}")) + })? + .next() + .ok_or_else(|| { + tonic::Status::data_loss(format!("missing page from shard {shard_id}")) + })?; + response.page_images.push(page); + } + + // Make sure there are no additional pages. + for (shard_id, mut pages) in shard_responses { + if pages.next().is_some() { + return Err(tonic::Status::out_of_range(format!( + "extra pages returned from shard {shard_id}" + ))); + } + } + + Ok(response) + } +} diff --git a/pageserver/compaction/src/simulator/draw.rs b/pageserver/compaction/src/simulator/draw.rs index 3d35d1b91e..8322fe7d6d 100644 --- a/pageserver/compaction/src/simulator/draw.rs +++ b/pageserver/compaction/src/simulator/draw.rs @@ -152,7 +152,7 @@ pub fn draw_history(history: &[LayerTraceEvent], mut output: let key_diff = key_end - key_start; if key_start >= key_end { - panic!("Invalid key range {}-{}", key_start, key_end); + panic!("Invalid key range {key_start}-{key_end}"); } let lsn_start = lsn_map.map(f.lsn_range.start); @@ -212,12 +212,12 @@ pub fn draw_history(history: &[LayerTraceEvent], mut output: )?; writeln!(svg, "")?; } - Ordering::Greater => panic!("Invalid lsn range {}-{}", lsn_start, lsn_end), + Ordering::Greater => panic!("Invalid lsn range {lsn_start}-{lsn_end}"), } files_seen.insert(f); } - writeln!(svg, "{}", EndSvg)?; + writeln!(svg, "{EndSvg}")?; let mut layer_events_str = String::new(); let mut first = true; diff --git a/pageserver/ctl/src/draw_timeline_dir.rs b/pageserver/ctl/src/draw_timeline_dir.rs index 80ca414543..2135d302c1 100644 --- a/pageserver/ctl/src/draw_timeline_dir.rs +++ b/pageserver/ctl/src/draw_timeline_dir.rs @@ -20,7 +20,7 @@ //! //! # local timeline dir //! ls test_output/test_pgbench\[neon-45-684\]/repo/tenants/$TENANT/timelines/$TIMELINE | \ -//! grep "__" | cargo run --release --bin pagectl draw-timeline-dir > out.svg +//! grep "__" | cargo run --release --bin pagectl draw-timeline > out.svg //! //! # Layer map dump from `/v1/tenant/$TENANT/timeline/$TIMELINE/layer` //! 
(jq -r '.historic_layers[] | .layer_file_name' | cargo run -p pagectl draw-timeline) < layer-map.json > out.svg @@ -81,7 +81,11 @@ fn build_coordinate_compression_map(coords: Vec) -> BTreeMap (Range, Range) { let split: Vec<&str> = name.split("__").collect(); let keys: Vec<&str> = split[0].split('-').collect(); - let mut lsns: Vec<&str> = split[1].split('-').collect(); + + // Remove the temporary file extension, e.g., remove the `.d20a.___temp` part from the following filename: + // 000000067F000040490000404A00441B0000-000000067F000040490000404A00441B4000__000043483A34CE00.d20a.___temp + let lsns = split[1].split('.').collect::>()[0]; + let mut lsns: Vec<&str> = lsns.split('-').collect(); // The current format of the layer file name: 000000067F0000000400000B150100000000-000000067F0000000400000D350100000000__00000000014B7AC8-v1-00000001 @@ -224,7 +228,7 @@ pub fn main() -> Result<()> { let lsn_max = lsn_map.len(); if key_start >= key_end { - panic!("Invalid key range {}-{}", key_start, key_end); + panic!("Invalid key range {key_start}-{key_end}"); } let lsn_start = *lsn_map.get(&lsnr.start).unwrap(); @@ -246,7 +250,7 @@ pub fn main() -> Result<()> { ymargin = 0.05; fill = Fill::Color(rgb(0, 0, 0)); } - Ordering::Greater => panic!("Invalid lsn range {}-{}", lsn_start, lsn_end), + Ordering::Greater => panic!("Invalid lsn range {lsn_start}-{lsn_end}"), } println!( @@ -283,10 +287,10 @@ pub fn main() -> Result<()> { ); } - println!("{}", EndSvg); + println!("{EndSvg}"); - eprintln!("num_images: {}", num_images); - eprintln!("num_deltas: {}", num_deltas); + eprintln!("num_images: {num_images}"); + eprintln!("num_deltas: {num_deltas}"); Ok(()) } diff --git a/pageserver/ctl/src/key.rs b/pageserver/ctl/src/key.rs index 600f7c412e..c4daafdfd0 100644 --- a/pageserver/ctl/src/key.rs +++ b/pageserver/ctl/src/key.rs @@ -372,7 +372,7 @@ impl std::fmt::Debug for RelTagish { f.write_char('/')?; } first = false; - write!(f, "{}", x) + write!(f, "{x}") }) } } diff --git a/pageserver/ctl/src/layer_map_analyzer.rs b/pageserver/ctl/src/layer_map_analyzer.rs index c49c8b58df..ef844fbd0f 100644 --- a/pageserver/ctl/src/layer_map_analyzer.rs +++ b/pageserver/ctl/src/layer_map_analyzer.rs @@ -224,8 +224,7 @@ pub(crate) async fn main(cmd: &AnalyzeLayerMapCmd) -> Result<()> { } } println!( - "Total delta layers {} image layers {} excess layers {}", - total_delta_layers, total_image_layers, total_excess_layers + "Total delta layers {total_delta_layers} image layers {total_image_layers} excess layers {total_excess_layers}" ); Ok(()) } diff --git a/pageserver/ctl/src/layers.rs b/pageserver/ctl/src/layers.rs index 79f56a5a51..42b3e4a9ba 100644 --- a/pageserver/ctl/src/layers.rs +++ b/pageserver/ctl/src/layers.rs @@ -13,7 +13,7 @@ use pageserver::{page_cache, virtual_file}; use pageserver_api::key::Key; use utils::id::{TenantId, TimelineId}; -use crate::layer_map_analyzer::parse_filename; +use crate::layer_map_analyzer::{LayerFile, parse_filename}; #[derive(Subcommand)] pub(crate) enum LayerCmd { @@ -38,6 +38,8 @@ pub(crate) enum LayerCmd { /// The id from list-layer command id: usize, }, + /// Dump all information of a layer file locally + DumpLayerLocal { path: PathBuf }, RewriteSummary { layer_file_path: Utf8PathBuf, #[clap(long)] @@ -131,15 +133,7 @@ pub(crate) async fn main(cmd: &LayerCmd) -> Result<()> { } for (idx, layer_file) in to_print { - println!( - "[{:3}] key:{}-{}\n lsn:{}-{}\n delta:{}", - idx, - layer_file.key_range.start, - layer_file.key_range.end, - layer_file.lsn_range.start, - 
layer_file.lsn_range.end, - layer_file.is_delta, - ); + print_layer_file(idx, &layer_file); } Ok(()) } @@ -159,16 +153,7 @@ pub(crate) async fn main(cmd: &LayerCmd) -> Result<()> { let layer = layer?; if let Ok(layer_file) = parse_filename(&layer.file_name().into_string().unwrap()) { if *id == idx { - // TODO(chi): dedup code - println!( - "[{:3}] key:{}-{}\n lsn:{}-{}\n delta:{}", - idx, - layer_file.key_range.start, - layer_file.key_range.end, - layer_file.lsn_range.start, - layer_file.lsn_range.end, - layer_file.is_delta, - ); + print_layer_file(idx, &layer_file); if layer_file.is_delta { read_delta_file(layer.path(), &ctx).await?; @@ -183,6 +168,18 @@ pub(crate) async fn main(cmd: &LayerCmd) -> Result<()> { } Ok(()) } + LayerCmd::DumpLayerLocal { path } => { + if let Ok(layer_file) = parse_filename(path.file_name().unwrap().to_str().unwrap()) { + print_layer_file(0, &layer_file); + + if layer_file.is_delta { + read_delta_file(path, &ctx).await?; + } else { + read_image_file(path, &ctx).await?; + } + } + Ok(()) + } LayerCmd::RewriteSummary { layer_file_path, new_tenant_id, @@ -247,3 +244,15 @@ pub(crate) async fn main(cmd: &LayerCmd) -> Result<()> { } } } + +fn print_layer_file(idx: usize, layer_file: &LayerFile) { + println!( + "[{:3}] key:{}-{}\n lsn:{}-{}\n delta:{}", + idx, + layer_file.key_range.start, + layer_file.key_range.end, + layer_file.lsn_range.start, + layer_file.lsn_range.end, + layer_file.is_delta, + ); +} diff --git a/pageserver/ctl/src/main.rs b/pageserver/ctl/src/main.rs index 1d81b839a8..3cd4faaf2e 100644 --- a/pageserver/ctl/src/main.rs +++ b/pageserver/ctl/src/main.rs @@ -176,9 +176,11 @@ async fn main() -> anyhow::Result<()> { let config = RemoteStorageConfig::from_toml_str(&cmd.config_toml_str)?; let storage = remote_storage::GenericRemoteStorage::from_config(&config).await; let cancel = CancellationToken::new(); + // Complexity limit: as we are running this command locally, we should have a lot of memory available, and we do not + // need to limit the number of versions we are going to delete. storage .unwrap() - .time_travel_recover(Some(&prefix), timestamp, done_if_after, &cancel) + .time_travel_recover(Some(&prefix), timestamp, done_if_after, &cancel, None) .await?; } Commands::Key(dkc) => dkc.execute(), diff --git a/pageserver/page_api/Cargo.toml b/pageserver/page_api/Cargo.toml index e643b5749b..fbad8cf9d0 100644 --- a/pageserver/page_api/Cargo.toml +++ b/pageserver/page_api/Cargo.toml @@ -5,11 +5,18 @@ edition.workspace = true license.workspace = true [dependencies] +anyhow.workspace = true bytes.workspace = true +futures.workspace = true pageserver_api.workspace = true -postgres_ffi.workspace = true +postgres_ffi_types.workspace = true prost.workspace = true +prost-types.workspace = true +strum.workspace = true +strum_macros.workspace = true thiserror.workspace = true +tokio.workspace = true +tokio-util.workspace = true tonic.workspace = true utils.workspace = true workspace_hack.workspace = true diff --git a/pageserver/page_api/proto/page_service.proto b/pageserver/page_api/proto/page_service.proto index 44976084bf..1d6c230916 100644 --- a/pageserver/page_api/proto/page_service.proto +++ b/pageserver/page_api/proto/page_service.proto @@ -35,6 +35,8 @@ syntax = "proto3"; package page_api; +import "google/protobuf/timestamp.proto"; + service PageService { // Returns whether a relation exists. rpc CheckRelExists(CheckRelExistsRequest) returns (CheckRelExistsResponse); @@ -64,6 +66,10 @@ service PageService { // Fetches an SLRU segment. 
rpc GetSlruSegment (GetSlruSegmentRequest) returns (GetSlruSegmentResponse); + + // Acquires or extends a lease on the given LSN. This guarantees that the Pageserver won't garbage + // collect the LSN until the lease expires. Must be acquired on all relevant shards. + rpc LeaseLsn (LeaseLsnRequest) returns (LeaseLsnResponse); } // The LSN a request should read at. @@ -102,12 +108,27 @@ message CheckRelExistsResponse { bool exists = 1; } -// Requests a base backup at a given LSN. +// Requests a base backup. message GetBaseBackupRequest { - // The LSN to fetch a base backup at. - ReadLsn read_lsn = 1; + // The LSN to fetch the base backup at. 0 or absent means the latest LSN known to the Pageserver. + uint64 lsn = 1; // If true, logical replication slots will not be created. bool replica = 2; + // If true, include relation files in the base backup. Mainly for debugging and tests. + bool full = 3; + // Compression algorithm to use. Base backups send a compressed payload instead of using gRPC + // compression, so that we can cache compressed backups on the server. + BaseBackupCompression compression = 4; +} + +// Base backup compression algorithms. +enum BaseBackupCompression { + // Unknown algorithm. Used when clients send an unsupported algorithm. + BASE_BACKUP_COMPRESSION_UNKNOWN = 0; + // No compression. + BASE_BACKUP_COMPRESSION_NONE = 1; + // GZIP compression. + BASE_BACKUP_COMPRESSION_GZIP = 2; } // Base backup response chunk, returned as an ordered stream. @@ -237,3 +258,17 @@ message GetSlruSegmentRequest { message GetSlruSegmentResponse { bytes segment = 1; } + +// Acquires or extends a lease on the given LSN. This guarantees that the Pageserver won't garbage +// collect the LSN until the lease expires. Must be acquired on all relevant shards. +message LeaseLsnRequest { + // The LSN to lease. Can't be 0 or below the current GC cutoff. + uint64 lsn = 1; +} + +// Lease acquisition response. If the lease could not be granted because the LSN has already been +// garbage collected, a FailedPrecondition status will be returned instead. +message LeaseLsnResponse { + // The lease expiration time. + google.protobuf.Timestamp expires = 1; +} diff --git a/pageserver/page_api/src/client.rs b/pageserver/page_api/src/client.rs new file mode 100644 index 0000000000..18d02c4ed8 --- /dev/null +++ b/pageserver/page_api/src/client.rs @@ -0,0 +1,188 @@ +use anyhow::anyhow; +use futures::{Stream, StreamExt as _, TryStreamExt as _}; +use tokio::io::AsyncRead; +use tokio_util::io::StreamReader; +use tonic::codec::CompressionEncoding; +use tonic::metadata::AsciiMetadataValue; +use tonic::service::Interceptor; +use tonic::service::interceptor::InterceptedService; +use tonic::transport::{Channel, Endpoint}; + +use utils::id::{TenantId, TimelineId}; +use utils::shard::ShardIndex; + +use crate::model::*; +use crate::proto; + +/// A basic Pageserver gRPC client, for a single tenant shard. This API uses native Rust domain +/// types from `model` rather than generated Protobuf types. +pub struct Client { + inner: proto::PageServiceClient>, +} + +impl Client { + /// Connects to the given gRPC endpoint. 
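// A typical connection, sketched with a placeholder endpoint and IDs (the parameter order
// mirrors the signature below; compression is optional, and Zstd is only an example):
//
//     let mut client = Client::connect(
//         "grpc://localhost:51051".to_string(), // placeholder endpoint
//         tenant_id,
//         timeline_id,
//         ShardIndex::unsharded(),
//         None,                            // auth token
//         Some(CompressionEncoding::Zstd), // optional gRPC codec compression
//     )
//     .await?;
//
// The returned client can then issue the unary and streaming RPCs defined below.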
+ pub async fn connect( + endpoint: E, + tenant_id: TenantId, + timeline_id: TimelineId, + shard_id: ShardIndex, + auth_token: Option, + compression: Option, + ) -> anyhow::Result + where + E: TryInto + Send + Sync + 'static, + >::Error: std::error::Error + Send + Sync, + { + let endpoint: Endpoint = endpoint + .try_into() + .map_err(|err| anyhow!("invalid endpoint: {err}"))?; + let channel = endpoint.connect().await?; + Self::new( + channel, + tenant_id, + timeline_id, + shard_id, + auth_token, + compression, + ) + } + + /// Creates a new client using the given gRPC channel. + pub fn new( + channel: Channel, + tenant_id: TenantId, + timeline_id: TimelineId, + shard_id: ShardIndex, + auth_token: Option, + compression: Option, + ) -> anyhow::Result { + let auth = AuthInterceptor::new(tenant_id, timeline_id, shard_id, auth_token)?; + let mut inner = proto::PageServiceClient::with_interceptor(channel, auth); + + if let Some(compression) = compression { + // TODO: benchmark this (including network latency). + inner = inner + .accept_compressed(compression) + .send_compressed(compression); + } + + Ok(Self { inner }) + } + + /// Returns whether a relation exists. + pub async fn check_rel_exists( + &mut self, + req: CheckRelExistsRequest, + ) -> tonic::Result { + let req = proto::CheckRelExistsRequest::from(req); + let resp = self.inner.check_rel_exists(req).await?.into_inner(); + Ok(resp.into()) + } + + /// Fetches a base backup. + pub async fn get_base_backup( + &mut self, + req: GetBaseBackupRequest, + ) -> tonic::Result> { + let req = proto::GetBaseBackupRequest::from(req); + let chunks = self.inner.get_base_backup(req).await?.into_inner(); + Ok(StreamReader::new( + chunks + .map_ok(|resp| resp.chunk) + .map_err(std::io::Error::other), + )) + } + + /// Returns the total size of a database, as # of bytes. + pub async fn get_db_size(&mut self, req: GetDbSizeRequest) -> tonic::Result { + let req = proto::GetDbSizeRequest::from(req); + let resp = self.inner.get_db_size(req).await?.into_inner(); + Ok(resp.into()) + } + + /// Fetches pages. + /// + /// This is implemented as a bidirectional streaming RPC for performance. Per-request errors are + /// typically returned as status_code instead of errors, to avoid tearing down the entire stream + /// via a tonic::Status error. + pub async fn get_pages( + &mut self, + reqs: impl Stream + Send + 'static, + ) -> tonic::Result> + Send + 'static> { + let reqs = reqs.map(proto::GetPageRequest::from); + let resps = self.inner.get_pages(reqs).await?.into_inner(); + Ok(resps.map_ok(GetPageResponse::from)) + } + + /// Returns the size of a relation, as # of blocks. + pub async fn get_rel_size( + &mut self, + req: GetRelSizeRequest, + ) -> tonic::Result { + let req = proto::GetRelSizeRequest::from(req); + let resp = self.inner.get_rel_size(req).await?.into_inner(); + Ok(resp.into()) + } + + /// Fetches an SLRU segment. + pub async fn get_slru_segment( + &mut self, + req: GetSlruSegmentRequest, + ) -> tonic::Result { + let req = proto::GetSlruSegmentRequest::from(req); + let resp = self.inner.get_slru_segment(req).await?.into_inner(); + Ok(resp.try_into()?) + } + + /// Acquires or extends a lease on the given LSN. This guarantees that the Pageserver won't + /// garbage collect the LSN until the lease expires. Must be acquired on all relevant shards. + /// + /// Returns the lease expiration time, or a FailedPrecondition status if the lease could not be + /// acquired because the LSN has already been garbage collected. 
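// Sketch of acquiring a lease and computing its remaining validity (assumes `client` and
// `lsn` obtained elsewhere; the response is a `SystemTime`, see `LeaseLsnResponse`):
//
//     let expires = client.lease_lsn(LeaseLsnRequest { lsn }).await?;
//     let remaining = expires.duration_since(SystemTime::now()).unwrap_or_default();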
+ pub async fn lease_lsn(&mut self, req: LeaseLsnRequest) -> tonic::Result { + let req = proto::LeaseLsnRequest::from(req); + let resp = self.inner.lease_lsn(req).await?.into_inner(); + Ok(resp.try_into()?) + } +} + +/// Adds authentication metadata to gRPC requests. +#[derive(Clone)] +struct AuthInterceptor { + tenant_id: AsciiMetadataValue, + timeline_id: AsciiMetadataValue, + shard_id: AsciiMetadataValue, + auth_header: Option, // including "Bearer " prefix +} + +impl AuthInterceptor { + fn new( + tenant_id: TenantId, + timeline_id: TimelineId, + shard_id: ShardIndex, + auth_token: Option, + ) -> anyhow::Result { + Ok(Self { + tenant_id: tenant_id.to_string().try_into()?, + timeline_id: timeline_id.to_string().try_into()?, + shard_id: shard_id.to_string().try_into()?, + auth_header: auth_token + .map(|token| format!("Bearer {token}").try_into()) + .transpose()?, + }) + } +} + +impl Interceptor for AuthInterceptor { + fn call(&mut self, mut req: tonic::Request<()>) -> tonic::Result> { + let metadata = req.metadata_mut(); + metadata.insert("neon-tenant-id", self.tenant_id.clone()); + metadata.insert("neon-timeline-id", self.timeline_id.clone()); + metadata.insert("neon-shard-id", self.shard_id.clone()); + if let Some(ref auth_header) = self.auth_header { + metadata.insert("authorization", auth_header.clone()); + } + Ok(req) + } +} diff --git a/pageserver/page_api/src/lib.rs b/pageserver/page_api/src/lib.rs index f515f27f3e..e78f6ce206 100644 --- a/pageserver/page_api/src/lib.rs +++ b/pageserver/page_api/src/lib.rs @@ -18,6 +18,8 @@ pub mod proto { pub use page_service_server::{PageService, PageServiceServer}; } +mod client; +pub use client::Client; mod model; pub use model::*; diff --git a/pageserver/page_api/src/model.rs b/pageserver/page_api/src/model.rs index 1a08d04cc1..af78212b68 100644 --- a/pageserver/page_api/src/model.rs +++ b/pageserver/page_api/src/model.rs @@ -16,22 +16,25 @@ //! stream combinators without dealing with errors, and avoids validating the same message twice. use std::fmt::Display; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use bytes::Bytes; -use postgres_ffi::Oid; -// TODO: split out Lsn, RelTag, SlruKind, Oid and other basic types to a separate crate, to avoid +use postgres_ffi_types::Oid; +// TODO: split out Lsn, RelTag, SlruKind and other basic types to a separate crate, to avoid // pulling in all of their other crate dependencies when building the client. use utils::lsn::Lsn; use crate::proto; /// A protocol error. Typically returned via try_from() or try_into(). -#[derive(thiserror::Error, Debug)] +#[derive(thiserror::Error, Clone, Debug)] pub enum ProtocolError { #[error("field '{0}' has invalid value '{1}'")] Invalid(&'static str, String), #[error("required field '{0}' is missing")] Missing(&'static str), + #[error("invalid combination of not_modified_lsn '{0}' and request_lsn '{1}'")] + InvalidLsns(Lsn, Lsn), } impl ProtocolError { @@ -84,9 +87,9 @@ impl TryFrom for ReadLsn { return Err(ProtocolError::invalid("request_lsn", pb.request_lsn)); } if pb.not_modified_since_lsn > pb.request_lsn { - return Err(ProtocolError::invalid( - "not_modified_since_lsn", - pb.not_modified_since_lsn, + return Err(ProtocolError::InvalidLsns( + Lsn(pb.not_modified_since_lsn), + Lsn(pb.request_lsn), )); } Ok(Self { @@ -182,13 +185,18 @@ impl From for proto::CheckRelExistsResponse { } } -/// Requests a base backup at a given LSN. +/// Requests a base backup. #[derive(Clone, Copy, Debug)] pub struct GetBaseBackupRequest { - /// The LSN to fetch a base backup at. 
- pub read_lsn: ReadLsn, + /// The LSN to fetch a base backup at. If None, uses the latest LSN known to the Pageserver. + pub lsn: Option, /// If true, logical replication slots will not be created. pub replica: bool, + /// If true, include relation files in the base backup. Mainly for debugging and tests. + pub full: bool, + /// Compression algorithm to use. Base backups send a compressed payload instead of using gRPC + /// compression, so that we can cache compressed backups on the server. + pub compression: BaseBackupCompression, } impl TryFrom for GetBaseBackupRequest { @@ -196,11 +204,10 @@ impl TryFrom for GetBaseBackupRequest { fn try_from(pb: proto::GetBaseBackupRequest) -> Result { Ok(Self { - read_lsn: pb - .read_lsn - .ok_or(ProtocolError::Missing("read_lsn"))? - .try_into()?, + lsn: (pb.lsn != 0).then_some(Lsn(pb.lsn)), replica: pb.replica, + full: pb.full, + compression: pb.compression.try_into()?, }) } } @@ -208,12 +215,58 @@ impl TryFrom for GetBaseBackupRequest { impl From for proto::GetBaseBackupRequest { fn from(request: GetBaseBackupRequest) -> Self { Self { - read_lsn: Some(request.read_lsn.into()), + lsn: request.lsn.unwrap_or_default().0, replica: request.replica, + full: request.full, + compression: request.compression.into(), } } } +/// Base backup compression algorithm. +#[derive(Clone, Copy, Debug)] +pub enum BaseBackupCompression { + None, + Gzip, +} + +impl TryFrom for BaseBackupCompression { + type Error = ProtocolError; + + fn try_from(pb: proto::BaseBackupCompression) -> Result { + match pb { + proto::BaseBackupCompression::Unknown => Err(ProtocolError::invalid("compression", pb)), + proto::BaseBackupCompression::None => Ok(Self::None), + proto::BaseBackupCompression::Gzip => Ok(Self::Gzip), + } + } +} + +impl TryFrom for BaseBackupCompression { + type Error = ProtocolError; + + fn try_from(compression: i32) -> Result { + proto::BaseBackupCompression::try_from(compression) + .map_err(|_| ProtocolError::invalid("compression", compression)) + .and_then(Self::try_from) + } +} + +impl From for proto::BaseBackupCompression { + fn from(compression: BaseBackupCompression) -> Self { + match compression { + BaseBackupCompression::None => Self::None, + BaseBackupCompression::Gzip => Self::Gzip, + } + } +} + +impl From for i32 { + fn from(compression: BaseBackupCompression) -> Self { + proto::BaseBackupCompression::from(compression).into() + } +} + pub type GetBaseBackupResponseChunk = Bytes; impl TryFrom for GetBaseBackupResponseChunk { @@ -333,7 +386,7 @@ impl From for proto::GetPageRequest { pub type RequestID = u64; /// A GetPage request class. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, strum_macros::Display)] pub enum GetPageClass { /// Unknown class. For backwards compatibility: used when an older client version sends a class /// that a newer server version has removed. @@ -346,6 +399,19 @@ pub enum GetPageClass { Background, } +impl GetPageClass { + /// Returns true if this is considered a bulk request (i.e. more throughput-oriented rather than + /// latency-sensitive). + pub fn is_bulk(&self) -> bool { + match self { + Self::Unknown => false, + Self::Normal => false, + Self::Prefetch => true, + Self::Background => true, + } + } +} + impl From for GetPageClass { fn from(pb: proto::GetPageClass) -> Self { match pb { @@ -422,12 +488,45 @@ impl From for proto::GetPageResponse { } } +impl GetPageResponse { + /// Attempts to represent a tonic::Status as a GetPageResponse if appropriate. 
Returning a + /// tonic::Status will terminate the GetPage stream, so per-request errors are emitted as a + /// GetPageResponse with a non-OK status code instead. + #[allow(clippy::result_large_err)] + pub fn try_from_status( + status: tonic::Status, + request_id: RequestID, + ) -> Result { + // We shouldn't see an OK status here, because we're emitting an error. + debug_assert_ne!(status.code(), tonic::Code::Ok); + if status.code() == tonic::Code::Ok { + return Err(tonic::Status::internal(format!( + "unexpected OK status: {status:?}", + ))); + } + + // If we can't convert the tonic::Code to a GetPageStatusCode, this is not a per-request + // error and we should return a tonic::Status to terminate the stream. + let Ok(status_code) = status.code().try_into() else { + return Err(status); + }; + + // Return a GetPageResponse for the status. + Ok(Self { + request_id, + status_code, + reason: Some(status.message().to_string()), + page_images: Vec::new(), + }) + } +} + /// A GetPage response status code. /// /// These are effectively equivalent to gRPC statuses. However, we use a bidirectional stream /// (potentially shared by many backends), and a gRPC status response would terminate the stream so /// we send GetPageResponse messages with these codes instead. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq, strum_macros::Display)] pub enum GetPageStatusCode { /// Unknown status. For forwards compatibility: used when an older client version receives a new /// status code from a newer server version. @@ -485,8 +584,57 @@ impl From for i32 { } } +impl TryFrom for GetPageStatusCode { + type Error = tonic::Code; + + fn try_from(code: tonic::Code) -> Result { + use tonic::Code; + + let status_code = match code { + Code::Ok => Self::Ok, + + // These are per-request errors, which should be returned as GetPageResponses. + Code::AlreadyExists => Self::InvalidRequest, + Code::DataLoss => Self::InternalError, + Code::FailedPrecondition => Self::InvalidRequest, + Code::InvalidArgument => Self::InvalidRequest, + Code::Internal => Self::InternalError, + Code::NotFound => Self::NotFound, + Code::OutOfRange => Self::InvalidRequest, + Code::ResourceExhausted => Self::SlowDown, + + // These should terminate the stream by returning a tonic::Status. + Code::Aborted + | Code::Cancelled + | Code::DeadlineExceeded + | Code::PermissionDenied + | Code::Unauthenticated + | Code::Unavailable + | Code::Unimplemented + | Code::Unknown => return Err(code), + }; + Ok(status_code) + } +} + +impl From for tonic::Code { + fn from(status_code: GetPageStatusCode) -> Self { + use tonic::Code; + + match status_code { + GetPageStatusCode::Unknown => Code::Unknown, + GetPageStatusCode::Ok => Code::Ok, + GetPageStatusCode::NotFound => Code::NotFound, + GetPageStatusCode::InvalidRequest => Code::InvalidArgument, + GetPageStatusCode::InternalError => Code::Internal, + GetPageStatusCode::SlowDown => Code::ResourceExhausted, + } + } +} + // Fetches the size of a relation at a given LSN, as # of blocks. Only valid on shard 0, other // shards will error. +#[derive(Clone, Copy, Debug)] pub struct GetRelSizeRequest { pub read_lsn: ReadLsn, pub rel: RelTag, @@ -530,6 +678,7 @@ impl From for proto::GetRelSizeResponse { } /// Requests an SLRU segment. Only valid on shard 0, other shards will error. +#[derive(Clone, Copy, Debug)] pub struct GetSlruSegmentRequest { pub read_lsn: ReadLsn, pub kind: SlruKind, @@ -585,3 +734,54 @@ impl From for proto::GetSlruSegmentResponse { // SlruKind is defined in pageserver_api::reltag. 
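// The conversions above give errors two paths: per-request failures are embedded in the
// stream as a `GetPageResponse` with a non-OK status code, while stream-level failures
// surface as a `tonic::Status`. A sketch of how a handler might apply `try_from_status`,
// with a hypothetical request ID:
//
//     match GetPageResponse::try_from_status(tonic::Status::not_found("page not found"), 42) {
//         Ok(resp) => assert_eq!(resp.status_code, GetPageStatusCode::NotFound), // stays in-stream
//         Err(status) => return Err(status), // e.g. Unavailable/Aborted: terminates the stream
//     }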
pub type SlruKind = pageserver_api::reltag::SlruKind; + +/// Acquires or extends a lease on the given LSN. This guarantees that the Pageserver won't garbage +/// collect the LSN until the lease expires. +pub struct LeaseLsnRequest { + /// The LSN to lease. + pub lsn: Lsn, +} + +impl TryFrom for LeaseLsnRequest { + type Error = ProtocolError; + + fn try_from(pb: proto::LeaseLsnRequest) -> Result { + if pb.lsn == 0 { + return Err(ProtocolError::Missing("lsn")); + } + Ok(Self { lsn: Lsn(pb.lsn) }) + } +} + +impl From for proto::LeaseLsnRequest { + fn from(request: LeaseLsnRequest) -> Self { + Self { lsn: request.lsn.0 } + } +} + +/// Lease expiration time. If the lease could not be granted because the LSN has already been +/// garbage collected, a FailedPrecondition status will be returned instead. +pub type LeaseLsnResponse = SystemTime; + +impl TryFrom for LeaseLsnResponse { + type Error = ProtocolError; + + fn try_from(pb: proto::LeaseLsnResponse) -> Result { + let expires = pb.expires.ok_or(ProtocolError::Missing("expires"))?; + UNIX_EPOCH + .checked_add(Duration::new(expires.seconds as u64, expires.nanos as u32)) + .ok_or_else(|| ProtocolError::invalid("expires", expires)) + } +} + +impl From for proto::LeaseLsnResponse { + fn from(response: LeaseLsnResponse) -> Self { + let expires = response.duration_since(UNIX_EPOCH).unwrap_or_default(); + Self { + expires: Some(prost_types::Timestamp { + seconds: expires.as_secs() as i64, + nanos: expires.subsec_nanos() as i32, + }), + } + } +} diff --git a/pageserver/pagebench/Cargo.toml b/pageserver/pagebench/Cargo.toml index 5e4af88e69..117ef0167e 100644 --- a/pageserver/pagebench/Cargo.toml +++ b/pageserver/pagebench/Cargo.toml @@ -24,9 +24,14 @@ tracing.workspace = true tokio.workspace = true tokio-stream.workspace = true tokio-util.workspace = true +axum.workspace = true +http.workspace = true +metrics.workspace = true tonic.workspace = true +url.workspace = true pageserver_client.workspace = true +pageserver_client_grpc.workspace = true pageserver_api.workspace = true pageserver_page_api.workspace = true utils = { path = "../../libs/utils/" } diff --git a/pageserver/pagebench/src/cmd/aux_files.rs b/pageserver/pagebench/src/cmd/aux_files.rs index 6441c047c2..43d7a73399 100644 --- a/pageserver/pagebench/src/cmd/aux_files.rs +++ b/pageserver/pagebench/src/cmd/aux_files.rs @@ -62,7 +62,7 @@ async fn main_impl(args: Args) -> anyhow::Result<()> { let tenant_shard_id = TenantShardId::unsharded(timeline.tenant_id); let timeline_id = timeline.timeline_id; - println!("operating on timeline {}", timeline); + println!("operating on timeline {timeline}"); mgmt_api_client .set_tenant_config(&TenantConfigRequest { @@ -75,8 +75,8 @@ async fn main_impl(args: Args) -> anyhow::Result<()> { let items = (0..100) .map(|id| { ( - format!("pg_logical/mappings/{:03}.{:03}", batch, id), - format!("{:08}", id), + format!("pg_logical/mappings/{batch:03}.{id:03}"), + format!("{id:08}"), ) }) .collect::>(); diff --git a/pageserver/pagebench/src/cmd/basebackup.rs b/pageserver/pagebench/src/cmd/basebackup.rs index 43ad92980c..c14bb73136 100644 --- a/pageserver/pagebench/src/cmd/basebackup.rs +++ b/pageserver/pagebench/src/cmd/basebackup.rs @@ -1,20 +1,29 @@ use std::collections::HashMap; use std::num::NonZeroUsize; use std::ops::Range; -use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; +use std::pin::Pin; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Instant; -use anyhow::Context; +use anyhow::anyhow; +use 
futures::TryStreamExt as _; use pageserver_api::shard::TenantShardId; use pageserver_client::mgmt_api::ForceAwaitLogicalSize; use pageserver_client::page_service::BasebackupRequest; +use pageserver_page_api as page_api; use rand::prelude::*; +use tokio::io::AsyncRead; use tokio::sync::Barrier; use tokio::task::JoinSet; +use tokio_util::compat::{TokioAsyncReadCompatExt as _, TokioAsyncWriteCompatExt as _}; +use tokio_util::io::StreamReader; +use tonic::async_trait; use tracing::{info, instrument}; +use url::Url; use utils::id::TenantTimelineId; use utils::lsn::Lsn; +use utils::shard::ShardIndex; use crate::util::tokio_thread_local_stats::AllThreadLocalStats; use crate::util::{request_stats, tokio_thread_local_stats}; @@ -24,14 +33,15 @@ use crate::util::{request_stats, tokio_thread_local_stats}; pub(crate) struct Args { #[clap(long, default_value = "http://localhost:9898")] mgmt_api_endpoint: String, - #[clap(long, default_value = "postgres://postgres@localhost:64000")] + /// The Pageserver to connect to. Use postgresql:// for libpq, or grpc:// for gRPC. + #[clap(long, default_value = "postgresql://postgres@localhost:64000")] page_service_connstring: String, #[clap(long)] pageserver_jwt: Option, #[clap(long, default_value = "1")] num_clients: NonZeroUsize, - #[clap(long, default_value = "1.0")] - gzip_probability: f64, + #[clap(long)] + no_compression: bool, #[clap(long)] runtime: Option, #[clap(long)] @@ -146,12 +156,27 @@ async fn main_impl( let mut work_senders = HashMap::new(); let mut tasks = Vec::new(); - for tl in &timelines { + let scheme = match Url::parse(&args.page_service_connstring) { + Ok(url) => url.scheme().to_lowercase().to_string(), + Err(url::ParseError::RelativeUrlWithoutBase) => "postgresql".to_string(), + Err(err) => return Err(anyhow!("invalid connstring: {err}")), + }; + for &tl in &timelines { let (sender, receiver) = tokio::sync::mpsc::channel(1); // TODO: not sure what the implications of this are work_senders.insert(tl, sender); - tasks.push(tokio::spawn(client( - args, - *tl, + + let client: Box = match scheme.as_str() { + "postgresql" | "postgres" => Box::new( + LibpqClient::new(&args.page_service_connstring, tl, !args.no_compression).await?, + ), + "grpc" => Box::new( + GrpcClient::new(&args.page_service_connstring, tl, !args.no_compression).await?, + ), + scheme => return Err(anyhow!("invalid scheme {scheme}")), + }; + + tasks.push(tokio::spawn(run_worker( + client, Arc::clone(&start_work_barrier), receiver, Arc::clone(&all_work_done_barrier), @@ -166,13 +191,7 @@ async fn main_impl( let mut rng = rand::thread_rng(); let target = all_targets.choose(&mut rng).unwrap(); let lsn = target.lsn_range.clone().map(|r| rng.gen_range(r)); - ( - target.timeline, - Work { - lsn, - gzip: rng.gen_bool(args.gzip_probability), - }, - ) + (target.timeline, Work { lsn }) }; let sender = work_senders.get(&timeline).unwrap(); // TODO: what if this blocks? 
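// The connstring dispatch above selects the protocol from the URL scheme, falling back to
// libpq when `Url::parse` reports a missing scheme (RelativeUrlWithoutBase). For example
// (hypothetical ports):
//
//     --page-service-connstring postgresql://postgres@localhost:64000   // libpq client
//     --page-service-connstring grpc://localhost:51051                  // gRPC client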
@@ -216,13 +235,11 @@ async fn main_impl( #[derive(Copy, Clone)] struct Work { lsn: Option, - gzip: bool, } #[instrument(skip_all)] -async fn client( - args: &'static Args, - timeline: TenantTimelineId, +async fn run_worker( + mut client: Box, start_work_barrier: Arc, mut work: tokio::sync::mpsc::Receiver, all_work_done_barrier: Arc, @@ -230,37 +247,14 @@ async fn client( ) { start_work_barrier.wait().await; - let client = pageserver_client::page_service::Client::new(args.page_service_connstring.clone()) - .await - .unwrap(); - - while let Some(Work { lsn, gzip }) = work.recv().await { + while let Some(Work { lsn }) = work.recv().await { let start = Instant::now(); - let copy_out_stream = client - .basebackup(&BasebackupRequest { - tenant_id: timeline.tenant_id, - timeline_id: timeline.timeline_id, - lsn, - gzip, - }) - .await - .with_context(|| format!("start basebackup for {timeline}")) - .unwrap(); + let stream = client.basebackup(lsn).await.unwrap(); - use futures::StreamExt; - let size = Arc::new(AtomicUsize::new(0)); - copy_out_stream - .for_each({ - |r| { - let size = Arc::clone(&size); - async move { - let size = Arc::clone(&size); - size.fetch_add(r.unwrap().len(), Ordering::Relaxed); - } - } - }) - .await; - info!("basebackup size is {} bytes", size.load(Ordering::Relaxed)); + let size = futures::io::copy(stream.compat(), &mut tokio::io::sink().compat_write()) + .await + .unwrap(); + info!("basebackup size is {size} bytes"); let elapsed = start.elapsed(); live_stats.inc(); STATS.with(|stats| { @@ -270,3 +264,97 @@ async fn client( all_work_done_barrier.wait().await; } + +/// A basebackup client. This allows switching out the client protocol implementation. +#[async_trait] +trait Client: Send { + async fn basebackup( + &mut self, + lsn: Option, + ) -> anyhow::Result>>; +} + +/// A libpq-based Pageserver client. +struct LibpqClient { + inner: pageserver_client::page_service::Client, + ttid: TenantTimelineId, + compression: bool, +} + +impl LibpqClient { + async fn new( + connstring: &str, + ttid: TenantTimelineId, + compression: bool, + ) -> anyhow::Result { + Ok(Self { + inner: pageserver_client::page_service::Client::new(connstring.to_string()).await?, + ttid, + compression, + }) + } +} + +#[async_trait] +impl Client for LibpqClient { + async fn basebackup( + &mut self, + lsn: Option, + ) -> anyhow::Result>> { + let req = BasebackupRequest { + tenant_id: self.ttid.tenant_id, + timeline_id: self.ttid.timeline_id, + lsn, + gzip: self.compression, + }; + let stream = self.inner.basebackup(&req).await?; + Ok(Box::pin(StreamReader::new( + stream.map_err(std::io::Error::other), + ))) + } +} + +/// A gRPC Pageserver client. 
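// Note: unlike the GetPage path, base backups compress the payload itself (gzip requested via
// `GetBaseBackupRequest::compression`) rather than using gRPC codec compression, which is why
// `connect()` below passes `None` for the codec. Compressing the payload lets the server cache
// the compressed archive.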
+struct GrpcClient { + inner: page_api::Client, + compression: page_api::BaseBackupCompression, +} + +impl GrpcClient { + async fn new( + connstring: &str, + ttid: TenantTimelineId, + compression: bool, + ) -> anyhow::Result { + let inner = page_api::Client::connect( + connstring.to_string(), + ttid.tenant_id, + ttid.timeline_id, + ShardIndex::unsharded(), + None, + None, // NB: uses payload compression + ) + .await?; + let compression = match compression { + true => page_api::BaseBackupCompression::Gzip, + false => page_api::BaseBackupCompression::None, + }; + Ok(Self { inner, compression }) + } +} + +#[async_trait] +impl Client for GrpcClient { + async fn basebackup( + &mut self, + lsn: Option, + ) -> anyhow::Result>> { + let req = page_api::GetBaseBackupRequest { + lsn, + replica: false, + full: false, + compression: self.compression, + }; + Ok(Box::pin(self.inner.get_base_backup(req).await?)) + } +} diff --git a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs index 3f3b6e396e..01c6bea2e5 100644 --- a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs +++ b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs @@ -10,33 +10,35 @@ use anyhow::Context; use async_trait::async_trait; use bytes::Bytes; use camino::Utf8PathBuf; +use futures::{Stream, StreamExt as _}; use pageserver_api::key::Key; use pageserver_api::keyspace::KeySpaceAccum; -use pageserver_api::models::{PagestreamGetPageRequest, PagestreamRequest}; +use pageserver_api::pagestream_api::{PagestreamGetPageRequest, PagestreamRequest}; use pageserver_api::reltag::RelTag; use pageserver_api::shard::TenantShardId; -use pageserver_page_api::proto; +use pageserver_page_api as page_api; use rand::prelude::*; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use tracing::info; +use url::Url; use utils::id::TenantTimelineId; use utils::lsn::Lsn; +use utils::shard::ShardIndex; use crate::util::tokio_thread_local_stats::AllThreadLocalStats; use crate::util::{request_stats, tokio_thread_local_stats}; -#[derive(clap::ValueEnum, Clone, Debug)] -enum Protocol { - Libpq, - Grpc, -} - /// GetPage@LatestLSN, uniformly distributed across the compute-accessible keyspace. #[derive(clap::Parser)] pub(crate) struct Args { + #[clap(long, default_value = "false")] + grpc: bool, + #[clap(long, default_value = "false")] + grpc_stream: bool, #[clap(long, default_value = "http://localhost:9898")] mgmt_api_endpoint: String, + /// Pageserver connection string. Supports postgresql:// and grpc:// protocols. #[clap(long, default_value = "postgres://postgres@localhost:64000")] page_service_connstring: String, #[clap(long)] @@ -45,8 +47,9 @@ pub(crate) struct Args { num_clients: NonZeroUsize, #[clap(long)] runtime: Option, - #[clap(long, value_enum, default_value = "libpq")] - protocol: Protocol, + /// If true, enable compression (only for gRPC). + #[clap(long)] + compression: bool, /// Each client sends requests at the given rate. /// /// If a request takes too long and we should be issuing a new request already, @@ -73,6 +76,9 @@ pub(crate) struct Args { #[clap(long)] set_io_mode: Option, + #[clap(long)] + only_relnode: Option, + /// Queue depth generated in each client. 
#[clap(long, default_value = "1")] queue_depth: NonZeroUsize, @@ -87,10 +93,31 @@ pub(crate) struct Args { #[clap(long, default_value = "1")] batch_size: NonZeroUsize, - #[clap(long)] - only_relnode: Option, - targets: Option>, + + #[clap(long, default_value = "100")] + pool_max_consumers: NonZeroUsize, + + #[clap(long, default_value = "5")] + pool_error_threshold: NonZeroUsize, + + #[clap(long, default_value = "5000")] + pool_connect_timeout: NonZeroUsize, + + #[clap(long, default_value = "1000")] + pool_connect_backoff: NonZeroUsize, + + #[clap(long, default_value = "60000")] + pool_max_idle_duration: NonZeroUsize, + + #[clap(long, default_value = "0")] + max_delay_ms: usize, + + #[clap(long, default_value = "0")] + percent_drops: usize, + + #[clap(long, default_value = "0")] + percent_hangs: usize, } /// State shared by all clients @@ -147,7 +174,6 @@ pub(crate) fn main(args: Args) -> anyhow::Result<()> { main_impl(args, thread_local_stats) }) } - async fn main_impl( args: Args, all_thread_local_stats: AllThreadLocalStats, @@ -312,6 +338,7 @@ async fn main_impl( let rps_period = args .per_client_rate .map(|rps_limit| Duration::from_secs_f64(1.0 / (rps_limit as f64))); + let make_worker: &dyn Fn(WorkerId) -> Pin>> = &|worker_id| { let ss = shared_state.clone(); let cancel = cancel.clone(); @@ -325,18 +352,32 @@ async fn main_impl( .unwrap(); Box::pin(async move { - let client: Box = match args.protocol { - Protocol::Libpq => Box::new( - LibpqClient::new(args.page_service_connstring.clone(), worker_id.timeline) - .await - .unwrap(), + let scheme = match Url::parse(&args.page_service_connstring) { + Ok(url) => url.scheme().to_lowercase().to_string(), + Err(url::ParseError::RelativeUrlWithoutBase) => "postgresql".to_string(), + Err(err) => panic!("invalid connstring: {err}"), + }; + let client: Box = match scheme.as_str() { + "postgresql" | "postgres" => { + assert!(!args.compression, "libpq does not support compression"); + Box::new( + LibpqClient::new(&args.page_service_connstring, worker_id.timeline) + .await + .unwrap(), + ) + } + + "grpc" => Box::new( + GrpcClient::new( + &args.page_service_connstring, + worker_id.timeline, + args.compression, + ) + .await + .unwrap(), ), - Protocol::Grpc => Box::new( - GrpcClient::new(args.page_service_connstring.clone(), worker_id.timeline) - .await - .unwrap(), - ), + scheme => panic!("unsupported scheme {scheme}"), }; run_worker(args, client, ss, cancel, rps_period, ranges, weights).await }) @@ -543,8 +584,8 @@ struct LibpqClient { } impl LibpqClient { - async fn new(connstring: String, ttid: TenantTimelineId) -> anyhow::Result { - let inner = pageserver_client::page_service::Client::new(connstring) + async fn new(connstring: &str, ttid: TenantTimelineId) -> anyhow::Result { + let inner = pageserver_client::page_service::Client::new(connstring.to_string()) .await? .pagestream(ttid.tenant_id, ttid.timeline_id) .await?; @@ -600,34 +641,36 @@ impl Client for LibpqClient { } } -/// A gRPC client using the raw, no-frills gRPC client. +/// A gRPC Pageserver client. 
struct GrpcClient { - req_tx: tokio::sync::mpsc::Sender, - resp_rx: tonic::Streaming, + req_tx: tokio::sync::mpsc::Sender, + resp_rx: Pin> + Send>>, } impl GrpcClient { - async fn new(connstring: String, ttid: TenantTimelineId) -> anyhow::Result { - let mut client = pageserver_page_api::proto::PageServiceClient::connect(connstring).await?; + async fn new( + connstring: &str, + ttid: TenantTimelineId, + compression: bool, + ) -> anyhow::Result { + let mut client = page_api::Client::connect( + connstring.to_string(), + ttid.tenant_id, + ttid.timeline_id, + ShardIndex::unsharded(), + None, + compression.then_some(tonic::codec::CompressionEncoding::Zstd), + ) + .await?; // The channel has a buffer size of 1, since 0 is not allowed. It does not matter, since the // benchmark will control the queue depth (i.e. in-flight requests) anyway, and requests are // buffered by Tonic and the OS too. let (req_tx, req_rx) = tokio::sync::mpsc::channel(1); let req_stream = tokio_stream::wrappers::ReceiverStream::new(req_rx); - let mut req = tonic::Request::new(req_stream); - let metadata = req.metadata_mut(); - metadata.insert("neon-tenant-id", ttid.tenant_id.to_string().try_into()?); - metadata.insert("neon-timeline-id", ttid.timeline_id.to_string().try_into()?); - metadata.insert("neon-shard-id", "0000".try_into()?); + let resp_rx = Box::pin(client.get_pages(req_stream).await?); - let resp = client.get_pages(req).await?; - let resp_stream = resp.into_inner(); - - Ok(Self { - req_tx, - resp_rx: resp_stream, - }) + Ok(Self { req_tx, resp_rx }) } } @@ -641,27 +684,27 @@ impl Client for GrpcClient { rel: RelTag, blks: Vec, ) -> anyhow::Result<()> { - let req = proto::GetPageRequest { + let req = page_api::GetPageRequest { request_id: req_id, - request_class: proto::GetPageClass::Normal as i32, - read_lsn: Some(proto::ReadLsn { - request_lsn: req_lsn.0, - not_modified_since_lsn: mod_lsn.0, - }), - rel: Some(rel.into()), - block_number: blks, + request_class: page_api::GetPageClass::Normal, + read_lsn: page_api::ReadLsn { + request_lsn: req_lsn, + not_modified_since_lsn: Some(mod_lsn), + }, + rel, + block_numbers: blks, }; self.req_tx.send(req).await?; Ok(()) } async fn recv_get_page(&mut self) -> anyhow::Result<(u64, Vec)> { - let resp = self.resp_rx.message().await?.unwrap(); + let resp = self.resp_rx.next().await.unwrap().unwrap(); anyhow::ensure!( - resp.status_code == proto::GetPageStatusCode::Ok as i32, + resp.status_code == page_api::GetPageStatusCode::Ok, "unexpected status code: {}", - resp.status_code + resp.status_code, ); - Ok((resp.request_id, resp.page_image)) + Ok((resp.request_id, resp.page_images)) } } diff --git a/pageserver/src/basebackup.rs b/pageserver/src/basebackup.rs index 2a0548b811..36dada1e89 100644 --- a/pageserver/src/basebackup.rs +++ b/pageserver/src/basebackup.rs @@ -14,19 +14,19 @@ use std::fmt::Write as FmtWrite; use std::time::{Instant, SystemTime}; use anyhow::{Context, anyhow}; +use async_compression::tokio::write::GzipEncoder; use bytes::{BufMut, Bytes, BytesMut}; use fail::fail_point; use pageserver_api::key::{Key, rel_block_to_key}; use pageserver_api::reltag::{RelTag, SlruKind}; -use postgres_ffi::pg_constants::{ - DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID, PG_HBA, PGDATA_SPECIAL_FILES, -}; -use postgres_ffi::relfile_utils::{INIT_FORKNUM, MAIN_FORKNUM}; +use postgres_ffi::pg_constants::{PG_HBA, PGDATA_SPECIAL_FILES}; use postgres_ffi::{ - BLCKSZ, PG_TLI, RELSEG_SIZE, WAL_SEGMENT_SIZE, XLogFileName, dispatch_pgversion, pg_constants, + BLCKSZ, PG_TLI, PgMajorVersion, 
RELSEG_SIZE, WAL_SEGMENT_SIZE, XLogFileName, + dispatch_pgversion, pg_constants, }; -use tokio::io; -use tokio::io::AsyncWrite; +use postgres_ffi_types::constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID}; +use postgres_ffi_types::forknum::{INIT_FORKNUM, MAIN_FORKNUM}; +use tokio::io::{self, AsyncWrite, AsyncWriteExt as _}; use tokio_tar::{Builder, EntryType, Header}; use tracing::*; use utils::lsn::Lsn; @@ -97,6 +97,7 @@ impl From for tonic::Status { /// * When working without safekeepers. In this situation it is important to match the lsn /// we are taking basebackup on with the lsn that is used in pageserver's walreceiver /// to start the replication. +#[allow(clippy::too_many_arguments)] pub async fn send_basebackup_tarball<'a, W>( write: &'a mut W, timeline: &'a Timeline, @@ -104,6 +105,7 @@ pub async fn send_basebackup_tarball<'a, W>( prev_lsn: Option, full_backup: bool, replica: bool, + gzip_level: Option, ctx: &'a RequestContext, ) -> Result<(), BasebackupError> where @@ -122,7 +124,7 @@ where // prev_lsn value; that happens if the timeline was just branched from // an old LSN and it doesn't have any WAL of its own yet. We will set // prev_lsn to Lsn(0) if we cannot provide the correct value. - let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn { + let (backup_prev, lsn) = if let Some(req_lsn) = req_lsn { // Backup was requested at a particular LSN. The caller should've // already checked that it's a valid LSN. @@ -143,7 +145,7 @@ where }; // Consolidate the derived and the provided prev_lsn values - let prev_lsn = if let Some(provided_prev_lsn) = prev_lsn { + let prev_record_lsn = if let Some(provided_prev_lsn) = prev_lsn { if backup_prev != Lsn(0) && backup_prev != provided_prev_lsn { return Err(BasebackupError::Server(anyhow!( "backup_prev {backup_prev} != provided_prev_lsn {provided_prev_lsn}" @@ -155,30 +157,55 @@ where }; info!( - "taking basebackup lsn={}, prev_lsn={} (full_backup={}, replica={})", - backup_lsn, prev_lsn, full_backup, replica + "taking basebackup lsn={lsn}, prev_lsn={prev_record_lsn} \ + (full_backup={full_backup}, replica={replica}, gzip={gzip_level:?})", + ); + let span = info_span!("send_tarball", backup_lsn=%lsn); + + let io_concurrency = IoConcurrency::spawn_from_conf( + timeline.conf.get_vectored_concurrent_io, + timeline + .gate + .enter() + .map_err(|_| BasebackupError::Shutdown)?, ); - let basebackup = Basebackup { - ar: Builder::new_non_terminated(write), - timeline, - lsn: backup_lsn, - prev_record_lsn: prev_lsn, - full_backup, - replica, - ctx, - io_concurrency: IoConcurrency::spawn_from_conf( - timeline.conf.get_vectored_concurrent_io, - timeline - .gate - .enter() - .map_err(|_| BasebackupError::Shutdown)?, - ), - }; - basebackup + if let Some(gzip_level) = gzip_level { + let mut encoder = GzipEncoder::with_quality(write, gzip_level); + Basebackup { + ar: Builder::new_non_terminated(&mut encoder), + timeline, + lsn, + prev_record_lsn, + full_backup, + replica, + ctx, + io_concurrency, + } .send_tarball() - .instrument(info_span!("send_tarball", backup_lsn=%backup_lsn)) - .await + .instrument(span) + .await?; + encoder + .shutdown() + .await + .map_err(|err| BasebackupError::Client(err, "gzip"))?; + } else { + Basebackup { + ar: Builder::new_non_terminated(write), + timeline, + lsn, + prev_record_lsn, + full_backup, + replica, + ctx, + io_concurrency, + } + .send_tarball() + .instrument(span) + .await?; + } + + Ok(()) } /// This is short-living object only for the time of tarball creation, @@ -372,6 +399,7 @@ where .partition( 
self.timeline.get_shard_identity(), self.timeline.conf.max_get_vectored_keys.get() as u64 * BLCKSZ as u64, + BLCKSZ as u64, ); let mut slru_builder = SlruSegmentsBuilder::new(&mut self.ar); @@ -619,10 +647,7 @@ where }; if spcnode == GLOBALTABLESPACE_OID { - let pg_version_str = match self.timeline.pg_version { - 14 | 15 => self.timeline.pg_version.to_string(), - ver => format!("{ver}\x0A"), - }; + let pg_version_str = self.timeline.pg_version.versionfile_string(); let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?; self.ar .append(&header, pg_version_str.as_bytes()) @@ -669,7 +694,7 @@ where } // Append dir path for each database - let path = format!("base/{}", dbnode); + let path = format!("base/{dbnode}"); let header = new_tar_header_dir(&path)?; self.ar .append(&header, io::empty()) @@ -677,19 +702,16 @@ where .map_err(|e| BasebackupError::Client(e, "add_dbdir,base"))?; if let Some(img) = relmap_img { - let dst_path = format!("base/{}/PG_VERSION", dbnode); + let dst_path = format!("base/{dbnode}/PG_VERSION"); - let pg_version_str = match self.timeline.pg_version { - 14 | 15 => self.timeline.pg_version.to_string(), - ver => format!("{ver}\x0A"), - }; + let pg_version_str = self.timeline.pg_version.versionfile_string(); let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?; self.ar .append(&header, pg_version_str.as_bytes()) .await .map_err(|e| BasebackupError::Client(e, "add_dbdir,base/PG_VERSION"))?; - let relmap_path = format!("base/{}/pg_filenode.map", dbnode); + let relmap_path = format!("base/{dbnode}/pg_filenode.map"); let header = new_tar_header(&relmap_path, img.len() as u64)?; self.ar .append(&header, &img[..]) @@ -713,10 +735,10 @@ where buf.extend_from_slice(&img[..]); let crc = crc32c::crc32c(&img[..]); buf.put_u32_le(crc); - let path = if self.timeline.pg_version < 17 { - format!("pg_twophase/{:>08X}", xid) + let path = if self.timeline.pg_version < PgMajorVersion::PG17 { + format!("pg_twophase/{xid:>08X}") } else { - format!("pg_twophase/{:>016X}", xid) + format!("pg_twophase/{xid:>016X}") }; let header = new_tar_header(&path, buf.len() as u64)?; self.ar @@ -768,7 +790,7 @@ where //send wal segment let segno = self.lsn.segment_number(WAL_SEGMENT_SIZE); let wal_file_name = XLogFileName(PG_TLI, segno, WAL_SEGMENT_SIZE); - let wal_file_path = format!("pg_wal/{}", wal_file_name); + let wal_file_path = format!("pg_wal/{wal_file_name}"); let header = new_tar_header(&wal_file_path, WAL_SEGMENT_SIZE as u64)?; let wal_seg = postgres_ffi::generate_wal_segment( diff --git a/pageserver/src/basebackup_cache.rs b/pageserver/src/basebackup_cache.rs index 3a8ec555f7..4966fee2d7 100644 --- a/pageserver/src/basebackup_cache.rs +++ b/pageserver/src/basebackup_cache.rs @@ -1,12 +1,12 @@ use std::{collections::HashMap, sync::Arc}; -use async_compression::tokio::write::GzipEncoder; +use anyhow::Context; use camino::{Utf8Path, Utf8PathBuf}; use metrics::core::{AtomicU64, GenericCounter}; use pageserver_api::{config::BasebackupCacheConfig, models::TenantState}; use tokio::{ io::{AsyncWriteExt, BufWriter}, - sync::mpsc::{UnboundedReceiver, UnboundedSender}, + sync::mpsc::{Receiver, Sender, error::TrySendError}, }; use tokio_util::sync::CancellationToken; use utils::{ @@ -18,7 +18,10 @@ use utils::{ use crate::{ basebackup::send_basebackup_tarball, context::{DownloadBehavior, RequestContext}, - metrics::{BASEBACKUP_CACHE_ENTRIES, BASEBACKUP_CACHE_PREPARE, BASEBACKUP_CACHE_READ}, + metrics::{ + BASEBACKUP_CACHE_ENTRIES, BASEBACKUP_CACHE_PREPARE, 
BASEBACKUP_CACHE_PREPARE_QUEUE_SIZE, + BASEBACKUP_CACHE_READ, BASEBACKUP_CACHE_SIZE, + }, task_mgr::TaskKind, tenant::{ Timeline, @@ -32,11 +35,16 @@ pub struct BasebackupPrepareRequest { pub lsn: Lsn, } -pub type BasebackupPrepareSender = UnboundedSender; -pub type BasebackupPrepareReceiver = UnboundedReceiver; +pub type BasebackupPrepareSender = Sender; +pub type BasebackupPrepareReceiver = Receiver; -type BasebackupRemoveEntrySender = UnboundedSender; -type BasebackupRemoveEntryReceiver = UnboundedReceiver; +#[derive(Clone)] +struct CacheEntry { + /// LSN at which the basebackup was taken. + lsn: Lsn, + /// Size of the basebackup archive in bytes. + size_bytes: u64, +} /// BasebackupCache stores cached basebackup archives for timelines on local disk. /// @@ -52,68 +60,118 @@ type BasebackupRemoveEntryReceiver = UnboundedReceiver; /// and ~1 RPS for get requests. pub struct BasebackupCache { data_dir: Utf8PathBuf, - config: BasebackupCacheConfig, - tenant_manager: Arc, - remove_entry_sender: BasebackupRemoveEntrySender, + config: Option, - entries: std::sync::Mutex>, + entries: std::sync::Mutex>, - cancel: CancellationToken, + prepare_sender: BasebackupPrepareSender, read_hit_count: GenericCounter, read_miss_count: GenericCounter, read_err_count: GenericCounter, - prepare_ok_count: GenericCounter, prepare_skip_count: GenericCounter, - prepare_err_count: GenericCounter, } impl BasebackupCache { - /// Creates a BasebackupCache and spawns the background task. - /// The initialization of the cache is performed in the background and does not - /// block the caller. The cache will return `None` for any get requests until - /// initialization is complete. - pub fn spawn( - runtime_handle: &tokio::runtime::Handle, + /// Create a new BasebackupCache instance. + /// Also returns a BasebackupPrepareReceiver which is needed to start + /// the background task. + /// The cache is initialized from the data_dir in the background task. + /// The cache will return `None` for any get requests until the initialization is complete. + /// The background task is spawned separately using [`Self::spawn_background_task`] + /// to avoid a circular dependency between the cache and the tenant manager. + pub fn new( data_dir: Utf8PathBuf, config: Option, - prepare_receiver: BasebackupPrepareReceiver, - tenant_manager: Arc, - cancel: CancellationToken, - ) -> Arc { - let (remove_entry_sender, remove_entry_receiver) = tokio::sync::mpsc::unbounded_channel(); + ) -> (Arc, BasebackupPrepareReceiver) { + let chan_size = config.as_ref().map(|c| c.max_size_entries).unwrap_or(1); - let enabled = config.is_some(); + let (prepare_sender, prepare_receiver) = tokio::sync::mpsc::channel(chan_size); let cache = Arc::new(BasebackupCache { data_dir, - config: config.unwrap_or_default(), - tenant_manager, - remove_entry_sender, - + config, entries: std::sync::Mutex::new(HashMap::new()), - - cancel, + prepare_sender, read_hit_count: BASEBACKUP_CACHE_READ.with_label_values(&["hit"]), read_miss_count: BASEBACKUP_CACHE_READ.with_label_values(&["miss"]), read_err_count: BASEBACKUP_CACHE_READ.with_label_values(&["error"]), - prepare_ok_count: BASEBACKUP_CACHE_PREPARE.with_label_values(&["ok"]), prepare_skip_count: BASEBACKUP_CACHE_PREPARE.with_label_values(&["skip"]), - prepare_err_count: BASEBACKUP_CACHE_PREPARE.with_label_values(&["error"]), }); - if enabled { - runtime_handle.spawn( - cache - .clone() - .background(prepare_receiver, remove_entry_receiver), - ); - } + (cache, prepare_receiver) + } - cache + /// Spawns the background task. 
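The constructor split here exists to break a construction-order cycle: the tenant manager wants a handle to the cache (to request prepares), while the cache's background task needs the tenant manager (to list timelines). A rough sketch of the resulting two-phase pattern, with hypothetical simplified types (Cache, PrepareRequest, TenantManager) standing in for the real ones:

    use std::sync::Arc;
    use tokio::sync::mpsc;

    struct PrepareRequest;
    struct TenantManager;

    struct Cache {
        tx: mpsc::Sender<PrepareRequest>,
    }

    impl Cache {
        /// Phase 1: build the handle and the bounded queue; no heavy
        /// dependencies are needed yet, so this can run early in startup.
        /// queue_depth must be > 0 (tokio panics on zero-capacity channels).
        fn new(queue_depth: usize) -> (Arc<Self>, mpsc::Receiver<PrepareRequest>) {
            let (tx, rx) = mpsc::channel(queue_depth);
            (Arc::new(Self { tx }), rx)
        }

        /// Non-blocking enqueue: a full or closed queue drops the request
        /// instead of blocking the caller (prepares are best-effort).
        fn send_prepare(&self, req: PrepareRequest) {
            match self.tx.try_send(req) {
                Ok(()) => {}
                Err(mpsc::error::TrySendError::Full(_)) => eprintln!("prepare queue full, skipping"),
                Err(mpsc::error::TrySendError::Closed(_)) => eprintln!("prepare queue closed, skipping"),
            }
        }

        /// Phase 2: start the worker once the dependency that itself holds a
        /// cache handle (the tenant manager) has been constructed.
        fn spawn_background_task(
            self: Arc<Self>,
            runtime: &tokio::runtime::Handle,
            mut rx: mpsc::Receiver<PrepareRequest>,
            _tenant_manager: Arc<TenantManager>,
        ) {
            runtime.spawn(async move {
                // Keep the cache handle alive for the lifetime of the worker.
                let _cache = self;
                while let Some(_req) = rx.recv().await {
                    // prepare the basebackup for this request ...
                }
            });
        }
    }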
+ /// The background task initializes the cache from the disk, + /// processes prepare requests, and cleans up outdated cache entries. + /// Noop if the cache is disabled (config is None). + pub fn spawn_background_task( + self: Arc, + runtime_handle: &tokio::runtime::Handle, + prepare_receiver: BasebackupPrepareReceiver, + tenant_manager: Arc, + cancel: CancellationToken, + ) { + if let Some(config) = self.config.clone() { + let background = BackgroundTask { + c: self, + + config, + tenant_manager, + cancel, + + entry_count: 0, + total_size_bytes: 0, + + prepare_ok_count: BASEBACKUP_CACHE_PREPARE.with_label_values(&["ok"]), + prepare_skip_count: BASEBACKUP_CACHE_PREPARE.with_label_values(&["skip"]), + prepare_err_count: BASEBACKUP_CACHE_PREPARE.with_label_values(&["error"]), + }; + runtime_handle.spawn(background.run(prepare_receiver)); + } + } + + /// Send a basebackup prepare request to the background task. + /// The basebackup will be prepared asynchronously, it does not block the caller. + /// The request will be skipped if any cache limits are exceeded. + pub fn send_prepare(&self, tenant_shard_id: TenantShardId, timeline_id: TimelineId, lsn: Lsn) { + let req = BasebackupPrepareRequest { + tenant_shard_id, + timeline_id, + lsn, + }; + + BASEBACKUP_CACHE_PREPARE_QUEUE_SIZE.inc(); + let res = self.prepare_sender.try_send(req); + + if let Err(e) = res { + BASEBACKUP_CACHE_PREPARE_QUEUE_SIZE.dec(); + self.prepare_skip_count.inc(); + match e { + TrySendError::Full(_) => { + // Basebackup prepares are pretty rare, normally we should not hit this. + tracing::info!( + tenant_id = %tenant_shard_id.tenant_id, + %timeline_id, + %lsn, + "Basebackup prepare channel is full, skipping the request" + ); + } + TrySendError::Closed(_) => { + // Normal during shutdown, not critical. + tracing::info!( + tenant_id = %tenant_shard_id.tenant_id, + %timeline_id, + %lsn, + "Basebackup prepare channel is closed, skipping the request" + ); + } + } + } } /// Gets a basebackup entry from the cache. @@ -126,9 +184,13 @@ impl BasebackupCache { timeline_id: TimelineId, lsn: Lsn, ) -> Option { + if !self.is_enabled() { + return None; + } + // Fast path. Check if the entry exists using the in-memory state. let tti = TenantTimelineId::new(tenant_id, timeline_id); - if self.entries.lock().unwrap().get(&tti) != Some(&lsn) { + if self.entries.lock().unwrap().get(&tti).map(|e| e.lsn) != Some(lsn) { self.read_miss_count.inc(); return None; } @@ -153,6 +215,10 @@ impl BasebackupCache { } } + pub fn is_enabled(&self) -> bool { + self.config.is_some() + } + // Private methods. fn entry_filename(tenant_id: TenantId, timeline_id: TimelineId, lsn: Lsn) -> String { @@ -166,6 +232,42 @@ impl BasebackupCache { self.data_dir .join(Self::entry_filename(tenant_id, timeline_id, lsn)) } +} + +/// The background task that does the job to prepare basebackups +/// and manage the cache entries on disk. +/// It is a separate struct from BasebackupCache to allow holding +/// a mutable reference to this state without a mutex lock, +/// while BasebackupCache is referenced by the clients. +struct BackgroundTask { + c: Arc, + + config: BasebackupCacheConfig, + tenant_manager: Arc, + cancel: CancellationToken, + + /// Number of the entries in the cache. + /// This counter is used for metrics and applying cache limits. 
+ /// It generally should be equal to c.entries.len(), but it's calculated + /// pessimistically for abnormal situations: if we encountered some errors + /// during removing the entry from disk, we won't decrement this counter to + /// make sure that we don't exceed the limit with "trashed" files on the disk. + /// It will also count files in the data_dir that are not valid cache entries. + entry_count: usize, + /// Total size of all the entries on the disk. + /// This counter is used for metrics and applying cache limits. + /// Similar to entry_count, it is calculated pessimistically for abnormal situations. + total_size_bytes: u64, + + prepare_ok_count: GenericCounter, + prepare_skip_count: GenericCounter, + prepare_err_count: GenericCounter, +} + +impl BackgroundTask { + fn tmp_dir(&self) -> Utf8PathBuf { + self.c.data_dir.join("tmp") + } fn entry_tmp_path( &self, @@ -173,9 +275,8 @@ impl BasebackupCache { timeline_id: TimelineId, lsn: Lsn, ) -> Utf8PathBuf { - self.data_dir - .join("tmp") - .join(Self::entry_filename(tenant_id, timeline_id, lsn)) + self.tmp_dir() + .join(BasebackupCache::entry_filename(tenant_id, timeline_id, lsn)) } fn parse_entry_filename(filename: &str) -> Option<(TenantId, TimelineId, Lsn)> { @@ -194,18 +295,21 @@ impl BasebackupCache { Some((tenant_id, timeline_id, lsn)) } - async fn cleanup(&self) -> anyhow::Result<()> { - // Cleanup tmp directory. - let tmp_dir = self.data_dir.join("tmp"); - let mut tmp_dir = tokio::fs::read_dir(&tmp_dir).await?; - while let Some(dir_entry) = tmp_dir.next_entry().await? { - if let Err(e) = tokio::fs::remove_file(dir_entry.path()).await { - tracing::warn!("Failed to remove basebackup cache tmp file: {:#}", e); - } + // Recreate the tmp directory to clear all files in it. + async fn clean_tmp_dir(&self) -> anyhow::Result<()> { + let tmp_dir = self.tmp_dir(); + if tmp_dir.exists() { + tokio::fs::remove_dir_all(&tmp_dir).await?; } + tokio::fs::create_dir_all(&tmp_dir).await?; + Ok(()) + } - // Remove outdated entries. - let entries_old = self.entries.lock().unwrap().clone(); + async fn cleanup(&mut self) -> anyhow::Result<()> { + self.clean_tmp_dir().await?; + + // Leave only up-to-date entries. + let entries_old = self.c.entries.lock().unwrap().clone(); let mut entries_new = HashMap::new(); for (tenant_shard_id, tenant_slot) in self.tenant_manager.list() { if !tenant_shard_id.is_shard_zero() { @@ -218,43 +322,42 @@ impl BasebackupCache { for timeline in tenant.list_timelines() { let tti = TenantTimelineId::new(tenant_id, timeline.timeline_id); - if let Some(&entry_lsn) = entries_old.get(&tti) { - if timeline.get_last_record_lsn() <= entry_lsn { - entries_new.insert(tti, entry_lsn); + if let Some(entry) = entries_old.get(&tti) { + if timeline.get_last_record_lsn() <= entry.lsn { + entries_new.insert(tti, entry.clone()); } } } } - for (&tti, &lsn) in entries_old.iter() { + // Try to remove all entries that are not up-to-date. + for (&tti, entry) in entries_old.iter() { if !entries_new.contains_key(&tti) { - self.remove_entry_sender - .send(self.entry_path(tti.tenant_id, tti.timeline_id, lsn)) - .unwrap(); + self.try_remove_entry(tti.tenant_id, tti.timeline_id, entry) + .await; } } - BASEBACKUP_CACHE_ENTRIES.set(entries_new.len() as i64); - *self.entries.lock().unwrap() = entries_new; + // Note: BackgroundTask is the only writer for self.c.entries, + // so it couldn't have been modified concurrently. 
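That single-writer invariant is what makes the wholesale swap on the next line safe: the map is snapshotted, filtered against each timeline's last-record LSN, and replaced in one assignment, so readers only ever observe a complete old or new map. A condensed sketch of the same step, with plain u64 aliases standing in for the real TenantTimelineId/Lsn types:

    use std::collections::HashMap;
    use std::sync::Mutex;

    type TimelineId = u64;
    type Lsn = u64;

    fn cleanup(
        shared: &Mutex<HashMap<TimelineId, Lsn>>,
        last_record_lsn: impl Fn(TimelineId) -> Lsn,
    ) {
        // Snapshot; the background task is assumed to be the only writer, so
        // nothing can change between the snapshot and the swap below.
        let old = shared.lock().unwrap().clone();
        // Keep only entries whose cached LSN is still at or ahead of the timeline.
        let new: HashMap<TimelineId, Lsn> = old
            .into_iter()
            .filter(|&(ttid, lsn)| last_record_lsn(ttid) <= lsn)
            .collect();
        // Entries dropped here are also deleted from disk in the real code.
        *shared.lock().unwrap() = new;
    }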
+ *self.c.entries.lock().unwrap() = entries_new; Ok(()) } - async fn on_startup(&self) -> anyhow::Result<()> { - // Create data_dir and tmp directory if they do not exist. - tokio::fs::create_dir_all(&self.data_dir.join("tmp")) + async fn on_startup(&mut self) -> anyhow::Result<()> { + // Create data_dir if it does not exist. + tokio::fs::create_dir_all(&self.c.data_dir) .await - .map_err(|e| { - anyhow::anyhow!( - "Failed to create basebackup cache data_dir {:?}: {:?}", - self.data_dir, - e - ) - })?; + .context("Failed to create basebackup cache data directory")?; + + self.clean_tmp_dir() + .await + .context("Failed to clean tmp directory")?; // Read existing entries from the data_dir and add them to in-memory state. - let mut entries = HashMap::new(); - let mut dir = tokio::fs::read_dir(&self.data_dir).await?; + let mut entries = HashMap::::new(); + let mut dir = tokio::fs::read_dir(&self.c.data_dir).await?; while let Some(dir_entry) = dir.next_entry().await? { let filename = dir_entry.file_name(); @@ -263,33 +366,43 @@ impl BasebackupCache { continue; } + let size_bytes = dir_entry + .metadata() + .await + .map_err(|e| { + anyhow::anyhow!("Failed to read metadata for file {:?}: {:?}", filename, e) + })? + .len(); + + self.entry_count += 1; + BASEBACKUP_CACHE_ENTRIES.set(self.entry_count as u64); + + self.total_size_bytes += size_bytes; + BASEBACKUP_CACHE_SIZE.set(self.total_size_bytes); + let parsed = Self::parse_entry_filename(filename.to_string_lossy().as_ref()); let Some((tenant_id, timeline_id, lsn)) = parsed else { tracing::warn!("Invalid basebackup cache file name: {:?}", filename); continue; }; + let cur_entry = CacheEntry { lsn, size_bytes }; + let tti = TenantTimelineId::new(tenant_id, timeline_id); use std::collections::hash_map::Entry::*; match entries.entry(tti) { Occupied(mut entry) => { - let entry_lsn = *entry.get(); + let found_entry = entry.get(); // Leave only the latest entry, remove the old one. - if lsn < entry_lsn { - self.remove_entry_sender.send(self.entry_path( - tenant_id, - timeline_id, - lsn, - ))?; - } else if lsn > entry_lsn { - self.remove_entry_sender.send(self.entry_path( - tenant_id, - timeline_id, - entry_lsn, - ))?; - entry.insert(lsn); + if cur_entry.lsn < found_entry.lsn { + self.try_remove_entry(tenant_id, timeline_id, &cur_entry) + .await; + } else if cur_entry.lsn > found_entry.lsn { + self.try_remove_entry(tenant_id, timeline_id, found_entry) + .await; + entry.insert(cur_entry); } else { // Two different filenames parsed to the same timline_id and LSN. // Should never happen. @@ -300,22 +413,17 @@ impl BasebackupCache { } } Vacant(entry) => { - entry.insert(lsn); + entry.insert(cur_entry); } } } - BASEBACKUP_CACHE_ENTRIES.set(entries.len() as i64); - *self.entries.lock().unwrap() = entries; + *self.c.entries.lock().unwrap() = entries; Ok(()) } - async fn background( - self: Arc, - mut prepare_receiver: BasebackupPrepareReceiver, - mut remove_entry_receiver: BasebackupRemoveEntryReceiver, - ) { + async fn run(mut self, mut prepare_receiver: BasebackupPrepareReceiver) { // Panic in the background is a safe fallback. // It will drop receivers and the cache will be effectively disabled. self.on_startup() @@ -328,6 +436,7 @@ impl BasebackupCache { loop { tokio::select! 
{ Some(req) = prepare_receiver.recv() => { + BASEBACKUP_CACHE_PREPARE_QUEUE_SIZE.dec(); if let Err(err) = self.prepare_basebackup( req.tenant_shard_id, req.timeline_id, @@ -338,11 +447,6 @@ impl BasebackupCache { continue; } } - Some(req) = remove_entry_receiver.recv() => { - if let Err(e) = tokio::fs::remove_file(req).await { - tracing::warn!("Failed to remove basebackup cache file: {:#}", e); - } - } _ = cleanup_ticker.tick() => { self.cleanup().await.unwrap_or_else(|e| { tracing::warn!("Failed to clean up basebackup cache: {:#}", e); @@ -356,6 +460,67 @@ impl BasebackupCache { } } + /// Try to remove an entry from disk. + /// The caller is responsible for removing the entry from the in-memory state. + /// Updates size counters and corresponding metrics. + /// Ignores the filesystem errors as not-so-important, but the size counters + /// are not decremented in this case, so the file will continue to be counted + /// towards the size limits. + async fn try_remove_entry( + &mut self, + tenant_id: TenantId, + timeline_id: TimelineId, + entry: &CacheEntry, + ) { + let entry_path = self.c.entry_path(tenant_id, timeline_id, entry.lsn); + + match tokio::fs::remove_file(&entry_path).await { + Ok(_) => {} + Err(e) if e.kind() == std::io::ErrorKind::NotFound => {} + Err(e) => { + tracing::warn!( + "Failed to remove basebackup cache file for tenant {} timeline {} LSN {}: {:#}", + tenant_id, + timeline_id, + entry.lsn, + e + ); + return; + } + } + + self.entry_count -= 1; + BASEBACKUP_CACHE_ENTRIES.set(self.entry_count as u64); + + self.total_size_bytes -= entry.size_bytes; + BASEBACKUP_CACHE_SIZE.set(self.total_size_bytes); + } + + /// Insert the cache entry into in-memory state and update the size counters. + /// Assumes that the file for the entry already exists on disk. + /// If the entry already exists with previous LSN, it will be removed. + async fn upsert_entry( + &mut self, + tenant_id: TenantId, + timeline_id: TimelineId, + entry: CacheEntry, + ) { + let tti = TenantTimelineId::new(tenant_id, timeline_id); + + self.entry_count += 1; + BASEBACKUP_CACHE_ENTRIES.set(self.entry_count as u64); + + self.total_size_bytes += entry.size_bytes; + BASEBACKUP_CACHE_SIZE.set(self.total_size_bytes); + + let old_entry = self.c.entries.lock().unwrap().insert(tti, entry); + + if let Some(old_entry) = old_entry { + self.try_remove_entry(tenant_id, timeline_id, &old_entry) + .await; + } + } + /// Prepare a basebackup for the given timeline. /// /// If the basebackup already exists with a higher LSN or the timeline already @@ -364,7 +529,7 @@ impl BasebackupCache { /// The basebackup is prepared in a temporary directory and then moved to the final /// location to make the operation atomic. async fn prepare_basebackup( - &self, + &mut self, tenant_shard_id: TenantShardId, timeline_id: TimelineId, req_lsn: Lsn, @@ -378,30 +543,44 @@ impl BasebackupCache { let tti = TenantTimelineId::new(tenant_shard_id.tenant_id, timeline_id); + // TODO(diko): I don't think we will hit the limit, + // but if we do, it makes sense to try to evict oldest entries. 
here + if self.entry_count >= self.config.max_size_entries { + tracing::info!( + %tenant_shard_id, + %timeline_id, + %req_lsn, + "Basebackup cache is full (max_size_entries), skipping basebackup", + ); + self.prepare_skip_count.inc(); + return Ok(()); + } + + if self.total_size_bytes >= self.config.max_total_size_bytes { + tracing::info!( + %tenant_shard_id, + %timeline_id, + %req_lsn, + "Basebackup cache is full (max_total_size_bytes), skipping basebackup", + ); + self.prepare_skip_count.inc(); + return Ok(()); + } + { - let entries = self.entries.lock().unwrap(); - if let Some(&entry_lsn) = entries.get(&tti) { - if entry_lsn >= req_lsn { + let entries = self.c.entries.lock().unwrap(); + if let Some(entry) = entries.get(&tti) { + if entry.lsn >= req_lsn { tracing::info!( %timeline_id, %req_lsn, - %entry_lsn, + %entry.lsn, "Basebackup entry already exists for timeline with higher LSN, skipping basebackup", ); self.prepare_skip_count.inc(); return Ok(()); } } - - if entries.len() as i64 >= self.config.max_size_entries { - tracing::info!( - %timeline_id, - %req_lsn, - "Basebackup cache is full, skipping basebackup", - ); - self.prepare_skip_count.inc(); - return Ok(()); - } } let tenant = self @@ -437,56 +616,54 @@ impl BasebackupCache { .prepare_basebackup_tmp(&entry_tmp_path, &timeline, req_lsn) .await; - if let Err(err) = res { - tracing::info!("Failed to prepare basebackup tmp file: {:#}", err); - // Try to clean up tmp file. If we fail, the background clean up task will take care of it. - match tokio::fs::remove_file(&entry_tmp_path).await { - Ok(_) => {} - Err(e) if e.kind() == std::io::ErrorKind::NotFound => {} - Err(e) => { - tracing::info!("Failed to remove basebackup tmp file: {:?}", e); + let entry = match res { + Ok(entry) => entry, + Err(err) => { + tracing::info!("Failed to prepare basebackup tmp file: {:#}", err); + // Try to clean up tmp file. If we fail, the background clean up task will take care of it. + match tokio::fs::remove_file(&entry_tmp_path).await { + Ok(_) => {} + Err(e) if e.kind() == std::io::ErrorKind::NotFound => {} + Err(e) => { + tracing::info!("Failed to remove basebackup tmp file: {:?}", e); + } } + return Err(err); } - return Err(err); - } + }; // Move the tmp file to the final location atomically. - let entry_path = self.entry_path(tenant_shard_id.tenant_id, timeline_id, req_lsn); + // The tmp file is fsynced, so it's guaranteed that we will not have a partial file + // in the main directory. + // It's not necessary to fsync the inode after renaming, because the worst case is that + // the rename operation will be rolled back on the disk failure, the entry will disappear + // from the main directory, and the entry access will cause a cache miss. + let entry_path = self + .c + .entry_path(tenant_shard_id.tenant_id, timeline_id, req_lsn); tokio::fs::rename(&entry_tmp_path, &entry_path).await?; - let mut entries = self.entries.lock().unwrap(); - if let Some(old_lsn) = entries.insert(tti, req_lsn) { - // Remove the old entry if it exists. - self.remove_entry_sender - .send(self.entry_path(tenant_shard_id.tenant_id, timeline_id, old_lsn)) - .unwrap(); - } - BASEBACKUP_CACHE_ENTRIES.set(entries.len() as i64); + self.upsert_entry(tenant_shard_id.tenant_id, timeline_id, entry) + .await; self.prepare_ok_count.inc(); Ok(()) } /// Prepares a basebackup in a temporary file. + /// Guarantees that the tmp file is fsynced before returning. 
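The fsync guarantee documented above, combined with the rename() in prepare_basebackup(), is the standard atomic-write sequence: write to a temp file, fsync it, then rename it into place so a crash can never expose a partially written archive. A minimal sketch of that sequence, using a hypothetical write_atomically helper and anyhow for error handling:

    use camino::Utf8Path;
    use tokio::io::AsyncWriteExt;

    async fn write_atomically(
        tmp: &Utf8Path,
        dst: &Utf8Path,
        payload: &[u8],
    ) -> anyhow::Result<()> {
        let mut file = tokio::fs::File::create(tmp).await?;
        file.write_all(payload).await?;
        // fsync the tmp file so a crash cannot later rename a torn file
        // into the main directory.
        file.sync_all().await?;
        // rename() is atomic on POSIX filesystems: the destination either
        // keeps its old content or gets the fully written new file.
        tokio::fs::rename(tmp, dst).await?;
        Ok(())
    }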
async fn prepare_basebackup_tmp( &self, - emptry_tmp_path: &Utf8Path, + entry_tmp_path: &Utf8Path, timeline: &Arc, req_lsn: Lsn, - ) -> anyhow::Result<()> { + ) -> anyhow::Result { let ctx = RequestContext::new(TaskKind::BasebackupCache, DownloadBehavior::Download); let ctx = ctx.with_scope_timeline(timeline); - let file = tokio::fs::File::create(emptry_tmp_path).await?; + let file = tokio::fs::File::create(entry_tmp_path).await?; let mut writer = BufWriter::new(file); - let mut encoder = GzipEncoder::with_quality( - &mut writer, - // Level::Best because compression is not on the hot path of basebackup requests. - // The decompression is almost not affected by the compression level. - async_compression::Level::Best, - ); - // We may receive a request before the WAL record is applied to the timeline. // Wait for the requested LSN to be applied. timeline @@ -499,20 +676,28 @@ impl BasebackupCache { .await?; send_basebackup_tarball( - &mut encoder, + &mut writer, timeline, Some(req_lsn), None, false, false, + // Level::Best because compression is not on the hot path of basebackup requests. + // The decompression is almost not affected by the compression level. + Some(async_compression::Level::Best), &ctx, ) .await?; - encoder.shutdown().await?; writer.flush().await?; writer.into_inner().sync_all().await?; - Ok(()) + // TODO(diko): we can count it via Writer wrapper instead of a syscall. + let size_bytes = tokio::fs::metadata(entry_tmp_path).await?.len(); + + Ok(CacheEntry { + lsn: req_lsn, + size_bytes, + }) } } diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index a1a95ad2d1..327384fd82 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -23,6 +23,7 @@ use pageserver::deletion_queue::DeletionQueue; use pageserver::disk_usage_eviction_task::{self, launch_disk_usage_global_eviction_task}; use pageserver::feature_resolver::FeatureResolver; use pageserver::metrics::{STARTUP_DURATION, STARTUP_IS_LOADING}; +use pageserver::page_service::GrpcPageServiceHandler; use pageserver::task_mgr::{ BACKGROUND_RUNTIME, COMPUTE_REQUEST_RUNTIME, MGMT_REQUEST_RUNTIME, WALRECEIVER_RUNTIME, }; @@ -568,11 +569,14 @@ fn start_pageserver( pageserver::l0_flush::L0FlushGlobalState::new(conf.l0_flush.clone()); // Scan the local 'tenants/' directory and start loading the tenants - let (basebackup_prepare_sender, basebackup_prepare_receiver) = - tokio::sync::mpsc::unbounded_channel(); + let (basebackup_cache, basebackup_prepare_receiver) = BasebackupCache::new( + conf.basebackup_cache_dir(), + conf.basebackup_cache_config.clone(), + ); let deletion_queue_client = deletion_queue.new_client(); let background_purges = mgr::BackgroundPurges::default(); - let tenant_manager = BACKGROUND_RUNTIME.block_on(mgr::init_tenant_mgr( + + let tenant_manager = mgr::init( conf, background_purges.clone(), TenantSharedResources { @@ -580,18 +584,16 @@ fn start_pageserver( remote_storage: remote_storage.clone(), deletion_queue_client, l0_flush_global_state, - basebackup_prepare_sender, - feature_resolver, + basebackup_cache: Arc::clone(&basebackup_cache), + feature_resolver: feature_resolver.clone(), }, - order, shutdown_pageserver.clone(), - ))?; + ); let tenant_manager = Arc::new(tenant_manager); + BACKGROUND_RUNTIME.block_on(mgr::init_tenant_mgr(tenant_manager.clone(), order))?; - let basebackup_cache = BasebackupCache::spawn( + basebackup_cache.spawn_background_task( BACKGROUND_RUNTIME.handle(), - conf.basebackup_cache_dir(), - conf.basebackup_cache_config.clone(), 
basebackup_prepare_receiver, Arc::clone(&tenant_manager), shutdown_pageserver.child_token(), @@ -713,6 +715,7 @@ fn start_pageserver( disk_usage_eviction_state, deletion_queue.new_client(), secondary_controller, + feature_resolver, ) .context("Failed to initialize router state")?, ); @@ -803,7 +806,6 @@ fn start_pageserver( } else { None }, - basebackup_cache, ); // Spawn a Pageserver gRPC server task. It will spawn separate tasks for @@ -814,7 +816,7 @@ fn start_pageserver( // necessary? let mut page_service_grpc = None; if let Some(grpc_listener) = grpc_listener { - page_service_grpc = Some(page_service::spawn_grpc( + page_service_grpc = Some(GrpcPageServiceHandler::spawn( tenant_manager.clone(), grpc_auth, otel_guard.as_ref().map(|g| g.dispatch.clone()), diff --git a/pageserver/src/bin/test_helper_slow_client_reads.rs b/pageserver/src/bin/test_helper_slow_client_reads.rs index 0215dd06fb..8b641abee7 100644 --- a/pageserver/src/bin/test_helper_slow_client_reads.rs +++ b/pageserver/src/bin/test_helper_slow_client_reads.rs @@ -2,7 +2,9 @@ use std::io::{Read, Write, stdin, stdout}; use std::time::Duration; use clap::Parser; -use pageserver_api::models::{PagestreamRequest, PagestreamTestRequest}; +use pageserver_api::pagestream_api::{ + PagestreamFeMessage, PagestreamRequest, PagestreamTestRequest, +}; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; @@ -28,17 +30,15 @@ async fn main() -> anyhow::Result<()> { let mut msg = 0; loop { msg += 1; - let fut = sender.send(pageserver_api::models::PagestreamFeMessage::Test( - PagestreamTestRequest { - hdr: PagestreamRequest { - reqid: 0, - request_lsn: Lsn(23), - not_modified_since: Lsn(23), - }, - batch_key: 42, - message: format!("message {}", msg), + let fut = sender.send(PagestreamFeMessage::Test(PagestreamTestRequest { + hdr: PagestreamRequest { + reqid: 0, + request_lsn: Lsn(23), + not_modified_since: Lsn(23), }, - )); + batch_key: 42, + message: format!("message {msg}"), + })); let Ok(res) = tokio::time::timeout(Duration::from_secs(10), fut).await else { eprintln!("pipe seems full"); break; diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index 3492a8d966..75b41b9b60 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -11,7 +11,7 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; -use anyhow::{Context, bail, ensure}; +use anyhow::{Context, ensure}; use camino::{Utf8Path, Utf8PathBuf}; use once_cell::sync::OnceCell; use pageserver_api::config::{ @@ -22,11 +22,13 @@ use pageserver_api::models::ImageCompressionAlgorithm; use pageserver_api::shard::TenantShardId; use pem::Pem; use postgres_backend::AuthType; +use postgres_ffi::PgMajorVersion; use remote_storage::{RemotePath, RemoteStorageConfig}; use reqwest::Url; use storage_broker::Uri; use utils::id::{NodeId, TimelineId}; use utils::logging::{LogFormat, SecretString}; +use utils::serde_percent::Percent; use crate::tenant::storage_layer::inmemory_layer::IndexEntry; use crate::tenant::{TENANTS_SEGMENT_NAME, TIMELINES_SEGMENT_NAME}; @@ -338,20 +340,16 @@ impl PageServerConf { // // Postgres distribution paths // - pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_distrib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { let path = self.pg_distrib_dir.clone(); - #[allow(clippy::manual_range_patterns)] - match pg_version { - 14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))), - _ => bail!("Unsupported postgres version: {}", pg_version), - } + Ok(path.join(pg_version.v_str())) } - 
pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { Ok(self.pg_distrib_dir(pg_version)?.join("bin")) } - pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { Ok(self.pg_distrib_dir(pg_version)?.join("lib")) } @@ -462,7 +460,16 @@ impl PageServerConf { metric_collection_endpoint, metric_collection_bucket, synthetic_size_calculation_interval, - disk_usage_based_eviction, + disk_usage_based_eviction: Some(disk_usage_based_eviction.unwrap_or( + DiskUsageEvictionTaskConfig { + max_usage_pct: Percent::new(80).unwrap(), + min_avail_bytes: 2_000_000_000, + period: Duration::from_secs(60), + #[cfg(feature = "testing")] + mock_statvfs: None, + eviction_order: Default::default(), + }, + )), test_remote_failures, ondemand_download_behavior_treat_error_as_warn, background_task_maximum_delay, @@ -700,6 +707,8 @@ impl ConfigurableSemaphore { #[cfg(test)] mod tests { + use std::time::Duration; + use camino::Utf8PathBuf; use rstest::rstest; use utils::id::NodeId; @@ -765,4 +774,56 @@ mod tests { let result = PageServerConf::parse_and_validate(NodeId(0), config_toml, &workdir); assert_eq!(result.is_ok(), is_valid); } + + #[test] + fn test_config_posthog_config_is_valid() { + let input = r#" + control_plane_api = "http://localhost:6666" + + [posthog_config] + server_api_key = "phs_AAA" + client_api_key = "phc_BBB" + project_id = "000" + private_api_url = "https://us.posthog.com" + public_api_url = "https://us.i.posthog.com" + "#; + let config_toml = toml_edit::de::from_str::(input) + .expect("posthogconfig is valid"); + let workdir = Utf8PathBuf::from("/nonexistent"); + PageServerConf::parse_and_validate(NodeId(0), config_toml, &workdir) + .expect("parse_and_validate"); + } + + #[test] + fn test_config_posthog_incomplete_config_is_valid() { + let input = r#" + control_plane_api = "http://localhost:6666" + + [posthog_config] + server_api_key = "phs_AAA" + private_api_url = "https://us.posthog.com" + public_api_url = "https://us.i.posthog.com" + "#; + let config_toml = toml_edit::de::from_str::(input) + .expect("posthogconfig is valid"); + let workdir = Utf8PathBuf::from("/nonexistent"); + PageServerConf::parse_and_validate(NodeId(0), config_toml, &workdir) + .expect("parse_and_validate"); + } + + #[test] + fn test_config_disk_usage_based_eviction_is_valid() { + let input = r#" + control_plane_api = "http://localhost:6666" + "#; + let config_toml = toml_edit::de::from_str::(input) + .expect("disk_usage_based_eviction is valid"); + let workdir = Utf8PathBuf::from("/nonexistent"); + let config = PageServerConf::parse_and_validate(NodeId(0), config_toml, &workdir).unwrap(); + let disk_usage_based_eviction = config.disk_usage_based_eviction.unwrap(); + assert_eq!(disk_usage_based_eviction.max_usage_pct.get(), 80); + assert_eq!(disk_usage_based_eviction.min_avail_bytes, 2_000_000_000); + assert_eq!(disk_usage_based_eviction.period, Duration::from_secs(60)); + assert_eq!(disk_usage_based_eviction.eviction_order, Default::default()); + } } diff --git a/pageserver/src/consumption_metrics/upload.rs b/pageserver/src/consumption_metrics/upload.rs index eba773272a..16d42b6fe4 100644 --- a/pageserver/src/consumption_metrics/upload.rs +++ b/pageserver/src/consumption_metrics/upload.rs @@ -99,7 +99,7 @@ pub(super) async fn upload_metrics_bucket( // Compose object path let datetime: DateTime = SystemTime::now().into(); - let ts_prefix = 
datetime.format("year=%Y/month=%m/day=%d/%H:%M:%SZ"); + let ts_prefix = datetime.format("year=%Y/month=%m/day=%d/hour=%H/%H:%M:%SZ"); let path = RemotePath::from_string(&format!("{ts_prefix}_{node_id}.ndjson.gz"))?; // Set up a gzip writer into a buffer @@ -109,7 +109,7 @@ pub(super) async fn upload_metrics_bucket( // Serialize and write into compressed buffer let started_at = std::time::Instant::now(); - for res in serialize_in_chunks(CHUNK_SIZE, metrics, idempotency_keys) { + for res in serialize_in_chunks_ndjson(CHUNK_SIZE, metrics, idempotency_keys) { let (_chunk, body) = res?; gzip_writer.write_all(&body).await?; } @@ -216,6 +216,86 @@ fn serialize_in_chunks<'a>( } } +/// Serializes the input metrics as NDJSON in chunks of chunk_size. Each event +/// is serialized as a separate JSON object on its own line. The provided +/// idempotency keys are injected into the corresponding metric events (reused +/// across different metrics sinks), and must have the same length as input. +fn serialize_in_chunks_ndjson<'a>( + chunk_size: usize, + input: &'a [NewRawMetric], + idempotency_keys: &'a [IdempotencyKey<'a>], +) -> impl ExactSizeIterator> + 'a +{ + use bytes::BufMut; + + assert_eq!(input.len(), idempotency_keys.len()); + + struct Iter<'a> { + inner: std::slice::Chunks<'a, NewRawMetric>, + idempotency_keys: std::slice::Iter<'a, IdempotencyKey<'a>>, + chunk_size: usize, + + // write to a BytesMut so that we can cheaply clone the frozen Bytes for retries + buffer: bytes::BytesMut, + // chunk amount of events are reused to produce the serialized document + scratch: Vec>, + } + + impl<'a> Iterator for Iter<'a> { + type Item = Result<(&'a [NewRawMetric], bytes::Bytes), serde_json::Error>; + + fn next(&mut self) -> Option { + let chunk = self.inner.next()?; + + if self.scratch.is_empty() { + // first round: create events with N strings + self.scratch.extend( + chunk + .iter() + .zip(&mut self.idempotency_keys) + .map(|(raw_metric, key)| raw_metric.as_event(key)), + ); + } else { + // next rounds: update_in_place to reuse allocations + assert_eq!(self.scratch.len(), self.chunk_size); + itertools::izip!(self.scratch.iter_mut(), chunk, &mut self.idempotency_keys) + .for_each(|(slot, raw_metric, key)| raw_metric.update_in_place(slot, key)); + } + + // Serialize each event as NDJSON (one JSON object per line) + for event in self.scratch[..chunk.len()].iter() { + let res = serde_json::to_writer((&mut self.buffer).writer(), event); + if let Err(e) = res { + return Some(Err(e)); + } + // Add newline after each event to follow NDJSON format + self.buffer.put_u8(b'\n'); + } + + Some(Ok((chunk, self.buffer.split().freeze()))) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } + } + + impl ExactSizeIterator for Iter<'_> {} + + let buffer = bytes::BytesMut::new(); + let inner = input.chunks(chunk_size); + let idempotency_keys = idempotency_keys.iter(); + let scratch = Vec::new(); + + Iter { + inner, + idempotency_keys, + chunk_size, + buffer, + scratch, + } +} + trait RawMetricExt { fn as_event(&self, key: &IdempotencyKey<'_>) -> Event; fn update_in_place(&self, event: &mut Event, key: &IdempotencyKey<'_>); @@ -479,6 +559,43 @@ mod tests { } } + #[test] + fn chunked_serialization_ndjson() { + let examples = metric_samples(); + assert!(examples.len() > 1); + + let now = Utc::now(); + let idempotency_keys = (0..examples.len()) + .map(|i| FixedGen::new(now, "1", i as u16).generate()) + .collect::>(); + + // Parse NDJSON format - each line is a separate JSON object + let parse_ndjson = 
|body: &[u8]| -> Vec> { + let body_str = std::str::from_utf8(body).unwrap(); + body_str + .trim_end_matches('\n') + .lines() + .filter(|line| !line.is_empty()) + .map(|line| serde_json::from_str::>(line).unwrap()) + .collect() + }; + + let correct = serialize_in_chunks_ndjson(examples.len(), &examples, &idempotency_keys) + .map(|res| res.unwrap().1) + .flat_map(|body| parse_ndjson(&body)) + .collect::>(); + + for chunk_size in 1..examples.len() { + let actual = serialize_in_chunks_ndjson(chunk_size, &examples, &idempotency_keys) + .map(|res| res.unwrap().1) + .flat_map(|body| parse_ndjson(&body)) + .collect::>(); + + // if these are equal, it means that multi-chunking version works as well + assert_eq!(correct, actual); + } + } + #[derive(Clone, Copy)] struct FixedGen<'a>(chrono::DateTime, &'a str, u16); diff --git a/pageserver/src/controller_upcall_client.rs b/pageserver/src/controller_upcall_client.rs index dc38ea616c..f1f9aaf43c 100644 --- a/pageserver/src/controller_upcall_client.rs +++ b/pageserver/src/controller_upcall_client.rs @@ -159,14 +159,7 @@ impl StorageControllerUpcallApi for StorageControllerUpcallClient { Ok(m) => { // Since we run one time at startup, be generous in our logging and // dump all metadata. - tracing::info!( - "Loaded node metadata: postgres {}:{}, http {}:{}, other fields: {:?}", - m.postgres_host, - m.postgres_port, - m.http_host, - m.http_port, - m.other - ); + tracing::info!("Loaded node metadata: {m}"); let az_id = { let az_id_from_metadata = m @@ -195,6 +188,8 @@ impl StorageControllerUpcallApi for StorageControllerUpcallClient { node_id: conf.id, listen_pg_addr: m.postgres_host, listen_pg_port: m.postgres_port, + listen_grpc_addr: m.grpc_host, + listen_grpc_port: m.grpc_port, listen_http_addr: m.http_host, listen_http_port: m.http_port, listen_https_port: m.https_port, diff --git a/pageserver/src/feature_resolver.rs b/pageserver/src/feature_resolver.rs index 50de3b691c..6ce4522080 100644 --- a/pageserver/src/feature_resolver.rs +++ b/pageserver/src/feature_resolver.rs @@ -1,20 +1,26 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; +use arc_swap::ArcSwap; +use pageserver_api::config::NodeMetadata; use posthog_client_lite::{ - CaptureEvent, FeatureResolverBackgroundLoop, PostHogClientConfig, PostHogEvaluationError, + CaptureEvent, FeatureResolverBackgroundLoop, PostHogEvaluationError, PostHogFlagFilterPropertyValue, }; +use rand::Rng; use remote_storage::RemoteStorageKind; use serde_json::json; use tokio_util::sync::CancellationToken; use utils::id::TenantId; -use crate::{config::PageServerConf, metrics::FEATURE_FLAG_EVALUATION}; +use crate::{config::PageServerConf, metrics::FEATURE_FLAG_EVALUATION, tenant::TenantShard}; + +const DEFAULT_POSTHOG_REFRESH_INTERVAL: Duration = Duration::from_secs(600); #[derive(Clone)] pub struct FeatureResolver { inner: Option>, internal_properties: Option>>, + force_overrides_for_testing: Arc>>, } impl FeatureResolver { @@ -22,9 +28,17 @@ impl FeatureResolver { Self { inner: None, internal_properties: None, + force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), } } + pub fn update(&self, spec: String) -> anyhow::Result<()> { + if let Some(inner) = &self.inner { + inner.update(spec)?; + } + Ok(()) + } + pub fn spawn( conf: &PageServerConf, shutdown_pageserver: CancellationToken, @@ -32,16 +46,24 @@ impl FeatureResolver { ) -> anyhow::Result { // DO NOT block in this function: make it return as fast as possible to avoid startup delays. 
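The config handling just below degrades gracefully: an invalid [posthog_config] section disables the integration with a warning instead of failing pageserver startup, and every later evaluate_* call then reports "not available". A schematic sketch of that pattern with hypothetical stand-in types (the real conversion is try_into_posthog_config()):

    // Hypothetical, simplified shapes of the on-disk and validated configs.
    struct RawPostHogConfig;
    struct ValidatedConfig;

    impl RawPostHogConfig {
        fn validate(self) -> Result<ValidatedConfig, String> {
            // the real check verifies that all required keys are present
            Ok(ValidatedConfig)
        }
    }

    struct Resolver {
        client: Option<ValidatedConfig>,
    }

    /// Missing or invalid config yields a disabled resolver rather than an error.
    fn build_resolver(raw: Option<RawPostHogConfig>) -> Resolver {
        let client = raw.and_then(|cfg| match cfg.validate() {
            Ok(v) => Some(v),
            Err(err) => {
                eprintln!("invalid posthog config, skipping posthog integration: {err}");
                None
            }
        });
        Resolver { client }
    }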
if let Some(posthog_config) = &conf.posthog_config { - let inner = FeatureResolverBackgroundLoop::new( - PostHogClientConfig { - server_api_key: posthog_config.server_api_key.clone(), - client_api_key: posthog_config.client_api_key.clone(), - project_id: posthog_config.project_id.clone(), - private_api_url: posthog_config.private_api_url.clone(), - public_api_url: posthog_config.public_api_url.clone(), - }, - shutdown_pageserver, - ); + let posthog_client_config = match posthog_config.clone().try_into_posthog_config() { + Ok(config) => config, + Err(e) => { + tracing::warn!( + "invalid posthog config, skipping posthog integration: {}", + e + ); + return Ok(FeatureResolver { + inner: None, + internal_properties: None, + force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new( + HashMap::new(), + ))), + }); + } + }; + let inner = + FeatureResolverBackgroundLoop::new(posthog_client_config, shutdown_pageserver); let inner = Arc::new(inner); // The properties shared by all tenants on this pageserver. @@ -86,9 +108,38 @@ impl FeatureResolver { } } } - // TODO: add pageserver URL. + // TODO: move this to a background task so that we don't block startup in case of slow disk + let metadata_path = conf.metadata_path(); + match std::fs::read_to_string(&metadata_path) { + Ok(metadata_str) => match serde_json::from_str::(&metadata_str) { + Ok(metadata) => { + properties.insert( + "hostname".to_string(), + PostHogFlagFilterPropertyValue::String(metadata.http_host), + ); + if let Some(cplane_region) = metadata.other.get("region_id") { + if let Some(cplane_region) = cplane_region.as_str() { + // This region contains the cell number + properties.insert( + "neon_region".to_string(), + PostHogFlagFilterPropertyValue::String( + cplane_region.to_string(), + ), + ); + } + } + } + Err(e) => { + tracing::warn!("Failed to parse metadata.json: {}", e); + } + }, + Err(e) => { + tracing::warn!("Failed to read metadata.json: {}", e); + } + } Arc::new(properties) }; + let fake_tenants = { let mut tenants = Vec::new(); for i in 0..10 { @@ -98,9 +149,16 @@ impl FeatureResolver { conf.id, i ); + + let tenant_properties = PerTenantProperties { + remote_size_mb: Some(rand::thread_rng().gen_range(100.0..1000000.00)), + } + .into_posthog_properties(); + let properties = Self::collect_properties_inner( distinct_id.clone(), Some(&internal_properties), + &tenant_properties, ); tenants.push(CaptureEvent { event: "initial_tenant_report".to_string(), @@ -110,18 +168,23 @@ impl FeatureResolver { } tenants }; - // TODO: make refresh period configurable - inner - .clone() - .spawn(handle, Duration::from_secs(60), fake_tenants); + inner.clone().spawn( + handle, + posthog_config + .refresh_interval + .unwrap_or(DEFAULT_POSTHOG_REFRESH_INTERVAL), + fake_tenants, + ); Ok(FeatureResolver { inner: Some(inner), internal_properties: Some(internal_properties), + force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), }) } else { Ok(FeatureResolver { inner: None, internal_properties: None, + force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), }) } } @@ -129,6 +192,7 @@ impl FeatureResolver { fn collect_properties_inner( tenant_id: String, internal_properties: Option<&HashMap>, + tenant_properties: &HashMap, ) -> HashMap { let mut properties = HashMap::new(); if let Some(internal_properties) = internal_properties { @@ -140,6 +204,9 @@ impl FeatureResolver { "tenant_id".to_string(), PostHogFlagFilterPropertyValue::String(tenant_id), ); + for (key, value) in tenant_properties.iter() { + 
properties.insert(key.clone(), value.clone()); + } properties } @@ -147,8 +214,13 @@ impl FeatureResolver { pub(crate) fn collect_properties( &self, tenant_id: TenantId, + tenant_properties: &HashMap, ) -> HashMap { - Self::collect_properties_inner(tenant_id.to_string(), self.internal_properties.as_deref()) + Self::collect_properties_inner( + tenant_id.to_string(), + self.internal_properties.as_deref(), + tenant_properties, + ) } /// Evaluate a multivariate feature flag. Currently, we do not support any properties. @@ -160,12 +232,18 @@ impl FeatureResolver { &self, flag_key: &str, tenant_id: TenantId, + tenant_properties: &HashMap, ) -> Result { + let force_overrides = self.force_overrides_for_testing.load(); + if let Some(value) = force_overrides.get(flag_key) { + return Ok(value.clone()); + } + if let Some(inner) = &self.inner { let res = inner.feature_store().evaluate_multivariate( flag_key, &tenant_id.to_string(), - &self.collect_properties(tenant_id), + &self.collect_properties(tenant_id, tenant_properties), ); match &res { Ok(value) => { @@ -198,12 +276,22 @@ impl FeatureResolver { &self, flag_key: &str, tenant_id: TenantId, + tenant_properties: &HashMap, ) -> Result<(), PostHogEvaluationError> { + let force_overrides = self.force_overrides_for_testing.load(); + if let Some(value) = force_overrides.get(flag_key) { + return if value == "true" { + Ok(()) + } else { + Err(PostHogEvaluationError::NoConditionGroupMatched) + }; + } + if let Some(inner) = &self.inner { let res = inner.feature_store().evaluate_boolean( flag_key, &tenant_id.to_string(), - &self.collect_properties(tenant_id), + &self.collect_properties(tenant_id, tenant_properties), ); match &res { Ok(()) => { @@ -230,8 +318,97 @@ impl FeatureResolver { inner.feature_store().is_feature_flag_boolean(flag_key) } else { Err(PostHogEvaluationError::NotAvailable( - "PostHog integration is not enabled".to_string(), + "PostHog integration is not enabled, cannot auto-determine the flag type" + .to_string(), )) } } + + /// Force override a feature flag for testing. This is only for testing purposes. Assume the caller only call it + /// from a single thread so it won't race. 
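force_override_for_testing, defined next, relies on a copy-on-write map: readers do a lock-free ArcSwap::load(), while the (assumed single) writer clones the current map, edits the clone, and stores it back atomically. A minimal sketch of that structure using the arc_swap crate; the Overrides type and method names are illustrative, not part of the patch:

    use std::collections::HashMap;
    use std::sync::Arc;

    use arc_swap::ArcSwap;

    struct Overrides {
        map: ArcSwap<HashMap<String, String>>,
    }

    impl Overrides {
        fn new() -> Self {
            Self {
                map: ArcSwap::new(Arc::new(HashMap::new())),
            }
        }

        /// Lock-free read path used on every flag evaluation.
        fn get(&self, key: &str) -> Option<String> {
            self.map.load().get(key).cloned()
        }

        /// Clone-edit-store write path; safe as long as only one thread writes.
        fn set(&self, key: &str, value: Option<&str>) {
            let mut next = self.map.load().as_ref().clone();
            match value {
                Some(v) => {
                    next.insert(key.to_string(), v.to_string());
                }
                None => {
                    next.remove(key);
                }
            }
            self.map.store(Arc::new(next));
        }
    }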
+ pub fn force_override_for_testing(&self, flag_key: &str, value: Option<&str>) { + let mut force_overrides = self.force_overrides_for_testing.load().as_ref().clone(); + if let Some(value) = value { + force_overrides.insert(flag_key.to_string(), value.to_string()); + } else { + force_overrides.remove(flag_key); + } + self.force_overrides_for_testing + .store(Arc::new(force_overrides)); + } +} + +struct PerTenantProperties { + pub remote_size_mb: Option, +} + +impl PerTenantProperties { + pub fn into_posthog_properties(self) -> HashMap { + let mut properties = HashMap::new(); + if let Some(remote_size_mb) = self.remote_size_mb { + properties.insert( + "tenant_remote_size_mb".to_string(), + PostHogFlagFilterPropertyValue::Number(remote_size_mb), + ); + } + properties + } +} + +#[derive(Clone)] +pub struct TenantFeatureResolver { + inner: FeatureResolver, + tenant_id: TenantId, + cached_tenant_properties: Arc>>, +} + +impl TenantFeatureResolver { + pub fn new(inner: FeatureResolver, tenant_id: TenantId) -> Self { + Self { + inner, + tenant_id, + cached_tenant_properties: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), + } + } + + pub fn evaluate_multivariate(&self, flag_key: &str) -> Result { + self.inner.evaluate_multivariate( + flag_key, + self.tenant_id, + &self.cached_tenant_properties.load(), + ) + } + + pub fn evaluate_boolean(&self, flag_key: &str) -> Result<(), PostHogEvaluationError> { + self.inner.evaluate_boolean( + flag_key, + self.tenant_id, + &self.cached_tenant_properties.load(), + ) + } + + pub fn collect_properties(&self) -> HashMap { + self.inner + .collect_properties(self.tenant_id, &self.cached_tenant_properties.load()) + } + + pub fn is_feature_flag_boolean(&self, flag_key: &str) -> Result { + self.inner.is_feature_flag_boolean(flag_key) + } + + pub fn update_cached_tenant_properties(&self, tenant_shard: &TenantShard) { + let mut remote_size_mb = None; + for timeline in tenant_shard.list_timelines() { + let size = timeline.metrics.resident_physical_size_get(); + if size == 0 { + remote_size_mb = None; + } + if let Some(ref mut remote_size_mb) = remote_size_mb { + *remote_size_mb += size as f64 / 1024.0 / 1024.0; + } + } + self.cached_tenant_properties.store(Arc::new( + PerTenantProperties { remote_size_mb }.into_posthog_properties(), + )); + } } diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index c8a2a0209f..02094e6aa9 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -41,6 +41,7 @@ use pageserver_api::models::{ TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse, }; use pageserver_api::shard::{ShardCount, TenantShardId}; +use postgres_ffi::PgMajorVersion; use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError}; use scopeguard::defer; use serde_json::json; @@ -59,6 +60,7 @@ use crate::config::PageServerConf; use crate::context; use crate::context::{DownloadBehavior, RequestContext, RequestContextBuilder}; use crate::deletion_queue::DeletionQueueClient; +use crate::feature_resolver::FeatureResolver; use crate::pgdatadir_mapping::LsnForTimestamp; use crate::task_mgr::TaskKind; use crate::tenant::config::LocationConf; @@ -73,6 +75,7 @@ use crate::tenant::remote_timeline_client::{ use crate::tenant::secondary::SecondaryController; use crate::tenant::size::ModelInputs; use crate::tenant::storage_layer::{IoConcurrency, LayerAccessStatsReset, LayerName}; +use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::timeline::offload::{OffloadError, 
offload_timeline}; use crate::tenant::timeline::{ CompactFlags, CompactOptions, CompactRequest, CompactionError, MarkInvisibleRequest, Timeline, @@ -106,6 +109,7 @@ pub struct State { deletion_queue_client: DeletionQueueClient, secondary_controller: SecondaryController, latest_utilization: tokio::sync::Mutex>, + feature_resolver: FeatureResolver, } impl State { @@ -119,6 +123,7 @@ impl State { disk_usage_eviction_state: Arc, deletion_queue_client: DeletionQueueClient, secondary_controller: SecondaryController, + feature_resolver: FeatureResolver, ) -> anyhow::Result { let allowlist_routes = &[ "/v1/status", @@ -139,6 +144,7 @@ impl State { deletion_queue_client, secondary_controller, latest_utilization: Default::default(), + feature_resolver, }) } } @@ -282,11 +288,11 @@ impl From for ApiError { GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => { ApiError::ShuttingDown } - GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{}", e)), + GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{e}")), GetActiveTenantError::Cancelled => ApiError::ShuttingDown, GetActiveTenantError::NotFound(gte) => gte.into(), GetActiveTenantError::WaitForActiveTimeout { .. } => { - ApiError::ResourceUnavailable(format!("{}", e).into()) + ApiError::ResourceUnavailable(format!("{e}").into()) } GetActiveTenantError::SwitchedTenant => { // in our HTTP handlers, this error doesn't happen @@ -1010,7 +1016,7 @@ async fn get_lsn_by_timestamp_handler( let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; let timestamp_raw = must_get_query_param(&request, "timestamp")?; let timestamp = humantime::parse_rfc3339(×tamp_raw) - .with_context(|| format!("Invalid time: {:?}", timestamp_raw)) + .with_context(|| format!("Invalid time: {timestamp_raw:?}")) .map_err(ApiError::BadRequest)?; let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp); @@ -1105,7 +1111,7 @@ async fn get_timestamp_of_lsn_handler( json_response(StatusCode::OK, time) } None => Err(ApiError::PreconditionFailed( - format!("Timestamp for lsn {} not found", lsn).into(), + format!("Timestamp for lsn {lsn} not found").into(), )), } } @@ -1451,7 +1457,10 @@ async fn timeline_layer_scan_disposable_keys( let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download) .with_scope_timeline(&timeline); - let guard = timeline.layers.read().await; + let guard = timeline + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; let Some(layer) = guard.try_get_from_key(&layer_name.clone().into()) else { return Err(ApiError::NotFound( anyhow::anyhow!("Layer {tenant_shard_id}/{timeline_id}/{layer_name} not found").into(), @@ -1884,9 +1893,13 @@ async fn update_tenant_config_handler( let location_conf = LocationConf::attached_single( new_tenant_conf.clone(), tenant.get_generation(), - &ShardParameters::default(), + ShardParameters::from(tenant.get_shard_identity()), ); + tenant + .get_shard_identity() + .assert_equal(location_conf.shard); // not strictly necessary since we construct it above + crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf) .await .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?; @@ -1928,9 +1941,13 @@ async fn patch_tenant_config_handler( let location_conf = LocationConf::attached_single( updated, tenant.get_generation(), - &ShardParameters::default(), + ShardParameters::from(tenant.get_shard_identity()), ); + tenant + .get_shard_identity() + 
.assert_equal(location_conf.shard); // not strictly necessary since we construct it above + crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf) .await .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?; @@ -2413,7 +2430,7 @@ async fn timeline_offload_handler( } if let (false, reason) = timeline.can_offload() { return Err(ApiError::PreconditionFailed( - format!("Timeline::can_offload() check failed: {}", reason) .into(), + format!("Timeline::can_offload() check failed: {reason}") .into(), )); } offload_timeline(&tenant, &timeline) @@ -2421,6 +2438,7 @@ async fn timeline_offload_handler( .map_err(|e| { match e { OffloadError::Cancelled => ApiError::ResourceUnavailable("Timeline shutting down".into()), + OffloadError::AlreadyInProgress => ApiError::Conflict("Timeline already being offloaded or deleted".into()), _ => ApiError::InternalServerError(anyhow!(e)) } })?; @@ -3377,7 +3395,7 @@ async fn put_tenant_timeline_import_basebackup( let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?; let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?; - let pg_version: u32 = must_parse_query_param(&request, "pg_version")?; + let pg_version: PgMajorVersion = must_parse_query_param(&request, "pg_version")?; check_permission(&request, Some(tenant_id))?; @@ -3671,8 +3689,8 @@ async fn tenant_evaluate_feature_flag( let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?; check_permission(&request, Some(tenant_shard_id.tenant_id))?; - let flag: String = must_parse_query_param(&request, "flag")?; - let as_type: String = must_parse_query_param(&request, "as")?; + let flag: String = parse_request_param(&request, "flag_key")?; + let as_type: Option = parse_query_param(&request, "as")?; let state = get_state(&request); @@ -3680,23 +3698,25 @@ async fn tenant_evaluate_feature_flag( let tenant = state .tenant_manager .get_attached_tenant_shard(tenant_shard_id)?; - let properties = tenant.feature_resolver.collect_properties(tenant_shard_id.tenant_id); - if as_type == "boolean" { - let result = tenant.feature_resolver.evaluate_boolean(&flag, tenant_shard_id.tenant_id); + // TODO: the properties we get here might be stale right after it is collected. But such races are rare (updated every 10s) + // and we don't need to worry about it for now. + let properties = tenant.feature_resolver.collect_properties(); + if as_type.as_deref() == Some("boolean") { + let result = tenant.feature_resolver.evaluate_boolean(&flag); let result = result.map(|_| true).map_err(|e| e.to_string()); json_response(StatusCode::OK, json!({ "result": result, "properties": properties })) - } else if as_type == "multivariate" { - let result = tenant.feature_resolver.evaluate_multivariate(&flag, tenant_shard_id.tenant_id).map_err(|e| e.to_string()); + } else if as_type.as_deref() == Some("multivariate") { + let result = tenant.feature_resolver.evaluate_multivariate(&flag).map_err(|e| e.to_string()); json_response(StatusCode::OK, json!({ "result": result, "properties": properties })) } else { // Auto infer the type of the feature flag. 
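// Aside (illustrative sketch, not part of this patch): inside the pageserver, callers now go
// through the per-tenant wrapper added in feature_resolver.rs rather than the global
// FeatureResolver, since it carries the tenant id and the cached per-tenant properties.
// A minimal sketch of both evaluation modes, with a placeholder flag name:
fn hypothetical_boolean_gate(resolver: &crate::feature_resolver::TenantFeatureResolver) -> bool {
    // Boolean flags resolve to Ok(()) when enabled for this tenant.
    resolver.evaluate_boolean("some-hypothetical-flag").is_ok()
}
fn hypothetical_variant(resolver: &crate::feature_resolver::TenantFeatureResolver) -> Option<String> {
    // Multivariate flags resolve to the selected variant key, as exercised by this handler.
    resolver.evaluate_multivariate("some-hypothetical-flag").ok()
}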
let is_boolean = tenant.feature_resolver.is_feature_flag_boolean(&flag).map_err(|e| ApiError::InternalServerError(anyhow::anyhow!("{e}")))?; if is_boolean { - let result = tenant.feature_resolver.evaluate_boolean(&flag, tenant_shard_id.tenant_id); + let result = tenant.feature_resolver.evaluate_boolean(&flag); let result = result.map(|_| true).map_err(|e| e.to_string()); json_response(StatusCode::OK, json!({ "result": result, "properties": properties })) } else { - let result = tenant.feature_resolver.evaluate_multivariate(&flag, tenant_shard_id.tenant_id).map_err(|e| e.to_string()); + let result = tenant.feature_resolver.evaluate_multivariate(&flag).map_err(|e| e.to_string()); json_response(StatusCode::OK, json!({ "result": result, "properties": properties })) } } @@ -3705,6 +3725,49 @@ async fn tenant_evaluate_feature_flag( .await } +async fn force_override_feature_flag_for_testing_put( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&request, None)?; + + let flag: String = parse_request_param(&request, "flag_key")?; + let value: String = must_parse_query_param(&request, "value")?; + let state = get_state(&request); + state + .feature_resolver + .force_override_for_testing(&flag, Some(&value)); + json_response(StatusCode::OK, ()) +} + +async fn force_override_feature_flag_for_testing_delete( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&request, None)?; + + let flag: String = parse_request_param(&request, "flag_key")?; + let state = get_state(&request); + state + .feature_resolver + .force_override_for_testing(&flag, None); + json_response(StatusCode::OK, ()) +} + +async fn update_feature_flag_spec( + mut request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&request, None)?; + let body = json_request(&mut request).await?; + let state = get_state(&request); + state + .feature_resolver + .update(body) + .map_err(ApiError::InternalServerError)?; + json_response(StatusCode::OK, ()) +} + /// Common functionality of all the HTTP API handlers. 
/// /// - Adds a tracing span to each request (by `request_span`) @@ -4081,8 +4144,17 @@ pub fn make_router( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/activate_post_import", |r| api_handler(r, activate_post_import_handler), ) - .get("/v1/tenant/:tenant_shard_id/feature_flag", |r| { + .get("/v1/tenant/:tenant_shard_id/feature_flag/:flag_key", |r| { api_handler(r, tenant_evaluate_feature_flag) }) + .put("/v1/feature_flag/:flag_key", |r| { + testing_api_handler("force override feature flag - put", r, force_override_feature_flag_for_testing_put) + }) + .delete("/v1/feature_flag/:flag_key", |r| { + testing_api_handler("force override feature flag - delete", r, force_override_feature_flag_for_testing_delete) + }) + .post("/v1/feature_flag_spec", |r| { + api_handler(r, update_feature_flag_spec) + }) .any(handler_404)) } diff --git a/pageserver/src/import_datadir.rs b/pageserver/src/import_datadir.rs index 911449c7c5..96fe0c1078 100644 --- a/pageserver/src/import_datadir.rs +++ b/pageserver/src/import_datadir.rs @@ -520,7 +520,7 @@ async fn import_file( } if file_path.starts_with("global") { - let spcnode = postgres_ffi::pg_constants::GLOBALTABLESPACE_OID; + let spcnode = postgres_ffi_types::constants::GLOBALTABLESPACE_OID; let dbnode = 0; match file_name.as_ref() { @@ -553,7 +553,7 @@ async fn import_file( } } } else if file_path.starts_with("base") { - let spcnode = pg_constants::DEFAULTTABLESPACE_OID; + let spcnode = postgres_ffi_types::constants::DEFAULTTABLESPACE_OID; let dbnode: u32 = file_path .iter() .nth(1) diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index ae7cbf1d6b..0dd3c465e0 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -38,6 +38,7 @@ pub mod walredo; use camino::Utf8Path; use deletion_queue::DeletionQueue; +use postgres_ffi::PgMajorVersion; use tenant::mgr::{BackgroundPurges, TenantManager}; use tenant::secondary; use tracing::{info, info_span}; @@ -51,7 +52,7 @@ use tracing::{info, info_span}; /// backwards-compatible changes to the metadata format. pub const STORAGE_FORMAT_VERSION: u16 = 3; -pub const DEFAULT_PG_VERSION: u32 = 17; +pub const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; // Magic constants used to identify different kinds of files pub const IMAGE_FILE_MAGIC: u16 = 0x5A60; diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index 3eb70ffac2..21faceef49 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -1053,6 +1053,15 @@ pub(crate) static TENANT_STATE_METRIC: Lazy = Lazy::new(|| { .expect("Failed to register pageserver_tenant_states_count metric") }); +pub(crate) static TIMELINE_STATE_METRIC: Lazy = Lazy::new(|| { + register_uint_gauge_vec!( + "pageserver_timeline_states_count", + "Count of timelines per state", + &["state"] + ) + .expect("Failed to register pageserver_timeline_states_count metric") +}); + /// A set of broken tenants. /// /// These are expected to be so rare that a set is fine. Set as in a new timeseries per each broken @@ -1718,12 +1727,7 @@ impl Drop for SmgrOpTimer { impl SmgrOpFlushInProgress { /// The caller must guarantee that `socket_fd`` outlives this function. 
- pub(crate) async fn measure( - self, - started_at: Instant, - mut fut: Fut, - socket_fd: RawFd, - ) -> O + pub(crate) async fn measure(self, started_at: Instant, fut: Fut, socket_fd: RawFd) -> O where Fut: std::future::Future, { @@ -3325,6 +3329,8 @@ impl TimelineMetrics { &timeline_id, ); + TIMELINE_STATE_METRIC.with_label_values(&["active"]).inc(); + TimelineMetrics { tenant_id, shard_id, @@ -3415,7 +3421,7 @@ impl TimelineMetrics { pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) { assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. })); let labels = self.make_frozen_layer_labels(layer); - let size = layer.try_len().expect("frozen layer should have no writer"); + let size = layer.len(); TIMELINE_LAYER_COUNT .get_metric_with_label_values(&labels) .unwrap() @@ -3430,7 +3436,7 @@ impl TimelineMetrics { pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) { assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. })); let labels = self.make_frozen_layer_labels(layer); - let size = layer.try_len().expect("frozen layer should have no writer"); + let size = layer.len(); TIMELINE_LAYER_COUNT .get_metric_with_label_values(&labels) .unwrap() @@ -3479,6 +3485,8 @@ impl TimelineMetrics { return; } + TIMELINE_STATE_METRIC.with_label_values(&["active"]).dec(); + let tenant_id = &self.tenant_id; let timeline_id = &self.timeline_id; let shard_id = &self.shard_id; @@ -4415,24 +4423,30 @@ pub(crate) static BASEBACKUP_CACHE_PREPARE: Lazy = Lazy::new(|| { .expect("failed to define a metric") }); -pub(crate) static BASEBACKUP_CACHE_ENTRIES: Lazy = Lazy::new(|| { - register_int_gauge!( +pub(crate) static BASEBACKUP_CACHE_ENTRIES: Lazy = Lazy::new(|| { + register_uint_gauge!( "pageserver_basebackup_cache_entries_total", "Number of entries in the basebackup cache" ) .expect("failed to define a metric") }); -// FIXME: Support basebackup cache size metrics. 
-#[allow(dead_code)] -pub(crate) static BASEBACKUP_CACHE_SIZE: Lazy = Lazy::new(|| { - register_int_gauge!( +pub(crate) static BASEBACKUP_CACHE_SIZE: Lazy = Lazy::new(|| { + register_uint_gauge!( "pageserver_basebackup_cache_size_bytes", "Total size of all basebackup cache entries on disk in bytes" ) .expect("failed to define a metric") }); +pub(crate) static BASEBACKUP_CACHE_PREPARE_QUEUE_SIZE: Lazy = Lazy::new(|| { + register_uint_gauge!( + "pageserver_basebackup_cache_prepare_queue_size", + "Number of requests in the basebackup prepare channel" + ) + .expect("failed to define a metric") +}); + static PAGESERVER_CONFIG_IGNORED_ITEMS: Lazy = Lazy::new(|| { register_uint_gauge_vec!( "pageserver_config_ignored_items", diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 4a1ddf09b5..ad50d32dc8 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -12,9 +12,9 @@ use std::task::{Context, Poll}; use std::time::{Duration, Instant, SystemTime}; use std::{io, str}; -use anyhow::{Context as _, anyhow, bail}; -use async_compression::tokio::write::GzipEncoder; -use bytes::{Buf, BytesMut}; +use anyhow::{Context as _, bail}; +use bytes::{Buf as _, BufMut as _, BytesMut}; +use chrono::Utc; use futures::future::BoxFuture; use futures::{FutureExt, Stream}; use itertools::Itertools; @@ -25,12 +25,13 @@ use pageserver_api::config::{ PageServiceProtocolPipelinedBatchingStrategy, PageServiceProtocolPipelinedExecutionStrategy, }; use pageserver_api::key::rel_block_to_key; -use pageserver_api::models::{ - self, PageTraceEvent, PagestreamBeMessage, PagestreamDbSizeRequest, PagestreamDbSizeResponse, +use pageserver_api::models::{PageTraceEvent, TenantState}; +use pageserver_api::pagestream_api::{ + self, PagestreamBeMessage, PagestreamDbSizeRequest, PagestreamDbSizeResponse, PagestreamErrorResponse, PagestreamExistsRequest, PagestreamExistsResponse, PagestreamFeMessage, PagestreamGetPageRequest, PagestreamGetSlruSegmentRequest, PagestreamGetSlruSegmentResponse, PagestreamNblocksRequest, PagestreamNblocksResponse, - PagestreamProtocolVersion, PagestreamRequest, TenantState, + PagestreamProtocolVersion, PagestreamRequest, }; use pageserver_api::reltag::SlruKind; use pageserver_api::shard::TenantShardId; @@ -40,7 +41,7 @@ use postgres_backend::{ AuthType, PostgresBackend, PostgresBackendReader, QueryError, is_expected_io_error, }; use postgres_ffi::BLCKSZ; -use postgres_ffi::pg_constants::DEFAULTTABLESPACE_OID; +use postgres_ffi_types::constants::DEFAULTTABLESPACE_OID; use pq_proto::framed::ConnectionError; use pq_proto::{BeMessage, FeMessage, FeStartupPacket, RowDescriptor}; use smallvec::{SmallVec, smallvec}; @@ -62,7 +63,6 @@ use utils::{failpoint_support, span_record}; use crate::auth::check_permission; use crate::basebackup::{self, BasebackupError}; -use crate::basebackup_cache::BasebackupCache; use crate::config::PageServerConf; use crate::context::{ DownloadBehavior, PerfInstrumentFutureExt, RequestContext, RequestContextBuilder, @@ -137,7 +137,6 @@ pub fn spawn( perf_trace_dispatch: Option, tcp_listener: tokio::net::TcpListener, tls_config: Option>, - basebackup_cache: Arc, ) -> Listener { let cancel = CancellationToken::new(); let libpq_ctx = RequestContext::todo_child( @@ -159,7 +158,6 @@ pub fn spawn( conf.pg_auth_type, tls_config, conf.page_service_pipelining.clone(), - basebackup_cache, libpq_ctx, cancel.clone(), ) @@ -169,99 +167,6 @@ pub fn spawn( Listener { cancel, task } } -/// Spawns a gRPC server for the page service. 
-/// -/// TODO: move this onto GrpcPageServiceHandler::spawn(). -/// TODO: this doesn't support TLS. We need TLS reloading via ReloadingCertificateResolver, so we -/// need to reimplement the TCP+TLS accept loop ourselves. -pub fn spawn_grpc( - tenant_manager: Arc, - auth: Option>, - perf_trace_dispatch: Option, - get_vectored_concurrent_io: GetVectoredConcurrentIo, - listener: std::net::TcpListener, -) -> anyhow::Result { - let cancel = CancellationToken::new(); - let ctx = RequestContextBuilder::new(TaskKind::PageRequestHandler) - .download_behavior(DownloadBehavior::Download) - .perf_span_dispatch(perf_trace_dispatch) - .detached_child(); - let gate = Gate::default(); - - // Set up the TCP socket. We take a preconfigured TcpListener to bind the - // port early during startup. - let incoming = { - let _runtime = COMPUTE_REQUEST_RUNTIME.enter(); // required by TcpListener::from_std - listener.set_nonblocking(true)?; - tonic::transport::server::TcpIncoming::from(tokio::net::TcpListener::from_std(listener)?) - .with_nodelay(Some(GRPC_TCP_NODELAY)) - .with_keepalive(Some(GRPC_TCP_KEEPALIVE_TIME)) - }; - - // Set up the gRPC server. - // - // TODO: consider tuning window sizes. - let mut server = tonic::transport::Server::builder() - .http2_keepalive_interval(Some(GRPC_HTTP2_KEEPALIVE_INTERVAL)) - .http2_keepalive_timeout(Some(GRPC_HTTP2_KEEPALIVE_TIMEOUT)) - .max_concurrent_streams(Some(GRPC_MAX_CONCURRENT_STREAMS)); - - // Main page service stack. Uses a mix of Tonic interceptors and Tower layers: - // - // * Interceptors: can inspect and modify the gRPC request. Sync code only, runs before service. - // - // * Layers: allow async code, can run code after the service response. However, only has access - // to the raw HTTP request/response, not the gRPC types. - let page_service_handler = GrpcPageServiceHandler { - tenant_manager, - ctx, - gate_guard: gate.enter().expect("gate was just created"), - get_vectored_concurrent_io, - }; - - let observability_layer = ObservabilityLayer; - let mut tenant_interceptor = TenantMetadataInterceptor; - let mut auth_interceptor = TenantAuthInterceptor::new(auth); - - let page_service = tower::ServiceBuilder::new() - // Create tracing span and record request start time. - .layer(observability_layer) - // Intercept gRPC requests. - .layer(tonic::service::InterceptorLayer::new(move |mut req| { - // Extract tenant metadata. - req = tenant_interceptor.call(req)?; - // Authenticate tenant JWT token. - req = auth_interceptor.call(req)?; - Ok(req) - })) - .service(proto::PageServiceServer::new(page_service_handler)); - let server = server.add_service(page_service); - - // Reflection service for use with e.g. grpcurl. - let reflection_service = tonic_reflection::server::Builder::configure() - .register_encoded_file_descriptor_set(proto::FILE_DESCRIPTOR_SET) - .build_v1()?; - let server = server.add_service(reflection_service); - - // Spawn server task. - let task_cancel = cancel.clone(); - let task = COMPUTE_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error( - "grpc listener", - async move { - let result = server - .serve_with_incoming_shutdown(incoming, task_cancel.cancelled()) - .await; - if result.is_ok() { - // TODO: revisit shutdown logic once page service is implemented. 
- gate.close().await; - } - result - }, - )); - - Ok(CancellableTask { task, cancel }) -} - impl Listener { pub async fn stop_accepting(self) -> Connections { self.cancel.cancel(); @@ -311,7 +216,6 @@ pub async fn libpq_listener_main( auth_type: AuthType, tls_config: Option>, pipelining_config: PageServicePipeliningConfig, - basebackup_cache: Arc, listener_ctx: RequestContext, listener_cancel: CancellationToken, ) -> Connections { @@ -355,7 +259,6 @@ pub async fn libpq_listener_main( auth_type, tls_config.clone(), pipelining_config.clone(), - Arc::clone(&basebackup_cache), connection_ctx, connections_cancel.child_token(), gate_guard, @@ -398,7 +301,6 @@ async fn page_service_conn_main( auth_type: AuthType, tls_config: Option>, pipelining_config: PageServicePipeliningConfig, - basebackup_cache: Arc, connection_ctx: RequestContext, cancel: CancellationToken, gate_guard: GateGuard, @@ -464,7 +366,6 @@ async fn page_service_conn_main( pipelining_config, conf.get_vectored_concurrent_io, perf_span_fields, - basebackup_cache, connection_ctx, cancel.clone(), gate_guard, @@ -484,16 +385,14 @@ async fn page_service_conn_main( } else { let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id(); Err(io_error).context(format!( - "Postgres connection error for tenant_id={:?} client at peer_addr={}", - tenant_id, peer_addr + "Postgres connection error for tenant_id={tenant_id:?} client at peer_addr={peer_addr}" )) } } other => { let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id(); other.context(format!( - "Postgres query error for tenant_id={:?} client peer_addr={}", - tenant_id, peer_addr + "Postgres query error for tenant_id={tenant_id:?} client peer_addr={peer_addr}" )) } } @@ -520,8 +419,6 @@ struct PageServerHandler { pipelining_config: PageServicePipeliningConfig, get_vectored_concurrent_io: GetVectoredConcurrentIo, - basebackup_cache: Arc, - gate_guard: GateGuard, } @@ -716,60 +613,6 @@ enum PageStreamError { BadRequest(Cow<'static, str>), } -impl PageStreamError { - /// Converts a PageStreamError into a proto::GetPageResponse with the appropriate status - /// code, or a gRPC status if it should terminate the stream (e.g. shutdown). This is a - /// convenience method for use from a get_pages gRPC stream. - #[allow(clippy::result_large_err)] - fn into_get_page_response( - self, - request_id: page_api::RequestID, - ) -> Result { - use page_api::GetPageStatusCode; - use tonic::Code; - - // We dispatch to Into first, and then map it to a GetPageResponse. - let status: tonic::Status = self.into(); - let status_code = match status.code() { - // We shouldn't see an OK status here, because we're emitting an error. - Code::Ok => { - debug_assert_ne!(status.code(), Code::Ok); - return Err(tonic::Status::internal(format!( - "unexpected OK status: {status:?}", - ))); - } - - // These are per-request errors, returned as GetPageResponses. - Code::AlreadyExists => GetPageStatusCode::InvalidRequest, - Code::DataLoss => GetPageStatusCode::InternalError, - Code::FailedPrecondition => GetPageStatusCode::InvalidRequest, - Code::InvalidArgument => GetPageStatusCode::InvalidRequest, - Code::Internal => GetPageStatusCode::InternalError, - Code::NotFound => GetPageStatusCode::NotFound, - Code::OutOfRange => GetPageStatusCode::InvalidRequest, - Code::ResourceExhausted => GetPageStatusCode::SlowDown, - - // These should terminate the stream. 
- Code::Aborted => return Err(status), - Code::Cancelled => return Err(status), - Code::DeadlineExceeded => return Err(status), - Code::PermissionDenied => return Err(status), - Code::Unauthenticated => return Err(status), - Code::Unavailable => return Err(status), - Code::Unimplemented => return Err(status), - Code::Unknown => return Err(status), - }; - - Ok(page_api::GetPageResponse { - request_id, - status_code, - reason: Some(status.message().to_string()), - page_images: Vec::new(), - } - .into()) - } -} - impl From for tonic::Status { fn from(err: PageStreamError) -> Self { use tonic::Code; @@ -859,7 +702,7 @@ struct BatchedGetPageRequest { #[cfg(feature = "testing")] struct BatchedTestRequest { - req: models::PagestreamTestRequest, + req: pagestream_api::PagestreamTestRequest, timer: SmgrOpTimer, } @@ -873,13 +716,13 @@ enum BatchedFeMessage { span: Span, timer: SmgrOpTimer, shard: WeakHandle, - req: models::PagestreamExistsRequest, + req: PagestreamExistsRequest, }, Nblocks { span: Span, timer: SmgrOpTimer, shard: WeakHandle, - req: models::PagestreamNblocksRequest, + req: PagestreamNblocksRequest, }, GetPage { span: Span, @@ -891,13 +734,13 @@ enum BatchedFeMessage { span: Span, timer: SmgrOpTimer, shard: WeakHandle, - req: models::PagestreamDbSizeRequest, + req: PagestreamDbSizeRequest, }, GetSlruSegment { span: Span, timer: SmgrOpTimer, shard: WeakHandle, - req: models::PagestreamGetSlruSegmentRequest, + req: PagestreamGetSlruSegmentRequest, }, #[cfg(feature = "testing")] Test { @@ -1061,7 +904,6 @@ impl PageServerHandler { pipelining_config: PageServicePipeliningConfig, get_vectored_concurrent_io: GetVectoredConcurrentIo, perf_span_fields: ConnectionPerfSpanFields, - basebackup_cache: Arc, connection_ctx: RequestContext, cancel: CancellationToken, gate_guard: GateGuard, @@ -1075,7 +917,6 @@ impl PageServerHandler { cancel, pipelining_config, get_vectored_concurrent_io, - basebackup_cache, gate_guard, } } @@ -2286,8 +2127,7 @@ impl PageServerHandler { if request_lsn < not_modified_since { return Err(PageStreamError::BadRequest( format!( - "invalid request with request LSN {} and not_modified_since {}", - request_lsn, not_modified_since, + "invalid request with request LSN {request_lsn} and not_modified_since {not_modified_since}", ) .into(), )); @@ -2590,10 +2430,9 @@ impl PageServerHandler { .map(|(req, res)| { res.map(|page| { ( - PagestreamBeMessage::GetPage(models::PagestreamGetPageResponse { - req: req.req, - page, - }), + PagestreamBeMessage::GetPage( + pagestream_api::PagestreamGetPageResponse { req: req.req, page }, + ), req.timer, req.ctx, ) @@ -2660,7 +2499,7 @@ impl PageServerHandler { .map(|(req, res)| { res.map(|()| { ( - PagestreamBeMessage::Test(models::PagestreamTestResponse { + PagestreamBeMessage::Test(pagestream_api::PagestreamTestResponse { req: req.req.clone(), }), req.timer, @@ -2763,26 +2602,16 @@ impl PageServerHandler { prev_lsn, full_backup, replica, + None, &ctx, ) .await?; } else { let mut writer = BufWriter::new(pgb.copyout_writer()); - let cached = { - // Basebackup is cached only for this combination of parameters. 
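// Aside (illustrative sketch, not part of this patch): the inline eligibility check being
// removed just below is what the new Timeline::get_cached_basebackup_if_enabled() helper is
// assumed to encapsulate: the cache is only consulted for gzip'ed, explicit-LSN,
// non-incremental basebackups when caching is enabled for the timeline.
fn basebackup_cache_eligible_sketch(
    cache_enabled: bool,
    gzip: bool,
    lsn: Option<utils::lsn::Lsn>,
    prev_lsn: Option<utils::lsn::Lsn>,
) -> bool {
    cache_enabled && gzip && lsn.is_some() && prev_lsn.is_none()
}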
- if timeline.is_basebackup_cache_enabled() - && gzip - && lsn.is_some() - && prev_lsn.is_none() - { - self.basebackup_cache - .get(tenant_id, timeline_id, lsn.unwrap()) - .await - } else { - None - } - }; + let cached = timeline + .get_cached_basebackup_if_enabled(lsn, prev_lsn, full_backup, replica, gzip) + .await; if let Some(mut cached) = cached { from_cache = true; @@ -2791,31 +2620,6 @@ impl PageServerHandler { .map_err(|err| { BasebackupError::Client(err, "handle_basebackup_request,cached,copy") })?; - } else if gzip { - let mut encoder = GzipEncoder::with_quality( - &mut writer, - // NOTE using fast compression because it's on the critical path - // for compute startup. For an empty database, we get - // <100KB with this method. The Level::Best compression method - // gives us <20KB, but maybe we should add basebackup caching - // on compute shutdown first. - async_compression::Level::Fastest, - ); - basebackup::send_basebackup_tarball( - &mut encoder, - &timeline, - lsn, - prev_lsn, - full_backup, - replica, - &ctx, - ) - .await?; - // shutdown the encoder to ensure the gzip footer is written - encoder - .shutdown() - .await - .map_err(|e| QueryError::Disconnected(ConnectionError::Io(e)))?; } else { basebackup::send_basebackup_tarball( &mut writer, @@ -2824,6 +2628,11 @@ impl PageServerHandler { prev_lsn, full_backup, replica, + // NB: using fast compression because it's on the critical path for compute + // startup. For an empty database, we get <100KB with this method. The + // Level::Best compression method gives us <20KB, but maybe we should add + // basebackup caching on compute shutdown first. + gzip.then_some(async_compression::Level::Fastest), &ctx, ) .await?; @@ -3361,11 +3170,115 @@ where pub struct GrpcPageServiceHandler { tenant_manager: Arc, ctx: RequestContext, + cancel: CancellationToken, gate_guard: GateGuard, get_vectored_concurrent_io: GetVectoredConcurrentIo, } impl GrpcPageServiceHandler { + /// Spawns a gRPC server for the page service. + /// + /// TODO: this doesn't support TLS. We need TLS reloading via ReloadingCertificateResolver, so we + /// need to reimplement the TCP+TLS accept loop ourselves. + pub fn spawn( + tenant_manager: Arc, + auth: Option>, + perf_trace_dispatch: Option, + get_vectored_concurrent_io: GetVectoredConcurrentIo, + listener: std::net::TcpListener, + ) -> anyhow::Result { + let cancel = CancellationToken::new(); + let ctx = RequestContextBuilder::new(TaskKind::PageRequestHandler) + .download_behavior(DownloadBehavior::Download) + .perf_span_dispatch(perf_trace_dispatch) + .detached_child(); + let gate = Gate::default(); + + // Set up the TCP socket. We take a preconfigured TcpListener to bind the + // port early during startup. + let incoming = { + let _runtime = COMPUTE_REQUEST_RUNTIME.enter(); // required by TcpListener::from_std + listener.set_nonblocking(true)?; + tonic::transport::server::TcpIncoming::from(tokio::net::TcpListener::from_std( + listener, + )?) + .with_nodelay(Some(GRPC_TCP_NODELAY)) + .with_keepalive(Some(GRPC_TCP_KEEPALIVE_TIME)) + }; + + // Set up the gRPC server. + // + // TODO: consider tuning window sizes. + let mut server = tonic::transport::Server::builder() + .http2_keepalive_interval(Some(GRPC_HTTP2_KEEPALIVE_INTERVAL)) + .http2_keepalive_timeout(Some(GRPC_HTTP2_KEEPALIVE_TIMEOUT)) + .max_concurrent_streams(Some(GRPC_MAX_CONCURRENT_STREAMS)); + + // Main page service stack. Uses a mix of Tonic interceptors and Tower layers: + // + // * Interceptors: can inspect and modify the gRPC request. 
Sync code only, runs before service. + // + // * Layers: allow async code, can run code after the service response. However, only has access + // to the raw HTTP request/response, not the gRPC types. + let page_service_handler = GrpcPageServiceHandler { + tenant_manager, + ctx, + cancel: cancel.clone(), + gate_guard: gate.enter().expect("gate was just created"), + get_vectored_concurrent_io, + }; + + let observability_layer = ObservabilityLayer; + let mut tenant_interceptor = TenantMetadataInterceptor; + let mut auth_interceptor = TenantAuthInterceptor::new(auth); + + let page_service = tower::ServiceBuilder::new() + // Create tracing span and record request start time. + .layer(observability_layer) + // Intercept gRPC requests. + .layer(tonic::service::InterceptorLayer::new(move |mut req| { + // Extract tenant metadata. + req = tenant_interceptor.call(req)?; + // Authenticate tenant JWT token. + req = auth_interceptor.call(req)?; + Ok(req) + })) + // Run the page service. + .service( + proto::PageServiceServer::new(page_service_handler) + // Support both gzip and zstd compression. The client decides what to use. + .accept_compressed(tonic::codec::CompressionEncoding::Gzip) + .accept_compressed(tonic::codec::CompressionEncoding::Zstd) + .send_compressed(tonic::codec::CompressionEncoding::Gzip) + .send_compressed(tonic::codec::CompressionEncoding::Zstd), + ); + let server = server.add_service(page_service); + + // Reflection service for use with e.g. grpcurl. + let reflection_service = tonic_reflection::server::Builder::configure() + .register_encoded_file_descriptor_set(proto::FILE_DESCRIPTOR_SET) + .build_v1()?; + let server = server.add_service(reflection_service); + + // Spawn server task. + let task_cancel = cancel.clone(); + let task = COMPUTE_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error( + "grpc listener", + async move { + let result = server + .serve_with_incoming_shutdown(incoming, task_cancel.cancelled()) + .await; + if result.is_ok() { + // TODO: revisit shutdown logic once page service is implemented. + gate.close().await; + } + result + }, + )); + + Ok(CancellableTask { task, cancel }) + } + /// Errors if the request is executed on a non-zero shard. Only shard 0 has a complete view of /// relations and their sizes, as well as SLRU segments and similar data. #[allow(clippy::result_large_err)] @@ -3436,12 +3349,14 @@ impl GrpcPageServiceHandler { /// Processes a GetPage batch request, via the GetPages bidirectional streaming RPC. /// - /// NB: errors will terminate the stream. Per-request errors should return a GetPageResponse - /// with an appropriate status code instead. + /// NB: errors returned from here are intercepted in get_pages(), and may be converted to a + /// GetPageResponse with an appropriate status code to avoid terminating the stream. /// /// TODO: get_vectored() currently enforces a batch limit of 32. Postgres will typically send /// batches up to effective_io_concurrency = 100. Either we have to accept large batches, or /// split them up in the client or server. + /// + /// TODO: verify that the given keys belong to this shard. #[instrument(skip_all, fields(req_id, rel, blkno, blks, req_lsn, mod_lsn))] async fn get_page( ctx: &RequestContext, @@ -3454,7 +3369,7 @@ impl GrpcPageServiceHandler { let ctx = ctx.with_scope_page_service_pagestream(&timeline); // Validate the request, decorate the span, and convert it to a Pagestream request. 
- let req: page_api::GetPageRequest = req.try_into()?; + let req = page_api::GetPageRequest::try_from(req)?; span_record!( req_id = %req.request_id, @@ -3465,7 +3380,7 @@ impl GrpcPageServiceHandler { ); let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn(); // hold guard - let effective_lsn = match PageServerHandler::effective_request_lsn( + let effective_lsn = PageServerHandler::effective_request_lsn( &timeline, timeline.get_last_record_lsn(), req.read_lsn.request_lsn, @@ -3473,10 +3388,7 @@ impl GrpcPageServiceHandler { .not_modified_since_lsn .unwrap_or(req.read_lsn.request_lsn), &latest_gc_cutoff_lsn, - ) { - Ok(lsn) => lsn, - Err(err) => return err.into_get_page_response(req.request_id), - }; + )?; let mut batch = SmallVec::with_capacity(req.block_numbers.len()); for blkno in req.block_numbers { @@ -3533,7 +3445,7 @@ impl GrpcPageServiceHandler { "unexpected response: {resp:?}" ))); } - Err(err) => return err.err.into_get_page_response(req.request_id), + Err(err) => return Err(err.err.into()), }; } @@ -3587,83 +3499,113 @@ impl proto::PageService for GrpcPageServiceHandler { Ok(tonic::Response::new(resp.into())) } - // TODO: ensure clients use gzip compression for the stream. #[instrument(skip_all, fields(lsn))] async fn get_base_backup( &self, req: tonic::Request, ) -> Result, tonic::Status> { - // Send 64 KB chunks to avoid large memory allocations. - const CHUNK_SIZE: usize = 64 * 1024; + // Send chunks of 256 KB to avoid large memory allocations. pagebench basebackup shows this + // to be the sweet spot where throughput is saturated. + const CHUNK_SIZE: usize = 256 * 1024; let timeline = self.get_request_timeline(&req).await?; let ctx = self.ctx.with_scope_timeline(&timeline); - // Validate the request, decorate the span, and wait for the LSN to arrive. - // - // TODO: this requires a read LSN, is that ok? + // Validate the request and decorate the span. Self::ensure_shard_zero(&timeline)?; if timeline.is_archived() == Some(true) { return Err(tonic::Status::failed_precondition("timeline is archived")); } let req: page_api::GetBaseBackupRequest = req.into_inner().try_into()?; - span_record!(lsn=%req.read_lsn); + span_record!(lsn=?req.lsn); - let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn(); - timeline - .wait_lsn( - req.read_lsn.request_lsn, - WaitLsnWaiter::PageService, - WaitLsnTimeout::Default, - &ctx, - ) - .await?; - timeline - .check_lsn_is_in_scope(req.read_lsn.request_lsn, &latest_gc_cutoff_lsn) - .map_err(|err| { - tonic::Status::invalid_argument(format!("invalid basebackup LSN: {err}")) - })?; + // Wait for the LSN to arrive, if given. + if let Some(lsn) = req.lsn { + let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn(); + timeline + .wait_lsn( + lsn, + WaitLsnWaiter::PageService, + WaitLsnTimeout::Default, + &ctx, + ) + .await?; + timeline + .check_lsn_is_in_scope(lsn, &latest_gc_cutoff_lsn) + .map_err(|err| { + tonic::Status::invalid_argument(format!("invalid basebackup LSN: {err}")) + })?; + } // Spawn a task to run the basebackup. - // - // TODO: do we need to support full base backups, for debugging? 
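// Aside (illustrative sketch, not part of this patch): the chunk loop just below caps each
// chunk at CHUNK_SIZE by wrapping the BytesMut in bytes::BufMut::limit(). With that adapter,
// read_buf() returning 0 means either "chunk is full" or "writer closed the stream", and an
// empty chunk after unwrapping distinguishes end-of-stream. The same idiom, standalone:
async fn read_in_chunks<R>(mut reader: R, chunk_size: usize) -> std::io::Result<Vec<bytes::Bytes>>
where
    R: tokio::io::AsyncRead + Unpin,
{
    use bytes::BufMut as _;
    use tokio::io::AsyncReadExt as _;
    let mut chunks = Vec::new();
    loop {
        // The Limit adapter stops read_buf() from growing the buffer past chunk_size.
        let mut chunk = bytes::BytesMut::with_capacity(chunk_size).limit(chunk_size);
        loop {
            if reader.read_buf(&mut chunk).await? == 0 {
                break; // full chunk or closed stream
            }
        }
        let chunk = chunk.into_inner().freeze();
        if chunk.is_empty() {
            return Ok(chunks); // stream closed, nothing left to emit
        }
        chunks.push(chunk);
    }
}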
let span = Span::current(); let (mut simplex_read, mut simplex_write) = tokio::io::simplex(CHUNK_SIZE); let jh = tokio::spawn(async move { - let result = basebackup::send_basebackup_tarball( - &mut simplex_write, - &timeline, - Some(req.read_lsn.request_lsn), - None, - false, - req.replica, - &ctx, - ) - .instrument(span) // propagate request span - .await; - simplex_write.shutdown().await.map_err(|err| { - BasebackupError::Server(anyhow!("simplex shutdown failed: {err}")) - })?; + let gzip_level = match req.compression { + page_api::BaseBackupCompression::None => None, + // NB: using fast compression because it's on the critical path for compute + // startup. For an empty database, we get <100KB with this method. The + // Level::Best compression method gives us <20KB, but maybe we should add + // basebackup caching on compute shutdown first. + page_api::BaseBackupCompression::Gzip => Some(async_compression::Level::Fastest), + }; + + // Check for a cached basebackup. + let cached = timeline + .get_cached_basebackup_if_enabled( + req.lsn, + None, + req.full, + req.replica, + gzip_level.is_some(), + ) + .await; + + let result = if let Some(mut cached) = cached { + // If we have a cached basebackup, send it. + tokio::io::copy(&mut cached, &mut simplex_write) + .await + .map(|_| ()) + .map_err(|err| BasebackupError::Client(err, "cached,copy")) + } else { + basebackup::send_basebackup_tarball( + &mut simplex_write, + &timeline, + req.lsn, + None, + req.full, + req.replica, + gzip_level, + &ctx, + ) + .instrument(span) // propagate request span + .await + }; + simplex_write + .shutdown() + .await + .map_err(|err| BasebackupError::Client(err, "simplex_write"))?; result }); // Emit chunks of size CHUNK_SIZE. let chunks = async_stream::try_stream! { - let mut chunk = BytesMut::with_capacity(CHUNK_SIZE); loop { - let n = simplex_read.read_buf(&mut chunk).await.map_err(|err| { - tonic::Status::internal(format!("failed to read basebackup chunk: {err}")) - })?; - - // If we read 0 bytes, either the chunk is full or the stream is closed. - if n == 0 { - if chunk.is_empty() { - break; + let mut chunk = BytesMut::with_capacity(CHUNK_SIZE).limit(CHUNK_SIZE); + loop { + let n = simplex_read.read_buf(&mut chunk).await.map_err(|err| { + tonic::Status::internal(format!("failed to read basebackup chunk: {err}")) + })?; + if n == 0 { + break; // full chunk or closed stream } - yield proto::GetBaseBackupResponseChunk::from(chunk.clone().freeze()); - chunk.clear(); } + let chunk = chunk.into_inner().freeze(); + if chunk.is_empty() { + break; + } + yield proto::GetBaseBackupResponseChunk::from(chunk); } // Wait for the basebackup task to exit and check for errors. jh.await.map_err(|err| { @@ -3732,6 +3674,7 @@ impl proto::PageService for GrpcPageServiceHandler { // Spawn a task to handle the GetPageRequest stream. let span = Span::current(); let ctx = self.ctx.attached_child(); + let cancel = self.cancel.clone(); let mut reqs = req.into_inner(); let resps = async_stream::try_stream! { @@ -3739,10 +3682,30 @@ impl proto::PageService for GrpcPageServiceHandler { .get(ttid.tenant_id, ttid.timeline_id, shard_selector) .await? .downgrade(); - while let Some(req) = reqs.message().await? { - yield Self::get_page(&ctx, &timeline, req, io_concurrency.clone()) + + loop { + let req = tokio::select! { + req = reqs.message() => req, + _ = cancel.cancelled() => { + tracing::info!("closing getpages stream due to shutdown"); + break; + }, + }; + let req = if let Some(req) = req? 
{ + req + } else { + break; + }; + let req_id = req.request_id; + let result = Self::get_page(&ctx, &timeline, req, io_concurrency.clone()) .instrument(span.clone()) // propagate request span - .await? + .await; + yield match result { + Ok(resp) => resp, + // Convert per-request errors to GetPageResponses as appropriate, or terminate + // the stream with a tonic::Status. + Err(err) => page_api::GetPageResponse::try_from_status(err, req_id)?.into(), + } } }; @@ -3816,6 +3779,36 @@ impl proto::PageService for GrpcPageServiceHandler { let resp: page_api::GetSlruSegmentResponse = resp.segment; Ok(tonic::Response::new(resp.into())) } + + #[instrument(skip_all, fields(lsn))] + async fn lease_lsn( + &self, + req: tonic::Request, + ) -> Result, tonic::Status> { + let timeline = self.get_request_timeline(&req).await?; + let ctx = self.ctx.with_scope_timeline(&timeline); + + // Validate and convert the request, and decorate the span. + let req: page_api::LeaseLsnRequest = req.into_inner().try_into()?; + + span_record!(lsn=%req.lsn); + + // Attempt to acquire a lease. Return FailedPrecondition if the lease could not be granted. + let lease_length = timeline.get_lsn_lease_length(); + let expires = match timeline.renew_lsn_lease(req.lsn, lease_length, &ctx) { + Ok(lease) => lease.valid_until, + Err(err) => return Err(tonic::Status::failed_precondition(format!("{err}"))), + }; + + // TODO: is this spammy? Move it compute-side? + info!( + "acquired lease for {} until {}", + req.lsn, + chrono::DateTime::::from(expires).to_rfc3339() + ); + + Ok(tonic::Response::new(expires.into())) + } } /// gRPC middleware layer that handles observability concerns: diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 633d62210d..31f38d485f 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -23,12 +23,11 @@ use pageserver_api::key::{ }; use pageserver_api::keyspace::{KeySpaceRandomAccum, SparseKeySpace}; use pageserver_api::models::RelSizeMigration; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; -use pageserver_api::value::Value; -use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; -use postgres_ffi::{BLCKSZ, Oid, RepOriginId, TimestampTz, TransactionId}; +use postgres_ffi::{BLCKSZ, PgMajorVersion, TimestampTz, TransactionId}; +use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; +use postgres_ffi_types::{Oid, RepOriginId}; use serde::{Deserialize, Serialize}; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; @@ -36,6 +35,8 @@ use tracing::{debug, info, info_span, trace, warn}; use utils::bin_ser::{BeSer, DeserializeError}; use utils::lsn::Lsn; use utils::pausable_failpoint; +use wal_decoder::models::record::NeonWalRecord; +use wal_decoder::models::value::Value; use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta}; use super::tenant::{PageReconstructError, Timeline}; @@ -720,6 +721,7 @@ impl Timeline { let batches = keyspace.partition( self.get_shard_identity(), self.conf.max_get_vectored_keys.get() as u64 * BLCKSZ as u64, + BLCKSZ as u64, ); let io_concurrency = IoConcurrency::spawn_from_conf( @@ -960,6 +962,7 @@ impl Timeline { let batches = keyspace.partition( self.get_shard_identity(), self.conf.max_get_vectored_keys.get() as u64 * BLCKSZ as u64, + BLCKSZ as u64, ); let io_concurrency = IoConcurrency::spawn_from_conf( @@ -1078,7 +1081,7 @@ impl Timeline { // fetch 
directory entry let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?; - if self.pg_version >= 17 { + if self.pg_version >= PgMajorVersion::PG17 { Ok(TwoPhaseDirectoryV17::des(&buf)?.xids) } else { Ok(TwoPhaseDirectory::des(&buf)? @@ -1182,7 +1185,7 @@ impl Timeline { } let origin_id = k.field6 as RepOriginId; let origin_lsn = Lsn::des(&v) - .with_context(|| format!("decode replorigin value for {}: {v:?}", origin_id))?; + .with_context(|| format!("decode replorigin value for {origin_id}: {v:?}"))?; if origin_lsn != Lsn::INVALID { result.insert(origin_id, origin_lsn); } @@ -1610,7 +1613,7 @@ impl DatadirModification<'_> { .push((DirectoryKind::Db, MetricsUpdate::Set(0))); self.put(DBDIR_KEY, Value::Image(buf.into())); - let buf = if self.tline.pg_version >= 17 { + let buf = if self.tline.pg_version >= PgMajorVersion::PG17 { TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 { xids: HashSet::new(), }) @@ -1964,7 +1967,7 @@ impl DatadirModification<'_> { ) -> Result<(), WalIngestError> { // Add it to the directory entry let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?; - let newdirbuf = if self.tline.pg_version >= 17 { + let newdirbuf = if self.tline.pg_version >= PgMajorVersion::PG17 { let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?; if !dir.xids.insert(xid) { Err(WalIngestErrorKind::FileAlreadyExists(xid))?; @@ -2380,7 +2383,7 @@ impl DatadirModification<'_> { ) -> Result<(), WalIngestError> { // Remove it from the directory entry let buf = self.get(TWOPHASEDIR_KEY, ctx).await?; - let newdirbuf = if self.tline.pg_version >= 17 { + let newdirbuf = if self.tline.pg_version >= PgMajorVersion::PG17 { let mut dir = TwoPhaseDirectoryV17::des(&buf)?; if !dir.xids.remove(&xid) { @@ -2437,8 +2440,7 @@ impl DatadirModification<'_> { if path == p { assert!( modifying_file.is_none(), - "duplicated entries found for {}", - path + "duplicated entries found for {path}" ); modifying_file = Some(content); } else { @@ -3013,7 +3015,7 @@ mod tests { // This shard will get the even blocks let shard = ShardIdentity::from_params( ShardNumber(0), - &ShardParameters { + ShardParameters { count: ShardCount(2), stripe_size: ShardStripeSize(1), }, diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index f9fdc143b4..f4877fd763 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -38,6 +38,7 @@ use pageserver_api::models::{ WalRedoManagerStatus, }; use pageserver_api::shard::{ShardIdentity, ShardStripeSize, TenantShardId}; +use postgres_ffi::PgMajorVersion; use remote_storage::{DownloadError, GenericRemoteStorage, TimeoutOrCancel}; use remote_timeline_client::index::GcCompactionState; use remote_timeline_client::manifest::{ @@ -51,6 +52,7 @@ use secondary::heatmap::{HeatMapTenant, HeatMapTimeline}; use storage_broker::BrokerClientChannel; use timeline::compaction::{CompactionOutcome, GcCompactionQueue}; use timeline::import_pgdata::ImportingTimeline; +use timeline::layer_manager::LayerManagerLockHolder; use timeline::offload::{OffloadError, offload_timeline}; use timeline::{ CompactFlags, CompactOptions, CompactionError, PreviousHeatmap, ShutdownMode, import_pgdata, @@ -78,18 +80,19 @@ use self::timeline::uninit::{TimelineCreateGuard, TimelineExclusionError, Uninit use self::timeline::{ EvictionTaskTenantState, GcCutoffs, TimelineDeleteProgress, TimelineResources, WaitLsnError, }; -use crate::basebackup_cache::BasebackupPrepareSender; +use crate::basebackup_cache::BasebackupCache; use crate::config::PageServerConf; use crate::context; use crate::context::RequestContextBuilder; use 
crate::context::{DownloadBehavior, RequestContext}; use crate::deletion_queue::{DeletionQueueClient, DeletionQueueError}; -use crate::feature_resolver::FeatureResolver; +use crate::feature_resolver::{FeatureResolver, TenantFeatureResolver}; use crate::l0_flush::L0FlushGlobalState; use crate::metrics::{ BROKEN_TENANTS_SET, CIRCUIT_BREAKERS_BROKEN, CIRCUIT_BREAKERS_UNBROKEN, CONCURRENT_INITDBS, INITDB_RUN_TIME, INITDB_SEMAPHORE_ACQUISITION_TIME, TENANT, TENANT_OFFLOADED_TIMELINES, - TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC, remove_tenant_metrics, + TENANT_STATE_METRIC, TENANT_SYNTHETIC_SIZE_METRIC, TIMELINE_STATE_METRIC, + remove_tenant_metrics, }; use crate::task_mgr::TaskKind; use crate::tenant::config::LocationMode; @@ -159,7 +162,7 @@ pub struct TenantSharedResources { pub remote_storage: GenericRemoteStorage, pub deletion_queue_client: DeletionQueueClient, pub l0_flush_global_state: L0FlushGlobalState, - pub basebackup_prepare_sender: BasebackupPrepareSender, + pub basebackup_cache: Arc, pub feature_resolver: FeatureResolver, } @@ -328,7 +331,7 @@ pub struct TenantShard { deletion_queue_client: DeletionQueueClient, /// A channel to send async requests to prepare a basebackup for the basebackup cache. - basebackup_prepare_sender: BasebackupPrepareSender, + basebackup_cache: Arc, /// Cached logical sizes updated updated on each [`TenantShard::gather_size_inputs`]. cached_logical_sizes: tokio::sync::Mutex>, @@ -383,7 +386,7 @@ pub struct TenantShard { l0_flush_global_state: L0FlushGlobalState, - pub(crate) feature_resolver: FeatureResolver, + pub(crate) feature_resolver: TenantFeatureResolver, } impl std::fmt::Debug for TenantShard { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -494,8 +497,8 @@ impl WalRedoManager { key: pageserver_api::key::Key, lsn: Lsn, base_img: Option<(Lsn, bytes::Bytes)>, - records: Vec<(Lsn, pageserver_api::record::NeonWalRecord)>, - pg_version: u32, + records: Vec<(Lsn, wal_decoder::models::record::NeonWalRecord)>, + pg_version: PgMajorVersion, redo_attempt_type: RedoAttemptType, ) -> Result { match self { @@ -544,6 +547,28 @@ pub struct OffloadedTimeline { /// Part of the `OffloadedTimeline` object's lifecycle: this needs to be set before we drop it pub deleted_from_ancestor: AtomicBool, + + _metrics_guard: OffloadedTimelineMetricsGuard, +} + +/// Increases the offloaded timeline count metric when created, and decreases when dropped. 
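+///
+/// Together with the "active" label maintained by TimelineMetrics, the resulting metric
+/// family is expected to look roughly like this (assumed sample output, not from a real run):
+///
+///   pageserver_timeline_states_count{state="active"} 12
+///   pageserver_timeline_states_count{state="offloaded"} 3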
+struct OffloadedTimelineMetricsGuard; + +impl OffloadedTimelineMetricsGuard { + fn new() -> Self { + TIMELINE_STATE_METRIC + .with_label_values(&["offloaded"]) + .inc(); + Self + } +} + +impl Drop for OffloadedTimelineMetricsGuard { + fn drop(&mut self) { + TIMELINE_STATE_METRIC + .with_label_values(&["offloaded"]) + .dec(); + } } impl OffloadedTimeline { @@ -576,6 +601,8 @@ impl OffloadedTimeline { delete_progress: timeline.delete_progress.clone(), deleted_from_ancestor: AtomicBool::new(false), + + _metrics_guard: OffloadedTimelineMetricsGuard::new(), }) } fn from_manifest(tenant_shard_id: TenantShardId, manifest: &OffloadedTimelineManifest) -> Self { @@ -595,6 +622,7 @@ impl OffloadedTimeline { archived_at, delete_progress: TimelineDeleteProgress::default(), deleted_from_ancestor: AtomicBool::new(false), + _metrics_guard: OffloadedTimelineMetricsGuard::new(), } } fn manifest(&self) -> OffloadedTimelineManifest { @@ -906,7 +934,7 @@ pub(crate) enum CreateTimelineParams { pub(crate) struct CreateTimelineParamsBootstrap { pub(crate) new_timeline_id: TimelineId, pub(crate) existing_initdb_timeline_id: Option, - pub(crate) pg_version: u32, + pub(crate) pg_version: PgMajorVersion, } /// NB: See comment on [`CreateTimelineIdempotency::Branch`] for why there's no `pg_version` here. @@ -944,7 +972,7 @@ pub(crate) enum CreateTimelineIdempotency { /// NB: special treatment, see comment in [`Self`]. FailWithConflict, Bootstrap { - pg_version: u32, + pg_version: PgMajorVersion, }, /// NB: branches always have the same `pg_version` as their ancestor. /// While [`pageserver_api::models::TimelineCreateRequestMode::Branch::pg_version`] @@ -1289,7 +1317,7 @@ impl TenantShard { ancestor.is_some() || timeline .layers - .read() + .read(LayerManagerLockHolder::LoadLayerMap) .await .layer_map() .expect( @@ -1335,7 +1363,7 @@ impl TenantShard { remote_storage, deletion_queue_client, l0_flush_global_state, - basebackup_prepare_sender, + basebackup_cache, feature_resolver, } = resources; @@ -1352,7 +1380,7 @@ impl TenantShard { remote_storage.clone(), deletion_queue_client, l0_flush_global_state, - basebackup_prepare_sender, + basebackup_cache, feature_resolver, )); @@ -1832,6 +1860,29 @@ impl TenantShard { } } + // At this point we've initialized all timelines and are tracking them. + // Now compute the layer visibility for all (not offloaded) timelines. 
+ let compute_visiblity_for = { + let timelines_accessor = self.timelines.lock().unwrap(); + let mut timelines_offloaded_accessor = self.timelines_offloaded.lock().unwrap(); + + timelines_offloaded_accessor.extend(offloaded_timelines_list.into_iter()); + + // Before activation, populate each Timeline's GcInfo with information about its children + self.initialize_gc_info(&timelines_accessor, &timelines_offloaded_accessor, None); + + timelines_accessor.values().cloned().collect::>() + }; + + for tl in compute_visiblity_for { + tl.update_layer_visibility().await.with_context(|| { + format!( + "failed initial timeline visibility computation {} for tenant {}", + tl.timeline_id, self.tenant_shard_id + ) + })?; + } + // Walk through deleted timelines, resume deletion for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions { remote_timeline_client @@ -1851,10 +1902,6 @@ impl TenantShard { .context("resume_deletion") .map_err(LoadLocalTimelineError::ResumeDeletion)?; } - { - let mut offloaded_timelines_accessor = self.timelines_offloaded.lock().unwrap(); - offloaded_timelines_accessor.extend(offloaded_timelines_list.into_iter()); - } // Stash the preloaded tenant manifest, and upload a new manifest if changed. // @@ -2495,7 +2542,7 @@ impl TenantShard { self: &Arc, new_timeline_id: TimelineId, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ctx: &RequestContext, ) -> anyhow::Result<(UninitializedTimeline, RequestContext)> { anyhow::ensure!( @@ -2547,7 +2594,7 @@ impl TenantShard { self: &Arc, new_timeline_id: TimelineId, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ctx: &RequestContext, ) -> anyhow::Result> { let (uninit_tl, ctx) = self @@ -2586,7 +2633,7 @@ impl TenantShard { self: &Arc, new_timeline_id: TimelineId, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ctx: &RequestContext, in_memory_layer_desc: Vec, delta_layer_desc: Vec, @@ -2617,7 +2664,7 @@ impl TenantShard { } let layer_names = tline .layers - .read() + .read(LayerManagerLockHolder::Testing) .await .layer_map() .unwrap() @@ -2852,7 +2899,7 @@ impl TenantShard { Lsn(0), initdb_lsn, initdb_lsn, - 15, + PgMajorVersion::PG15, ); this.prepare_new_timeline( new_timeline_id, @@ -3132,7 +3179,12 @@ impl TenantShard { for timeline in &compact { // Collect L0 counts. Can't await while holding lock above. - if let Ok(lm) = timeline.layers.read().await.layer_map() { + if let Ok(lm) = timeline + .layers + .read(LayerManagerLockHolder::Compaction) + .await + .layer_map() + { l0_counts.insert(timeline.timeline_id, lm.level0_deltas().len()); } } @@ -3211,7 +3263,7 @@ impl TenantShard { }; let gc_compaction_strategy = self .feature_resolver - .evaluate_multivariate("gc-comapction-strategy", self.tenant_shard_id.tenant_id) + .evaluate_multivariate("gc-comapction-strategy") .ok(); let span = if let Some(gc_compaction_strategy) = gc_compaction_strategy { info_span!("gc_compact_timeline", timeline_id = %timeline.timeline_id, strategy = %gc_compaction_strategy) @@ -3233,6 +3285,7 @@ impl TenantShard { .or_else(|err| match err { // Ignore this, we likely raced with unarchival. OffloadError::NotArchived => Ok(()), + OffloadError::AlreadyInProgress => Ok(()), err => Err(err), })?; } @@ -3356,6 +3409,9 @@ impl TenantShard { if let Some(ref walredo_mgr) = self.walredo_mgr { walredo_mgr.maybe_quiesce(WALREDO_IDLE_TIMEOUT); } + + // Update the feature resolver with the latest tenant-spcific data. 
+ self.feature_resolver.update_cached_tenant_properties(self); } pub fn timeline_has_no_attached_children(&self, timeline_id: TimelineId) -> bool { @@ -3398,7 +3454,7 @@ impl TenantShard { use pageserver_api::models::ActivatingFrom; match &*current_state { TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => { - panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state); + panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {current_state:?}"); } TenantState::Attaching => { *current_state = TenantState::Activating(ActivatingFrom::Attaching); @@ -3417,9 +3473,6 @@ impl TenantShard { .values() .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping())); - // Before activation, populate each Timeline's GcInfo with information about its children - self.initialize_gc_info(&timelines_accessor, &timelines_offloaded_accessor, None); - // Spawn gc and compaction loops. The loops will shut themselves // down when they notice that the tenant is inactive. tasks::start_background_loops(self, background_jobs_can_start); @@ -3823,6 +3876,10 @@ impl TenantShard { &self.tenant_shard_id } + pub(crate) fn get_shard_identity(&self) -> ShardIdentity { + self.shard_identity + } + pub(crate) fn get_shard_stripe_size(&self) -> ShardStripeSize { self.shard_identity.stripe_size } @@ -4331,7 +4388,7 @@ impl TenantShard { remote_storage: GenericRemoteStorage, deletion_queue_client: DeletionQueueClient, l0_flush_global_state: L0FlushGlobalState, - basebackup_prepare_sender: BasebackupPrepareSender, + basebackup_cache: Arc, feature_resolver: FeatureResolver, ) -> TenantShard { assert!(!attached_conf.location.generation.is_none()); @@ -4436,8 +4493,11 @@ impl TenantShard { ongoing_timeline_detach: std::sync::Mutex::default(), gc_block: Default::default(), l0_flush_global_state, - basebackup_prepare_sender, - feature_resolver, + basebackup_cache, + feature_resolver: TenantFeatureResolver::new( + feature_resolver, + tenant_shard_id.tenant_id, + ), } } @@ -4476,6 +4536,10 @@ impl TenantShard { Ok(toml_edit::de::from_str::(&config)?) } + /// Stores a tenant location config to disk. + /// + /// NB: make sure to call `ShardIdentity::assert_equal` before persisting a new config, to avoid + /// changes to shard parameters that may result in data corruption. 
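+    ///
+    /// The call sites touched in this patch (update_tenant_config_handler and
+    /// patch_tenant_config_handler in http/routes.rs) follow this pattern: they build the
+    /// LocationConf from the tenant's own shard identity via ShardParameters::from() and
+    /// assert equality before calling this function.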
#[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))] pub(super) async fn persist_tenant_config( conf: &'static PageServerConf, @@ -4874,7 +4938,7 @@ impl TenantShard { } let layer_names = tline .layers - .read() + .read(LayerManagerLockHolder::Testing) .await .layer_map() .unwrap() @@ -5042,7 +5106,7 @@ impl TenantShard { pub(crate) async fn bootstrap_timeline_test( self: &Arc, timeline_id: TimelineId, - pg_version: u32, + pg_version: PgMajorVersion, load_existing_initdb: Option, ctx: &RequestContext, ) -> anyhow::Result> { @@ -5184,7 +5248,7 @@ impl TenantShard { async fn bootstrap_timeline( self: &Arc, timeline_id: TimelineId, - pg_version: u32, + pg_version: PgMajorVersion, load_existing_initdb: Option, ctx: &RequestContext, ) -> Result { @@ -5365,7 +5429,7 @@ impl TenantShard { pagestream_throttle_metrics: self.pagestream_throttle_metrics.clone(), l0_compaction_trigger: self.l0_compaction_trigger.clone(), l0_flush_global_state: self.l0_flush_global_state.clone(), - basebackup_prepare_sender: self.basebackup_prepare_sender.clone(), + basebackup_cache: self.basebackup_cache.clone(), feature_resolver: self.feature_resolver.clone(), } } @@ -5722,7 +5786,7 @@ impl TenantShard { async fn run_initdb( conf: &'static PageServerConf, initdb_target_dir: &Utf8Path, - pg_version: u32, + pg_version: PgMajorVersion, cancel: &CancellationToken, ) -> Result<(), InitdbError> { let initdb_bin_path = conf @@ -5804,10 +5868,10 @@ pub(crate) mod harness { use once_cell::sync::OnceCell; use pageserver_api::key::Key; use pageserver_api::models::ShardParameters; - use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::ShardIndex; use utils::id::TenantId; use utils::logging; + use wal_decoder::models::record::NeonWalRecord; use super::*; use crate::deletion_queue::mock::MockDeletionQueue; @@ -5951,7 +6015,7 @@ pub(crate) mod harness { ) -> anyhow::Result> { let walredo_mgr = Arc::new(WalRedoManager::from(TestRedoManager)); - let (basebackup_requst_sender, _) = tokio::sync::mpsc::unbounded_channel(); + let (basebackup_cache, _) = BasebackupCache::new(Utf8PathBuf::new(), None); let tenant = Arc::new(TenantShard::new( TenantState::Attaching, @@ -5959,7 +6023,7 @@ pub(crate) mod harness { AttachedTenantConf::try_from(LocationConf::attached_single( self.tenant_conf.clone(), self.generation, - &ShardParameters::default(), + ShardParameters::default(), )) .unwrap(), self.shard_identity, @@ -5969,7 +6033,7 @@ pub(crate) mod harness { self.deletion_queue.new_client(), // TODO: ideally we should run all unit tests with both configs L0FlushGlobalState::new(L0FlushConfig::default()), - basebackup_requst_sender, + basebackup_cache, FeatureResolver::new_disabled(), )); @@ -6003,7 +6067,7 @@ pub(crate) mod harness { lsn: Lsn, base_img: Option<(Lsn, Bytes)>, records: Vec<(Lsn, NeonWalRecord)>, - _pg_version: u32, + _pg_version: PgMajorVersion, _redo_attempt_type: RedoAttemptType, ) -> Result { let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1)); @@ -6062,9 +6126,6 @@ mod tests { #[cfg(feature = "testing")] use pageserver_api::keyspace::KeySpaceRandomAccum; use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings}; - #[cfg(feature = "testing")] - use pageserver_api::record::NeonWalRecord; - use pageserver_api::value::Value; use pageserver_compaction::helpers::overlaps_with; #[cfg(feature = "testing")] use rand::SeedableRng; @@ -6085,6 +6146,9 @@ mod tests { use timeline::{CompactOptions, 
DeltaLayerTestDesc, VersionedKeySpaceQuery}; use utils::id::TenantId; use utils::shard::{ShardCount, ShardNumber}; + #[cfg(feature = "testing")] + use wal_decoder::models::record::NeonWalRecord; + use wal_decoder::models::value::Value; use super::*; use crate::DEFAULT_PG_VERSION; @@ -6175,7 +6239,7 @@ mod tests { async fn randomize_timeline( tenant: &Arc, new_timeline_id: TimelineId, - pg_version: u32, + pg_version: PgMajorVersion, spec: TestTimelineSpecification, random: &mut rand::rngs::StdRng, ctx: &RequestContext, @@ -6568,7 +6632,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6578,7 +6642,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6592,7 +6656,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6602,7 +6666,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6944,7 +7008,7 @@ mod tests { .await?; make_some_layers(tline.as_ref(), Lsn(0x20), &ctx).await?; - let layer_map = tline.layers.read().await; + let layer_map = tline.layers.read(LayerManagerLockHolder::Testing).await; let level0_deltas = layer_map .layer_map()? .level0_deltas() @@ -7101,7 +7165,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), ctx, ) .await?; @@ -7180,7 +7244,7 @@ mod tests { let lsn = Lsn(0x10); let inserted = bulk_insert_compact_gc(&tenant, &tline, &ctx, lsn, 50, 10000).await?; - let guard = tline.layers.read().await; + let guard = tline.layers.read(LayerManagerLockHolder::Testing).await; let lm = guard.layer_map()?; lm.dump(true, &ctx).await?; @@ -7389,7 +7453,7 @@ mod tests { .put( gap_at_key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))), + &Value::Image(test_img(&format!("{gap_at_key} at {current_lsn}"))), &ctx, ) .await?; @@ -7428,7 +7492,7 @@ mod tests { .put( current_key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))), + &Value::Image(test_img(&format!("{current_key} at {current_lsn}"))), &ctx, ) .await?; @@ -7536,7 +7600,7 @@ mod tests { while key < end_key { current_lsn += 0x10; - let image_value = format!("{} at {}", child_gap_at_key, current_lsn); + let image_value = format!("{child_gap_at_key} at {current_lsn}"); let mut writer = parent_timeline.writer().await; writer @@ -7579,7 +7643,7 @@ mod tests { .put( key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", key, current_lsn))), + &Value::Image(test_img(&format!("{key} at {current_lsn}"))), &ctx, ) .await?; @@ -7700,7 +7764,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7721,7 +7785,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7735,7 +7799,7 @@ mod tests { test_key.field6 = blknum as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at 
{last_lsn}")) ); } @@ -7781,7 +7845,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7810,11 +7874,11 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; - println!("updating {} at {}", blknum, lsn); + println!("updating {blknum} at {lsn}"); writer.finish_write(lsn); drop(writer); updated[blknum] = lsn; @@ -7825,7 +7889,7 @@ mod tests { test_key.field6 = blknum as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at {last_lsn}")) ); } @@ -7878,11 +7942,11 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))), + &Value::Image(test_img(&format!("{idx} {blknum} at {lsn}"))), &ctx, ) .await?; - println!("updating [{}][{}] at {}", idx, blknum, lsn); + println!("updating [{idx}][{blknum}] at {lsn}"); writer.finish_write(lsn); drop(writer); updated[idx][blknum] = lsn; @@ -8088,7 +8152,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8105,7 +8169,7 @@ mod tests { test_key.field6 = (blknum * STEP) as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at {last_lsn}")) ); } @@ -8142,7 +8206,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8208,12 +8272,23 @@ mod tests { tline.freeze_and_flush().await?; // force create a delta layer } - let before_num_l0_delta_files = - tline.layers.read().await.layer_map()?.level0_deltas().len(); + let before_num_l0_delta_files = tline + .layers + .read(LayerManagerLockHolder::Testing) + .await + .layer_map()? + .level0_deltas() + .len(); tline.compact(&cancel, EnumSet::default(), &ctx).await?; - let after_num_l0_delta_files = tline.layers.read().await.layer_map()?.level0_deltas().len(); + let after_num_l0_delta_files = tline + .layers + .read(LayerManagerLockHolder::Testing) + .await + .layer_map()? 
+ .level0_deltas() + .len(); assert!( after_num_l0_delta_files < before_num_l0_delta_files, @@ -8384,7 +8459,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8404,7 +8479,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -9325,12 +9400,7 @@ mod tests { let end_lsn = Lsn(0x100); let image_layers = (0x20..=0x90) .step_by(0x10) - .map(|n| { - ( - Lsn(n), - vec![(key, test_img(&format!("data key at {:x}", n)))], - ) - }) + .map(|n| (Lsn(n), vec![(key, test_img(&format!("data key at {n:x}")))])) .collect(); let timeline = tenant @@ -11374,11 +11444,11 @@ mod tests { if left != right { eprintln!("---LEFT---"); for left in left.iter() { - eprintln!("{}", left); + eprintln!("{left}"); } eprintln!("---RIGHT---"); for right in right.iter() { - eprintln!("{}", right); + eprintln!("{right}"); } assert_eq!(left, right); } diff --git a/pageserver/src/tenant/checks.rs b/pageserver/src/tenant/checks.rs index d5b979ab2a..83d54f09de 100644 --- a/pageserver/src/tenant/checks.rs +++ b/pageserver/src/tenant/checks.rs @@ -63,8 +63,7 @@ pub fn check_valid_layermap(metadata: &[LayerName]) -> Option { && overlaps_with(&layer.key_range, &other_layer.key_range) { let err = format!( - "layer violates the layer map LSN split assumption: layer {} intersects with layer {}", - layer, other_layer + "layer violates the layer map LSN split assumption: layer {layer} intersects with layer {other_layer}" ); return Some(err); } diff --git a/pageserver/src/tenant/config.rs b/pageserver/src/tenant/config.rs index bf82fc8df8..67df767abd 100644 --- a/pageserver/src/tenant/config.rs +++ b/pageserver/src/tenant/config.rs @@ -12,6 +12,7 @@ use pageserver_api::models; use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}; use serde::{Deserialize, Serialize}; +use utils::critical; use utils::generation::Generation; #[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)] @@ -61,8 +62,10 @@ pub(crate) struct LocationConf { /// The detailed shard identity. This structure is already scoped within /// a TenantShardId, but we need the full ShardIdentity to enable calculating /// key->shard mappings. - #[serde(default = "ShardIdentity::unsharded")] - #[serde(skip_serializing_if = "ShardIdentity::is_unsharded")] + /// + /// NB: we store this even for unsharded tenants, so that we agree with storcon on the intended + /// stripe size. Otherwise, a split request that does not specify a stripe size may use a + /// different default than storcon, which can lead to incorrect stripe sizes and corruption. pub(crate) shard: ShardIdentity, /// The pan-cluster tenant configuration, the same on all locations @@ -134,7 +137,7 @@ impl LocationConf { pub(crate) fn attached_single( tenant_conf: pageserver_api::models::TenantConfig, generation: Generation, - shard_params: &models::ShardParameters, + shard_params: models::ShardParameters, ) -> Self { Self { mode: LocationMode::Attached(AttachedLocationConfig { @@ -149,7 +152,12 @@ impl LocationConf { /// For use when attaching/re-attaching: update the generation stored in this /// structure. If we were in a secondary state, promote to attached (posession /// of a fresh generation implies this). 
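The signature change that follows threads the stripe size received during re-attach into this helper. A minimal sketch of the resulting caller side (variable names are illustrative; the real call site is in init_tenant_mgr in mgr.rs below):

    // Re-attach applies the generation and stripe size together; a mismatch against the
    // stored ShardIdentity is reported via critical!() before the new value is applied.
    location_conf.attach_in_generation(AttachmentMode::Single, generation, stripe_size);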
- pub(crate) fn attach_in_generation(&mut self, mode: AttachmentMode, generation: Generation) { + pub(crate) fn attach_in_generation( + &mut self, + mode: AttachmentMode, + generation: Generation, + stripe_size: ShardStripeSize, + ) { match &mut self.mode { LocationMode::Attached(attach_conf) => { attach_conf.generation = generation; @@ -163,6 +171,18 @@ impl LocationConf { }) } } + + // This should never happen. + // TODO: turn this into a proper assertion. + if stripe_size != self.shard.stripe_size { + critical!( + "stripe size mismatch: {} != {}", + self.shard.stripe_size, + stripe_size, + ); + } + + self.shard.stripe_size = stripe_size; } pub(crate) fn try_from(conf: &'_ models::LocationConfig) -> anyhow::Result { diff --git a/pageserver/src/tenant/ephemeral_file.rs b/pageserver/src/tenant/ephemeral_file.rs index 2edf22e9fd..203b5bf592 100644 --- a/pageserver/src/tenant/ephemeral_file.rs +++ b/pageserver/src/tenant/ephemeral_file.rs @@ -3,7 +3,7 @@ use std::io; use std::sync::Arc; -use std::sync::atomic::AtomicU64; +use std::sync::atomic::{AtomicU64, Ordering}; use camino::Utf8PathBuf; use num_traits::Num; @@ -18,6 +18,7 @@ use crate::assert_u64_eq_usize::{U64IsUsize, UsizeIsU64}; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::page_cache; +use crate::tenant::storage_layer::inmemory_layer::GlobalResourceUnits; use crate::tenant::storage_layer::inmemory_layer::vectored_dio_read::File; use crate::virtual_file::owned_buffers_io::io_buf_aligned::IoBufAlignedMut; use crate::virtual_file::owned_buffers_io::slice::SliceMutExt; @@ -30,9 +31,13 @@ pub struct EphemeralFile { _tenant_shard_id: TenantShardId, _timeline_id: TimelineId, page_cache_file_id: page_cache::FileId, - bytes_written: u64, file: TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter, - buffered_writer: BufferedWriter, + + buffered_writer: tokio::sync::RwLock, + + bytes_written: AtomicU64, + + resource_units: std::sync::Mutex, } type BufferedWriter = owned_buffers_io::write::BufferedWriter< @@ -94,9 +99,8 @@ impl EphemeralFile { _tenant_shard_id: tenant_shard_id, _timeline_id: timeline_id, page_cache_file_id, - bytes_written: 0, file: file.clone(), - buffered_writer: BufferedWriter::new( + buffered_writer: tokio::sync::RwLock::new(BufferedWriter::new( file, 0, || IoBufferMut::with_capacity(TAIL_SZ), @@ -104,7 +108,9 @@ impl EphemeralFile { cancel.child_token(), ctx, info_span!(parent: None, "ephemeral_file_buffered_writer", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), timeline_id=%timeline_id, path = %filename), - ), + )), + bytes_written: AtomicU64::new(0), + resource_units: std::sync::Mutex::new(GlobalResourceUnits::new()), }) } } @@ -151,15 +157,17 @@ impl std::ops::Deref for TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter #[derive(Debug, thiserror::Error)] pub(crate) enum EphemeralFileWriteError { - #[error("{0}")] - TooLong(String), #[error("cancelled")] Cancelled, } impl EphemeralFile { pub(crate) fn len(&self) -> u64 { - self.bytes_written + // TODO(vlad): The value returned here is not always correct if + // we have more than one concurrent writer. Writes are always + // sequenced, but we could grab the buffered writer lock if we wanted + // to. + self.bytes_written.load(Ordering::Acquire) } pub(crate) fn page_cache_file_id(&self) -> page_cache::FileId { @@ -186,7 +194,7 @@ impl EphemeralFile { /// Panics if the write is short because there's no way we can recover from that. /// TODO: make upstack handle this as an error. 
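With the struct changes above, EphemeralFile moves to interior mutability and the write path below takes `&self`. A rough usage sketch under the assumption that `write_raw` keeps returning the offset at which the payload starts (`file` and `ctx` are illustrative bindings):

    // Writes now take &self: offsets come from fetch_add on bytes_written, while the
    // RwLock around the buffered writer serializes the actual payloads.
    let off_a = file.write_raw(b"hello", &ctx).await?;
    let off_b = file.write_raw(b"world", &ctx).await?;
    assert_eq!(off_b, off_a + 5);
    assert_eq!(file.len(), 10); // len() loads the AtomicU64 without taking the writer lock

Note that len() is documented below as potentially stale under concurrent writers, which is why the vectored read path re-reads bytes_written while holding the writer lock.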
pub(crate) async fn write_raw( - &mut self, + &self, srcbuf: &[u8], ctx: &RequestContext, ) -> Result { @@ -198,22 +206,13 @@ impl EphemeralFile { } async fn write_raw_controlled( - &mut self, + &self, srcbuf: &[u8], ctx: &RequestContext, ) -> Result<(u64, Option), EphemeralFileWriteError> { - let pos = self.bytes_written; + let mut writer = self.buffered_writer.write().await; - let new_bytes_written = pos.checked_add(srcbuf.len().into_u64()).ok_or_else(|| { - EphemeralFileWriteError::TooLong(format!( - "write would grow EphemeralFile beyond u64::MAX: len={pos} writen={srcbuf_len}", - srcbuf_len = srcbuf.len(), - )) - })?; - - // Write the payload - let (nwritten, control) = self - .buffered_writer + let (nwritten, control) = writer .write_buffered_borrowed_controlled(srcbuf, ctx) .await .map_err(|e| match e { @@ -225,43 +224,69 @@ impl EphemeralFile { "buffered writer has no short writes" ); - self.bytes_written = new_bytes_written; + // There's no realistic risk of overflow here. We won't have exabytes sized files on disk. + let pos = self + .bytes_written + .fetch_add(srcbuf.len().into_u64(), Ordering::AcqRel); + + let mut resource_units = self.resource_units.lock().unwrap(); + resource_units.maybe_publish_size(self.bytes_written.load(Ordering::Relaxed)); Ok((pos, control)) } + + pub(crate) fn tick(&self) -> Option { + let mut resource_units = self.resource_units.lock().unwrap(); + let len = self.bytes_written.load(Ordering::Relaxed); + resource_units.publish_size(len) + } } impl super::storage_layer::inmemory_layer::vectored_dio_read::File for EphemeralFile { async fn read_exact_at_eof_ok( &self, start: u64, - dst: tokio_epoll_uring::Slice, + mut dst: tokio_epoll_uring::Slice, ctx: &RequestContext, ) -> std::io::Result<(tokio_epoll_uring::Slice, usize)> { - let submitted_offset = self.buffered_writer.bytes_submitted(); + // We will fill the slice in back to front. Hence, we need + // the slice to be fully initialized. + // TODO(vlad): Is there a nicer way of doing this? + dst.as_mut_rust_slice_full_zeroed(); - let mutable = match self.buffered_writer.inspect_mutable() { - Some(mutable) => &mutable[0..mutable.pending()], - None => { - // Timeline::cancel and hence buffered writer flush was cancelled. - // Remain read-available while timeline is shutting down. - &[] - } - }; + let writer = self.buffered_writer.read().await; - let maybe_flushed = self.buffered_writer.inspect_maybe_flushed(); + // Read bytes written while under lock. This is a hack to deal with concurrent + // writes updating the number of bytes written. `bytes_written` is not DIO alligned + // but we may end the read there. + // + // TODO(vlad): Feels like there's a nicer path where we align the end if it + // shoots over the end of the file. + let bytes_written = self.bytes_written.load(Ordering::Acquire); let dst_cap = dst.bytes_total().into_u64(); let end = { // saturating_add is correct here because the max file size is u64::MAX, so, // if start + dst.len() > u64::MAX, then we know it will be a short read let mut end: u64 = start.saturating_add(dst_cap); - if end > self.bytes_written { - end = self.bytes_written; + if end > bytes_written { + end = bytes_written; } end }; + let submitted_offset = writer.bytes_submitted(); + let maybe_flushed = writer.inspect_maybe_flushed(); + + let mutable = match writer.inspect_mutable() { + Some(mutable) => &mutable[0..mutable.pending()], + None => { + // Timeline::cancel and hence buffered writer flush was cancelled. + // Remain read-available while timeline is shutting down. 
+ &[] + } + }; + // inclusive, exclusive #[derive(Debug)] struct Range(N, N); @@ -306,13 +331,33 @@ impl super::storage_layer::inmemory_layer::vectored_dio_read::File for Ephemeral let mutable_range = Range(std::cmp::max(start, submitted_offset), end); - let dst = if written_range.len() > 0 { + // There are three sources from which we might have to read data: + // 1. The file itself + // 2. The buffer which contains changes currently being flushed + // 3. The buffer which contains chnages yet to be flushed + // + // For better concurrency, we do them in reverse order: perform the in-memory + // reads while holding the writer lock, drop the writer lock and read from the + // file if required. + + let dst = if mutable_range.len() > 0 { + let offset_in_buffer = mutable_range + .0 + .checked_sub(submitted_offset) + .unwrap() + .into_usize(); + let to_copy = + &mutable[offset_in_buffer..(offset_in_buffer + mutable_range.len().into_usize())]; let bounds = dst.bounds(); - let slice = self - .file - .read_exact_at(dst.slice(0..written_range.len().into_usize()), start, ctx) - .await?; - Slice::from_buf_bounds(Slice::into_inner(slice), bounds) + let mut view = dst.slice({ + let start = + written_range.len().into_usize() + maybe_flushed_range.len().into_usize(); + let end = start.checked_add(mutable_range.len().into_usize()).unwrap(); + start..end + }); + view.as_mut_rust_slice_full_zeroed() + .copy_from_slice(to_copy); + Slice::from_buf_bounds(Slice::into_inner(view), bounds) } else { dst }; @@ -342,24 +387,15 @@ impl super::storage_layer::inmemory_layer::vectored_dio_read::File for Ephemeral dst }; - let dst = if mutable_range.len() > 0 { - let offset_in_buffer = mutable_range - .0 - .checked_sub(submitted_offset) - .unwrap() - .into_usize(); - let to_copy = - &mutable[offset_in_buffer..(offset_in_buffer + mutable_range.len().into_usize())]; + drop(writer); + + let dst = if written_range.len() > 0 { let bounds = dst.bounds(); - let mut view = dst.slice({ - let start = - written_range.len().into_usize() + maybe_flushed_range.len().into_usize(); - let end = start.checked_add(mutable_range.len().into_usize()).unwrap(); - start..end - }); - view.as_mut_rust_slice_full_zeroed() - .copy_from_slice(to_copy); - Slice::from_buf_bounds(Slice::into_inner(view), bounds) + let slice = self + .file + .read_exact_at(dst.slice(0..written_range.len().into_usize()), start, ctx) + .await?; + Slice::from_buf_bounds(Slice::into_inner(slice), bounds) } else { dst }; @@ -460,13 +496,15 @@ mod tests { let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); - let mut file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) + let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); - let mutable = file.buffered_writer.mutable(); + let writer = file.buffered_writer.read().await; + let mutable = writer.mutable(); let cap = mutable.capacity(); let align = mutable.align(); + drop(writer); let write_nbytes = cap * 2 + cap / 2; @@ -504,10 +542,11 @@ mod tests { let file_contents = std::fs::read(file.file.path()).unwrap(); assert!(file_contents == content[0..cap * 2]); - let maybe_flushed_buffer_contents = file.buffered_writer.inspect_maybe_flushed().unwrap(); + let writer = file.buffered_writer.read().await; + let maybe_flushed_buffer_contents = writer.inspect_maybe_flushed().unwrap(); assert_eq!(&maybe_flushed_buffer_contents[..], &content[cap..cap * 2]); - let mutable_buffer_contents = file.buffered_writer.mutable(); + let 
mutable_buffer_contents = writer.mutable(); assert_eq!(mutable_buffer_contents, &content[cap * 2..write_nbytes]); } @@ -517,12 +556,14 @@ mod tests { let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); - let mut file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) + let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); // mutable buffer and maybe_flushed buffer each has `cap` bytes. - let cap = file.buffered_writer.mutable().capacity(); + let writer = file.buffered_writer.read().await; + let cap = writer.mutable().capacity(); + drop(writer); let content: Vec = rand::thread_rng() .sample_iter(rand::distributions::Standard) @@ -540,12 +581,13 @@ mod tests { 2 * cap.into_u64(), "buffered writer requires one write to be flushed if we write 2.5x buffer capacity" ); + let writer = file.buffered_writer.read().await; assert_eq!( - &file.buffered_writer.inspect_maybe_flushed().unwrap()[0..cap], + &writer.inspect_maybe_flushed().unwrap()[0..cap], &content[cap..cap * 2] ); assert_eq!( - &file.buffered_writer.mutable()[0..cap / 2], + &writer.mutable()[0..cap / 2], &content[cap * 2..cap * 2 + cap / 2] ); } @@ -563,13 +605,15 @@ mod tests { let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); - let mut file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) + let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); - let mutable = file.buffered_writer.mutable(); + let writer = file.buffered_writer.read().await; + let mutable = writer.mutable(); let cap = mutable.capacity(); let align = mutable.align(); + drop(writer); let content: Vec = rand::thread_rng() .sample_iter(rand::distributions::Standard) .take(cap * 2 + cap / 2) diff --git a/pageserver/src/tenant/metadata.rs b/pageserver/src/tenant/metadata.rs index bea3128265..2f407de951 100644 --- a/pageserver/src/tenant/metadata.rs +++ b/pageserver/src/tenant/metadata.rs @@ -18,6 +18,7 @@ //! 
[`IndexPart`]: super::remote_timeline_client::index::IndexPart use anyhow::ensure; +use postgres_ffi::PgMajorVersion; use serde::{Deserialize, Serialize}; use utils::bin_ser::{BeSer, SerializeError}; use utils::id::TimelineId; @@ -136,7 +137,7 @@ struct TimelineMetadataBodyV2 { latest_gc_cutoff_lsn: Lsn, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -167,7 +168,7 @@ impl TimelineMetadata { ancestor_lsn: Lsn, latest_gc_cutoff_lsn: Lsn, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> Self { Self { hdr: TimelineMetadataHeader { @@ -215,7 +216,7 @@ impl TimelineMetadata { ancestor_lsn: body.ancestor_lsn, latest_gc_cutoff_lsn: body.latest_gc_cutoff_lsn, initdb_lsn: body.initdb_lsn, - pg_version: 14, // All timelines created before this version had pg_version 14 + pg_version: PgMajorVersion::PG14, // All timelines created before this version had pg_version 14 }; hdr.format_version = METADATA_FORMAT_VERSION; @@ -317,7 +318,7 @@ impl TimelineMetadata { self.body.initdb_lsn } - pub fn pg_version(&self) -> u32 { + pub fn pg_version(&self) -> PgMajorVersion { self.body.pg_version } @@ -331,7 +332,7 @@ impl TimelineMetadata { Lsn::from_hex("00000000").unwrap(), Lsn::from_hex("00000000").unwrap(), Lsn::from_hex("00000000").unwrap(), - 0, + PgMajorVersion::PG14, ); let bytes = instance.to_bytes().unwrap(); Self::from_bytes(&bytes).unwrap() @@ -545,13 +546,12 @@ mod tests { Lsn(0), Lsn(0), Lsn(0), - 14, // All timelines created before this version had pg_version 14 + PgMajorVersion::PG14, // All timelines created before this version had pg_version 14 ); assert_eq!( deserialized_metadata.body, expected_metadata.body, - "Metadata of the old version {} should be upgraded to the latest version {}", - METADATA_OLD_FORMAT_VERSION, METADATA_FORMAT_VERSION + "Metadata of the old version {METADATA_OLD_FORMAT_VERSION} should be upgraded to the latest version {METADATA_FORMAT_VERSION}" ); } @@ -566,7 +566,7 @@ mod tests { Lsn(0), // Updating this version to 17 will cause the test to fail at the // next assert_eq!(). - 16, + PgMajorVersion::PG16, ); let expected_bytes = vec![ /* TimelineMetadataHeader */ diff --git a/pageserver/src/tenant/mgr.rs b/pageserver/src/tenant/mgr.rs index 86aef9b42c..95f5c60170 100644 --- a/pageserver/src/tenant/mgr.rs +++ b/pageserver/src/tenant/mgr.rs @@ -12,7 +12,6 @@ use anyhow::Context; use camino::{Utf8DirEntry, Utf8Path, Utf8PathBuf}; use futures::StreamExt; use itertools::Itertools; -use once_cell::sync::Lazy; use pageserver_api::key::Key; use pageserver_api::models::{DetachBehavior, LocationConfigMode}; use pageserver_api::shard::{ @@ -52,6 +51,7 @@ use crate::tenant::config::{ use crate::tenant::span::debug_assert_current_span_has_tenant_id; use crate::tenant::storage_layer::inmemory_layer; use crate::tenant::timeline::ShutdownMode; +use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::{ AttachedTenantConf, GcError, LoadConfigError, SpawnMode, TenantShard, TenantState, }; @@ -103,7 +103,7 @@ pub(crate) enum TenantsMap { /// [`init_tenant_mgr`] is not done yet. Initializing, /// [`init_tenant_mgr`] is done, all on-disk tenants have been loaded. - /// New tenants can be added using [`tenant_map_acquire_slot`]. + /// New tenants can be added using [`TenantManager::tenant_map_acquire_slot`]. Open(BTreeMap), /// The pageserver has entered shutdown mode via [`TenantManager::shutdown`]. 
/// Existing tenants are still accessible, but no new tenants can be created. @@ -129,7 +129,7 @@ pub(crate) enum ShardSelector { /// /// This represents the subset of a LocationConfig that we receive during re-attach. pub(crate) enum TenantStartupMode { - Attached((AttachmentMode, Generation)), + Attached((AttachmentMode, Generation, ShardStripeSize)), Secondary, } @@ -143,15 +143,21 @@ impl TenantStartupMode { match (rart.mode, rart.r#gen) { (LocationConfigMode::Detached, _) => None, (LocationConfigMode::Secondary, _) => Some(Self::Secondary), - (LocationConfigMode::AttachedMulti, Some(g)) => { - Some(Self::Attached((AttachmentMode::Multi, Generation::new(g)))) - } - (LocationConfigMode::AttachedSingle, Some(g)) => { - Some(Self::Attached((AttachmentMode::Single, Generation::new(g)))) - } - (LocationConfigMode::AttachedStale, Some(g)) => { - Some(Self::Attached((AttachmentMode::Stale, Generation::new(g)))) - } + (LocationConfigMode::AttachedMulti, Some(g)) => Some(Self::Attached(( + AttachmentMode::Multi, + Generation::new(g), + rart.stripe_size, + ))), + (LocationConfigMode::AttachedSingle, Some(g)) => Some(Self::Attached(( + AttachmentMode::Single, + Generation::new(g), + rart.stripe_size, + ))), + (LocationConfigMode::AttachedStale, Some(g)) => Some(Self::Attached(( + AttachmentMode::Stale, + Generation::new(g), + rart.stripe_size, + ))), _ => { tracing::warn!( "Received invalid re-attach state for tenant {}: {rart:?}", @@ -284,9 +290,6 @@ impl BackgroundPurges { } } -static TENANTS: Lazy> = - Lazy::new(|| std::sync::RwLock::new(TenantsMap::Initializing)); - /// Responsible for storing and mutating the collection of all tenants /// that this pageserver has state for. /// @@ -297,10 +300,7 @@ static TENANTS: Lazy> = /// and attached modes concurrently. pub struct TenantManager { conf: &'static PageServerConf, - // TODO: currently this is a &'static pointing to TENANTs. When we finish refactoring - // out of that static variable, the TenantManager can own this. - // See https://github.com/neondatabase/neon/issues/5796 - tenants: &'static std::sync::RwLock, + tenants: std::sync::RwLock, resources: TenantSharedResources, // Long-running operations that happen outside of a [`Tenant`] lifetime should respect this token. @@ -325,9 +325,11 @@ fn emergency_generations( Some(( *tid, match &lc.mode { - LocationMode::Attached(alc) => { - TenantStartupMode::Attached((alc.attach_mode, alc.generation)) - } + LocationMode::Attached(alc) => TenantStartupMode::Attached(( + alc.attach_mode, + alc.generation, + ShardStripeSize::default(), + )), LocationMode::Secondary(_) => TenantStartupMode::Secondary, }, )) @@ -371,7 +373,7 @@ async fn init_load_generations( .iter() .flat_map(|(id, start_mode)| { match start_mode { - TenantStartupMode::Attached((_mode, generation)) => Some(generation), + TenantStartupMode::Attached((_mode, generation, _stripe_size)) => Some(generation), TenantStartupMode::Secondary => None, } .map(|gen_| (*id, *gen_)) @@ -479,21 +481,43 @@ pub(crate) enum DeleteTenantError { Other(#[from] anyhow::Error), } -/// Initialize repositories with locally available timelines. +/// Initialize repositories at `Initializing` state. 
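Tenant-manager construction is now split in two: `init` builds a manager whose map is still `Initializing`, and `init_tenant_mgr` later transitions it to `Open` once local tenants are loaded. A sketch of the assumed startup wiring (the actual call site in the pageserver binary is not part of this hunk):

    // Construct the manager first so other subsystems can hold an Arc to it...
    let tenant_manager = Arc::new(mgr::init(conf, background_purges, resources, cancel.clone()));
    // ...then load on-disk tenants and transition the map from Initializing to Open.
    mgr::init_tenant_mgr(tenant_manager.clone(), init_order).await?;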
+pub fn init( + conf: &'static PageServerConf, + background_purges: BackgroundPurges, + resources: TenantSharedResources, + cancel: CancellationToken, +) -> TenantManager { + TenantManager { + conf, + tenants: std::sync::RwLock::new(TenantsMap::Initializing), + resources, + cancel, + background_purges, + } +} + +/// Transition repositories from `Initializing` state to `Open` state with locally available timelines. /// Timelines that are only partially available locally (remote storage has more data than this pageserver) /// are scheduled for download and added to the tenant once download is completed. #[instrument(skip_all)] pub async fn init_tenant_mgr( - conf: &'static PageServerConf, - background_purges: BackgroundPurges, - resources: TenantSharedResources, + tenant_manager: Arc, init_order: InitializationOrder, - cancel: CancellationToken, -) -> anyhow::Result { +) -> anyhow::Result<()> { + debug_assert!(matches!( + *tenant_manager.tenants.read().unwrap(), + TenantsMap::Initializing + )); let mut tenants = BTreeMap::new(); let ctx = RequestContext::todo_child(TaskKind::Startup, DownloadBehavior::Warn); + let conf = tenant_manager.conf; + let resources = &tenant_manager.resources; + let cancel = &tenant_manager.cancel; + let background_purges = &tenant_manager.background_purges; + // Initialize dynamic limits that depend on system resources let system_memory = sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_memory()) @@ -512,7 +536,7 @@ pub async fn init_tenant_mgr( let tenant_configs = init_load_tenant_configs(conf).await; // Determine which tenants are to be secondary or attached, and in which generation - let tenant_modes = init_load_generations(conf, &tenant_configs, &resources, &cancel).await?; + let tenant_modes = init_load_generations(conf, &tenant_configs, resources, cancel).await?; tracing::info!( "Attaching {} tenants at startup, warming up {} at a time", @@ -569,7 +593,7 @@ pub async fn init_tenant_mgr( location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF); } } - Some(TenantStartupMode::Attached((attach_mode, generation))) => { + Some(TenantStartupMode::Attached((attach_mode, generation, stripe_size))) => { let old_gen_higher = match &location_conf.mode { LocationMode::Attached(AttachedLocationConfig { generation: old_generation, @@ -593,7 +617,7 @@ pub async fn init_tenant_mgr( // local disk content: demote to secondary rather than detaching. location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF); } else { - location_conf.attach_in_generation(*attach_mode, *generation); + location_conf.attach_in_generation(*attach_mode, *generation, *stripe_size); } } } @@ -669,18 +693,10 @@ pub async fn init_tenant_mgr( info!("Processed {} local tenants at startup", tenants.len()); - let mut tenants_map = TENANTS.write().unwrap(); - assert!(matches!(&*tenants_map, &TenantsMap::Initializing)); + let mut tenant_map = tenant_manager.tenants.write().unwrap(); + *tenant_map = TenantsMap::Open(tenants); - *tenants_map = TenantsMap::Open(tenants); - - Ok(TenantManager { - conf, - tenants: &TENANTS, - resources, - cancel: CancellationToken::new(), - background_purges, - }) + Ok(()) } /// Wrapper for Tenant::spawn that checks invariants before running @@ -719,142 +735,6 @@ fn tenant_spawn( ) } -async fn shutdown_all_tenants0(tenants: &std::sync::RwLock) { - let mut join_set = JoinSet::new(); - - #[cfg(all(debug_assertions, not(test)))] - { - // Check that our metrics properly tracked the size of the tenants map. 
This is a convenient location to check, - // as it happens implicitly at the end of tests etc. - let m = tenants.read().unwrap(); - debug_assert_eq!(METRICS.slots_total(), m.len() as u64); - } - - // Atomically, 1. create the shutdown tasks and 2. prevent creation of new tenants. - let (total_in_progress, total_attached) = { - let mut m = tenants.write().unwrap(); - match &mut *m { - TenantsMap::Initializing => { - *m = TenantsMap::ShuttingDown(BTreeMap::default()); - info!("tenants map is empty"); - return; - } - TenantsMap::Open(tenants) => { - let mut shutdown_state = BTreeMap::new(); - let mut total_in_progress = 0; - let mut total_attached = 0; - - for (tenant_shard_id, v) in std::mem::take(tenants).into_iter() { - match v { - TenantSlot::Attached(t) => { - shutdown_state.insert(tenant_shard_id, TenantSlot::Attached(t.clone())); - join_set.spawn( - async move { - let res = { - let (_guard, shutdown_progress) = completion::channel(); - t.shutdown(shutdown_progress, ShutdownMode::FreezeAndFlush).await - }; - - if let Err(other_progress) = res { - // join the another shutdown in progress - other_progress.wait().await; - } - - // we cannot afford per tenant logging here, because if s3 is degraded, we are - // going to log too many lines - debug!("tenant successfully stopped"); - } - .instrument(info_span!("shutdown", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())), - ); - - total_attached += 1; - } - TenantSlot::Secondary(state) => { - // We don't need to wait for this individually per-tenant: the - // downloader task will be waited on eventually, this cancel - // is just to encourage it to drop out if it is doing work - // for this tenant right now. - state.cancel.cancel(); - - shutdown_state.insert(tenant_shard_id, TenantSlot::Secondary(state)); - } - TenantSlot::InProgress(notify) => { - // InProgress tenants are not visible in TenantsMap::ShuttingDown: we will - // wait for their notifications to fire in this function. - join_set.spawn(async move { - notify.wait().await; - }); - - total_in_progress += 1; - } - } - } - *m = TenantsMap::ShuttingDown(shutdown_state); - (total_in_progress, total_attached) - } - TenantsMap::ShuttingDown(_) => { - error!( - "already shutting down, this function isn't supposed to be called more than once" - ); - return; - } - } - }; - - let started_at = std::time::Instant::now(); - - info!( - "Waiting for {} InProgress tenants and {} Attached tenants to shut down", - total_in_progress, total_attached - ); - - let total = join_set.len(); - let mut panicked = 0; - let mut buffering = true; - const BUFFER_FOR: std::time::Duration = std::time::Duration::from_millis(500); - let mut buffered = std::pin::pin!(tokio::time::sleep(BUFFER_FOR)); - - while !join_set.is_empty() { - tokio::select! { - Some(joined) = join_set.join_next() => { - match joined { - Ok(()) => {}, - Err(join_error) if join_error.is_cancelled() => { - unreachable!("we are not cancelling any of the tasks"); - } - Err(join_error) if join_error.is_panic() => { - // cannot really do anything, as this panic is likely a bug - panicked += 1; - } - Err(join_error) => { - warn!("unknown kind of JoinError: {join_error}"); - } - } - if !buffering { - // buffer so that every 500ms since the first update (or starting) we'll log - // how far away we are; this is because we will get SIGKILL'd at 10s, and we - // are not able to log *then*. 
- buffering = true; - buffered.as_mut().reset(tokio::time::Instant::now() + BUFFER_FOR); - } - }, - _ = &mut buffered, if buffering => { - buffering = false; - info!(remaining = join_set.len(), total, elapsed_ms = started_at.elapsed().as_millis(), "waiting for tenants to shutdown"); - } - } - } - - if panicked > 0 { - warn!( - panicked, - total, "observed panicks while shutting down tenants" - ); - } - - // caller will log how long we took -} - #[derive(thiserror::Error, Debug)] pub(crate) enum UpsertLocationError { #[error("Bad config request: {0}")] @@ -1000,6 +880,9 @@ impl TenantManager { // phase of writing config and/or waiting for flush, before returning. match fast_path_taken { Some(FastPathModified::Attached(tenant)) => { + tenant + .shard_identity + .assert_equal(new_location_config.shard); TenantShard::persist_tenant_config( self.conf, &tenant_shard_id, @@ -1034,7 +917,10 @@ impl TenantManager { return Ok(Some(tenant)); } - Some(FastPathModified::Secondary(_secondary_tenant)) => { + Some(FastPathModified::Secondary(secondary_tenant)) => { + secondary_tenant + .shard_identity + .assert_equal(new_location_config.shard); TenantShard::persist_tenant_config( self.conf, &tenant_shard_id, @@ -1056,7 +942,8 @@ impl TenantManager { // the tenant is inaccessible to the outside world while we are doing this, but that is sensible: // the state is ill-defined while we're in transition. Transitions are async, but fast: we do // not do significant I/O, and shutdowns should be prompt via cancellation tokens. - let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any) + let mut slot_guard = self + .tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any) .map_err(|e| match e { TenantSlotError::NotFound(_) => { unreachable!("Called with mode Any") @@ -1067,6 +954,10 @@ impl TenantManager { match slot_guard.get_old_value() { Some(TenantSlot::Attached(tenant)) => { + tenant + .shard_identity + .assert_equal(new_location_config.shard); + // The case where we keep a Tenant alive was covered above in the special case // for Attached->Attached transitions in the same generation. By this point, // if we see an attached tenant we know it will be discarded and should be @@ -1100,9 +991,13 @@ impl TenantManager { // rather than assuming it to be empty. 
spawn_mode = SpawnMode::Eager; } - Some(TenantSlot::Secondary(state)) => { + Some(TenantSlot::Secondary(secondary_tenant)) => { + secondary_tenant + .shard_identity + .assert_equal(new_location_config.shard); + info!("Shutting down secondary tenant"); - state.shutdown().await; + secondary_tenant.shutdown().await; } Some(TenantSlot::InProgress(_)) => { // This should never happen: acquire_slot should error out @@ -1223,6 +1118,75 @@ impl TenantManager { } } + fn tenant_map_acquire_slot( + &self, + tenant_shard_id: &TenantShardId, + mode: TenantSlotAcquireMode, + ) -> Result { + use TenantSlotAcquireMode::*; + METRICS.tenant_slot_writes.inc(); + + let mut locked = self.tenants.write().unwrap(); + let span = tracing::info_span!("acquire_slot", tenant_id=%tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug()); + let _guard = span.enter(); + + let m = match &mut *locked { + TenantsMap::Initializing => return Err(TenantMapError::StillInitializing.into()), + TenantsMap::ShuttingDown(_) => return Err(TenantMapError::ShuttingDown.into()), + TenantsMap::Open(m) => m, + }; + + use std::collections::btree_map::Entry; + + let entry = m.entry(*tenant_shard_id); + + match entry { + Entry::Vacant(v) => match mode { + MustExist => { + tracing::debug!("Vacant && MustExist: return NotFound"); + Err(TenantSlotError::NotFound(*tenant_shard_id)) + } + _ => { + let (completion, barrier) = utils::completion::channel(); + let inserting = TenantSlot::InProgress(barrier); + METRICS.slot_inserted(&inserting); + v.insert(inserting); + tracing::debug!("Vacant, inserted InProgress"); + Ok(SlotGuard::new( + *tenant_shard_id, + None, + completion, + &self.tenants, + )) + } + }, + Entry::Occupied(mut o) => { + // Apply mode-driven checks + match (o.get(), mode) { + (TenantSlot::InProgress(_), _) => { + tracing::debug!("Occupied, failing for InProgress"); + Err(TenantSlotError::InProgress) + } + _ => { + // Happy case: the slot was not in any state that violated our mode + let (completion, barrier) = utils::completion::channel(); + let in_progress = TenantSlot::InProgress(barrier); + METRICS.slot_inserted(&in_progress); + let old_value = o.insert(in_progress); + METRICS.slot_removed(&old_value); + tracing::debug!("Occupied, replaced with InProgress"); + Ok(SlotGuard::new( + *tenant_shard_id, + Some(old_value), + completion, + &self.tenants, + )) + } + } + } + } + } + /// Resetting a tenant is equivalent to detaching it, then attaching it again with the same /// LocationConf that was last used to attach it. Optionally, the local file cache may be /// dropped before re-attaching. 
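Slot acquisition is now a method on TenantManager, and the guard borrows the manager's own tenants lock instead of a process-wide static. A condensed sketch of the acquire-then-inspect pattern these hunks converge on (assumed to mirror remove_tenant_from_memory below; bindings are illustrative):

    let mut slot_guard =
        self.tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
    match slot_guard.get_old_value() {
        Some(TenantSlot::Attached(tenant)) => { /* shut the tenant down, then drop_old_value() */ }
        Some(TenantSlot::Secondary(state)) => { /* state.shutdown().await */ }
        Some(TenantSlot::InProgress(_)) => unreachable!("acquire_slot never returns InProgress"),
        None => { /* vacant slot: nothing to shut down */ }
    }

The new lifetime on SlotGuard<'a> is what ties the guard to the manager's lock rather than the old TENANTS static.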
@@ -1239,7 +1203,8 @@ impl TenantManager { drop_cache: bool, ctx: &RequestContext, ) -> anyhow::Result<()> { - let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; + let mut slot_guard = + self.tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; let Some(old_slot) = slot_guard.get_old_value() else { anyhow::bail!("Tenant not found when trying to reset"); }; @@ -1388,7 +1353,8 @@ impl TenantManager { Ok(()) } - let slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; + let slot_guard = + self.tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; match &slot_guard.old_value { Some(TenantSlot::Attached(tenant)) => { // Legacy deletion flow: the tenant remains attached, goes to Stopping state, and @@ -1539,7 +1505,7 @@ impl TenantManager { // Phase 2: Put the parent shard to InProgress and grab a reference to the parent Tenant drop(tenant); let mut parent_slot_guard = - tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; + self.tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?; let parent = match parent_slot_guard.get_old_value() { Some(TenantSlot::Attached(t)) => t, Some(TenantSlot::Secondary(_)) => anyhow::bail!("Tenant location in secondary mode"), @@ -1671,7 +1637,12 @@ impl TenantManager { } } - // Phase 5: Shut down the parent shard, and erase it from disk + // Phase 5: Shut down the parent shard. We leave it on disk in case the split fails and we + // have to roll back to the parent shard, avoiding a cold start. It will be cleaned up once + // the storage controller commits the split, or if all else fails, on the next restart. + // + // TODO: We don't flush the ephemeral layer here, because the split is likely to succeed and + // catching up the parent should be reasonably quick. Consider using FreezeAndFlush instead. let (_guard, progress) = completion::channel(); match parent.shutdown(progress, ShutdownMode::Hard).await { Ok(()) => {} @@ -1679,11 +1650,6 @@ impl TenantManager { other.wait().await; } } - let local_tenant_directory = self.conf.tenant_path(&tenant_shard_id); - let tmp_path = safe_rename_tenant_dir(&local_tenant_directory) - .await - .with_context(|| format!("local tenant directory {local_tenant_directory:?} rename"))?; - self.background_purges.spawn(tmp_path); fail::fail_point!("shard-split-pre-finish", |_| Err(anyhow::anyhow!( "failpoint" @@ -1715,7 +1681,10 @@ impl TenantManager { let parent_timelines = timelines.keys().cloned().collect::>(); for timeline in timelines.values() { tracing::info!(timeline_id=%timeline.timeline_id, "Loading list of layers to hardlink"); - let layers = timeline.layers.read().await; + let layers = timeline + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; for layer in layers.likely_resident_layers() { let relative_path = layer @@ -1843,44 +1812,210 @@ impl TenantManager { pub(crate) async fn shutdown(&self) { self.cancel.cancel(); - shutdown_all_tenants0(self.tenants).await + self.shutdown_all_tenants0().await } + async fn shutdown_all_tenants0(&self) { + let mut join_set = JoinSet::new(); + + #[cfg(all(debug_assertions, not(test)))] + { + // Check that our metrics properly tracked the size of the tenants map. This is a convenient location to check, + // as it happens implicitly at the end of tests etc. + let m = self.tenants.read().unwrap(); + debug_assert_eq!(METRICS.slots_total(), m.len() as u64); + } + + // Atomically, 1. create the shutdown tasks and 2. 
prevent creation of new tenants. + let (total_in_progress, total_attached) = { + let mut m = self.tenants.write().unwrap(); + match &mut *m { + TenantsMap::Initializing => { + *m = TenantsMap::ShuttingDown(BTreeMap::default()); + info!("tenants map is empty"); + return; + } + TenantsMap::Open(tenants) => { + let mut shutdown_state = BTreeMap::new(); + let mut total_in_progress = 0; + let mut total_attached = 0; + + for (tenant_shard_id, v) in std::mem::take(tenants).into_iter() { + match v { + TenantSlot::Attached(t) => { + shutdown_state + .insert(tenant_shard_id, TenantSlot::Attached(t.clone())); + join_set.spawn( + async move { + let res = { + let (_guard, shutdown_progress) = completion::channel(); + t.shutdown(shutdown_progress, ShutdownMode::FreezeAndFlush).await + }; + + if let Err(other_progress) = res { + // join the another shutdown in progress + other_progress.wait().await; + } + + // we cannot afford per tenant logging here, because if s3 is degraded, we are + // going to log too many lines + debug!("tenant successfully stopped"); + } + .instrument(info_span!("shutdown", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())), + ); + + total_attached += 1; + } + TenantSlot::Secondary(state) => { + // We don't need to wait for this individually per-tenant: the + // downloader task will be waited on eventually, this cancel + // is just to encourage it to drop out if it is doing work + // for this tenant right now. + state.cancel.cancel(); + + shutdown_state + .insert(tenant_shard_id, TenantSlot::Secondary(state)); + } + TenantSlot::InProgress(notify) => { + // InProgress tenants are not visible in TenantsMap::ShuttingDown: we will + // wait for their notifications to fire in this function. + join_set.spawn(async move { + notify.wait().await; + }); + + total_in_progress += 1; + } + } + } + *m = TenantsMap::ShuttingDown(shutdown_state); + (total_in_progress, total_attached) + } + TenantsMap::ShuttingDown(_) => { + error!( + "already shutting down, this function isn't supposed to be called more than once" + ); + return; + } + } + }; + + let started_at = std::time::Instant::now(); + + info!( + "Waiting for {} InProgress tenants and {} Attached tenants to shut down", + total_in_progress, total_attached + ); + + let total = join_set.len(); + let mut panicked = 0; + let mut buffering = true; + const BUFFER_FOR: std::time::Duration = std::time::Duration::from_millis(500); + let mut buffered = std::pin::pin!(tokio::time::sleep(BUFFER_FOR)); + + while !join_set.is_empty() { + tokio::select! { + Some(joined) = join_set.join_next() => { + match joined { + Ok(()) => {}, + Err(join_error) if join_error.is_cancelled() => { + unreachable!("we are not cancelling any of the tasks"); + } + Err(join_error) if join_error.is_panic() => { + // cannot really do anything, as this panic is likely a bug + panicked += 1; + } + Err(join_error) => { + warn!("unknown kind of JoinError: {join_error}"); + } + } + if !buffering { + // buffer so that every 500ms since the first update (or starting) we'll log + // how far away we are; this is because we will get SIGKILL'd at 10s, and we + // are not able to log *then*. 
+ buffering = true; + buffered.as_mut().reset(tokio::time::Instant::now() + BUFFER_FOR); + } + }, + _ = &mut buffered, if buffering => { + buffering = false; + info!(remaining = join_set.len(), total, elapsed_ms = started_at.elapsed().as_millis(), "waiting for tenants to shutdown"); + } + } + } + + if panicked > 0 { + warn!( + panicked, + total, "observed panicks while shutting down tenants" + ); + } + + // caller will log how long we took + } + + /// Detaches a tenant, and removes its local files asynchronously. + /// + /// File removal is idempotent: even if the tenant has already been removed, this will still + /// remove any local files. This is used during shard splits, where we leave the parent shard's + /// files around in case we have to roll back the split. pub(crate) async fn detach_tenant( &self, conf: &'static PageServerConf, tenant_shard_id: TenantShardId, deletion_queue_client: &DeletionQueueClient, ) -> Result<(), TenantStateError> { - let tmp_path = self + if let Some(tmp_path) = self .detach_tenant0(conf, tenant_shard_id, deletion_queue_client) - .await?; - self.background_purges.spawn(tmp_path); + .await? + { + self.background_purges.spawn(tmp_path); + } Ok(()) } + /// Detaches a tenant. This renames the tenant directory to a temporary path and returns it, + /// allowing the caller to delete it asynchronously. Returns None if the dir is already removed. async fn detach_tenant0( &self, conf: &'static PageServerConf, tenant_shard_id: TenantShardId, deletion_queue_client: &DeletionQueueClient, - ) -> Result { + ) -> Result, TenantStateError> { let tenant_dir_rename_operation = |tenant_id_to_clean: TenantShardId| async move { let local_tenant_directory = conf.tenant_path(&tenant_id_to_clean); + if !tokio::fs::try_exists(&local_tenant_directory).await? { + // If the tenant directory doesn't exist, it's already cleaned up. + return Ok(None); + } safe_rename_tenant_dir(&local_tenant_directory) .await .with_context(|| { format!("local tenant directory {local_tenant_directory:?} rename") }) + .map(Some) }; - let removal_result = remove_tenant_from_memory( - self.tenants, - tenant_shard_id, - tenant_dir_rename_operation(tenant_shard_id), - ) - .await; + let mut removal_result = self + .remove_tenant_from_memory( + tenant_shard_id, + tenant_dir_rename_operation(tenant_shard_id), + ) + .await; + + // If the tenant was not found, it was likely already removed. Attempt to remove the tenant + // directory on disk anyway. For example, during shard splits, we shut down and remove the + // parent shard, but leave its directory on disk in case we have to roll back the split. + // + // TODO: it would be better to leave the parent shard attached until the split is committed. + // This will be needed by the gRPC page service too, such that a compute can continue to + // read from the parent shard until it's notified about the new child shards. See: + // . + if let Err(TenantStateError::SlotError(TenantSlotError::NotFound(_))) = removal_result { + removal_result = tenant_dir_rename_operation(tenant_shard_id) + .await + .map_err(TenantStateError::Other); + } // Flush pending deletions, so that they have a good chance of passing validation // before this tenant is potentially re-attached elsewhere. 
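Taken together, the detach changes make local file removal idempotent: a missing slot falls back to the directory rename, and a missing directory is simply skipped. A small sketch of the behaviour a caller can now rely on (`mgr` and the surrounding bindings are hypothetical):

    // First call detaches the shard and schedules its renamed directory for background purge.
    mgr.detach_tenant(conf, tenant_shard_id, &deletion_queue_client).await?;
    // A repeat call finds neither a slot nor a local directory and still returns Ok(()).
    mgr.detach_tenant(conf, tenant_shard_id, &deletion_queue_client).await?;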
@@ -1920,17 +2055,16 @@ impl TenantManager { ) -> Result, detach_ancestor::Error> { use detach_ancestor::Error; - let slot_guard = - tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustExist).map_err( - |e| { - use TenantSlotError::*; + let slot_guard = self + .tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustExist) + .map_err(|e| { + use TenantSlotError::*; - match e { - MapState(TenantMapError::ShuttingDown) => Error::ShuttingDown, - NotFound(_) | InProgress | MapState(_) => Error::DetachReparent(e.into()), - } - }, - )?; + match e { + MapState(TenantMapError::ShuttingDown) => Error::ShuttingDown, + NotFound(_) | InProgress | MapState(_) => Error::DetachReparent(e.into()), + } + })?; let tenant = { let old_slot = slot_guard @@ -2080,7 +2214,7 @@ impl TenantManager { selector: ShardSelector, ) -> ShardResolveResult { let tenants = self.tenants.read().unwrap(); - let mut want_shard = None; + let mut want_shard: Option = None; let mut any_in_progress = None; match &*tenants { @@ -2105,14 +2239,23 @@ impl TenantManager { return ShardResolveResult::Found(tenant.clone()); } ShardSelector::Page(key) => { - // First slot we see for this tenant, calculate the expected shard number - // for the key: we will use this for checking if this and subsequent - // slots contain the key, rather than recalculating the hash each time. - if want_shard.is_none() { - want_shard = Some(tenant.shard_identity.get_shard_number(&key)); + // Each time we find an attached slot with a different shard count, + // recompute the expected shard number: during shard splits we might + // have multiple shards with the old shard count. + if want_shard.is_none() + || want_shard.unwrap().shard_count != tenant.shard_identity.count + { + want_shard = Some(ShardIndex { + shard_number: tenant.shard_identity.get_shard_number(&key), + shard_count: tenant.shard_identity.count, + }); } - if Some(tenant.shard_identity.number) == want_shard { + if Some(ShardIndex { + shard_number: tenant.shard_identity.number, + shard_count: tenant.shard_identity.count, + }) == want_shard + { return ShardResolveResult::Found(tenant.clone()); } } @@ -2263,6 +2406,80 @@ impl TenantManager { other => ApiError::InternalServerError(anyhow::anyhow!(other)), }) } + + /// Stops and removes the tenant from memory, if it's not [`TenantState::Stopping`] already, bails otherwise. + /// Allows to remove other tenant resources manually, via `tenant_cleanup`. + /// If the cleanup fails, tenant will stay in memory in [`TenantState::Broken`] state, and another removal + async fn remove_tenant_from_memory( + &self, + tenant_shard_id: TenantShardId, + tenant_cleanup: F, + ) -> Result + where + F: std::future::Future>, + { + let mut slot_guard = + self.tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustExist)?; + + // allow pageserver shutdown to await for our completion + let (_guard, progress) = completion::channel(); + + // The SlotGuard allows us to manipulate the Tenant object without fear of some + // concurrent API request doing something else for the same tenant ID. + let attached_tenant = match slot_guard.get_old_value() { + Some(TenantSlot::Attached(tenant)) => { + // whenever we remove a tenant from memory, we don't want to flush and wait for upload + let shutdown_mode = ShutdownMode::Hard; + + // shutdown is sure to transition tenant to stopping, and wait for all tasks to complete, so + // that we can continue safely to cleanup. 
+ match tenant.shutdown(progress, shutdown_mode).await { + Ok(()) => {} + Err(_other) => { + // if pageserver shutdown or other detach/ignore is already ongoing, we don't want to + // wait for it but return an error right away because these are distinct requests. + slot_guard.revert(); + return Err(TenantStateError::IsStopping(tenant_shard_id)); + } + } + Some(tenant) + } + Some(TenantSlot::Secondary(secondary_state)) => { + tracing::info!("Shutting down in secondary mode"); + secondary_state.shutdown().await; + None + } + Some(TenantSlot::InProgress(_)) => { + // Acquiring a slot guarantees its old value was not InProgress + unreachable!(); + } + None => None, + }; + + match tenant_cleanup + .await + .with_context(|| format!("Failed to run cleanup for tenant {tenant_shard_id}")) + { + Ok(hook_value) => { + // Success: drop the old TenantSlot::Attached. + slot_guard + .drop_old_value() + .expect("We just called shutdown"); + + Ok(hook_value) + } + Err(e) => { + // If we had a Tenant, set it to Broken and put it back in the TenantsMap + if let Some(attached_tenant) = attached_tenant { + attached_tenant.set_broken(e.to_string()).await; + } + // Leave the broken tenant in the map + slot_guard.revert(); + + Err(TenantStateError::Other(e)) + } + } + } } #[derive(Debug, thiserror::Error)] @@ -2427,7 +2644,7 @@ pub(crate) enum TenantMapError { /// this tenant to retry later, or wait for the InProgress state to end. /// /// This structure enforces the important invariant that we do not have overlapping -/// tasks that will try use local storage for a the same tenant ID: we enforce that +/// tasks that will try to use local storage for a the same tenant ID: we enforce that /// the previous contents of a slot have been shut down before the slot can be /// left empty or used for something else /// @@ -2440,7 +2657,7 @@ pub(crate) enum TenantMapError { /// The `old_value` may be dropped before the SlotGuard is dropped, by calling /// `drop_old_value`. It is an error to call this without shutting down /// the conents of `old_value`. -pub(crate) struct SlotGuard { +pub(crate) struct SlotGuard<'a> { tenant_shard_id: TenantShardId, old_value: Option, upserted: bool, @@ -2448,19 +2665,23 @@ pub(crate) struct SlotGuard { /// [`TenantSlot::InProgress`] carries the corresponding Barrier: it will /// release any waiters as soon as this SlotGuard is dropped. 
completion: utils::completion::Completion, + + tenants: &'a std::sync::RwLock, } -impl SlotGuard { +impl<'a> SlotGuard<'a> { fn new( tenant_shard_id: TenantShardId, old_value: Option, completion: utils::completion::Completion, + tenants: &'a std::sync::RwLock, ) -> Self { Self { tenant_shard_id, old_value, upserted: false, completion, + tenants, } } @@ -2484,8 +2705,8 @@ impl SlotGuard { )); } - let replaced = { - let mut locked = TENANTS.write().unwrap(); + let replaced: Option = { + let mut locked = self.tenants.write().unwrap(); if let TenantSlot::InProgress(_) = new_value { // It is never expected to try and upsert InProgress via this path: it should @@ -2593,7 +2814,7 @@ impl SlotGuard { } } -impl Drop for SlotGuard { +impl<'a> Drop for SlotGuard<'a> { fn drop(&mut self) { if self.upserted { return; @@ -2601,7 +2822,7 @@ impl Drop for SlotGuard { // Our old value is already shutdown, or it never existed: it is safe // for us to fully release the TenantSlot back into an empty state - let mut locked = TENANTS.write().unwrap(); + let mut locked = self.tenants.write().unwrap(); let m = match &mut *locked { TenantsMap::Initializing => { @@ -2683,151 +2904,6 @@ enum TenantSlotAcquireMode { MustExist, } -fn tenant_map_acquire_slot( - tenant_shard_id: &TenantShardId, - mode: TenantSlotAcquireMode, -) -> Result { - tenant_map_acquire_slot_impl(tenant_shard_id, &TENANTS, mode) -} - -fn tenant_map_acquire_slot_impl( - tenant_shard_id: &TenantShardId, - tenants: &std::sync::RwLock, - mode: TenantSlotAcquireMode, -) -> Result { - use TenantSlotAcquireMode::*; - METRICS.tenant_slot_writes.inc(); - - let mut locked = tenants.write().unwrap(); - let span = tracing::info_span!("acquire_slot", tenant_id=%tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug()); - let _guard = span.enter(); - - let m = match &mut *locked { - TenantsMap::Initializing => return Err(TenantMapError::StillInitializing.into()), - TenantsMap::ShuttingDown(_) => return Err(TenantMapError::ShuttingDown.into()), - TenantsMap::Open(m) => m, - }; - - use std::collections::btree_map::Entry; - - let entry = m.entry(*tenant_shard_id); - - match entry { - Entry::Vacant(v) => match mode { - MustExist => { - tracing::debug!("Vacant && MustExist: return NotFound"); - Err(TenantSlotError::NotFound(*tenant_shard_id)) - } - _ => { - let (completion, barrier) = utils::completion::channel(); - let inserting = TenantSlot::InProgress(barrier); - METRICS.slot_inserted(&inserting); - v.insert(inserting); - tracing::debug!("Vacant, inserted InProgress"); - Ok(SlotGuard::new(*tenant_shard_id, None, completion)) - } - }, - Entry::Occupied(mut o) => { - // Apply mode-driven checks - match (o.get(), mode) { - (TenantSlot::InProgress(_), _) => { - tracing::debug!("Occupied, failing for InProgress"); - Err(TenantSlotError::InProgress) - } - _ => { - // Happy case: the slot was not in any state that violated our mode - let (completion, barrier) = utils::completion::channel(); - let in_progress = TenantSlot::InProgress(barrier); - METRICS.slot_inserted(&in_progress); - let old_value = o.insert(in_progress); - METRICS.slot_removed(&old_value); - tracing::debug!("Occupied, replaced with InProgress"); - Ok(SlotGuard::new( - *tenant_shard_id, - Some(old_value), - completion, - )) - } - } - } - } -} - -/// Stops and removes the tenant from memory, if it's not [`TenantState::Stopping`] already, bails otherwise. -/// Allows to remove other tenant resources manually, via `tenant_cleanup`. 
-/// If the cleanup fails, tenant will stay in memory in [`TenantState::Broken`] state, and another removal -/// operation would be needed to remove it. -async fn remove_tenant_from_memory( - tenants: &std::sync::RwLock, - tenant_shard_id: TenantShardId, - tenant_cleanup: F, -) -> Result -where - F: std::future::Future>, -{ - let mut slot_guard = - tenant_map_acquire_slot_impl(&tenant_shard_id, tenants, TenantSlotAcquireMode::MustExist)?; - - // allow pageserver shutdown to await for our completion - let (_guard, progress) = completion::channel(); - - // The SlotGuard allows us to manipulate the Tenant object without fear of some - // concurrent API request doing something else for the same tenant ID. - let attached_tenant = match slot_guard.get_old_value() { - Some(TenantSlot::Attached(tenant)) => { - // whenever we remove a tenant from memory, we don't want to flush and wait for upload - let shutdown_mode = ShutdownMode::Hard; - - // shutdown is sure to transition tenant to stopping, and wait for all tasks to complete, so - // that we can continue safely to cleanup. - match tenant.shutdown(progress, shutdown_mode).await { - Ok(()) => {} - Err(_other) => { - // if pageserver shutdown or other detach/ignore is already ongoing, we don't want to - // wait for it but return an error right away because these are distinct requests. - slot_guard.revert(); - return Err(TenantStateError::IsStopping(tenant_shard_id)); - } - } - Some(tenant) - } - Some(TenantSlot::Secondary(secondary_state)) => { - tracing::info!("Shutting down in secondary mode"); - secondary_state.shutdown().await; - None - } - Some(TenantSlot::InProgress(_)) => { - // Acquiring a slot guarantees its old value was not InProgress - unreachable!(); - } - None => None, - }; - - match tenant_cleanup - .await - .with_context(|| format!("Failed to run cleanup for tenant {tenant_shard_id}")) - { - Ok(hook_value) => { - // Success: drop the old TenantSlot::Attached. 
- slot_guard - .drop_old_value() - .expect("We just called shutdown"); - - Ok(hook_value) - } - Err(e) => { - // If we had a Tenant, set it to Broken and put it back in the TenantsMap - if let Some(attached_tenant) = attached_tenant { - attached_tenant.set_broken(e.to_string()).await; - } - // Leave the broken tenant in the map - slot_guard.revert(); - - Err(TenantStateError::Other(e)) - } - } -} - use http_utils::error::ApiError; use pageserver_api::models::TimelineGcRequest; @@ -2838,11 +2914,19 @@ mod tests { use std::collections::BTreeMap; use std::sync::Arc; + use camino::Utf8PathBuf; + use storage_broker::BrokerClientChannel; use tracing::Instrument; use super::super::harness::TenantHarness; use super::TenantsMap; - use crate::tenant::mgr::TenantSlot; + use crate::{ + basebackup_cache::BasebackupCache, + tenant::{ + TenantSharedResources, + mgr::{BackgroundPurges, TenantManager, TenantSlot}, + }, + }; #[tokio::test(start_paused = true)] async fn shutdown_awaits_in_progress_tenant() { @@ -2863,23 +2947,45 @@ mod tests { let _e = span.enter(); let tenants = BTreeMap::from([(id, TenantSlot::Attached(t.clone()))]); - let tenants = Arc::new(std::sync::RwLock::new(TenantsMap::Open(tenants))); // Invoke remove_tenant_from_memory with a cleanup hook that blocks until we manually // permit it to proceed: that will stick the tenant in InProgress + let (basebackup_cache, _) = BasebackupCache::new(Utf8PathBuf::new(), None); + + let tenant_manager = TenantManager { + tenants: std::sync::RwLock::new(TenantsMap::Open(tenants)), + conf: h.conf, + resources: TenantSharedResources { + broker_client: BrokerClientChannel::connect_lazy("foobar.com") + .await + .unwrap(), + remote_storage: h.remote_storage.clone(), + deletion_queue_client: h.deletion_queue.new_client(), + l0_flush_global_state: crate::l0_flush::L0FlushGlobalState::new( + h.conf.l0_flush.clone(), + ), + basebackup_cache, + feature_resolver: crate::feature_resolver::FeatureResolver::new_disabled(), + }, + cancel: tokio_util::sync::CancellationToken::new(), + background_purges: BackgroundPurges::default(), + }; + + let tenant_manager = Arc::new(tenant_manager); + let (until_cleanup_completed, can_complete_cleanup) = utils::completion::channel(); let (until_cleanup_started, cleanup_started) = utils::completion::channel(); let mut remove_tenant_from_memory_task = { + let tenant_manager = tenant_manager.clone(); let jh = tokio::spawn({ - let tenants = tenants.clone(); async move { let cleanup = async move { drop(until_cleanup_started); can_complete_cleanup.wait().await; anyhow::Ok(()) }; - super::remove_tenant_from_memory(&tenants, id, cleanup).await + tenant_manager.remove_tenant_from_memory(id, cleanup).await } .instrument(h.span()) }); @@ -2892,9 +2998,11 @@ mod tests { let mut shutdown_task = { let (until_shutdown_started, shutdown_started) = utils::completion::channel(); + let tenant_manager = tenant_manager.clone(); + let shutdown_task = tokio::spawn(async move { drop(until_shutdown_started); - super::shutdown_all_tenants0(&tenants).await; + tenant_manager.shutdown_all_tenants0().await; }); shutdown_started.wait().await; diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index a5cd8989aa..6060c42cbb 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -427,8 +427,8 @@ impl GcBlocking { #[cfg(test)] mod tests { + use postgres_ffi::PgMajorVersion; use std::str::FromStr; - use 
utils::id::TimelineId; use super::*; @@ -831,7 +831,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), archived_at: None, @@ -893,7 +893,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), archived_at: Some(parse_naive_datetime("2023-04-29T09:00:00.123000000")), @@ -957,7 +957,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), @@ -1033,7 +1033,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), @@ -1114,7 +1114,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), @@ -1199,7 +1199,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), @@ -1287,7 +1287,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), diff --git a/pageserver/src/tenant/remote_timeline_client/upload.rs b/pageserver/src/tenant/remote_timeline_client/upload.rs index 89f6136530..ffb4717d9f 100644 --- a/pageserver/src/tenant/remote_timeline_client/upload.rs +++ b/pageserver/src/tenant/remote_timeline_client/upload.rs @@ -1,6 +1,7 @@ //! Helper functions to upload files to remote storage with a RemoteStorage use std::io::{ErrorKind, SeekFrom}; +use std::num::NonZeroU32; use std::time::SystemTime; use anyhow::{Context, bail}; @@ -228,11 +229,25 @@ pub(crate) async fn time_travel_recover_tenant( let timelines_path = super::remote_timelines_path(tenant_shard_id); prefixes.push(timelines_path); } + + // Limit the number of versions deletions, mostly so that we don't + // keep requesting forever if the list is too long, as we'd put the + // list in RAM. + // Building a list of 100k entries that reaches the limit roughly takes + // 40 seconds, and roughly corresponds to tenants of 2 TiB physical size. + const COMPLEXITY_LIMIT: Option = NonZeroU32::new(100_000); + for prefix in &prefixes { backoff::retry( || async { storage - .time_travel_recover(Some(prefix), timestamp, done_if_after, cancel) + .time_travel_recover( + Some(prefix), + timestamp, + done_if_after, + cancel, + COMPLEXITY_LIMIT, + ) .await }, |e| !matches!(e, TimeTravelError::Other(_)), diff --git a/pageserver/src/tenant/secondary.rs b/pageserver/src/tenant/secondary.rs index 2fa0ed9be9..e06788543a 100644 --- a/pageserver/src/tenant/secondary.rs +++ b/pageserver/src/tenant/secondary.rs @@ -101,7 +101,7 @@ pub(crate) struct SecondaryTenant { // Secondary mode does not need the full shard identity or the pageserver_api::models::TenantConfig. 
However, // storing these enables us to report our full LocationConf, enabling convenient reconciliation // by the control plane (see [`Self::get_location_conf`]) - shard_identity: ShardIdentity, + pub(crate) shard_identity: ShardIdentity, tenant_conf: std::sync::Mutex, // Internal state used by the Downloader. diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index dd49c843f3..6b315dc4bc 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -1427,7 +1427,7 @@ async fn init_timeline_state( let local_meta = dentry .metadata() .await - .fatal_err(&format!("Read metadata on {}", file_path)); + .fatal_err(&format!("Read metadata on {file_path}")); let file_name = file_path.file_name().expect("created it from the dentry"); if crate::is_temporary(&file_path) diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 9d15e7c4de..9fbb9d2438 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -34,11 +34,11 @@ pub use layer_name::{DeltaLayerName, ImageLayerName, LayerName}; use pageserver_api::config::GetVectoredConcurrentIo; use pageserver_api::key::Key; use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum}; -use pageserver_api::record::NeonWalRecord; -use pageserver_api::value::Value; use tracing::{Instrument, info_span, trace}; use utils::lsn::Lsn; use utils::sync::gate::GateGuard; +use wal_decoder::models::record::NeonWalRecord; +use wal_decoder::models::value::Value; use self::inmemory_layer::InMemoryLayerFileId; use super::PageReconstructError; @@ -109,7 +109,7 @@ pub(crate) enum OnDiskValue { /// Reconstruct data accumulated for a single key during a vectored get #[derive(Debug, Default)] -pub(crate) struct VectoredValueReconstructState { +pub struct VectoredValueReconstructState { pub(crate) on_disk_values: Vec<(Lsn, OnDiskValueIoWaiter)>, pub(crate) situation: ValueReconstructSituation, @@ -244,13 +244,60 @@ impl VectoredValueReconstructState { res } + + /// Benchmarking utility to await for the completion of all pending ios + /// + /// # Cancel-Safety + /// + /// Technically fine to stop polling this future, but, the IOs will still + /// be executed to completion by the sidecar task and hold on to / consume resources. + /// Better not do it to make reasonsing about the system easier. + #[cfg(feature = "benchmarking")] + pub async fn sink_pending_ios(self) -> Result<(), std::io::Error> { + let mut res = Ok(()); + + // We should try hard not to bail early, so that by the time we return from this + // function, all IO for this value is done. It's not required -- we could totally + // stop polling the IO futures in the sidecar task, they need to support that, + // but just stopping to poll doesn't reduce the IO load on the disk. It's easier + // to reason about the system if we just wait for all IO to complete, even if + // we're no longer interested in the result. + // + // Revisit this when IO futures are replaced with a more sophisticated IO system + // and an IO scheduler, where we know which IOs were submitted and which ones + // just queued. Cf the comment on IoConcurrency::spawn_io. + for (_lsn, waiter) in self.on_disk_values { + let value_recv_res = waiter + .wait_completion() + // we rely on the caller to poll us to completion, so this is not a bail point + .await; + + match (&mut res, value_recv_res) { + (Err(_), _) => { + // We've already failed, no need to process more. 
+ } + (Ok(_), Err(_wait_err)) => { + // This shouldn't happen - likely the sidecar task panicked. + unreachable!(); + } + (Ok(_), Ok(Err(err))) => { + let err: std::io::Error = err; + res = Err(err); + } + (Ok(_ok), Ok(Ok(OnDiskValue::RawImage(_img)))) => {} + (Ok(_ok), Ok(Ok(OnDiskValue::WalRecordOrImage(_buf)))) => {} + } + } + + res + } } /// Bag of data accumulated during a vectored get.. -pub(crate) struct ValuesReconstructState { +pub struct ValuesReconstructState { /// The keys will be removed after `get_vectored` completes. The caller outside `Timeline` /// should not expect to get anything from this hashmap. - pub(crate) keys: HashMap, + pub keys: HashMap, /// The keys which are already retrieved keys_done: KeySpaceRandomAccum, @@ -272,7 +319,7 @@ pub(crate) struct ValuesReconstructState { /// The desired end state is that we always do parallel IO. /// This struct and the dispatching in the impl will be removed once /// we've built enough confidence. -pub(crate) enum IoConcurrency { +pub enum IoConcurrency { Sequential, SidecarTask { task_id: usize, @@ -317,10 +364,7 @@ impl IoConcurrency { Self::spawn(SelectedIoConcurrency::Sequential) } - pub(crate) fn spawn_from_conf( - conf: GetVectoredConcurrentIo, - gate_guard: GateGuard, - ) -> IoConcurrency { + pub fn spawn_from_conf(conf: GetVectoredConcurrentIo, gate_guard: GateGuard) -> IoConcurrency { let selected = match conf { GetVectoredConcurrentIo::Sequential => SelectedIoConcurrency::Sequential, GetVectoredConcurrentIo::SidecarTask => SelectedIoConcurrency::SidecarTask(gate_guard), @@ -425,16 +469,6 @@ impl IoConcurrency { } } - pub(crate) fn clone(&self) -> Self { - match self { - IoConcurrency::Sequential => IoConcurrency::Sequential, - IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask { - task_id: *task_id, - ios_tx: ios_tx.clone(), - }, - } - } - /// Submit an IO to be executed in the background. DEADLOCK RISK, read the full doc string. /// /// The IO is represented as an opaque future. @@ -573,6 +607,18 @@ impl IoConcurrency { } } +impl Clone for IoConcurrency { + fn clone(&self) -> Self { + match self { + IoConcurrency::Sequential => IoConcurrency::Sequential, + IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask { + task_id: *task_id, + ios_tx: ios_tx.clone(), + }, + } + } +} + /// Make noise in case the [`ValuesReconstructState`] gets dropped while /// there are still IOs in flight. /// Refer to `collect_pending_ios` for why we prefer not to do that. 
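The error aggregation in `sink_pending_ios` above intentionally keeps awaiting every pending completion even after the first failure, and only remembers the first error. A minimal self-contained sketch of that "don't bail early" pattern, using a hypothetical `drain_all` helper over plain oneshot channels instead of the pageserver's `OnDiskValueIoWaiter` type (illustrative names only):

    use tokio::sync::oneshot;

    // Await *every* receiver even after the first failure, remembering only
    // the first error -- the same rule the comment above describes for
    // sink_pending_ios / collect_pending_ios.
    async fn drain_all(
        waiters: Vec<oneshot::Receiver<Result<(), std::io::Error>>>,
    ) -> Result<(), std::io::Error> {
        let mut res = Ok(());
        for rx in waiters {
            match rx.await {
                // Sender dropped without reporting: surface it as an error
                // (purely illustrative handling).
                Err(_recv_err) => {
                    if res.is_ok() {
                        res = Err(std::io::Error::other("io task dropped its completion"));
                    }
                }
                Ok(io_result) => {
                    if res.is_ok() {
                        res = io_result;
                    }
                }
            }
        }
        res
    }

As the comment in the diff notes, merely dropping the waiters would not stop the sidecar task from executing the IOs, so waiting for all of them keeps resource accounting and reasoning about the system simpler.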
@@ -603,7 +649,7 @@ impl Drop for ValuesReconstructState { } impl ValuesReconstructState { - pub(crate) fn new(io_concurrency: IoConcurrency) -> Self { + pub fn new(io_concurrency: IoConcurrency) -> Self { Self { keys: HashMap::new(), keys_done: KeySpaceRandomAccum::new(), diff --git a/pageserver/src/tenant/storage_layer/batch_split_writer.rs b/pageserver/src/tenant/storage_layer/batch_split_writer.rs index 51f2e909a2..7f0ca5b337 100644 --- a/pageserver/src/tenant/storage_layer/batch_split_writer.rs +++ b/pageserver/src/tenant/storage_layer/batch_split_writer.rs @@ -4,11 +4,11 @@ use std::sync::Arc; use bytes::Bytes; use pageserver_api::key::{KEY_SIZE, Key}; -use pageserver_api::value::Value; use tokio_util::sync::CancellationToken; use utils::id::TimelineId; use utils::lsn::Lsn; use utils::shard::TenantShardId; +use wal_decoder::models::value::Value; use super::errors::PutError; use super::layer::S3_UPLOAD_LIMIT; @@ -55,11 +55,11 @@ pub struct BatchLayerWriter { } impl BatchLayerWriter { - pub async fn new(conf: &'static PageServerConf) -> anyhow::Result { - Ok(Self { + pub fn new(conf: &'static PageServerConf) -> Self { + Self { generated_layer_writers: Vec::new(), conf, - }) + } } pub fn add_unfinished_image_writer( @@ -182,7 +182,7 @@ impl BatchLayerWriter { /// An image writer that takes images and produces multiple image layers. #[must_use] pub struct SplitImageLayerWriter<'a> { - inner: ImageLayerWriter, + inner: Option, target_layer_size: u64, lsn: Lsn, conf: &'static PageServerConf, @@ -196,7 +196,7 @@ pub struct SplitImageLayerWriter<'a> { impl<'a> SplitImageLayerWriter<'a> { #[allow(clippy::too_many_arguments)] - pub async fn new( + pub fn new( conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, @@ -205,30 +205,19 @@ impl<'a> SplitImageLayerWriter<'a> { target_layer_size: u64, gate: &'a utils::sync::gate::Gate, cancel: CancellationToken, - ctx: &RequestContext, - ) -> anyhow::Result { - Ok(Self { + ) -> Self { + Self { target_layer_size, - inner: ImageLayerWriter::new( - conf, - timeline_id, - tenant_shard_id, - &(start_key..Key::MAX), - lsn, - gate, - cancel.clone(), - ctx, - ) - .await?, + inner: None, conf, timeline_id, tenant_shard_id, - batches: BatchLayerWriter::new(conf).await?, + batches: BatchLayerWriter::new(conf), lsn, start_key, gate, cancel, - }) + } } pub async fn put_image( @@ -237,12 +226,31 @@ impl<'a> SplitImageLayerWriter<'a> { img: Bytes, ctx: &RequestContext, ) -> Result<(), PutError> { + if self.inner.is_none() { + self.inner = Some( + ImageLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + &(self.start_key..Key::MAX), + self.lsn, + self.gate, + self.cancel.clone(), + ctx, + ) + .await + .map_err(PutError::Other)?, + ); + } + + let inner = self.inner.as_mut().unwrap(); + // The current estimation is an upper bound of the space that the key/image could take // because we did not consider compression in this estimation. The resulting image layer // could be smaller than the target size. 
let addition_size_estimation = KEY_SIZE as u64 + img.len() as u64; - if self.inner.num_keys() >= 1 - && self.inner.estimated_size() + addition_size_estimation >= self.target_layer_size + if inner.num_keys() >= 1 + && inner.estimated_size() + addition_size_estimation >= self.target_layer_size { let next_image_writer = ImageLayerWriter::new( self.conf, @@ -256,7 +264,7 @@ impl<'a> SplitImageLayerWriter<'a> { ) .await .map_err(PutError::Other)?; - let prev_image_writer = std::mem::replace(&mut self.inner, next_image_writer); + let prev_image_writer = std::mem::replace(inner, next_image_writer); self.batches.add_unfinished_image_writer( prev_image_writer, self.start_key..key, @@ -264,7 +272,7 @@ impl<'a> SplitImageLayerWriter<'a> { ); self.start_key = key; } - self.inner.put_image(key, img, ctx).await + inner.put_image(key, img, ctx).await } pub(crate) async fn finish_with_discard_fn( @@ -281,8 +289,10 @@ impl<'a> SplitImageLayerWriter<'a> { let Self { mut batches, inner, .. } = self; - if inner.num_keys() != 0 { - batches.add_unfinished_image_writer(inner, self.start_key..end_key, self.lsn); + if let Some(inner) = inner { + if inner.num_keys() != 0 { + batches.add_unfinished_image_writer(inner, self.start_key..end_key, self.lsn); + } } batches.finish_with_discard_fn(tline, ctx, discard_fn).await } @@ -319,7 +329,7 @@ pub struct SplitDeltaLayerWriter<'a> { } impl<'a> SplitDeltaLayerWriter<'a> { - pub async fn new( + pub fn new( conf: &'static PageServerConf, timeline_id: TimelineId, tenant_shard_id: TenantShardId, @@ -327,8 +337,8 @@ impl<'a> SplitDeltaLayerWriter<'a> { target_layer_size: u64, gate: &'a utils::sync::gate::Gate, cancel: CancellationToken, - ) -> anyhow::Result { - Ok(Self { + ) -> Self { + Self { target_layer_size, inner: None, conf, @@ -336,10 +346,10 @@ impl<'a> SplitDeltaLayerWriter<'a> { tenant_shard_id, lsn_range, last_key_written: Key::MIN, - batches: BatchLayerWriter::new(conf).await?, + batches: BatchLayerWriter::new(conf), gate, cancel, - }) + } } pub async fn put_value( @@ -497,10 +507,7 @@ mod tests { 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), - &ctx, - ) - .await - .unwrap(); + ); let mut delta_writer = SplitDeltaLayerWriter::new( tenant.conf, @@ -510,9 +517,7 @@ mod tests { 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), - ) - .await - .unwrap(); + ); image_writer .put_image(get_key(0), get_img(0), &ctx) @@ -578,10 +583,7 @@ mod tests { 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), - &ctx, - ) - .await - .unwrap(); + ); let mut delta_writer = SplitDeltaLayerWriter::new( tenant.conf, tline.timeline_id, @@ -590,9 +592,7 @@ mod tests { 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), - ) - .await - .unwrap(); + ); const N: usize = 2000; for i in 0..N { let i = i as u32; @@ -679,10 +679,7 @@ mod tests { 4 * 1024, &tline.gate, tline.cancel.clone(), - &ctx, - ) - .await - .unwrap(); + ); let mut delta_writer = SplitDeltaLayerWriter::new( tenant.conf, @@ -692,9 +689,7 @@ mod tests { 4 * 1024, &tline.gate, tline.cancel.clone(), - ) - .await - .unwrap(); + ); image_writer .put_image(get_key(0), get_img(0), &ctx) @@ -770,9 +765,7 @@ mod tests { 4 * 1024 * 1024, &tline.gate, tline.cancel.clone(), - ) - .await - .unwrap(); + ); for i in 0..N { let i = i as u32; diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index 2c1b27c8d5..c2f76c859c 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -44,7 +44,6 @@ use 
pageserver_api::key::{DBDIR_KEY, KEY_SIZE, Key}; use pageserver_api::keyspace::KeySpace; use pageserver_api::models::ImageCompressionAlgorithm; use pageserver_api::shard::TenantShardId; -use pageserver_api::value::Value; use serde::{Deserialize, Serialize}; use tokio::sync::OnceCell; use tokio_epoll_uring::IoBuf; @@ -54,6 +53,7 @@ use utils::bin_ser::BeSer; use utils::bin_ser::SerializeError; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; +use wal_decoder::models::value::Value; use super::errors::PutError; use super::{ @@ -783,7 +783,7 @@ impl DeltaLayer { ctx, ) .await - .with_context(|| format!("Failed to open file '{}'", path))?; + .with_context(|| format!("Failed to open file '{path}'"))?; let file_id = page_cache::next_file_id(); let block_reader = FileBlockReader::new(&file, file_id); let summary_blk = block_reader.read_blk(0, ctx).await?; @@ -1306,7 +1306,7 @@ impl DeltaLayerInner { // is it an image or will_init walrecord? // FIXME: this could be handled by threading the BlobRef to the // VectoredReadBuilder - let will_init = pageserver_api::value::ValueBytes::will_init(&data) + let will_init = wal_decoder::models::value::ValueBytes::will_init(&data) .inspect_err(|_e| { #[cfg(feature = "testing")] tracing::error!(data=?utils::Hex(&data), err=?_e, %key, %lsn, "failed to parse will_init out of serialized value"); @@ -1369,7 +1369,7 @@ impl DeltaLayerInner { format!(" img {} bytes", img.len()) } Value::WalRecord(rec) => { - let wal_desc = pageserver_api::record::describe_wal_record(&rec)?; + let wal_desc = wal_decoder::models::record::describe_wal_record(&rec)?; format!( " rec {} bytes will_init: {} {}", buf.len(), @@ -1401,7 +1401,7 @@ impl DeltaLayerInner { match val { Value::Image(img) => { let checkpoint = CheckPoint::decode(&img)?; - println!(" CHECKPOINT: {:?}", checkpoint); + println!(" CHECKPOINT: {checkpoint:?}"); } Value::WalRecord(_rec) => { println!(" unexpected walrecord value for checkpoint key"); @@ -1622,12 +1622,6 @@ impl DeltaLayerIterator<'_> { pub(crate) mod test { use std::collections::BTreeMap; - use bytes::Bytes; - use itertools::MinMaxResult; - use pageserver_api::value::Value; - use rand::prelude::{SeedableRng, SliceRandom, StdRng}; - use rand::{Rng, RngCore}; - use super::*; use crate::DEFAULT_PG_VERSION; use crate::context::DownloadBehavior; @@ -1635,7 +1629,13 @@ pub(crate) mod test { use crate::tenant::disk_btree::tests::TestDisk; use crate::tenant::harness::{TIMELINE_ID, TenantHarness}; use crate::tenant::storage_layer::{Layer, ResidentLayer}; + use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::{TenantShard, Timeline}; + use bytes::Bytes; + use itertools::MinMaxResult; + use postgres_ffi::PgMajorVersion; + use rand::prelude::{SeedableRng, SliceRandom, StdRng}; + use rand::{Rng, RngCore}; /// Construct an index for a fictional delta layer and and then /// traverse in order to plan vectored reads for a query. 
Finally, @@ -1987,7 +1987,7 @@ pub(crate) mod test { #[tokio::test] async fn copy_delta_prefix_smoke() { use bytes::Bytes; - use pageserver_api::record::NeonWalRecord; + use wal_decoder::models::record::NeonWalRecord; let h = crate::tenant::harness::TenantHarness::create("truncate_delta_smoke") .await @@ -1995,14 +1995,14 @@ pub(crate) mod test { let (tenant, ctx) = h.load().await; let ctx = &ctx; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, ctx) + .create_test_timeline(TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, ctx) .await .unwrap(); let ctx = &ctx.with_scope_timeline(&timeline); let initdb_layer = timeline .layers - .read() + .read(crate::tenant::timeline::layer_manager::LayerManagerLockHolder::Testing) .await .likely_resident_layers() .next() @@ -2078,7 +2078,7 @@ pub(crate) mod test { let new_layer = timeline .layers - .read() + .read(LayerManagerLockHolder::Testing) .await .likely_resident_layers() .find(|&x| x != &initdb_layer) diff --git a/pageserver/src/tenant/storage_layer/filter_iterator.rs b/pageserver/src/tenant/storage_layer/filter_iterator.rs index 1a330ecfc2..d345195446 100644 --- a/pageserver/src/tenant/storage_layer/filter_iterator.rs +++ b/pageserver/src/tenant/storage_layer/filter_iterator.rs @@ -4,8 +4,8 @@ use std::sync::Arc; use anyhow::bail; use pageserver_api::key::Key; use pageserver_api::keyspace::{KeySpace, SparseKeySpace}; -use pageserver_api::value::Value; use utils::lsn::Lsn; +use wal_decoder::models::value::Value; use super::PersistentLayerKey; use super::merge_iterator::{MergeIterator, MergeIteratorItem}; @@ -126,7 +126,6 @@ mod tests { #[tokio::test] async fn filter_keyspace_iterator() { use bytes::Bytes; - use pageserver_api::value::Value; let harness = TenantHarness::create("filter_iterator_filter_keyspace_iterator") .await diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 740f53f928..9f76f697d3 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -42,7 +42,6 @@ use pageserver_api::config::MaxVectoredReadBytes; use pageserver_api::key::{DBDIR_KEY, KEY_SIZE, Key}; use pageserver_api::keyspace::KeySpace; use pageserver_api::shard::{ShardIdentity, TenantShardId}; -use pageserver_api::value::Value; use serde::{Deserialize, Serialize}; use tokio::sync::OnceCell; use tokio_stream::StreamExt; @@ -52,6 +51,7 @@ use utils::bin_ser::BeSer; use utils::bin_ser::SerializeError; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; +use wal_decoder::models::value::Value; use super::errors::PutError; use super::layer_name::ImageLayerName; @@ -272,8 +272,7 @@ impl ImageLayer { conf.timeline_path(&tenant_shard_id, &timeline_id) .join(format!( - "{fname}.{:x}.{TEMP_FILE_SUFFIX}", - filename_disambiguator + "{fname}.{filename_disambiguator:x}.{TEMP_FILE_SUFFIX}" )) } @@ -370,7 +369,7 @@ impl ImageLayer { ctx, ) .await - .with_context(|| format!("Failed to open file '{}'", path))?; + .with_context(|| format!("Failed to open file '{path}'"))?; let file_id = page_cache::next_file_id(); let block_reader = FileBlockReader::new(&file, file_id); let summary_blk = block_reader.read_blk(0, ctx).await?; @@ -1232,10 +1231,10 @@ mod test { use itertools::Itertools; use pageserver_api::key::Key; use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}; - use pageserver_api::value::Value; use utils::generation::Generation; use utils::id::{TenantId, 
TimelineId}; use utils::lsn::Lsn; + use wal_decoder::models::value::Value; use super::{ImageLayerIterator, ImageLayerWriter}; use crate::DEFAULT_PG_VERSION; @@ -1475,7 +1474,7 @@ mod test { assert_eq!(l1, expect_lsn); assert_eq!(&i1, i2); } - (o1, o2) => panic!("iterators length mismatch: {:?}, {:?}", o1, o2), + (o1, o2) => panic!("iterators length mismatch: {o1:?}, {o2:?}"), } } } diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer.rs b/pageserver/src/tenant/storage_layer/inmemory_layer.rs index 200beba115..c4d53c6405 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer.rs @@ -70,23 +70,15 @@ pub struct InMemoryLayer { /// We use a separate lock for the index to reduce the critical section /// during which reads cannot be planned. /// - /// If you need access to both the index and the underlying file at the same time, - /// respect the following locking order to avoid deadlocks: - /// 1. [`InMemoryLayer::inner`] - /// 2. [`InMemoryLayer::index`] - /// - /// Note that the file backing [`InMemoryLayer::inner`] is append-only, - /// so it is not necessary to hold simultaneous locks on index. - /// This avoids holding index locks across IO, and is crucial for avoiding read tail latency. + /// Note that the file backing [`InMemoryLayer::file`] is append-only, + /// so it is not necessary to hold a lock on the index while reading or writing from the file. /// In particular: - /// 1. It is safe to read and release [`InMemoryLayer::index`] before locking and reading from [`InMemoryLayer::inner`]. - /// 2. It is safe to write and release [`InMemoryLayer::inner`] before locking and updating [`InMemoryLayer::index`]. + /// 1. It is safe to read and release [`InMemoryLayer::index`] before reading from [`InMemoryLayer::file`]. + /// 2. It is safe to write to [`InMemoryLayer::file`] before locking and updating [`InMemoryLayer::index`]. index: RwLock>>, - /// The above fields never change, except for `end_lsn`, which is only set once, - /// and `index` (see rationale there). - /// All other changing parts are in `inner`, and protected by a mutex. - inner: RwLock, + /// Wrapper for the actual on-disk file. Uses interior mutability for concurrent reads/writes. + file: EphemeralFile, estimated_in_mem_size: AtomicU64, } @@ -96,20 +88,10 @@ impl std::fmt::Debug for InMemoryLayer { f.debug_struct("InMemoryLayer") .field("start_lsn", &self.start_lsn) .field("end_lsn", &self.end_lsn) - .field("inner", &self.inner) .finish() } } -pub struct InMemoryLayerInner { - /// The values are stored in a serialized format in this file. - /// Each serialized Value is preceded by a 'u32' length field. - /// PerSeg::page_versions map stores offsets into this file. - file: EphemeralFile, - - resource_units: GlobalResourceUnits, -} - /// Support the same max blob length as blob_io, because ultimately /// all the InMemoryLayer contents end up being written into a delta layer, /// using the [`crate::tenant::blob_io`]. @@ -258,12 +240,6 @@ struct IndexEntryUnpacked { pos: u64, } -impl std::fmt::Debug for InMemoryLayerInner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InMemoryLayerInner").finish() - } -} - /// State shared by all in-memory (ephemeral) layers. Updated infrequently during background ticks in Timeline, /// to minimize contention. 
/// @@ -280,7 +256,7 @@ pub(crate) struct GlobalResources { } // Per-timeline RAII struct for its contribution to [`GlobalResources`] -struct GlobalResourceUnits { +pub(crate) struct GlobalResourceUnits { // How many dirty bytes have I added to the global dirty_bytes: this guard object is responsible // for decrementing the global counter by this many bytes when dropped. dirty_bytes: u64, @@ -292,7 +268,7 @@ impl GlobalResourceUnits { // updated when the Timeline "ticks" in the background. const MAX_SIZE_DRIFT: u64 = 10 * 1024 * 1024; - fn new() -> Self { + pub(crate) fn new() -> Self { GLOBAL_RESOURCES .dirty_layers .fetch_add(1, AtomicOrdering::Relaxed); @@ -304,7 +280,7 @@ impl GlobalResourceUnits { /// /// Returns the effective layer size limit that should be applied, if any, to keep /// the total number of dirty bytes below the configured maximum. - fn publish_size(&mut self, size: u64) -> Option { + pub(crate) fn publish_size(&mut self, size: u64) -> Option { let new_global_dirty_bytes = match size.cmp(&self.dirty_bytes) { Ordering::Equal => GLOBAL_RESOURCES.dirty_bytes.load(AtomicOrdering::Relaxed), Ordering::Greater => { @@ -349,7 +325,7 @@ impl GlobalResourceUnits { // Call publish_size if the input size differs from last published size by more than // the drift limit - fn maybe_publish_size(&mut self, size: u64) { + pub(crate) fn maybe_publish_size(&mut self, size: u64) { let publish = match size.cmp(&self.dirty_bytes) { Ordering::Equal => false, Ordering::Greater => size - self.dirty_bytes > Self::MAX_SIZE_DRIFT, @@ -398,8 +374,8 @@ impl InMemoryLayer { } } - pub(crate) fn try_len(&self) -> Option { - self.inner.try_read().map(|i| i.file.len()).ok() + pub(crate) fn len(&self) -> u64 { + self.file.len() } pub(crate) fn assert_writable(&self) { @@ -430,7 +406,7 @@ impl InMemoryLayer { // Look up the keys in the provided keyspace and update // the reconstruct state with whatever is found. - pub(crate) async fn get_values_reconstruct_data( + pub async fn get_values_reconstruct_data( self: &Arc, keyspace: KeySpace, lsn_range: Range, @@ -479,14 +455,13 @@ impl InMemoryLayer { } } } - drop(index); // release the lock before we spawn the IO; if it's serial-mode IO we will deadlock on the read().await below + drop(index); // release the lock before we spawn the IO let read_from = Arc::clone(self); let read_ctx = ctx.attached_child(); reconstruct_state .spawn_io(async move { - let inner = read_from.inner.read().await; let f = vectored_dio_read::execute( - &inner.file, + &read_from.file, reads .iter() .flat_map(|(_, value_reads)| value_reads.iter().map(|v| &v.read)), @@ -518,7 +493,6 @@ impl InMemoryLayer { // This is kinda forced for InMemoryLayer because we need to inner.read() anyway, // but it's less obvious for DeltaLayer and ImageLayer. So, keep this explicit // drop for consistency among all three layer types. - drop(inner); drop(read_from); }) .await; @@ -537,7 +511,7 @@ fn inmem_layer_log_display( start_lsn: Lsn, end_lsn: Lsn, ) -> std::fmt::Result { - write!(f, "timeline {} in-memory ", timeline)?; + write!(f, "timeline {timeline} in-memory ")?; inmem_layer_display(f, start_lsn, end_lsn) } @@ -549,12 +523,6 @@ impl std::fmt::Display for InMemoryLayer { } impl InMemoryLayer { - /// Get layer size. 
- pub async fn size(&self) -> Result { - let inner = self.inner.read().await; - Ok(inner.file.len()) - } - pub fn estimated_in_mem_size(&self) -> u64 { self.estimated_in_mem_size.load(AtomicOrdering::Relaxed) } @@ -587,10 +555,7 @@ impl InMemoryLayer { end_lsn: OnceLock::new(), opened_at: Instant::now(), index: RwLock::new(BTreeMap::new()), - inner: RwLock::new(InMemoryLayerInner { - file, - resource_units: GlobalResourceUnits::new(), - }), + file, estimated_in_mem_size: AtomicU64::new(0), }) } @@ -599,41 +564,37 @@ impl InMemoryLayer { /// /// Errors are not retryable, the [`InMemoryLayer`] must be discarded, and not be read from. /// The reason why it's not retryable is that the [`EphemeralFile`] writes are not retryable. + /// + /// This method shall not be called concurrently. We enforce this property via [`crate::tenant::Timeline::write_lock`]. + /// /// TODO: it can be made retryable if we aborted the process on EphemeralFile write errors. pub async fn put_batch( &self, serialized_batch: SerializedValueBatch, ctx: &RequestContext, ) -> anyhow::Result<()> { - let (base_offset, metadata) = { - let mut inner = self.inner.write().await; - self.assert_writable(); + self.assert_writable(); - let base_offset = inner.file.len(); + let base_offset = self.file.len(); - let SerializedValueBatch { - raw, - metadata, - max_lsn: _, - len: _, - } = serialized_batch; + let SerializedValueBatch { + raw, + metadata, + max_lsn: _, + len: _, + } = serialized_batch; - // Write the batch to the file - inner.file.write_raw(&raw, ctx).await?; - let new_size = inner.file.len(); + // Write the batch to the file + self.file.write_raw(&raw, ctx).await?; + let new_size = self.file.len(); - let expected_new_len = base_offset - .checked_add(raw.len().into_u64()) - // write_raw would error if we were to overflow u64. - // also IndexEntry and higher levels in - //the code don't allow the file to grow that large - .unwrap(); - assert_eq!(new_size, expected_new_len); - - inner.resource_units.maybe_publish_size(new_size); - - (base_offset, metadata) - }; + let expected_new_len = base_offset + .checked_add(raw.len().into_u64()) + // write_raw would error if we were to overflow u64. + // also IndexEntry and higher levels in + //the code don't allow the file to grow that large + .unwrap(); + assert_eq!(new_size, expected_new_len); // Update the index with the new entries let mut index = self.index.write().await; @@ -686,10 +647,8 @@ impl InMemoryLayer { self.opened_at } - pub(crate) async fn tick(&self) -> Option { - let mut inner = self.inner.write().await; - let size = inner.file.len(); - inner.resource_units.publish_size(size) + pub(crate) fn tick(&self) -> Option { + self.file.tick() } pub(crate) async fn put_tombstones(&self, _key_ranges: &[(Range, Lsn)]) -> Result<()> { @@ -753,12 +712,6 @@ impl InMemoryLayer { gate: &utils::sync::gate::Gate, cancel: CancellationToken, ) -> Result> { - // Grab the lock in read-mode. We hold it over the I/O, but because this - // layer is not writeable anymore, no one should be trying to acquire the - // write lock on it, so we shouldn't block anyone. See the comment on - // [`InMemoryLayer::freeze`] to understand how locking between the append path - // and layer flushing works. - let inner = self.inner.read().await; let index = self.index.read().await; use l0_flush::Inner; @@ -793,7 +746,7 @@ impl InMemoryLayer { match l0_flush_global_state { l0_flush::Inner::Direct { .. 
} => { - let file_contents = inner.file.load_to_io_buf(ctx).await?; + let file_contents = self.file.load_to_io_buf(ctx).await?; let file_contents = file_contents.freeze(); for (key, vec_map) in index.iter() { diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs b/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs index ea354fc716..27fbc6f5fb 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs @@ -380,7 +380,7 @@ impl std::fmt::Debug for LogicalReadState { write!(f, "Ongoing({:?})", BufferDebug::from(b as &dyn Buffer)) } LogicalReadState::Ok(b) => write!(f, "Ok({:?})", BufferDebug::from(b as &dyn Buffer)), - LogicalReadState::Error(e) => write!(f, "Error({:?})", e), + LogicalReadState::Error(e) => write!(f, "Error({e:?})"), LogicalReadState::Undefined => write!(f, "Undefined"), } } diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 3d55972017..0be13e67a8 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -105,7 +105,7 @@ impl std::fmt::Display for Layer { impl std::fmt::Debug for Layer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self) + write!(f, "{self}") } } diff --git a/pageserver/src/tenant/storage_layer/layer/tests.rs b/pageserver/src/tenant/storage_layer/layer/tests.rs index b6fd4678d6..9bdce163c9 100644 --- a/pageserver/src/tenant/storage_layer/layer/tests.rs +++ b/pageserver/src/tenant/storage_layer/layer/tests.rs @@ -1,6 +1,7 @@ use std::time::UNIX_EPOCH; use pageserver_api::key::{CONTROLFILE_KEY, Key}; +use postgres_ffi::PgMajorVersion; use tokio::task::JoinSet; use utils::completion::{self, Completion}; use utils::id::TimelineId; @@ -10,6 +11,7 @@ use super::*; use crate::context::DownloadBehavior; use crate::tenant::harness::{TenantHarness, test_img}; use crate::tenant::storage_layer::{IoConcurrency, LayerVisibilityHint}; +use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; /// Used in tests to advance a future to wanted await point, and not futher. const ADVANCE: std::time::Duration = std::time::Duration::from_secs(3600); @@ -44,7 +46,7 @@ async fn smoke_test() { .create_test_timeline_with_layers( TimelineId::generate(), Lsn(0x10), - 14, + PgMajorVersion::PG14, &ctx, Default::default(), // in-memory layers Default::default(), @@ -59,7 +61,7 @@ async fn smoke_test() { // there to avoid the timeline being illegally empty let (layer, dummy_layer) = { let mut layers = { - let layers = timeline.layers.read().await; + let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::>() }; @@ -215,7 +217,7 @@ async fn smoke_test() { // Simulate GC removing our test layer. 
{ - let mut g = timeline.layers.write().await; + let mut g = timeline.layers.write(LayerManagerLockHolder::Testing).await; let layers = &[layer]; g.open_mut().unwrap().finish_gc_timeline(layers); @@ -255,13 +257,18 @@ async fn evict_and_wait_on_wanted_deleted() { let (tenant, ctx) = h.load().await; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let layer = { let mut layers = { - let layers = timeline.layers.read().await; + let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::>() }; @@ -305,7 +312,7 @@ async fn evict_and_wait_on_wanted_deleted() { // assert that once we remove the `layer` from the layer map and drop our reference, // the deletion of the layer in remote_storage happens. { - let mut layers = timeline.layers.write().await; + let mut layers = timeline.layers.write(LayerManagerLockHolder::Testing).await; layers.open_mut().unwrap().finish_gc_timeline(&[layer]); } @@ -340,14 +347,19 @@ fn read_wins_pending_eviction() { let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); let layer = { let mut layers = { - let layers = timeline.layers.read().await; + let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::>() }; @@ -473,14 +485,19 @@ fn multiple_pending_evictions_scenario(name: &'static str, in_order: bool) { let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); let layer = { let mut layers = { - let layers = timeline.layers.read().await; + let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::>() }; @@ -643,7 +660,12 @@ async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() { let (tenant, ctx) = h.load().await; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); @@ -655,7 +677,7 @@ async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() { let layer = { let mut layers = { - let layers = timeline.layers.read().await; + let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::>() }; @@ -729,7 +751,12 @@ async fn evict_and_wait_does_not_wait_for_download() { let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); @@ -741,7 +768,7 @@ async fn 
evict_and_wait_does_not_wait_for_download() { let layer = { let mut layers = { - let layers = timeline.layers.read().await; + let layers = timeline.layers.read(LayerManagerLockHolder::Testing).await; layers.likely_resident_layers().cloned().collect::>() }; @@ -823,7 +850,7 @@ async fn evict_and_wait_does_not_wait_for_download() { #[tokio::test(start_paused = true)] async fn eviction_cancellation_on_drop() { use bytes::Bytes; - use pageserver_api::value::Value; + use wal_decoder::models::value::Value; // this is the runtime on which Layer spawns the blocking tasks on let handle = tokio::runtime::Handle::current(); @@ -835,7 +862,12 @@ async fn eviction_cancellation_on_drop() { let (tenant, ctx) = h.load().await; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); @@ -862,7 +894,7 @@ async fn eviction_cancellation_on_drop() { let (evicted_layer, not_evicted) = { let mut layers = { - let mut guard = timeline.layers.write().await; + let mut guard = timeline.layers.write(LayerManagerLockHolder::Testing).await; let layers = guard.likely_resident_layers().cloned().collect::>(); // remove the layers from layermap guard.open_mut().unwrap().finish_gc_timeline(&layers); diff --git a/pageserver/src/tenant/storage_layer/merge_iterator.rs b/pageserver/src/tenant/storage_layer/merge_iterator.rs index ea3dea50c3..c15abcdf3f 100644 --- a/pageserver/src/tenant/storage_layer/merge_iterator.rs +++ b/pageserver/src/tenant/storage_layer/merge_iterator.rs @@ -4,8 +4,8 @@ use std::sync::Arc; use anyhow::bail; use pageserver_api::key::Key; -use pageserver_api::value::Value; use utils::lsn::Lsn; +use wal_decoder::models::value::Value; use super::delta_layer::{DeltaLayerInner, DeltaLayerIterator}; use super::image_layer::{ImageLayerInner, ImageLayerIterator}; @@ -402,9 +402,9 @@ impl<'a> MergeIterator<'a> { mod tests { use itertools::Itertools; use pageserver_api::key::Key; - #[cfg(feature = "testing")] - use pageserver_api::record::NeonWalRecord; use utils::lsn::Lsn; + #[cfg(feature = "testing")] + use wal_decoder::models::record::NeonWalRecord; use super::*; use crate::DEFAULT_PG_VERSION; @@ -436,7 +436,6 @@ mod tests { #[tokio::test] async fn merge_in_between() { use bytes::Bytes; - use pageserver_api::value::Value; let harness = TenantHarness::create("merge_iterator_merge_in_between") .await @@ -501,7 +500,6 @@ mod tests { #[tokio::test] async fn delta_merge() { use bytes::Bytes; - use pageserver_api::value::Value; let harness = TenantHarness::create("merge_iterator_delta_merge") .await @@ -578,7 +576,6 @@ mod tests { #[tokio::test] async fn delta_image_mixed_merge() { use bytes::Bytes; - use pageserver_api::value::Value; let harness = TenantHarness::create("merge_iterator_delta_image_mixed_merge") .await diff --git a/pageserver/src/tenant/tasks.rs b/pageserver/src/tenant/tasks.rs index 4709a6d616..954dd38bb4 100644 --- a/pageserver/src/tenant/tasks.rs +++ b/pageserver/src/tenant/tasks.rs @@ -17,14 +17,17 @@ use tracing::*; use utils::backoff::exponential_backoff_duration; use utils::completion::Barrier; use utils::pausable_failpoint; +use utils::sync::gate::GateError; use crate::context::{DownloadBehavior, RequestContext}; use crate::metrics::{self, BackgroundLoopSemaphoreMetricsRecorder, TENANT_TASK_EVENTS}; use crate::task_mgr::{self, BACKGROUND_RUNTIME, TOKIO_WORKER_THREADS, TaskKind}; +use crate::tenant::blob_io::WriteBlobError; use 
crate::tenant::throttle::Stats; use crate::tenant::timeline::CompactionError; use crate::tenant::timeline::compaction::CompactionOutcome; use crate::tenant::{TenantShard, TenantState}; +use crate::virtual_file::owned_buffers_io::write::FlushTaskError; /// Semaphore limiting concurrent background tasks (across all tenants). /// @@ -313,7 +316,20 @@ pub(crate) fn log_compaction_error( let timeline = root_cause .downcast_ref::() .is_some_and(|e| e.is_stopping()); - let is_stopping = upload_queue || timeline; + let buffered_writer_flush_task_canelled = root_cause + .downcast_ref::() + .is_some_and(|e| e.is_cancel()); + let write_blob_cancelled = root_cause + .downcast_ref::() + .is_some_and(|e| e.is_cancel()); + let gate_closed = root_cause + .downcast_ref::() + .is_some_and(|e| e.is_cancel()); + let is_stopping = upload_queue + || timeline + || buffered_writer_flush_task_canelled + || write_blob_cancelled + || gate_closed; if is_stopping { Level::INFO diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 3522af2de0..aeced98859 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -35,7 +35,11 @@ use fail::fail_point; use futures::stream::FuturesUnordered; use futures::{FutureExt, StreamExt}; use handle::ShardTimelineId; -use layer_manager::Shutdown; +use layer_manager::{ + LayerManagerLockHolder, LayerManagerReadGuard, LayerManagerWriteGuard, LockedLayerManager, + Shutdown, +}; + use offload::OffloadError; use once_cell::sync::Lazy; use pageserver_api::config::tenant_conf_defaults::DEFAULT_PITR_INTERVAL; @@ -52,11 +56,9 @@ use pageserver_api::models::{ }; use pageserver_api::reltag::{BlockNumber, RelTag}; use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId}; -#[cfg(test)] -use pageserver_api::value::Value; use postgres_connection::PgConnectionConfig; use postgres_ffi::v14::xlog_utils; -use postgres_ffi::{WAL_SEGMENT_SIZE, to_pg_timestamp}; +use postgres_ffi::{PgMajorVersion, WAL_SEGMENT_SIZE, to_pg_timestamp}; use rand::Rng; use remote_storage::DownloadError; use serde_with::serde_as; @@ -76,13 +78,14 @@ use utils::rate_limit::RateLimit; use utils::seqwait::SeqWait; use utils::simple_rcu::{Rcu, RcuReadGuard}; use utils::sync::gate::{Gate, GateGuard}; -use utils::{completion, critical, fs_ext, pausable_failpoint}; +use utils::{completion, critical_timeline, fs_ext, pausable_failpoint}; +#[cfg(test)] +use wal_decoder::models::value::Value; use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta}; use self::delete::DeleteTimelineFlow; pub(super) use self::eviction_task::EvictionTaskTenantState; use self::eviction_task::EvictionTaskTimelineState; -use self::layer_manager::LayerManager; use self::logical_size::LogicalSize; use self::walreceiver::{WalReceiver, WalReceiverConf}; use super::remote_timeline_client::RemoteTimelineClient; @@ -92,18 +95,18 @@ use super::storage_layer::{LayerFringe, LayerVisibilityHint, ReadableLayer}; use super::tasks::log_compaction_error; use super::upload_queue::NotInitialized; use super::{ - AttachedTenantConf, BasebackupPrepareSender, GcError, HeatMapTimeline, MaybeOffloaded, + AttachedTenantConf, GcError, HeatMapTimeline, MaybeOffloaded, debug_assert_current_span_has_tenant_and_timeline_id, }; use crate::PERF_TRACE_TARGET; use crate::aux_file::AuxFileSizeEstimator; -use crate::basebackup_cache::BasebackupPrepareRequest; +use crate::basebackup_cache::BasebackupCache; use crate::config::PageServerConf; use crate::context::{ DownloadBehavior, 
PerfInstrumentFutureExt, RequestContext, RequestContextBuilder, }; use crate::disk_usage_eviction_task::{DiskUsageEvictionInfo, EvictionCandidate, finite_f32}; -use crate::feature_resolver::FeatureResolver; +use crate::feature_resolver::TenantFeatureResolver; use crate::keyspace::{KeyPartitioning, KeySpace}; use crate::l0_flush::{self, L0FlushGlobalState}; use crate::metrics::{ @@ -175,19 +178,19 @@ pub enum LastImageLayerCreationStatus { impl std::fmt::Display for ImageLayerCreationMode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things. /// Can be removed after all refactors are done. -fn drop_rlock(rlock: tokio::sync::RwLockReadGuard) { +fn drop_layer_manager_rlock(rlock: LayerManagerReadGuard<'_>) { drop(rlock) } /// Temporary function for immutable storage state refactor, ensures we are dropping mutex guard instead of other things. /// Can be removed after all refactors are done. -fn drop_wlock(rlock: tokio::sync::RwLockWriteGuard<'_, T>) { +fn drop_layer_manager_wlock(rlock: LayerManagerWriteGuard<'_>) { drop(rlock) } @@ -198,8 +201,8 @@ pub struct TimelineResources { pub pagestream_throttle_metrics: Arc, pub l0_compaction_trigger: Arc, pub l0_flush_global_state: l0_flush::L0FlushGlobalState, - pub basebackup_prepare_sender: BasebackupPrepareSender, - pub feature_resolver: FeatureResolver, + pub basebackup_cache: Arc, + pub feature_resolver: TenantFeatureResolver, } pub struct Timeline { @@ -222,7 +225,7 @@ pub struct Timeline { /// to shards, and is constant through the lifetime of this Timeline. shard_identity: ShardIdentity, - pub pg_version: u32, + pub pg_version: PgMajorVersion, /// The tuple has two elements. /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote). @@ -241,7 +244,7 @@ pub struct Timeline { /// /// In the future, we'll be able to split up the tuple of LayerMap and `LayerFileManager`, /// so that e.g. on-demand-download/eviction, and layer spreading, can operate just on `LayerFileManager`. - pub(crate) layers: tokio::sync::RwLock, + pub(crate) layers: LockedLayerManager, last_freeze_at: AtomicLsn, // Atomic would be more appropriate here. @@ -445,9 +448,9 @@ pub struct Timeline { wait_lsn_log_slow: tokio::sync::Semaphore, /// A channel to send async requests to prepare a basebackup for the basebackup cache. 
- basebackup_prepare_sender: BasebackupPrepareSender, + basebackup_cache: Arc, - feature_resolver: FeatureResolver, + feature_resolver: TenantFeatureResolver, } pub(crate) enum PreviousHeatmap { @@ -629,7 +632,7 @@ pub enum ReadPathLayerId { impl std::fmt::Display for ReadPathLayerId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ReadPathLayerId::PersistentLayer(key) => write!(f, "{}", key), + ReadPathLayerId::PersistentLayer(key) => write!(f, "{key}"), ReadPathLayerId::InMemoryLayer(range) => { write!(f, "in-mem {}..{}", range.start, range.end) } @@ -705,7 +708,7 @@ impl MissingKeyError { impl std::fmt::Debug for MissingKeyError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self) + write!(f, "{self}") } } @@ -718,19 +721,19 @@ impl std::fmt::Display for MissingKeyError { )?; if let Some(ref ancestor_lsn) = self.ancestor_lsn { - write!(f, ", ancestor {}", ancestor_lsn)?; + write!(f, ", ancestor {ancestor_lsn}")?; } if let Some(ref query) = self.query { - write!(f, ", query {}", query)?; + write!(f, ", query {query}")?; } if let Some(ref read_path) = self.read_path { - write!(f, "\n{}", read_path)?; + write!(f, "\n{read_path}")?; } if let Some(ref backtrace) = self.backtrace { - write!(f, "\n{}", backtrace)?; + write!(f, "\n{backtrace}")?; } Ok(()) @@ -760,7 +763,7 @@ pub(crate) enum CreateImageLayersError { PageReconstructError(#[source] PageReconstructError), #[error(transparent)] - Other(#[from] anyhow::Error), + Other(anyhow::Error), } impl From for CreateImageLayersError { @@ -813,7 +816,7 @@ impl From for FlushLayerError { } #[derive(thiserror::Error, Debug)] -pub(crate) enum GetVectoredError { +pub enum GetVectoredError { #[error("timeline shutting down")] Cancelled, @@ -846,7 +849,7 @@ impl From for GetVectoredError { } #[derive(thiserror::Error, Debug)] -pub(crate) enum GetReadyAncestorError { +pub enum GetReadyAncestorError { #[error("ancestor LSN wait error")] AncestorLsnTimeout(#[from] WaitLsnError), @@ -936,7 +939,7 @@ impl std::fmt::Debug for Timeline { } #[derive(thiserror::Error, Debug, Clone)] -pub(crate) enum WaitLsnError { +pub enum WaitLsnError { // Called on a timeline which is shutting down #[error("Shutdown")] Shutdown, @@ -1055,8 +1058,8 @@ pub(crate) enum WaitLsnWaiter<'a> { /// Argument to [`Timeline::shutdown`]. #[derive(Debug, Clone, Copy)] pub(crate) enum ShutdownMode { - /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk and then - /// also to remote storage. This method can easily take multiple seconds for a busy timeline. + /// Graceful shutdown, may do a lot of I/O as we flush any open layers to disk. This method can + /// take multiple seconds for a busy timeline. /// /// While we are flushing, we continue to accept read I/O for LSNs ingested before /// the call to [`Timeline::shutdown`]. @@ -1535,7 +1538,10 @@ impl Timeline { /// This method makes no distinction between local and remote layers. /// Hence, the result **does not represent local filesystem usage**. pub(crate) async fn layer_size_sum(&self) -> u64 { - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; guard.layer_size_sum() } @@ -1845,7 +1851,7 @@ impl Timeline { // time, and this was missed. 
// if write_guard.is_none() { return; } - let Ok(layers_guard) = self.layers.try_read() else { + let Ok(layers_guard) = self.layers.try_read(LayerManagerLockHolder::TryFreezeLayer) else { // Don't block if the layer lock is busy return; }; @@ -1896,16 +1902,11 @@ impl Timeline { return; }; - let Some(current_size) = open_layer.try_len() else { - // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so - // read lock to get size should always succeed. - tracing::warn!("Lock conflict while reading size of open layer"); - return; - }; + let current_size = open_layer.len(); let current_lsn = self.get_last_record_lsn(); - let checkpoint_distance_override = open_layer.tick().await; + let checkpoint_distance_override = open_layer.tick(); if let Some(size_override) = checkpoint_distance_override { if current_size > size_override { @@ -2143,14 +2144,31 @@ impl Timeline { debug_assert_current_span_has_tenant_and_timeline_id(); // Regardless of whether we're going to try_freeze_and_flush - // or not, stop ingesting any more data. + // cancel walreceiver to stop ingesting more data asap. + // + // Note that we're accepting a race condition here where we may + // do the final flush below, before walreceiver observes the + // cancellation and exits. + // This means we may open a new InMemoryLayer after the final flush below. + // Flush loop is also still running for a short while, so, in theory, it + // could also make its way into the upload queue. + // + // If we wait for the shutdown of the walreceiver before moving on to the + // flush, then that would be avoided. But we don't do it because the + // walreceiver entertains reads internally, which means that it possibly + // depends on the download of layers. Layer download is only sensitive to + // the cancellation of the entire timeline, so cancelling the walreceiver + // will have no effect on the individual get requests. + // This would cause problems when there is a lot of ongoing downloads or + // there is S3 unavailabilities, i.e. detach, deletion, etc would hang, + // and we can't deallocate resources of the timeline, etc. let walreceiver = self.walreceiver.lock().unwrap().take(); tracing::debug!( is_some = walreceiver.is_some(), "Waiting for WalReceiverManager..." ); if let Some(walreceiver) = walreceiver { - walreceiver.shutdown().await; + walreceiver.cancel().await; } // ... and inform any waiters for newer LSNs that there won't be any. self.last_record_lsn.shutdown(); @@ -2158,7 +2176,7 @@ impl Timeline { if let ShutdownMode::FreezeAndFlush = mode { let do_flush = if let Some((open, frozen)) = self .layers - .read() + .read(LayerManagerLockHolder::Shutdown) .await .layer_map() .map(|lm| (lm.open_layer.is_some(), lm.frozen_layers.len())) @@ -2262,7 +2280,10 @@ impl Timeline { // Allow any remaining in-memory layers to do cleanup -- until that, they hold the gate // open. let mut write_guard = self.write_lock.lock().await; - self.layers.write().await.shutdown(&mut write_guard); + self.layers + .write(LayerManagerLockHolder::Shutdown) + .await + .shutdown(&mut write_guard); } // Finally wait until any gate-holders are complete. 
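Editor's note: the long comment above explains the deliberate ordering at shutdown: cancel the walreceiver, then flush, without waiting for the receiver to fully exit. A minimal sketch of that cancel-then-flush shape, assuming the tokio and tokio_util crates; the task body and names are stand-ins:

use std::time::Duration;
use tokio_util::sync::CancellationToken;

/// Stand-in for the WAL receiver: ingests until cancelled.
async fn ingest_loop(cancel: CancellationToken) {
    loop {
        tokio::select! {
            _ = cancel.cancelled() => {
                println!("ingest: observed cancellation, exiting");
                return;
            }
            _ = tokio::time::sleep(Duration::from_millis(10)) => {
                // pretend to ingest one WAL record
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let cancel = CancellationToken::new();
    let ingest = tokio::spawn(ingest_loop(cancel.clone()));

    // Shutdown: signal cancellation but do not wait for the ingest task
    // before flushing; a little more data may still arrive while we flush.
    cancel.cancel();
    println!("flushing in-memory state while ingest winds down...");

    // Join only after the flush, so shutdown is never blocked on ingest.
    ingest.await.unwrap();
}

The trade-off is the one the comment states: the final flush may race with a little more ingest, but shutdown never blocks on a task that may itself be waiting on slow layer downloads.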
@@ -2365,7 +2386,10 @@ impl Timeline { &self, reset: LayerAccessStatsReset, ) -> Result { - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; let layer_map = guard.layer_map()?; let mut in_memory_layers = Vec::with_capacity(layer_map.frozen_layers.len() + 1); if let Some(open_layer) = &layer_map.open_layer { @@ -2493,6 +2517,37 @@ impl Timeline { .unwrap_or(self.conf.default_tenant_conf.basebackup_cache_enabled) } + /// Try to get a basebackup from the on-disk cache. + pub(crate) async fn get_cached_basebackup(&self, lsn: Lsn) -> Option { + self.basebackup_cache + .get(self.tenant_shard_id.tenant_id, self.timeline_id, lsn) + .await + } + + /// Convenience method to attempt fetching a basebackup for the timeline if enabled and safe for + /// the given request parameters. + /// + /// TODO: consider moving this onto GrpcPageServiceHandler once the libpq handler is gone. + pub async fn get_cached_basebackup_if_enabled( + &self, + lsn: Option, + prev_lsn: Option, + full: bool, + replica: bool, + gzip: bool, + ) -> Option { + if !self.is_basebackup_cache_enabled() || !self.basebackup_cache.is_enabled() { + return None; + } + // We have to know which LSN to fetch the basebackup for. + let lsn = lsn?; + // We only cache gzipped, non-full basebackups for primary computes with automatic prev_lsn. + if prev_lsn.is_some() || full || replica || !gzip { + return None; + } + self.get_cached_basebackup(lsn).await + } + /// Prepare basebackup for the given LSN and store it in the basebackup cache. /// The method is asynchronous and returns immediately. /// The actual basebackup preparation is performed in the background @@ -2514,17 +2569,8 @@ impl Timeline { return; } - let res = self - .basebackup_prepare_sender - .send(BasebackupPrepareRequest { - tenant_shard_id: self.tenant_shard_id, - timeline_id: self.timeline_id, - lsn, - }); - if let Err(e) = res { - // May happen during shutdown, it's not critical. - info!("Failed to send shutdown checkpoint: {e:#}"); - } + self.basebackup_cache + .send_prepare(self.tenant_shard_id, self.timeline_id, lsn); } } @@ -2906,7 +2952,7 @@ impl Timeline { shard_identity: ShardIdentity, walredo_mgr: Option>, resources: TimelineResources, - pg_version: u32, + pg_version: PgMajorVersion, state: TimelineState, attach_wal_lag_cooldown: Arc>, create_idempotency: crate::tenant::CreateTimelineIdempotency, @@ -3081,7 +3127,7 @@ impl Timeline { wait_lsn_log_slow: tokio::sync::Semaphore::new(1), - basebackup_prepare_sender: resources.basebackup_prepare_sender, + basebackup_cache: resources.basebackup_cache, feature_resolver: resources.feature_resolver, }; @@ -3232,7 +3278,7 @@ impl Timeline { /// Initialize with an empty layer map. Used when creating a new timeline. pub(super) fn init_empty_layer_map(&self, start_lsn: Lsn) { - let mut layers = self.layers.try_write().expect( + let mut layers = self.layers.try_write(LayerManagerLockHolder::Init).expect( "in the context where we call this function, no other task has access to the object", ); layers @@ -3252,7 +3298,10 @@ impl Timeline { use init::Decision::*; use init::{Discovered, DismissedLayer}; - let mut guard = self.layers.write().await; + let mut guard = self + .layers + .write(LayerManagerLockHolder::LoadLayerMap) + .await; let timer = self.metrics.load_layer_map_histo.start_timer(); @@ -3407,10 +3456,6 @@ impl Timeline { // TenantShard::create_timeline will wait for these uploads to happen before returning, or // on retry. 
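Editor's note: the new `get_cached_basebackup_if_enabled` gate above can be read as a small pure predicate: serve from the cache only for the one request shape the cache stores. A standalone sketch of that gating with stand-in types (the real code consults tenant config and the on-disk cache):

/// Illustrative stand-ins for the real types.
type Lsn = u64;
type Basebackup = Vec<u8>;

/// Only gzipped, non-full basebackups for primary computes with an explicit
/// LSN and automatic prev_lsn are eligible; everything else misses the cache.
fn cached_basebackup_if_eligible(
    cache_enabled: bool,
    lsn: Option<Lsn>,
    prev_lsn: Option<Lsn>,
    full: bool,
    replica: bool,
    gzip: bool,
    lookup: impl Fn(Lsn) -> Option<Basebackup>,
) -> Option<Basebackup> {
    if !cache_enabled {
        return None;
    }
    // Without a concrete LSN we cannot know which cache entry to serve.
    let lsn = lsn?;
    // Any of these request flavours is not what the cache stores.
    if prev_lsn.is_some() || full || replica || !gzip {
        return None;
    }
    lookup(lsn)
}

fn main() {
    let lookup = |lsn: Lsn| (lsn == 42).then(|| vec![0u8; 16]);
    assert!(cached_basebackup_if_eligible(true, Some(42), None, false, false, true, lookup).is_some());
    assert!(cached_basebackup_if_eligible(true, None, None, false, false, true, lookup).is_none());
    println!("eligibility checks behave as expected");
}

Explicit prev_lsn, full or replica basebackups, uncompressed output, or a missing LSN all fall through to generating a fresh basebackup.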
- // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan) - drop(guard); // drop write lock, update_layer_visibility will take a read lock. - self.update_layer_visibility().await?; - info!( "loaded layer map with {} layers at {}, total physical size: {}", num_layers, disk_consistent_lsn, total_physical_size @@ -3869,7 +3914,10 @@ impl Timeline { &self, layer_name: &LayerName, ) -> Result, layer_manager::Shutdown> { - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; let layer = guard .layer_map()? .iter_historic_layers() @@ -3902,7 +3950,10 @@ impl Timeline { return None; } - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GenerateHeatmap) + .await; // Firstly, if there's any heatmap left over from when this location // was a secondary, take that into account. Keep layers that are: @@ -4000,7 +4051,10 @@ impl Timeline { } pub(super) async fn generate_unarchival_heatmap(&self, end_lsn: Lsn) -> PreviousHeatmap { - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GenerateHeatmap) + .await; let now = SystemTime::now(); let mut heatmap_layers = Vec::default(); @@ -4342,7 +4396,7 @@ impl Timeline { query: &VersionedKeySpaceQuery, ) -> Result { let mut fringe = LayerFringe::new(); - let guard = self.layers.read().await; + let guard = self.layers.read(LayerManagerLockHolder::GetPage).await; match query { VersionedKeySpaceQuery::Uniform { keyspace, lsn } => { @@ -4445,7 +4499,7 @@ impl Timeline { // required for correctness, but avoids visiting extra layers // which turns out to be a perf bottleneck in some cases. if !unmapped_keyspace.is_empty() { - let guard = timeline.layers.read().await; + let guard = timeline.layers.read(LayerManagerLockHolder::GetPage).await; guard.update_search_fringe(&unmapped_keyspace, cont_lsn, &mut fringe)?; // It's safe to drop the layer map lock after planning the next round of reads. @@ -4555,7 +4609,10 @@ impl Timeline { _guard: &tokio::sync::MutexGuard<'_, Option>, ctx: &RequestContext, ) -> anyhow::Result> { - let mut guard = self.layers.write().await; + let mut guard = self + .layers + .write(LayerManagerLockHolder::GetLayerForWrite) + .await; let last_record_lsn = self.get_last_record_lsn(); ensure!( @@ -4597,7 +4654,10 @@ impl Timeline { write_lock: &mut tokio::sync::MutexGuard<'_, Option>, ) -> Result { let frozen = { - let mut guard = self.layers.write().await; + let mut guard = self + .layers + .write(LayerManagerLockHolder::TryFreezeLayer) + .await; guard .open_mut()? .try_freeze_in_memory_layer(at, &self.last_freeze_at, write_lock, &self.metrics) @@ -4637,8 +4697,23 @@ impl Timeline { mut layer_flush_start_rx: tokio::sync::watch::Receiver<(u64, Lsn)>, ctx: &RequestContext, ) { + // Always notify waiters about the flush loop exiting since the loop might stop + // when the timeline hasn't been cancelled. + let scopeguard_rx = layer_flush_start_rx.clone(); + scopeguard::defer! { + let (flush_counter, _) = *scopeguard_rx.borrow(); + let _ = self + .layer_flush_done_tx + .send_replace((flush_counter, Err(FlushLayerError::Cancelled))); + } + // Subscribe to L0 delta layer updates, for compaction backpressure. 
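Editor's note: the `scopeguard::defer!` added above guarantees flush waiters are woken no matter how the flush loop exits. A minimal runnable sketch of the same pattern, assuming the scopeguard and tokio crates; the channel payloads are simplified stand-ins for the real flush counters:

use tokio::sync::watch;

#[tokio::main]
async fn main() {
    // Waiters observe (flush_counter, result) on this channel.
    let (done_tx, mut done_rx) = watch::channel((0u64, Ok::<(), &'static str>(())));
    // The "please flush up to ..." side; only its counter matters here.
    let (_start_tx, start_rx) = watch::channel(1u64);

    let flush_loop = tokio::spawn(async move {
        // Whatever makes the loop exit (cancellation, error, early return),
        // the deferred closure runs and unblocks anyone waiting on done_rx.
        scopeguard::defer! {
            let flush_counter = *start_rx.borrow();
            let _ = done_tx.send_replace((flush_counter, Err("cancelled")));
        }

        // The real loop would flush frozen layers here; we simply return,
        // simulating an exit that is not a clean per-request completion.
    });

    flush_loop.await.unwrap();
    done_rx.changed().await.unwrap();
    println!("waiter observed {:?}", *done_rx.borrow());
}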
- let mut watch_l0 = match self.layers.read().await.layer_map() { + let mut watch_l0 = match self + .layers + .read(LayerManagerLockHolder::FlushLoop) + .await + .layer_map() + { Ok(lm) => lm.watch_level0_deltas(), Err(Shutdown) => return, }; @@ -4661,9 +4736,6 @@ impl Timeline { let result = loop { if self.cancel.is_cancelled() { info!("dropping out of flush loop for timeline shutdown"); - // Note: we do not bother transmitting into [`layer_flush_done_tx`], because - // anyone waiting on that will respect self.cancel as well: they will stop - // waiting at the same time we as drop out of this loop. return; } @@ -4674,8 +4746,8 @@ impl Timeline { } // Fetch the next layer to flush, if any. - let (layer, l0_count, frozen_count, frozen_size) = { - let layers = self.layers.read().await; + let (layer, l0_count, frozen_count, frozen_size, open_layer_size) = { + let layers = self.layers.read(LayerManagerLockHolder::FlushLoop).await; let Ok(lm) = layers.layer_map() else { info!("dropping out of flush loop for timeline shutdown"); return; @@ -4687,8 +4759,13 @@ impl Timeline { .iter() .map(|l| l.estimated_in_mem_size()) .sum(); + let open_layer_size: u64 = lm + .open_layer + .as_ref() + .map(|l| l.estimated_in_mem_size()) + .unwrap_or(0); let layer = lm.frozen_layers.front().cloned(); - (layer, l0_count, frozen_count, frozen_size) + (layer, l0_count, frozen_count, frozen_size, open_layer_size) // drop 'layers' lock }; let Some(layer) = layer else { @@ -4701,7 +4778,7 @@ impl Timeline { if l0_count >= stall_threshold { warn!( "stalling layer flushes for compaction backpressure at {l0_count} \ - L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)" + L0 layers ({frozen_count} frozen layers with {frozen_size} bytes, {open_layer_size} bytes in open layer)" ); let stall_timer = self .metrics @@ -4754,7 +4831,7 @@ impl Timeline { let delay = flush_duration.as_secs_f64(); info!( "delaying layer flush by {delay:.3}s for compaction backpressure at \ - {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes)" + {l0_count} L0 layers ({frozen_count} frozen layers with {frozen_size} bytes, {open_layer_size} bytes in open layer)" ); let _delay_timer = self .metrics @@ -4971,7 +5048,10 @@ impl Timeline { // in-memory layer from the map now. The flushed layer is stored in // the mapping in `create_delta_layer`. 
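Editor's note: the stall/delay log lines above come from a simple two-threshold policy on the L0 delta count. A pure-function sketch of that decision; the threshold values and the zero-means-disabled convention are assumptions for the example, and the real loop additionally reports frozen and open layer sizes:

/// What the flush loop should do, given current L0 pressure.
#[derive(Debug, PartialEq)]
enum FlushPacing {
    /// Flush immediately.
    Proceed,
    /// Flush, but stretch it out so compaction can catch up.
    Delay,
    /// Stop flushing until the L0 count drops below the stall threshold.
    Stall,
}

fn flush_pacing(l0_count: usize, delay_threshold: usize, stall_threshold: usize) -> FlushPacing {
    if stall_threshold > 0 && l0_count >= stall_threshold {
        FlushPacing::Stall
    } else if delay_threshold > 0 && l0_count >= delay_threshold {
        FlushPacing::Delay
    } else {
        FlushPacing::Proceed
    }
}

fn main() {
    // With hypothetical thresholds of 30 (delay) and 60 (stall):
    assert_eq!(flush_pacing(10, 30, 60), FlushPacing::Proceed);
    assert_eq!(flush_pacing(45, 30, 60), FlushPacing::Delay);
    assert_eq!(flush_pacing(80, 30, 60), FlushPacing::Stall);
    println!("backpressure decisions behave as expected");
}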
{ - let mut guard = self.layers.write().await; + let mut guard = self + .layers + .write(LayerManagerLockHolder::FlushFrozenLayer) + .await; guard.open_mut()?.finish_flush_l0_layer( delta_layer_to_add.as_ref(), @@ -5173,7 +5253,11 @@ impl Timeline { } let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?; - let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size); + let dense_partitioning = dense_ks.partition( + &self.shard_identity, + partition_size, + postgres_ffi::BLCKSZ as u64, + ); let sparse_partitioning = SparseKeyPartitioning { parts: vec![sparse_ks], }; // no partitioning for metadata keys for now @@ -5186,7 +5270,7 @@ impl Timeline { async fn time_for_new_image_layer(&self, partition: &KeySpace, lsn: Lsn) -> bool { let threshold = self.get_image_creation_threshold(); - let guard = self.layers.read().await; + let guard = self.layers.read(LayerManagerLockHolder::Compaction).await; let Ok(layers) = guard.layer_map() else { return false; }; @@ -5246,6 +5330,7 @@ impl Timeline { ctx: &RequestContext, img_range: Range, io_concurrency: IoConcurrency, + progress: Option<(usize, usize)>, ) -> Result { let mut wrote_keys = false; @@ -5322,11 +5407,15 @@ impl Timeline { } } + let progress_report = progress + .map(|(idx, total)| format!("({idx}/{total}) ")) + .unwrap_or_default(); if wrote_keys { // Normal path: we have written some data into the new image layer for this // partition, so flush it to disk. info!( - "produced image layer for rel {}", + "{} produced image layer for rel {}", + progress_report, ImageLayerName { key_range: img_range.clone(), lsn @@ -5336,7 +5425,12 @@ impl Timeline { unfinished_image_layer: image_layer_writer, }) } else { - tracing::debug!("no data in range {}-{}", img_range.start, img_range.end); + tracing::debug!( + "{} no data in range {}-{}", + progress_report, + img_range.start, + img_range.end + ); Ok(ImageLayerCreationOutcome::Empty) } } @@ -5528,7 +5622,7 @@ impl Timeline { self.should_check_if_image_layers_required(lsn) }; - let mut batch_image_writer = BatchLayerWriter::new(self.conf).await?; + let mut batch_image_writer = BatchLayerWriter::new(self.conf); let mut all_generated = true; @@ -5571,7 +5665,8 @@ impl Timeline { } } - for partition in partition_parts.iter() { + let total = partition_parts.len(); + for (idx, partition) in partition_parts.iter().enumerate() { if self.cancel.is_cancelled() { return Err(CreateImageLayersError::Cancelled); } @@ -5604,7 +5699,7 @@ impl Timeline { if let ImageLayerCreationMode::Force = mode { // When forced to create image layers, we might try and create them where they already // exist. This mode is only used in tests/debug. - let layers = self.layers.read().await; + let layers = self.layers.read(LayerManagerLockHolder::Compaction).await; if layers.contains_key(&PersistentLayerKey { key_range: img_range.clone(), lsn_range: PersistentLayerDesc::image_layer_lsn_range(lsn), @@ -5632,7 +5727,8 @@ impl Timeline { self.cancel.clone(), ctx, ) - .await?; + .await + .map_err(CreateImageLayersError::Other)?; fail_point!("image-layer-writer-fail-before-finish", |_| { Err(CreateImageLayersError::Other(anyhow::anyhow!( @@ -5655,6 +5751,7 @@ impl Timeline { ctx, img_range.clone(), io_concurrency, + Some((idx, total)), ) .await? 
} else { @@ -5727,9 +5824,12 @@ impl Timeline { } } - let image_layers = batch_image_writer.finish(self, ctx).await?; + let image_layers = batch_image_writer + .finish(self, ctx) + .await + .map_err(CreateImageLayersError::Other)?; - let mut guard = self.layers.write().await; + let mut guard = self.layers.write(LayerManagerLockHolder::Compaction).await; // FIXME: we could add the images to be uploaded *before* returning from here, but right // now they are being scheduled outside of write lock; current way is inconsistent with @@ -5737,7 +5837,7 @@ impl Timeline { guard .open_mut()? .track_new_image_layers(&image_layers, &self.metrics); - drop_wlock(guard); + drop_layer_manager_wlock(guard); let duration = timer.stop_and_record(); // Creating image layers may have caused some previously visible layers to be covered @@ -5901,7 +6001,7 @@ impl Drop for Timeline { if let Ok(mut gc_info) = ancestor.gc_info.write() { if !gc_info.remove_child_not_offloaded(self.timeline_id) { tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id, - "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed"); + "Couldn't remove retain_lsn entry from timeline's parent on drop: already removed"); } } } @@ -6107,7 +6207,7 @@ impl Timeline { layers_to_remove: &[Layer], ) -> Result<(), CompactionError> { let mut guard = tokio::select! { - guard = self.layers.write() => guard, + guard = self.layers.write(LayerManagerLockHolder::Compaction) => guard, _ = self.cancel.cancelled() => { return Err(CompactionError::ShuttingDown); } @@ -6156,7 +6256,7 @@ impl Timeline { self.remote_client .schedule_compaction_update(&remove_layers, new_deltas)?; - drop_wlock(guard); + drop_layer_manager_wlock(guard); Ok(()) } @@ -6166,7 +6266,7 @@ impl Timeline { mut replace_layers: Vec<(Layer, ResidentLayer)>, mut drop_layers: Vec, ) -> Result<(), CompactionError> { - let mut guard = self.layers.write().await; + let mut guard = self.layers.write(LayerManagerLockHolder::Compaction).await; // Trim our lists in case our caller (compaction) raced with someone else (GC) removing layers: we want // to avoid double-removing, and avoid rewriting something that was removed. @@ -6505,7 +6605,7 @@ impl Timeline { debug!("retain_lsns: {:?}", retain_lsns); - let mut layers_to_remove = Vec::new(); + let max_retain_lsn = retain_lsns.iter().max(); // Scan all layers in the timeline (remote or on-disk). // @@ -6515,105 +6615,110 @@ impl Timeline { // 3. it doesn't need to be retained for 'retain_lsns'; // 4. it does not need to be kept for LSNs holding valid leases. // 5. newer on-disk image layers cover the layer's whole key range - // - // TODO holding a write lock is too agressive and avoidable - let mut guard = self.layers.write().await; - let layers = guard.layer_map()?; - 'outer: for l in layers.iter_historic_layers() { - result.layers_total += 1; + let layers_to_remove = { + let mut layers_to_remove = Vec::new(); - // 1. Is it newer than GC horizon cutoff point? - if l.get_lsn_range().end > space_cutoff { - info!( - "keeping {} because it's newer than space_cutoff {}", - l.layer_name(), - space_cutoff, - ); - result.layers_needed_by_cutoff += 1; - continue 'outer; - } + let guard = self + .layers + .read(LayerManagerLockHolder::GarbageCollection) + .await; + let layers = guard.layer_map()?; + 'outer: for l in layers.iter_historic_layers() { + result.layers_total += 1; - // 2. It is newer than PiTR cutoff point? 
- if l.get_lsn_range().end > time_cutoff { - info!( - "keeping {} because it's newer than time_cutoff {}", - l.layer_name(), - time_cutoff, - ); - result.layers_needed_by_pitr += 1; - continue 'outer; - } - - // 3. Is it needed by a child branch? - // NOTE With that we would keep data that - // might be referenced by child branches forever. - // We can track this in child timeline GC and delete parent layers when - // they are no longer needed. This might be complicated with long inheritance chains. - // - // TODO Vec is not a great choice for `retain_lsns` - for retain_lsn in &retain_lsns { - // start_lsn is inclusive - if &l.get_lsn_range().start <= retain_lsn { - info!( - "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}", + // 1. Is it newer than GC horizon cutoff point? + if l.get_lsn_range().end > space_cutoff { + debug!( + "keeping {} because it's newer than space_cutoff {}", l.layer_name(), - retain_lsn, - l.is_incremental(), + space_cutoff, ); - result.layers_needed_by_branches += 1; + result.layers_needed_by_cutoff += 1; continue 'outer; } - } - // 4. Is there a valid lease that requires us to keep this layer? - if let Some(lsn) = &max_lsn_with_valid_lease { - // keep if layer start <= any of the lease - if &l.get_lsn_range().start <= lsn { - info!( - "keeping {} because there is a valid lease preventing GC at {}", + // 2. It is newer than PiTR cutoff point? + if l.get_lsn_range().end > time_cutoff { + debug!( + "keeping {} because it's newer than time_cutoff {}", l.layer_name(), - lsn, + time_cutoff, ); - result.layers_needed_by_leases += 1; + result.layers_needed_by_pitr += 1; continue 'outer; } + + // 3. Is it needed by a child branch? + // NOTE With that we would keep data that + // might be referenced by child branches forever. + // We can track this in child timeline GC and delete parent layers when + // they are no longer needed. This might be complicated with long inheritance chains. + if let Some(retain_lsn) = max_retain_lsn { + // start_lsn is inclusive + if &l.get_lsn_range().start <= retain_lsn { + debug!( + "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}", + l.layer_name(), + retain_lsn, + l.is_incremental(), + ); + result.layers_needed_by_branches += 1; + continue 'outer; + } + } + + // 4. Is there a valid lease that requires us to keep this layer? + if let Some(lsn) = &max_lsn_with_valid_lease { + // keep if layer start <= any of the lease + if &l.get_lsn_range().start <= lsn { + debug!( + "keeping {} because there is a valid lease preventing GC at {}", + l.layer_name(), + lsn, + ); + result.layers_needed_by_leases += 1; + continue 'outer; + } + } + + // 5. Is there a later on-disk layer for this relation? + // + // The end-LSN is exclusive, while disk_consistent_lsn is + // inclusive. For example, if disk_consistent_lsn is 100, it is + // OK for a delta layer to have end LSN 101, but if the end LSN + // is 102, then it might not have been fully flushed to disk + // before crash. + // + // For example, imagine that the following layers exist: + // + // 1000 - image (A) + // 1000-2000 - delta (B) + // 2000 - image (C) + // 2000-3000 - delta (D) + // 3000 - image (E) + // + // If GC horizon is at 2500, we can remove layers A and B, but + // we cannot remove C, even though it's older than 2500, because + // the delta layer 2000-3000 depends on it. 
+ if !layers + .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff)) + { + debug!("keeping {} because it is the latest layer", l.layer_name()); + result.layers_not_updated += 1; + continue 'outer; + } + + // We didn't find any reason to keep this file, so remove it. + info!( + "garbage collecting {} is_dropped: xx is_incremental: {}", + l.layer_name(), + l.is_incremental(), + ); + layers_to_remove.push(l); } - // 5. Is there a later on-disk layer for this relation? - // - // The end-LSN is exclusive, while disk_consistent_lsn is - // inclusive. For example, if disk_consistent_lsn is 100, it is - // OK for a delta layer to have end LSN 101, but if the end LSN - // is 102, then it might not have been fully flushed to disk - // before crash. - // - // For example, imagine that the following layers exist: - // - // 1000 - image (A) - // 1000-2000 - delta (B) - // 2000 - image (C) - // 2000-3000 - delta (D) - // 3000 - image (E) - // - // If GC horizon is at 2500, we can remove layers A and B, but - // we cannot remove C, even though it's older than 2500, because - // the delta layer 2000-3000 depends on it. - if !layers - .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff)) - { - info!("keeping {} because it is the latest layer", l.layer_name()); - result.layers_not_updated += 1; - continue 'outer; - } - - // We didn't find any reason to keep this file, so remove it. - info!( - "garbage collecting {} is_dropped: xx is_incremental: {}", - l.layer_name(), - l.is_incremental(), - ); - layers_to_remove.push(l); - } + layers_to_remove + }; if !layers_to_remove.is_empty() { // Persist the new GC cutoff value before we actually remove anything. @@ -6629,15 +6734,19 @@ impl Timeline { } })?; + let mut guard = self + .layers + .write(LayerManagerLockHolder::GarbageCollection) + .await; + let gc_layers = layers_to_remove .iter() - .map(|x| guard.get_from_desc(x)) + .flat_map(|desc| guard.try_get_from_key(&desc.key()).cloned()) .collect::>(); result.layers_removed = gc_layers.len() as u64; self.remote_client.schedule_gc_update(&gc_layers)?; - guard.open_mut()?.finish_gc_timeline(&gc_layers); #[cfg(feature = "testing")] @@ -6732,7 +6841,11 @@ impl Timeline { Err(walredo::Error::Cancelled) => return Err(PageReconstructError::Cancelled), Err(walredo::Error::Other(err)) => { if fire_critical_error { - critical!("walredo failure during page reconstruction: {err:?}"); + critical_timeline!( + self.tenant_shard_id, + self.timeline_id, + "walredo failure during page reconstruction: {err:?}" + ); } return Err(PageReconstructError::WalRedo( err.context("reconstruct a page image"), @@ -6819,7 +6932,10 @@ impl Timeline { use pageserver_api::models::DownloadRemoteLayersTaskState; let remaining = { - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; let Ok(lm) = guard.layer_map() else { // technically here we could look into iterating accessible layers, but downloading // all layers of a shutdown timeline makes no sense regardless. @@ -6925,7 +7041,7 @@ impl Timeline { impl Timeline { /// Returns non-remote layers for eviction. 
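Editor's note: the rewritten GC scan above checks the same retention conditions as before but against `max(retain_lsns)` only, under a read lock; this is equivalent because a layer's start LSN is <= some retain LSN exactly when it is <= the maximum. A standalone sketch of conditions 1-4 with stand-in types (condition 5, coverage by a newer image layer, needs the layer map and is omitted here):

use std::ops::Range;

type Lsn = u64;

/// Why a layer must be kept during GC; `None` means it can be collected.
#[derive(Debug, PartialEq)]
enum KeepReason {
    NewerThanSpaceCutoff,
    NewerThanTimeCutoff,
    NeededByChildBranch,
    NeededByLease,
}

fn keep_reason(
    lsn_range: &Range<Lsn>,
    space_cutoff: Lsn,
    time_cutoff: Lsn,
    max_retain_lsn: Option<Lsn>,
    max_lsn_with_valid_lease: Option<Lsn>,
) -> Option<KeepReason> {
    // 1. Newer than the GC horizon (space) cutoff.
    if lsn_range.end > space_cutoff {
        return Some(KeepReason::NewerThanSpaceCutoff);
    }
    // 2. Newer than the PiTR (time) cutoff.
    if lsn_range.end > time_cutoff {
        return Some(KeepReason::NewerThanTimeCutoff);
    }
    // 3. Still referenced by a child branch: comparing against the maximum
    // retain LSN is equivalent to comparing against each one.
    if matches!(max_retain_lsn, Some(retain) if lsn_range.start <= retain) {
        return Some(KeepReason::NeededByChildBranch);
    }
    // 4. Covered by a valid LSN lease.
    if matches!(max_lsn_with_valid_lease, Some(lease) if lsn_range.start <= lease) {
        return Some(KeepReason::NeededByLease);
    }
    None
}

fn main() {
    assert_eq!(
        keep_reason(&(100..200), 150, 300, None, None),
        Some(KeepReason::NewerThanSpaceCutoff)
    );
    assert_eq!(keep_reason(&(100..120), 150, 150, Some(110), None), Some(KeepReason::NeededByChildBranch));
    assert_eq!(keep_reason(&(100..120), 150, 150, Some(50), None), None);
    println!("gc retention checks behave as expected");
}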
pub(crate) async fn get_local_layers_for_disk_usage_eviction(&self) -> DiskUsageEvictionInfo { - let guard = self.layers.read().await; + let guard = self.layers.read(LayerManagerLockHolder::Eviction).await; let mut max_layer_size: Option = None; let resident_layers = guard @@ -7026,7 +7142,7 @@ impl Timeline { let image_layer = Layer::finish_creating(self.conf, self, desc, &path)?; info!("force created image layer {}", image_layer.local_path()); { - let mut guard = self.layers.write().await; + let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await; guard .open_mut() .unwrap() @@ -7089,7 +7205,7 @@ impl Timeline { let delta_layer = Layer::finish_creating(self.conf, self, desc, &path)?; info!("force created delta layer {}", delta_layer.local_path()); { - let mut guard = self.layers.write().await; + let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await; guard .open_mut() .unwrap() @@ -7134,9 +7250,7 @@ impl Timeline { if let Some(end) = layer_end_lsn { assert!( end <= last_record_lsn, - "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}", - end, - last_record_lsn, + "advance last record lsn before inserting a layer, end_lsn={end}, last_record_lsn={last_record_lsn}", ); } @@ -7184,7 +7298,7 @@ impl Timeline { // Link the layer to the layer map { - let mut guard = self.layers.write().await; + let mut guard = self.layers.write(LayerManagerLockHolder::Testing).await; let layer_map = guard.open_mut().unwrap(); layer_map.force_insert_in_memory_layer(Arc::new(layer)); } @@ -7201,7 +7315,7 @@ impl Timeline { io_concurrency: IoConcurrency, ) -> anyhow::Result> { let mut all_data = Vec::new(); - let guard = self.layers.read().await; + let guard = self.layers.read(LayerManagerLockHolder::Testing).await; for layer in guard.layer_map()?.iter_historic_layers() { if !layer.is_delta() && layer.image_layer_lsn() == lsn { let layer = guard.get_from_desc(&layer); @@ -7230,7 +7344,7 @@ impl Timeline { self: &Arc, ) -> anyhow::Result> { let mut layers = Vec::new(); - let guard = self.layers.read().await; + let guard = self.layers.read(LayerManagerLockHolder::Testing).await; for layer in guard.layer_map()?.iter_historic_layers() { layers.push(layer.key()); } @@ -7322,7 +7436,7 @@ impl TimelineWriter<'_> { .tl .get_layer_for_write(at, &self.write_guard, ctx) .await?; - let initial_size = layer.size().await?; + let initial_size = layer.len(); let last_freeze_at = self.last_freeze_at.load(); self.write_guard.replace(TimelineWriterState::new( @@ -7342,7 +7456,7 @@ impl TimelineWriter<'_> { let l0_count = self .tl .layers - .read() + .read(LayerManagerLockHolder::GetLayerMapInfo) .await .layer_map()? 
.level0_deltas() @@ -7550,17 +7664,19 @@ mod tests { use std::sync::Arc; use pageserver_api::key::Key; - use pageserver_api::value::Value; + use postgres_ffi::PgMajorVersion; use std::iter::Iterator; use tracing::Instrument; use utils::id::TimelineId; use utils::lsn::Lsn; + use wal_decoder::models::value::Value; use super::HeatMapTimeline; use crate::context::RequestContextBuilder; use crate::tenant::harness::{TenantHarness, test_img}; use crate::tenant::layer_map::LayerMap; use crate::tenant::storage_layer::{Layer, LayerName, LayerVisibilityHint}; + use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::timeline::{DeltaLayerTestDesc, EvictionError}; use crate::tenant::{PreviousHeatmap, Timeline}; @@ -7623,7 +7739,7 @@ mod tests { .create_test_timeline_with_layers( TimelineId::generate(), Lsn(0x10), - 14, + PgMajorVersion::PG14, &ctx, Vec::new(), // in-memory layers delta_layers, @@ -7668,7 +7784,7 @@ mod tests { // Evict all the layers and stash the old heatmap in the timeline. // This simulates a migration to a cold secondary location. - let guard = timeline.layers.read().await; + let guard = timeline.layers.read(LayerManagerLockHolder::Testing).await; let mut all_layers = Vec::new(); let forever = std::time::Duration::from_secs(120); for layer in guard.likely_resident_layers() { @@ -7759,7 +7875,7 @@ mod tests { .create_test_timeline_with_layers( TimelineId::generate(), Lsn(0x10), - 14, + PgMajorVersion::PG14, &ctx, Vec::new(), // in-memory layers delta_layers, @@ -7790,7 +7906,7 @@ mod tests { }))); // Evict all the layers in the previous heatmap - let guard = timeline.layers.read().await; + let guard = timeline.layers.read(LayerManagerLockHolder::Testing).await; let forever = std::time::Duration::from_secs(120); for layer in guard.likely_resident_layers() { layer.evict_and_wait(forever).await.unwrap(); @@ -7819,7 +7935,12 @@ mod tests { let (tenant, ctx) = harness.load().await; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); @@ -7853,7 +7974,10 @@ mod tests { } async fn find_some_layer(timeline: &Timeline) -> Layer { - let layers = timeline.layers.read().await; + let layers = timeline + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; let desc = layers .layer_map() .unwrap() diff --git a/pageserver/src/tenant/timeline/analysis.rs b/pageserver/src/tenant/timeline/analysis.rs index 96864ec44b..90c70086ed 100644 --- a/pageserver/src/tenant/timeline/analysis.rs +++ b/pageserver/src/tenant/timeline/analysis.rs @@ -4,6 +4,7 @@ use std::ops::Range; use utils::lsn::Lsn; use super::Timeline; +use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; #[derive(serde::Serialize)] pub(crate) struct RangeAnalysis { @@ -24,7 +25,10 @@ impl Timeline { let num_of_l0; let all_layer_files = { - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; num_of_l0 = guard.layer_map().unwrap().level0_deltas().len(); guard.all_persistent_layers() }; diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index 72ca0f9cc1..ac3930fb71 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -9,7 +9,7 @@ use std::ops::{Deref, Range}; use std::sync::Arc; use std::time::{Duration, Instant}; -use 
super::layer_manager::LayerManager; +use super::layer_manager::LayerManagerLockHolder; use super::{ CompactFlags, CompactOptions, CompactionError, CreateImageLayersError, DurationRecorder, GetVectoredError, ImageLayerCreationMode, LastImageLayerCreationStatus, RecordedDuration, @@ -29,18 +29,18 @@ use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE; use pageserver_api::key::{KEY_SIZE, Key}; use pageserver_api::keyspace::{KeySpace, ShardedRange}; use pageserver_api::models::{CompactInfoResponse, CompactKeyRange}; -use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId}; -use pageserver_api::value::Value; use pageserver_compaction::helpers::{fully_contains, overlaps_with}; use pageserver_compaction::interface::*; use serde::Serialize; use tokio::sync::{OwnedSemaphorePermit, Semaphore}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, debug, error, info, info_span, trace, warn}; -use utils::critical; +use utils::critical_timeline; use utils::id::TimelineId; use utils::lsn::Lsn; +use wal_decoder::models::record::NeonWalRecord; +use wal_decoder::models::value::Value; use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder}; use crate::page_cache; @@ -62,7 +62,7 @@ use crate::tenant::storage_layer::{ use crate::tenant::tasks::log_compaction_error; use crate::tenant::timeline::{ DeltaLayerWriter, ImageLayerCreationOutcome, ImageLayerWriter, IoConcurrency, Layer, - ResidentLayer, drop_rlock, + ResidentLayer, drop_layer_manager_rlock, }; use crate::tenant::{DeltaLayer, MaybeOffloaded}; use crate::virtual_file::{MaybeFatalIo, VirtualFile}; @@ -101,7 +101,11 @@ pub enum GcCompactionQueueItem { /// Whether the compaction is triggered automatically (determines whether we need to update L2 LSN) auto: bool, }, - SubCompactionJob(CompactOptions), + SubCompactionJob { + i: usize, + total: usize, + options: CompactOptions, + }, Notify(GcCompactionJobId, Option), } @@ -163,7 +167,7 @@ impl GcCompactionQueueItem { running, job_id: id.0, }), - GcCompactionQueueItem::SubCompactionJob(options) => Some(CompactInfoResponse { + GcCompactionQueueItem::SubCompactionJob { options, .. } => Some(CompactInfoResponse { compact_key_range: options.compact_key_range, compact_lsn_range: options.compact_lsn_range, sub_compaction: options.sub_compaction, @@ -314,7 +318,10 @@ impl GcCompactionQueue { .unwrap_or(Lsn::INVALID); let layers = { - let guard = timeline.layers.read().await; + let guard = timeline + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; let layer_map = guard.layer_map()?; layer_map.iter_historic_layers().collect_vec() }; @@ -408,7 +415,10 @@ impl GcCompactionQueue { timeline: &Arc, lsn: Lsn, ) -> Result { - let guard = timeline.layers.read().await; + let guard = timeline + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; let layer_map = guard.layer_map()?; let layers = layer_map.iter_historic_layers().collect_vec(); let mut size = 0; @@ -483,7 +493,7 @@ impl GcCompactionQueue { .map(|job| job.compact_lsn_range.end) .max() .unwrap(); - for job in jobs { + for (i, job) in jobs.into_iter().enumerate() { // Unfortunately we need to convert the `GcCompactJob` back to `CompactionOptions` // until we do further refactors to allow directly call `compact_with_gc`. 
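Editor's note: carrying `i` and `total` inside the `SubCompactionJob` variant is what lets the queue log progress when the job actually runs, possibly long after it was scheduled. A toy sketch of that shape (the options payload is just a string here):

#[allow(dead_code)]
#[derive(Debug)]
enum QueueItem {
    MetaJob { auto: bool },
    SubCompactionJob { i: usize, total: usize, options: String },
}

fn schedule(sub_jobs: Vec<String>) -> Vec<QueueItem> {
    let total = sub_jobs.len();
    sub_jobs
        .into_iter()
        .enumerate()
        .map(|(i, options)| QueueItem::SubCompactionJob { i, total, options })
        .collect()
}

fn main() {
    for item in schedule(vec!["job-a".into(), "job-b".into(), "job-c".into()]) {
        if let QueueItem::SubCompactionJob { i, total, options } = item {
            // Mirrors the "running gc-compaction subcompaction job i/total" log line.
            println!("running subcompaction job {}/{}: {}", i, total, options);
        }
    }
}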
let mut flags: EnumSet = EnumSet::default(); @@ -501,7 +511,11 @@ impl GcCompactionQueue { compact_lsn_range: Some(job.compact_lsn_range.into()), sub_compaction_max_job_size_mb: None, }; - pending_tasks.push(GcCompactionQueueItem::SubCompactionJob(options)); + pending_tasks.push(GcCompactionQueueItem::SubCompactionJob { + options, + i, + total: jobs_len, + }); } if !auto { @@ -645,7 +659,7 @@ impl GcCompactionQueue { } } } - GcCompactionQueueItem::SubCompactionJob(options) => { + GcCompactionQueueItem::SubCompactionJob { options, i, total } => { // TODO: error handling, clear the queue if any task fails? let _gc_guard = match gc_block.start().await { Ok(guard) => guard, @@ -657,6 +671,7 @@ impl GcCompactionQueue { ))); } }; + info!("running gc-compaction subcompaction job {}/{}", i, total); let res = timeline.compact_with_options(cancel, options, ctx).await; let compaction_result = match res { Ok(res) => res, @@ -851,7 +866,7 @@ impl KeyHistoryRetention { } let layer_generation; { - let guard = tline.layers.read().await; + let guard = tline.layers.read(LayerManagerLockHolder::Compaction).await; if !guard.contains_key(key) { return false; } @@ -971,7 +986,7 @@ impl KeyHistoryRetention { tline .reconstruct_value(key, lsn, data, RedoAttemptType::GcCompaction) .await - .with_context(|| format!("verification failed for key {} at lsn {}", key, lsn))?; + .with_context(|| format!("verification failed for key {key} at lsn {lsn}"))?; Ok(()) } @@ -1282,7 +1297,10 @@ impl Timeline { // We do the repartition on the L0-L1 boundary. All data below the boundary // are compacted by L0 with low read amplification, thus making the `repartition` // function run fast. - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; guard .all_persistent_layers() .iter() @@ -1301,7 +1319,7 @@ impl Timeline { || cfg!(feature = "testing") || self .feature_resolver - .evaluate_boolean("image-compaction-boundary", self.tenant_shard_id.tenant_id) + .evaluate_boolean("image-compaction-boundary") .is_ok() { let last_repartition_lsn = self.partitioning.read().1; @@ -1372,7 +1390,11 @@ impl Timeline { GetVectoredError::MissingKey(_), ) = err { - critical!("missing key during compaction: {err:?}"); + critical_timeline!( + self.tenant_shard_id, + self.timeline_id, + "missing key during compaction: {err:?}" + ); } })?; @@ -1400,7 +1422,11 @@ impl Timeline { // Alert on critical errors that indicate data corruption. Err(err) if err.is_critical() => { - critical!("could not compact, repartitioning keyspace failed: {err:?}"); + critical_timeline!( + self.tenant_shard_id, + self.timeline_id, + "could not compact, repartitioning keyspace failed: {err:?}" + ); } // Log other errors. No partitioning? 
This is normal, if the timeline was just created @@ -1461,7 +1487,7 @@ impl Timeline { let latest_gc_cutoff = self.get_applied_gc_cutoff_lsn(); let pitr_cutoff = self.gc_info.read().unwrap().cutoffs.time; - let layers = self.layers.read().await; + let layers = self.layers.read(LayerManagerLockHolder::Compaction).await; let layers_iter = layers.layer_map()?.iter_historic_layers(); let (layers_total, mut layers_checked) = (layers_iter.len(), 0); for layer_desc in layers_iter { @@ -1582,13 +1608,15 @@ impl Timeline { let started = Instant::now(); let mut replace_image_layers = Vec::new(); + let total = layers_to_rewrite.len(); - for layer in layers_to_rewrite { + for (i, layer) in layers_to_rewrite.into_iter().enumerate() { if self.cancel.is_cancelled() { return Err(CompactionError::ShuttingDown); } - info!(layer=%layer, "rewriting layer after shard split"); + info!(layer=%layer, "rewriting layer after shard split: {}/{}", i, total); + let mut image_layer_writer = ImageLayerWriter::new( self.conf, self.timeline_id, @@ -1722,7 +1750,10 @@ impl Timeline { // are implicitly left visible, because LayerVisibilityHint's default is Visible, and we never modify it here. // Note that L0 deltas _can_ be covered by image layers, but we consider them 'visible' because we anticipate that // they will be subject to L0->L1 compaction in the near future. - let layer_manager = self.layers.read().await; + let layer_manager = self + .layers + .read(LayerManagerLockHolder::GetLayerMapInfo) + .await; let layer_map = layer_manager.layer_map()?; let readable_points = { @@ -1767,20 +1798,14 @@ impl Timeline { } = { let phase1_span = info_span!("compact_level0_phase1"); let ctx = ctx.attached_child(); - let mut stats = CompactLevel0Phase1StatsBuilder { + let stats = CompactLevel0Phase1StatsBuilder { version: Some(2), tenant_id: Some(self.tenant_shard_id), timeline_id: Some(self.timeline_id), ..Default::default() }; - let begin = tokio::time::Instant::now(); - let phase1_layers_locked = self.layers.read().await; - let now = tokio::time::Instant::now(); - stats.read_lock_acquisition_micros = - DurationRecorder::Recorded(RecordedDuration(now - begin), now); self.compact_level0_phase1( - phase1_layers_locked, stats, target_file_size, force_compaction_ignore_threshold, @@ -1801,16 +1826,19 @@ impl Timeline { } /// Level0 files first phase of compaction, explained in the [`Self::compact_legacy`] comment. - async fn compact_level0_phase1<'a>( - self: &'a Arc, - guard: tokio::sync::RwLockReadGuard<'a, LayerManager>, + async fn compact_level0_phase1( + self: &Arc, mut stats: CompactLevel0Phase1StatsBuilder, target_file_size: u64, force_compaction_ignore_threshold: bool, ctx: &RequestContext, ) -> Result { - stats.read_lock_held_spawn_blocking_startup_micros = - stats.read_lock_acquisition_micros.till_now(); // set by caller + let begin = tokio::time::Instant::now(); + let guard = self.layers.read(LayerManagerLockHolder::Compaction).await; + let now = tokio::time::Instant::now(); + stats.read_lock_acquisition_micros = + DurationRecorder::Recorded(RecordedDuration(now - begin), now); + let layers = guard.layer_map()?; let level0_deltas = layers.level0_deltas(); stats.level0_deltas_count = Some(level0_deltas.len()); @@ -1845,6 +1873,12 @@ impl Timeline { .map(|x| guard.get_from_desc(x)) .collect::>(); + drop_layer_manager_rlock(guard); + + // The is the last LSN that we have seen for L0 compaction in the timeline. This LSN might be updated + // by the time we finish the compaction. So we need to get it here. 
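Editor's note: the phase-1 refactor above takes the layer-map read lock inside the function, resolves the L0 deltas it needs, and drops the guard before the expensive key sort and hole computation, re-acquiring it only where coverage must be consulted. A small sketch of the general snapshot-then-drop pattern using a plain std RwLock; the hole heuristic is invented for the example:

use std::sync::RwLock;

/// Copy out what the expensive phase needs while holding the lock, then
/// release it so readers are not blocked during the heavy computation.
fn plan_compaction(layer_map: &RwLock<Vec<u64>>) -> (Vec<u64>, usize) {
    let snapshot;
    {
        let guard = layer_map.read().unwrap();
        // Resolve descriptors to owned values under the lock...
        snapshot = guard.clone();
        // ...and let the guard drop here, before any real work starts.
    }
    // Heavy work happens without the lock held.
    let holes = snapshot.windows(2).filter(|w| w[1] - w[0] > 10).count();
    (snapshot, holes)
}

fn main() {
    let layer_map = RwLock::new(vec![0, 5, 30, 32, 60]);
    let (layers, holes) = plan_compaction(&layer_map);
    println!("planning over {} layers found {} holes", layers.len(), holes);
}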
+ let l0_last_record_lsn = self.get_last_record_lsn(); + // Gather the files to compact in this iteration. // // Start with the oldest Level 0 delta file, and collect any other @@ -1932,9 +1966,7 @@ impl Timeline { // we don't accidentally use it later in the function. drop(level0_deltas); - stats.read_lock_held_prerequisites_micros = stats - .read_lock_held_spawn_blocking_startup_micros - .till_now(); + stats.compaction_prerequisites_micros = stats.read_lock_acquisition_micros.till_now(); // TODO: replace with streaming k-merge let all_keys = { @@ -1956,7 +1988,7 @@ impl Timeline { all_keys }; - stats.read_lock_held_key_sort_micros = stats.read_lock_held_prerequisites_micros.till_now(); + stats.read_lock_held_key_sort_micros = stats.compaction_prerequisites_micros.till_now(); // Determine N largest holes where N is number of compacted layers. The vec is sorted by key range start. // @@ -1990,7 +2022,6 @@ impl Timeline { } } let max_holes = deltas_to_compact.len(); - let last_record_lsn = self.get_last_record_lsn(); let min_hole_range = (target_file_size / page_cache::PAGE_SZ as u64) as i128; let min_hole_coverage_size = 3; // TODO: something more flexible? // min-heap (reserve space for one more element added before eviction) @@ -2009,8 +2040,12 @@ impl Timeline { // has not so much sense, because largest holes will corresponds field1/field2 changes. // But we are mostly interested to eliminate holes which cause generation of excessive image layers. // That is why it is better to measure size of hole as number of covering image layers. - let coverage_size = - layers.image_coverage(&key_range, last_record_lsn).len(); + let coverage_size = { + // TODO: optimize this with copy-on-write layer map. + let guard = self.layers.read(LayerManagerLockHolder::Compaction).await; + let layers = guard.layer_map()?; + layers.image_coverage(&key_range, l0_last_record_lsn).len() + }; if coverage_size >= min_hole_coverage_size { heap.push(Hole { key_range, @@ -2029,7 +2064,6 @@ impl Timeline { holes }; stats.read_lock_held_compute_holes_micros = stats.read_lock_held_key_sort_micros.till_now(); - drop_rlock(guard); if self.cancel.is_cancelled() { return Err(CompactionError::ShuttingDown); @@ -2370,9 +2404,8 @@ struct CompactLevel0Phase1StatsBuilder { tenant_id: Option, timeline_id: Option, read_lock_acquisition_micros: DurationRecorder, - read_lock_held_spawn_blocking_startup_micros: DurationRecorder, read_lock_held_key_sort_micros: DurationRecorder, - read_lock_held_prerequisites_micros: DurationRecorder, + compaction_prerequisites_micros: DurationRecorder, read_lock_held_compute_holes_micros: DurationRecorder, read_lock_drop_micros: DurationRecorder, write_layer_files_micros: DurationRecorder, @@ -2387,9 +2420,8 @@ struct CompactLevel0Phase1Stats { tenant_id: TenantShardId, timeline_id: TimelineId, read_lock_acquisition_micros: RecordedDuration, - read_lock_held_spawn_blocking_startup_micros: RecordedDuration, read_lock_held_key_sort_micros: RecordedDuration, - read_lock_held_prerequisites_micros: RecordedDuration, + compaction_prerequisites_micros: RecordedDuration, read_lock_held_compute_holes_micros: RecordedDuration, read_lock_drop_micros: RecordedDuration, write_layer_files_micros: RecordedDuration, @@ -2414,16 +2446,12 @@ impl TryFrom for CompactLevel0Phase1Stats { .read_lock_acquisition_micros .into_recorded() .ok_or_else(|| anyhow!("read_lock_acquisition_micros not set"))?, - read_lock_held_spawn_blocking_startup_micros: value - .read_lock_held_spawn_blocking_startup_micros - .into_recorded() - 
.ok_or_else(|| anyhow!("read_lock_held_spawn_blocking_startup_micros not set"))?, read_lock_held_key_sort_micros: value .read_lock_held_key_sort_micros .into_recorded() .ok_or_else(|| anyhow!("read_lock_held_key_sort_micros not set"))?, - read_lock_held_prerequisites_micros: value - .read_lock_held_prerequisites_micros + compaction_prerequisites_micros: value + .compaction_prerequisites_micros .into_recorded() .ok_or_else(|| anyhow!("read_lock_held_prerequisites_micros not set"))?, read_lock_held_compute_holes_micros: value @@ -2469,7 +2497,7 @@ impl Timeline { // Find the top of the historical layers let end_lsn = { - let guard = self.layers.read().await; + let guard = self.layers.read(LayerManagerLockHolder::Compaction).await; let layers = guard.layer_map()?; let l0_deltas = layers.level0_deltas(); @@ -2635,15 +2663,15 @@ impl Timeline { use std::fmt::Write; let mut output = String::new(); if let Some((key, _, _)) = replay_history.first() { - write!(output, "key={} ", key).unwrap(); + write!(output, "key={key} ").unwrap(); let mut cnt = 0; for (_, lsn, val) in replay_history { if val.is_image() { - write!(output, "i@{} ", lsn).unwrap(); + write!(output, "i@{lsn} ").unwrap(); } else if val.will_init() { - write!(output, "di@{} ", lsn).unwrap(); + write!(output, "di@{lsn} ").unwrap(); } else { - write!(output, "d@{} ", lsn).unwrap(); + write!(output, "d@{lsn} ").unwrap(); } cnt += 1; if cnt >= 128 { @@ -3008,7 +3036,7 @@ impl Timeline { } split_key_ranges.sort(); let all_layers = { - let guard = self.layers.read().await; + let guard = self.layers.read(LayerManagerLockHolder::Compaction).await; let layer_map = guard.layer_map()?; layer_map.iter_historic_layers().collect_vec() }; @@ -3112,12 +3140,12 @@ impl Timeline { .await?; let jobs_len = jobs.len(); for (idx, job) in jobs.into_iter().enumerate() { - info!( - "running enhanced gc bottom-most compaction, sub-compaction {}/{}", - idx + 1, - jobs_len - ); + let sub_compaction_progress = format!("{}/{}", idx + 1, jobs_len); self.compact_with_gc_inner(cancel, job, ctx, yield_for_l0) + .instrument(info_span!( + "sub_compaction", + sub_compaction_progress = sub_compaction_progress + )) .await?; } if jobs_len == 0 { @@ -3185,7 +3213,10 @@ impl Timeline { // 1. If a layer is in the selection, all layers below it are in the selection. // 2. Inferred from (1), for each key in the layer selection, the value can be reconstructed only with the layers in the layer selection. let job_desc = { - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GarbageCollection) + .await; let layers = guard.layer_map()?; let gc_info = self.gc_info.read().unwrap(); let mut retain_lsns_below_horizon = Vec::new(); @@ -3488,22 +3519,16 @@ impl Timeline { // Only create image layers when there is no ancestor branches. TODO: create covering image layer // when some condition meet. 
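Editor's note: wrapping each sub-compaction in an `info_span!` with a `sub_compaction_progress` field attaches the i/total progress to every log line emitted inside the job, instead of a single up-front message. A runnable sketch of that pattern, assuming the tracing, tracing-subscriber and tokio crates:

use tracing::{Instrument, info, info_span};

async fn compact_one_job(idx: usize) {
    // Any log emitted here inherits the surrounding span's fields.
    info!("compacting job body {idx}");
}

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt().init();

    let jobs = 3;
    for idx in 0..jobs {
        let progress = format!("{}/{}", idx + 1, jobs);
        // Attach the progress as a span field instead of repeating it in
        // every log line inside the job.
        compact_one_job(idx)
            .instrument(info_span!("sub_compaction", sub_compaction_progress = %progress))
            .await;
    }
}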
let mut image_layer_writer = if !has_data_below { - Some( - SplitImageLayerWriter::new( - self.conf, - self.timeline_id, - self.tenant_shard_id, - job_desc.compaction_key_range.start, - lowest_retain_lsn, - self.get_compaction_target_size(), - &self.gate, - self.cancel.clone(), - ctx, - ) - .await - .context("failed to create image layer writer") - .map_err(CompactionError::Other)?, - ) + Some(SplitImageLayerWriter::new( + self.conf, + self.timeline_id, + self.tenant_shard_id, + job_desc.compaction_key_range.start, + lowest_retain_lsn, + self.get_compaction_target_size(), + &self.gate, + self.cancel.clone(), + )) } else { None }; @@ -3516,10 +3541,7 @@ impl Timeline { self.get_compaction_target_size(), &self.gate, self.cancel.clone(), - ) - .await - .context("failed to create delta layer writer") - .map_err(CompactionError::Other)?; + ); #[derive(Default)] struct RewritingLayers { @@ -3956,7 +3978,10 @@ impl Timeline { // First, do a sanity check to ensure the newly-created layer map does not contain overlaps. let all_layers = { - let guard = self.layers.read().await; + let guard = self + .layers + .read(LayerManagerLockHolder::GarbageCollection) + .await; let layer_map = guard.layer_map()?; layer_map.iter_historic_layers().collect_vec() }; @@ -4020,7 +4045,10 @@ impl Timeline { let update_guard = self.gc_compaction_layer_update_lock.write().await; // Acquiring the update guard ensures current read operations end and new read operations are blocked. // TODO: can we use `latest_gc_cutoff` Rcu to achieve the same effect? - let mut guard = self.layers.write().await; + let mut guard = self + .layers + .write(LayerManagerLockHolder::GarbageCollection) + .await; guard .open_mut()? .finish_gc_compaction(&layer_selection, &compact_to, &self.metrics); @@ -4088,7 +4116,11 @@ impl TimelineAdaptor { pub async fn flush_updates(&mut self) -> Result<(), CompactionError> { let layers_to_delete = { - let guard = self.timeline.layers.read().await; + let guard = self + .timeline + .layers + .read(LayerManagerLockHolder::Compaction) + .await; self.layers_to_delete .iter() .map(|x| guard.get_from_desc(x)) @@ -4133,7 +4165,11 @@ impl CompactionJobExecutor for TimelineAdaptor { ) -> anyhow::Result>> { self.flush_updates().await?; - let guard = self.timeline.layers.read().await; + let guard = self + .timeline + .layers + .read(LayerManagerLockHolder::Compaction) + .await; let layer_map = guard.layer_map()?; let result = layer_map @@ -4172,7 +4208,11 @@ impl CompactionJobExecutor for TimelineAdaptor { // this is a lot more complex than a simple downcast... if layer.is_delta() { let l = { - let guard = self.timeline.layers.read().await; + let guard = self + .timeline + .layers + .read(LayerManagerLockHolder::Compaction) + .await; guard.get_from_desc(layer) }; let result = l.download_and_keep_resident(ctx).await?; @@ -4297,7 +4337,8 @@ impl TimelineAdaptor { self.timeline.cancel.clone(), ctx, ) - .await?; + .await + .map_err(CreateImageLayersError::Other)?; fail_point!("image-layer-writer-fail-before-finish", |_| { Err(CreateImageLayersError::Other(anyhow::anyhow!( @@ -4306,7 +4347,10 @@ impl TimelineAdaptor { }); let keyspace = KeySpace { - ranges: self.get_keyspace(key_range, lsn, ctx).await?, + ranges: self + .get_keyspace(key_range, lsn, ctx) + .await + .map_err(CreateImageLayersError::Other)?, }; // TODO set proper (stateful) start. 
The create_image_layer_for_rel_blocks function mostly let outcome = self @@ -4318,6 +4362,7 @@ impl TimelineAdaptor { ctx, key_range.clone(), IoConcurrency::sequential(), + None, ) .await?; @@ -4325,9 +4370,13 @@ impl TimelineAdaptor { unfinished_image_layer, } = outcome { - let (desc, path) = unfinished_image_layer.finish(ctx).await?; + let (desc, path) = unfinished_image_layer + .finish(ctx) + .await + .map_err(CreateImageLayersError::Other)?; let image_layer = - Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path)?; + Layer::finish_creating(self.timeline.conf, &self.timeline, desc, &path) + .map_err(CreateImageLayersError::Other)?; self.new_images.push(image_layer); } diff --git a/pageserver/src/tenant/timeline/delete.rs b/pageserver/src/tenant/timeline/delete.rs index 51bdd59f4f..f7dc44be90 100644 --- a/pageserver/src/tenant/timeline/delete.rs +++ b/pageserver/src/tenant/timeline/delete.rs @@ -241,8 +241,17 @@ impl DeleteTimelineFlow { { Ok(r) => r, Err(DownloadError::NotFound) => { - // Deletion is already complete + // Deletion is already complete. + // As we came here, we will need to remove the timeline from the tenant though. tracing::info!("Timeline already deleted in remote storage"); + if let TimelineOrOffloaded::Offloaded(_) = &timeline { + // We only supoprt this for offloaded timelines, as we don't know which state non-offloaded timelines are in. + tracing::info!( + "Timeline with gone index part is offloaded timeline. Removing from tenant." + ); + remove_maybe_offloaded_timeline_from_tenant(tenant, &timeline, &guard) + .await?; + } return Ok(()); } Err(e) => { diff --git a/pageserver/src/tenant/timeline/detach_ancestor.rs b/pageserver/src/tenant/timeline/detach_ancestor.rs index 40eda8c785..223e888e27 100644 --- a/pageserver/src/tenant/timeline/detach_ancestor.rs +++ b/pageserver/src/tenant/timeline/detach_ancestor.rs @@ -19,7 +19,7 @@ use utils::id::TimelineId; use utils::lsn::Lsn; use utils::sync::gate::GateError; -use super::layer_manager::LayerManager; +use super::layer_manager::{LayerManager, LayerManagerLockHolder}; use super::{FlushLayerError, Timeline}; use crate::context::{DownloadBehavior, RequestContext}; use crate::task_mgr::TaskKind; @@ -182,6 +182,7 @@ pub(crate) async fn generate_tombstone_image_layer( detached: &Arc, ancestor: &Arc, ancestor_lsn: Lsn, + historic_layers_to_copy: &Vec, ctx: &RequestContext, ) -> Result, Error> { tracing::info!( @@ -199,7 +200,24 @@ pub(crate) async fn generate_tombstone_image_layer( let image_lsn = ancestor_lsn; { - let layers = detached.layers.read().await; + for layer in historic_layers_to_copy { + let desc = layer.layer_desc(); + if !desc.is_delta + && desc.lsn_range.start == image_lsn + && overlaps_with(&key_range, &desc.key_range) + { + tracing::info!( + layer=%layer, "will copy tombstone from ancestor instead of creating a new one" + ); + + return Ok(None); + } + } + + let layers = detached + .layers + .read(LayerManagerLockHolder::DetachAncestor) + .await; for layer in layers.all_persistent_layers() { if !layer.is_delta && layer.lsn_range.start == image_lsn @@ -423,7 +441,7 @@ pub(super) async fn prepare( // we do not need to start from our layers, because they can only be layers that come // *after* ancestor_lsn let layers = tokio::select! 
{ - guard = ancestor.layers.read() => guard, + guard = ancestor.layers.read(LayerManagerLockHolder::DetachAncestor) => guard, _ = detached.cancel.cancelled() => { return Err(ShuttingDown); } @@ -447,7 +465,8 @@ pub(super) async fn prepare( Vec::with_capacity(straddling_branchpoint.len() + rest_of_historic.len() + 1); if let Some(tombstone_layer) = - generate_tombstone_image_layer(detached, &ancestor, ancestor_lsn, ctx).await? + generate_tombstone_image_layer(detached, &ancestor, ancestor_lsn, &rest_of_historic, ctx) + .await? { new_layers.push(tombstone_layer.into()); } @@ -869,7 +888,12 @@ async fn remote_copy( // Double check that the file is orphan (probably from an earlier attempt), then delete it let key = file_name.clone().into(); - if adoptee.layers.read().await.contains_key(&key) { + if adoptee + .layers + .read(LayerManagerLockHolder::DetachAncestor) + .await + .contains_key(&key) + { // We are supposed to filter out such cases before coming to this function return Err(Error::Prepare(anyhow::anyhow!( "layer file {file_name} already present and inside layer map" @@ -877,7 +901,7 @@ async fn remote_copy( } tracing::info!("Deleting orphan layer file to make way for hard linking"); // Delete orphan layer file and try again, to ensure this layer has a well understood source - std::fs::remove_file(adopted_path) + std::fs::remove_file(&adoptee_path) .map_err(|e| Error::launder(e.into(), Error::Prepare))?; std::fs::hard_link(adopted_path, &adoptee_path) .map_err(|e| Error::launder(e.into(), Error::Prepare))?; diff --git a/pageserver/src/tenant/timeline/eviction_task.rs b/pageserver/src/tenant/timeline/eviction_task.rs index b1b0d32c9b..1328c3ac12 100644 --- a/pageserver/src/tenant/timeline/eviction_task.rs +++ b/pageserver/src/tenant/timeline/eviction_task.rs @@ -33,6 +33,7 @@ use crate::tenant::size::CalculateSyntheticSizeError; use crate::tenant::storage_layer::LayerVisibilityHint; use crate::tenant::tasks::{BackgroundLoopKind, BackgroundLoopSemaphorePermit, sleep_random}; use crate::tenant::timeline::EvictionError; +use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::{LogicalSizeCalculationCause, TenantShard}; #[derive(Default)] @@ -208,7 +209,7 @@ impl Timeline { let mut js = tokio::task::JoinSet::new(); { - let guard = self.layers.read().await; + let guard = self.layers.read(LayerManagerLockHolder::Eviction).await; guard .likely_resident_layers() diff --git a/pageserver/src/tenant/timeline/handle.rs b/pageserver/src/tenant/timeline/handle.rs index 809b350f38..2dbff20ab2 100644 --- a/pageserver/src/tenant/timeline/handle.rs +++ b/pageserver/src/tenant/timeline/handle.rs @@ -887,7 +887,7 @@ mod tests { .expect("we still have it"); } - fn make_relation_key_for_shard(shard: ShardNumber, params: &ShardParameters) -> Key { + fn make_relation_key_for_shard(shard: ShardNumber, params: ShardParameters) -> Key { rel_block_to_key( RelTag { spcnode: 1663, @@ -917,14 +917,14 @@ mod tests { let child0 = Arc::new_cyclic(|myself| StubTimeline { gate: Default::default(), id: timeline_id, - shard: ShardIdentity::from_params(ShardNumber(0), &child_params), + shard: ShardIdentity::from_params(ShardNumber(0), child_params), per_timeline_state: PerTimelineState::default(), myself: myself.clone(), }); let child1 = Arc::new_cyclic(|myself| StubTimeline { gate: Default::default(), id: timeline_id, - shard: ShardIdentity::from_params(ShardNumber(1), &child_params), + shard: ShardIdentity::from_params(ShardNumber(1), child_params), per_timeline_state: 
PerTimelineState::default(), myself: myself.clone(), }); @@ -937,7 +937,7 @@ mod tests { let handle = cache .get( timeline_id, - ShardSelector::Page(make_relation_key_for_shard(ShardNumber(i), &child_params)), + ShardSelector::Page(make_relation_key_for_shard(ShardNumber(i), child_params)), &StubManager { shards: vec![parent.clone()], }, @@ -961,7 +961,7 @@ mod tests { let handle = cache .get( timeline_id, - ShardSelector::Page(make_relation_key_for_shard(ShardNumber(i), &child_params)), + ShardSelector::Page(make_relation_key_for_shard(ShardNumber(i), child_params)), &StubManager { shards: vec![], // doesn't matter what's in here, the cache is fully loaded }, @@ -978,7 +978,7 @@ mod tests { let parent_handle = cache .get( timeline_id, - ShardSelector::Page(make_relation_key_for_shard(ShardNumber(0), &child_params)), + ShardSelector::Page(make_relation_key_for_shard(ShardNumber(0), child_params)), &StubManager { shards: vec![parent.clone()], }, @@ -995,7 +995,7 @@ mod tests { let handle = cache .get( timeline_id, - ShardSelector::Page(make_relation_key_for_shard(ShardNumber(i), &child_params)), + ShardSelector::Page(make_relation_key_for_shard(ShardNumber(i), child_params)), &StubManager { shards: vec![child0.clone(), child1.clone()], // <====== this changed compared to previous loop }, diff --git a/pageserver/src/tenant/timeline/import_pgdata.rs b/pageserver/src/tenant/timeline/import_pgdata.rs index 606ad09ef1..817d76ce2f 100644 --- a/pageserver/src/tenant/timeline/import_pgdata.rs +++ b/pageserver/src/tenant/timeline/import_pgdata.rs @@ -15,6 +15,7 @@ use super::{Timeline, TimelineDeleteProgress}; use crate::context::RequestContext; use crate::controller_upcall_client::{StorageControllerUpcallApi, StorageControllerUpcallClient}; use crate::tenant::metadata::TimelineMetadata; +use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; mod flow; mod importbucket_client; @@ -163,7 +164,10 @@ async fn prepare_import( info!("wipe the slate clean"); { // TODO: do we need to hold GC lock for this? 
- let mut guard = timeline.layers.write().await; + let mut guard = timeline + .layers + .write(LayerManagerLockHolder::ImportPgData) + .await; assert!( guard.layer_map()?.open_layer.is_none(), "while importing, there should be no in-memory layer" // this just seems like a good place to assert it diff --git a/pageserver/src/tenant/timeline/import_pgdata/flow.rs b/pageserver/src/tenant/timeline/import_pgdata/flow.rs index e003bb6810..d471e9fc69 100644 --- a/pageserver/src/tenant/timeline/import_pgdata/flow.rs +++ b/pageserver/src/tenant/timeline/import_pgdata/flow.rs @@ -36,8 +36,8 @@ use pageserver_api::keyspace::{ShardedRange, singleton_range}; use pageserver_api::models::{ShardImportProgress, ShardImportProgressV1, ShardImportStatus}; use pageserver_api::reltag::{RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; +use postgres_ffi::BLCKSZ; use postgres_ffi::relfile_utils::parse_relfilename; -use postgres_ffi::{BLCKSZ, pg_constants}; use remote_storage::RemotePath; use tokio::sync::Semaphore; use tokio_stream::StreamExt; @@ -56,6 +56,7 @@ use crate::pgdatadir_mapping::{ }; use crate::task_mgr::TaskKind; use crate::tenant::storage_layer::{AsLayerDesc, ImageLayerWriter, Layer}; +use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; pub async fn run( timeline: Arc, @@ -557,7 +558,7 @@ impl PgDataDir { PgDataDirDb::new( storage, &basedir.join(dboid.to_string()), - pg_constants::DEFAULTTABLESPACE_OID, + postgres_ffi_types::constants::DEFAULTTABLESPACE_OID, dboid, &datadir_path, ) @@ -570,7 +571,7 @@ impl PgDataDir { PgDataDirDb::new( storage, &datadir_path.join("global"), - postgres_ffi::pg_constants::GLOBALTABLESPACE_OID, + postgres_ffi_types::constants::GLOBALTABLESPACE_OID, 0, &datadir_path, ) @@ -984,7 +985,10 @@ impl ChunkProcessingJob { let (desc, path) = writer.finish(ctx).await?; { - let guard = timeline.layers.read().await; + let guard = timeline + .layers + .read(LayerManagerLockHolder::ImportPgData) + .await; let existing_layer = guard.try_get_from_key(&desc.key()); if let Some(layer) = existing_layer { if layer.metadata().generation == timeline.generation { @@ -1007,7 +1011,10 @@ impl ChunkProcessingJob { // certain that the existing layer is identical to the new one, so in that case // we replace the old layer with the one we just generated. 
- let mut guard = timeline.layers.write().await; + let mut guard = timeline + .layers + .write(LayerManagerLockHolder::ImportPgData) + .await; let existing_layer = guard .try_get_from_key(&resident_layer.layer_desc().key()) @@ -1036,7 +1043,7 @@ impl ChunkProcessingJob { } } - crate::tenant::timeline::drop_wlock(guard); + crate::tenant::timeline::drop_layer_manager_wlock(guard); timeline .remote_client diff --git a/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs b/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs index bf2d9875c1..98c44313f1 100644 --- a/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs +++ b/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use anyhow::Context; use bytes::Bytes; -use postgres_ffi::ControlFileData; +use postgres_ffi::{ControlFileData, PgMajorVersion}; use remote_storage::{ Download, DownloadError, DownloadKind, DownloadOpts, GenericRemoteStorage, Listing, ListingObject, RemotePath, RemoteStorageConfig, @@ -264,7 +264,7 @@ impl ControlFile { pub(crate) fn base_lsn(&self) -> Lsn { Lsn(self.control_file_data.checkPoint).align() } - pub(crate) fn pg_version(&self) -> u32 { + pub(crate) fn pg_version(&self) -> PgMajorVersion { self.try_pg_version() .expect("prepare() checks that try_pg_version doesn't error") } @@ -274,13 +274,14 @@ impl ControlFile { pub(crate) fn control_file_buf(&self) -> &Bytes { &self.control_file_buf } - fn try_pg_version(&self) -> anyhow::Result { + + fn try_pg_version(&self) -> anyhow::Result { Ok(match self.control_file_data.catalog_version_no { // thesea are from catversion.h - 202107181 => 14, - 202209061 => 15, - 202307071 => 16, - 202406281 => 17, + 202107181 => PgMajorVersion::PG14, + 202209061 => PgMajorVersion::PG15, + 202307071 => PgMajorVersion::PG16, + 202406281 => PgMajorVersion::PG17, catversion => { anyhow::bail!("unrecognized catalog version {catversion}") } diff --git a/pageserver/src/tenant/timeline/layer_manager.rs b/pageserver/src/tenant/timeline/layer_manager.rs index ae898260d2..2eccf48579 100644 --- a/pageserver/src/tenant/timeline/layer_manager.rs +++ b/pageserver/src/tenant/timeline/layer_manager.rs @@ -1,5 +1,8 @@ use std::collections::HashMap; +use std::mem::ManuallyDrop; +use std::ops::{Deref, DerefMut}; use std::sync::Arc; +use std::time::Duration; use anyhow::{Context, bail, ensure}; use itertools::Itertools; @@ -20,6 +23,155 @@ use crate::tenant::storage_layer::{ PersistentLayerKey, ReadableLayerWeak, ResidentLayer, }; +/// Warn if the lock was held for longer than this threshold. +/// It's very generous and we should bring this value down over time. 
+const LAYER_MANAGER_LOCK_WARN_THRESHOLD: Duration = Duration::from_secs(5); +const LAYER_MANAGER_LOCK_READ_WARN_THRESHOLD: Duration = Duration::from_secs(30); + +/// Describes the operation that is holding the layer manager lock +#[derive(Debug, Clone, Copy, strum_macros::Display)] +#[strum(serialize_all = "kebab_case")] +pub(crate) enum LayerManagerLockHolder { + GetLayerMapInfo, + GenerateHeatmap, + GetPage, + Init, + LoadLayerMap, + GetLayerForWrite, + TryFreezeLayer, + FlushFrozenLayer, + FlushLoop, + Compaction, + GarbageCollection, + Shutdown, + ImportPgData, + DetachAncestor, + Eviction, + #[cfg(test)] + Testing, +} + +/// Wrapper for the layer manager that tracks the amount of time during which +/// it was held under read or write lock +#[derive(Default)] +pub(crate) struct LockedLayerManager { + locked: tokio::sync::RwLock, +} + +pub(crate) struct LayerManagerReadGuard<'a> { + guard: ManuallyDrop>, + acquired_at: std::time::Instant, + holder: LayerManagerLockHolder, +} + +pub(crate) struct LayerManagerWriteGuard<'a> { + guard: ManuallyDrop>, + acquired_at: std::time::Instant, + holder: LayerManagerLockHolder, +} + +impl Drop for LayerManagerReadGuard<'_> { + fn drop(&mut self) { + // Drop the lock first, before potentially warning if it was held for too long. + // SAFETY: ManuallyDrop in Drop implementation + unsafe { ManuallyDrop::drop(&mut self.guard) }; + + let held_for = self.acquired_at.elapsed(); + if held_for >= LAYER_MANAGER_LOCK_READ_WARN_THRESHOLD { + tracing::warn!( + holder=%self.holder, + "Layer manager read lock held for {}s", + held_for.as_secs_f64(), + ); + } + } +} + +impl Drop for LayerManagerWriteGuard<'_> { + fn drop(&mut self) { + // Drop the lock first, before potentially warning if it was held for too long. + // SAFETY: ManuallyDrop in Drop implementation + unsafe { ManuallyDrop::drop(&mut self.guard) }; + + let held_for = self.acquired_at.elapsed(); + if held_for >= LAYER_MANAGER_LOCK_WARN_THRESHOLD { + tracing::warn!( + holder=%self.holder, + "Layer manager write lock held for {}s", + held_for.as_secs_f64(), + ); + } + } +} + +impl Deref for LayerManagerReadGuard<'_> { + type Target = LayerManager; + + fn deref(&self) -> &Self::Target { + self.guard.deref() + } +} + +impl Deref for LayerManagerWriteGuard<'_> { + type Target = LayerManager; + + fn deref(&self) -> &Self::Target { + self.guard.deref() + } +} + +impl DerefMut for LayerManagerWriteGuard<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.guard.deref_mut() + } +} + +impl LockedLayerManager { + pub(crate) async fn read(&self, holder: LayerManagerLockHolder) -> LayerManagerReadGuard { + let guard = ManuallyDrop::new(self.locked.read().await); + LayerManagerReadGuard { + guard, + acquired_at: std::time::Instant::now(), + holder, + } + } + + pub(crate) fn try_read( + &self, + holder: LayerManagerLockHolder, + ) -> Result { + let guard = ManuallyDrop::new(self.locked.try_read()?); + + Ok(LayerManagerReadGuard { + guard, + acquired_at: std::time::Instant::now(), + holder, + }) + } + + pub(crate) async fn write(&self, holder: LayerManagerLockHolder) -> LayerManagerWriteGuard { + let guard = ManuallyDrop::new(self.locked.write().await); + LayerManagerWriteGuard { + guard, + acquired_at: std::time::Instant::now(), + holder, + } + } + + pub(crate) fn try_write( + &self, + holder: LayerManagerLockHolder, + ) -> Result { + let guard = ManuallyDrop::new(self.locked.try_write()?); + + Ok(LayerManagerWriteGuard { + guard, + acquired_at: std::time::Instant::now(), + holder, + }) + } +} + /// Provides 
semantic APIs to manipulate the layer map. pub(crate) enum LayerManager { /// Open as in not shutdown layer manager; we still have in-memory layers and we can manipulate diff --git a/pageserver/src/tenant/timeline/offload.rs b/pageserver/src/tenant/timeline/offload.rs index 5920315917..9464f034c7 100644 --- a/pageserver/src/tenant/timeline/offload.rs +++ b/pageserver/src/tenant/timeline/offload.rs @@ -19,6 +19,8 @@ pub(crate) enum OffloadError { NotArchived, #[error(transparent)] RemoteStorage(anyhow::Error), + #[error("Offload or deletion already in progress")] + AlreadyInProgress, #[error("Unexpected offload error: {0}")] Other(anyhow::Error), } @@ -44,20 +46,26 @@ pub(crate) async fn offload_timeline( timeline.timeline_id, TimelineDeleteGuardKind::Offload, ); - if let Err(DeleteTimelineError::HasChildren(children)) = delete_guard_res { - let is_archived = timeline.is_archived(); - if is_archived == Some(true) { - tracing::error!("timeline is archived but has non-archived children: {children:?}"); + let (timeline, guard) = match delete_guard_res { + Ok(timeline_and_guard) => timeline_and_guard, + Err(DeleteTimelineError::HasChildren(children)) => { + let is_archived = timeline.is_archived(); + if is_archived == Some(true) { + tracing::error!("timeline is archived but has non-archived children: {children:?}"); + return Err(OffloadError::NotArchived); + } + tracing::info!( + ?is_archived, + "timeline is not archived and has unarchived children" + ); return Err(OffloadError::NotArchived); } - tracing::info!( - ?is_archived, - "timeline is not archived and has unarchived children" - ); - return Err(OffloadError::NotArchived); + Err(DeleteTimelineError::AlreadyInProgress(_)) => { + tracing::info!("timeline offload or deletion already in progress"); + return Err(OffloadError::AlreadyInProgress); + } + Err(e) => return Err(OffloadError::Other(anyhow::anyhow!(e))), }; - let (timeline, guard) = - delete_guard_res.map_err(|e| OffloadError::Other(anyhow::anyhow!(e)))?; let TimelineOrOffloaded::Timeline(timeline) = timeline else { tracing::error!("timeline already offloaded, but given timeline object"); diff --git a/pageserver/src/tenant/timeline/walreceiver.rs b/pageserver/src/tenant/timeline/walreceiver.rs index 633c94a010..3fe6c21a7d 100644 --- a/pageserver/src/tenant/timeline/walreceiver.rs +++ b/pageserver/src/tenant/timeline/walreceiver.rs @@ -63,7 +63,6 @@ pub struct WalReceiver { /// All task spawned by [`WalReceiver::start`] and its children are sensitive to this token. /// It's a child token of [`Timeline`] so that timeline shutdown can cancel WalReceiver tasks early for `freeze_and_flush=true`. 
cancel: CancellationToken, - task: tokio::task::JoinHandle<()>, } impl WalReceiver { @@ -80,7 +79,7 @@ impl WalReceiver { let loop_status = Arc::new(std::sync::RwLock::new(None)); let manager_status = Arc::clone(&loop_status); let cancel = timeline.cancel.child_token(); - let task = WALRECEIVER_RUNTIME.spawn({ + let _task = WALRECEIVER_RUNTIME.spawn({ let cancel = cancel.clone(); async move { debug_assert_current_span_has_tenant_and_timeline_id(); @@ -121,25 +120,14 @@ impl WalReceiver { Self { manager_status, cancel, - task, } } #[instrument(skip_all, level = tracing::Level::DEBUG)] - pub async fn shutdown(self) { + pub async fn cancel(self) { debug_assert_current_span_has_tenant_and_timeline_id(); debug!("cancelling walreceiver tasks"); self.cancel.cancel(); - match self.task.await { - Ok(()) => debug!("Shutdown success"), - Err(je) if je.is_cancelled() => unreachable!("not used"), - Err(je) if je.is_panic() => { - // already logged by panic hook - } - Err(je) => { - error!("shutdown walreceiver task join error: {je}") - } - } } pub(crate) fn status(&self) -> Option { diff --git a/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs b/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs index 7e0b0e9b25..9b151d2449 100644 --- a/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs +++ b/pageserver/src/tenant/timeline/walreceiver/connection_manager.rs @@ -100,6 +100,7 @@ pub(super) async fn connection_manager_loop_step( // with other streams on this client (other connection managers). When // object goes out of scope, stream finishes in drop() automatically. let mut broker_subscription = subscribe_for_timeline_updates(broker_client, id, cancel).await?; + let mut broker_reset_interval = tokio::time::interval(tokio::time::Duration::from_secs(30)); debug!("Subscribed for broker timeline updates"); loop { @@ -156,7 +157,10 @@ pub(super) async fn connection_manager_loop_step( // Got a new update from the broker broker_update = broker_subscription.message() /* TODO: review cancellation-safety */ => { match broker_update { - Ok(Some(broker_update)) => connection_manager_state.register_timeline_update(broker_update), + Ok(Some(broker_update)) => { + broker_reset_interval.reset(); + connection_manager_state.register_timeline_update(broker_update); + }, Err(status) => { match status.code() { Code::Unknown if status.message().contains("stream closed because of a broken pipe") || status.message().contains("connection reset") || status.message().contains("error reading a body from connection") => { @@ -178,6 +182,14 @@ pub(super) async fn connection_manager_loop_step( } }, + _ = broker_reset_interval.tick() => { + if wait_lsn_status.borrow().is_some() { + tracing::warn!("No broker updates received for a while, but waiting for WAL. Re-setting stream ...") + } + + broker_subscription = subscribe_for_timeline_updates(broker_client, id, cancel).await?; + }, + new_event = async { // Reminder: this match arm needs to be cancellation-safe. 
loop { diff --git a/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs b/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs index 343e04f5f0..f619c69599 100644 --- a/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs +++ b/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs @@ -25,7 +25,7 @@ use tokio_postgres::replication::ReplicationStream; use tokio_postgres::{Client, SimpleQueryMessage, SimpleQueryRow}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, debug, error, info, trace, warn}; -use utils::critical; +use utils::critical_timeline; use utils::id::NodeId; use utils::lsn::Lsn; use utils::pageserver_feedback::PageserverFeedback; @@ -360,10 +360,13 @@ pub(super) async fn handle_walreceiver_connection( match raw_wal_start_lsn.cmp(&expected_wal_start) { std::cmp::Ordering::Greater => { let msg = format!( - "Gap in streamed WAL: [{}, {})", - expected_wal_start, raw_wal_start_lsn + "Gap in streamed WAL: [{expected_wal_start}, {raw_wal_start_lsn}" + ); + critical_timeline!( + timeline.tenant_shard_id, + timeline.timeline_id, + "{msg}" ); - critical!("{msg}"); return Err(WalReceiverError::Other(anyhow!(msg))); } std::cmp::Ordering::Less => { @@ -376,7 +379,11 @@ pub(super) async fn handle_walreceiver_connection( "Received record with next_record_lsn multiple times ({} < {})", first_rec.next_record_lsn, expected_wal_start ); - critical!("{msg}"); + critical_timeline!( + timeline.tenant_shard_id, + timeline.timeline_id, + "{msg}" + ); return Err(WalReceiverError::Other(anyhow!(msg))); } } @@ -445,7 +452,11 @@ pub(super) async fn handle_walreceiver_connection( // TODO: we can't differentiate cancellation errors with // anyhow::Error, so just ignore it if we're cancelled. if !cancellation.is_cancelled() && !timeline.is_stopping() { - critical!("{err:?}") + critical_timeline!( + timeline.tenant_shard_id, + timeline.timeline_id, + "{err:?}" + ); } })?; diff --git a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs index 090d2ece85..85ea5c4d80 100644 --- a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs +++ b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs @@ -68,16 +68,9 @@ impl AlignedBuffer { assert!( begin <= end, - "range start must not be greater than end: {:?} <= {:?}", - begin, - end, - ); - assert!( - end <= len, - "range end out of bounds: {:?} <= {:?}", - end, - len, + "range start must not be greater than end: {begin:?} <= {end:?}", ); + assert!(end <= len, "range end out of bounds: {end:?} <= {len:?}",); let begin = self.range.start + begin; let end = self.range.start + end; diff --git a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs index 07f949b89e..93116ea85e 100644 --- a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs +++ b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs @@ -242,10 +242,7 @@ unsafe impl bytes::BufMut for AlignedBufferMut { /// Panic with a nice error message. #[cold] fn panic_advance(idx: usize, len: usize) -> ! 
{ - panic!( - "advance out of bounds: the len is {} but advancing by {}", - len, idx - ); + panic!("advance out of bounds: the len is {len} but advancing by {idx}"); } /// Safety: [`AlignedBufferMut`] has exclusive ownership of the io buffer, diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index c1a3b79915..f852051178 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -28,20 +28,20 @@ use std::time::{Duration, Instant, SystemTime}; use bytes::{Buf, Bytes}; use pageserver_api::key::{Key, rel_block_to_key}; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; -use postgres_ffi::relfile_utils::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM}; use postgres_ffi::walrecord::*; use postgres_ffi::{ - TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion, enum_pgversion_dispatch, - fsm_logical_to_physical, pg_constants, + PgMajorVersion, TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion, + enum_pgversion_dispatch, fsm_logical_to_physical, pg_constants, }; +use postgres_ffi_types::forknum::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM}; use tracing::*; use utils::bin_ser::{DeserializeError, SerializeError}; use utils::lsn::Lsn; use utils::rate_limit::RateLimit; -use utils::{critical, failpoint_support}; +use utils::{critical_timeline, failpoint_support}; +use wal_decoder::models::record::NeonWalRecord; use wal_decoder::models::*; use crate::ZERO_PAGE; @@ -418,18 +418,30 @@ impl WalIngest { // as there has historically been cases where PostgreSQL has cleared spurious VM pages. See: // https://github.com/neondatabase/neon/pull/10634. let Some(vm_size) = get_relsize(modification, vm_rel, ctx).await? else { - critical!("clear_vm_bits for unknown VM relation {vm_rel}"); + critical_timeline!( + modification.tline.tenant_shard_id, + modification.tline.timeline_id, + "clear_vm_bits for unknown VM relation {vm_rel}" + ); return Ok(()); }; if let Some(blknum) = new_vm_blk { if blknum >= vm_size { - critical!("new_vm_blk {blknum} not in {vm_rel} of size {vm_size}"); + critical_timeline!( + modification.tline.tenant_shard_id, + modification.tline.timeline_id, + "new_vm_blk {blknum} not in {vm_rel} of size {vm_size}" + ); new_vm_blk = None; } } if let Some(blknum) = old_vm_blk { if blknum >= vm_size { - critical!("old_vm_blk {blknum} not in {vm_rel} of size {vm_size}"); + critical_timeline!( + modification.tline.tenant_shard_id, + modification.tline.timeline_id, + "old_vm_blk {blknum} not in {vm_rel} of size {vm_size}" + ); old_vm_blk = None; } } @@ -781,7 +793,7 @@ impl WalIngest { ) -> Result<(), WalIngestError> { let (xact_common, is_commit, is_prepared) = match record { XactRecord::Prepare(XactPrepare { xl_xid, data }) => { - let xid: u64 = if modification.tline.pg_version >= 17 { + let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 { self.adjust_to_full_transaction_id(xl_xid)? } else { xl_xid as u64 @@ -886,7 +898,7 @@ impl WalIngest { xl_xid, parsed.xid, lsn, ); - let xid: u64 = if modification.tline.pg_version >= 17 { + let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 { self.adjust_to_full_transaction_id(parsed.xid)? 
} else { parsed.xid as u64 @@ -1241,7 +1253,7 @@ impl WalIngest { if xlog_checkpoint.oldestActiveXid == pg_constants::INVALID_TRANSACTION_ID && info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN { - let oldest_active_xid = if pg_version >= 17 { + let oldest_active_xid = if pg_version >= PgMajorVersion::PG17 { let mut oldest_active_full_xid = cp.nextXid.value; for xid in modification.tline.list_twophase_files(lsn, ctx).await? { if xid < oldest_active_full_xid { @@ -1475,10 +1487,11 @@ impl WalIngest { const fn rate_limiter( &self, - pg_version: u32, + pg_version: PgMajorVersion, ) -> Option<&Lazy>> { - const MIN_PG_VERSION: u32 = 14; - const MAX_PG_VERSION: u32 = 17; + const MIN_PG_VERSION: u32 = PgMajorVersion::PG14.major_version_num(); + const MAX_PG_VERSION: u32 = PgMajorVersion::PG17.major_version_num(); + let pg_version = pg_version.major_version_num(); if pg_version < MIN_PG_VERSION || pg_version > MAX_PG_VERSION { return None; @@ -1603,6 +1616,7 @@ async fn get_relsize( #[cfg(test)] mod tests { use anyhow::Result; + use postgres_ffi::PgMajorVersion; use postgres_ffi::RELSEG_SIZE; use super::*; @@ -1625,7 +1639,7 @@ mod tests { #[tokio::test] async fn test_zeroed_checkpoint_decodes_correctly() -> Result<(), anyhow::Error> { - for i in 14..=16 { + for i in PgMajorVersion::ALL { dispatch_pgversion!(i, { pgv::CheckPoint::decode(&pgv::ZERO_CHECKPOINT)?; }); @@ -2108,7 +2122,7 @@ mod tests { // Check relation content for blkno in 0..relsize { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2142,7 +2156,7 @@ mod tests { for blkno in 0..1 { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2167,7 +2181,7 @@ mod tests { ); for blkno in 0..relsize { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2188,7 +2202,7 @@ mod tests { let lsn = Lsn(0x80); let mut m = tline.begin_modification(lsn); for blkno in 0..relsize { - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); walingest .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx) .await?; @@ -2210,7 +2224,7 @@ mod tests { // Check relation content for blkno in 0..relsize { let lsn = Lsn(0x80); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2335,7 +2349,7 @@ mod tests { // 5. Grep sk logs for "restart decoder" to get startpoint // 6. Run just the decoder from this test to get the endpoint. // It's the last LSN the decoder will output. 
- let pg_version = 15; // The test data was generated by pg15 + let pg_version = PgMajorVersion::PG15; // The test data was generated by pg15 let path = "test_data/sk_wal_segment_from_pgbench"; let wal_segment_path = format!("{path}/000000010000000000000001.zst"); let source_initdb_path = format!("{path}/{INITDB_PATH}"); @@ -2414,6 +2428,6 @@ mod tests { } let duration = started_at.elapsed(); - println!("done in {:?}", duration); + println!("done in {duration:?}"); } } diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index ed8a954369..b17b5a15f9 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -32,12 +32,13 @@ use anyhow::Context; use bytes::{Bytes, BytesMut}; use pageserver_api::key::Key; use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus}; -use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::TenantShardId; +use postgres_ffi::PgMajorVersion; use tracing::*; use utils::lsn::Lsn; use utils::sync::gate::GateError; use utils::sync::heavier_once_cell; +use wal_decoder::models::record::NeonWalRecord; use crate::config::PageServerConf; use crate::metrics::{ @@ -165,7 +166,7 @@ impl PostgresRedoManager { lsn: Lsn, base_img: Option<(Lsn, Bytes)>, records: Vec<(Lsn, NeonWalRecord)>, - pg_version: u32, + pg_version: PgMajorVersion, redo_attempt_type: RedoAttemptType, ) -> Result { if records.is_empty() { @@ -232,7 +233,7 @@ impl PostgresRedoManager { /// # Cancel-Safety /// /// This method is cancellation-safe. - pub async fn ping(&self, pg_version: u32) -> Result<(), Error> { + pub async fn ping(&self, pg_version: PgMajorVersion) -> Result<(), Error> { self.do_with_walredo_process(pg_version, |proc| async move { proc.ping(Duration::from_secs(1)) .await @@ -342,7 +343,7 @@ impl PostgresRedoManager { O, >( &self, - pg_version: u32, + pg_version: PgMajorVersion, closure: F, ) -> Result { let proc: Arc = match self.redo_process.get_or_init_detached().await { @@ -442,7 +443,7 @@ impl PostgresRedoManager { base_img_lsn: Lsn, records: &[(Lsn, NeonWalRecord)], wal_redo_timeout: Duration, - pg_version: u32, + pg_version: PgMajorVersion, max_retry_attempts: u32, ) -> Result { *(self.last_redo_at.lock().unwrap()) = Some(Instant::now()); @@ -571,11 +572,12 @@ mod tests { use bytes::Bytes; use pageserver_api::key::Key; - use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::TenantShardId; + use postgres_ffi::PgMajorVersion; use tracing::Instrument; use utils::id::TenantId; use utils::lsn::Lsn; + use wal_decoder::models::record::NeonWalRecord; use super::PostgresRedoManager; use crate::config::PageServerConf; @@ -586,7 +588,7 @@ mod tests { let h = RedoHarness::new().unwrap(); h.manager - .ping(14) + .ping(PgMajorVersion::PG14) .instrument(h.span()) .await .expect("ping should work"); @@ -612,7 +614,7 @@ mod tests { Lsn::from_str("0/16E2408").unwrap(), None, short_records(), - 14, + PgMajorVersion::PG14, RedoAttemptType::ReadPage, ) .instrument(h.span()) @@ -641,7 +643,7 @@ mod tests { Lsn::from_str("0/16E2408").unwrap(), None, short_records(), - 14, + PgMajorVersion::PG14, RedoAttemptType::ReadPage, ) .instrument(h.span()) @@ -663,7 +665,7 @@ mod tests { Lsn::INVALID, None, short_records(), - 16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */ + PgMajorVersion::PG16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */ RedoAttemptType::ReadPage, ) .instrument(h.span()) diff --git a/pageserver/src/walredo/apply_neon.rs 
b/pageserver/src/walredo/apply_neon.rs index a3840f1f6f..a525579082 100644 --- a/pageserver/src/walredo/apply_neon.rs +++ b/pageserver/src/walredo/apply_neon.rs @@ -2,16 +2,16 @@ use anyhow::Context; use byteorder::{ByteOrder, LittleEndian}; use bytes::BytesMut; use pageserver_api::key::Key; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::SlruKind; -use postgres_ffi::relfile_utils::VISIBILITYMAP_FORKNUM; use postgres_ffi::v14::nonrelfile_utils::{ mx_offset_to_flags_bitshift, mx_offset_to_flags_offset, mx_offset_to_member_offset, transaction_id_set_status, }; use postgres_ffi::{BLCKSZ, pg_constants}; +use postgres_ffi_types::forknum::VISIBILITYMAP_FORKNUM; use tracing::*; use utils::lsn::Lsn; +use wal_decoder::models::record::NeonWalRecord; /// Can this request be served by neon redo functions /// or we need to pass it to wal-redo postgres process? @@ -52,8 +52,7 @@ pub(crate) fn apply_in_neon( let (rel, _) = key.to_rel_block().context("invalid record")?; assert!( rel.forknum == VISIBILITYMAP_FORKNUM, - "TruncateVisibilityMap record on unexpected rel {}", - rel + "TruncateVisibilityMap record on unexpected rel {rel}" ); let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; map[*trunc_byte + 1..].fill(0u8); @@ -78,8 +77,7 @@ pub(crate) fn apply_in_neon( let (rel, blknum) = key.to_rel_block().context("invalid record")?; assert!( rel.forknum == VISIBILITYMAP_FORKNUM, - "ClearVisibilityMapFlags record on unexpected rel {}", - rel + "ClearVisibilityMapFlags record on unexpected rel {rel}" ); if let Some(heap_blkno) = *new_heap_blkno { // Calculate the VM block and offset that corresponds to the heap block. @@ -124,8 +122,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::Clog, - "ClogSetCommitted record with unexpected key {}", - key + "ClogSetCommitted record with unexpected key {key}" ); for &xid in xids { let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; @@ -135,15 +132,11 @@ pub(crate) fn apply_in_neon( // Check that we're modifying the correct CLOG block. assert!( segno == expected_segno, - "ClogSetCommitted record for XID {} with unexpected key {}", - xid, - key + "ClogSetCommitted record for XID {xid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "ClogSetCommitted record for XID {} with unexpected key {}", - xid, - key + "ClogSetCommitted record for XID {xid} with unexpected key {key}" ); transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_COMMITTED, page); @@ -169,8 +162,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::Clog, - "ClogSetAborted record with unexpected key {}", - key + "ClogSetAborted record with unexpected key {key}" ); for &xid in xids { let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; @@ -180,15 +172,11 @@ pub(crate) fn apply_in_neon( // Check that we're modifying the correct CLOG block. 
assert!( segno == expected_segno, - "ClogSetAborted record for XID {} with unexpected key {}", - xid, - key + "ClogSetAborted record for XID {xid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "ClogSetAborted record for XID {} with unexpected key {}", - xid, - key + "ClogSetAborted record for XID {xid} with unexpected key {key}" ); transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_ABORTED, page); @@ -199,8 +187,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::MultiXactOffsets, - "MultixactOffsetCreate record with unexpected key {}", - key + "MultixactOffsetCreate record with unexpected key {key}" ); // Compute the block and offset to modify. // See RecordNewMultiXact in PostgreSQL sources. @@ -213,15 +200,11 @@ pub(crate) fn apply_in_neon( let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; assert!( segno == expected_segno, - "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", - mid, - key + "MultiXactOffsetsCreate record for multi-xid {mid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", - mid, - key + "MultiXactOffsetsCreate record for multi-xid {mid} with unexpected key {key}" ); LittleEndian::write_u32(&mut page[offset..offset + 4], *moff); @@ -231,8 +214,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::MultiXactMembers, - "MultixactMembersCreate record with unexpected key {}", - key + "MultixactMembersCreate record with unexpected key {key}" ); for (i, member) in members.iter().enumerate() { let offset = moff + i as u32; @@ -249,15 +231,11 @@ pub(crate) fn apply_in_neon( let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; assert!( segno == expected_segno, - "MultiXactMembersCreate record for offset {} with unexpected key {}", - moff, - key + "MultiXactMembersCreate record for offset {moff} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "MultiXactMembersCreate record for offset {} with unexpected key {}", - moff, - key + "MultiXactMembersCreate record for offset {moff} with unexpected key {key}" ); let mut flagsval = LittleEndian::read_u32(&page[flagsoff..flagsoff + 4]); diff --git a/pageserver/src/walredo/process.rs b/pageserver/src/walredo/process.rs index 6d4a38d4ff..c8b0846480 100644 --- a/pageserver/src/walredo/process.rs +++ b/pageserver/src/walredo/process.rs @@ -10,14 +10,14 @@ use std::time::Duration; use anyhow::Context; use bytes::Bytes; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::RelTag; use pageserver_api::shard::TenantShardId; -use postgres_ffi::BLCKSZ; +use postgres_ffi::{BLCKSZ, PgMajorVersion}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tracing::{Instrument, debug, error, instrument}; use utils::lsn::Lsn; use utils::poison::Poison; +use wal_decoder::models::record::NeonWalRecord; use self::no_leak_child::NoLeakChild; use crate::config::PageServerConf; @@ -54,11 +54,11 @@ impl WalRedoProcess { // // Start postgres binary in special WAL redo mode. 
// - #[instrument(skip_all,fields(pg_version=pg_version))] + #[instrument(skip_all,fields(pg_version=pg_version.major_version_num()))] pub(crate) fn launch( conf: &'static PageServerConf, tenant_shard_id: TenantShardId, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result { crate::span::debug_assert_current_span_has_tenant_id(); diff --git a/pgxn/Makefile b/pgxn/Makefile new file mode 100644 index 0000000000..8f190668ea --- /dev/null +++ b/pgxn/Makefile @@ -0,0 +1,28 @@ +# This makefile assumes that 'pg_config' is in the path, or is passed in the +# PG_CONFIG variable. +# +# This is used in two different ways: +# +# 1. The main makefile calls this, when you invoke the `make neon-pg-ext-%` +# target. It passes PG_CONFIG pointing to pg_install/%/bin/pg_config. +# This is a VPATH build; the current directory is build/pgxn-%, and +# the path to the Makefile is passed with the -f argument. +# +# 2. compute-node.Dockerfile invokes this to build the compute extensions +# for the specific Postgres version. It relies on pg_config already +# being in $(PATH). + +srcdir = $(dir $(firstword $(MAKEFILE_LIST))) + +PG_CONFIG = pg_config + +subdirs = neon neon_rmgr neon_walredo neon_utils neon_test_utils + +.PHONY: install install-compute install-storage $(subdirs) +install: $(subdirs) +install-compute: neon neon_utils neon_test_utils neon_rmgr +install-storage: neon_rmgr neon_walredo + +$(subdirs): %: + mkdir -p $* + $(MAKE) PG_CONFIG=$(PG_CONFIG) -C $* -f $(abspath $(srcdir)/$@/Makefile) install diff --git a/pgxn/neon/Makefile b/pgxn/neon/Makefile index bc0d3cdeb7..322ab039f5 100644 --- a/pgxn/neon/Makefile +++ b/pgxn/neon/Makefile @@ -1,9 +1,11 @@ # pgxs/neon/Makefile + MODULE_big = neon OBJS = \ $(WIN32RES) \ communicator.o \ + communicator_new.o \ extension_server.o \ file_cache.o \ hll.o \ @@ -20,14 +22,19 @@ OBJS = \ unstable_extensions.o \ walproposer.o \ walproposer_pg.o \ - control_plane_connector.o \ + neon_ddl_handler.o \ walsender_hooks.o \ - $(LIBCOMMUNICATOR_PATH)/libcommunicator.a + $(NEON_CARGO_ARTIFACT_TARGET_DIR)/libcommunicator.a PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK_INTERNAL = $(libpq) SHLIB_LINK = -lcurl +UNAME_S := $(shell uname -s) +ifeq ($(UNAME_S), Darwin) + SHLIB_LINK += -framework Security -framework CoreFoundation -framework SystemConfiguration +endif + EXTENSION = neon DATA = \ neon--1.0.sql \ @@ -54,6 +61,17 @@ WALPROP_OBJS = \ neon_utils.o \ walproposer_compat.o +# libcommunicator.a is built by cargo from the Rust sources under communicator/ +# subdirectory. `cargo build` also generates communicator_bindings.h. +communicator_new.o: communicator/communicator_bindings.h + +$(NEON_CARGO_ARTIFACT_TARGET_DIR)/libcommunicator.a communicator/communicator_bindings.h &: + (cd $(srcdir)/communicator && cargo build $(CARGO_BUILD_FLAGS) $(CARGO_PROFILE)) + +# Force `cargo build` every time. Some of the Rust sources might have +# changed. +.PHONY: $(NEON_CARGO_ARTIFACT_TARGET_DIR)/libcommunicator.a communicator/communicator_bindings.h + .PHONY: walproposer-lib walproposer-lib: CPPFLAGS += -DWALPROPOSER_LIB walproposer-lib: libwalproposer.a; diff --git a/pgxn/neon/communicator.c b/pgxn/neon/communicator.c index 2655a45bcc..7c84be7d15 100644 --- a/pgxn/neon/communicator.c +++ b/pgxn/neon/communicator.c @@ -1092,13 +1092,15 @@ communicator_prefetch_register_bufferv(BufferTag tag, neon_request_lsns *frlsns, MyPState->ring_last <= ring_index); } -/* internal version. Returns the ring index */ +/* Internal version. 
Returns the ring index of the last block (result of this function is used only +* when nblocks==1) +*/ static uint64 prefetch_register_bufferv(BufferTag tag, neon_request_lsns *frlsns, BlockNumber nblocks, const bits8 *mask, bool is_prefetch) { - uint64 min_ring_index; + uint64 last_ring_index; PrefetchRequest hashkey; #ifdef USE_ASSERT_CHECKING bool any_hits = false; @@ -1122,13 +1124,12 @@ Retry: MyPState->ring_unused - MyPState->ring_receive; MyNeonCounters->getpage_prefetches_buffered = MyPState->n_responses_buffered; + last_ring_index = UINT64_MAX; - min_ring_index = UINT64_MAX; for (int i = 0; i < nblocks; i++) { PrefetchRequest *slot = NULL; PrfHashEntry *entry = NULL; - uint64 ring_index; neon_request_lsns *lsns; if (PointerIsValid(mask) && BITMAP_ISSET(mask, i)) @@ -1152,12 +1153,12 @@ Retry: if (entry != NULL) { slot = entry->slot; - ring_index = slot->my_ring_index; - Assert(slot == GetPrfSlot(ring_index)); + last_ring_index = slot->my_ring_index; + Assert(slot == GetPrfSlot(last_ring_index)); Assert(slot->status != PRFS_UNUSED); - Assert(MyPState->ring_last <= ring_index && - ring_index < MyPState->ring_unused); + Assert(MyPState->ring_last <= last_ring_index && + last_ring_index < MyPState->ring_unused); Assert(BufferTagsEqual(&slot->buftag, &hashkey.buftag)); /* @@ -1169,9 +1170,9 @@ Retry: if (!neon_prefetch_response_usable(lsns, slot)) { /* Wait for the old request to finish and discard it */ - if (!prefetch_wait_for(ring_index)) + if (!prefetch_wait_for(last_ring_index)) goto Retry; - prefetch_set_unused(ring_index); + prefetch_set_unused(last_ring_index); entry = NULL; slot = NULL; pgBufferUsage.prefetch.expired += 1; @@ -1188,13 +1189,12 @@ Retry: */ if (slot->status == PRFS_TAG_REMAINS) { - prefetch_set_unused(ring_index); + prefetch_set_unused(last_ring_index); entry = NULL; slot = NULL; } else { - min_ring_index = Min(min_ring_index, ring_index); /* The buffered request is good enough, return that index */ if (is_prefetch) pgBufferUsage.prefetch.duplicates++; @@ -1283,12 +1283,12 @@ Retry: * The next buffer pointed to by `ring_unused` is now definitely empty, so * we can insert the new request to it. 
*/ - ring_index = MyPState->ring_unused; + last_ring_index = MyPState->ring_unused; - Assert(MyPState->ring_last <= ring_index && - ring_index <= MyPState->ring_unused); + Assert(MyPState->ring_last <= last_ring_index && + last_ring_index <= MyPState->ring_unused); - slot = GetPrfSlotNoCheck(ring_index); + slot = GetPrfSlotNoCheck(last_ring_index); Assert(slot->status == PRFS_UNUSED); @@ -1298,11 +1298,9 @@ Retry: */ slot->buftag = hashkey.buftag; slot->shard_no = get_shard_number(&tag); - slot->my_ring_index = ring_index; + slot->my_ring_index = last_ring_index; slot->flags = 0; - min_ring_index = Min(min_ring_index, ring_index); - if (is_prefetch) MyNeonCounters->getpage_prefetch_requests_total++; else @@ -1315,11 +1313,12 @@ Retry: MyPState->ring_unused - MyPState->ring_receive; Assert(any_hits); + Assert(last_ring_index != UINT64_MAX); - Assert(GetPrfSlot(min_ring_index)->status == PRFS_REQUESTED || - GetPrfSlot(min_ring_index)->status == PRFS_RECEIVED); - Assert(MyPState->ring_last <= min_ring_index && - min_ring_index < MyPState->ring_unused); + Assert(GetPrfSlot(last_ring_index)->status == PRFS_REQUESTED || + GetPrfSlot(last_ring_index)->status == PRFS_RECEIVED); + Assert(MyPState->ring_last <= last_ring_index && + last_ring_index < MyPState->ring_unused); if (flush_every_n_requests > 0 && MyPState->ring_unused - MyPState->ring_flush >= flush_every_n_requests) @@ -1335,7 +1334,7 @@ Retry: MyPState->ring_flush = MyPState->ring_unused; } - return min_ring_index; + return last_ring_index; } static bool diff --git a/pgxn/neon/communicator/.gitignore b/pgxn/neon/communicator/.gitignore new file mode 100644 index 0000000000..d713be0a35 --- /dev/null +++ b/pgxn/neon/communicator/.gitignore @@ -0,0 +1,2 @@ +# generated file (with cbindgen, see build.rs) +communicator_bindings.h diff --git a/pgxn/neon/communicator/Cargo.lock b/pgxn/neon/communicator/Cargo.lock new file mode 100644 index 0000000000..2a62aacfba --- /dev/null +++ b/pgxn/neon/communicator/Cargo.lock @@ -0,0 +1,372 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "backtrace" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "communicator" +version = "0.1.0" +dependencies = [ + "tonic", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "http" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" +dependencies = [ + "bytes", + "futures-core", + "http", + "http-body", + "pin-project-lite", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "libc" +version = "0.2.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "miniz_oxide" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ff70ce3e48ae43fa075863cef62e8b43b71a4f2382229920e0df362592919430" +dependencies = [ + "adler2", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "proc-macro2" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "syn" +version = "2.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tokio" +version = "1.44.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +dependencies = [ + "backtrace", + "pin-project-lite", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tonic" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85839f0b32fd242bb3209262371d07feda6d780d16ee9d2bc88581b89da1549b" +dependencies = [ + "base64", + "bytes", + "http", + "http-body", + "http-body-util", + "percent-encoding", + "pin-project", + "tokio-stream", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = 
"tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +dependencies = [ + "once_cell", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/pgxn/neon/communicator/Cargo.toml b/pgxn/neon/communicator/Cargo.toml index f09b9d7a14..677e6ed388 100644 --- a/pgxn/neon/communicator/Cargo.toml +++ b/pgxn/neon/communicator/Cargo.toml @@ -1,13 +1,43 @@ [package] name = "communicator" -version = "0.1.0" 
-edition = "2024" +license.workspace = true +edition.workspace = true + +[features] +# 'testing' feature is currently unused in the communicator, but we accept it for convenience of +# calling build scripts, so that you can pass the same feature to all packages. +testing = [] [lib] crate-type = ["staticlib"] [dependencies] +axum.workspace = true +bytes.workspace = true +clashmap.workspace = true +http.workspace = true +libc.workspace = true +nix.workspace = true +atomic_enum = "0.3.0" +prometheus.workspace = true +prost.workspace = true +tonic = { version = "0.12.0", default-features = false, features=["codegen", "prost", "transport"] } +tokio = { version = "1.43.1", features = ["macros", "net", "io-util", "rt", "rt-multi-thread"] } +tokio-pipe = { version = "0.2.12" } +thiserror.workspace = true +tracing.workspace = true +tracing-subscriber.workspace = true + +metrics.workspace = true +uring-common = { workspace = true, features = ["bytes"] } + +pageserver_client_grpc.workspace = true +pageserver_api.workspace = true +pageserver_page_api.workspace = true + neon-shmem.workspace = true +utils.workspace = true +workspace_hack = { version = "0.1", path = "../../../workspace_hack" } [build-dependencies] cbindgen.workspace = true diff --git a/pgxn/neon/communicator/README.md b/pgxn/neon/communicator/README.md index 48fda68721..8887a01cbc 100644 --- a/pgxn/neon/communicator/README.md +++ b/pgxn/neon/communicator/README.md @@ -1,8 +1,123 @@ -This package will evolve into a "compute-pageserver communicator" -process and machinery. For now, it just provides wrappers on the -neon-shmem Rust crate, to allow using it in the C implementation of -the LFC. +# Communicator + +This package provides the so-called "compute-pageserver communicator", +or just "communicator" in short. It runs in a PostgreSQL server, as +part of the neon extension, and handles the communication with the +pageservers. On the PostgreSQL side, the glue code in pgxn/neon/ uses +the communicator to implement the PostgreSQL Storage Manager (SMGR) +interface. + +## Design criteria + +- Low latency +- Saturate a 10 Gbit / s network interface without becoming a bottleneck + +## Source code view + +pgxn/neon/communicator_new.c + Contains the glue that interact with PostgreSQL code and the Rust + communicator code. + +pgxn/neon/communicator/src/backend_interface.rs + The entry point for calls from each backend. + +pgxn/neon/communicator/src/init.rs + Initialization at server startup + +pgxn/neon/communicator/src/worker_process/ + Worker process main loop and glue code At compilation time, pgxn/neon/communicator/ produces a static library, libcommunicator.a. It is linked to the neon.so extension -library. +library. + +The real networking code, which is independent of PostgreSQL, is in +the pageserver/client_grpc crate. + +## Process view + +The communicator runs in a dedicated background worker process, the +"communicator process". The communicator uses a multi-threaded Tokio +runtime to execute the IO requests. So the communicator process has +multiple threads running. That's unusual for Postgres processes and +care must be taken to make that work. + +### Backend <-> worker communication + +Each backend has a number of I/O request slots in shared memory. The +slots are statically allocated for each backend, and must not be +accessed by other backends. The worker process reads requests from the +shared memory slots, and writes responses back to the slots. 
+ +To submit an IO request, first pick one of your backend's free slots, +and write the details of the IO request in the slot. Finally, update +the 'state' field of the slot to Submitted. That informs the worker +process that it can start processing the request. Once the state has +been set to Submitted, the backend *must not* access the slot anymore, +until the worker process sets its state to 'Completed'. In other +words, each slot is owned by either the backend or the worker process +at all times, and the 'state' field indicates who has ownership at the +moment. + +To inform the worker process that a request slot has a pending IO +request, there's a pipe shared by the worker process and all backend +processes. After you have changed the slot's state to Submitted, write +the index of the request slot to the pipe. This wakes up the worker +process. + +(Note that the pipe is just used for wakeups, but the worker process +is free to pick up Submitted IO requests even without receiving the +wakeup. As of this writing, it doesn't do that, but it might be useful +in the future to reduce latency even further, for example.) + +When the worker process has completed processing the request, it +writes the result back in the request slot. A GetPage request can also +contain a pointer to buffer in the shared buffer cache. In that case, +the worker process writes the resulting page contents directly to the +buffer, and just a result code in the request slot. It then updates +the 'state' field to Completed, which passes the owner ship back to +the originating backend. Finally, it signals the process Latch of the +originating backend, waking it up. + +### Differences between PostgreSQL v16, v17 and v18 + +PostgreSQL v18 introduced the new AIO mechanism. The PostgreSQL AIO +mechanism uses a very similar mechanism as described in the previous +section, for the communication between AIO worker processes and +backends. With our communicator, the AIO worker processes are not +used, but we use the same PgAioHandle request slots as in upstream. +For Neon-specific IO requests like GetDbSize, a neon request slot is +used. But for the actual IO requests, the request slot merely contains +a pointer to the PgAioHandle slot. The worker process updates the +status of that, calls the IO callbacks upon completionetc, just like +the upstream AIO worker processes do. + +## Sequence diagram + + neon + PostgreSQL extension backend_interface.rs worker_process.rs processor tonic + | . . . . + | smgr_read() . . . . + +-------------> + . . . + . | . . . + . | rcommunicator_ . . . + . | get_page_at_lsn . . . + . +------------------> + . . + | . . + | write request to . . . + | slot . . + | . . + | . . + | submit_request() . . + +-----------------> + . + | | . + | | db_size_request . . + +---------------->. + . TODO + + + +### Compute <-> pageserver protocol + +The protocol between Compute and the pageserver is based on gRPC. See `protos/`. + diff --git a/pgxn/neon/communicator/build.rs b/pgxn/neon/communicator/build.rs index ef570c3d0a..2b83b4238d 100644 --- a/pgxn/neon/communicator/build.rs +++ b/pgxn/neon/communicator/build.rs @@ -3,20 +3,18 @@ use std::env; fn main() -> Result<(), Box> { let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - cbindgen::generate(crate_dir).map_or_else( - |error| match error { - cbindgen::Error::ParseSyntaxError { .. } => { - // This means there was a syntax error in the Rust sources. Don't panic, because - // we want the build to continue and the Rust compiler to hit the error. 
The - // Rust compiler produces a better error message than cbindgen. - eprintln!("Generating C bindings failed because of a Rust syntax error"); - } - e => panic!("Unable to generate C bindings: {:?}", e), - }, - |bindings| { + match cbindgen::generate(crate_dir) { + Ok(bindings) => { bindings.write_to_file("communicator_bindings.h"); - }, - ); + } + Err(cbindgen::Error::ParseSyntaxError { .. }) => { + // This means there was a syntax error in the Rust sources. Don't panic, because + // we want the build to continue and the Rust compiler to hit the error. The + // Rust compiler produces a better error message than cbindgen. + eprintln!("Generating C bindings failed because of a Rust syntax error"); + } + Err(err) => panic!("Unable to generate C bindings: {err:?}"), + }; Ok(()) } diff --git a/pgxn/neon/communicator/src/backend_comms.rs b/pgxn/neon/communicator/src/backend_comms.rs new file mode 100644 index 0000000000..998e0daf71 --- /dev/null +++ b/pgxn/neon/communicator/src/backend_comms.rs @@ -0,0 +1,207 @@ +//! This module implements a request/response "slot" for submitting requests from backends +//! to the communicator process. +//! +//! NB: The "backend" side of this code runs in Postgres backend processes, +//! which means that it is not safe to use the 'tracing' crate for logging, nor +//! to launch threads or use tokio tasks. +use std::cell::UnsafeCell; +use std::sync::atomic::fence; +use std::sync::atomic::{AtomicI32, Ordering}; + +use crate::neon_request::{NeonIORequest, NeonIOResult}; + +use atomic_enum::atomic_enum; + +/// One request/response slot. Each backend has its own set of slots that it uses. +/// +/// This is the moral equivalent of PgAioHandle for Postgres AIO requests +/// Like PgAioHandle, try to keep this small. +/// +/// There is an array of these in shared memory. Therefore, this must be Sized. +/// +/// ## Lifecycle of a request +/// +/// The slot is always owned by either the backend process or the communicator +/// process, depending on the 'state'. Only the owning process is allowed to +/// read or modify the slot, except for reading the 'state' itself to check who +/// owns it. +/// +/// A slot begins in the Idle state, where it is owned by the backend process. +/// To submit a request, the backend process fills the slot with the request +/// data, and changes it to the Submitted state. After changing the state, the +/// slot is owned by the communicator process, and the backend is not allowed +/// to access it until the communicator process marks it as Completed. +/// +/// When the communicator process sees that the slot is in Submitted state, it +/// starts to process the request. After processing the request, it stores the +/// result in the slot, and changes the state to Completed. It is now owned by +/// the backend process again, which may now read the result, and reuse the +/// slot for a new request. +/// +/// For correctness of the above protocol, we really only need two states: +/// "owned by backend" and "owned by communicator process. But to help with +/// debugging, there are a few more states. When the backend starts to fill in +/// the request details in the slot, it first sets the state from Idle to +/// Filling, and when it's done with that, from Filling to Submitted. In the +/// Filling state, the slot is still owned by the backend. Similarly, when the +/// communicator process starts to process a request, it sets it to Processing +/// state first, but the slot is still owned by the communicator process. 
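A typical round trip, seen from the backend side, looks roughly like the sketch below. This is illustrative only: the pipe write and the latch wait are handled by the surrounding glue code, and `write_slot_index_to_submission_pipe()` and `wait_on_my_latch()` are stand-ins for that glue, not functions in this crate.

```rust
// Sketch of the backend-side protocol. `slot` is one of this backend's
// NeonIOHandle slots, `request` the NeonIORequest it wants to issue.
slot.fill_request(&request, my_proc_number);       // Idle -> Filling -> Submitted
write_slot_index_to_submission_pipe(slot_idx);     // wake the communicator (done elsewhere)
let result = loop {
    if let Some(result) = slot.try_get_result() {  // Completed -> Idle; result is now ours
        break result;
    }
    wait_on_my_latch();                            // communicator sets our latch on completion
};
```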
+/// +/// This struct doesn't handle waking up the communicator process when a request +/// has been submitted or when a response is ready. We only store the 'owner_procno' +/// which can be used for waking up the backend on completion, but the wakeups are +/// performed elsewhere. +pub struct NeonIOHandle { + /// similar to PgAioHandleState + state: AtomicNeonIOHandleState, + + /// The owning process's ProcNumber. The worker process uses this to set the process's + /// latch on completion. + /// + /// (This could be calculated from num_neon_request_slots_per_backend and the index of + /// this slot in the overall 'neon_requst_slots array') + owner_procno: AtomicI32, + + /// SAFETY: This is modified by fill_request(), after it has established ownership + /// of the slot by setting state from Idle to Filling + request: UnsafeCell, + + /// valid when state is Completed + /// + /// SAFETY: This is modified by RequestProcessingGuard::complete(). There can be + /// only one RequestProcessingGuard outstanding for a slot at a time, because + /// it is returned by start_processing_request() which checks the state, so + /// RequestProcessingGuard has exclusive access to the slot. + result: UnsafeCell, +} + +// The protocol described in the "Lifecycle of a request" section above ensures +// the safe access to the fields +unsafe impl Send for NeonIOHandle {} +unsafe impl Sync for NeonIOHandle {} + +impl Default for NeonIOHandle { + fn default() -> NeonIOHandle { + NeonIOHandle { + owner_procno: AtomicI32::new(-1), + request: UnsafeCell::new(NeonIORequest::Empty), + result: UnsafeCell::new(NeonIOResult::Empty), + state: AtomicNeonIOHandleState::new(NeonIOHandleState::Idle), + } + } +} + +#[atomic_enum] +#[derive(Eq, PartialEq)] +pub enum NeonIOHandleState { + Idle, + + /// backend is filling in the request + Filling, + + /// Backend has submitted the request to the communicator, but the + /// communicator process has not yet started processing it. + Submitted, + + /// Communicator is processing the request + Processing, + + /// Communicator has completed the request, and the 'result' field is now + /// valid, but the backend has not read the result yet. + Completed, +} + +pub struct RequestProcessingGuard<'a>(&'a NeonIOHandle); + +unsafe impl<'a> Send for RequestProcessingGuard<'a> {} +unsafe impl<'a> Sync for RequestProcessingGuard<'a> {} + +impl<'a> RequestProcessingGuard<'a> { + pub fn get_request(&self) -> &NeonIORequest { + unsafe { &*self.0.request.get() } + } + + pub fn get_owner_procno(&self) -> i32 { + self.0.owner_procno.load(Ordering::Relaxed) + } + + pub fn completed(self, result: NeonIOResult) { + unsafe { + *self.0.result.get() = result; + }; + + // Ok, we have completed the IO. Mark the request as completed. After that, + // we no longer have ownership of the slot, and must not modify it. + let old_state = self + .0 + .state + .swap(NeonIOHandleState::Completed, Ordering::Release); + assert!(old_state == NeonIOHandleState::Processing); + } +} + +impl NeonIOHandle { + pub fn fill_request(&self, request: &NeonIORequest, proc_number: i32) { + // Verify that the slot is in Idle state previously, and start filling it. + // + // XXX: This step isn't strictly necessary. Assuming the caller didn't screw up + // and try to use a slot that's already in use, we could fill the slot and + // switch it directly from Idle to Submitted state. 
+ if let Err(s) = self.state.compare_exchange( + NeonIOHandleState::Idle, + NeonIOHandleState::Filling, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + panic!("unexpected state in request slot: {s:?}"); + } + + // This fence synchronizes-with store/swap in `communicator_process_main_loop`. + fence(Ordering::Acquire); + + self.owner_procno.store(proc_number, Ordering::Relaxed); + unsafe { *self.request.get() = *request } + self.state + .store(NeonIOHandleState::Submitted, Ordering::Release); + } + + pub fn get_state(&self) -> NeonIOHandleState { + self.state.load(Ordering::Relaxed) + } + + pub fn try_get_result(&self) -> Option { + // FIXME: ordering? + let state = self.state.load(Ordering::Relaxed); + if state == NeonIOHandleState::Completed { + // This fence synchronizes-with store/swap in `communicator_process_main_loop`. + fence(Ordering::Acquire); + let result = unsafe { *self.result.get() }; + self.state.store(NeonIOHandleState::Idle, Ordering::Relaxed); + Some(result) + } else { + None + } + } + + /// Read the IO request from the slot indicated in the wakeup + pub fn start_processing_request<'a>(&'a self) -> Option> { + // XXX: using compare_exchange for this is not strictly necessary, as long as + // the communicator process has _some_ means of tracking which requests it's + // already processing. That could be a flag somewhere in communicator's private + // memory, for example. + if let Err(s) = self.state.compare_exchange( + NeonIOHandleState::Submitted, + NeonIOHandleState::Processing, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + // FIXME surprising state. This is unexpected at the moment, but if we + // started to process requests more aggressively, without waiting for the + // read from the pipe, then this could happen + panic!("unexpected state in request slot: {s:?}"); + } + fence(Ordering::Acquire); + + Some(RequestProcessingGuard(self)) + } +} diff --git a/pgxn/neon/communicator/src/backend_interface.rs b/pgxn/neon/communicator/src/backend_interface.rs new file mode 100644 index 0000000000..9ed9028b96 --- /dev/null +++ b/pgxn/neon/communicator/src/backend_interface.rs @@ -0,0 +1,234 @@ +//! This code runs in each backend process. That means that launching Rust threads, panicking +//! etc. is forbidden! + +use std::os::fd::OwnedFd; + +use crate::backend_comms::NeonIOHandle; +use crate::init::CommunicatorInitStruct; +use crate::integrated_cache::{BackendCacheReadOp, IntegratedCacheReadAccess}; +use crate::neon_request::{CCachedGetPageVResult, COid}; +use crate::neon_request::{NeonIORequest, NeonIOResult}; + +pub struct CommunicatorBackendStruct<'t> { + my_proc_number: i32, + + neon_request_slots: &'t [NeonIOHandle], + + submission_pipe_write_fd: OwnedFd, + + pending_cache_read_op: Option>, + + integrated_cache: &'t IntegratedCacheReadAccess<'t>, +} + +#[unsafe(no_mangle)] +pub extern "C" fn rcommunicator_backend_init( + cis: Box, + my_proc_number: i32, +) -> &'static mut CommunicatorBackendStruct<'static> { + if my_proc_number < 0 { + panic!( + "cannot attach to communicator shared memory with procnumber {}", + my_proc_number, + ); + } + + let integrated_cache = Box::leak(Box::new(cis.integrated_cache_init_struct.backend_init())); + + let bs: &'static mut CommunicatorBackendStruct = + Box::leak(Box::new(CommunicatorBackendStruct { + my_proc_number, + neon_request_slots: cis.neon_request_slots, + + submission_pipe_write_fd: cis.submission_pipe_write_fd, + pending_cache_read_op: None, + + integrated_cache, + })); + bs +} + +/// Start a request. 
You can poll for its completion and get the result by +/// calling bcomm_poll_dbsize_request_completion(). The communicator will wake +/// us up by setting our process latch, so to wait for the completion, wait on +/// the latch and call bcomm_poll_dbsize_request_completion() every time the +/// latch is set. +/// +/// Safety: The C caller must ensure that the references are valid. +/// The requested slot must be free, or this panics. +#[unsafe(no_mangle)] +pub extern "C" fn bcomm_start_io_request( + bs: &'_ mut CommunicatorBackendStruct, + slot_idx: i32, + request: &NeonIORequest, + immediate_result_ptr: &mut NeonIOResult, +) -> i32 { + assert!(bs.pending_cache_read_op.is_none()); + + // Check if the request can be satisfied from the cache first + if let NeonIORequest::RelSize(req) = request { + if let Some(nblocks) = bs.integrated_cache.get_rel_size(&req.reltag()) { + *immediate_result_ptr = NeonIOResult::RelSize(nblocks); + return -1; + } + } + + // Create neon request and submit it + bs.start_neon_io_request(slot_idx, request); + + // Tell the communicator about it + bs.submit_request(slot_idx); + + slot_idx +} + +#[unsafe(no_mangle)] +pub extern "C" fn bcomm_start_get_page_v_request( + bs: &mut CommunicatorBackendStruct, + slot_idx: i32, + request: &NeonIORequest, + immediate_result_ptr: &mut CCachedGetPageVResult, +) -> i32 { + let NeonIORequest::GetPageV(get_pagev_request) = request else { + panic!("invalid request passed to bcomm_start_get_page_v_request()"); + }; + assert!(matches!(request, NeonIORequest::GetPageV(_))); + assert!(bs.pending_cache_read_op.is_none()); + + // Check if the request can be satisfied from the cache first + let mut all_cached = true; + let mut read_op = bs.integrated_cache.start_read_op(); + for i in 0..get_pagev_request.nblocks { + if let Some(cache_block) = read_op.get_page( + &get_pagev_request.reltag(), + get_pagev_request.block_number + i as u32, + ) { + immediate_result_ptr.cache_block_numbers[i as usize] = cache_block; + } else { + // not found in cache + all_cached = false; + break; + } + } + if all_cached { + bs.pending_cache_read_op = Some(read_op); + return -1; + } + + // Create neon request and submit it + bs.start_neon_io_request(slot_idx, request); + + // Tell the communicator about it + bs.submit_request(slot_idx); + + slot_idx +} + +/// Check if a request has completed. Returns: +/// +/// -1 if the request is still being processed +/// 0 on success +#[unsafe(no_mangle)] +pub extern "C" fn bcomm_poll_request_completion( + bs: &mut CommunicatorBackendStruct, + request_slot_idx: u32, + result_p: &mut NeonIOResult, +) -> i32 { + match bs.neon_request_slots[request_slot_idx as usize].try_get_result() { + None => -1, // still processing + Some(result) => { + *result_p = result; + 0 + } + } +} + +/// Check if a request has completed. Returns: +/// +/// 'false' if the slot is Idle. The backend process has ownership. +/// 'true' if the slot is busy, and should be polled for result. +#[unsafe(no_mangle)] +pub extern "C" fn bcomm_get_request_slot_status( + bs: &mut CommunicatorBackendStruct, + request_slot_idx: u32, +) -> bool { + use crate::backend_comms::NeonIOHandleState; + match bs.neon_request_slots[request_slot_idx as usize].get_state() { + NeonIOHandleState::Idle => false, + NeonIOHandleState::Filling => { + // 'false' would be the right result here. However, this + // is a very transient state. The C code should never + // leave a slot in this state, so if it sees that, + // something's gone wrong and it's not clear what to do + // with it. 
+ panic!("unexpected Filling state in request slot {}", request_slot_idx); + }, + NeonIOHandleState::Submitted => true, + NeonIOHandleState::Processing => true, + NeonIOHandleState::Completed => true, + } +} + +// LFC functions + +/// Finish a local file cache read +/// +// +#[unsafe(no_mangle)] +pub extern "C" fn bcomm_finish_cache_read(bs: &mut CommunicatorBackendStruct) -> bool { + if let Some(op) = bs.pending_cache_read_op.take() { + op.finish() + } else { + panic!("bcomm_finish_cache_read() called with no cached read pending"); + } +} + +/// Check if the local file cache contians the given block +#[unsafe(no_mangle)] +pub extern "C" fn bcomm_cache_contains( + bs: &mut CommunicatorBackendStruct, + spc_oid: COid, + db_oid: COid, + rel_number: u32, + fork_number: u8, + block_number: u32, +) -> bool { + bs.integrated_cache.cache_contains_page( + &pageserver_page_api::RelTag { + spcnode: spc_oid, + dbnode: db_oid, + relnode: rel_number, + forknum: fork_number, + }, + block_number, + ) +} + +impl<'t> CommunicatorBackendStruct<'t> { + /// Send a wakeup to the communicator process + fn submit_request(self: &CommunicatorBackendStruct<'t>, request_slot_idx: i32) { + // wake up communicator by writing the idx to the submission pipe + // + // This can block, if the pipe is full. That should be very rare, + // because the communicator tries hard to drain the pipe to prevent + // that. Also, there's a natural upper bound on how many wakeups can be + // queued up: there is only a limited number of request slots for each + // backend. + // + // If it does block very briefly, that's not too serious. + let idxbuf = request_slot_idx.to_ne_bytes(); + + let _res = nix::unistd::write(&self.submission_pipe_write_fd, &idxbuf); + // FIXME: check result, return any errors + } + + /// Note: there's no guarantee on when the communicator might pick it up. You should ring + /// the doorbell. But it might pick it up immediately. + /// + /// The slot must be free, or this panics. + pub(crate) fn start_neon_io_request(&mut self, request_slot_idx: i32, request: &NeonIORequest) { + let my_proc_number = self.my_proc_number; + + self.neon_request_slots[request_slot_idx as usize].fill_request(request, my_proc_number); + } +} diff --git a/pgxn/neon/communicator/src/file_cache.rs b/pgxn/neon/communicator/src/file_cache.rs new file mode 100644 index 0000000000..1f60c97f2c --- /dev/null +++ b/pgxn/neon/communicator/src/file_cache.rs @@ -0,0 +1,160 @@ +//! Implement the "low-level" parts of the file cache. +//! +//! This module just deals with reading and writing the file, and keeping track +//! which blocks in the cache file are in use and which are free. The "high +//! level" parts of tracking which block in the cache file corresponds to which +//! relation block is handled in 'integrated_cache' instead. +//! +//! This module is only used to access the file from the communicator +//! process. The backend processes *also* read the file (and sometimes also +//! write it? ), but the backends use direct C library calls for that. +use std::fs::File; +use std::os::unix::fs::FileExt; +use std::path::Path; +use std::sync::Arc; +use std::sync::Mutex; + +use crate::BLCKSZ; + +use tokio::task::spawn_blocking; + +pub type CacheBlock = u64; + +pub const INVALID_CACHE_BLOCK: CacheBlock = u64::MAX; + +pub struct FileCache { + file: Arc, + + free_list: Mutex, + + // metrics + max_blocks_gauge: metrics::IntGauge, + num_free_blocks_gauge: metrics::IntGauge, +} + +// TODO: We keep track of all free blocks in this vec. 
That doesn't really scale. +// Idea: when free_blocks fills up with more than 1024 entries, write them all to +// one block on disk. +struct FreeList { + next_free_block: CacheBlock, + max_blocks: u64, + + free_blocks: Vec, +} + +impl FileCache { + pub fn new(file_cache_path: &Path, mut initial_size: u64) -> Result { + if initial_size < 100 { + tracing::warn!( + "min size for file cache is 100 blocks, {} requested", + initial_size + ); + initial_size = 100; + } + + let file = std::fs::OpenOptions::new() + .read(true) + .write(true) + .truncate(true) + .create(true) + .open(file_cache_path)?; + + let max_blocks_gauge = metrics::IntGauge::new( + "file_cache_max_blocks", + "Local File Cache size in 8KiB blocks", + ) + .unwrap(); + let num_free_blocks_gauge = metrics::IntGauge::new( + "file_cache_num_free_blocks", + "Number of free 8KiB blocks in Local File Cache", + ) + .unwrap(); + + tracing::info!("initialized file cache with {} blocks", initial_size); + + Ok(FileCache { + file: Arc::new(file), + free_list: Mutex::new(FreeList { + next_free_block: 0, + max_blocks: initial_size, + free_blocks: Vec::new(), + }), + max_blocks_gauge, + num_free_blocks_gauge, + }) + } + + // File cache management + + pub async fn read_block( + &self, + cache_block: CacheBlock, + mut dst: impl uring_common::buf::IoBufMut + Send + Sync, + ) -> Result<(), std::io::Error> { + assert!(dst.bytes_total() == BLCKSZ); + let file = self.file.clone(); + + let dst_ref = unsafe { std::slice::from_raw_parts_mut(dst.stable_mut_ptr(), BLCKSZ) }; + + spawn_blocking(move || file.read_exact_at(dst_ref, cache_block * BLCKSZ as u64)).await??; + Ok(()) + } + + pub async fn write_block( + &self, + cache_block: CacheBlock, + src: impl uring_common::buf::IoBuf + Send + Sync, + ) -> Result<(), std::io::Error> { + assert!(src.bytes_init() == BLCKSZ); + let file = self.file.clone(); + + let src_ref = unsafe { std::slice::from_raw_parts(src.stable_ptr(), BLCKSZ) }; + + spawn_blocking(move || file.write_all_at(src_ref, cache_block * BLCKSZ as u64)).await??; + + Ok(()) + } + + pub fn alloc_block(&self) -> Option { + let mut free_list = self.free_list.lock().unwrap(); + if let Some(x) = free_list.free_blocks.pop() { + return Some(x); + } + if free_list.next_free_block < free_list.max_blocks { + let result = free_list.next_free_block; + free_list.next_free_block += 1; + return Some(result); + } + None + } + + pub fn dealloc_block(&self, cache_block: CacheBlock) { + let mut free_list = self.free_list.lock().unwrap(); + free_list.free_blocks.push(cache_block); + } +} + +impl metrics::core::Collector for FileCache { + fn desc(&self) -> Vec<&metrics::core::Desc> { + let mut descs = Vec::new(); + descs.append(&mut self.max_blocks_gauge.desc()); + descs.append(&mut self.num_free_blocks_gauge.desc()); + descs + } + fn collect(&self) -> Vec { + // Update the gauges with fresh values first + { + let free_list = self.free_list.lock().unwrap(); + self.max_blocks_gauge.set(free_list.max_blocks as i64); + + let total_free_blocks: i64 = free_list.free_blocks.len() as i64 + + (free_list.max_blocks as i64 - free_list.next_free_block as i64); + self.num_free_blocks_gauge.set(total_free_blocks); + } + + let mut values = Vec::new(); + values.append(&mut self.max_blocks_gauge.collect()); + values.append(&mut self.num_free_blocks_gauge.collect()); + values + } +} diff --git a/pgxn/neon/communicator/src/file_cache_hashmap.rs b/pgxn/neon/communicator/src/file_cache_hashmap.rs deleted file mode 100644 index 0a9ec3db31..0000000000 --- 
a/pgxn/neon/communicator/src/file_cache_hashmap.rs +++ /dev/null @@ -1,240 +0,0 @@ -//! Glue code to allow using the Rust shmem hash map implementation from C code -//! -//! For convience of adapting existing code, the interface provided somewhat resembles the dynahash -//! interface. -//! -//! NOTE: The caller is responsible for locking! The caller is expected to hold the PostgreSQL -//! LWLock, 'lfc_lock', while accessing the hash table, in shared or exclusive mode as appropriate. - -use std::ffi::c_void; -use std::marker::PhantomData; - -use neon_shmem::hash::entry::Entry; -use neon_shmem::hash::{HashMapAccess, HashMapInit}; -use neon_shmem::shmem::ShmemHandle; - -/// NB: This must match the definition of BufferTag in Postgres C headers. We could use bindgen to -/// generate this from the C headers, but prefer to not introduce dependency on bindgen for now. -/// -/// Note that there are no padding bytes. If the corresponding C struct has padding bytes, the C C -/// code must clear them. -#[derive(Clone, Debug, Hash, Eq, PartialEq)] -#[repr(C)] -pub struct FileCacheKey { - pub _spc_id: u32, - pub _db_id: u32, - pub _rel_number: u32, - pub _fork_num: u32, - pub _block_num: u32, -} - -/// Like with FileCacheKey, this must match the definition of FileCacheEntry in file_cache.c. We -/// don't look at the contents here though, it's sufficent that the size and alignment matches. -#[derive(Clone, Debug, Default)] -#[repr(C)] -pub struct FileCacheEntry { - pub _offset: u32, - pub _access_count: u32, - pub _prev: *mut FileCacheEntry, - pub _next: *mut FileCacheEntry, - pub _state: [u32; 8], -} - -/// XXX: This could be just: -/// -/// ```ignore -/// type FileCacheHashMapHandle = HashMapInit<'a, FileCacheKey, FileCacheEntry> -/// ``` -/// -/// but with that, cbindgen generates a broken typedef in the C header file which doesn't -/// compile. It apparently gets confused by the generics. -#[repr(transparent)] -pub struct FileCacheHashMapHandle<'a>( - pub *mut c_void, - PhantomData>, -); -impl<'a> From>> for FileCacheHashMapHandle<'a> { - fn from(x: Box>) -> Self { - FileCacheHashMapHandle(Box::into_raw(x) as *mut c_void, PhantomData::default()) - } -} -impl<'a> From> for Box> { - fn from(x: FileCacheHashMapHandle) -> Self { - unsafe { Box::from_raw(x.0.cast()) } - } -} - -/// XXX: same for this -#[repr(transparent)] -pub struct FileCacheHashMapAccess<'a>( - pub *mut c_void, - PhantomData>, -); -impl<'a> From>> for FileCacheHashMapAccess<'a> { - fn from(x: Box>) -> Self { - // Convert the Box into a raw mutable pointer to the HashMapAccess itself. - // This transfers ownership of the HashMapAccess (and its contained ShmemHandle) - // to the raw pointer. The C caller is now responsible for managing this memory. - FileCacheHashMapAccess(Box::into_raw(x) as *mut c_void, PhantomData::default()) - } -} -impl<'a> FileCacheHashMapAccess<'a> { - fn as_ref(self) -> &'a HashMapAccess<'a, FileCacheKey, FileCacheEntry> { - let ptr: *mut HashMapAccess<'_, FileCacheKey, FileCacheEntry> = self.0.cast(); - unsafe { ptr.as_ref().unwrap() } - } - fn as_mut(self) -> &'a mut HashMapAccess<'a, FileCacheKey, FileCacheEntry> { - let ptr: *mut HashMapAccess<'_, FileCacheKey, FileCacheEntry> = self.0.cast(); - unsafe { ptr.as_mut().unwrap() } - } -} - -/// Initialize the shared memory area at postmaster startup. 
The returned handle is inherited -/// by all the backend processes across fork() -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_shmem_init<'a>( - initial_num_buckets: u32, - max_num_buckets: u32, -) -> FileCacheHashMapHandle<'a> { - let max_bytes = HashMapInit::::estimate_size(max_num_buckets); - let shmem_handle = - ShmemHandle::new("lfc mapping", 0, max_bytes).expect("shmem initialization failed"); - - let handle = HashMapInit::::init_in_shmem( - initial_num_buckets, - shmem_handle, - ); - - Box::new(handle).into() -} - -/// Initialize the access to the shared memory area in a backend process. -/// -/// XXX: I'm not sure if this actually gets called in each process, or if the returned struct -/// is also inherited across fork(). It currently works either way but if this did more -/// initialization that needed to be done after fork(), then it would matter. -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_shmem_access<'a>( - handle: FileCacheHashMapHandle<'a>, -) -> FileCacheHashMapAccess<'a> { - let handle: Box> = handle.into(); - Box::new(handle.attach_writer()).into() -} - -/// Return the current number of buckets in the hash table -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_get_num_buckets<'a>( - map: FileCacheHashMapAccess<'static>, -) -> u32 { - let map = map.as_ref(); - map.get_num_buckets().try_into().unwrap() -} - -/// Look up the entry with given key and hash. -/// -/// This is similar to dynahash's hash_search(... , HASH_FIND) -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_hash_find<'a>( - map: FileCacheHashMapAccess<'static>, - key: &FileCacheKey, - hash: u64, -) -> Option<&'static FileCacheEntry> { - let map = map.as_ref(); - map.get_with_hash(key, hash) -} - -/// Look up the entry at given bucket position -/// -/// This has no direct equivalent in the dynahash interface, but can be used to -/// iterate through all entries in the hash table. -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_hash_get_at_pos<'a>( - map: FileCacheHashMapAccess<'static>, - pos: u32, -) -> Option<&'static FileCacheEntry> { - let map = map.as_ref(); - map.get_at_bucket(pos as usize).map(|(_k, v)| v) -} - -/// Remove entry, given a pointer to the value. -/// -/// This is equivalent to dynahash hash_search(entry->key, HASH_REMOVE), where 'entry' -/// is an entry you have previously looked up -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_hash_remove_entry<'a, 'b>( - map: FileCacheHashMapAccess, - entry: *mut FileCacheEntry, -) { - let map = map.as_mut(); - let pos = map.get_bucket_for_value(entry); - match map.entry_at_bucket(pos) { - Some(e) => { - e.remove(); - } - None => { - // todo: shouldn't happen, panic? - } - } -} - -/// Compute the hash for given key -/// -/// This is equivalent to dynahash get_hash_value() function. We use Rust's default hasher -/// for calculating the hash though. -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_get_hash_value<'a, 'b>( - map: FileCacheHashMapAccess<'static>, - key: &FileCacheKey, -) -> u64 { - map.as_ref().get_hash_value(key) -} - -/// Insert a new entry to the hash table -/// -/// This is equivalent to dynahash hash_search(..., HASH_ENTER). 
-#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_hash_enter<'a, 'b>( - map: FileCacheHashMapAccess, - key: &FileCacheKey, - hash: u64, - found: &mut bool, -) -> *mut FileCacheEntry { - match map.as_mut().entry_with_hash(key.clone(), hash) { - Entry::Occupied(mut e) => { - *found = true; - e.get_mut() - } - Entry::Vacant(e) => { - *found = false; - let initial_value = FileCacheEntry::default(); - e.insert(initial_value).expect("TODO: hash table full") - } - } -} - -/// Get the key for a given entry, which must be present in the hash table. -/// -/// Dynahash requires the key to be part of the "value" struct, so you can always -/// access the key with something like `entry->key`. The Rust implementation however -/// stores the key separately. This function extracts the separately stored key. -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_hash_get_key_for_entry<'a, 'b>( - map: FileCacheHashMapAccess, - entry: *const FileCacheEntry, -) -> Option<&FileCacheKey> { - let map = map.as_ref(); - let pos = map.get_bucket_for_value(entry); - map.get_at_bucket(pos as usize).map(|(k, _v)| k) -} - -/// Remove all entries from the hash table -#[unsafe(no_mangle)] -pub extern "C" fn bcomm_file_cache_hash_reset<'a, 'b>(map: FileCacheHashMapAccess) { - let map = map.as_mut(); - let num_buckets = map.get_num_buckets(); - for i in 0..num_buckets { - if let Some(e) = map.entry_at_bucket(i) { - e.remove(); - } - } -} diff --git a/pgxn/neon/communicator/src/global_allocator.rs b/pgxn/neon/communicator/src/global_allocator.rs new file mode 100644 index 0000000000..0c8e88071f --- /dev/null +++ b/pgxn/neon/communicator/src/global_allocator.rs @@ -0,0 +1,109 @@ +//! Global allocator, for tracking memory usage of the Rust parts +//! +//! Postgres is designed to handle allocation failure (ie. malloc() returning NULL) gracefully. It +//! rolls backs the transaction and gives the user an "ERROR: out of memory" error. Rust code +//! however panics if an allocation fails. We don't want that to ever happen, because an unhandled +//! panic leads to Postgres crash and restart. Our strategy is to pre-allocate a large enough chunk +//! of memory for use by the Rust code, so that the allocations never fail. +//! +//! To pick the size for the pre-allocated chunk, we have a metric to track the high watermark +//! memory usage of all the Rust allocations in total. +//! +//! TODO: +//! +//! - Currently we just export the metrics. Actual allocations are still just passed through to +//! the system allocator. +//! - Take padding etc. 
overhead into account + +use std::alloc::{GlobalAlloc, Layout, System}; +use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; + +use metrics::IntGauge; + +struct MyAllocator { + allocations: AtomicU64, + deallocations: AtomicU64, + + allocated: AtomicUsize, + high: AtomicUsize, +} + +unsafe impl GlobalAlloc for MyAllocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + self.allocations.fetch_add(1, Ordering::Relaxed); + let mut allocated = self.allocated.fetch_add(layout.size(), Ordering::Relaxed); + allocated += layout.size(); + self.high.fetch_max(allocated, Ordering::Relaxed); + unsafe { System.alloc(layout) } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + self.deallocations.fetch_add(1, Ordering::Relaxed); + self.allocated.fetch_sub(layout.size(), Ordering::Relaxed); + unsafe { System.dealloc(ptr, layout) } + } +} + +#[global_allocator] +static GLOBAL: MyAllocator = MyAllocator { + allocations: AtomicU64::new(0), + deallocations: AtomicU64::new(0), + allocated: AtomicUsize::new(0), + high: AtomicUsize::new(0), +}; + +pub struct MyAllocatorCollector { + allocations: IntGauge, + deallocations: IntGauge, + allocated: IntGauge, + high: IntGauge, +} + +impl MyAllocatorCollector { + pub fn new() -> MyAllocatorCollector { + MyAllocatorCollector { + allocations: IntGauge::new("allocations_total", "Number of allocations in Rust code") + .unwrap(), + deallocations: IntGauge::new( + "deallocations_total", + "Number of deallocations in Rust code", + ) + .unwrap(), + allocated: IntGauge::new("allocated_total", "Bytes currently allocated").unwrap(), + high: IntGauge::new("allocated_high", "High watermark of allocated bytes").unwrap(), + } + } +} + +impl metrics::core::Collector for MyAllocatorCollector { + fn desc(&self) -> Vec<&metrics::core::Desc> { + let mut descs = Vec::new(); + + descs.append(&mut self.allocations.desc()); + descs.append(&mut self.deallocations.desc()); + descs.append(&mut self.allocated.desc()); + descs.append(&mut self.high.desc()); + + descs + } + + fn collect(&self) -> Vec { + let mut values = Vec::new(); + + // update the gauges + self.allocations + .set(GLOBAL.allocations.load(Ordering::Relaxed) as i64); + self.deallocations + .set(GLOBAL.allocations.load(Ordering::Relaxed) as i64); + self.allocated + .set(GLOBAL.allocated.load(Ordering::Relaxed) as i64); + self.high.set(GLOBAL.high.load(Ordering::Relaxed) as i64); + + values.append(&mut self.allocations.collect()); + values.append(&mut self.deallocations.collect()); + values.append(&mut self.allocated.collect()); + values.append(&mut self.high.collect()); + + values + } +} diff --git a/pgxn/neon/communicator/src/init.rs b/pgxn/neon/communicator/src/init.rs new file mode 100644 index 0000000000..5f7d593c35 --- /dev/null +++ b/pgxn/neon/communicator/src/init.rs @@ -0,0 +1,171 @@ +//! Initialization functions. These are executed in the postmaster process, +//! at different stages of server startup. +//! +//! +//! Communicator initialization steps: +//! +//! 1. At postmaster startup, before shared memory is allocated, +//! rcommunicator_shmem_size() is called to get the amount of +//! shared memory that this module needs. +//! +//! 2. Later, after the shared memory has been allocated, +//! rcommunicator_shmem_init() is called to initialize the shmem +//! area. +//! +//! Per process initialization: +//! +//! When a backend process starts up, it calls rcommunicator_backend_init(). +//! In the communicator worker process, other functions are called, see +//! `worker_process` module. 
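Roughly, the pieces fit together as in the sketch below. This is illustrative only: the real calls are made from the C glue in pgxn/neon/, `reserve_shmem()` stands in for PostgreSQL's shared-memory allocation, and the argument marshalling across the C boundary is elided.

```rust
// 1. Postmaster, before shared memory is allocated: ask how much space we need.
let size = rcommunicator_shmem_size(num_neon_request_slots);

// 2. Postmaster, once the area exists: initialize it. The returned init struct
//    is inherited by all child processes through fork().
let area = reserve_shmem(size); // hypothetical helper
let cis = rcommunicator_shmem_init(
    submission_pipe_read_fd, submission_pipe_write_fd,
    num_neon_request_slots, area, size,
    initial_file_cache_size, max_file_cache_size,
);

// 3. In each backend, after fork(): attach to the shared structures.
let bs = rcommunicator_backend_init(cis, my_proc_number);
```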
+ +use std::ffi::c_int; +use std::mem; +use std::mem::MaybeUninit; +use std::os::fd::OwnedFd; + +use crate::backend_comms::NeonIOHandle; +use crate::integrated_cache::IntegratedCacheInitStruct; + +/// This struct is created in the postmaster process, and inherited to +/// the communicator process and all backend processes through fork() +#[repr(C)] +pub struct CommunicatorInitStruct { + pub submission_pipe_read_fd: OwnedFd, + pub submission_pipe_write_fd: OwnedFd, + + // Shared memory data structures + pub num_neon_request_slots: u32, + + pub neon_request_slots: &'static [NeonIOHandle], + + pub integrated_cache_init_struct: IntegratedCacheInitStruct<'static>, +} + +impl std::fmt::Debug for CommunicatorInitStruct { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + fmt.debug_struct("CommunicatorInitStruct") + .field("submission_pipe_read_fd", &self.submission_pipe_read_fd) + .field("submission_pipe_write_fd", &self.submission_pipe_write_fd) + .field( + "num_neon_request_slots", + &self.num_neon_request_slots, + ) + .field("neon_request_slots length", &self.neon_request_slots.len()) + .finish() + } +} + +#[unsafe(no_mangle)] +pub extern "C" fn rcommunicator_shmem_size(num_neon_request_slots: u32) -> u64 { + let mut size = 0; + + size += mem::size_of::() * num_neon_request_slots as usize; + + // For integrated_cache's Allocator. TODO: make this adjustable + size += IntegratedCacheInitStruct::shmem_size(); + + size as u64 +} + +/// Initialize the shared memory segment. Returns a backend-private +/// struct, which will be inherited by backend processes through fork +#[unsafe(no_mangle)] +pub extern "C" fn rcommunicator_shmem_init( + submission_pipe_read_fd: c_int, + submission_pipe_write_fd: c_int, + num_neon_request_slots: u32, + shmem_area_ptr: *mut MaybeUninit, + shmem_area_len: u64, + initial_file_cache_size: u64, + max_file_cache_size: u64, +) -> &'static mut CommunicatorInitStruct { + let shmem_area: &'static mut [MaybeUninit] = + unsafe { std::slice::from_raw_parts_mut(shmem_area_ptr, shmem_area_len as usize) }; + + let (neon_request_slots, remaining_area) = + alloc_array_from_slice::(shmem_area, num_neon_request_slots as usize); + + for slot in neon_request_slots.iter_mut() { + slot.write(NeonIOHandle::default()); + } + + // 'neon_request_slots' is initialized now. (MaybeUninit::slice_assume_init_mut() is nightly-only + // as of this writing.) 
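+    // SAFETY: every element of the slice was written by the loop above, so viewing it as an initialized `&mut [NeonIOHandle]` is sound.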
+ let neon_request_slots = unsafe { + std::mem::transmute::<&mut [MaybeUninit], &mut [NeonIOHandle]>( + neon_request_slots, + ) + }; + + // Give the rest of the area to the integrated cache + let integrated_cache_init_struct = IntegratedCacheInitStruct::shmem_init( + remaining_area, + initial_file_cache_size, + max_file_cache_size, + ); + + let (submission_pipe_read_fd, submission_pipe_write_fd) = unsafe { + use std::os::fd::FromRawFd; + ( + OwnedFd::from_raw_fd(submission_pipe_read_fd), + OwnedFd::from_raw_fd(submission_pipe_write_fd), + ) + }; + + let cis: &'static mut CommunicatorInitStruct = Box::leak(Box::new(CommunicatorInitStruct { + submission_pipe_read_fd, + submission_pipe_write_fd, + + num_neon_request_slots, + neon_request_slots, + + integrated_cache_init_struct, + })); + + cis +} + +// fixme: currently unused +#[allow(dead_code)] +pub fn alloc_from_slice( + area: &mut [MaybeUninit], +) -> (&mut MaybeUninit, &mut [MaybeUninit]) { + let layout = std::alloc::Layout::new::(); + + let area_start = area.as_mut_ptr(); + + // pad to satisfy alignment requirements + let padding = area_start.align_offset(layout.align()); + if padding + layout.size() > area.len() { + panic!("out of memory"); + } + let area = &mut area[padding..]; + let (result_area, remain) = area.split_at_mut(layout.size()); + + let result_ptr: *mut MaybeUninit = result_area.as_mut_ptr().cast(); + let result = unsafe { result_ptr.as_mut().unwrap() }; + + (result, remain) +} + +pub fn alloc_array_from_slice( + area: &mut [MaybeUninit], + len: usize, +) -> (&mut [MaybeUninit], &mut [MaybeUninit]) { + let layout = std::alloc::Layout::new::(); + + let area_start = area.as_mut_ptr(); + + // pad to satisfy alignment requirements + let padding = area_start.align_offset(layout.align()); + if padding + layout.size() * len > area.len() { + panic!("out of memory"); + } + let area = &mut area[padding..]; + let (result_area, remain) = area.split_at_mut(layout.size() * len); + + let result_ptr: *mut MaybeUninit = result_area.as_mut_ptr().cast(); + let result = unsafe { std::slice::from_raw_parts_mut(result_ptr.as_mut().unwrap(), len) }; + + (result, remain) +} diff --git a/pgxn/neon/communicator/src/integrated_cache.rs b/pgxn/neon/communicator/src/integrated_cache.rs new file mode 100644 index 0000000000..5f0ca5f510 --- /dev/null +++ b/pgxn/neon/communicator/src/integrated_cache.rs @@ -0,0 +1,794 @@ +//! Integrated communicator cache +//! +//! It tracks: +//! - Relation sizes and existence +//! - Last-written LSN +//! - Block cache (also known as LFC) +//! +//! TODO: limit the size +//! TODO: concurrency +//! +//! Note: This deals with "relations" which is really just one "relation fork" in Postgres +//! terms. RelFileLocator + ForkNumber is the key. + +// +// TODO: Thoughts on eviction: +// +// There are two things we need to track, and evict if we run out of space: +// - blocks in the file cache's file. If the file grows too large, need to evict something. +// Also if the cache is resized +// +// - entries in the cache map. 
If we run out of memory in the shmem area, need to evict +// something +// + +use std::mem::MaybeUninit; +use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering}; + +use utils::lsn::{AtomicLsn, Lsn}; + +use crate::file_cache::INVALID_CACHE_BLOCK; +use crate::file_cache::{CacheBlock, FileCache}; +use pageserver_page_api::RelTag; + +use metrics::{IntCounter, IntGauge}; + +use neon_shmem::hash::{HashMapInit, entry::Entry}; +use neon_shmem::shmem::ShmemHandle; + +// in # of entries +const RELSIZE_CACHE_SIZE: u32 = 64 * 1024; + +/// This struct is initialized at postmaster startup, and passed to all the processes via fork(). +pub struct IntegratedCacheInitStruct<'t> { + relsize_cache_handle: HashMapInit<'t, RelKey, RelEntry>, + block_map_handle: HashMapInit<'t, BlockKey, BlockEntry>, +} + +/// Represents write-access to the integrated cache. This is used by the communicator process. +pub struct IntegratedCacheWriteAccess<'t> { + relsize_cache: neon_shmem::hash::HashMapAccess<'t, RelKey, RelEntry>, + block_map: neon_shmem::hash::HashMapAccess<'t, BlockKey, BlockEntry>, + + global_lw_lsn: AtomicU64, + + pub(crate) file_cache: Option, + + // Fields for eviction + clock_hand: std::sync::Mutex, + + // Metrics + page_evictions_counter: IntCounter, + clock_iterations_counter: IntCounter, + + // metrics from the hash map + block_map_num_buckets: IntGauge, + block_map_num_buckets_in_use: IntGauge, + + relsize_cache_num_buckets: IntGauge, + relsize_cache_num_buckets_in_use: IntGauge, +} + +/// Represents read-only access to the integrated cache. Backend processes have this. +pub struct IntegratedCacheReadAccess<'t> { + relsize_cache: neon_shmem::hash::HashMapAccess<'t, RelKey, RelEntry>, + block_map: neon_shmem::hash::HashMapAccess<'t, BlockKey, BlockEntry>, +} + +impl<'t> IntegratedCacheInitStruct<'t> { + /// Return the desired size in bytes of the fixed-size shared memory area to reserve for the + /// integrated cache. + pub fn shmem_size() -> usize { + // The relsize cache is fixed-size. The block map is allocated in a separate resizable + // area. + HashMapInit::::estimate_size(RELSIZE_CACHE_SIZE) + } + + /// Initialize the shared memory segment. This runs once in postmaster. Returns a struct which + /// will be inherited by all processes through fork. 
+ pub fn shmem_init( + shmem_area: &'t mut [MaybeUninit], + initial_file_cache_size: u64, + max_file_cache_size: u64, + ) -> IntegratedCacheInitStruct<'t> { + // Initialize the relsize cache in the fixed-size area + let relsize_cache_handle = + neon_shmem::hash::HashMapInit::with_fixed(RELSIZE_CACHE_SIZE, shmem_area); + + let max_bytes = + HashMapInit::::estimate_size(max_file_cache_size as u32); + + // Initialize the block map in a separate resizable shared memory area + let shmem_handle = ShmemHandle::new("block mapping", 0, max_bytes).unwrap(); + + let block_map_handle = + neon_shmem::hash::HashMapInit::with_shmem(initial_file_cache_size as u32, shmem_handle); + IntegratedCacheInitStruct { + relsize_cache_handle, + block_map_handle, + } + } + + /// Initialize access to the integrated cache for the communicator worker process + pub fn worker_process_init( + self, + lsn: Lsn, + file_cache: Option, + ) -> IntegratedCacheWriteAccess<'t> { + let IntegratedCacheInitStruct { + relsize_cache_handle, + block_map_handle, + } = self; + IntegratedCacheWriteAccess { + relsize_cache: relsize_cache_handle.attach_writer(), + block_map: block_map_handle.attach_writer(), + global_lw_lsn: AtomicU64::new(lsn.0), + file_cache, + clock_hand: std::sync::Mutex::new(0), + + page_evictions_counter: metrics::IntCounter::new( + "integrated_cache_evictions", + "Page evictions from the Local File Cache", + ) + .unwrap(), + + clock_iterations_counter: metrics::IntCounter::new( + "clock_iterations", + "Number of times the clock hand has moved", + ) + .unwrap(), + + block_map_num_buckets: metrics::IntGauge::new( + "block_map_num_buckets", + "Allocated size of the block cache hash map", + ) + .unwrap(), + block_map_num_buckets_in_use: metrics::IntGauge::new( + "block_map_num_buckets_in_use", + "Number of buckets in use in the block cache hash map", + ) + .unwrap(), + + relsize_cache_num_buckets: metrics::IntGauge::new( + "relsize_cache_num_buckets", + "Allocated size of the relsize cache hash map", + ) + .unwrap(), + relsize_cache_num_buckets_in_use: metrics::IntGauge::new( + "relsize_cache_num_buckets_in_use", + "Number of buckets in use in the relsize cache hash map", + ) + .unwrap(), + } + } + + /// Initialize access to the integrated cache for a backend process + pub fn backend_init(self) -> IntegratedCacheReadAccess<'t> { + let IntegratedCacheInitStruct { + relsize_cache_handle, + block_map_handle, + } = self; + + IntegratedCacheReadAccess { + relsize_cache: relsize_cache_handle.attach_reader(), + block_map: block_map_handle.attach_reader(), + } + } +} + +/// Value stored in the cache mapping hash table. +struct BlockEntry { + lw_lsn: AtomicLsn, + cache_block: AtomicU64, + + pinned: AtomicU64, + + // 'referenced' bit for the clock algorithm + referenced: AtomicBool, +} + +/// Value stored in the relsize cache hash table. 
+struct RelEntry { + /// cached size of the relation + /// u32::MAX means 'not known' (that's InvalidBlockNumber in Postgres) + nblocks: AtomicU32, +} + +impl std::fmt::Debug for RelEntry { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + fmt.debug_struct("Rel") + .field("nblocks", &self.nblocks.load(Ordering::Relaxed)) + .finish() + } +} +impl std::fmt::Debug for BlockEntry { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + fmt.debug_struct("Block") + .field("lw_lsn", &self.lw_lsn.load()) + .field("cache_block", &self.cache_block.load(Ordering::Relaxed)) + .field("pinned", &self.pinned.load(Ordering::Relaxed)) + .field("referenced", &self.referenced.load(Ordering::Relaxed)) + .finish() + } +} + +#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Hash, Ord)] +struct RelKey(RelTag); + +impl From<&RelTag> for RelKey { + fn from(val: &RelTag) -> RelKey { + RelKey(*val) + } +} + +#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Hash, Ord)] +struct BlockKey { + rel: RelTag, + block_number: u32, +} + +impl From<(&RelTag, u32)> for BlockKey { + fn from(val: (&RelTag, u32)) -> BlockKey { + BlockKey { + rel: *val.0, + block_number: val.1, + } + } +} + +/// Return type used in the cache's get_*() functions. 'Found' means that the page, or other +/// information that was enqueried, exists in the cache. ' +pub enum CacheResult { + /// The enqueried page or other information existed in the cache. + Found(V), + + /// The cache doesn't contain the page (or other enqueried information, like relation size). The + /// Lsn is the 'not_modified_since' LSN that should be used in the request to the pageserver to + /// read the page. + NotFound(Lsn), +} + +impl<'t> IntegratedCacheWriteAccess<'t> { + pub fn get_rel_size(&'t self, rel: &RelTag) -> CacheResult { + if let Some(nblocks) = get_rel_size(&self.relsize_cache, rel) { + CacheResult::Found(nblocks) + } else { + let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed)); + CacheResult::NotFound(lsn) + } + } + + pub async fn get_page( + &'t self, + rel: &RelTag, + block_number: u32, + dst: impl uring_common::buf::IoBufMut + Send + Sync, + ) -> Result, std::io::Error> { + let x = if let Some(block_entry) = self.block_map.get(&BlockKey::from((rel, block_number))) + { + block_entry.referenced.store(true, Ordering::Relaxed); + + let cache_block = block_entry.cache_block.load(Ordering::Relaxed); + if cache_block != INVALID_CACHE_BLOCK { + // pin it and release lock + block_entry.pinned.fetch_add(1, Ordering::Relaxed); + + (cache_block, DeferredUnpin(block_entry.pinned.as_ptr())) + } else { + return Ok(CacheResult::NotFound(block_entry.lw_lsn.load())); + } + } else { + let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed)); + return Ok(CacheResult::NotFound(lsn)); + }; + + let (cache_block, _deferred_pin) = x; + self.file_cache + .as_ref() + .unwrap() + .read_block(cache_block, dst) + .await?; + + // unpin the entry (by implicitly dropping deferred_pin) + Ok(CacheResult::Found(())) + } + + pub async fn page_is_cached( + &'t self, + rel: &RelTag, + block_number: u32, + ) -> Result, std::io::Error> { + if let Some(block_entry) = self.block_map.get(&BlockKey::from((rel, block_number))) { + // This is used for prefetch requests. Treat the probe as an 'access', to keep it + // in cache. 
+ block_entry.referenced.store(true, Ordering::Relaxed); + + let cache_block = block_entry.cache_block.load(Ordering::Relaxed); + + if cache_block != INVALID_CACHE_BLOCK { + Ok(CacheResult::Found(())) + } else { + Ok(CacheResult::NotFound(block_entry.lw_lsn.load())) + } + } else { + let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed)); + Ok(CacheResult::NotFound(lsn)) + } + } + + /// Does the relation exists? CacheResult::NotFound means that the cache doesn't contain that + /// information, i.e. we don't know if the relation exists or not. + pub fn get_rel_exists(&'t self, rel: &RelTag) -> CacheResult { + // we don't currently cache negative entries, so if the relation is in the cache, it exists + if let Some(_rel_entry) = self.relsize_cache.get(&RelKey::from(rel)) { + CacheResult::Found(true) + } else { + let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed)); + CacheResult::NotFound(lsn) + } + } + + pub fn get_db_size(&'t self, _db_oid: u32) -> CacheResult { + // TODO: it would be nice to cache database sizes too. Getting the database size + // is not a very common operation, but when you do it, it's often interactive, with + // e.g. psql \l+ command, so the user will feel the latency. + + // fixme: is this right lsn? + let lsn = Lsn(self.global_lw_lsn.load(Ordering::Relaxed)); + CacheResult::NotFound(lsn) + } + + pub fn remember_rel_size(&'t self, rel: &RelTag, nblocks: u32) { + match self.relsize_cache.entry(RelKey::from(rel)) { + Entry::Vacant(e) => { + tracing::info!("inserting rel entry for {rel:?}, {nblocks} blocks"); + // FIXME: what to do if we run out of memory? Evict other relation entries? + _ = e + .insert(RelEntry { + nblocks: AtomicU32::new(nblocks), + }) + .expect("out of memory"); + } + Entry::Occupied(e) => { + tracing::info!("updating rel entry for {rel:?}, {nblocks} blocks"); + e.get().nblocks.store(nblocks, Ordering::Relaxed); + } + }; + } + + /// Remember the given page contents in the cache. + pub async fn remember_page( + &'t self, + rel: &RelTag, + block_number: u32, + src: impl uring_common::buf::IoBuf + Send + Sync, + lw_lsn: Lsn, + is_write: bool, + ) { + let key = BlockKey::from((rel, block_number)); + + // FIXME: make this work when file cache is disabled. Or make it mandatory + let file_cache = self.file_cache.as_ref().unwrap(); + + if is_write { + // there should be no concurrent IOs. If a backend tries to read the page + // at the same time, they may get a torn write. That's the same as with + // regular POSIX filesystem read() and write() + + // First check if we have a block in cache already + let mut old_cache_block = None; + let mut found_existing = false; + + // NOTE(quantumish): honoring original semantics here (used to be update_with_fn) + // but I don't see any reason why this has to take a write lock. + if let Entry::Occupied(e) = self.block_map.entry(key.clone()) { + let block_entry = e.get(); + found_existing = true; + + // Prevent this entry from being evicted + let pin_count = block_entry.pinned.fetch_add(1, Ordering::Relaxed); + if pin_count > 0 { + // this is unexpected, because the caller has obtained the io-in-progress lock, + // so no one else should try to modify the page at the same time. + // XXX: and I think a read should not be happening either, because the postgres + // buffer is held locked. TODO: check these conditions and tidy this up a little. Seems fragile to just panic. 
+ panic!("block entry was unexpectedly pinned"); + } + + let cache_block = block_entry.cache_block.load(Ordering::Relaxed); + old_cache_block = if cache_block != INVALID_CACHE_BLOCK { + Some(cache_block) + } else { + None + }; + } + + // Allocate a new block if required + let cache_block = old_cache_block.unwrap_or_else(|| { + loop { + if let Some(x) = file_cache.alloc_block() { + break x; + } + if let Some(x) = self.try_evict_one_cache_block() { + break x; + } + } + }); + + // Write the page to the cache file + file_cache + .write_block(cache_block, src) + .await + .expect("error writing to cache"); + // FIXME: handle errors gracefully. + // FIXME: unpin the block entry on error + + // Update the block entry + let entry = self.block_map.entry(key); + assert_eq!(found_existing, matches!(entry, Entry::Occupied(_))); + match entry { + Entry::Occupied(e) => { + let block_entry = e.get(); + // Update the cache block + let old_blk = block_entry.cache_block.compare_exchange( + INVALID_CACHE_BLOCK, + cache_block, + Ordering::Relaxed, + Ordering::Relaxed, + ); + assert!(old_blk == Ok(INVALID_CACHE_BLOCK) || old_blk == Err(cache_block)); + + block_entry.lw_lsn.store(lw_lsn); + + block_entry.referenced.store(true, Ordering::Relaxed); + + let pin_count = block_entry.pinned.fetch_sub(1, Ordering::Relaxed); + assert!(pin_count > 0); + } + Entry::Vacant(e) => { + // FIXME: what to do if we run out of memory? Evict other relation entries? Remove + // block entries first? + _ = e + .insert(BlockEntry { + lw_lsn: AtomicLsn::new(lw_lsn.0), + cache_block: AtomicU64::new(cache_block), + pinned: AtomicU64::new(0), + referenced: AtomicBool::new(true), + }) + .expect("out of memory"); + } + } + } else { + // !is_write + // + // We can assume that it doesn't already exist, because the + // caller is assumed to have already checked it, and holds + // the io-in-progress lock. (The BlockEntry might exist, but no cache block) + + // Allocate a new block first + let cache_block = { + loop { + if let Some(x) = file_cache.alloc_block() { + break x; + } + if let Some(x) = self.try_evict_one_cache_block() { + break x; + } + } + }; + + // Write the page to the cache file + file_cache + .write_block(cache_block, src) + .await + .expect("error writing to cache"); + // FIXME: handle errors gracefully. + + match self.block_map.entry(key) { + Entry::Occupied(e) => { + let block_entry = e.get(); + // FIXME: could there be concurrent readers? + assert!(block_entry.pinned.load(Ordering::Relaxed) == 0); + + let old_cache_block = + block_entry.cache_block.swap(cache_block, Ordering::Relaxed); + if old_cache_block != INVALID_CACHE_BLOCK { + panic!( + "remember_page called in !is_write mode, but page is already cached at blk {old_cache_block}" + ); + } + } + Entry::Vacant(e) => { + // FIXME: what to do if we run out of memory? Evict other relation entries? Remove + // block entries first? + _ = e + .insert(BlockEntry { + lw_lsn: AtomicLsn::new(lw_lsn.0), + cache_block: AtomicU64::new(cache_block), + pinned: AtomicU64::new(0), + referenced: AtomicBool::new(true), + }) + .expect("out of memory"); + } + } + } + } + + /// Forget information about given relation in the cache. 
(For DROP TABLE and such) + pub fn forget_rel(&'t self, rel: &RelTag) { + tracing::info!("forgetting rel entry for {rel:?}"); + self.relsize_cache.remove(&RelKey::from(rel)); + + // also forget all cached blocks for the relation + // FIXME + /* + let mut iter = MapIterator::new(&key_range_for_rel_blocks(rel)); + let r = self.cache_tree.start_read(); + while let Some((k, _v)) = iter.next(&r) { + let w = self.cache_tree.start_write(); + + let mut evicted_cache_block = None; + + let res = w.update_with_fn(&k, |e| { + if let Some(e) = e { + let block_entry = if let MapEntry::Block(e) = e { + e + } else { + panic!("unexpected map entry type for block key"); + }; + let cache_block = block_entry + .cache_block + .swap(INVALID_CACHE_BLOCK, Ordering::Relaxed); + if cache_block != INVALID_CACHE_BLOCK { + evicted_cache_block = Some(cache_block); + } + UpdateAction::Remove + } else { + UpdateAction::Nothing + } + }); + + // FIXME: It's pretty surprising to run out of memory while removing. But + // maybe it can happen because of trying to shrink a node? + res.expect("out of memory"); + + if let Some(evicted_cache_block) = evicted_cache_block { + self.file_cache + .as_ref() + .unwrap() + .dealloc_block(evicted_cache_block); + } + } + + */ + } + + // Maintenance routines + + /// Evict one block from the file cache. This is used when the file cache fills up + /// Returns the evicted block. It's not put to the free list, so it's available for the + /// caller to use immediately. + pub fn try_evict_one_cache_block(&self) -> Option { + let mut clock_hand = self.clock_hand.lock().unwrap(); + for _ in 0..100 { + self.clock_iterations_counter.inc(); + + (*clock_hand) += 1; + + let mut evict_this = false; + let num_buckets = self.block_map.get_num_buckets(); + match self + .block_map + .get_at_bucket((*clock_hand) % num_buckets) + .as_deref() + { + None => { + // This bucket was unused + } + Some((_, blk_entry)) => { + if !blk_entry.referenced.swap(false, Ordering::Relaxed) { + // Evict this. Maybe. + evict_this = true; + } + } + }; + + if evict_this { + // grab the write lock + let mut evicted_cache_block = None; + if let Some(e) = self.block_map.entry_at_bucket(*clock_hand % num_buckets) { + let old = e.get(); + // note: all the accesses to 'pinned' currently happen + // within update_with_fn(), or while holding ValueReadGuard, which protects from concurrent + // updates. Otherwise, another thread could set the 'pinned' + // flag just after we have checked it here. + if old.pinned.load(Ordering::Relaxed) == 0 { + let _ = self + .global_lw_lsn + .fetch_max(old.lw_lsn.load().0, Ordering::Relaxed); + let cache_block = + old.cache_block.swap(INVALID_CACHE_BLOCK, Ordering::Relaxed); + if cache_block != INVALID_CACHE_BLOCK { + evicted_cache_block = Some(cache_block); + } + e.remove(); + } + } + + if evicted_cache_block.is_some() { + self.page_evictions_counter.inc(); + return evicted_cache_block; + } + } + } + // Give up if we didn't find anything + None + } + + /// Resize the local file cache. 
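Before the resize helper below, a compact illustration of the clock ("second chance") sweep that try_evict_one_cache_block implements above. This is a sketch with simplified stand-in types, not the actual bucket-walking map API used in this file:

use std::sync::atomic::{AtomicBool, Ordering};

struct Entry {
    referenced: AtomicBool,
    cache_block: u64,
}

/// Advance the clock hand until an unreferenced entry is found, or give up.
fn sweep(entries: &[Entry], clock_hand: &mut usize, max_steps: usize) -> Option<u64> {
    for _ in 0..max_steps {
        *clock_hand = (*clock_hand + 1) % entries.len();
        let e = &entries[*clock_hand];
        // The first visit clears the reference bit ("second chance"); an entry
        // is only evicted if the bit was already clear when the hand arrived.
        if !e.referenced.swap(false, Ordering::Relaxed) {
            return Some(e.cache_block);
        }
    }
    None
}

The reference bit gives every entry one grace pass of the hand before it becomes an eviction candidate; the real routine additionally skips pinned entries and pushes the evicted entry's last-written LSN into the global low-water mark.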
+ pub fn resize_file_cache(&self, num_blocks: u32) { + let old_num_blocks = self.block_map.get_num_buckets() as u32; + + if old_num_blocks < num_blocks { + if let Err(err) = self.block_map.grow(num_blocks) { + tracing::warn!( + "could not grow file cache to {} blocks (old size {}): {}", + num_blocks, + old_num_blocks, + err + ); + } + } else { + // TODO: Shrinking not implemented yet + } + } + + pub fn dump_map(&self, _dst: &mut dyn std::io::Write) { + //FIXME self.cache_map.start_read().dump(dst); + } +} + +impl metrics::core::Collector for IntegratedCacheWriteAccess<'_> { + fn desc(&self) -> Vec<&metrics::core::Desc> { + let mut descs = Vec::new(); + descs.append(&mut self.page_evictions_counter.desc()); + descs.append(&mut self.clock_iterations_counter.desc()); + + descs.append(&mut self.block_map_num_buckets.desc()); + descs.append(&mut self.block_map_num_buckets_in_use.desc()); + + descs.append(&mut self.relsize_cache_num_buckets.desc()); + descs.append(&mut self.relsize_cache_num_buckets_in_use.desc()); + + descs + } + fn collect(&self) -> Vec { + // Update gauges + self.block_map_num_buckets + .set(self.block_map.get_num_buckets() as i64); + self.block_map_num_buckets_in_use + .set(self.block_map.get_num_buckets_in_use() as i64); + self.relsize_cache_num_buckets + .set(self.relsize_cache.get_num_buckets() as i64); + self.relsize_cache_num_buckets_in_use + .set(self.relsize_cache.get_num_buckets_in_use() as i64); + + let mut values = Vec::new(); + values.append(&mut self.page_evictions_counter.collect()); + values.append(&mut self.clock_iterations_counter.collect()); + + values.append(&mut self.block_map_num_buckets.collect()); + values.append(&mut self.block_map_num_buckets_in_use.collect()); + + values.append(&mut self.relsize_cache_num_buckets.collect()); + values.append(&mut self.relsize_cache_num_buckets_in_use.collect()); + + values + } +} + +/// Read relation size from the cache. +/// +/// This is in a separate function so that it can be shared by +/// IntegratedCacheReadAccess::get_rel_size() and IntegratedCacheWriteAccess::get_rel_size() +fn get_rel_size( + r: &neon_shmem::hash::HashMapAccess, + rel: &RelTag, +) -> Option { + if let Some(rel_entry) = r.get(&RelKey::from(rel)) { + let nblocks = rel_entry.nblocks.load(Ordering::Relaxed); + if nblocks != u32::MAX { + Some(nblocks) + } else { + None + } + } else { + None + } +} + +/// Accessor for other backends +/// +/// This allows backends to read pages from the cache directly, on their own, without making a +/// request to the communicator process. +impl<'t> IntegratedCacheReadAccess<'t> { + pub fn get_rel_size(&'t self, rel: &RelTag) -> Option { + get_rel_size(&self.relsize_cache, rel) + } + + pub fn start_read_op(&'t self) -> BackendCacheReadOp<'t> { + BackendCacheReadOp { + read_guards: Vec::new(), + map_access: self, + } + } + + /// Check if the given page is present in the cache + pub fn cache_contains_page(&'t self, rel: &RelTag, block_number: u32) -> bool { + self.block_map + .get(&BlockKey::from((rel, block_number))) + .is_some() + } +} + +pub struct BackendCacheReadOp<'t> { + read_guards: Vec, + map_access: &'t IntegratedCacheReadAccess<'t>, +} + +impl<'e> BackendCacheReadOp<'e> { + /// Initiate a read of the page from the cache. + /// + /// This returns the "cache block number", i.e. the block number within the cache file, where + /// the page's contents is stored. To get the page contents, the caller needs to read that block + /// from the cache file. 
This returns a guard object that you must hold while it performs the + /// read. It's possible that while you are performing the read, the cache block is invalidated. + /// After you have completed the read, call BackendCacheReadResult::finish() to check if the + /// read was in fact valid or not. If it was concurrently invalidated, you need to retry. + pub fn get_page(&mut self, rel: &RelTag, block_number: u32) -> Option { + if let Some(block_entry) = self + .map_access + .block_map + .get(&BlockKey::from((rel, block_number))) + { + block_entry.referenced.store(true, Ordering::Relaxed); + + let cache_block = block_entry.cache_block.load(Ordering::Relaxed); + if cache_block != INVALID_CACHE_BLOCK { + block_entry.pinned.fetch_add(1, Ordering::Relaxed); + self.read_guards + .push(DeferredUnpin(block_entry.pinned.as_ptr())); + Some(cache_block) + } else { + None + } + } else { + None + } + } + + pub fn finish(self) -> bool { + // TODO: currently, we hold a pin on the in-memory map, so concurrent invalidations are not + // possible. But if we switch to optimistic locking, this would return 'false' if the + // optimistic locking failed and you need to retry. + true + } +} + +/// A hack to decrement an AtomicU64 on drop. This is used to decrement the pin count +/// of a BlockEntry. The safety depends on the fact that the BlockEntry is not evicted +/// or moved while it's pinned. +struct DeferredUnpin(*mut u64); + +unsafe impl Sync for DeferredUnpin {} +unsafe impl Send for DeferredUnpin {} + +impl Drop for DeferredUnpin { + fn drop(&mut self) { + // unpin it + unsafe { + let pin_ref = AtomicU64::from_ptr(self.0); + pin_ref.fetch_sub(1, Ordering::Relaxed); + } + } +} diff --git a/pgxn/neon/communicator/src/lib.rs b/pgxn/neon/communicator/src/lib.rs index 4120ce0d38..fbe582df78 100644 --- a/pgxn/neon/communicator/src/lib.rs +++ b/pgxn/neon/communicator/src/lib.rs @@ -1 +1,27 @@ -pub mod file_cache_hashmap; +//! +//! Three main parts: +//! - async tokio communicator core, which receives requests and processes them. +//! - Main loop and requests queues, which routes requests from backends to the core +//! - the per-backend glue code, which submits requests +//! + +mod backend_comms; + +// mark this 'pub', because these functions are called from C code. Otherwise, the compiler +// complains about a bunch of structs and enum variants being unused, because it thinkgs +// the functions that use them are never called. There are some C-callable functions in +// other modules too, but marking this as pub is currently enough to silence the warnings +// +// TODO: perhaps collect *all* the extern "C" functions to one module? +pub mod backend_interface; + +mod file_cache; +mod init; +mod integrated_cache; +mod neon_request; +mod worker_process; + +mod global_allocator; + +// FIXME get this from postgres headers somehow +pub const BLCKSZ: usize = 8192; diff --git a/pgxn/neon/communicator/src/neon_request.rs b/pgxn/neon/communicator/src/neon_request.rs new file mode 100644 index 0000000000..f54dcd9222 --- /dev/null +++ b/pgxn/neon/communicator/src/neon_request.rs @@ -0,0 +1,377 @@ +pub type CLsn = u64; +pub type COid = u32; + +// This conveniently matches PG_IOV_MAX +pub const MAX_GETPAGEV_PAGES: usize = 32; + +use pageserver_page_api as page_api; + +#[allow(clippy::large_enum_variant)] +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub enum NeonIORequest { + Empty, + + // Read requests. These are C-friendly variants of the corresponding structs in + // pageserver_page_api. 
+ RelExists(CRelExistsRequest), + RelSize(CRelSizeRequest), + GetPageV(CGetPageVRequest), + PrefetchV(CPrefetchVRequest), + DbSize(CDbSizeRequest), + + // Write requests. These are needed to keep the relation size cache and LFC up-to-date. + // They are not sent to the pageserver. + WritePage(CWritePageRequest), + RelExtend(CRelExtendRequest), + RelZeroExtend(CRelZeroExtendRequest), + RelCreate(CRelCreateRequest), + RelTruncate(CRelTruncateRequest), + RelUnlink(CRelUnlinkRequest), +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub enum NeonIOResult { + Empty, + RelExists(bool), + RelSize(u32), + + /// the result pages are written to the shared memory addresses given in the request + GetPageV, + + /// A prefetch request returns as soon as the request has been received by the communicator. + /// It is processed in the background. + PrefetchVLaunched, + + DbSize(u64), + + // FIXME design compact error codes. Can't easily pass a string or other dynamic data. + // currently, this is 'errno' + Error(i32), + + Aborted, + + /// used for all write requests + WriteOK, +} + +impl NeonIORequest { + pub fn request_id(&self) -> u64 { + use NeonIORequest::*; + match self { + Empty => 0, + RelExists(req) => req.request_id, + RelSize(req) => req.request_id, + GetPageV(req) => req.request_id, + PrefetchV(req) => req.request_id, + DbSize(req) => req.request_id, + WritePage(req) => req.request_id, + RelExtend(req) => req.request_id, + RelZeroExtend(req) => req.request_id, + RelCreate(req) => req.request_id, + RelTruncate(req) => req.request_id, + RelUnlink(req) => req.request_id, + } + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CCachedGetPageVResult { + pub cache_block_numbers: [u64; MAX_GETPAGEV_PAGES], +} + +/// ShmemBuf represents a buffer in shared memory. +/// +/// SAFETY: The pointer must point to an area in shared memory. The functions allow you to liberally +/// get a mutable pointer to the contents; it is the caller's responsibility to ensure that you +/// don't access a buffer that's you're not allowed to. Inappropriate access to the buffer doesn't +/// violate Rust's safety semantics, but it will mess up and crash Postgres. +/// +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct ShmemBuf { + // These fields define where the result is written. Must point into a buffer in shared memory! 
+ pub ptr: *mut u8, +} + +unsafe impl Send for ShmemBuf {} +unsafe impl Sync for ShmemBuf {} + +unsafe impl uring_common::buf::IoBuf for ShmemBuf { + fn stable_ptr(&self) -> *const u8 { + self.ptr + } + + fn bytes_init(&self) -> usize { + crate::BLCKSZ + } + + fn bytes_total(&self) -> usize { + crate::BLCKSZ + } +} + +unsafe impl uring_common::buf::IoBufMut for ShmemBuf { + fn stable_mut_ptr(&mut self) -> *mut u8 { + self.ptr + } + + unsafe fn set_init(&mut self, pos: usize) { + if pos > crate::BLCKSZ { + panic!( + "set_init called past end of buffer, pos {}, buffer size {}", + pos, + crate::BLCKSZ + ); + } + } +} + +impl ShmemBuf { + pub fn as_mut_ptr(&self) -> *mut u8 { + self.ptr + } +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CRelExistsRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CRelSizeRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CGetPageVRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub block_number: u32, + pub nblocks: u8, + + // These fields define where the result is written. Must point into a buffer in shared memory! + pub dest: [ShmemBuf; MAX_GETPAGEV_PAGES], +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CPrefetchVRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub block_number: u32, + pub nblocks: u8, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CDbSizeRequest { + pub request_id: u64, + pub db_oid: COid, + pub request_lsn: CLsn, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CWritePageRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub block_number: u32, + pub lsn: CLsn, + + // These fields define where the result is written. Must point into a buffer in shared memory! + pub src: ShmemBuf, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CRelExtendRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub block_number: u32, + pub lsn: CLsn, + + // These fields define page contents. Must point into a buffer in shared memory! 
+ pub src: ShmemBuf, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CRelZeroExtendRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub block_number: u32, + pub nblocks: u32, + pub lsn: CLsn, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CRelCreateRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CRelTruncateRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub nblocks: u32, +} + +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub struct CRelUnlinkRequest { + pub request_id: u64, + pub spc_oid: COid, + pub db_oid: COid, + pub rel_number: u32, + pub fork_number: u8, + pub block_number: u32, + pub nblocks: u32, +} + +impl CRelExistsRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CRelSizeRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CGetPageVRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CPrefetchVRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CWritePageRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CRelExtendRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CRelZeroExtendRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CRelCreateRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CRelTruncateRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} + +impl CRelUnlinkRequest { + pub fn reltag(&self) -> page_api::RelTag { + page_api::RelTag { + spcnode: self.spc_oid, + dbnode: self.db_oid, + relnode: self.rel_number, + forknum: self.fork_number, + } + } +} diff --git a/pgxn/neon/communicator/src/worker_process/callbacks.rs b/pgxn/neon/communicator/src/worker_process/callbacks.rs new file mode 100644 index 0000000000..c3b3a8e3b5 --- /dev/null +++ b/pgxn/neon/communicator/src/worker_process/callbacks.rs @@ -0,0 +1,28 @@ +//! C callbacks to PostgreSQL facilities that the neon extension needs +//! to provide. These are implemented in `neon/pgxn/communicator_new.c`. +//! The function signatures better match! +//! +//! These are called from the communicator threads! 
Careful what you do, most +//! Postgres functions are not safe to call in that context. + +use utils::lsn::Lsn; + +unsafe extern "C" { + pub fn notify_proc_unsafe(procno: std::ffi::c_int); + pub fn callback_set_my_latch_unsafe(); + pub fn callback_get_request_lsn_unsafe() -> u64; +} + +// safe wrappers + +pub(super) fn notify_proc(procno: std::ffi::c_int) { + unsafe { notify_proc_unsafe(procno) }; +} + +pub(super) fn callback_set_my_latch() { + unsafe { callback_set_my_latch_unsafe() }; +} + +pub(super) fn get_request_lsn() -> Lsn { + Lsn(unsafe { callback_get_request_lsn_unsafe() }) +} diff --git a/pgxn/neon/communicator/src/worker_process/in_progress_ios.rs b/pgxn/neon/communicator/src/worker_process/in_progress_ios.rs new file mode 100644 index 0000000000..5979c9c23b --- /dev/null +++ b/pgxn/neon/communicator/src/worker_process/in_progress_ios.rs @@ -0,0 +1,95 @@ +//! Lock table to ensure that only one IO request is in flight for a given +//! block (or relation or database metadata) at a time + +use std::cmp::Eq; +use std::hash::Hash; +use std::sync::Arc; + +use tokio::sync::{Mutex, OwnedMutexGuard}; + +use clashmap::ClashMap; +use clashmap::Entry; + +use pageserver_page_api::RelTag; + +#[derive(Clone, Eq, Hash, PartialEq)] +pub enum RequestInProgressKey { + Db(u32), + Rel(RelTag), + Block(RelTag, u32), +} + +type RequestId = u64; + +pub type RequestInProgressTable = MutexHashMap; + +// more primitive locking thingie: + +pub struct MutexHashMap +where + K: Clone + Eq + Hash, +{ + lock_table: ClashMap>)>, +} + +pub struct MutexHashMapGuard<'a, K, V> +where + K: Clone + Eq + Hash, +{ + pub key: K, + map: &'a MutexHashMap, + mutex: Arc>, + _guard: OwnedMutexGuard<()>, +} + +impl<'a, K, V> Drop for MutexHashMapGuard<'a, K, V> +where + K: Clone + Eq + Hash, +{ + fn drop(&mut self) { + let (_old_key, old_val) = self.map.lock_table.remove(&self.key).unwrap(); + assert!(Arc::ptr_eq(&old_val.1, &self.mutex)); + + // the guard will be dropped as we return + } +} + +impl MutexHashMap +where + K: Clone + Eq + Hash, + V: std::fmt::Display + Copy, +{ + pub fn new() -> MutexHashMap { + MutexHashMap { + lock_table: ClashMap::new(), + } + } + + pub async fn lock<'a>(&'a self, key: K, val: V) -> MutexHashMapGuard<'a, K, V> { + let my_mutex = Arc::new(Mutex::new(())); + let my_guard = Arc::clone(&my_mutex).lock_owned().await; + + loop { + let (request_id, lock) = match self.lock_table.entry(key.clone()) { + Entry::Occupied(e) => { + let e = e.get(); + (e.0, Arc::clone(&e.1)) + } + Entry::Vacant(e) => { + e.insert((val, Arc::clone(&my_mutex))); + break; + } + }; + tracing::info!("waiting for conflicting IO {request_id} to complete"); + let _ = lock.lock().await; + tracing::info!("conflicting IO {request_id} completed"); + } + + MutexHashMapGuard { + key, + map: self, + mutex: my_mutex, + _guard: my_guard, + } + } +} diff --git a/pgxn/neon/communicator/src/worker_process/logging.rs b/pgxn/neon/communicator/src/worker_process/logging.rs new file mode 100644 index 0000000000..3b652e203f --- /dev/null +++ b/pgxn/neon/communicator/src/worker_process/logging.rs @@ -0,0 +1,231 @@ +//! Glue code to hook up Rust logging with the `tracing` crate to the PostgreSQL log +//! +//! In the Rust threads, the log messages are written to a mpsc Channel, and the Postgres +//! process latch is raised. That wakes up the loop in the main thread. It reads the +//! message from the channel and ereport()s it. This ensures that only one thread, the main +//! thread, calls the PostgreSQL logging routines at any time. 
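To make the hand-off concrete, here is a minimal, self-contained sketch of the producer/consumer flow described above, using plain String messages and println! as a stand-in for ereport(); the real code routes messages through a tracing layer and raises the process latch to wake the consumer:

use std::sync::mpsc::{sync_channel, TryRecvError};
use std::thread;

fn main() {
    // Bounded queue: a burst of messages is dropped rather than growing without limit.
    let (tx, rx) = sync_channel::<String>(1000);

    // Producer side: in the real code this runs inside a tracing layer on a
    // communicator thread and additionally raises the process latch.
    let producer = thread::spawn(move || {
        let _ = tx.try_send("communicator process logging started".to_string());
    });
    producer.join().unwrap();

    // Consumer side: the Postgres main thread drains the queue. Empty means
    // "nothing to report right now"; Disconnected means the producers are gone.
    loop {
        match rx.try_recv() {
            Ok(msg) => println!("ereport stand-in: {msg}"),
            Err(TryRecvError::Empty) | Err(TryRecvError::Disconnected) => break,
        }
    }
}

The bounded sync_channel mirrors the real design choice: when the queue fills up, messages are dropped instead of blocking the logging thread or growing memory unboundedly.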
+ +use std::sync::mpsc::sync_channel; +use std::sync::mpsc::{Receiver, SyncSender}; +use std::sync::mpsc::{TryRecvError, TrySendError}; + +use tracing::info; +use tracing::{Event, Level, Metadata, Subscriber}; +use tracing_subscriber::filter::LevelFilter; +use tracing_subscriber::fmt::FmtContext; +use tracing_subscriber::fmt::FormatEvent; +use tracing_subscriber::fmt::FormatFields; +use tracing_subscriber::fmt::FormattedFields; +use tracing_subscriber::fmt::MakeWriter; +use tracing_subscriber::fmt::format::Writer; +use tracing_subscriber::registry::LookupSpan; + +use crate::worker_process::callbacks::callback_set_my_latch; + +pub struct LoggingState { + receiver: Receiver, +} + +/// Called once, at worker process startup. The returned LoggingState is passed back +/// in the subsequent calls to `pump_logging`. It is opaque to the C code. +#[unsafe(no_mangle)] +pub extern "C" fn configure_logging() -> Box { + let (sender, receiver) = sync_channel(1000); + + let maker = Maker { channel: sender }; + + use tracing_subscriber::prelude::*; + let r = tracing_subscriber::registry(); + + let r = r.with( + tracing_subscriber::fmt::layer() + .with_ansi(false) + .event_format(SimpleFormatter::new()) + .with_writer(maker) + // TODO: derive this from log_min_messages? + .with_filter(LevelFilter::from_level(Level::INFO)), + ); + r.init(); + + info!("communicator process logging started"); + + let state = LoggingState { receiver }; + + Box::new(state) +} + +/// Read one message from the logging queue. This is essentially a wrapper to Receiver, +/// with a C-friendly signature. +/// +/// The message is copied into *errbuf, which is a caller-supplied buffer of size `errbuf_len`. +/// If the message doesn't fit in the buffer, it is truncated. It is always NULL-terminated. +/// +/// The error level is returned *elevel_p. It's one of the PostgreSQL error levels, see elog.h +#[unsafe(no_mangle)] +pub extern "C" fn pump_logging( + state: &mut LoggingState, + errbuf: *mut u8, + errbuf_len: u32, + elevel_p: &mut i32, +) -> i32 { + let msg = match state.receiver.try_recv() { + Err(TryRecvError::Empty) => return 0, + Err(TryRecvError::Disconnected) => return -1, + Ok(msg) => msg, + }; + + let src: &[u8] = &msg.message; + let dst = errbuf; + let len = std::cmp::min(src.len(), errbuf_len as usize - 1); + unsafe { + std::ptr::copy_nonoverlapping(src.as_ptr(), dst, len); + *(errbuf.add(len)) = b'\0'; // NULL terminator + } + + // XXX: these levels are copied from PostgreSQL's elog.h. Introduce another enum + // to hide these? 
+ *elevel_p = match msg.level { + Level::TRACE => 10, // DEBUG5 + Level::DEBUG => 14, // DEBUG1 + Level::INFO => 17, // INFO + Level::WARN => 19, // WARNING + Level::ERROR => 21, // ERROR + }; + + 1 +} + +//---- The following functions can be called from any thread ---- + +#[derive(Clone)] +struct FormattedEventWithMeta { + message: Vec, + level: tracing::Level, +} + +impl Default for FormattedEventWithMeta { + fn default() -> Self { + FormattedEventWithMeta { + message: Vec::new(), + level: tracing::Level::DEBUG, + } + } +} + +struct EventBuilder<'a> { + event: FormattedEventWithMeta, + + maker: &'a Maker, +} + +impl std::io::Write for EventBuilder<'_> { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + self.event.message.write(buf) + } + fn flush(&mut self) -> std::io::Result<()> { + self.maker.send_event(self.event.clone()); + Ok(()) + } +} + +impl Drop for EventBuilder<'_> { + fn drop(&mut self) { + let maker = self.maker; + let event = std::mem::take(&mut self.event); + + maker.send_event(event); + } +} + +struct Maker { + channel: SyncSender, +} + +impl<'a> MakeWriter<'a> for Maker { + type Writer = EventBuilder<'a>; + + fn make_writer(&'a self) -> Self::Writer { + panic!("not expected to be called when make_writer_for is implemented"); + } + + fn make_writer_for(&'a self, meta: &Metadata<'_>) -> Self::Writer { + EventBuilder { + event: FormattedEventWithMeta { + message: Vec::new(), + level: *meta.level(), + }, + maker: self, + } + } +} + +impl Maker { + fn send_event(&self, e: FormattedEventWithMeta) { + match self.channel.try_send(e) { + Ok(()) => { + // notify the main thread + callback_set_my_latch(); + } + Err(TrySendError::Disconnected(_)) => {} + Err(TrySendError::Full(_)) => { + // TODO: record that some messages were lost + } + } + } +} + +/// Simple formatter implementation for tracing_subscriber, which prints the log +/// spans and message part like the default formatter, but no timestamp or error +/// level. The error level is captured separately by `FormattedEventWithMeta', +/// and when the error is printed by the main thread, with PostgreSQL ereport(), +/// it gets a timestamp at that point. (The timestamp printed will therefore lag +/// behind the timestamp on the event here, if the main thread doesn't process +/// the log message promptly) +struct SimpleFormatter; + +impl FormatEvent for SimpleFormatter +where + S: Subscriber + for<'a> LookupSpan<'a>, + N: for<'a> FormatFields<'a> + 'static, +{ + fn format_event( + &self, + ctx: &FmtContext<'_, S, N>, + mut writer: Writer<'_>, + event: &Event<'_>, + ) -> std::fmt::Result { + // Format all the spans in the event's span context. + if let Some(scope) = ctx.event_scope() { + for span in scope.from_root() { + write!(writer, "{}", span.name())?; + + // `FormattedFields` is a formatted representation of the span's + // fields, which is stored in its extensions by the `fmt` layer's + // `new_span` method. The fields will have been formatted + // by the same field formatter that's provided to the event + // formatter in the `FmtContext`. + let ext = span.extensions(); + let fields = &ext + .get::>() + .expect("will never be `None`"); + + // Skip formatting the fields if the span had no fields. 
+ if !fields.is_empty() { + write!(writer, "{{{fields}}}")?; + } + write!(writer, ": ")?; + } + } + + // Write fields on the event + ctx.field_format().format_fields(writer.by_ref(), event)?; + + writeln!(writer) + } +} + +impl SimpleFormatter { + fn new() -> Self { + SimpleFormatter {} + } +} diff --git a/pgxn/neon/communicator/src/worker_process/main_loop.rs b/pgxn/neon/communicator/src/worker_process/main_loop.rs new file mode 100644 index 0000000000..fe6acbf049 --- /dev/null +++ b/pgxn/neon/communicator/src/worker_process/main_loop.rs @@ -0,0 +1,731 @@ +use std::collections::HashMap; +use std::os::fd::AsRawFd; +use std::os::fd::OwnedFd; +use std::path::PathBuf; +use std::str::FromStr as _; + +use crate::backend_comms::NeonIOHandle; +use crate::file_cache::FileCache; +use crate::global_allocator::MyAllocatorCollector; +use crate::init::CommunicatorInitStruct; +use crate::integrated_cache::{CacheResult, IntegratedCacheWriteAccess}; +use crate::neon_request::{CGetPageVRequest, CPrefetchVRequest}; +use crate::neon_request::{NeonIORequest, NeonIOResult}; +use crate::worker_process::in_progress_ios::{RequestInProgressKey, RequestInProgressTable}; +use pageserver_client_grpc::{PageserverClient, ShardSpec}; +use pageserver_page_api as page_api; + +use metrics::{IntCounter, IntCounterVec}; + +use tokio::io::AsyncReadExt; +use tokio_pipe::PipeRead; +use uring_common::buf::IoBuf; +use utils::id::{TenantId, TimelineId}; + +use super::callbacks::{get_request_lsn, notify_proc}; + +use tracing::{error, info, info_span, trace}; + +use utils::lsn::Lsn; + +pub struct CommunicatorWorkerProcessStruct<'a> { + neon_request_slots: &'a [NeonIOHandle], + + client: PageserverClient, + + pub(crate) cache: IntegratedCacheWriteAccess<'a>, + + submission_pipe_read_fd: OwnedFd, + + in_progress_table: RequestInProgressTable, + + // Metrics + request_counters: IntCounterVec, + request_rel_exists_counter: IntCounter, + request_rel_size_counter: IntCounter, + request_get_pagev_counter: IntCounter, + request_prefetchv_counter: IntCounter, + request_db_size_counter: IntCounter, + request_write_page_counter: IntCounter, + request_rel_extend_counter: IntCounter, + request_rel_zero_extend_counter: IntCounter, + request_rel_create_counter: IntCounter, + request_rel_truncate_counter: IntCounter, + request_rel_unlink_counter: IntCounter, + + getpage_cache_misses_counter: IntCounter, + getpage_cache_hits_counter: IntCounter, + + request_nblocks_counters: IntCounterVec, + request_get_pagev_nblocks_counter: IntCounter, + request_prefetchv_nblocks_counter: IntCounter, + request_rel_zero_extend_nblocks_counter: IntCounter, + + allocator_metrics: MyAllocatorCollector, +} + +pub(super) async fn init( + cis: Box, + tenant_id: String, + timeline_id: String, + auth_token: Option, + shard_map: HashMap, + initial_file_cache_size: u64, + file_cache_path: Option, +) -> CommunicatorWorkerProcessStruct<'static> { + info!("Test log message"); + let last_lsn = get_request_lsn(); + + let file_cache = if let Some(path) = file_cache_path { + Some(FileCache::new(&path, initial_file_cache_size).expect("could not create cache file")) + } else { + // FIXME: temporarily for testing, use LFC even if disabled + Some( + FileCache::new(&PathBuf::from("new_filecache"), 1000) + .expect("could not create cache file"), + ) + }; + + // Initialize subsystems + let cache = cis + .integrated_cache_init_struct + .worker_process_init(last_lsn, file_cache); + + // TODO: plumb through the stripe size. 
+ let tenant_id = TenantId::from_str(&tenant_id).expect("invalid tenant ID"); + let timeline_id = TimelineId::from_str(&timeline_id).expect("invalid timeline ID"); + let shard_spec = ShardSpec::new(shard_map, None).expect("invalid shard spec"); + let client = PageserverClient::new(tenant_id, timeline_id, shard_spec, auth_token) + .expect("could not create client"); + + let request_counters = IntCounterVec::new( + metrics::core::Opts::new( + "backend_requests_total", + "Number of requests from backends.", + ), + &["request_kind"], + ) + .unwrap(); + let request_rel_exists_counter = request_counters.with_label_values(&["rel_exists"]); + let request_rel_size_counter = request_counters.with_label_values(&["rel_size"]); + let request_get_pagev_counter = request_counters.with_label_values(&["get_pagev"]); + let request_prefetchv_counter = request_counters.with_label_values(&["prefetchv"]); + let request_db_size_counter = request_counters.with_label_values(&["db_size"]); + let request_write_page_counter = request_counters.with_label_values(&["write_page"]); + let request_rel_extend_counter = request_counters.with_label_values(&["rel_extend"]); + let request_rel_zero_extend_counter = request_counters.with_label_values(&["rel_zero_extend"]); + let request_rel_create_counter = request_counters.with_label_values(&["rel_create"]); + let request_rel_truncate_counter = request_counters.with_label_values(&["rel_truncate"]); + let request_rel_unlink_counter = request_counters.with_label_values(&["rel_unlink"]); + + let getpage_cache_misses_counter = IntCounter::new( + "getpage_cache_misses", + "Number of file cache misses in get_pagev requests.", + ) + .unwrap(); + let getpage_cache_hits_counter = IntCounter::new( + "getpage_cache_hits", + "Number of file cache hits in get_pagev requests.", + ) + .unwrap(); + + // For the requests that affect multiple blocks, have separate counters for the # of blocks affected + let request_nblocks_counters = IntCounterVec::new( + metrics::core::Opts::new( + "request_nblocks_total", + "Number of blocks in backend requests.", + ), + &["request_kind"], + ) + .unwrap(); + let request_get_pagev_nblocks_counter = + request_nblocks_counters.with_label_values(&["get_pagev"]); + let request_prefetchv_nblocks_counter = + request_nblocks_counters.with_label_values(&["prefetchv"]); + let request_rel_zero_extend_nblocks_counter = + request_nblocks_counters.with_label_values(&["rel_zero_extend"]); + + CommunicatorWorkerProcessStruct { + neon_request_slots: cis.neon_request_slots, + client, + cache, + submission_pipe_read_fd: cis.submission_pipe_read_fd, + in_progress_table: RequestInProgressTable::new(), + + // metrics + request_counters, + request_rel_exists_counter, + request_rel_size_counter, + request_get_pagev_counter, + request_prefetchv_counter, + request_db_size_counter, + request_write_page_counter, + request_rel_extend_counter, + request_rel_zero_extend_counter, + request_rel_create_counter, + request_rel_truncate_counter, + request_rel_unlink_counter, + + getpage_cache_misses_counter, + getpage_cache_hits_counter, + + request_nblocks_counters, + request_get_pagev_nblocks_counter, + request_prefetchv_nblocks_counter, + request_rel_zero_extend_nblocks_counter, + + allocator_metrics: MyAllocatorCollector::new(), + } +} + +impl<'t> CommunicatorWorkerProcessStruct<'t> { + /// Main loop of the worker process. Receive requests from the backends and process them. 
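The doorbell that wakes this loop is nothing more than the slot index written to the submission pipe as four bytes in native byte order, which the loop below decodes with u32::from_ne_bytes. A tiny sketch of both ends of that encoding (the function names here are illustrative, not part of the actual API):

// Doorbell encoding on the submission pipe: 4 bytes, native byte order.
fn ring_doorbell(slot_idx: u32) -> [u8; 4] {
    // The backend writes these bytes into the pipe.
    slot_idx.to_ne_bytes()
}

fn read_doorbell(buf: [u8; 4]) -> usize {
    // The worker reads exactly 4 bytes and uses them as an index into the slot array.
    u32::from_ne_bytes(buf) as usize
}

fn main() {
    assert_eq!(read_doorbell(ring_doorbell(42)), 42);
}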
+ pub(super) async fn run(&'static self) { + let mut idxbuf: [u8; 4] = [0; 4]; + + let mut submission_pipe_read = + PipeRead::try_from(self.submission_pipe_read_fd.as_raw_fd()).expect("invalid pipe fd"); + + loop { + // Wait for a backend to ring the doorbell + match submission_pipe_read.read(&mut idxbuf).await { + Ok(4) => {} + Ok(nbytes) => panic!("short read ({nbytes} bytes) on communicator pipe"), + Err(e) => panic!("error reading from communicator pipe: {e}"), + } + let slot_idx = u32::from_ne_bytes(idxbuf) as usize; + + // Read the IO request from the slot indicated in the wakeup + let Some(slot) = self.neon_request_slots[slot_idx].start_processing_request() else { + // This currently should not happen. But if we had multiple threads picking up + // requests, and without waiting for the notifications, it could. + panic!("no request in slot"); + }; + + // Ok, we have ownership of this request now. We must process it now, there's no going + // back. + // + // Spawn a separate task for every request. That's a little excessive for requests that + // can be quickly satisfied from the cache, but we expect that to be rare, because the + // requesting backend would have already checked the cache. + tokio::spawn(async move { + use tracing::Instrument; + + let request_id = slot.get_request().request_id(); + let owner_procno = slot.get_owner_procno(); + + let span = info_span!( + "processing", + request_id = request_id, + slot_idx = slot_idx, + procno = owner_procno, + ); + async { + // FIXME: as a temporary hack, abort the request if we don't get a response + // promptly. + // + // Lots of regression tests are getting stuck and failing at the moment, + // this makes them fail a little faster, which it faster to iterate. + // This needs to be removed once more regression tests are passing. + // See also similar hack in the backend code, in wait_request_completion() + let result = tokio::time::timeout( + tokio::time::Duration::from_secs(30), + self.handle_request(slot.get_request()), + ) + .await + .unwrap_or_else(|_elapsed| { + info!("request {request_id} timed out"); + NeonIOResult::Error(libc::ETIMEDOUT) + }); + trace!("request {request_id} at slot {slot_idx} completed"); + + // Ok, we have completed the IO. Mark the request as completed. After that, + // we no longer have ownership of the slot, and must not modify it. + slot.completed(result); + + // Notify the backend about the completion. (Note that the backend might see + // the completed status even before this; this is just a wakeup) + notify_proc(owner_procno); + } + .instrument(span) + .await + }); + } + } + + /// Compute the 'request_lsn' to use for a pageserver request + fn request_lsns(&self, not_modified_since_lsn: Lsn) -> page_api::ReadLsn { + let mut request_lsn = get_request_lsn(); + + // Is it possible that the last-written LSN is ahead of last flush LSN? Generally not, we + // shouldn't evict a page from the buffer cache before all its modifications have been + // safely flushed. That's the "WAL before data" rule. However, such case does exist at index + // building: _bt_blwritepage logs the full page without flushing WAL before smgrextend + // (files are fsynced before build ends). + // + // XXX: If we make a request LSN greater than the current WAL flush LSN, the pageserver would + // block waiting for the WAL arrive, until we flush it and it propagates through the + // safekeepers to the pageserver. If there's nothing that forces the WAL to be flushed, + // the pageserver would get stuck waiting forever. 
To avoid that, all the write- + // functions in communicator_new.c call XLogSetAsyncXactLSN(). That nudges the WAL writer to + // perform the flush relatively soon. + // + // It would perhaps be nicer to do the WAL flush here, but it's tricky to call back into + // Postgres code to do that from here. That's why we rely on communicator_new.c to do the + // calls "pre-emptively". + // + // FIXME: Because of the above, it can still happen that the flush LSN is ahead of + // not_modified_since, if the WAL writer hasn't done the flush yet. It would be nice to know + // if there are other cases like that that we have mised, but unfortunately we cannot turn + // this into an assertion because of that legit case. + // + // See also the old logic in neon_get_request_lsns() C function + if not_modified_since_lsn > request_lsn { + tracing::info!( + "not_modified_since_lsn {} is ahead of last flushed LSN {}", + not_modified_since_lsn, + request_lsn + ); + request_lsn = not_modified_since_lsn; + } + + page_api::ReadLsn { + request_lsn, + not_modified_since_lsn: Some(not_modified_since_lsn), + } + } + + /// Handle one IO request + async fn handle_request(&'static self, req: &'_ NeonIORequest) -> NeonIOResult { + match req { + NeonIORequest::Empty => { + error!("unexpected Empty IO request"); + NeonIOResult::Error(0) + } + NeonIORequest::RelExists(req) => { + self.request_rel_exists_counter.inc(); + let rel = req.reltag(); + + let _in_progress_guard = self + .in_progress_table + .lock(RequestInProgressKey::Rel(rel), req.request_id) + .await; + + // Check the cache first + let not_modified_since = match self.cache.get_rel_exists(&rel) { + CacheResult::Found(exists) => return NeonIOResult::RelExists(exists), + CacheResult::NotFound(lsn) => lsn, + }; + + match self + .client + .check_rel_exists(page_api::CheckRelExistsRequest { + read_lsn: self.request_lsns(not_modified_since), + rel, + }) + .await + { + Ok(exists) => NeonIOResult::RelExists(exists), + Err(err) => { + info!("tonic error: {err:?}"); + NeonIOResult::Error(0) + } + } + } + + NeonIORequest::RelSize(req) => { + self.request_rel_size_counter.inc(); + let rel = req.reltag(); + + let _in_progress_guard = self + .in_progress_table + .lock(RequestInProgressKey::Rel(rel), req.request_id) + .await; + + // Check the cache first + let not_modified_since = match self.cache.get_rel_size(&rel) { + CacheResult::Found(nblocks) => { + tracing::trace!("found relsize for {:?} in cache: {}", rel, nblocks); + return NeonIOResult::RelSize(nblocks); + } + CacheResult::NotFound(lsn) => lsn, + }; + + let read_lsn = self.request_lsns(not_modified_since); + match self + .client + .get_rel_size(page_api::GetRelSizeRequest { read_lsn, rel }) + .await + { + Ok(nblocks) => { + // update the cache + tracing::info!("updated relsize for {:?} in cache: {}", rel, nblocks); + self.cache.remember_rel_size(&rel, nblocks); + + NeonIOResult::RelSize(nblocks) + } + Err(err) => { + info!("tonic error: {err:?}"); + NeonIOResult::Error(0) + } + } + } + NeonIORequest::GetPageV(req) => { + self.request_get_pagev_counter.inc(); + self.request_get_pagev_nblocks_counter + .inc_by(req.nblocks as u64); + match self.handle_get_pagev_request(req).await { + Ok(()) => NeonIOResult::GetPageV, + Err(errno) => NeonIOResult::Error(errno), + } + } + NeonIORequest::PrefetchV(req) => { + self.request_prefetchv_counter.inc(); + self.request_prefetchv_nblocks_counter + .inc_by(req.nblocks as u64); + let req = *req; + tokio::spawn(async move { self.handle_prefetchv_request(&req).await }); + 
NeonIOResult::PrefetchVLaunched + } + NeonIORequest::DbSize(req) => { + self.request_db_size_counter.inc(); + let _in_progress_guard = self + .in_progress_table + .lock(RequestInProgressKey::Db(req.db_oid), req.request_id) + .await; + + // Check the cache first + let not_modified_since = match self.cache.get_db_size(req.db_oid) { + CacheResult::Found(db_size) => { + // get_page already copied the block content to the destination + return NeonIOResult::DbSize(db_size); + } + CacheResult::NotFound(lsn) => lsn, + }; + + match self + .client + .get_db_size(page_api::GetDbSizeRequest { + read_lsn: self.request_lsns(not_modified_since), + db_oid: req.db_oid, + }) + .await + { + Ok(db_size) => NeonIOResult::DbSize(db_size), + Err(err) => { + info!("tonic error: {err:?}"); + NeonIOResult::Error(0) + } + } + } + + // Write requests + NeonIORequest::WritePage(req) => { + self.request_write_page_counter.inc(); + + let rel = req.reltag(); + let _in_progress_guard = self + .in_progress_table + .lock( + RequestInProgressKey::Block(rel, req.block_number), + req.request_id, + ) + .await; + + // We must at least update the last-written LSN on the page, but also store the page + // image in the LFC while we still have it + self.cache + .remember_page(&rel, req.block_number, req.src, Lsn(req.lsn), true) + .await; + NeonIOResult::WriteOK + } + NeonIORequest::RelExtend(req) => { + self.request_rel_extend_counter.inc(); + + let rel = req.reltag(); + let _in_progress_guard = self + .in_progress_table + .lock( + RequestInProgressKey::Block(rel, req.block_number), + req.request_id, + ) + .await; + + // We must at least update the last-written LSN on the page and the relation size, + // but also store the page image in the LFC while we still have it + self.cache + .remember_page(&rel, req.block_number, req.src, Lsn(req.lsn), true) + .await; + self.cache + .remember_rel_size(&req.reltag(), req.block_number + 1); + NeonIOResult::WriteOK + } + NeonIORequest::RelZeroExtend(req) => { + self.request_rel_zero_extend_counter.inc(); + self.request_rel_zero_extend_nblocks_counter + .inc_by(req.nblocks as u64); + + // TODO: need to grab an io-in-progress lock for this? I guess not + // TODO: I think we should put the empty pages to the cache, or at least + // update the last-written LSN. + self.cache + .remember_rel_size(&req.reltag(), req.block_number + req.nblocks); + NeonIOResult::WriteOK + } + NeonIORequest::RelCreate(req) => { + self.request_rel_create_counter.inc(); + + // TODO: need to grab an io-in-progress lock for this? I guess not + self.cache.remember_rel_size(&req.reltag(), 0); + NeonIOResult::WriteOK + } + NeonIORequest::RelTruncate(req) => { + self.request_rel_truncate_counter.inc(); + + // TODO: need to grab an io-in-progress lock for this? I guess not + self.cache.remember_rel_size(&req.reltag(), req.nblocks); + NeonIOResult::WriteOK + } + NeonIORequest::RelUnlink(req) => { + self.request_rel_unlink_counter.inc(); + + // TODO: need to grab an io-in-progress lock for this? I guess not + self.cache.forget_rel(&req.reltag()); + NeonIOResult::WriteOK + } + } + } + + /// Subroutine to handle a GetPageV request, since it's a little more complicated than + /// others. + async fn handle_get_pagev_request(&'t self, req: &CGetPageVRequest) -> Result<(), i32> { + let rel = req.reltag(); + + // Check the cache first + // + // Note: Because the backends perform a direct lookup in the cache before sending + // the request to the communicator process, we expect the pages to almost never + // be already in cache. 
It could happen if: + // 1. two backends try to read the same page at the same time, but that should never + // happen because there's higher level locking in the Postgres buffer manager, or + // 2. a prefetch request finished at the same time as a backend requested the + // page. That's much more likely. + let mut cache_misses = Vec::with_capacity(req.nblocks as usize); + for i in 0..req.nblocks { + let blkno = req.block_number + i as u32; + + // note: this is deadlock-safe even though we hold multiple locks at the same time, + // because they're always acquired in the same order. + let in_progress_guard = self + .in_progress_table + .lock(RequestInProgressKey::Block(rel, blkno), req.request_id) + .await; + + let dest = req.dest[i as usize]; + let not_modified_since = match self.cache.get_page(&rel, blkno, dest).await { + Ok(CacheResult::Found(_)) => { + // get_page already copied the block content to the destination + trace!("found blk {} in rel {:?} in LFC", blkno, rel); + continue; + } + Ok(CacheResult::NotFound(lsn)) => lsn, + Err(_io_error) => return Err(-1), // FIXME errno? + }; + cache_misses.push((blkno, not_modified_since, dest, in_progress_guard)); + } + self.getpage_cache_misses_counter + .inc_by(cache_misses.len() as u64); + self.getpage_cache_hits_counter + .inc_by(req.nblocks as u64 - cache_misses.len() as u64); + + if cache_misses.is_empty() { + return Ok(()); + } + let not_modified_since = cache_misses + .iter() + .map(|(_blkno, lsn, _dest, _guard)| *lsn) + .max() + .unwrap(); + + // Construct a pageserver request for the cache misses + let block_numbers: Vec = cache_misses + .iter() + .map(|(blkno, _lsn, _dest, _guard)| *blkno) + .collect(); + let read_lsn = self.request_lsns(not_modified_since); + info!( + "sending getpage request for blocks {:?} in rel {:?} lsns {}", + block_numbers, rel, read_lsn + ); + match self + .client + .get_page(page_api::GetPageRequest { + request_id: req.request_id, + request_class: page_api::GetPageClass::Normal, + read_lsn, + rel, + block_numbers: block_numbers.clone(), + }) + .await + { + Ok(resp) => { + // Write the received page images directly to the shared memory location + // that the backend requested. + if resp.page_images.len() != block_numbers.len() { + error!( + "received unexpected response with {} page images from pageserver for a request for {} pages", + resp.page_images.len(), + block_numbers.len(), + ); + return Err(-1); + } + for (page_image, (blkno, _lsn, dest, _guard)) in + resp.page_images.into_iter().zip(cache_misses) + { + let src: &[u8] = page_image.as_ref(); + let len = std::cmp::min(src.len(), dest.bytes_total()); + unsafe { + std::ptr::copy_nonoverlapping(src.as_ptr(), dest.as_mut_ptr(), len); + }; + + // Also store it in the LFC while we have it + self.cache + .remember_page( + &rel, + blkno, + page_image, + read_lsn.not_modified_since_lsn.unwrap(), + false, + ) + .await; + } + } + Err(err) => { + info!("tonic error: {err:?}"); + return Err(-1); + } + } + Ok(()) + } + + /// Subroutine to handle a PrefetchV request, since it's a little more complicated than + /// others. + /// + /// This is very similar to a GetPageV request, but the results are only stored in the cache. 
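As in handle_get_pagev_request, the cache misses are batched and the per-block not_modified_since LSNs are folded with max(): the newest value is the weakest claim that is still true for every block in the batch, so it is safe to use for the combined pageserver request. A small illustration of that folding step, with plain u64 values standing in for the Lsn type:

fn batch_not_modified_since(per_block: &[u64]) -> Option<u64> {
    // The batch is "not modified since" the newest of the per-block values:
    // that claim holds for every block, whereas any older value might not.
    per_block.iter().copied().max()
}

fn main() {
    // Blocks individually unmodified since LSN 100, 250 and 180:
    // the combined request can safely claim "not modified since 250".
    assert_eq!(batch_not_modified_since(&[100, 250, 180]), Some(250));
}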
+ async fn handle_prefetchv_request(&'static self, req: &CPrefetchVRequest) -> Result<(), i32> { + let rel = req.reltag(); + + // Check the cache first + let mut cache_misses = Vec::with_capacity(req.nblocks as usize); + for i in 0..req.nblocks { + let blkno = req.block_number + i as u32; + + // note: this is deadlock-safe even though we hold multiple locks at the same time, + // because they're always acquired in the same order. + let in_progress_guard = self + .in_progress_table + .lock(RequestInProgressKey::Block(rel, blkno), req.request_id) + .await; + + let not_modified_since = match self.cache.page_is_cached(&rel, blkno).await { + Ok(CacheResult::Found(_)) => { + trace!("found blk {} in rel {:?} in LFC", blkno, rel); + continue; + } + Ok(CacheResult::NotFound(lsn)) => lsn, + Err(_io_error) => return Err(-1), // FIXME errno? + }; + cache_misses.push((blkno, not_modified_since, in_progress_guard)); + } + if cache_misses.is_empty() { + return Ok(()); + } + let not_modified_since = cache_misses + .iter() + .map(|(_blkno, lsn, _guard)| *lsn) + .max() + .unwrap(); + let block_numbers: Vec = cache_misses + .iter() + .map(|(blkno, _lsn, _guard)| *blkno) + .collect(); + + // TODO: spawn separate tasks for these. Use the integrated cache to keep track of the + // in-flight requests + + match self + .client + .get_page(page_api::GetPageRequest { + request_id: req.request_id, + request_class: page_api::GetPageClass::Prefetch, + read_lsn: self.request_lsns(not_modified_since), + rel, + block_numbers: block_numbers.clone(), + }) + .await + { + Ok(resp) => { + trace!( + "prefetch completed, remembering blocks {:?} in rel {:?} in LFC", + block_numbers, rel + ); + if resp.page_images.len() != block_numbers.len() { + error!( + "received unexpected response with {} page images from pageserver for a request for {} pages", + resp.page_images.len(), + block_numbers.len(), + ); + return Err(-1); + } + + for (page_image, (blkno, _lsn, _guard)) in + resp.page_images.into_iter().zip(cache_misses) + { + self.cache + .remember_page(&rel, blkno, page_image, not_modified_since, false) + .await; + } + } + Err(err) => { + info!("tonic error: {err:?}"); + return Err(-1); + } + } + Ok(()) + } +} + +impl<'t> metrics::core::Collector for CommunicatorWorkerProcessStruct<'t> { + fn desc(&self) -> Vec<&metrics::core::Desc> { + let mut descs = Vec::new(); + + descs.append(&mut self.request_counters.desc()); + descs.append(&mut self.getpage_cache_misses_counter.desc()); + descs.append(&mut self.getpage_cache_hits_counter.desc()); + descs.append(&mut self.request_nblocks_counters.desc()); + + if let Some(file_cache) = &self.cache.file_cache { + descs.append(&mut file_cache.desc()); + } + descs.append(&mut self.cache.desc()); + descs.append(&mut self.allocator_metrics.desc()); + + descs + } + fn collect(&self) -> Vec { + let mut values = Vec::new(); + + values.append(&mut self.request_counters.collect()); + values.append(&mut self.getpage_cache_misses_counter.collect()); + values.append(&mut self.getpage_cache_hits_counter.collect()); + values.append(&mut self.request_nblocks_counters.collect()); + + if let Some(file_cache) = &self.cache.file_cache { + values.append(&mut file_cache.collect()); + } + values.append(&mut self.cache.collect()); + values.append(&mut self.allocator_metrics.collect()); + + values + } +} diff --git a/pgxn/neon/communicator/src/worker_process/metrics_exporter.rs b/pgxn/neon/communicator/src/worker_process/metrics_exporter.rs new file mode 100644 index 0000000000..9b0891b5aa --- /dev/null +++ 
b/pgxn/neon/communicator/src/worker_process/metrics_exporter.rs @@ -0,0 +1,82 @@ +//! Export information about Postgres, the communicator process, file cache etc. as +//! prometheus metrics. + +use axum::Router; +use axum::body::Body; +use axum::extract::State; +use axum::response::Response; +use http::StatusCode; +use http::header::CONTENT_TYPE; + +use metrics::proto::MetricFamily; +use metrics::{Encoder, TextEncoder}; + +use std::path::PathBuf; + +use tokio::net::UnixListener; + +use crate::worker_process::main_loop::CommunicatorWorkerProcessStruct; + +impl<'a> CommunicatorWorkerProcessStruct<'a> { + pub(crate) async fn launch_exporter_task(&'static self) { + use axum::routing::get; + let app = Router::new() + .route("/metrics", get(get_metrics)) + .route("/dump_cache_map", get(dump_cache_map)) + .with_state(self); + + // Listen on unix domain socket, in the data directory. That should be unique. + let path = PathBuf::from(".metrics.socket"); + + let listener = UnixListener::bind(path.clone()).unwrap(); + + tokio::spawn(async { + tracing::info!("metrics listener spawned"); + axum::serve(listener, app).await.unwrap() + }); + } +} + +async fn dump_cache_map( + State(state): State<&CommunicatorWorkerProcessStruct<'static>>, +) -> Response { + let mut buf: Vec = Vec::new(); + state.cache.dump_map(&mut buf); + + Response::builder() + .status(StatusCode::OK) + .header(CONTENT_TYPE, "application/text") + .body(Body::from(buf)) + .unwrap() +} + +/// Expose Prometheus metrics. +async fn get_metrics(State(state): State<&CommunicatorWorkerProcessStruct<'static>>) -> Response { + use metrics::core::Collector; + let metrics = state.collect(); + + // When we call TextEncoder::encode() below, it will immediately return an + // error if a metric family has no metrics, so we need to preemptively + // filter out metric families with no metrics. + let metrics = metrics + .into_iter() + .filter(|m| !m.get_metric().is_empty()) + .collect::>(); + + let encoder = TextEncoder::new(); + let mut buffer = vec![]; + + if let Err(e) = encoder.encode(&metrics, &mut buffer) { + Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .header(CONTENT_TYPE, "application/text") + .body(Body::from(e.to_string())) + .unwrap() + } else { + Response::builder() + .status(StatusCode::OK) + .header(CONTENT_TYPE, encoder.format_type()) + .body(Body::from(buffer)) + .unwrap() + } +} diff --git a/pgxn/neon/communicator/src/worker_process/mod.rs b/pgxn/neon/communicator/src/worker_process/mod.rs new file mode 100644 index 0000000000..064d106d4c --- /dev/null +++ b/pgxn/neon/communicator/src/worker_process/mod.rs @@ -0,0 +1,14 @@ +//! This code runs in the communicator worker process. This provides +//! the glue code to: +//! +//! - launch the 'processor', +//! - receive IO requests from backends and pass them to the processor, +//! - write results back to backends. + +mod callbacks; +mod logging; +mod main_loop; +mod metrics_exporter; +mod worker_interface; + +mod in_progress_ios; diff --git a/pgxn/neon/communicator/src/worker_process/worker_interface.rs b/pgxn/neon/communicator/src/worker_process/worker_interface.rs new file mode 100644 index 0000000000..9aaa483c9e --- /dev/null +++ b/pgxn/neon/communicator/src/worker_process/worker_interface.rs @@ -0,0 +1,121 @@ +//! 
Functions called from the C code in the worker process + +use std::collections::HashMap; +use std::ffi::{CStr, c_char}; +use std::path::PathBuf; + +use tracing::error; + +use crate::init::CommunicatorInitStruct; +use crate::worker_process::main_loop; +use crate::worker_process::main_loop::CommunicatorWorkerProcessStruct; + +/// Launch the communicator's tokio tasks, which do most of the work. +/// +/// The caller has initialized the process as a regular PostgreSQL +/// background worker process. The shared memory segment used to +/// communicate with the backends has been allocated and initialized +/// earlier, at postmaster startup, in rcommunicator_shmem_init(). +#[unsafe(no_mangle)] +pub extern "C" fn communicator_worker_process_launch( + cis: Box, + tenant_id: *const c_char, + timeline_id: *const c_char, + auth_token: *const c_char, + shard_map: *mut *mut c_char, + nshards: u32, + file_cache_path: *const c_char, + initial_file_cache_size: u64, +) -> &'static CommunicatorWorkerProcessStruct<'static> { + // Convert the arguments into more convenient Rust types + let tenant_id = unsafe { CStr::from_ptr(tenant_id) }.to_str().unwrap(); + let timeline_id = unsafe { CStr::from_ptr(timeline_id) }.to_str().unwrap(); + let auth_token = if auth_token.is_null() { + None + } else { + Some( + unsafe { CStr::from_ptr(auth_token) } + .to_str() + .unwrap() + .to_string(), + ) + }; + let file_cache_path = { + if file_cache_path.is_null() { + None + } else { + let c_str = unsafe { CStr::from_ptr(file_cache_path) }; + Some(PathBuf::from(c_str.to_str().unwrap())) + } + }; + let shard_map = parse_shard_map(nshards, shard_map); + + // start main loop + let runtime = tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("communicator thread") + .build() + .unwrap(); + + let worker_struct = runtime.block_on(main_loop::init( + cis, + tenant_id.to_string(), + timeline_id.to_string(), + auth_token, + shard_map, + initial_file_cache_size, + file_cache_path, + )); + let worker_struct = Box::leak(Box::new(worker_struct)); + + let main_loop_handle = runtime.spawn(worker_struct.run()); + + runtime.spawn(async { + let err = main_loop_handle.await.unwrap_err(); + error!("error: {err:?}"); + }); + + runtime.block_on(worker_struct.launch_exporter_task()); + + // keep the runtime running after we exit this function + Box::leak(Box::new(runtime)); + + worker_struct +} + +/// Convert the "shard map" from an array of C strings, indexed by shard no to a rust HashMap +fn parse_shard_map( + nshards: u32, + shard_map: *mut *mut c_char, +) -> HashMap { + use utils::shard::*; + + assert!(nshards <= u8::MAX as u32); + + let mut result: HashMap = HashMap::new(); + let mut p = shard_map; + + for i in 0..nshards { + let c_str = unsafe { CStr::from_ptr(*p) }; + + p = unsafe { p.add(1) }; + + let s = c_str.to_str().unwrap(); + let k = if nshards > 1 { + ShardIndex::new(ShardNumber(i as u8), ShardCount(nshards as u8)) + } else { + ShardIndex::unsharded() + }; + result.insert(k, s.into()); + } + result +} + +/// Inform the rust code about a configuration change +#[unsafe(no_mangle)] +pub extern "C" fn communicator_worker_config_reload( + proc_handle: &'static CommunicatorWorkerProcessStruct<'static>, + file_cache_size: u64, +) { + proc_handle.cache.resize_file_cache(file_cache_size as u32); +} diff --git a/pgxn/neon/communicator_new.c b/pgxn/neon/communicator_new.c new file mode 100644 index 0000000000..b809358c45 --- /dev/null +++ b/pgxn/neon/communicator_new.c @@ -0,0 +1,1190 @@ 
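One detail of communicator_worker_process_launch above is easy to miss: both the worker struct and the tokio runtime are deliberately leaked with Box::leak so they outlive the C entry point that created them; dropping the runtime on return would tear down every spawned task. A stripped-down sketch of that pattern, assuming the tokio crate with the multi-thread runtime and a placeholder Worker type standing in for CommunicatorWorkerProcessStruct:

struct Worker;

impl Worker {
    async fn run(&'static self) {
        // The long-running request loop would live here.
    }
}

fn launch() -> &'static Worker {
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap();

    // Leak the worker so spawned tasks can borrow it with a 'static lifetime.
    let worker: &'static Worker = Box::leak(Box::new(Worker));
    runtime.spawn(worker.run());

    // Leak the runtime too: dropping it here would shut those tasks down.
    Box::leak(Box::new(runtime));
    worker
}

fn main() {
    let _worker = launch();
}

Leaking is appropriate here because the worker process keeps both objects for its entire lifetime anyway; there is nothing to reclaim before process exit.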
+/*------------------------------------------------------------------------- + * + * communicator_new.c + * Functions for communicating with remote pageservers. + * + * This is the "new" communicator. It consists of functions that + * are called from the smgr implementation, in pagestore_smgr.c. + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include + +#include "access/xlog.h" +#include "access/xlogdefs.h" +#if PG_VERSION_NUM >= 150000 +#include "access/xlogrecovery.h" +#endif +#include "access/xlog_internal.h" +#include "access/xlogutils.h" +#include "executor/instrument.h" +#include "miscadmin.h" +#include "postmaster/bgworker.h" +#include "postmaster/interrupt.h" +#include "postmaster/postmaster.h" +#include "replication/walsender.h" +#include "storage/fd.h" +#include "storage/ipc.h" +#include "storage/latch.h" +#include "storage/pmsignal.h" +#include "storage/procarray.h" +#if PG_VERSION_NUM >= 170000 +#include "storage/procnumber.h" +#endif +#include "storage/spin.h" +#include "tcop/tcopprot.h" + +#include "communicator_new.h" +#include "neon.h" +#include "neon_perf_counters.h" +#include "pagestore_client.h" + +/* + * FIXME: these are in file_cache.h, but I don't want to #include that + * here. This code shouldn't be using the C file cache for anything else than + * the GUCs. + */ +extern int lfc_max_size; +extern int lfc_size_limit; +extern char *lfc_path; + + +/* the rust bindings, generated by cbindgen */ +#include "communicator/communicator_bindings.h" + +#define MaxProcs (MaxBackends + NUM_AUXILIARY_PROCS) + +static CommunicatorInitStruct *cis; +static CommunicatorBackendStruct *my_bs; + +static File cache_file = 0; + +typedef struct CommunicatorShmemPerBackendData +{ + /* + * Latch used to notify backend of IO completion. We cannot use the + * standard process latch (MyProc->latch) because we cannot clear that + * latch as part of the IO handling, or we might cause the caller to miss + * some other events. + */ + Latch io_completion_latch; + + /* + * Request counter, for assigning unique request IDs. + * + * This is only accessed by the backend itself, but we keep it in shared + * memory so that it survives across backend processes that are assigned + * the same proc number, to avoid reusing request IDs too fast. + */ + uint64 request_counter; + + /* + * Normally, when reading or writing pages from shared buffer cache, the + * worker process can operate directly on the shared buffer. But when + * working with a local buffer, we use this "bounce buffer" to pass the + * data to the worker process. + * + * TODO: That's slow, because it incurs an extra memory copy, and there's + * currently only one of these per backend, which means you can have only + * one such IO in progress at a time. 
+ */ + PGIOAlignedBlock bounce_buffer; +} CommunicatorShmemPerBackendData; + +typedef struct CommunicatorShmemData +{ + int dummy; + + CommunicatorShmemPerBackendData backends[]; /* MaxProcs */ + + /* rust-managed shmem area follows at next MAXALIGN boundary */ +} CommunicatorShmemData; + +static CommunicatorShmemData *communicator_shmem_ptr; + +#define MyIOCompletionLatch (&communicator_shmem_ptr->backends[MyProcNumber].io_completion_latch) + +#define MAX_INFLIGHT_ASYNC_REQUESTS 5 + +/* request indexes of (prefetch) requests that have been started */ +static int inflight_requests[MAX_INFLIGHT_ASYNC_REQUESTS]; +static int num_inflight_requests = 0; + +static int my_start_slot_idx; +static int my_end_slot_idx; +static int my_next_slot_idx; + +static int start_request(NeonIORequest *request, struct NeonIOResult *immediate_result_p); +static void wait_request_completion(int request_idx, struct NeonIOResult *result_p); +static void perform_request(NeonIORequest *request, struct NeonIOResult *result_p); +static void process_inflight_requests(void); + +static bool bounce_needed(void *buffer); +static void *bounce_buf(void); +static void *bounce_write_if_needed(void *buffer); + +PGDLLEXPORT void communicator_new_bgworker_main(Datum main_arg); +static void communicator_new_backend_exit(int code, Datum arg); + +/* + * Request ID assignment. + * + * Request IDs better be unique across all this compute's in-flight requests, + * because they are used to match up responses to requests in the gRPC client + * code. Furthermore, for logging and debugging purposes, it's nice to avoid + * reusing them too fast, so that you can easily match up logs from different + * components based on the request id. + * + * The request IDs we generate consist of two parts: the backend's ProcNumber + * and a counter that can wrap-around. + */ +StaticAssertDecl(MAX_BACKENDS == 0x3FFFF, "Unexpected MAX_BACKENDS"); +#define PROCNUMBER_BITS UINT64CONST(18) +#define REQUEST_COUNTER_BITS UINT64CONST(46) +#define REQUEST_COUNTER_MASK ((UINT64CONST(1) << REQUEST_COUNTER_BITS) - 1) + +static inline uint64 +assign_request_id(void) +{ + uint64 counter; + uint64 result; + + counter = communicator_shmem_ptr->backends[MyProcNumber].request_counter++; + result = (((uint64) MyProcNumber) << PROCNUMBER_BITS) | (counter & REQUEST_COUNTER_MASK); + elog(DEBUG5, "assigned request id " UINT64_FORMAT " (counter " UINT64_FORMAT ", procno %d)", result, counter, (int) MyProcNumber); + + return result; +} + +/**** Initialization functions. 
These run in postmaster ****/ + +void +pg_init_communicator_new(void) +{ + BackgroundWorker bgw; + + if (pageserver_connstring[0] == '\0' && pageserver_grpc_urls[0] == '\0') + { + /* running with local storage */ + return; + } + + /* Initialize the background worker process */ + memset(&bgw, 0, sizeof(bgw)); + bgw.bgw_flags = BGWORKER_SHMEM_ACCESS; + bgw.bgw_start_time = BgWorkerStart_PostmasterStart; + snprintf(bgw.bgw_library_name, BGW_MAXLEN, "neon"); + snprintf(bgw.bgw_function_name, BGW_MAXLEN, "communicator_new_bgworker_main"); + snprintf(bgw.bgw_name, BGW_MAXLEN, "Storage communicator process"); + snprintf(bgw.bgw_type, BGW_MAXLEN, "Storage communicator process"); + bgw.bgw_restart_time = 5; + bgw.bgw_notify_pid = 0; + bgw.bgw_main_arg = (Datum) 0; + + RegisterBackgroundWorker(&bgw); +} + +static size_t +communicator_new_shmem_size(void) +{ + size_t size = 0; + int num_request_slots; + + size += MAXALIGN( + offsetof(CommunicatorShmemData, backends) + + MaxProcs * sizeof(CommunicatorShmemPerBackendData) + ); + + num_request_slots = MaxProcs * MAX_INFLIGHT_ASYNC_REQUESTS; + + /* space needed by the rust code */ + size += rcommunicator_shmem_size(num_request_slots); + + return size; +} + +void +communicator_new_shmem_request(void) +{ + RequestAddinShmemSpace(communicator_new_shmem_size()); +} + +void +communicator_new_shmem_startup(void) +{ + bool found; + int pipefd[2]; + int rc; + size_t communicator_size; + size_t shmem_size; + void *shmem_ptr; + uint64 initial_file_cache_size; + uint64 max_file_cache_size; + + rc = pipe(pipefd); + if (rc != 0) + ereport(ERROR, + (errcode_for_file_access(), + errmsg_internal("could not create pipe between neon communicator and backends : %m"))); + if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1) + elog(FATAL, "fcntl(F_SETFL) failed on read-end of communicator pipe: %m"); + if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1) + elog(FATAL, "fcntl(F_SETFL) failed on write-end of communicator pipe: %m"); + + shmem_size = communicator_new_shmem_size(); + shmem_ptr = ShmemInitStruct("Communicator shmem state", + shmem_size, + &found); + Assert(!found); + + /* Initialize the C-managed parts */ + communicator_shmem_ptr = (CommunicatorShmemData *) shmem_ptr; + communicator_size = MAXALIGN(offsetof(CommunicatorShmemData, backends) + MaxProcs * sizeof(CommunicatorShmemPerBackendData)); + shmem_ptr = (char *) shmem_ptr + communicator_size; + shmem_size -= communicator_size; + + for (int i = 0; i < MaxProcs; i++) + { + InitSharedLatch(&communicator_shmem_ptr->backends[i].io_completion_latch); + communicator_shmem_ptr->backends[i].request_counter = 0; + } + + /* lfc_size_limit is in MBs */ + initial_file_cache_size = lfc_size_limit * (1024 * 1024 / BLCKSZ); + max_file_cache_size = lfc_max_size * (1024 * 1024 / BLCKSZ); + if (initial_file_cache_size < 100) + initial_file_cache_size = 100; + if (max_file_cache_size < 100) + max_file_cache_size = 100; + + /* Initialize the rust-managed parts */ + cis = rcommunicator_shmem_init(pipefd[0], pipefd[1], MaxProcs * MAX_INFLIGHT_ASYNC_REQUESTS, shmem_ptr, shmem_size, + initial_file_cache_size, max_file_cache_size); +} + +/**** Worker process functions. 
These run in the communicator worker process ****/ + +/* Entry point for the communicator bgworker process */ +void +communicator_new_bgworker_main(Datum main_arg) +{ + char **connstrs; + shardno_t num_shards; + struct LoggingState *logging; + char errbuf[1000]; + int elevel; + uint64 file_cache_size; + const struct CommunicatorWorkerProcessStruct *proc_handle; + + /* + * Pretend that this process is a WAL sender. That affects the shutdown + * sequence: WAL senders are shut down last, after the final checkpoint + * has been written. That's what we want for the communicator process too + */ + am_walsender = true; + MarkPostmasterChildWalSender(); + + /* lfc_size_limit is in MBs */ + file_cache_size = lfc_size_limit * (1024 * 1024 / BLCKSZ); + if (file_cache_size < 100) + file_cache_size = 100; + + /* Establish signal handlers. */ + pqsignal(SIGUSR1, procsignal_sigusr1_handler); + /* + * Postmaster sends us SIGUSR2 when all regular backends and bgworkers + * have exited, and it's time for us to exit too + */ + pqsignal(SIGUSR2, die); + pqsignal(SIGHUP, SignalHandlerForConfigReload); + pqsignal(SIGTERM, die); + + BackgroundWorkerUnblockSignals(); + + get_shard_map(&connstrs, &num_shards); + + logging = configure_logging(); + + proc_handle = communicator_worker_process_launch( + cis, + neon_tenant, + neon_timeline, + neon_auth_token, + connstrs, + num_shards, + lfc_path, + file_cache_size); + cis = NULL; + + elog(LOG, "communicator threads started"); + for (;;) + { + int32 rc; + + CHECK_FOR_INTERRUPTS(); + + if (ConfigReloadPending) + { + ConfigReloadPending = false; + ProcessConfigFile(PGC_SIGHUP); + + /* lfc_size_limit is in MBs */ + file_cache_size = lfc_size_limit * (1024 * 1024 / BLCKSZ); + if (file_cache_size < 100) + file_cache_size = 100; + communicator_worker_config_reload(proc_handle, file_cache_size); + } + + for (;;) + { + rc = pump_logging(logging, (uint8 *) errbuf, sizeof(errbuf), &elevel); + if (rc == 0) + { + /* nothing to do */ + break; + } + else if (rc == 1) + { + /* Because we don't want to exit on error */ + if (elevel == ERROR) + elevel = LOG; + if (elevel == INFO) + elevel = LOG; + elog(elevel, "[COMMUNICATOR] %s", errbuf); + } + else if (rc == -1) + { + elog(ERROR, "logging channel was closed unexpectedly"); + } + } + + (void) WaitLatch(MyLatch, + WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, + 0, + PG_WAIT_EXTENSION); + ResetLatch(MyLatch); + } +} + +/* + * Callbacks from the rust code, in the communicator process. + * + * NOTE: These must be thread safe! It's very limited which PostgreSQL functions you can use!!! + * + * NOTE: the signatures of these better match the Rust definitions! + */ + +void +notify_proc_unsafe(int procno) +{ + SetLatch(&communicator_shmem_ptr->backends[procno].io_completion_latch); + +} + +void +callback_set_my_latch_unsafe(void) +{ + SetLatch(MyLatch); +} + +/* + * FIXME: The logic from neon_get_request_lsns() needs to go here, except for + * the last-written LSN cache stuff, which is managed by the rust code now. + */ +uint64_t +callback_get_request_lsn_unsafe(void) +{ + /* + * NB: be very careful with what you do here! This is called from tokio + * threads, so anything tha tries to take LWLocks is unsafe, for example. 
+ * + * RecoveryInProgress() is OK + */ + if (RecoveryInProgress()) + { + XLogRecPtr replay_lsn = GetXLogReplayRecPtr(NULL); + + return replay_lsn; + } + else + { + XLogRecPtr flushlsn; + +#if PG_VERSION_NUM >= 150000 + flushlsn = GetFlushRecPtr(NULL); +#else + flushlsn = GetFlushRecPtr(); +#endif + + return flushlsn; + } +} + +/**** Backend functions. These run in each backend ****/ + +/* Initialize per-backend private state */ +void +communicator_new_init(void) +{ + Assert(cis != NULL); + Assert(my_bs == NULL); + + if (pageserver_connstring[0] == '\0' && pageserver_grpc_urls[0] == '\0') + { + /* running with local storage */ + return; + } + + /* The communicator process performs different initialization */ + if (MyBgworkerEntry && strcmp(MyBgworkerEntry->bgw_function_name, "communicator_new_bgworker_main") == 0) + return; + + OwnLatch(MyIOCompletionLatch); + + my_bs = rcommunicator_backend_init(cis, MyProcNumber); + cis = NULL; + + /* + * Check the status of all the request slots. A previous backend with the + * same proc number might've left behind some prefetch requests or aborted + * requests + */ + my_start_slot_idx = MyProcNumber * MAX_INFLIGHT_ASYNC_REQUESTS; + my_end_slot_idx = my_start_slot_idx + MAX_INFLIGHT_ASYNC_REQUESTS; + my_next_slot_idx = my_start_slot_idx; + + for (int idx = my_start_slot_idx; idx < my_end_slot_idx; idx++) + { + struct NeonIOResult result; + + if (bcomm_get_request_slot_status(my_bs, idx)) + { + elog(LOG, "processing leftover IO request from previous session at slot %d", idx); + wait_request_completion(idx, &result); + + /* FIXME: log the result if it was an error */ + } + } + + /* + * Arrange to clean up at backend exit. + */ + on_shmem_exit(communicator_new_backend_exit, 0); +} + +static void +communicator_new_backend_exit(int code, Datum arg) +{ + DisownLatch(MyIOCompletionLatch); +} + +/* + * prefetch_register_bufferv() - register and prefetch buffers + * + * Register that we may want the contents of BufferTag in the near future. + * This is used when issuing a speculative prefetch request, but also when + * performing a synchronous request and need the buffer right now. + * + * When performing a prefetch rather than a synchronous request, + * is_prefetch==true. Currently, it only affects how the request is accounted + * in the perf counters. + * + * NOTE: this function may indirectly update MyPState->pfs_hash; which + * invalidates any active pointers into the hash table. + */ +void +communicator_new_prefetch_register_bufferv(NRelFileInfo rinfo, ForkNumber forkNum, + BlockNumber blockno, BlockNumber nblocks) +{ + NeonIORequest request = { + .tag = NeonIORequest_PrefetchV, + .prefetch_v = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + .block_number = blockno, + .nblocks = nblocks, + } + }; + struct NeonIOResult result; + + elog(DEBUG5, "prefetch called for rel %u/%u/%u.%u block %u (%u blocks)", + RelFileInfoFmt(rinfo), forkNum, blockno, nblocks); + + if (num_inflight_requests >= MAX_INFLIGHT_ASYNC_REQUESTS) + process_inflight_requests(); + + /* Fire and forget the request */ + (void) start_request(&request, &result); +} + +/* + * Does the LFC contains the given buffer? + * + * This is used in WAL replay in read replica, to skip updating pages that are + * not in cache. 
+ */ +bool +communicator_new_cache_contains(NRelFileInfo rinfo, ForkNumber forkNum, + BlockNumber blockno) +{ + return bcomm_cache_contains(my_bs, + NInfoGetSpcOid(rinfo), + NInfoGetDbOid(rinfo), + NInfoGetRelNumber(rinfo), + forkNum, + blockno); +} + +/* + * Drain all in-flight requests from the queue. + * + * This is used to drain prefetch requests that have been acknowledged by the + * communicator, before we perform a synchronous request. (With Postgres v18 + * and async IO, managing the in-flight requests will get more complicated, + * but this will do for now.) + * + * We can also have some an in-flight request queued up, if the query is + * cancelled while a synchronous request is being processed, in + * wait_request_completion(). + */ +static void +process_inflight_requests(void) +{ + struct NeonIOResult result; + + /* FIXME: log errors */ + for (int i = 0; i < num_inflight_requests; i++) + { + elog(DEBUG4, "processing prefetch request with idx %d", inflight_requests[i]); + wait_request_completion(inflight_requests[i], &result); + } + num_inflight_requests = 0; +} + +/* + * Perform an IO request in a synchronous fashion. + * + * Returns a pointer to the result slot. It is valid until the next time a + * request is submitted. + */ +static void +perform_request(NeonIORequest *request, struct NeonIOResult *result_p) +{ + int request_idx; + + process_inflight_requests(); + + request_idx = start_request(request, result_p); + if (request_idx == -1) + { + /* it was completed immediately */ + return; + } + wait_request_completion(request_idx, result_p); + Assert(num_inflight_requests == 1); + Assert(inflight_requests[0] == request_idx); + num_inflight_requests = 0; +} + +static int +start_request(NeonIORequest *request, struct NeonIOResult *immediate_result_p) +{ + int request_idx; + + Assert(num_inflight_requests < MAX_INFLIGHT_ASYNC_REQUESTS); + + request_idx = bcomm_start_io_request(my_bs, my_next_slot_idx, request, immediate_result_p); + if (request_idx == -1) + { + /* -1 means the request was satisfied immediately. */ + elog(DEBUG4, "communicator request %lu was satisfied immediately", request->rel_exists.request_id); + return -1; + } + Assert(request_idx == my_next_slot_idx); + my_next_slot_idx++; + if (my_next_slot_idx == my_end_slot_idx) + my_next_slot_idx = my_start_slot_idx; + inflight_requests[num_inflight_requests] = request_idx; + num_inflight_requests++; + + elog(LOG, "started communicator request %lu at slot %d", request->rel_exists.request_id, request_idx); + return request_idx; +} + +static void +wait_request_completion(int request_idx, struct NeonIOResult *result_p) +{ + int32_t poll_res; + TimestampTz start_time; + + /* fixme: check 'request_idx' ? */ + + start_time = GetCurrentTimestamp(); + for (;;) + { + TimestampTz now; + + ResetLatch(MyIOCompletionLatch); + + poll_res = bcomm_poll_request_completion(my_bs, request_idx, result_p); + if (poll_res == -1) + { + /* + * Wake up periodically for CHECK_FOR_INTERRUPTS(). Because + * we wait on MyIOCompletionLatch rather than MyLatch, we won't be + * woken up for the standard interrupts. + */ + long timeout_ms = 1000; + + /* + * If the query is cancelled, we will bail out here, and leave the + * in-flight request in the request queue. It will be waited for + * again and processed when the next request is issued, in + * process_inflight_requests(). + */ + CHECK_FOR_INTERRUPTS(); + + /* + * FIXME: as a temporary hack, panic if we don't get a response promptly. 
+ * Lots of regression tests are getting stuck and failing at the moment, + * this makes them fail a little faster, which it faster to iterate. + * This needs to be removed once more regression tests are passing. + */ + now = GetCurrentTimestamp(); + if (now - start_time > 60 * 1000 * 1000) + { + elog(PANIC, "timed out waiting for response from communicator process at slot %d", request_idx); + } + + (void) WaitLatch(MyIOCompletionLatch, + WL_EXIT_ON_PM_DEATH | WL_LATCH_SET | WL_TIMEOUT, + timeout_ms, + WAIT_EVENT_NEON_PS_STARTING); + continue; /* still busy */ + } + else if (poll_res == 0) + { + return; + } + else + { + elog(ERROR, "unexpected return code from bcomm_poll_request_completion()"); + } + } +} + +/* + * Does the physical file exist? + */ +bool +communicator_new_rel_exists(NRelFileInfo rinfo, ForkNumber forkNum) +{ + NeonIORequest request = { + .tag = NeonIORequest_RelExists, + .rel_exists = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + } + }; + NeonIOResult result; + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_RelExists: + return result.rel_exists; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not check existence of rel %u/%u/%u.%u: %s", + RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for RelExists operation: %d", result.tag); + break; + } +} + +/* + * Read N consecutive pages from a relation + */ +void +communicator_new_read_at_lsnv(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno, + void **buffers, BlockNumber nblocks) +{ + NeonIOResult result; + CCachedGetPageVResult cached_result; + void *bounce_buf_used = NULL; + int request_idx; + NeonIORequest request = { + .tag = NeonIORequest_GetPageV, + .get_page_v = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + .block_number = blockno, + .nblocks = nblocks, + } + }; + + elog(DEBUG5, "getpagev called for rel %u/%u/%u.%u block %u (%u blocks)", + RelFileInfoFmt(rinfo), forkNum, blockno, nblocks); + + /* + * Fill in the destination buffer pointers in the request. If the + * destination is a buffer in shared memory, the communicator process can + * write the result directly to the buffer. Otherwise, we need to use a + * "bounce buffer". We only have one bounce buffer, so if bouncing is + * needed and multiple pages were requested, we need to serially perform a + * separate request for each page. Hopefully that is rare. 
+ */ + if (nblocks == 1) + { + if (bounce_needed(buffers[0])) + { + bounce_buf_used = bounce_buf(); + request.get_page_v.dest[0].ptr = bounce_buf_used; + } + else + request.get_page_v.dest[0].ptr = buffers[0]; + } + else + { + for (int i = 0; i < nblocks; i++) + { + if (bounce_needed(buffers[i])) + { + /* Split the vector-request into single page requests */ + for (int j = 0; j < nblocks; j++) + { + communicator_new_read_at_lsnv(rinfo, forkNum, blockno + j, + &buffers[j], 1); + } + return; + } + request.get_page_v.dest[i].ptr = buffers[i]; + } + } + + process_inflight_requests(); + +retry: + request_idx = bcomm_start_get_page_v_request(my_bs, my_next_slot_idx, &request, &cached_result); + if (request_idx == -1) + { + bool completed; + + /* + * LFC hit, but we are responsible for completing the I/O on the local + * file + */ + if (cache_file == 0) + cache_file = PathNameOpenFile(lfc_path, O_RDONLY | PG_BINARY); + + for (int i = 0; i < nblocks; i++) + { + uint64_t cached_block = cached_result.cache_block_numbers[i]; + char *buffer = buffers[i]; + ssize_t bytes_total = 0; + + while (bytes_total < BLCKSZ) + { + ssize_t nbytes; + + nbytes = FileRead(cache_file, buffer + bytes_total, BLCKSZ - bytes_total, cached_block * BLCKSZ + bytes_total, WAIT_EVENT_NEON_LFC_READ); + if (nbytes == -1) + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read block %lu in local cache file: %m", + cached_block))); + if (nbytes == 0) + { + /* + * FIXME: if the file was concurrently truncated, I guess + * this is expected. We should finish the read by calling + * bcomm_finish_cache_read(), and only throw the error if + * it reported success. + */ + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read block %lu in local cache file (unexpected EOF)", + cached_block))); + } + bytes_total += nbytes; + } + } + completed = bcomm_finish_cache_read(my_bs); + if (!completed) + { + elog(DEBUG1, "read from local cache file was superseded by concurrent update"); + goto retry; + } + return; + } + Assert(request_idx == my_next_slot_idx); + my_next_slot_idx++; + if (my_next_slot_idx == my_end_slot_idx) + my_next_slot_idx = my_start_slot_idx; + inflight_requests[num_inflight_requests] = request_idx; + num_inflight_requests++; + + wait_request_completion(request_idx, &result); + Assert(num_inflight_requests == 1); + Assert(inflight_requests[0] == request_idx); + num_inflight_requests = 0; + switch (result.tag) + { + case NeonIOResult_GetPageV: + if (bounce_buf_used) + memcpy(buffers[0], bounce_buf_used, BLCKSZ); + return; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read block %u in rel %u/%u/%u.%u: %s", + blockno, RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for GetPage operation: %d", result.tag); + break; + } +} + +/* + * neon_nblocks() -- Get the number of blocks stored in a relation. 
+ */ +BlockNumber +communicator_new_rel_nblocks(NRelFileInfo rinfo, ForkNumber forkNum) +{ + NeonIORequest request = { + .tag = NeonIORequest_RelSize, + .rel_size = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + } + }; + NeonIOResult result; + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_RelSize: + return result.rel_size; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read size of rel %u/%u/%u.%u: %s", + RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for RelSize operation: %d", result.tag); + break; + } +} + +/* + * neon_db_size() -- Get the size of the database in bytes. + */ +int64 +communicator_new_dbsize(Oid dbNode) +{ + NeonIORequest request = { + .tag = NeonIORequest_DbSize, + .db_size = { + .request_id = assign_request_id(), + .db_oid = dbNode, + } + }; + NeonIOResult result; + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_DbSize: + return (int64) result.db_size; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not read database size of database %u: %s", + dbNode, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for DbSize operation: %d", result.tag); + break; + } +} + +int +communicator_new_read_slru_segment(SlruKind kind, int64 segno, void *buffer) +{ + /* TODO */ + elog(ERROR, "not implemented"); +} + +/* Write requests */ +void +communicator_new_write_page(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno, + const void *buffer, XLogRecPtr lsn) +{ + void *src = bounce_write_if_needed((void *) buffer); + NeonIORequest request = { + .tag = NeonIORequest_WritePage, + .write_page = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + .block_number = blockno, + .lsn = lsn, + .src.ptr = src, + } + }; + NeonIOResult result; + + /* FIXME: see `request_lsns` in main_loop.rs for why this is needed */ + XLogSetAsyncXactLSN(lsn); + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_WriteOK: + return; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not write block %u in rel %u/%u/%u.%u: %s", + blockno, RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for WritePage operation: %d", result.tag); + break; + } +} + +void +communicator_new_rel_extend(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno, + const void *buffer, XLogRecPtr lsn) +{ + void *src = bounce_write_if_needed((void *) buffer); + NeonIORequest request = { + .tag = NeonIORequest_RelExtend, + .rel_extend = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + .block_number = blockno, + .lsn = lsn, + .src.ptr = src, + } + }; + NeonIOResult result; + + /* FIXME: see `request_lsns` in main_loop.rs for why this is needed */ + XLogSetAsyncXactLSN(lsn); + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_WriteOK: + return; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could 
not extend to block %u in rel %u/%u/%u.%u: %s", + blockno, RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for Extend operation: %d", result.tag); + break; + } +} + +void +communicator_new_rel_zeroextend(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno, + BlockNumber nblocks, XLogRecPtr lsn) +{ + NeonIORequest request = { + .tag = NeonIORequest_RelZeroExtend, + .rel_zero_extend = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + .block_number = blockno, + .nblocks = nblocks, + .lsn = lsn, + } + }; + NeonIOResult result; + + /* FIXME: see `request_lsns` in main_loop.rs for why this is needed */ + XLogSetAsyncXactLSN(lsn); + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_WriteOK: + return; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not zeroextend to block %u in rel %u/%u/%u.%u: %s", + blockno, RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for ZeroExtend operation: %d", result.tag); + break; + } +} + +void +communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum) +{ + NeonIORequest request = { + .tag = NeonIORequest_RelCreate, + .rel_create = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + } + }; + NeonIOResult result; + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_WriteOK: + return; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not create rel %u/%u/%u.%u: %s", + RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for Create operation: %d", result.tag); + break; + } +} + +void +communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks) +{ + NeonIORequest request = { + .tag = NeonIORequest_RelTruncate, + .rel_truncate = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + .nblocks = nblocks, + } + }; + NeonIOResult result; + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_WriteOK: + return; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not truncate rel %u/%u/%u.%u to %u blocks: %s", + RelFileInfoFmt(rinfo), forkNum, nblocks, pg_strerror(result.error)))); + break; + default: + elog(ERROR, "unexpected result for Truncate operation: %d", result.tag); + break; + } +} + +void +communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum) +{ + NeonIORequest request = { + .tag = NeonIORequest_RelUnlink, + .rel_unlink = { + .request_id = assign_request_id(), + .spc_oid = NInfoGetSpcOid(rinfo), + .db_oid = NInfoGetDbOid(rinfo), + .rel_number = NInfoGetRelNumber(rinfo), + .fork_number = forkNum, + } + }; + NeonIOResult result; + + perform_request(&request, &result); + switch (result.tag) + { + case NeonIOResult_WriteOK: + return; + case NeonIOResult_Error: + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not unlink rel %u/%u/%u.%u: %s", + RelFileInfoFmt(rinfo), forkNum, pg_strerror(result.error)))); + break; + 
default: + elog(ERROR, "unexpected result for Unlink operation: %d", result.tag); + break; + } +} + +/* + * The worker process can read / write shared buffers directly. But if smgrread() or + * smgrwrite() is called with a private temporary buffer, we need to copy it to the + * "bounce buffer", to make it available for the worker process. + */ +static bool +bounce_needed(void *buffer) +{ + if ((uintptr_t) buffer >= (uintptr_t) BufferBlocks && + (uintptr_t) buffer < (uintptr_t) BufferBlocks + NBuffers * BLCKSZ) + { + return false; + } + return true; +} + +static void * +bounce_buf(void) +{ + return &communicator_shmem_ptr->backends[MyProcNumber].bounce_buffer; +} + +static void * +bounce_write_if_needed(void *buffer) +{ + void *p; + + if (!bounce_needed(buffer)) + return buffer; + + p = bounce_buf(); + memcpy(p, buffer, BLCKSZ); + return p; +} diff --git a/pgxn/neon/communicator_new.h b/pgxn/neon/communicator_new.h new file mode 100644 index 0000000000..bbab3f8f5a --- /dev/null +++ b/pgxn/neon/communicator_new.h @@ -0,0 +1,56 @@ +/*------------------------------------------------------------------------- + * + * communicator_new.h + * new implementation + * + * + * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + *------------------------------------------------------------------------- + */ +#ifndef COMMUNICATOR_NEW_H +#define COMMUNICATOR_NEW_H + +#include "neon_pgversioncompat.h" + +#include "storage/buf_internals.h" + +#include "pagestore_client.h" + +/* initialization at postmaster startup */ +extern void pg_init_communicator_new(void); +extern void communicator_new_shmem_request(void); +extern void communicator_new_shmem_startup(void); + +/* initialization at backend startup */ +extern void communicator_new_init(void); + +/* Read requests */ +extern bool communicator_new_rel_exists(NRelFileInfo rinfo, ForkNumber forkNum); +extern BlockNumber communicator_new_rel_nblocks(NRelFileInfo rinfo, ForkNumber forknum); +extern int64 communicator_new_dbsize(Oid dbNode); +extern void communicator_new_read_at_lsnv(NRelFileInfo rinfo, ForkNumber forkNum, + BlockNumber base_blockno, + void **buffers, BlockNumber nblocks); +extern void communicator_new_prefetch_register_bufferv(NRelFileInfo rinfo, ForkNumber forkNum, + BlockNumber blockno, + BlockNumber nblocks); +extern bool communicator_new_cache_contains(NRelFileInfo rinfo, ForkNumber forkNum, + BlockNumber blockno); +extern int communicator_new_read_slru_segment(SlruKind kind, int64 segno, + void *buffer); + +/* Write requests, to keep the caches up-to-date */ +extern void communicator_new_write_page(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno, + const void *buffer, XLogRecPtr lsn); +extern void communicator_new_rel_extend(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blockno, + const void *buffer, XLogRecPtr lsn); +extern void communicator_new_rel_zeroextend(NRelFileInfo rinfo, ForkNumber forkNum, + BlockNumber blockno, BlockNumber nblocks, + XLogRecPtr lsn); +extern void communicator_new_rel_create(NRelFileInfo rinfo, ForkNumber forkNum); +extern void communicator_new_rel_truncate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks); +extern void communicator_new_rel_unlink(NRelFileInfo rinfo, ForkNumber forkNum); + +#endif /* COMMUNICATOR_NEW_H */ diff --git a/pgxn/neon/control_plane_connector.h b/pgxn/neon/control_plane_connector.h deleted file mode 100644 index 7eed449200..0000000000 --- 
a/pgxn/neon/control_plane_connector.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef CONTROL_PLANE_CONNECTOR_H -#define CONTROL_PLANE_CONNECTOR_H - -void InitControlPlaneConnector(void); - -#endif diff --git a/pgxn/neon/file_cache.c b/pgxn/neon/file_cache.c index 5d199716d2..e5e2bb9183 100644 --- a/pgxn/neon/file_cache.c +++ b/pgxn/neon/file_cache.c @@ -21,7 +21,7 @@ #include "access/xlog.h" #include "funcapi.h" #include "miscadmin.h" -#include "common/file_utils.h" +#include "common/hashfn.h" #include "pgstat.h" #include "port/pg_iovec.h" #include "postmaster/bgworker.h" @@ -36,6 +36,7 @@ #include "storage/procsignal.h" #include "tcop/tcopprot.h" #include "utils/builtins.h" +#include "utils/dynahash.h" #include "utils/guc.h" #if PG_VERSION_NUM >= 150000 @@ -45,7 +46,6 @@ #include "hll.h" #include "bitmap.h" #include "file_cache.h" -#include "file_cache_rust_hash.h" #include "neon.h" #include "neon_lwlsncache.h" #include "neon_perf_counters.h" @@ -64,7 +64,7 @@ * * Cache is always reconstructed at node startup, so we do not need to save mapping somewhere and worry about * its consistency. - * + * * ## Holes * @@ -76,15 +76,13 @@ * fallocate(FALLOC_FL_PUNCH_HOLE) call. The nominal size of the file doesn't * shrink, but the disk space it uses does. * - * Each hole is tracked in a freelist. The freelist consists of two parts: a - * fixed-size array in shared memory, and a linked chain of on-disk - * blocks. When the in-memory array fills up, it's flushed to a new on-disk - * chunk. If the soft limit is raised again, we reuse the holes before - * extending the nominal size of the file. - * - * The in-memory freelist array is protected by 'lfc_lock', while the on-disk - * chain is protected by a separate 'lfc_freelist_lock'. Locking rule to - * avoid deadlocks: always acquire lfc_freelist_lock first, then lfc_lock. + * Each hole is tracked by a dummy FileCacheEntry, which are kept in the + * 'holes' linked list. They are entered into the chunk hash table, with a + * special key where the blockNumber is used to store the 'offset' of the + * hole, and all other fields are zero. Holes are never looked up in the hash + * table, we only enter them there to have a FileCacheEntry that we can keep + * in the linked list. If the soft limit is raised again, we reuse the holes + * before extending the nominal size of the file. */ /* Local file storage allocation chunk. @@ -94,15 +92,13 @@ * 1Mb chunks can reduce hash map size to 320Mb. * 2. Improve access locality, subsequent pages will be allocated together improving seqscan speed */ -#define BLOCKS_PER_CHUNK_LOG 7 /* 1Mb chunk */ -#define BLOCKS_PER_CHUNK (1 << BLOCKS_PER_CHUNK_LOG) +#define MAX_BLOCKS_PER_CHUNK_LOG 7 /* 1Mb chunk */ +#define MAX_BLOCKS_PER_CHUNK (1 << MAX_BLOCKS_PER_CHUNK_LOG) #define MB ((uint64)1024*1024) -#define SIZE_MB_TO_CHUNKS(size) ((uint32)((size) * MB / BLCKSZ >> BLOCKS_PER_CHUNK_LOG)) -#define BLOCK_TO_CHUNK_OFF(blkno) ((blkno) & (BLOCKS_PER_CHUNK-1)) - -#define INVALID_OFFSET (0xffffffff) +#define SIZE_MB_TO_CHUNKS(size) ((uint32)((size) * MB / BLCKSZ >> lfc_chunk_size_log)) +#define BLOCK_TO_CHUNK_OFF(blkno) ((blkno) & (lfc_blocks_per_chunk-1)) /* * Blocks are read or written to LFC file outside LFC critical section. 
@@ -123,18 +119,15 @@ typedef enum FileCacheBlockState typedef struct FileCacheEntry { + BufferTag key; + uint32 hash; uint32 offset; uint32 access_count; - dlist_node list_node; /* LRU list node */ - uint32 state[(BLOCKS_PER_CHUNK * 2 + 31) / 32]; /* two bits per block */ + dlist_node list_node; /* LRU/holes list node */ + uint32 state[FLEXIBLE_ARRAY_MEMBER]; /* two bits per block */ } FileCacheEntry; -/* Todo: alignment must be the same too */ -StaticAssertDecl(sizeof(FileCacheEntry) == sizeof(RustFileCacheEntry), - "Rust and C declarations of FileCacheEntry are incompatible"); -StaticAssertDecl(sizeof(BufferTag) == sizeof(RustFileCacheKey), - "Rust and C declarations of FileCacheKey are incompatible"); - +#define FILE_CACHE_ENRTY_SIZE MAXALIGN(offsetof(FileCacheEntry, state) + (lfc_blocks_per_chunk*2+31)/32*4) #define GET_STATE(entry, i) (((entry)->state[(i) / 16] >> ((i) % 16 * 2)) & 3) #define SET_STATE(entry, i, new_state) (entry)->state[(i) / 16] = ((entry)->state[(i) / 16] & ~(3 << ((i) % 16 * 2))) | ((new_state) << ((i) % 16 * 2)) @@ -143,9 +136,6 @@ StaticAssertDecl(sizeof(BufferTag) == sizeof(RustFileCacheKey), #define MAX_PREWARM_WORKERS 8 - -#define FREELIST_ENTRIES_PER_CHUNK (BLOCKS_PER_CHUNK * BLCKSZ / sizeof(uint32) - 2) - typedef struct PrewarmWorkerState { uint32 prewarmed_pages; @@ -171,6 +161,7 @@ typedef struct FileCacheControl uint64 evicted_pages; /* number of evicted pages */ dlist_head lru; /* double linked list for LRU replacement * algorithm */ + dlist_head holes; /* double linked list of punched holes */ HyperLogLogState wss_estimation; /* estimation of working set size */ ConditionVariable cv[N_COND_VARS]; /* turnstile of condition variables */ PrewarmWorkerState prewarm_workers[MAX_PREWARM_WORKERS]; @@ -181,40 +172,24 @@ typedef struct FileCacheControl bool prewarm_active; bool prewarm_canceled; dsm_handle prewarm_lfc_state_handle; - - /* - * Free list. This is large enough to hold one chunks worth of entries. 
- */ - uint32 freelist_size; - uint32 freelist_head; - uint32 num_free_pages; - uint32 free_pages[FREELIST_ENTRIES_PER_CHUNK]; } FileCacheControl; -typedef struct FreeListChunk -{ - uint32 next; - uint32 num_free_pages; - uint32 free_pages[FREELIST_ENTRIES_PER_CHUNK]; -} FreeListChunk; - #define FILE_CACHE_STATE_MAGIC 0xfcfcfcfc #define FILE_CACHE_STATE_BITMAP(fcs) ((uint8*)&(fcs)->chunks[(fcs)->n_chunks]) -#define FILE_CACHE_STATE_SIZE_FOR_CHUNKS(n_chunks) (sizeof(FileCacheState) + (n_chunks)*sizeof(BufferTag) + (((n_chunks) * BLOCKS_PER_CHUNK)+7)/8) +#define FILE_CACHE_STATE_SIZE_FOR_CHUNKS(n_chunks) (sizeof(FileCacheState) + (n_chunks)*sizeof(BufferTag) + (((n_chunks) * lfc_blocks_per_chunk)+7)/8) #define FILE_CACHE_STATE_SIZE(fcs) (sizeof(FileCacheState) + (fcs->n_chunks)*sizeof(BufferTag) + (((fcs->n_chunks) << fcs->chunk_size_log)+7)/8) -static FileCacheHashMapHandle lfc_hash_handle; -static FileCacheHashMapAccess lfc_hash; +static HTAB *lfc_hash; static int lfc_desc = -1; static LWLockId lfc_lock; -static LWLockId lfc_freelist_lock; -static int lfc_max_size; -static int lfc_size_limit; +int lfc_max_size; +int lfc_size_limit; static int lfc_prewarm_limit; static int lfc_prewarm_batch; -static int lfc_blocks_per_chunk_ro = BLOCKS_PER_CHUNK; -static char *lfc_path; +static int lfc_chunk_size_log = MAX_BLOCKS_PER_CHUNK_LOG; +static int lfc_blocks_per_chunk = MAX_BLOCKS_PER_CHUNK; +char *lfc_path; static uint64 lfc_generation; static FileCacheControl *lfc_ctl; static bool lfc_do_prewarm; @@ -230,11 +205,6 @@ bool AmPrewarmWorker; #define LFC_ENABLED() (lfc_ctl->limit != 0) -static bool freelist_push(uint32 offset); -static bool freelist_prepare_pop(void); -static uint32 freelist_pop(void); -static bool freelist_is_empty(void); - /* * Close LFC file if opened. * All backends should close their LFC files once LFC is disabled. 
@@ -260,11 +230,19 @@ lfc_switch_off(void) { int fd; + Assert(!neon_enable_new_communicator); + if (LFC_ENABLED()) { - /* Invalidate hash */ - file_cache_hash_reset(lfc_hash); + HASH_SEQ_STATUS status; + FileCacheEntry *entry; + /* Invalidate hash */ + hash_seq_init(&status, lfc_hash); + while ((entry = hash_seq_search(&status)) != NULL) + { + hash_search_with_hash_value(lfc_hash, &entry->key, entry->hash, HASH_REMOVE, NULL); + } lfc_ctl->generation += 1; lfc_ctl->size = 0; lfc_ctl->pinned = 0; @@ -272,9 +250,7 @@ lfc_switch_off(void) lfc_ctl->used_pages = 0; lfc_ctl->limit = 0; dlist_init(&lfc_ctl->lru); - - lfc_ctl->freelist_head = INVALID_OFFSET; - lfc_ctl->num_free_pages = 0; + dlist_init(&lfc_ctl->holes); /* * We need to use unlink to to avoid races in LFC write, because it is not @@ -321,6 +297,8 @@ lfc_maybe_disabled(void) static bool lfc_ensure_opened(void) { + Assert(!neon_enable_new_communicator); + if (lfc_generation != lfc_ctl->generation) { lfc_close_file(); @@ -343,8 +321,10 @@ lfc_ensure_opened(void) static void lfc_shmem_startup(void) { - size_t size; bool found; + static HASHCTL info; + + Assert(!neon_enable_new_communicator); if (prev_shmem_startup_hook) { @@ -353,29 +333,27 @@ lfc_shmem_startup(void) LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE); - size = sizeof(FileCacheControl); - - lfc_ctl = (FileCacheControl *) ShmemInitStruct("lfc", size, &found); + lfc_ctl = (FileCacheControl *) ShmemInitStruct("lfc", sizeof(FileCacheControl), &found); if (!found) { int fd; uint32 n_chunks = SIZE_MB_TO_CHUNKS(lfc_max_size); lfc_lock = (LWLockId) GetNamedLWLockTranche("lfc_lock"); - lfc_freelist_lock = (LWLockId) GetNamedLWLockTranche("lfc_freelist_lock"); + info.keysize = sizeof(BufferTag); + info.entrysize = FILE_CACHE_ENRTY_SIZE; /* * n_chunks+1 because we add new element to hash table before eviction * of victim */ - lfc_hash_handle = file_cache_hash_shmem_init(n_chunks + 1, n_chunks + 1); - - memset(lfc_ctl, 0, offsetof(FileCacheControl, free_pages)); + lfc_hash = ShmemInitHash("lfc_hash", + n_chunks + 1, n_chunks + 1, + &info, + HASH_ELEM | HASH_BLOBS); + memset(lfc_ctl, 0, sizeof(FileCacheControl)); dlist_init(&lfc_ctl->lru); - - lfc_ctl->freelist_size = FREELIST_ENTRIES_PER_CHUNK; - lfc_ctl->freelist_head = INVALID_OFFSET; - lfc_ctl->num_free_pages = 0; + dlist_init(&lfc_ctl->holes); /* Initialize hyper-log-log structure for estimating working set size */ initSHLL(&lfc_ctl->wss_estimation); @@ -399,25 +377,18 @@ lfc_shmem_startup(void) } LWLockRelease(AddinShmemInitLock); - - lfc_hash = file_cache_hash_shmem_access(lfc_hash_handle); } static void lfc_shmem_request(void) { - size_t size; - #if PG_VERSION_NUM>=150000 if (prev_shmem_request_hook) prev_shmem_request_hook(); #endif - size = sizeof(FileCacheControl); - - RequestAddinShmemSpace(size); + RequestAddinShmemSpace(sizeof(FileCacheControl) + hash_estimate_size(SIZE_MB_TO_CHUNKS(lfc_max_size) + 1, FILE_CACHE_ENRTY_SIZE)); RequestNamedLWLockTranche("lfc_lock", 1); - RequestNamedLWLockTranche("lfc_freelist_lock", 2); } static bool @@ -433,6 +404,24 @@ is_normal_backend(void) return lfc_ctl && MyProc && UsedShmemSegAddr && !IsParallelWorker(); } +static bool +lfc_check_chunk_size(int *newval, void **extra, GucSource source) +{ + if (*newval & (*newval - 1)) + { + elog(ERROR, "LFC chunk size should be power of two"); + return false; + } + return true; +} + +static void +lfc_change_chunk_size(int newval, void* extra) +{ + lfc_chunk_size_log = pg_ceil_log2_32(newval); +} + + static bool lfc_check_limit_hook(int *newval, void 
**extra, GucSource source) { @@ -452,14 +441,12 @@ lfc_change_limit_hook(int newval, void *extra) if (!lfc_ctl || !is_normal_backend()) return; - LWLockAcquire(lfc_freelist_lock, LW_EXCLUSIVE); LWLockAcquire(lfc_lock, LW_EXCLUSIVE); /* Open LFC file only if LFC was enabled or we are going to reenable it */ if (newval == 0 && !LFC_ENABLED()) { LWLockRelease(lfc_lock); - LWLockRelease(lfc_freelist_lock); /* File should be reopened if LFC is reenabled */ lfc_close_file(); return; @@ -468,7 +455,6 @@ lfc_change_limit_hook(int newval, void *extra) if (!lfc_ensure_opened()) { LWLockRelease(lfc_lock); - LWLockRelease(lfc_freelist_lock); return; } @@ -484,30 +470,35 @@ lfc_change_limit_hook(int newval, void *extra) * returning their space to file system */ FileCacheEntry *victim = dlist_container(FileCacheEntry, list_node, dlist_pop_head_node(&lfc_ctl->lru)); + FileCacheEntry *hole; uint32 offset = victim->offset; + uint32 hash; + bool found; + BufferTag holetag; CriticalAssert(victim->access_count == 0); #ifdef FALLOC_FL_PUNCH_HOLE - if (fallocate(lfc_desc, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, (off_t) victim->offset * BLOCKS_PER_CHUNK * BLCKSZ, BLOCKS_PER_CHUNK * BLCKSZ) < 0) + if (fallocate(lfc_desc, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, (off_t) victim->offset * lfc_blocks_per_chunk * BLCKSZ, lfc_blocks_per_chunk * BLCKSZ) < 0) neon_log(LOG, "Failed to punch hole in file: %m"); #endif - /* We remove the entry, and enter a hole to the freelist */ - for (int i = 0; i < BLOCKS_PER_CHUNK; i++) + /* We remove the old entry, and re-enter a hole to the hash table */ + for (int i = 0; i < lfc_blocks_per_chunk; i++) { bool is_page_cached = GET_STATE(victim, i) == AVAILABLE; lfc_ctl->used_pages -= is_page_cached; lfc_ctl->evicted_pages += is_page_cached; } - file_cache_hash_remove_entry(lfc_hash, victim); + hash_search_with_hash_value(lfc_hash, &victim->key, victim->hash, HASH_REMOVE, NULL); - if (!freelist_push(offset)) - { - /* freelist_push already logged the error */ - lfc_switch_off(); - LWLockRelease(lfc_lock); - LWLockRelease(lfc_freelist_lock); - return; - } + memset(&holetag, 0, sizeof(holetag)); + holetag.blockNum = offset; + hash = get_hash_value(lfc_hash, &holetag); + hole = hash_search_with_hash_value(lfc_hash, &holetag, hash, HASH_ENTER, &found); + hole->hash = hash; + hole->offset = offset; + hole->access_count = 0; + CriticalAssert(!found); + dlist_push_tail(&lfc_ctl->holes, &hole->list_node); lfc_ctl->used -= 1; } @@ -519,7 +510,6 @@ lfc_change_limit_hook(int newval, void *extra) neon_log(DEBUG1, "set local file cache limit to %d", new_size); LWLockRelease(lfc_lock); - LWLockRelease(lfc_freelist_lock); } void @@ -595,14 +585,14 @@ lfc_init(void) DefineCustomIntVariable("neon.file_cache_chunk_size", "LFC chunk size in blocks (should be power of two)", NULL, - &lfc_blocks_per_chunk_ro, - BLOCKS_PER_CHUNK, - BLOCKS_PER_CHUNK, - BLOCKS_PER_CHUNK, - PGC_INTERNAL, + &lfc_blocks_per_chunk, + MAX_BLOCKS_PER_CHUNK, + 1, + MAX_BLOCKS_PER_CHUNK, + PGC_POSTMASTER, GUC_UNIT_BLOCKS, - NULL, - NULL, + lfc_check_chunk_size, + lfc_change_chunk_size, NULL); DefineCustomIntVariable("neon.file_cache_prewarm_limit", @@ -634,6 +624,9 @@ lfc_init(void) if (lfc_max_size == 0) return; + if (neon_enable_new_communicator) + return; + prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = lfc_shmem_startup; #if PG_VERSION_NUM>=150000 @@ -665,19 +658,19 @@ lfc_get_state(size_t max_entries) fcs = (FileCacheState*)palloc0(state_size); SET_VARSIZE(fcs, state_size); fcs->magic = 
FILE_CACHE_STATE_MAGIC; - fcs->chunk_size_log = BLOCKS_PER_CHUNK_LOG; + fcs->chunk_size_log = lfc_chunk_size_log; fcs->n_chunks = n_entries; bitmap = FILE_CACHE_STATE_BITMAP(fcs); dlist_reverse_foreach(iter, &lfc_ctl->lru) { FileCacheEntry *entry = dlist_container(FileCacheEntry, list_node, iter.cur); - fcs->chunks[i] = *file_cache_hash_get_key_for_entry(lfc_hash, entry); - for (int j = 0; j < BLOCKS_PER_CHUNK; j++) + fcs->chunks[i] = entry->key; + for (int j = 0; j < lfc_blocks_per_chunk; j++) { if (GET_STATE(entry, j) != UNAVAILABLE) { - BITMAP_SET(bitmap, i*BLOCKS_PER_CHUNK + j); + BITMAP_SET(bitmap, i*lfc_blocks_per_chunk + j); n_pages += 1; } } @@ -686,7 +679,7 @@ lfc_get_state(size_t max_entries) } Assert(i == n_entries); fcs->n_pages = n_pages; - Assert(pg_popcount((char*)bitmap, ((n_entries << BLOCKS_PER_CHUNK_LOG) + 7)/8) == n_pages); + Assert(pg_popcount((char*)bitmap, ((n_entries << lfc_chunk_size_log) + 7)/8) == n_pages); elog(LOG, "LFC: save state of %d chunks %d pages", (int)n_entries, (int)n_pages); } @@ -709,6 +702,7 @@ lfc_prewarm(FileCacheState* fcs, uint32 n_workers) dsm_segment *seg; BackgroundWorkerHandle* bgw_handle[MAX_PREWARM_WORKERS]; + Assert(!neon_enable_new_communicator); if (!lfc_ensure_opened()) return; @@ -742,7 +736,7 @@ lfc_prewarm(FileCacheState* fcs, uint32 n_workers) } fcs_chunk_size_log = fcs->chunk_size_log; - if (fcs_chunk_size_log > BLOCKS_PER_CHUNK_LOG) + if (fcs_chunk_size_log > MAX_BLOCKS_PER_CHUNK_LOG) { elog(ERROR, "LFC: Invalid chunk size log: %u", fcs->chunk_size_log); } @@ -863,6 +857,8 @@ lfc_prewarm_main(Datum main_arg) PrewarmWorkerState* ws; uint32 worker_id = DatumGetInt32(main_arg); + Assert(!neon_enable_new_communicator); + AmPrewarmWorker = true; pqsignal(SIGTERM, die); @@ -961,7 +957,9 @@ lfc_invalidate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks) { BufferTag tag; FileCacheEntry *entry; - uint64 hash; + uint32 hash; + + Assert(!neon_enable_new_communicator); if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return; @@ -974,14 +972,14 @@ lfc_invalidate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks) LWLockAcquire(lfc_lock, LW_EXCLUSIVE); if (LFC_ENABLED()) { - for (BlockNumber blkno = 0; blkno < nblocks; blkno += BLOCKS_PER_CHUNK) + for (BlockNumber blkno = 0; blkno < nblocks; blkno += lfc_blocks_per_chunk) { tag.blockNum = blkno; - hash = file_cache_hash_get_hash_value(lfc_hash, &tag); - entry = file_cache_hash_find(lfc_hash, &tag, hash); + hash = get_hash_value(lfc_hash, &tag); + entry = hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_FIND, NULL); if (entry != NULL) { - for (int i = 0; i < BLOCKS_PER_CHUNK; i++) + for (int i = 0; i < lfc_blocks_per_chunk; i++) { if (GET_STATE(entry, i) == AVAILABLE) { @@ -1006,7 +1004,9 @@ lfc_cache_contains(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno) FileCacheEntry *entry; int chunk_offs = BLOCK_TO_CHUNK_OFF(blkno); bool found = false; - uint64 hash; + uint32 hash; + + Assert(!neon_enable_new_communicator); if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return false; @@ -1016,12 +1016,12 @@ lfc_cache_contains(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno) tag.blockNum = blkno - chunk_offs; CriticalAssert(BufTagGetRelNumber(&tag) != InvalidRelFileNumber); - hash = file_cache_hash_get_hash_value(lfc_hash, &tag); + hash = get_hash_value(lfc_hash, &tag); LWLockAcquire(lfc_lock, LW_SHARED); if (LFC_ENABLED()) { - entry = file_cache_hash_find(lfc_hash, &tag, hash); + entry = 
hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_FIND, NULL); found = entry != NULL && GET_STATE(entry, chunk_offs) != UNAVAILABLE; } LWLockRelease(lfc_lock); @@ -1040,9 +1040,11 @@ lfc_cache_containsv(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, FileCacheEntry *entry; uint32 chunk_offs; int found = 0; - uint64 hash; + uint32 hash; int i = 0; + Assert(!neon_enable_new_communicator); + if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return 0; @@ -1053,7 +1055,7 @@ lfc_cache_containsv(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, chunk_offs = BLOCK_TO_CHUNK_OFF(blkno); tag.blockNum = blkno - chunk_offs; - hash = file_cache_hash_get_hash_value(lfc_hash, &tag); + hash = get_hash_value(lfc_hash, &tag); LWLockAcquire(lfc_lock, LW_SHARED); @@ -1064,12 +1066,12 @@ lfc_cache_containsv(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, } while (true) { - int this_chunk = Min(nblocks - i, BLOCKS_PER_CHUNK - chunk_offs); - entry = file_cache_hash_find(lfc_hash, &tag, hash); + int this_chunk = Min(nblocks - i, lfc_blocks_per_chunk - chunk_offs); + entry = hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_FIND, NULL); if (entry != NULL) { - for (; chunk_offs < BLOCKS_PER_CHUNK && i < nblocks; chunk_offs++, i++) + for (; chunk_offs < lfc_blocks_per_chunk && i < nblocks; chunk_offs++, i++) { if (GET_STATE(entry, chunk_offs) != UNAVAILABLE) { @@ -1095,7 +1097,7 @@ lfc_cache_containsv(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, */ chunk_offs = BLOCK_TO_CHUNK_OFF(blkno + i); tag.blockNum = (blkno + i) - chunk_offs; - hash = file_cache_hash_get_hash_value(lfc_hash, &tag); + hash = get_hash_value(lfc_hash, &tag); } LWLockRelease(lfc_lock); @@ -1144,12 +1146,14 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, BufferTag tag; FileCacheEntry *entry; ssize_t rc; - uint64 hash; + uint32 hash; uint64 generation; uint32 entry_offset; int blocks_read = 0; int buf_offset = 0; + Assert(!neon_enable_new_communicator); + if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return -1; @@ -1170,9 +1174,9 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, while (nblocks > 0) { struct iovec iov[PG_IOV_MAX]; - uint8 chunk_mask[BLOCKS_PER_CHUNK / 8] = {0}; + uint8 chunk_mask[MAX_BLOCKS_PER_CHUNK / 8] = {0}; int chunk_offs = BLOCK_TO_CHUNK_OFF(blkno); - int blocks_in_chunk = Min(nblocks, BLOCKS_PER_CHUNK - chunk_offs); + int blocks_in_chunk = Min(nblocks, lfc_blocks_per_chunk - chunk_offs); int iteration_hits = 0; int iteration_misses = 0; uint64 io_time_us = 0; @@ -1222,7 +1226,7 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, Assert(iov_last_used - first_block_in_chunk_read >= n_blocks_to_read); tag.blockNum = blkno - chunk_offs; - hash = file_cache_hash_get_hash_value(lfc_hash, &tag); + hash = get_hash_value(lfc_hash, &tag); cv = &lfc_ctl->cv[hash % N_COND_VARS]; LWLockAcquire(lfc_lock, LW_EXCLUSIVE); @@ -1235,13 +1239,13 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, return blocks_read; } - entry = file_cache_hash_find(lfc_hash, &tag, hash); + entry = hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_FIND, NULL); /* Approximate working set for the blocks assumed in this entry */ for (int i = 0; i < blocks_in_chunk; i++) { tag.blockNum = blkno + i; - addSHLL(&lfc_ctl->wss_estimation, file_cache_hash_get_hash_value(lfc_hash, &tag)); + addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, 
sizeof(tag))); } if (entry == NULL) @@ -1311,8 +1315,9 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, if (iteration_hits != 0) { - /* chunk offset (# of pages) into the LFC file */ - off_t first_read_offset = (off_t) entry_offset * BLOCKS_PER_CHUNK; + /* chunk offset (# + of pages) into the LFC file */ + off_t first_read_offset = (off_t) entry_offset * lfc_blocks_per_chunk; int nwrite = iov_last_used - first_block_in_chunk_read; /* offset of first IOV */ first_read_offset += chunk_offs + first_block_in_chunk_read; @@ -1329,16 +1334,6 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, lfc_disable("read"); return -1; } - - /* - * We successfully read the pages we know were valid when we - * started reading; now mark those pages as read - */ - for (int i = first_block_in_chunk_read; i < iov_last_used; i++) - { - if (BITMAP_ISSET(chunk_mask, i)) - BITMAP_SET(mask, buf_offset + i); - } } /* Place entry to the head of LRU list */ @@ -1356,6 +1351,15 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, { lfc_ctl->time_read += io_time_us; inc_page_cache_read_wait(io_time_us); + /* + * We successfully read the pages we know were valid when we + * started reading; now mark those pages as read + */ + for (int i = first_block_in_chunk_read; i < iov_last_used; i++) + { + if (BITMAP_ISSET(chunk_mask, i)) + BITMAP_SET(mask, buf_offset + i); + } } CriticalAssert(entry->access_count > 0); @@ -1389,14 +1393,14 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, * Returns false if there are no unpinned entries and chunk can not be added. */ static bool -lfc_init_new_entry(FileCacheEntry *entry) +lfc_init_new_entry(FileCacheEntry* entry, uint32 hash) { /*----------- * If the chunk wasn't already in the LFC then we have these * options, in order of preference: * * Unless there is no space available, we can: - * 1. Use an entry from the freelist, and + * 1. Use an entry from the `holes` list, and * 2. Create a new entry. * We can always, regardless of space in the LFC: * 3. 
evict an entry from LRU, and @@ -1404,10 +1408,17 @@ lfc_init_new_entry(FileCacheEntry *entry) */ if (lfc_ctl->used < lfc_ctl->limit) { - if (!freelist_is_empty()) + if (!dlist_is_empty(&lfc_ctl->holes)) { /* We can reuse a hole that was left behind when the LFC was shrunk previously */ - uint32 offset = freelist_pop(); + FileCacheEntry *hole = dlist_container(FileCacheEntry, list_node, + dlist_pop_head_node(&lfc_ctl->holes)); + uint32 offset = hole->offset; + bool hole_found; + + hash_search_with_hash_value(lfc_hash, &hole->key, + hole->hash, HASH_REMOVE, &hole_found); + CriticalAssert(hole_found); lfc_ctl->used += 1; entry->offset = offset; /* reuse the hole */ @@ -1436,7 +1447,7 @@ lfc_init_new_entry(FileCacheEntry *entry) FileCacheEntry *victim = dlist_container(FileCacheEntry, list_node, dlist_pop_head_node(&lfc_ctl->lru)); - for (int i = 0; i < BLOCKS_PER_CHUNK; i++) + for (int i = 0; i < lfc_blocks_per_chunk; i++) { bool is_page_cached = GET_STATE(victim, i) == AVAILABLE; lfc_ctl->used_pages -= is_page_cached; @@ -1445,21 +1456,24 @@ lfc_init_new_entry(FileCacheEntry *entry) CriticalAssert(victim->access_count == 0); entry->offset = victim->offset; /* grab victim's chunk */ - file_cache_hash_remove_entry(lfc_hash, victim); + hash_search_with_hash_value(lfc_hash, &victim->key, + victim->hash, HASH_REMOVE, NULL); neon_log(DEBUG2, "Swap file cache page"); } else { /* Can't add this chunk - we don't have the space for it */ - file_cache_hash_remove_entry(lfc_hash, entry); + hash_search_with_hash_value(lfc_hash, &entry->key, hash, + HASH_REMOVE, NULL); lfc_ctl->prewarm_canceled = true; /* cancel prewarm if LFC limit is reached */ return false; } entry->access_count = 1; + entry->hash = hash; lfc_ctl->pinned += 1; - for (int i = 0; i < BLOCKS_PER_CHUNK; i++) + for (int i = 0; i < lfc_blocks_per_chunk; i++) SET_STATE(entry, i, UNAVAILABLE); return true; @@ -1496,7 +1510,7 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, FileCacheEntry *entry; ssize_t rc; bool found; - uint64 hash; + uint32 hash; uint64 generation; uint32 entry_offset; instr_time io_start, io_end; @@ -1506,6 +1520,8 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, int chunk_offs = BLOCK_TO_CHUNK_OFF(blkno); + Assert(!neon_enable_new_communicator); + if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return false; @@ -1515,10 +1531,9 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, CriticalAssert(BufTagGetRelNumber(&tag) != InvalidRelFileNumber); tag.blockNum = blkno - chunk_offs; - hash = file_cache_hash_get_hash_value(lfc_hash, &tag); + hash = get_hash_value(lfc_hash, &tag); cv = &lfc_ctl->cv[hash % N_COND_VARS]; - retry: LWLockAcquire(lfc_lock, LW_EXCLUSIVE); if (!LFC_ENABLED() || !lfc_ensure_opened()) @@ -1527,9 +1542,6 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, return false; } - if (!freelist_prepare_pop()) - goto retry; - lwlsn = neon_get_lwlsn(rinfo, forknum, blkno); if (lwlsn > lsn) @@ -1540,12 +1552,12 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, return false; } - entry = file_cache_hash_enter(lfc_hash, &tag, hash, &found); + entry = hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_ENTER, &found); if (lfc_prewarm_update_ws_estimation) { tag.blockNum = blkno; - addSHLL(&lfc_ctl->wss_estimation, file_cache_hash_get_hash_value(lfc_hash, &tag)); + addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, sizeof(tag))); } if (found) { @@ -1567,7 
+1579,7 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, } else { - if (!lfc_init_new_entry(entry)) + if (!lfc_init_new_entry(entry, hash)) { /* * We can't process this chunk due to lack of space in LFC, @@ -1588,7 +1600,7 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, pgstat_report_wait_start(WAIT_EVENT_NEON_LFC_WRITE); INSTR_TIME_SET_CURRENT(io_start); rc = pwrite(lfc_desc, buffer, BLCKSZ, - ((off_t) entry_offset * BLOCKS_PER_CHUNK + chunk_offs) * BLCKSZ); + ((off_t) entry_offset * lfc_blocks_per_chunk + chunk_offs) * BLCKSZ); INSTR_TIME_SET_CURRENT(io_end); pgstat_report_wait_end(); @@ -1650,11 +1662,13 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, FileCacheEntry *entry; ssize_t rc; bool found; - uint64 hash; + uint32 hash; uint64 generation; uint32 entry_offset; int buf_offset = 0; + Assert(!neon_enable_new_communicator); + if (lfc_maybe_disabled()) /* fast exit if file cache is disabled */ return; @@ -1663,7 +1677,6 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, CriticalAssert(BufTagGetRelNumber(&tag) != InvalidRelFileNumber); - retry: LWLockAcquire(lfc_lock, LW_EXCLUSIVE); if (!LFC_ENABLED() || !lfc_ensure_opened()) @@ -1673,9 +1686,6 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, } generation = lfc_ctl->generation; - if (!freelist_prepare_pop()) - goto retry; - /* * For every chunk that has blocks we're interested in, we * 1. get the chunk header @@ -1689,7 +1699,7 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, { struct iovec iov[PG_IOV_MAX]; int chunk_offs = BLOCK_TO_CHUNK_OFF(blkno); - int blocks_in_chunk = Min(nblocks, BLOCKS_PER_CHUNK - chunk_offs); + int blocks_in_chunk = Min(nblocks, lfc_blocks_per_chunk - chunk_offs); instr_time io_start, io_end; ConditionVariable* cv; @@ -1702,16 +1712,16 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, } tag.blockNum = blkno - chunk_offs; - hash = file_cache_hash_get_hash_value(lfc_hash, &tag); + hash = get_hash_value(lfc_hash, &tag); cv = &lfc_ctl->cv[hash % N_COND_VARS]; - entry = file_cache_hash_enter(lfc_hash, &tag, hash, &found); + entry = hash_search_with_hash_value(lfc_hash, &tag, hash, HASH_ENTER, &found); /* Approximate working set for the blocks assumed in this entry */ for (int i = 0; i < blocks_in_chunk; i++) { tag.blockNum = blkno + i; - addSHLL(&lfc_ctl->wss_estimation, file_cache_hash_get_hash_value(lfc_hash, &tag)); + addSHLL(&lfc_ctl->wss_estimation, hash_bytes((uint8_t const*)&tag, sizeof(tag))); } if (found) @@ -1728,7 +1738,7 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, } else { - if (!lfc_init_new_entry(entry)) + if (!lfc_init_new_entry(entry, hash)) { /* * We can't process this chunk due to lack of space in LFC, @@ -1777,7 +1787,7 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, pgstat_report_wait_start(WAIT_EVENT_NEON_LFC_WRITE); INSTR_TIME_SET_CURRENT(io_start); rc = pwritev(lfc_desc, iov, blocks_in_chunk, - ((off_t) entry_offset * BLOCKS_PER_CHUNK + chunk_offs) * BLCKSZ); + ((off_t) entry_offset * lfc_blocks_per_chunk + chunk_offs) * BLCKSZ); INSTR_TIME_SET_CURRENT(io_end); pgstat_report_wait_end(); @@ -1837,140 +1847,6 @@ lfc_writev(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, LWLockRelease(lfc_lock); } -/**** freelist management ****/ - - -/* - * Prerequisites: - * - The caller is holding 'lfc_lock'. 
XXX - */ -static bool -freelist_prepare_pop(void) -{ - /* - * If the in-memory freelist is empty, but there are more blocks available, load them. - * - * TODO: if there - */ - if (lfc_ctl->num_free_pages == 0 && lfc_ctl->freelist_head != INVALID_OFFSET) - { - uint32 freelist_head; - FreeListChunk *freelist_chunk; - size_t bytes_read; - - LWLockRelease(lfc_lock); - LWLockAcquire(lfc_freelist_lock, LW_EXCLUSIVE); - - if (!(lfc_ctl->num_free_pages == 0 && lfc_ctl->freelist_head != INVALID_OFFSET)) - { - /* someone else did the work for us while we were not holding the lock */ - LWLockRelease(lfc_freelist_lock); - return false; - } - - freelist_head = lfc_ctl->freelist_head; - freelist_chunk = palloc(BLOCKS_PER_CHUNK * BLCKSZ); - - bytes_read = 0; - while (bytes_read < BLOCKS_PER_CHUNK * BLCKSZ) - { - ssize_t rc; - - rc = pread(lfc_desc, freelist_chunk, BLOCKS_PER_CHUNK * BLCKSZ - bytes_read, (off_t) freelist_head * BLOCKS_PER_CHUNK * BLCKSZ + bytes_read); - if (rc < 0) - { - lfc_disable("read freelist page"); - return false; - } - bytes_read += rc; - } - - LWLockAcquire(lfc_lock, LW_EXCLUSIVE); - if (lfc_generation != lfc_ctl->generation) - { - LWLockRelease(lfc_lock); - return false; - } - - Assert(lfc_ctl->freelist_head == freelist_head); - Assert(lfc_ctl->num_free_pages == 0); - lfc_ctl->freelist_head = freelist_chunk->next; - lfc_ctl->num_free_pages = freelist_chunk->num_free_pages; - memcpy(lfc_ctl->free_pages, freelist_chunk->free_pages, lfc_ctl->num_free_pages * sizeof(uint32)); - pfree(freelist_chunk); - - LWLockRelease(lfc_lock); - LWLockRelease(lfc_freelist_lock); - return false; - } - - return true; -} - -/* - * Prerequisites: - * - The caller is holding 'lfc_lock' and 'lfc_freelist_lock'. - * - * Returns 'false' on error. - */ -static bool -freelist_push(uint32 offset) -{ - Assert(lfc_ctl->freelist_size == FREELIST_ENTRIES_PER_CHUNK); - if (lfc_ctl->num_free_pages == lfc_ctl->freelist_size) - { - FreeListChunk *freelist_chunk; - struct iovec iov; - ssize_t rc; - - freelist_chunk = palloc(BLOCKS_PER_CHUNK * BLCKSZ); - - /* write the existing entries to the chunk on disk */ - freelist_chunk->next = lfc_ctl->freelist_head; - freelist_chunk->num_free_pages = lfc_ctl->num_free_pages; - memcpy(freelist_chunk->free_pages, lfc_ctl->free_pages, lfc_ctl->num_free_pages * sizeof(uint32)); - - /* Use the passed-in offset to hold the freelist chunk itself */ - iov.iov_base = freelist_chunk; - iov.iov_len = BLOCKS_PER_CHUNK * BLCKSZ; - rc = pg_pwritev_with_retry(lfc_desc, &iov, 1, (off_t) offset * BLOCKS_PER_CHUNK * BLCKSZ); - - pfree(freelist_chunk); - - if (rc < 0) - return false; - - lfc_ctl->freelist_head = offset; - lfc_ctl->num_free_pages = 0; - } - else - { - lfc_ctl->free_pages[lfc_ctl->num_free_pages] = offset; - lfc_ctl->num_free_pages++; - } - return true; -} - -static uint32 -freelist_pop(void) -{ - uint32 result; - - /* The caller should've checked that the list is not empty */ - Assert(lfc_ctl->num_free_pages > 0); - - result = lfc_ctl->free_pages[lfc_ctl->num_free_pages - 1]; - lfc_ctl->num_free_pages--; - - return result; -} - -static bool -freelist_is_empty(void) -{ - return lfc_ctl->num_free_pages == 0; -} - typedef struct { TupleDesc tupdesc; @@ -2067,7 +1943,7 @@ neon_get_lfc_stats(PG_FUNCTION_ARGS) break; case 8: key = "file_cache_chunk_size_pages"; - value = BLOCKS_PER_CHUNK; + value = lfc_blocks_per_chunk; break; case 9: key = "file_cache_chunks_pinned"; @@ -2138,6 +2014,7 @@ local_cache_pages(PG_FUNCTION_ARGS) if (SRF_IS_FIRSTCALL()) { + HASH_SEQ_STATUS status; 
FileCacheEntry *entry; uint32 n_pages = 0; @@ -2193,16 +2070,15 @@ local_cache_pages(PG_FUNCTION_ARGS) if (LFC_ENABLED()) { - uint32 num_buckets = file_cache_hash_get_num_buckets(lfc_hash); - - for (uint32 pos = 0; pos < num_buckets; pos++) + hash_seq_init(&status, lfc_hash); + while ((entry = hash_seq_search(&status)) != NULL) { - entry = file_cache_hash_get_at_pos(lfc_hash, pos); - if (entry == NULL) - continue; - - for (int i = 0; i < BLOCKS_PER_CHUNK; i++) - n_pages += GET_STATE(entry, i) == AVAILABLE; + /* Skip hole tags */ + if (NInfoGetRelNumber(BufTagGetNRelFileInfo(entry->key)) != 0) + { + for (int i = 0; i < lfc_blocks_per_chunk; i++) + n_pages += GET_STATE(entry, i) == AVAILABLE; + } } } } @@ -2224,28 +2100,25 @@ local_cache_pages(PG_FUNCTION_ARGS) * in the fctx->record structure. */ uint32 n = 0; - uint32 num_buckets = file_cache_hash_get_num_buckets(lfc_hash); - for (uint32 pos = 0; pos < num_buckets; pos++) + hash_seq_init(&status, lfc_hash); + while ((entry = hash_seq_search(&status)) != NULL) { - entry = file_cache_hash_get_at_pos(lfc_hash, pos); - if (entry == NULL) - continue; - - for (int i = 0; i < BLOCKS_PER_CHUNK; i++) + for (int i = 0; i < lfc_blocks_per_chunk; i++) { - const BufferTag *key = file_cache_hash_get_key_for_entry(lfc_hash, entry); - - if (GET_STATE(entry, i) == AVAILABLE) + if (NInfoGetRelNumber(BufTagGetNRelFileInfo(entry->key)) != 0) { - fctx->record[n].pageoffs = entry->offset * BLOCKS_PER_CHUNK + i; - fctx->record[n].relfilenode = NInfoGetRelNumber(BufTagGetNRelFileInfo(*key)); - fctx->record[n].reltablespace = NInfoGetSpcOid(BufTagGetNRelFileInfo(*key)); - fctx->record[n].reldatabase = NInfoGetDbOid(BufTagGetNRelFileInfo(*key)); - fctx->record[n].forknum = key->forkNum; - fctx->record[n].blocknum = key->blockNum + i; - fctx->record[n].accesscount = entry->access_count; - n += 1; + if (GET_STATE(entry, i) == AVAILABLE) + { + fctx->record[n].pageoffs = entry->offset * lfc_blocks_per_chunk + i; + fctx->record[n].relfilenode = NInfoGetRelNumber(BufTagGetNRelFileInfo(entry->key)); + fctx->record[n].reltablespace = NInfoGetSpcOid(BufTagGetNRelFileInfo(entry->key)); + fctx->record[n].reldatabase = NInfoGetDbOid(BufTagGetNRelFileInfo(entry->key)); + fctx->record[n].forknum = entry->key.forkNum; + fctx->record[n].blocknum = entry->key.blockNum + i; + fctx->record[n].accesscount = entry->access_count; + n += 1; + } } } } @@ -2291,6 +2164,9 @@ PG_FUNCTION_INFO_V1(approximate_working_set_size_seconds); Datum approximate_working_set_size_seconds(PG_FUNCTION_ARGS) { + if (neon_enable_new_communicator) + elog(ERROR, "TODO: not implemented"); + if (lfc_size_limit != 0) { int32 dc; @@ -2308,6 +2184,9 @@ PG_FUNCTION_INFO_V1(approximate_working_set_size); Datum approximate_working_set_size(PG_FUNCTION_ARGS) { + if (neon_enable_new_communicator) + elog(ERROR, "TODO: not implemented"); + if (lfc_size_limit != 0) { int32 dc; @@ -2328,7 +2207,13 @@ Datum get_local_cache_state(PG_FUNCTION_ARGS) { size_t max_entries = PG_ARGISNULL(0) ? 
lfc_prewarm_limit : PG_GETARG_INT32(0); - FileCacheState* fcs = lfc_get_state(max_entries); + FileCacheState* fcs; + + if (neon_enable_new_communicator) + elog(ERROR, "TODO: not implemented"); + + fcs = lfc_get_state(max_entries); + if (fcs != NULL) PG_RETURN_BYTEA_P((bytea*)fcs); else @@ -2342,8 +2227,12 @@ prewarm_local_cache(PG_FUNCTION_ARGS) { bytea* state = PG_GETARG_BYTEA_PP(0); uint32 n_workers = PG_GETARG_INT32(1); - FileCacheState* fcs = (FileCacheState*)state; + FileCacheState* fcs; + if (neon_enable_new_communicator) + elog(ERROR, "TODO: not implemented"); + + fcs = (FileCacheState*)state; lfc_prewarm(fcs, n_workers); PG_RETURN_NULL(); @@ -2363,6 +2252,9 @@ get_prewarm_info(PG_FUNCTION_ARGS) uint32 total_pages; size_t n_workers; + if (neon_enable_new_communicator) + elog(ERROR, "TODO: not implemented"); + if (lfc_size_limit == 0) PG_RETURN_NULL(); diff --git a/pgxn/neon/file_cache.h b/pgxn/neon/file_cache.h index d5ac55d5ba..1b6ff36164 100644 --- a/pgxn/neon/file_cache.h +++ b/pgxn/neon/file_cache.h @@ -26,6 +26,9 @@ typedef struct FileCacheState /* GUCs */ extern bool lfc_store_prefetch_result; +extern int lfc_max_size; +extern int lfc_size_limit; +extern char *lfc_path; /* functions for local file cache */ extern void lfc_invalidate(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber nblocks); diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index 3b6c4247c3..ee17b5d33b 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -69,7 +69,8 @@ char *neon_project_id; char *neon_branch_id; char *neon_endpoint_id; int32 max_cluster_size; -char *page_server_connstring; +char *pageserver_connstring; +char *pageserver_grpc_urls; char *neon_auth_token; int readahead_buffer_size = 128; @@ -177,6 +178,8 @@ static bool pageserver_flush(shardno_t shard_no); static void pageserver_disconnect(shardno_t shard_no); static void pageserver_disconnect_shard(shardno_t shard_no); +static void AssignShardMap(const char *newval); + static bool PagestoreShmemIsValid(void) { @@ -239,6 +242,7 @@ ParseShardMap(const char *connstr, ShardMap *result) return true; } +/* GUC hooks for neon.pageserver_connstring */ static bool CheckPageserverConnstring(char **newval, void **extra, GucSource source) { @@ -249,6 +253,45 @@ CheckPageserverConnstring(char **newval, void **extra, GucSource source) static void AssignPageserverConnstring(const char *newval, void *extra) +{ + /* + * 'neon.pageserver_connstring' is ignored if the new communicator is used. + * In that case, the shard map is loaded from 'neon.pageserver_grpc_urls' + * instead. + */ + if (neon_enable_new_communicator) + return; + + AssignShardMap(newval); +} + + +/* GUC hooks for neon.pageserver_connstring */ +static bool +CheckPageserverGrpcUrls(char **newval, void **extra, GucSource source) +{ + char *p = *newval; + + return ParseShardMap(p, NULL); +} + +static void +AssignPageserverGrpcUrls(const char *newval, void *extra) +{ + /* + * 'neon.pageserver_grpc-urls' is ignored if the new communicator is not + * used. In that case, the shard map is loaded from 'neon.pageserver_connstring' + instead. 
+ */ + if (!neon_enable_new_communicator) + return; + + AssignShardMap(newval); +} + + +static void +AssignShardMap(const char *newval) { ShardMap shard_map; @@ -262,7 +305,7 @@ AssignPageserverConnstring(const char *newval, void *extra) { /* * shouldn't happen, because we already checked the value in - * CheckPageserverConnstring + * CheckPageserverConnstring/CheckPageserverGrpcUrls */ elog(ERROR, "could not parse shard map"); } @@ -281,6 +324,54 @@ AssignPageserverConnstring(const char *newval, void *extra) } } +/* Return a copy of the whole shard map from shared memory */ +void +get_shard_map(char ***connstrs_p, shardno_t *num_shards_p) +{ + uint64 begin_update_counter; + uint64 end_update_counter; + ShardMap *shard_map = &pagestore_shared->shard_map; + shardno_t num_shards; + char *buf; + char **connstrs; + + buf = palloc(MAX_SHARDS*MAX_PAGESERVER_CONNSTRING_SIZE); + connstrs = palloc(sizeof(char *) * MAX_SHARDS); + + /* + * Postmaster can update the shared memory values concurrently, in which + * case we would copy a garbled mix of the old and new values. We will + * detect it because the counter's won't match, and retry. But it's + * important that we don't do anything within the retry-loop that would + * depend on the string having valid contents. + */ + do + { + char *p; + + begin_update_counter = pg_atomic_read_u64(&pagestore_shared->begin_update_counter); + end_update_counter = pg_atomic_read_u64(&pagestore_shared->end_update_counter); + + num_shards = shard_map->num_shards; + + p = buf; + for (int i = 0; i < Min(num_shards, MAX_SHARDS); i++) + { + strlcpy(p, shard_map->connstring[i], MAX_PAGESERVER_CONNSTRING_SIZE); + connstrs[i] = p; + p += MAX_PAGESERVER_CONNSTRING_SIZE; + } + + pg_memory_barrier(); + } + while (begin_update_counter != end_update_counter + || begin_update_counter != pg_atomic_read_u64(&pagestore_shared->begin_update_counter) + || end_update_counter != pg_atomic_read_u64(&pagestore_shared->end_update_counter)); + + *connstrs_p = connstrs; + *num_shards_p = num_shards; +} + /* * Get the current number of shards, and/or the connection string for a * particular shard from the shard map in shared memory. 
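Reviewer note on the hunk above: the new get_shard_map() copies the shard map out of shared memory without taking a lock. Because the postmaster may rewrite the map concurrently, the reader samples the begin/end update counters around the copy and retries until both samples agree (a seqlock-style read). Below is a minimal sketch of that read pattern only, assuming PostgreSQL's atomics API; the struct and function names here (ShardMapShmem, read_snapshot) are hypothetical and not part of this patch.

/*
 * Sketch of the counter-validated read used by get_shard_map().
 * Writer protocol (not shown): bump begin_update_counter, update the
 * payload, then bump end_update_counter.
 */
#include "postgres.h"
#include "port/atomics.h"
#include <string.h>

typedef struct ShardMapShmem
{
	pg_atomic_uint64 begin_update_counter;	/* incremented before an update */
	pg_atomic_uint64 end_update_counter;	/* incremented after an update */
	char		payload[256];				/* data guarded by the counters */
} ShardMapShmem;

static void
read_snapshot(ShardMapShmem *shmem, char *dst)
{
	uint64		begin;
	uint64		end;

	do
	{
		begin = pg_atomic_read_u64(&shmem->begin_update_counter);
		end = pg_atomic_read_u64(&shmem->end_update_counter);

		/* This copy may be torn; the re-check below detects that and retries. */
		memcpy(dst, shmem->payload, sizeof(shmem->payload));

		pg_memory_barrier();
	} while (begin != end ||
			 begin != pg_atomic_read_u64(&shmem->begin_update_counter) ||
			 end != pg_atomic_read_u64(&shmem->end_update_counter));
}

As in the patch, nothing inside the retry loop depends on the copied bytes being well formed; the snapshot is only interpreted after the counter checks succeed.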
@@ -1304,7 +1395,8 @@ PagestoreShmemInit(void) pg_atomic_init_u64(&pagestore_shared->begin_update_counter, 0); pg_atomic_init_u64(&pagestore_shared->end_update_counter, 0); memset(&pagestore_shared->shard_map, 0, sizeof(ShardMap)); - AssignPageserverConnstring(page_server_connstring, NULL); + AssignPageserverConnstring(pageserver_connstring, NULL); + AssignPageserverGrpcUrls(pageserver_grpc_urls, NULL); } NeonPerfCountersShmemInit(); @@ -1357,12 +1449,21 @@ pg_init_libpagestore(void) DefineCustomStringVariable("neon.pageserver_connstring", "connection string to the page server", NULL, - &page_server_connstring, + &pageserver_connstring, "", PGC_SIGHUP, 0, /* no flags required */ CheckPageserverConnstring, AssignPageserverConnstring, NULL); + DefineCustomStringVariable("neon.pageserver_grpc_urls", + "list of gRPC URLs for the page servers", + NULL, + &pageserver_grpc_urls, + "", + PGC_SIGHUP, + 0, /* no flags required */ + CheckPageserverGrpcUrls, AssignPageserverGrpcUrls, NULL); + DefineCustomStringVariable("neon.timeline_id", "Neon timeline_id the server is running on", NULL, @@ -1520,7 +1621,7 @@ pg_init_libpagestore(void) if (neon_auth_token) neon_log(LOG, "using storage auth token from NEON_AUTH_TOKEN environment variable"); - if (page_server_connstring && page_server_connstring[0]) + if (pageserver_connstring[0] || pageserver_grpc_urls[0]) { neon_log(PageStoreTrace, "set neon_smgr hook"); smgr_hook = smgr_neon; diff --git a/pgxn/neon/neon--1.6--1.5.sql b/pgxn/neon/neon--1.6--1.5.sql index 57512980f5..50c62238a3 100644 --- a/pgxn/neon/neon--1.6--1.5.sql +++ b/pgxn/neon/neon--1.6--1.5.sql @@ -2,6 +2,6 @@ DROP FUNCTION IF EXISTS get_prewarm_info(out total_pages integer, out prewarmed_ DROP FUNCTION IF EXISTS get_local_cache_state(max_chunks integer); -DROP FUNCTION IF EXISTS prewarm_local_cache(state bytea, n_workers integer default 1); +DROP FUNCTION IF EXISTS prewarm_local_cache(state bytea, n_workers integer); diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c index 5b4ced7cf0..ab51abc1de 100644 --- a/pgxn/neon/neon.c +++ b/pgxn/neon/neon.c @@ -21,6 +21,7 @@ #include "replication/logicallauncher.h" #include "replication/slot.h" #include "replication/walsender.h" +#include "storage/ipc.h" #include "storage/proc.h" #include "funcapi.h" #include "access/htup_details.h" @@ -30,12 +31,13 @@ #include "utils/guc_tables.h" #include "communicator.h" +#include "communicator_new.h" #include "extension_server.h" #include "file_cache.h" #include "neon.h" +#include "neon_ddl_handler.h" #include "neon_lwlsncache.h" #include "neon_perf_counters.h" -#include "control_plane_connector.h" #include "logical_replication_monitor.h" #include "unstable_extensions.h" #include "walsender_hooks.h" @@ -43,10 +45,14 @@ #include "storage/ipc.h" #endif +/* the rust bindings, generated by cbindgen */ +#include "communicator/communicator_bindings.h" + PG_MODULE_MAGIC; void _PG_init(void); +bool neon_enable_new_communicator; static int running_xacts_overflow_policy; static bool monitor_query_exec_time = false; @@ -56,11 +62,14 @@ static ExecutorEnd_hook_type prev_ExecutorEnd = NULL; static void neon_ExecutorStart(QueryDesc *queryDesc, int eflags); static void neon_ExecutorEnd(QueryDesc *queryDesc); -#if PG_MAJORVERSION_NUM >= 16 static shmem_startup_hook_type prev_shmem_startup_hook; - -static void neon_shmem_startup_hook(void); +#if PG_VERSION_NUM>=150000 +static shmem_request_hook_type prev_shmem_request_hook; #endif + +static void neon_shmem_request(void); +static void neon_shmem_startup_hook(void); + #if 
PG_MAJORVERSION_NUM >= 17 uint32 WAIT_EVENT_NEON_LFC_MAINTENANCE; uint32 WAIT_EVENT_NEON_LFC_READ; @@ -87,6 +96,14 @@ static const struct config_enum_entry running_xacts_overflow_policies[] = { {NULL, 0, false} }; +static const struct config_enum_entry debug_compare_local_modes[] = { + {"none", DEBUG_COMPARE_LOCAL_NONE, false}, + {"prefetch", DEBUG_COMPARE_LOCAL_PREFETCH, false}, + {"lfc", DEBUG_COMPARE_LOCAL_LFC, false}, + {"all", DEBUG_COMPARE_LOCAL_ALL, false}, + {NULL, 0, false} +}; + /* * XXX: These private to procarray.c, but we need them here. */ @@ -439,22 +456,41 @@ _PG_init(void) */ #if PG_VERSION_NUM >= 160000 load_file("$libdir/neon_rmgr", false); +#endif prev_shmem_startup_hook = shmem_startup_hook; shmem_startup_hook = neon_shmem_startup_hook; +#if PG_VERSION_NUM>=150000 + prev_shmem_request_hook = shmem_request_hook; + shmem_request_hook = neon_shmem_request; +#else + neon_shmem_request(); #endif + DefineCustomBoolVariable( + "neon.enable_new_communicator", + "Enables new communicator implementation", + NULL, + &neon_enable_new_communicator, + true, + PGC_POSTMASTER, + 0, + NULL, NULL, NULL); + pg_init_libpagestore(); lfc_init(); pg_init_walproposer(); init_lwlsncache(); pg_init_communicator(); + if (neon_enable_new_communicator) + pg_init_communicator_new(); + Custom_XLogReaderRoutines = NeonOnDemandXLogReaderRoutines; InitUnstableExtensionsSupport(); InitLogicalReplicationMonitor(); - InitControlPlaneConnector(); + InitDDLHandler(); pg_init_extension_server(); @@ -519,6 +555,16 @@ _PG_init(void) GUC_UNIT_KB, NULL, NULL, NULL); + DefineCustomEnumVariable( + "neon.debug_compare_local", + "Debug mode for compaing content of pages in prefetch ring/LFC/PS and local disk", + NULL, + &debug_compare_local, + DEBUG_COMPARE_LOCAL_NONE, + debug_compare_local_modes, + PGC_POSTMASTER, + 0, + NULL, NULL, NULL); /* * Important: This must happen after other parts of the extension are * loaded, otherwise any settings to GUCs that were set before the @@ -583,7 +629,17 @@ backpressure_throttling_time(PG_FUNCTION_ARGS) PG_RETURN_UINT64(BackpressureThrottlingTime()); } -#if PG_MAJORVERSION_NUM >= 16 +static void +neon_shmem_request(void) +{ +#if PG_VERSION_NUM>=150000 + if (prev_shmem_request_hook) + prev_shmem_request_hook(); +#endif + + communicator_new_shmem_request(); +} + static void neon_shmem_startup_hook(void) { @@ -603,8 +659,9 @@ neon_shmem_startup_hook(void) WAIT_EVENT_NEON_PS_READ = WaitEventExtensionNew("Neon/PS_ReadIO"); WAIT_EVENT_NEON_WAL_DL = WaitEventExtensionNew("Neon/WAL_Download"); #endif + + communicator_new_shmem_startup(); } -#endif /* * ExecutorStart hook: start up tracking if needed diff --git a/pgxn/neon/neon.control b/pgxn/neon/neon.control index af69116e21..51193f63c8 100644 --- a/pgxn/neon/neon.control +++ b/pgxn/neon/neon.control @@ -1,6 +1,6 @@ # neon extension comment = 'cloud storage for PostgreSQL' -default_version = '1.5' +default_version = '1.6' module_pathname = '$libdir/neon' relocatable = true trusted = true diff --git a/pgxn/neon/neon.h b/pgxn/neon/neon.h index 431dacb708..149ed5ebed 100644 --- a/pgxn/neon/neon.h +++ b/pgxn/neon/neon.h @@ -13,6 +13,7 @@ #include "utils/wait_event.h" /* GUCs */ +extern bool neon_enable_new_communicator; extern char *neon_auth_token; extern char *neon_timeline; extern char *neon_tenant; diff --git a/pgxn/neon/control_plane_connector.c b/pgxn/neon/neon_ddl_handler.c similarity index 55% rename from pgxn/neon/control_plane_connector.c rename to pgxn/neon/neon_ddl_handler.c index 47ed37da06..2ce7b0086b 100644 --- 
a/pgxn/neon/control_plane_connector.c +++ b/pgxn/neon/neon_ddl_handler.c @@ -1,6 +1,6 @@ /*------------------------------------------------------------------------- * - * control_plane_connector.c + * neon_ddl_handler.c * Captures updates to roles/databases using ProcessUtility_hook and * sends them to the control ProcessUtility_hook. The changes are sent * via HTTP to the URL specified by the GUC neon.console_url when the @@ -13,18 +13,30 @@ * accumulate changes. On subtransaction commit, the top of the stack * is merged with the table below it. * + * Support event triggers for neon_superuser + * + * IDENTIFICATION + * contrib/neon/neon_dll_handler.c + * *------------------------------------------------------------------------- */ #include "postgres.h" #include +#include #include "access/xact.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_proc.h" #include "commands/defrem.h" +#include "commands/event_trigger.h" +#include "commands/user.h" #include "fmgr.h" #include "libpq/crypt.h" #include "miscadmin.h" +#include "nodes/makefuncs.h" +#include "parser/parse_func.h" #include "tcop/pquery.h" #include "tcop/utility.h" #include "utils/acl.h" @@ -32,11 +44,16 @@ #include "utils/hsearch.h" #include "utils/memutils.h" #include "utils/jsonb.h" +#include +#include -#include "control_plane_connector.h" +#include "neon_ddl_handler.h" #include "neon_utils.h" static ProcessUtility_hook_type PreviousProcessUtilityHook = NULL; +static fmgr_hook_type next_fmgr_hook = NULL; +static needs_fmgr_hook_type next_needs_fmgr_hook = NULL; +static bool neon_event_triggers = true; static const char *jwt_token = NULL; @@ -81,12 +98,14 @@ typedef struct typedef struct DdlHashTable { struct DdlHashTable *prev_table; + size_t subtrans_level; HTAB *db_table; HTAB *role_table; } DdlHashTable; static DdlHashTable RootTable; static DdlHashTable *CurrentDdlTable = &RootTable; +static int SubtransLevel; /* current nesting level of subtransactions */ static void PushKeyValue(JsonbParseState **state, char *key, char *value) @@ -315,9 +334,25 @@ SendDeltasToControlPlane() } } +static void +InitCurrentDdlTableIfNeeded() +{ + /* Lazy construction of DllHashTable chain */ + if (SubtransLevel > CurrentDdlTable->subtrans_level) + { + DdlHashTable *new_table = MemoryContextAlloc(CurTransactionContext, sizeof(DdlHashTable)); + new_table->prev_table = CurrentDdlTable; + new_table->subtrans_level = SubtransLevel; + new_table->role_table = NULL; + new_table->db_table = NULL; + CurrentDdlTable = new_table; + } +} + static void InitDbTableIfNeeded() { + InitCurrentDdlTableIfNeeded(); if (!CurrentDdlTable->db_table) { HASHCTL db_ctl = {}; @@ -336,6 +371,7 @@ InitDbTableIfNeeded() static void InitRoleTableIfNeeded() { + InitCurrentDdlTableIfNeeded(); if (!CurrentDdlTable->role_table) { HASHCTL role_ctl = {}; @@ -354,19 +390,21 @@ InitRoleTableIfNeeded() static void PushTable() { - DdlHashTable *new_table = MemoryContextAlloc(CurTransactionContext, sizeof(DdlHashTable)); - - new_table->prev_table = CurrentDdlTable; - new_table->role_table = NULL; - new_table->db_table = NULL; - CurrentDdlTable = new_table; + SubtransLevel += 1; } static void MergeTable() { - DdlHashTable *old_table = CurrentDdlTable; + DdlHashTable *old_table; + Assert(SubtransLevel >= CurrentDdlTable->subtrans_level); + if (--SubtransLevel >= CurrentDdlTable->subtrans_level) + { + return; + } + + old_table = CurrentDdlTable; CurrentDdlTable = old_table->prev_table; if (old_table->db_table) @@ -459,11 +497,15 @@ MergeTable() static void PopTable() { - /* - * Current 
table gets freed because it is allocated in aborted - * subtransaction's memory context. - */ - CurrentDdlTable = CurrentDdlTable->prev_table; + Assert(SubtransLevel >= CurrentDdlTable->subtrans_level); + if (--SubtransLevel < CurrentDdlTable->subtrans_level) + { + /* + * Current table gets freed because it is allocated in aborted + * subtransaction's memory context. + */ + CurrentDdlTable = CurrentDdlTable->prev_table; + } } static void @@ -773,6 +815,7 @@ HandleDropRole(DropRoleStmt *stmt) } } + static void HandleRename(RenameStmt *stmt) { @@ -782,6 +825,460 @@ HandleRename(RenameStmt *stmt) return HandleRoleRename(stmt); } + +/* + * Support for Event Triggers. + * + * In vanilla only superuser can create Event Triggers. + * + * We allow it for neon_superuser by temporary switching to superuser. But as + * far as event trigger can fire in superuser context we should protect + * superuser from execution of arbitrary user's code. + * + * The idea was taken from Supabase PR series starting at + * https://github.com/supabase/supautils/pull/98 + */ + +static bool +neon_needs_fmgr_hook(Oid functionId) { + + return (next_needs_fmgr_hook && (*next_needs_fmgr_hook) (functionId)) + || get_func_rettype(functionId) == EVENT_TRIGGEROID; +} + +static void +LookupFuncOwnerSecDef(Oid functionId, Oid *funcOwner, bool *is_secdef) +{ + Form_pg_proc procForm; + HeapTuple proc_tup = SearchSysCache1(PROCOID, ObjectIdGetDatum(functionId)); + + if (!HeapTupleIsValid(proc_tup)) + ereport(ERROR, + (errmsg("cache lookup failed for function %u", functionId))); + + procForm = (Form_pg_proc) GETSTRUCT(proc_tup); + + *funcOwner = procForm->proowner; + *is_secdef = procForm->prosecdef; + + ReleaseSysCache(proc_tup); +} + + +PG_FUNCTION_INFO_V1(noop); +Datum noop(__attribute__ ((unused)) PG_FUNCTION_ARGS) { PG_RETURN_VOID();} + +static void +force_noop(FmgrInfo *finfo) +{ + finfo->fn_addr = (PGFunction) noop; + finfo->fn_oid = InvalidOid; /* not a known function OID anymore */ + finfo->fn_nargs = 0; /* no arguments for noop */ + finfo->fn_strict = false; + finfo->fn_retset = false; + finfo->fn_stats = 0; /* no stats collection */ + finfo->fn_extra = NULL; /* clear out old context data */ + finfo->fn_mcxt = CurrentMemoryContext; + finfo->fn_expr = NULL; /* no parse tree */ +} + + +/* + * Skip executing Event Triggers execution for superusers, because Event + * Triggers are SECURITY DEFINER and user provided code could then attempt + * privilege escalation. + * + * Also skip executing Event Triggers when GUC neon.event_triggers has been + * set to false. This might be necessary to be able to connect again after a + * LOGIN Event Trigger has been installed that would prevent connections as + * neon_superuser. + */ +static void +neon_fmgr_hook(FmgrHookEventType event, FmgrInfo *flinfo, Datum *private) +{ + /* + * It can be other needs_fmgr_hook which cause our hook to be invoked for + * non-trigger function, so recheck that is is trigger function. + */ + if (flinfo->fn_oid != InvalidOid && + get_func_rettype(flinfo->fn_oid) != EVENT_TRIGGEROID) + { + if (next_fmgr_hook) + (*next_fmgr_hook) (event, flinfo, private); + + return; + } + + /* + * The neon_superuser role can use the GUC neon.event_triggers to disable + * firing Event Trigger. + * + * SET neon.event_triggers TO false; + * + * This only applies to the neon_superuser role though, and only allows + * skipping Event Triggers owned by neon_superuser, which we check by + * proxy of the Event Trigger function being owned by neon_superuser. 
+ * + * A role that is created in role neon_superuser should be allowed to also + * benefit from the neon_event_triggers GUC, and will be considered the + * same as the neon_superuser role. + */ + if (event == FHET_START + && !neon_event_triggers + && is_neon_superuser()) + { + Oid neon_superuser_oid = get_role_oid("neon_superuser", false); + + /* Find the Function Attributes (owner Oid, security definer) */ + const char *fun_owner_name = NULL; + Oid fun_owner = InvalidOid; + bool fun_is_secdef = false; + + LookupFuncOwnerSecDef(flinfo->fn_oid, &fun_owner, &fun_is_secdef); + fun_owner_name = GetUserNameFromId(fun_owner, false); + + if (RoleIsNeonSuperuser(fun_owner_name) + || has_privs_of_role(fun_owner, neon_superuser_oid)) + { + elog(WARNING, + "Skipping Event Trigger: neon.event_triggers is false"); + + /* + * we can't skip execution directly inside the fmgr_hook so instead we + * change the event trigger function to a noop function. + */ + force_noop(flinfo); + } + } + + /* + * Fire Event Trigger if both function owner and current user are + * superuser, or none of them are. + */ + else if (event == FHET_START + /* still enable it to pass pg_regress tests */ + && !RegressTestMode) + { + /* + * Get the current user oid as of before SECURITY DEFINER change of + * CurrentUserId, and that would be SessionUserId. + */ + Oid current_role_oid = GetSessionUserId(); + bool role_is_super = superuser_arg(current_role_oid); + + /* Find the Function Attributes (owner Oid, security definer) */ + Oid function_owner = InvalidOid; + bool function_is_secdef = false; + bool function_is_owned_by_super = false; + + LookupFuncOwnerSecDef(flinfo->fn_oid, &function_owner, &function_is_secdef); + + function_is_owned_by_super = superuser_arg(function_owner); + + /* + * 1. Refuse to run SECURITY DEFINER function that belongs to a + * superuser when the current user is not a superuser itself. + */ + if (!role_is_super + && function_is_owned_by_super + && function_is_secdef) + { + char *func_name = get_func_name(flinfo->fn_oid); + + ereport(WARNING, + (errmsg("Skipping Event Trigger"), + errdetail("Event Trigger function \"%s\" is owned by \"%s\" " + "and is SECURITY DEFINER", + func_name, + GetUserNameFromId(function_owner, false)))); + + /* + * we can't skip execution directly inside the fmgr_hook so + * instead we change the event trigger function to a noop + * function. + */ + force_noop(flinfo); + } + + /* + * 2. Refuse to run functions that belongs to a non-superuser when the + * current user is a superuser. + * + * We could run a SECURITY DEFINER user-function here and be safe with + * privilege escalation risks, but superuser roles are only used for + * infrastructure maintenance operations, where we prefer to skip + * running user-defined code. + */ + else if (role_is_super && !function_is_owned_by_super) + { + char *func_name = get_func_name(flinfo->fn_oid); + + ereport(WARNING, + (errmsg("Skipping Event Trigger"), + errdetail("Event Trigger function \"%s\" " + "is owned by non-superuser role \"%s\", " + "and current_user \"%s\" is superuser", + func_name, + GetUserNameFromId(function_owner, false), + GetUserNameFromId(current_role_oid, false)))); + + /* + * we can't skip execution directly inside the fmgr_hook so + * instead we change the event trigger function to a noop + * function. 
+ */ + force_noop(flinfo); + } + + } + + if (next_fmgr_hook) + (*next_fmgr_hook) (event, flinfo, private); +} + +static Oid prev_role_oid = 0; +static int prev_role_sec_context = 0; +static bool switched_to_superuser = false; + +/* + * Switch tp superuser if not yet superuser. + * Returns false if already switched to superuser. + */ +static bool +switch_to_superuser(void) +{ + Oid superuser_oid; + + if (switched_to_superuser) + return false; + switched_to_superuser = true; + + superuser_oid = get_role_oid("cloud_admin", true /*missing_ok*/); + if (superuser_oid == InvalidOid) + superuser_oid = BOOTSTRAP_SUPERUSERID; + + GetUserIdAndSecContext(&prev_role_oid, &prev_role_sec_context); + SetUserIdAndSecContext(superuser_oid, prev_role_sec_context | + SECURITY_LOCAL_USERID_CHANGE | + SECURITY_RESTRICTED_OPERATION); + return true; +} + +static void +switch_to_original_role(void) +{ + SetUserIdAndSecContext(prev_role_oid, prev_role_sec_context); + switched_to_superuser = false; +} + +/* + * ALTER ROLE ... SUPERUSER; + * + * Used internally to give superuser to a non-privileged role to allow + * ownership of superuser-only objects such as Event Trigger. + * + * ALTER ROLE foo SUPERUSER; + * ALTER EVENT TRIGGER ... OWNED BY foo; + * ALTER ROLE foo NOSUPERUSER; + * + * Now the EVENT TRIGGER is owned by foo, who can DROP it without having to be + * superuser again. + */ +static void +alter_role_super(const char* rolename, bool make_super) +{ + AlterRoleStmt *alter_stmt = makeNode(AlterRoleStmt); + + DefElem *defel_superuser = +#if PG_MAJORVERSION_NUM <= 14 + makeDefElem("superuser", (Node *) makeInteger(make_super), -1); +#else + makeDefElem("superuser", (Node *) makeBoolean(make_super), -1); +#endif + + RoleSpec *rolespec = makeNode(RoleSpec); + rolespec->roletype = ROLESPEC_CSTRING; + rolespec->rolename = pstrdup(rolename); + rolespec->location = -1; + + alter_stmt->role = rolespec; + alter_stmt->options = list_make1(defel_superuser); + +#if PG_MAJORVERSION_NUM < 15 + AlterRole(alter_stmt); +#else + /* ParseState *pstate, AlterRoleStmt *stmt */ + AlterRole(NULL, alter_stmt); +#endif + + CommandCounterIncrement(); +} + + +/* + * Changes the OWNER of an Event Trigger. + * + * Event Triggers can only be owned by superusers, so this ALTER ROLE with + * SUPERUSER and then removes the property. + */ +static void +alter_event_trigger_owner(const char *obj_name, Oid role_oid) +{ + char* role_name = GetUserNameFromId(role_oid, false); + + alter_role_super(role_name, true); + + AlterEventTriggerOwner(obj_name, role_oid); + CommandCounterIncrement(); + + alter_role_super(role_name, false); +} + + +/* + * Neon processing of the CREATE EVENT TRIGGER requires special attention and + * is worth having its own ProcessUtility_hook for that. + */ +static void +ProcessCreateEventTrigger( + PlannedStmt *pstmt, + const char *queryString, + bool readOnlyTree, + ProcessUtilityContext context, + ParamListInfo params, + QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *qc) +{ + Node *parseTree = pstmt->utilityStmt; + bool sudo = false; + + /* We double-check that after local variable declaration block */ + CreateEventTrigStmt *stmt = (CreateEventTrigStmt *) parseTree; + + /* + * We are going to change the current user privileges (sudo) and might + * need after execution cleanup. For that we want to capture the UserId + * before changing it for our sudo implementation. 
+ */ + const Oid current_user_id = GetUserId(); + bool current_user_is_super = superuser_arg(current_user_id); + + if (nodeTag(parseTree) != T_CreateEventTrigStmt) + { + ereport(ERROR, + errcode(ERRCODE_INTERNAL_ERROR), + errmsg("ProcessCreateEventTrigger called for the wrong command")); + } + + /* + * Allow neon_superuser to create Event Trigger, while keeping the + * ownership of the object. + * + * For that we give superuser membership to the role for the execution of + * the command. + */ + if (IsTransactionState() && is_neon_superuser()) + { + /* Find the Event Trigger function Oid */ + Oid func_oid = LookupFuncName(stmt->funcname, 0, NULL, false); + + /* Find the Function Owner Oid */ + Oid func_owner = InvalidOid; + bool is_secdef = false; + bool function_is_owned_by_super = false; + + LookupFuncOwnerSecDef(func_oid, &func_owner, &is_secdef); + + function_is_owned_by_super = superuser_arg(func_owner); + + if(!current_user_is_super && function_is_owned_by_super) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("Permission denied to execute " + "a function owned by a superuser role"), + errdetail("current user \"%s\" is not a superuser " + "and Event Trigger function \"%s\" " + "is owned by a superuser", + GetUserNameFromId(current_user_id, false), + NameListToString(stmt->funcname)))); + } + + if(current_user_is_super && !function_is_owned_by_super) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("Permission denied to execute " + "a function owned by a non-superuser role"), + errdetail("current user \"%s\" is a superuser " + "and function \"%s\" is " + "owned by a non-superuser", + GetUserNameFromId(current_user_id, false), + NameListToString(stmt->funcname)))); + } + + sudo = switch_to_superuser(); + } + + PG_TRY(); + { + if (PreviousProcessUtilityHook) + { + PreviousProcessUtilityHook( + pstmt, + queryString, + readOnlyTree, + context, + params, + queryEnv, + dest, + qc); + } + else + { + standard_ProcessUtility( + pstmt, + queryString, + readOnlyTree, + context, + params, + queryEnv, + dest, + qc); + } + + /* + * Now that the Event Trigger has been installed via our sudo + * mechanism, if the original role was not a superuser then change + * the event trigger ownership back to the original role. + * + * That way [ ALTER | DROP ] EVENT TRIGGER commands just work. + */ + if (IsTransactionState() && is_neon_superuser()) + { + if (!current_user_is_super) + { + /* + * Change event trigger owner to the current role (making + * it a privileged role during the ALTER OWNER command). + */ + alter_event_trigger_owner(stmt->trigname, current_user_id); + } + } + } + PG_FINALLY(); + { + if (sudo) + switch_to_original_role(); + } + PG_END_TRY(); +} + + +/* + * Neon hooks for DDLs (handling privileges, limiting features, etc). + */ static void NeonProcessUtility( PlannedStmt *pstmt, @@ -795,6 +1292,27 @@ NeonProcessUtility( { Node *parseTree = pstmt->utilityStmt; + /* + * The process utility hook for CREATE EVENT TRIGGER is its own + * implementation and warrant being addressed separately from here. 
+ */ + if (nodeTag(parseTree) == T_CreateEventTrigStmt) + { + ProcessCreateEventTrigger( + pstmt, + queryString, + readOnlyTree, + context, + params, + queryEnv, + dest, + qc); + return; + } + + /* + * Other commands that need Neon specific implementations are handled here: + */ switch (nodeTag(parseTree)) { case T_CreatedbStmt: @@ -833,37 +1351,82 @@ NeonProcessUtility( if (PreviousProcessUtilityHook) { PreviousProcessUtilityHook( - pstmt, - queryString, - readOnlyTree, - context, - params, - queryEnv, - dest, - qc); + pstmt, + queryString, + readOnlyTree, + context, + params, + queryEnv, + dest, + qc); } else { standard_ProcessUtility( - pstmt, - queryString, - readOnlyTree, - context, - params, - queryEnv, - dest, - qc); + pstmt, + queryString, + readOnlyTree, + context, + params, + queryEnv, + dest, + qc); } } +/* + * Only neon_superuser is granted privilege to edit neon.event_triggers GUC. + */ +static void +neon_event_triggers_assign_hook(bool newval, void *extra) +{ + /* MyDatabaseId == InvalidOid || !OidIsValid(GetUserId()) */ + + if (IsTransactionState() && !is_neon_superuser()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied to set neon.event_triggers"), + errdetail("Only \"neon_superuser\" is allowed to set the GUC"))); + } +} + + void -InitControlPlaneConnector() +InitDDLHandler() { PreviousProcessUtilityHook = ProcessUtility_hook; ProcessUtility_hook = NeonProcessUtility; + + next_needs_fmgr_hook = needs_fmgr_hook; + needs_fmgr_hook = neon_needs_fmgr_hook; + + next_fmgr_hook = fmgr_hook; + fmgr_hook = neon_fmgr_hook; + RegisterXactCallback(NeonXactCallback, NULL); RegisterSubXactCallback(NeonSubXactCallback, NULL); + /* + * The GUC neon.event_triggers should provide the same effect as the + * Postgres GUC event_triggers, but the neon one is PGC_USERSET. + * + * This allows using the GUC in the connection string and work out of a + * LOGIN Event Trigger that would break database access, all without + * having to edit and reload the Postgres configuration file. 
+ */ + DefineCustomBoolVariable( + "neon.event_triggers", + "Enable firing of event triggers", + NULL, + &neon_event_triggers, + true, + PGC_USERSET, + 0, + NULL, + neon_event_triggers_assign_hook, + NULL); + DefineCustomStringVariable( "neon.console_url", "URL of the Neon Console, which will be forwarded changes to dbs and roles", diff --git a/pgxn/neon/neon_ddl_handler.h b/pgxn/neon/neon_ddl_handler.h new file mode 100644 index 0000000000..de18ed3d82 --- /dev/null +++ b/pgxn/neon/neon_ddl_handler.h @@ -0,0 +1,6 @@ +#ifndef CONTROL_DDL_HANDLER_H +#define CONTROL_DDL_HANDLER_H + +void InitDDLHandler(void); + +#endif diff --git a/pgxn/neon/neon_pgversioncompat.h b/pgxn/neon/neon_pgversioncompat.h index 787bd552f8..7093a790d0 100644 --- a/pgxn/neon/neon_pgversioncompat.h +++ b/pgxn/neon/neon_pgversioncompat.h @@ -9,6 +9,10 @@ #include "fmgr.h" #include "storage/buf_internals.h" +#if PG_MAJORVERSION_NUM < 16 +typedef PGAlignedBlock PGIOAlignedBlock; +#endif + #if PG_MAJORVERSION_NUM < 17 #define NRelFileInfoBackendIsTemp(rinfo) (rinfo.backend != InvalidBackendId) #else @@ -160,6 +164,10 @@ InitBufferTag(BufferTag *tag, const RelFileNode *rnode, #define AmAutoVacuumWorkerProcess() (IsAutoVacuumWorkerProcess()) #endif +#if PG_MAJORVERSION_NUM < 17 +#define MyProcNumber (MyProc - &ProcGlobal->allProcs[0]) +#endif + #if PG_MAJORVERSION_NUM < 15 extern void InitMaterializedSRF(FunctionCallInfo fcinfo, bits32 flags); extern TimeLineID GetWALInsertionTimeLine(void); diff --git a/pgxn/neon/pagestore_client.h b/pgxn/neon/pagestore_client.h index 9df202290d..c2727e232b 100644 --- a/pgxn/neon/pagestore_client.h +++ b/pgxn/neon/pagestore_client.h @@ -177,6 +177,22 @@ extern StringInfoData nm_pack_request(NeonRequest *msg); extern NeonResponse *nm_unpack_response(StringInfo s); extern char *nm_to_string(NeonMessage *msg); +/* + * If debug_compare_local>DEBUG_COMPARE_LOCAL_NONE, we pass through all the SMGR API + * calls to md.c, and *also* do the calls to the Page Server. On every + * read, compare the versions we read from local disk and Page Server, + * and Assert that they are identical. 
+ */ +typedef enum +{ + DEBUG_COMPARE_LOCAL_NONE, /* normal mode - pages are storted locally only for unlogged relations */ + DEBUG_COMPARE_LOCAL_PREFETCH, /* if page is found in prefetch ring, then compare it with local and return */ + DEBUG_COMPARE_LOCAL_LFC, /* if page is found in LFC or prefetch ring, then compare it with local and return */ + DEBUG_COMPARE_LOCAL_ALL /* always fetch page from PS and compare it with local */ +} DebugCompareLocalMode; + +extern int debug_compare_local; + /* * API */ @@ -220,7 +236,8 @@ extern void prefetch_on_ps_disconnect(void); extern page_server_api *page_server; -extern char *page_server_connstring; +extern char *pageserver_connstring; +extern char *pageserver_grpc_urls; extern int flush_every_n_requests; extern int readahead_buffer_size; extern char *neon_timeline; @@ -228,6 +245,7 @@ extern char *neon_tenant; extern int32 max_cluster_size; extern int neon_protocol_version; +extern void get_shard_map(char ***connstrs_p, shardno_t *num_shards_p); extern shardno_t get_shard_number(BufferTag* tag); extern const f_smgr *smgr_neon(ProcNumber backend, NRelFileInfo rinfo); diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c index 43fd715bbb..4189af4d32 100644 --- a/pgxn/neon/pagestore_smgr.c +++ b/pgxn/neon/pagestore_smgr.c @@ -62,6 +62,7 @@ #include "bitmap.h" #include "communicator.h" +#include "communicator_new.h" #include "file_cache.h" #include "neon.h" #include "neon_lwlsncache.h" @@ -76,28 +77,18 @@ typedef PGAlignedBlock PGIOAlignedBlock; #endif -/* - * If DEBUG_COMPARE_LOCAL is defined, we pass through all the SMGR API - * calls to md.c, and *also* do the calls to the Page Server. On every - * read, compare the versions we read from local disk and Page Server, - * and Assert that they are identical. - */ -/* #define DEBUG_COMPARE_LOCAL */ - -#ifdef DEBUG_COMPARE_LOCAL #include "access/nbtree.h" #include "storage/bufpage.h" #include "access/xlog_internal.h" static char *hexdump_page(char *page); -#endif #define IS_LOCAL_REL(reln) (\ NInfoGetDbOid(InfoFromSMgrRel(reln)) != 0 && \ NInfoGetRelNumber(InfoFromSMgrRel(reln)) >= FirstNormalObjectId \ ) -const int SmgrTrace = DEBUG5; +const int SmgrTrace = DEBUG1; /* unlogged relation build states */ typedef enum @@ -108,6 +99,8 @@ typedef enum UNLOGGED_BUILD_NOT_PERMANENT } UnloggedBuildPhase; +int debug_compare_local; + static NRelFileInfo unlogged_build_rel_info; static UnloggedBuildPhase unlogged_build_phase = UNLOGGED_BUILD_NOT_IN_PROGRESS; @@ -478,9 +471,10 @@ neon_init(void) old_redo_read_buffer_filter = redo_read_buffer_filter; redo_read_buffer_filter = neon_redo_read_buffer_filter; -#ifdef DEBUG_COMPARE_LOCAL - mdinit(); -#endif + if (debug_compare_local) + { + mdinit(); + } } /* @@ -751,11 +745,6 @@ neon_exists(SMgrRelation reln, ForkNumber forkNum) neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - if (get_cached_relsize(InfoFromSMgrRel(reln), forkNum, &n_blocks)) - { - return true; - } - /* * \d+ on a view calls smgrexists with 0/0/0 relfilenode. 
The page server * will error out if you check that, because the whole dbdir for @@ -779,10 +768,20 @@ neon_exists(SMgrRelation reln, ForkNumber forkNum) return false; } - neon_get_request_lsns(InfoFromSMgrRel(reln), forkNum, - REL_METADATA_PSEUDO_BLOCKNO, &request_lsns, 1); + if (neon_enable_new_communicator) + return communicator_new_rel_exists(InfoFromSMgrRel(reln), forkNum); + else + { + if (get_cached_relsize(InfoFromSMgrRel(reln), forkNum, &n_blocks)) + { + return true; + } - return communicator_exists(InfoFromSMgrRel(reln), forkNum, &request_lsns); + neon_get_request_lsns(InfoFromSMgrRel(reln), forkNum, + REL_METADATA_PSEUDO_BLOCKNO, &request_lsns, 1); + + return communicator_exists(InfoFromSMgrRel(reln), forkNum, &request_lsns); + } } /* @@ -803,13 +802,16 @@ neon_create(SMgrRelation reln, ForkNumber forkNum, bool isRedo) case RELPERSISTENCE_TEMP: case RELPERSISTENCE_UNLOGGED: -#ifdef DEBUG_COMPARE_LOCAL - mdcreate(reln, forkNum, forkNum == INIT_FORKNUM || isRedo); - if (forkNum == MAIN_FORKNUM) - mdcreate(reln, INIT_FORKNUM, true); -#else - mdcreate(reln, forkNum, isRedo); -#endif + if (debug_compare_local) + { + mdcreate(reln, forkNum, forkNum == INIT_FORKNUM || isRedo); + if (forkNum == MAIN_FORKNUM) + mdcreate(reln, INIT_FORKNUM, true); + } + else + { + mdcreate(reln, forkNum, isRedo); + } return; default: @@ -820,38 +822,52 @@ neon_create(SMgrRelation reln, ForkNumber forkNum, bool isRedo) RelFileInfoFmt(InfoFromSMgrRel(reln)), forkNum); - /* - * Newly created relation is empty, remember that in the relsize cache. - * - * Note that in REDO, this is called to make sure the relation fork - * exists, but it does not truncate the relation. So, we can only update - * the relsize if it didn't exist before. - * - * Also, in redo, we must make sure to update the cached size of the - * relation, as that is the primary source of truth for REDO's file length - * considerations, and as file extension isn't (perfectly) logged, we need - * to take care of that before we hit file size checks. - * - * FIXME: This is currently not just an optimization, but required for - * correctness. Postgres can call smgrnblocks() on the newly-created - * relation. Currently, we don't call SetLastWrittenLSN() when a new - * relation created, so if we didn't remember the size in the relsize - * cache, we might call smgrnblocks() on the newly-created relation before - * the creation WAL record hass been received by the page server. - */ - if (isRedo) + if (neon_enable_new_communicator) { - update_cached_relsize(InfoFromSMgrRel(reln), forkNum, 0); - get_cached_relsize(InfoFromSMgrRel(reln), forkNum, - &reln->smgr_cached_nblocks[forkNum]); + if (isRedo) + { + if (!communicator_new_rel_exists(InfoFromSMgrRel(reln), forkNum)) + communicator_new_rel_create(InfoFromSMgrRel(reln), forkNum); + } + else + communicator_new_rel_create(InfoFromSMgrRel(reln), forkNum); } else - set_cached_relsize(InfoFromSMgrRel(reln), forkNum, 0); + { + /* + * Newly created relation is empty, remember that in the relsize cache. + * + * Note that in REDO, this is called to make sure the relation fork + * exists, but it does not truncate the relation. So, we can only update + * the relsize if it didn't exist before. + * + * Also, in redo, we must make sure to update the cached size of the + * relation, as that is the primary source of truth for REDO's file length + * considerations, and as file extension isn't (perfectly) logged, we need + * to take care of that before we hit file size checks. 
+ * + * FIXME: This is currently not just an optimization, but required for + * correctness. Postgres can call smgrnblocks() on the newly-created + * relation. Currently, we don't call SetLastWrittenLSN() when a new + * relation created, so if we didn't remember the size in the relsize + * cache, we might call smgrnblocks() on the newly-created relation before + * the creation WAL record hass been received by the page server. + */ + if (isRedo) + { + update_cached_relsize(InfoFromSMgrRel(reln), forkNum, 0); + get_cached_relsize(InfoFromSMgrRel(reln), forkNum, + &reln->smgr_cached_nblocks[forkNum]); + } + else + set_cached_relsize(InfoFromSMgrRel(reln), forkNum, 0); + } -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) - mdcreate(reln, forkNum, isRedo); -#endif + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdcreate(reln, forkNum, isRedo); + } } /* @@ -877,13 +893,19 @@ neon_unlink(NRelFileInfoBackend rinfo, ForkNumber forkNum, bool isRedo) { /* * Might or might not exist locally, depending on whether it's an unlogged - * or permanent relation (or if DEBUG_COMPARE_LOCAL is set). Try to + * or permanent relation (or if debug_compare_local is set). Try to * unlink, it won't do any harm if the file doesn't exist. */ mdunlink(rinfo, forkNum, isRedo); + if (!NRelFileInfoBackendIsTemp(rinfo)) { - forget_cached_relsize(InfoFromNInfoB(rinfo), forkNum); + if (neon_enable_new_communicator) + { + communicator_new_rel_unlink(InfoFromNInfoB(rinfo), forkNum); + } + else + forget_cached_relsize(InfoFromNInfoB(rinfo), forkNum); } } @@ -963,7 +985,6 @@ neon_extend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, neon_wallog_page(reln, forkNum, n_blocks++, buffer, true); neon_wallog_page(reln, forkNum, blkno, buffer, false); - set_cached_relsize(InfoFromSMgrRel(reln), forkNum, blkno + 1); lsn = PageGetLSN((Page) buffer); neon_log(SmgrTrace, "smgrextend called for %u/%u/%u.%u blk %u, page LSN: %X/%08X", @@ -971,34 +992,51 @@ neon_extend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, forkNum, blkno, (uint32) (lsn >> 32), (uint32) lsn); - lfc_write(InfoFromSMgrRel(reln), forkNum, blkno, buffer); - -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) - mdextend(reln, forkNum, blkno, buffer, skipFsync); -#endif - - /* - * smgr_extend is often called with an all-zeroes page, so - * lsn==InvalidXLogRecPtr. An smgr_write() call will come for the buffer - * later, after it has been initialized with the real page contents, and - * it is eventually evicted from the buffer cache. But we need a valid LSN - * to the relation metadata update now. - */ - if (lsn == InvalidXLogRecPtr) + if (neon_enable_new_communicator) { - lsn = GetXLogInsertRecPtr(); - neon_set_lwlsn_block(lsn, InfoFromSMgrRel(reln), forkNum, blkno); + // FIXME: this can pass lsn == invalid. Is that ok? + communicator_new_rel_extend(InfoFromSMgrRel(reln), forkNum, blkno, (const void *) buffer, lsn); + + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdextend(reln, forkNum, blkno, buffer, skipFsync); + } + } + else + { + set_cached_relsize(InfoFromSMgrRel(reln), forkNum, blkno + 1); + lfc_write(InfoFromSMgrRel(reln), forkNum, blkno, buffer); + + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdextend(reln, forkNum, blkno, buffer, skipFsync); + } + + /* + * smgr_extend is often called with an all-zeroes page, so + * lsn==InvalidXLogRecPtr. 
An smgr_write() call will come for the buffer + * later, after it has been initialized with the real page contents, and + * it is eventually evicted from the buffer cache. But we need a valid LSN + * to the relation metadata update now. + */ + if (lsn == InvalidXLogRecPtr) + { + lsn = GetXLogInsertRecPtr(); + neon_set_lwlsn_block(lsn, InfoFromSMgrRel(reln), forkNum, blkno); + } + neon_set_lwlsn_relation(lsn, InfoFromSMgrRel(reln), forkNum); } - neon_set_lwlsn_relation(lsn, InfoFromSMgrRel(reln), forkNum); } #if PG_MAJORVERSION_NUM >= 16 static void -neon_zeroextend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blocknum, +neon_zeroextend(SMgrRelation reln, ForkNumber forkNum, BlockNumber start_block, int nblocks, bool skipFsync) { const PGIOAlignedBlock buffer = {0}; + BlockNumber blocknum = start_block; int remblocks = nblocks; XLogRecPtr lsn = 0; @@ -1051,10 +1089,11 @@ neon_zeroextend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blocknum, relpath(reln->smgr_rlocator, forkNum), InvalidBlockNumber))); -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) - mdzeroextend(reln, forkNum, blocknum, nblocks, skipFsync); -#endif + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdzeroextend(reln, forkNum, blocknum, nblocks, skipFsync); + } /* Don't log any pages if we're not allowed to do so. */ if (!XLogInsertAllowed()) @@ -1080,11 +1119,14 @@ neon_zeroextend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blocknum, lsn = XLogInsert(RM_XLOG_ID, XLOG_FPI); - for (int i = 0; i < count; i++) + if (!neon_enable_new_communicator) { - lfc_write(InfoFromSMgrRel(reln), forkNum, blocknum + i, buffer.data); - neon_set_lwlsn_block(lsn, InfoFromSMgrRel(reln), forkNum, - blocknum + i); + for (int i = 0; i < count; i++) + { + lfc_write(InfoFromSMgrRel(reln), forkNum, blocknum + i, buffer.data); + neon_set_lwlsn_block(lsn, InfoFromSMgrRel(reln), forkNum, + blocknum + i); + } } blocknum += count; @@ -1093,8 +1135,15 @@ neon_zeroextend(SMgrRelation reln, ForkNumber forkNum, BlockNumber blocknum, Assert(lsn != 0); - neon_set_lwlsn_relation(lsn, InfoFromSMgrRel(reln), forkNum); - set_cached_relsize(InfoFromSMgrRel(reln), forkNum, blocknum); + if (neon_enable_new_communicator) + { + communicator_new_rel_zeroextend(InfoFromSMgrRel(reln), forkNum, start_block, nblocks, lsn); + } + else + { + neon_set_lwlsn_relation(lsn, InfoFromSMgrRel(reln), forkNum); + set_cached_relsize(InfoFromSMgrRel(reln), forkNum, blocknum); + } } #endif @@ -1154,11 +1203,17 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } + if (neon_enable_new_communicator) + { + communicator_new_prefetch_register_bufferv(InfoFromSMgrRel(reln), forknum, blocknum, nblocks); + return false; + } + tag.spcOid = reln->smgr_rlocator.locator.spcOid; tag.dbOid = reln->smgr_rlocator.locator.dbOid; tag.relNumber = reln->smgr_rlocator.locator.relNumber; tag.forkNum = forknum; - + while (nblocks > 0) { int iterblocks = Min(nblocks, PG_IOV_MAX); @@ -1180,7 +1235,8 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, blocknum += iterblocks; } - communicator_prefetch_pump_state(); + if (!neon_enable_new_communicator) + communicator_prefetch_pump_state(); return false; } @@ -1193,8 +1249,6 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, static bool neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum) { - BufferTag tag; - switch (reln->smgr_relpersistence) { case 0: /* 
probably shouldn't happen, but ignore it */ @@ -1209,17 +1263,25 @@ neon_prefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum) neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - if (lfc_cache_contains(InfoFromSMgrRel(reln), forknum, blocknum)) - return false; + if (neon_enable_new_communicator) + { + communicator_new_prefetch_register_bufferv(InfoFromSMgrRel(reln), forknum, blocknum, 1); + } + else + { + BufferTag tag; - tag.forkNum = forknum; - tag.blockNum = blocknum; + if (lfc_cache_contains(InfoFromSMgrRel(reln), forknum, blocknum)) + return false; - CopyNRelFileInfoToBufTag(tag, InfoFromSMgrRel(reln)); + tag.forkNum = forknum; + tag.blockNum = blocknum; - communicator_prefetch_register_bufferv(tag, NULL, 1, NULL); + CopyNRelFileInfoToBufTag(tag, InfoFromSMgrRel(reln)); + communicator_prefetch_register_bufferv(tag, NULL, 1, NULL); - communicator_prefetch_pump_state(); + communicator_prefetch_pump_state(); + } return false; } @@ -1263,12 +1325,14 @@ neon_writeback(SMgrRelation reln, ForkNumber forknum, */ neon_log(SmgrTrace, "writeback noop"); - communicator_prefetch_pump_state(); + if (!neon_enable_new_communicator) + communicator_prefetch_pump_state(); -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) - mdwriteback(reln, forknum, blocknum, nblocks); -#endif + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdwriteback(reln, forknum, blocknum, nblocks); + } } /* @@ -1279,10 +1343,16 @@ void neon_read_at_lsn(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, neon_request_lsns request_lsns, void *buffer) { - communicator_read_at_lsnv(rinfo, forkNum, blkno, &request_lsns, &buffer, 1, NULL); + if (neon_enable_new_communicator) + { + // FIXME: request_lsns is ignored. That affects the neon_test_utils callers. + // Add the capability to specify the LSNs explicitly, for the sake of neon_test_utils ? 
+ communicator_new_read_at_lsnv(rinfo, forkNum, blkno, &buffer, 1); + } + else + communicator_read_at_lsnv(rinfo, forkNum, blkno, &request_lsns, &buffer, 1, NULL); } -#ifdef DEBUG_COMPARE_LOCAL static void compare_with_local(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, void* buffer, XLogRecPtr request_lsn) { @@ -1364,7 +1434,6 @@ compare_with_local(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, voi } } } -#endif #if PG_MAJORVERSION_NUM < 17 @@ -1407,50 +1476,64 @@ neon_read(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, void *buffer neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - /* Try to read PS results if they are available */ - communicator_prefetch_pump_state(); - - neon_get_request_lsns(InfoFromSMgrRel(reln), forkNum, blkno, &request_lsns, 1); - - present = 0; - bufferp = buffer; - if (communicator_prefetch_lookupv(InfoFromSMgrRel(reln), forkNum, blkno, &request_lsns, 1, &bufferp, &present)) + if (neon_enable_new_communicator) { - /* Prefetch hit */ -#ifdef DEBUG_COMPARE_LOCAL - compare_with_local(reln, forkNum, blkno, buffer, request_lsns.request_lsn); -#else - return; -#endif + communicator_new_read_at_lsnv(InfoFromSMgrRel(reln), forkNum, blkno, + (void *) &buffer, 1); + } + else + { + /* Try to read PS results if they are available */ + communicator_prefetch_pump_state(); + + neon_get_request_lsns(InfoFromSMgrRel(reln), forkNum, blkno, &request_lsns, 1); + + present = 0; + bufferp = buffer; + if (communicator_prefetch_lookupv(InfoFromSMgrRel(reln), forkNum, blkno, &request_lsns, 1, &bufferp, &present)) + { + /* Prefetch hit */ + if (debug_compare_local >= DEBUG_COMPARE_LOCAL_PREFETCH) + { + compare_with_local(reln, forkNum, blkno, buffer, request_lsns.request_lsn); + } + if (debug_compare_local <= DEBUG_COMPARE_LOCAL_PREFETCH) + { + return; + } + } + + /* Try to read from local file cache */ + if (lfc_read(InfoFromSMgrRel(reln), forkNum, blkno, buffer)) + { + MyNeonCounters->file_cache_hits_total++; + if (debug_compare_local >= DEBUG_COMPARE_LOCAL_LFC) + { + compare_with_local(reln, forkNum, blkno, buffer, request_lsns.request_lsn); + } + if (debug_compare_local <= DEBUG_COMPARE_LOCAL_LFC) + { + return; + } + } + + neon_read_at_lsn(InfoFromSMgrRel(reln), forkNum, blkno, request_lsns, buffer); + + /* + * Try to receive prefetch results once again just to make sure we don't leave the smgr code while the OS might still have buffered bytes. + */ + communicator_prefetch_pump_state(); } - /* Try to read from local file cache */ - if (lfc_read(InfoFromSMgrRel(reln), forkNum, blkno, buffer)) + if (debug_compare_local) { - MyNeonCounters->file_cache_hits_total++; -#ifdef DEBUG_COMPARE_LOCAL compare_with_local(reln, forkNum, blkno, buffer, request_lsns.request_lsn); -#else - return; -#endif } - - neon_read_at_lsn(InfoFromSMgrRel(reln), forkNum, blkno, request_lsns, buffer); - - /* - * Try to receive prefetch results once again just to make sure we don't leave the smgr code while the OS might still have buffered bytes. 
- */ - communicator_prefetch_pump_state(); - -#ifdef DEBUG_COMPARE_LOCAL - compare_with_local(reln, forkNum, blkno, buffer, request_lsns.request_lsn); -#endif } #endif /* PG_MAJORVERSION_NUM <= 16 */ #if PG_MAJORVERSION_NUM >= 17 -#ifdef DEBUG_COMPARE_LOCAL static void compare_with_localv(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, void** buffers, BlockNumber nblocks, neon_request_lsns* request_lsns, bits8* read_pages) { @@ -1465,7 +1548,6 @@ compare_with_localv(SMgrRelation reln, ForkNumber forkNum, BlockNumber blkno, vo } } } -#endif static void @@ -1505,57 +1587,76 @@ neon_readv(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, nblocks, PG_IOV_MAX); /* Try to read PS results if they are available */ - communicator_prefetch_pump_state(); - - neon_get_request_lsns(InfoFromSMgrRel(reln), forknum, blocknum, - request_lsns, nblocks); + if (!neon_enable_new_communicator) + communicator_prefetch_pump_state(); memset(read_pages, 0, sizeof(read_pages)); - prefetch_result = communicator_prefetch_lookupv(InfoFromSMgrRel(reln), forknum, - blocknum, request_lsns, nblocks, - buffers, read_pages); + if (neon_enable_new_communicator) + { + communicator_new_read_at_lsnv(InfoFromSMgrRel(reln), forknum, blocknum, + buffers, nblocks); + } + else + { + neon_get_request_lsns(InfoFromSMgrRel(reln), forknum, blocknum, + request_lsns, nblocks); + + prefetch_result = communicator_prefetch_lookupv(InfoFromSMgrRel(reln), forknum, + blocknum, request_lsns, nblocks, + buffers, read_pages); -#ifdef DEBUG_COMPARE_LOCAL - compare_with_localv(reln, forknum, blocknum, buffers, nblocks, request_lsns, read_pages); - memset(read_pages, 0, sizeof(read_pages)); -#else - if (prefetch_result == nblocks) - return; -#endif + if (debug_compare_local >= DEBUG_COMPARE_LOCAL_PREFETCH) + { + compare_with_localv(reln, forknum, blocknum, buffers, nblocks, request_lsns, read_pages); + } + if (debug_compare_local <= DEBUG_COMPARE_LOCAL_PREFETCH && prefetch_result == nblocks) + { + return; + } + if (debug_compare_local > DEBUG_COMPARE_LOCAL_PREFETCH) + { + memset(read_pages, 0, sizeof(read_pages)); + } - /* Try to read from local file cache */ - lfc_result = lfc_readv_select(InfoFromSMgrRel(reln), forknum, blocknum, buffers, - nblocks, read_pages); + /* Try to read from local file cache */ + lfc_result = lfc_readv_select(InfoFromSMgrRel(reln), forknum, blocknum, buffers, + nblocks, read_pages); - if (lfc_result > 0) - MyNeonCounters->file_cache_hits_total += lfc_result; + if (lfc_result > 0) + MyNeonCounters->file_cache_hits_total += lfc_result; -#ifdef DEBUG_COMPARE_LOCAL - compare_with_localv(reln, forknum, blocknum, buffers, nblocks, request_lsns, read_pages); - memset(read_pages, 0, sizeof(read_pages)); -#else - /* Read all blocks from LFC, so we're done */ - if (prefetch_result + lfc_result == nblocks) - return; -#endif + if (debug_compare_local >= DEBUG_COMPARE_LOCAL_LFC) + { + compare_with_localv(reln, forknum, blocknum, buffers, nblocks, request_lsns, read_pages); + } + if (debug_compare_local <= DEBUG_COMPARE_LOCAL_LFC && prefetch_result + lfc_result == nblocks) + { + /* Read all blocks from LFC, so we're done */ + return; + } + if (debug_compare_local > DEBUG_COMPARE_LOCAL_LFC) + { + memset(read_pages, 0, sizeof(read_pages)); + } - communicator_read_at_lsnv(InfoFromSMgrRel(reln), forknum, blocknum, request_lsns, - buffers, nblocks, read_pages); + communicator_read_at_lsnv(InfoFromSMgrRel(reln), forknum, blocknum, request_lsns, + buffers, nblocks, read_pages); - /* - * Try to receive prefetch results once 
again just to make sure we don't leave the smgr code while the OS might still have buffered bytes. - */ - communicator_prefetch_pump_state(); + /* + * Try to receive prefetch results once again just to make sure we don't leave the smgr code while the OS might still have buffered bytes. + */ + communicator_prefetch_pump_state(); + } -#ifdef DEBUG_COMPARE_LOCAL - memset(read_pages, 0xFF, sizeof(read_pages)); - compare_with_localv(reln, forknum, blocknum, buffers, nblocks, request_lsns, read_pages); -#endif + if (debug_compare_local) + { + memset(read_pages, 0xFF, sizeof(read_pages)); + compare_with_localv(reln, forknum, blocknum, buffers, nblocks, request_lsns, read_pages); + } } #endif -#ifdef DEBUG_COMPARE_LOCAL static char * hexdump_page(char *page) { @@ -1574,7 +1675,6 @@ hexdump_page(char *page) return result.data; } -#endif #if PG_MAJORVERSION_NUM < 17 /* @@ -1596,12 +1696,8 @@ neon_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const vo switch (reln->smgr_relpersistence) { case 0: -#ifndef DEBUG_COMPARE_LOCAL /* This is a bit tricky. Check if the relation exists locally */ - if (mdexists(reln, forknum)) -#else - if (mdexists(reln, INIT_FORKNUM)) -#endif + if (mdexists(reln, debug_compare_local ? INIT_FORKNUM : forknum)) { /* It exists locally. Guess it's unlogged then. */ #if PG_MAJORVERSION_NUM >= 17 @@ -1652,18 +1748,28 @@ neon_write(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, const vo forknum, blocknum, (uint32) (lsn >> 32), (uint32) lsn); - lfc_write(InfoFromSMgrRel(reln), forknum, blocknum, buffer); + if (neon_enable_new_communicator) + { + communicator_new_write_page(InfoFromSMgrRel(reln), forknum, blocknum, buffer, lsn); + } + else + { + lfc_write(InfoFromSMgrRel(reln), forknum, blocknum, buffer); - communicator_prefetch_pump_state(); + communicator_prefetch_pump_state(); + } -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + { #if PG_MAJORVERSION_NUM >= 17 - mdwritev(reln, forknum, blocknum, &buffer, 1, skipFsync); + mdwritev(reln, forknum, blocknum, &buffer, 1, skipFsync); #else - mdwrite(reln, forknum, blocknum, buffer, skipFsync); + mdwrite(reln, forknum, blocknum, buffer, skipFsync); #endif -#endif + } + } } #endif @@ -1677,12 +1783,8 @@ neon_writev(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno, switch (reln->smgr_relpersistence) { case 0: -#ifndef DEBUG_COMPARE_LOCAL /* This is a bit tricky. Check if the relation exists locally */ - if (mdexists(reln, forknum)) -#else - if (mdexists(reln, INIT_FORKNUM)) -#endif + if (mdexists(reln, debug_compare_local ? INIT_FORKNUM : forknum)) { /* It exists locally. Guess it's unlogged then. 
*/ mdwritev(reln, forknum, blkno, buffers, nblocks, skipFsync); @@ -1716,14 +1818,27 @@ neon_writev(SMgrRelation reln, ForkNumber forknum, BlockNumber blkno, neon_wallog_pagev(reln, forknum, blkno, nblocks, (const char **) buffers, false); - lfc_writev(InfoFromSMgrRel(reln), forknum, blkno, buffers, nblocks); + if (neon_enable_new_communicator) + { + for (int i = 0; i < nblocks; i++) + { + XLogRecPtr lsn = PageGetLSN((Page) buffers[i]); - communicator_prefetch_pump_state(); + communicator_new_write_page(InfoFromSMgrRel(reln), forknum, blkno + i, buffers[i], lsn); + } + } + else + { + lfc_writev(InfoFromSMgrRel(reln), forknum, blkno, buffers, nblocks); -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) - mdwritev(reln, forknum, blkno, buffers, nblocks, skipFsync); -#endif + communicator_prefetch_pump_state(); + } + + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdwritev(reln, forknum, blkno, buffers, nblocks, skipFsync); + } } #endif @@ -1758,19 +1873,26 @@ neon_nblocks(SMgrRelation reln, ForkNumber forknum) neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - if (get_cached_relsize(InfoFromSMgrRel(reln), forknum, &n_blocks)) + if (neon_enable_new_communicator) { - neon_log(SmgrTrace, "cached nblocks for %u/%u/%u.%u: %u blocks", - RelFileInfoFmt(InfoFromSMgrRel(reln)), - forknum, n_blocks); - return n_blocks; + n_blocks = communicator_new_rel_nblocks(InfoFromSMgrRel(reln), forknum); } + else + { + if (get_cached_relsize(InfoFromSMgrRel(reln), forknum, &n_blocks)) + { + neon_log(SmgrTrace, "cached nblocks for %u/%u/%u.%u: %u blocks", + RelFileInfoFmt(InfoFromSMgrRel(reln)), + forknum, n_blocks); + return n_blocks; + } - neon_get_request_lsns(InfoFromSMgrRel(reln), forknum, - REL_METADATA_PSEUDO_BLOCKNO, &request_lsns, 1); + neon_get_request_lsns(InfoFromSMgrRel(reln), forknum, + REL_METADATA_PSEUDO_BLOCKNO, &request_lsns, 1); - n_blocks = communicator_nblocks(InfoFromSMgrRel(reln), forknum, &request_lsns); - update_cached_relsize(InfoFromSMgrRel(reln), forknum, n_blocks); + n_blocks = communicator_nblocks(InfoFromSMgrRel(reln), forknum, &request_lsns); + update_cached_relsize(InfoFromSMgrRel(reln), forknum, n_blocks); + } neon_log(SmgrTrace, "neon_nblocks: rel %u/%u/%u fork %u (request LSN %X/%08X): %u blocks", RelFileInfoFmt(InfoFromSMgrRel(reln)), @@ -1791,10 +1913,17 @@ neon_dbsize(Oid dbNode) neon_request_lsns request_lsns; NRelFileInfo dummy_node = {0}; - neon_get_request_lsns(dummy_node, MAIN_FORKNUM, - REL_METADATA_PSEUDO_BLOCKNO, &request_lsns, 1); + if (neon_enable_new_communicator) + { + db_size = communicator_new_dbsize(dbNode); + } + else + { + neon_get_request_lsns(dummy_node, MAIN_FORKNUM, + REL_METADATA_PSEUDO_BLOCKNO, &request_lsns, 1); - db_size = communicator_dbsize(dbNode, &request_lsns); + db_size = communicator_dbsize(dbNode, &request_lsns); + } neon_log(SmgrTrace, "neon_dbsize: db %u (request LSN %X/%08X): %ld bytes", dbNode, LSN_FORMAT_ARGS(request_lsns.effective_request_lsn), db_size); @@ -1808,8 +1937,6 @@ neon_dbsize(Oid dbNode) static void neon_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber old_blocks, BlockNumber nblocks) { - XLogRecPtr lsn; - switch (reln->smgr_relpersistence) { case 0: @@ -1833,39 +1960,49 @@ neon_truncate(SMgrRelation reln, ForkNumber forknum, BlockNumber old_blocks, Blo neon_log(ERROR, "unknown relpersistence '%c'", reln->smgr_relpersistence); } - set_cached_relsize(InfoFromSMgrRel(reln), forknum, nblocks); + if (neon_enable_new_communicator) + { + 
communicator_new_rel_truncate(InfoFromSMgrRel(reln), forknum, nblocks); + } + else + { + XLogRecPtr lsn; - /* - * Truncating a relation drops all its buffers from the buffer cache - * without calling smgrwrite() on them. But we must account for that in - * our tracking of last-written-LSN all the same: any future smgrnblocks() - * request must return the new size after the truncation. We don't know - * what the LSN of the truncation record was, so be conservative and use - * the most recently inserted WAL record's LSN. - */ - lsn = GetXLogInsertRecPtr(); - lsn = nm_adjust_lsn(lsn); + set_cached_relsize(InfoFromSMgrRel(reln), forknum, nblocks); - /* - * Flush it, too. We don't actually care about it here, but let's uphold - * the invariant that last-written LSN <= flush LSN. - */ - XLogFlush(lsn); + /* + * Truncating a relation drops all its buffers from the buffer cache + * without calling smgrwrite() on them. But we must account for that in + * our tracking of last-written-LSN all the same: any future smgrnblocks() + * request must return the new size after the truncation. We don't know + * what the LSN of the truncation record was, so be conservative and use + * the most recently inserted WAL record's LSN. + */ + lsn = GetXLogInsertRecPtr(); + lsn = nm_adjust_lsn(lsn); - /* - * Truncate may affect several chunks of relations. So we should either - * update last written LSN for all of them, or update LSN for "dummy" - * metadata block. Second approach seems more efficient. If the relation - * is extended again later, the extension will update the last-written LSN - * for the extended pages, so there's no harm in leaving behind obsolete - * entries for the truncated chunks. - */ - neon_set_lwlsn_relation(lsn, InfoFromSMgrRel(reln), forknum); + /* + * Flush it, too. We don't actually care about it here, but let's uphold + * the invariant that last-written LSN <= flush LSN. + */ + XLogFlush(lsn); -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) - mdtruncate(reln, forknum, old_blocks, nblocks); -#endif + /* + * Truncate may affect several chunks of relations. So we should either + * update last written LSN for all of them, or update LSN for "dummy" + * metadata block. Second approach seems more efficient. If the relation + * is extended again later, the extension will update the last-written LSN + * for the extended pages, so there's no harm in leaving behind obsolete + * entries for the truncated chunks. 
+ */ + neon_set_lwlsn_relation(lsn, InfoFromSMgrRel(reln), forknum); + } + + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdtruncate(reln, forknum, old_blocks, nblocks); + } } /* @@ -1902,12 +2039,14 @@ neon_immedsync(SMgrRelation reln, ForkNumber forknum) neon_log(SmgrTrace, "[NEON_SMGR] immedsync noop"); - communicator_prefetch_pump_state(); + if (!neon_enable_new_communicator) + communicator_prefetch_pump_state(); -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) - mdimmedsync(reln, forknum); -#endif + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdimmedsync(reln, forknum); + } } #if PG_MAJORVERSION_NUM >= 17 @@ -1934,10 +2073,11 @@ neon_registersync(SMgrRelation reln, ForkNumber forknum) neon_log(SmgrTrace, "[NEON_SMGR] registersync noop"); -#ifdef DEBUG_COMPARE_LOCAL - if (IS_LOCAL_REL(reln)) - mdimmedsync(reln, forknum); -#endif + if (debug_compare_local) + { + if (IS_LOCAL_REL(reln)) + mdimmedsync(reln, forknum); + } } #endif @@ -1978,10 +2118,11 @@ neon_start_unlogged_build(SMgrRelation reln) case RELPERSISTENCE_UNLOGGED: unlogged_build_rel_info = InfoFromSMgrRel(reln); unlogged_build_phase = UNLOGGED_BUILD_NOT_PERMANENT; -#ifdef DEBUG_COMPARE_LOCAL - if (!IsParallelWorker()) - mdcreate(reln, INIT_FORKNUM, true); -#endif + if (debug_compare_local) + { + if (!IsParallelWorker()) + mdcreate(reln, INIT_FORKNUM, true); + } return; default: @@ -2009,11 +2150,7 @@ neon_start_unlogged_build(SMgrRelation reln) */ if (!IsParallelWorker()) { -#ifndef DEBUG_COMPARE_LOCAL - mdcreate(reln, MAIN_FORKNUM, false); -#else - mdcreate(reln, INIT_FORKNUM, true); -#endif + mdcreate(reln, debug_compare_local ? INIT_FORKNUM : MAIN_FORKNUM, false); } } @@ -2103,18 +2240,22 @@ neon_end_unlogged_build(SMgrRelation reln) RelFileInfoFmt(InfoFromNInfoB(rinfob)), forknum); - forget_cached_relsize(InfoFromNInfoB(rinfob), forknum); - lfc_invalidate(InfoFromNInfoB(rinfob), forknum, nblocks); + // FIXME: also do this with the new communicator + if (!neon_enable_new_communicator) + { + forget_cached_relsize(InfoFromNInfoB(rinfob), forknum); + lfc_invalidate(InfoFromNInfoB(rinfob), forknum, nblocks); + } mdclose(reln, forknum); -#ifndef DEBUG_COMPARE_LOCAL - /* use isRedo == true, so that we drop it immediately */ - mdunlink(rinfob, forknum, true); -#endif + if (!debug_compare_local) + { + /* use isRedo == true, so that we drop it immediately */ + mdunlink(rinfob, forknum, true); + } } -#ifdef DEBUG_COMPARE_LOCAL - mdunlink(rinfob, INIT_FORKNUM, true); -#endif + if (debug_compare_local) + mdunlink(rinfob, INIT_FORKNUM, true); } NRelFileInfoInvalidate(unlogged_build_rel_info); unlogged_build_phase = UNLOGGED_BUILD_NOT_IN_PROGRESS; @@ -2172,7 +2313,10 @@ neon_read_slru_segment(SMgrRelation reln, const char* path, int segno, void* buf request_lsns.not_modified_since = not_modified_since; request_lsns.effective_request_lsn = request_lsn; - n_blocks = communicator_read_slru_segment(kind, segno, &request_lsns, buffer); + if (neon_enable_new_communicator) + n_blocks = communicator_new_read_slru_segment(kind, segno, buffer); + else + n_blocks = communicator_read_slru_segment(kind, segno, &request_lsns, buffer); return n_blocks; } @@ -2209,7 +2353,8 @@ AtEOXact_neon(XactEvent event, void *arg) } break; } - communicator_reconfigure_timeout_if_needed(); + if (!neon_enable_new_communicator) + communicator_reconfigure_timeout_if_needed(); } static const struct f_smgr neon_smgr = @@ -2267,7 +2412,10 @@ smgr_init_neon(void) smgr_init_standard(); neon_init(); - communicator_init(); + if 
(neon_enable_new_communicator) + communicator_new_init(); + else + communicator_init(); } @@ -2279,6 +2427,20 @@ neon_extend_rel_size(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno, /* This is only used in WAL replay */ Assert(RecoveryInProgress()); + if (neon_enable_new_communicator) + { + relsize = communicator_new_rel_nblocks(rinfo, forknum); + + if (blkno >= relsize) + communicator_new_rel_zeroextend(rinfo, forknum, relsize, (blkno - relsize) + 1, end_recptr); + + /* + * FIXME: does this need to update the last-written LSN too, like the + * old implementation? + */ + return; + } + /* Extend the relation if we know its size */ if (get_cached_relsize(rinfo, forknum, &relsize)) { @@ -2444,7 +2606,10 @@ neon_redo_read_buffer_filter(XLogReaderState *record, uint8 block_id) * We should perform this check after assigning LwLSN to prevent * prefetching of some older version of the page by some other backend. */ - no_redo_needed = !lfc_cache_contains(rinfo, forknum, blkno); + if (neon_enable_new_communicator) + no_redo_needed = communicator_new_cache_contains(rinfo, forknum, blkno); + else + no_redo_needed = !lfc_cache_contains(rinfo, forknum, blkno); } LWLockRelease(partitionLock); diff --git a/pgxn/neon/relsize_cache.c b/pgxn/neon/relsize_cache.c index 60ca1675d9..4ea303f996 100644 --- a/pgxn/neon/relsize_cache.c +++ b/pgxn/neon/relsize_cache.c @@ -10,6 +10,7 @@ */ #include "postgres.h" +#include "neon.h" #include "neon_pgversioncompat.h" #include "pagestore_client.h" @@ -99,6 +100,8 @@ get_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber *size) { bool found = false; + Assert(!neon_enable_new_communicator); + if (relsize_hash_size > 0) { RelTag tag; @@ -130,6 +133,8 @@ get_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber *size) void set_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber size) { + Assert(!neon_enable_new_communicator); + if (relsize_hash_size > 0) { RelTag tag; @@ -178,6 +183,8 @@ set_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber size) void update_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber size) { + Assert(!neon_enable_new_communicator); + if (relsize_hash_size > 0) { RelTag tag; @@ -212,6 +219,8 @@ update_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber size) void forget_cached_relsize(NRelFileInfo rinfo, ForkNumber forknum) { + Assert(!neon_enable_new_communicator); + if (relsize_hash_size > 0) { RelTag tag; diff --git a/pgxn/neon/walproposer.c b/pgxn/neon/walproposer.c index 91d39345e2..ba6e4a54ff 100644 --- a/pgxn/neon/walproposer.c +++ b/pgxn/neon/walproposer.c @@ -1135,7 +1135,7 @@ VotesCollectedMset(WalProposer *wp, MemberSet *mset, Safekeeper **msk, StringInf wp->propTermStartLsn = sk->voteResponse.flushLsn; wp->donor = sk; } - wp->truncateLsn = Max(wp->safekeeper[i].voteResponse.truncateLsn, wp->truncateLsn); + wp->truncateLsn = Max(sk->voteResponse.truncateLsn, wp->truncateLsn); if (n_votes > 0) appendStringInfoString(s, ", "); diff --git a/pgxn/neon/walproposer.h b/pgxn/neon/walproposer.h index 08087e5a55..4b223b6b18 100644 --- a/pgxn/neon/walproposer.h +++ b/pgxn/neon/walproposer.h @@ -679,8 +679,7 @@ typedef struct walproposer_api * Finish sync safekeepers with the given LSN. This function should not * return and should exit the program. 
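+ * The function pointer is declared with __attribute__((noreturn)) to reflect this.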
*/ - void (*finish_sync_safekeepers) (WalProposer *wp, XLogRecPtr lsn); - + void (*finish_sync_safekeepers) (WalProposer *wp, XLogRecPtr lsn) __attribute__((noreturn)) ; /* * Called after every AppendResponse from the safekeeper. Used to * propagate backpressure feedback and to confirm WAL persistence (has diff --git a/pgxn/neon/walproposer_pg.c b/pgxn/neon/walproposer_pg.c index 3d6a92ad79..185fc83ace 100644 --- a/pgxn/neon/walproposer_pg.c +++ b/pgxn/neon/walproposer_pg.c @@ -1890,7 +1890,7 @@ walprop_pg_wait_event_set(WalProposer *wp, long timeout, Safekeeper **sk, uint32 return rc; } -static void +static void __attribute__((noreturn)) walprop_pg_finish_sync_safekeepers(WalProposer *wp, XLogRecPtr lsn) { fprintf(stdout, "%X/%X\n", LSN_FORMAT_ARGS(lsn)); diff --git a/poetry.lock b/poetry.lock index 21a2664555..1bc5077eb7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -746,23 +746,23 @@ xray = ["mypy-boto3-xray (>=1.26.0,<1.27.0)"] [[package]] name = "botocore" -version = "1.34.11" +version = "1.34.162" description = "Low-level, data-driven core of boto 3." optional = false -python-versions = ">= 3.8" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "botocore-1.34.11-py3-none-any.whl", hash = "sha256:1ff1398b6ea670e1c01ac67a33af3da854f8e700d3528289c04f319c330d8250"}, - {file = "botocore-1.34.11.tar.gz", hash = "sha256:51905c3d623c60df5dc5794387de7caf886d350180a01a3dfa762e903edb45a9"}, + {file = "botocore-1.34.162-py3-none-any.whl", hash = "sha256:2d918b02db88d27a75b48275e6fb2506e9adaaddbec1ffa6a8a0898b34e769be"}, + {file = "botocore-1.34.162.tar.gz", hash = "sha256:adc23be4fb99ad31961236342b7cbf3c0bfc62532cd02852196032e8c0d682f3"}, ] [package.dependencies] jmespath = ">=0.7.1,<2.0.0" python-dateutil = ">=2.1,<3.0.0" -urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""} +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} [package.extras] -crt = ["awscrt (==0.19.19)"] +crt = ["awscrt (==0.21.2)"] [[package]] name = "botocore-stubs" @@ -3051,19 +3051,19 @@ files = [ [[package]] name = "requests" -version = "2.32.3" +version = "2.32.4" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, + {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, + {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, ] [package.dependencies] certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" +charset_normalizer = ">=2,<4" idna = ">=2.5,<4" urllib3 = ">=1.21.1,<3" @@ -3422,20 +3422,21 @@ files = [ [[package]] name = "urllib3" -version = "1.26.19" +version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, - {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] -brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "websockets" @@ -3846,4 +3847,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.1" python-versions = "^3.11" -content-hash = "7ab1e7b975af34b3271b7c6018fa22a261d3f73c7c0a0403b6b2bb86b5fbd36e" +content-hash = "bd93313f110110aa53b24a3ed47ba2d7f60e2c658a79cdff7320fed1bb1b57b5" diff --git a/postgres.mk b/postgres.mk new file mode 100644 index 0000000000..eff882d1ff --- /dev/null +++ b/postgres.mk @@ -0,0 +1,121 @@ +# Sub-makefile for compiling PostgreSQL as part of Neon. This is +# included from the main Makefile, and is not meant to be called +# directly. +# +# CI workflows and Dockerfiles can take advantage of the following +# properties for caching: +# +# - Compiling the targets in this file only builds the PostgreSQL sources +# under the vendor/ subdirectory, nothing else from the repository. +# - All outputs go to POSTGRES_INSTALL_DIR (by default 'pg_install', +# see parent Makefile) +# - intermediate build artifacts go to BUILD_DIR +# +# +# Variables passed from the parent Makefile that control what gets +# installed and where: +# - POSTGRES_VERSIONS +# - POSTGRES_INSTALL_DIR +# - BUILD_DIR +# +# Variables passed from the parent Makefile that affect the build +# process and the resulting binaries: +# - PG_CONFIGURE_OPTS +# - PG_CFLAGS +# - PG_LDFLAGS +# - EXTRA_PATH_OVERRIDES + +### +### Main targets +### +### These are called from the main Makefile, and can also be called +### directly from command line + +# Compile and install a specific PostgreSQL version +postgres-install-%: postgres-configure-% \ + postgres-headers-install-% # to prevent `make install` conflicts with neon's `postgres-headers` + +# Install the PostgreSQL header files into $(POSTGRES_INSTALL_DIR)//include +# +# This is implicitly part of the 'postgres-install-%' target, but this can be handy +# if you want to install just the headers without building PostgreSQL, e.g. for building +# extensions. 
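+# For example, from the repository root (assuming v17 is listed in
+# POSTGRES_VERSIONS):
+#
+#   make postgres-headers-install-v17    # headers for v17 only
+#   make postgres-headers                 # headers for every supported version
+#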
+postgres-headers-install-%: postgres-configure-% + +@echo "Installing PostgreSQL $* headers" + $(MAKE) -C $(BUILD_DIR)/$*/src/include MAKELEVEL=0 install + +# Run Postgres regression tests +postgres-check-%: postgres-install-% + $(MAKE) -C $(BUILD_DIR)/$* MAKELEVEL=0 check + +### +### Shorthands for the main targets, for convenience +### + +# Same as the above main targets, but for all supported PostgreSQL versions +# For example, 'make postgres-install' is equivalent to +# 'make postgres-install-v14 postgres-install-v15 postgres-install-v16 postgres-install-v17' +all_version_targets=postgres-install postgres-headers-install postgres-check +.PHONY: $(all_version_targets) +$(all_version_targets): postgres-%: $(foreach pg_version,$(POSTGRES_VERSIONS),postgres-%-$(pg_version)) + +.PHONY: postgres +postgres: postgres-install + +.PHONY: postgres-headers +postgres-headers: postgres-headers-install + +# 'postgres-v17' is an alias for 'postgres-install-v17' etc. +$(foreach pg_version,$(POSTGRES_VERSIONS),postgres-$(pg_version)): postgres-%: postgres-install-% + +### +### Intermediate targets +### +### These are not intended to be called directly, but are dependencies for the +### main targets. + +# Run 'configure' +$(BUILD_DIR)/%/config.status: + mkdir -p $(BUILD_DIR) + test -e $(BUILD_DIR)/CACHEDIR.TAG || echo "$(CACHEDIR_TAG_CONTENTS)" > $(BUILD_DIR)/CACHEDIR.TAG + + +@echo "Configuring Postgres $* build" + @test -s $(ROOT_PROJECT_DIR)/vendor/postgres-$*/configure || { \ + echo "\nPostgres submodule not found in $(ROOT_PROJECT_DIR)/vendor/postgres-$*/, execute "; \ + echo "'git submodule update --init --recursive --depth 2 --progress .' in project root.\n"; \ + exit 1; } + mkdir -p $(BUILD_DIR)/$* + + VERSION=$*; \ + EXTRA_VERSION=$$(cd $(ROOT_PROJECT_DIR)/vendor/postgres-$$VERSION && git rev-parse HEAD); \ + (cd $(BUILD_DIR)/$$VERSION && \ + env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-$$VERSION/configure \ + CFLAGS='$(PG_CFLAGS)' LDFLAGS='$(PG_LDFLAGS)' \ + $(PG_CONFIGURE_OPTS) --with-extra-version=" ($$EXTRA_VERSION)" \ + --prefix=$(abspath $(POSTGRES_INSTALL_DIR))/$$VERSION > configure.log) + +# nicer alias to run 'configure'. +# +# This tries to accomplish this rule: +# +# postgres-configure-%: $(BUILD_DIR)/%/config.status +# +# XXX: I'm not sure why the above rule doesn't work directly. 
But this accomplishses +# the same thing +$(foreach pg_version,$(POSTGRES_VERSIONS),postgres-configure-$(pg_version)): postgres-configure-%: FORCE $(BUILD_DIR)/%/config.status + +# Compile and install PostgreSQL (and a few contrib modules used in tests) +postgres-install-%: postgres-configure-% \ + postgres-headers-install-% # to prevent `make install` conflicts with neon's `postgres-headers-install` + +@echo "Compiling PostgreSQL $*" + $(MAKE) -C $(BUILD_DIR)/$* MAKELEVEL=0 install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pg_prewarm install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pg_buffercache install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pg_visibility install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pageinspect install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pg_trgm install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/amcheck install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/test_decoding install + +.PHONY: FORCE +FORCE: diff --git a/proxy/README.md b/proxy/README.md index 583db36f28..e10ff3d710 100644 --- a/proxy/README.md +++ b/proxy/README.md @@ -138,3 +138,62 @@ Now from client you can start a new session: ```sh PGSSLROOTCERT=./server.crt psql "postgresql://proxy:password@endpoint.local.neon.build:4432/postgres?sslmode=verify-full" ``` + +## auth broker setup: + +Create a postgres instance: +```sh +docker run \ + --detach \ + --name proxy-postgres \ + --env POSTGRES_HOST_AUTH_METHOD=trust \ + --env POSTGRES_USER=authenticated \ + --env POSTGRES_DB=database \ + --publish 5432:5432 \ + postgres:17-bookworm +``` + +Create a configuration file called `local_proxy.json` in the root of the repo (used also by the auth broker to validate JWTs) +```sh +{ + "jwks": [ + { + "id": "1", + "role_names": ["authenticator", "authenticated", "anon"], + "jwks_url": "https://climbing-minnow-11.clerk.accounts.dev/.well-known/jwks.json", + "provider_name": "foo", + "jwt_audience": null + } + ] +} +``` + +Start the local proxy: +```sh +cargo run --bin local_proxy -- \ + --disable_pg_session_jwt true \ + --http 0.0.0.0:7432 +``` + +Start the auth broker: +```sh +LOGFMT=text OTEL_SDK_DISABLED=true cargo run --bin proxy --features testing -- \ + -c server.crt -k server.key \ + --is-auth-broker true \ + --wss 0.0.0.0:8080 \ + --http 0.0.0.0:7002 \ + --auth-backend local +``` + +Create a JWT in your auth provider (e.g. Clerk) and set it in the `NEON_JWT` environment variable. +```sh +export NEON_JWT="..." 
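+# The token must be signed by a key served from the jwks_url configured in
+# local_proxy.json above, and the role you connect as below ("authenticator")
+# should appear in that entry's role_names.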
+``` + +Run a query against the auth broker: +```sh +curl -k "https://foo.local.neon.build:8080/sql" \ + -H "Authorization: Bearer $NEON_JWT" \ + -H "neon-connection-string: postgresql://authenticator@foo.local.neon.build/database" \ + -d '{"query":"select 1","params":[]}' +``` diff --git a/proxy/src/auth/backend/classic.rs b/proxy/src/auth/backend/classic.rs index 8445368740..f35b3ecc05 100644 --- a/proxy/src/auth/backend/classic.rs +++ b/proxy/src/auth/backend/classic.rs @@ -18,11 +18,6 @@ pub(super) async fn authenticate( secret: AuthSecret, ) -> auth::Result { let scram_keys = match secret { - #[cfg(any(test, feature = "testing"))] - AuthSecret::Md5(_) => { - debug!("auth endpoint chooses MD5"); - return Err(auth::AuthError::MalformedPassword("MD5 not supported")); - } AuthSecret::Scram(secret) => { debug!("auth endpoint chooses SCRAM"); diff --git a/proxy/src/auth/backend/console_redirect.rs b/proxy/src/auth/backend/console_redirect.rs index c388848926..f561df9202 100644 --- a/proxy/src/auth/backend/console_redirect.rs +++ b/proxy/src/auth/backend/console_redirect.rs @@ -6,18 +6,17 @@ use thiserror::Error; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::{info, info_span}; -use super::ComputeCredentialKeys; -use crate::auth::IpPattern; use crate::auth::backend::ComputeUserInfo; use crate::cache::Cached; +use crate::compute::AuthInfo; use crate::config::AuthenticationConfig; use crate::context::RequestContext; use crate::control_plane::client::cplane_proxy_v1; use crate::control_plane::{self, CachedNodeInfo, NodeInfo}; use crate::error::{ReportableError, UserFacingError}; -use crate::pglb::connect_compute::ComputeConnectBackend; use crate::pqproto::BeMessage; use crate::proxy::NeonOptions; +use crate::proxy::wake_compute::WakeComputeBackend; use crate::stream::PqStream; use crate::types::RoleName; use crate::{auth, compute, waiters}; @@ -98,15 +97,11 @@ impl ConsoleRedirectBackend { ctx: &RequestContext, auth_config: &'static AuthenticationConfig, client: &mut PqStream, - ) -> auth::Result<( - ConsoleRedirectNodeInfo, - ComputeUserInfo, - Option>, - )> { + ) -> auth::Result<(ConsoleRedirectNodeInfo, AuthInfo, ComputeUserInfo)> { authenticate(ctx, auth_config, &self.console_uri, client) .await - .map(|(node_info, user_info, ip_allowlist)| { - (ConsoleRedirectNodeInfo(node_info), user_info, ip_allowlist) + .map(|(node_info, auth_info, user_info)| { + (ConsoleRedirectNodeInfo(node_info), auth_info, user_info) }) } } @@ -114,17 +109,13 @@ impl ConsoleRedirectBackend { pub struct ConsoleRedirectNodeInfo(pub(super) NodeInfo); #[async_trait] -impl ComputeConnectBackend for ConsoleRedirectNodeInfo { +impl WakeComputeBackend for ConsoleRedirectNodeInfo { async fn wake_compute( &self, _ctx: &RequestContext, ) -> Result { Ok(Cached::new_uncached(self.0.clone())) } - - fn get_keys(&self) -> &ComputeCredentialKeys { - &ComputeCredentialKeys::None - } } async fn authenticate( @@ -132,7 +123,7 @@ async fn authenticate( auth_config: &'static AuthenticationConfig, link_uri: &reqwest::Url, client: &mut PqStream, -) -> auth::Result<(NodeInfo, ComputeUserInfo, Option>)> { +) -> auth::Result<(NodeInfo, AuthInfo, ComputeUserInfo)> { ctx.set_auth_method(crate::context::AuthMethod::ConsoleRedirect); // registering waiter can fail if we get unlucky with rng. @@ -173,29 +164,42 @@ async fn authenticate( })? 
.map_err(ConsoleRedirectError::from)?; - if auth_config.ip_allowlist_check_enabled { - if let Some(allowed_ips) = &db_info.allowed_ips { - if !auth::check_peer_addr_is_in_list(&ctx.peer_addr(), allowed_ips) { - return Err(auth::AuthError::ip_address_not_allowed(ctx.peer_addr())); - } - } + if auth_config.ip_allowlist_check_enabled + && let Some(allowed_ips) = &db_info.allowed_ips + && !auth::check_peer_addr_is_in_list(&ctx.peer_addr(), allowed_ips) + { + return Err(auth::AuthError::ip_address_not_allowed(ctx.peer_addr())); } // Check if the access over the public internet is allowed, otherwise block. Note that // the console redirect is not behind the VPC service endpoint, so we don't need to check // the VPC endpoint ID. - if let Some(public_access_allowed) = db_info.public_access_allowed { - if !public_access_allowed { - return Err(auth::AuthError::NetworkNotAllowed); - } + if let Some(public_access_allowed) = db_info.public_access_allowed + && !public_access_allowed + { + return Err(auth::AuthError::NetworkNotAllowed); } client.write_message(BeMessage::NoticeResponse("Connecting to database.")); - // This config should be self-contained, because we won't - // take username or dbname from client's startup message. - let mut config = compute::ConnCfg::new(db_info.host.to_string(), db_info.port); - config.dbname(&db_info.dbname).user(&db_info.user); + // Backwards compatibility. pg_sni_proxy uses "--" in domain names + // while direct connections do not. Once we migrate to pg_sni_proxy + // everywhere, we can remove this. + let ssl_mode = if db_info.host.contains("--") { + // we need TLS connection with SNI info to properly route it + SslMode::Require + } else { + SslMode::Disable + }; + + let conn_info = compute::ConnectInfo { + host: db_info.host.into(), + port: db_info.port, + ssl_mode, + host_addr: None, + }; + let auth_info = + AuthInfo::for_console_redirect(&db_info.dbname, &db_info.user, db_info.password.as_deref()); let user: RoleName = db_info.user.into(); let user_info = ComputeUserInfo { @@ -209,26 +213,12 @@ async fn authenticate( ctx.set_project(db_info.aux.clone()); info!("woken up a compute node"); - // Backwards compatibility. pg_sni_proxy uses "--" in domain names - // while direct connections do not. Once we migrate to pg_sni_proxy - // everywhere, we can remove this. 
- if db_info.host.contains("--") { - // we need TLS connection with SNI info to properly route it - config.ssl_mode(SslMode::Require); - } else { - config.ssl_mode(SslMode::Disable); - } - - if let Some(password) = db_info.password { - config.password(password.as_ref()); - } - Ok(( NodeInfo { - config, + conn_info, aux: db_info.aux, }, + auth_info, user_info, - db_info.allowed_ips, )) } diff --git a/proxy/src/auth/backend/jwt.rs b/proxy/src/auth/backend/jwt.rs index a48f67199a..a716890a00 100644 --- a/proxy/src/auth/backend/jwt.rs +++ b/proxy/src/auth/backend/jwt.rs @@ -4,6 +4,8 @@ use std::sync::Arc; use std::time::{Duration, SystemTime}; use arc_swap::ArcSwapOption; +use base64::Engine as _; +use base64::prelude::BASE64_URL_SAFE_NO_PAD; use clashmap::ClashMap; use jose_jwk::crypto::KeyInfo; use reqwest::{Client, redirect}; @@ -347,17 +349,17 @@ impl JwkCacheEntryLock { .split_once('.') .ok_or(JwtEncodingError::InvalidCompactForm)?; - let header = base64::decode_config(header, base64::URL_SAFE_NO_PAD)?; + let header = BASE64_URL_SAFE_NO_PAD.decode(header)?; let header = serde_json::from_slice::>(&header)?; - let payloadb = base64::decode_config(payload, base64::URL_SAFE_NO_PAD)?; + let payloadb = BASE64_URL_SAFE_NO_PAD.decode(payload)?; let payload = serde_json::from_slice::>(&payloadb)?; if let Some(iss) = &payload.issuer { ctx.set_jwt_issuer(iss.as_ref().to_owned()); } - let sig = base64::decode_config(signature, base64::URL_SAFE_NO_PAD)?; + let sig = BASE64_URL_SAFE_NO_PAD.decode(signature)?; let kid = header.key_id.ok_or(JwtError::MissingKeyId)?; @@ -397,36 +399,36 @@ impl JwkCacheEntryLock { tracing::debug!(?payload, "JWT signature valid with claims"); - if let Some(aud) = expected_audience { - if payload.audience.0.iter().all(|s| s != aud) { - return Err(JwtError::InvalidClaims( - JwtClaimsError::InvalidJwtTokenAudience, - )); - } + if let Some(aud) = expected_audience + && payload.audience.0.iter().all(|s| s != aud) + { + return Err(JwtError::InvalidClaims( + JwtClaimsError::InvalidJwtTokenAudience, + )); } let now = SystemTime::now(); - if let Some(exp) = payload.expiration { - if now >= exp + CLOCK_SKEW_LEEWAY { - return Err(JwtError::InvalidClaims(JwtClaimsError::JwtTokenHasExpired( - exp.duration_since(SystemTime::UNIX_EPOCH) - .unwrap_or_default() - .as_secs(), - ))); - } + if let Some(exp) = payload.expiration + && now >= exp + CLOCK_SKEW_LEEWAY + { + return Err(JwtError::InvalidClaims(JwtClaimsError::JwtTokenHasExpired( + exp.duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(), + ))); } - if let Some(nbf) = payload.not_before { - if nbf >= now + CLOCK_SKEW_LEEWAY { - return Err(JwtError::InvalidClaims( - JwtClaimsError::JwtTokenNotYetReadyToUse( - nbf.duration_since(SystemTime::UNIX_EPOCH) - .unwrap_or_default() - .as_secs(), - ), - )); - } + if let Some(nbf) = payload.not_before + && nbf >= now + CLOCK_SKEW_LEEWAY + { + return Err(JwtError::InvalidClaims( + JwtClaimsError::JwtTokenNotYetReadyToUse( + nbf.duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(), + ), + )); } Ok(ComputeCredentialKeys::JwtPayload(payloadb)) @@ -796,7 +798,6 @@ mod tests { use std::net::SocketAddr; use std::time::SystemTime; - use base64::URL_SAFE_NO_PAD; use bytes::Bytes; use http::Response; use http_body_util::Full; @@ -871,9 +872,8 @@ mod tests { key_id: Some(Cow::Owned(kid)), }; - let header = - base64::encode_config(serde_json::to_string(&header).unwrap(), URL_SAFE_NO_PAD); - let body = base64::encode_config(serde_json::to_string(&body).unwrap(), 
URL_SAFE_NO_PAD); + let header = BASE64_URL_SAFE_NO_PAD.encode(serde_json::to_string(&header).unwrap()); + let body = BASE64_URL_SAFE_NO_PAD.encode(serde_json::to_string(&body).unwrap()); format!("{header}.{body}") } @@ -883,7 +883,7 @@ mod tests { let payload = build_jwt_payload(kid, jose_jwa::Signing::Es256); let sig: Signature = SigningKey::from(key).sign(payload.as_bytes()); - let sig = base64::encode_config(sig.to_bytes(), URL_SAFE_NO_PAD); + let sig = BASE64_URL_SAFE_NO_PAD.encode(sig.to_bytes()); format!("{payload}.{sig}") } @@ -893,7 +893,7 @@ mod tests { let payload = build_custom_jwt_payload(kid, body, jose_jwa::Signing::Es256); let sig: Signature = SigningKey::from(key).sign(payload.as_bytes()); - let sig = base64::encode_config(sig.to_bytes(), URL_SAFE_NO_PAD); + let sig = BASE64_URL_SAFE_NO_PAD.encode(sig.to_bytes()); format!("{payload}.{sig}") } @@ -904,7 +904,7 @@ mod tests { let payload = build_jwt_payload(kid, jose_jwa::Signing::Rs256); let sig = SigningKey::::new(key).sign(payload.as_bytes()); - let sig = base64::encode_config(sig.to_bytes(), URL_SAFE_NO_PAD); + let sig = BASE64_URL_SAFE_NO_PAD.encode(sig.to_bytes()); format!("{payload}.{sig}") } diff --git a/proxy/src/auth/backend/local.rs b/proxy/src/auth/backend/local.rs index 7a6dceb194..2224f492b8 100644 --- a/proxy/src/auth/backend/local.rs +++ b/proxy/src/auth/backend/local.rs @@ -1,11 +1,12 @@ use std::net::SocketAddr; use arc_swap::ArcSwapOption; +use postgres_client::config::SslMode; use tokio::sync::Semaphore; use super::jwt::{AuthRule, FetchAuthRules}; use crate::auth::backend::jwt::FetchAuthRulesError; -use crate::compute::ConnCfg; +use crate::compute::ConnectInfo; use crate::compute_ctl::ComputeCtlApi; use crate::context::RequestContext; use crate::control_plane::NodeInfo; @@ -29,7 +30,12 @@ impl LocalBackend { api: http::Endpoint::new(compute_ctl, http::new_client()), }, node_info: NodeInfo { - config: ConnCfg::new(postgres_addr.ip().to_string(), postgres_addr.port()), + conn_info: ConnectInfo { + host_addr: Some(postgres_addr.ip()), + host: postgres_addr.ip().to_string().into(), + port: postgres_addr.port(), + ssl_mode: SslMode::Disable, + }, // TODO(conrad): make this better reflect compute info rather than endpoint info. 
aux: MetricsAuxInfo { endpoint_id: EndpointIdTag::get_interner().get_or_intern("local"), diff --git a/proxy/src/auth/backend/mod.rs b/proxy/src/auth/backend/mod.rs index f978f655c4..e7805d8bfe 100644 --- a/proxy/src/auth/backend/mod.rs +++ b/proxy/src/auth/backend/mod.rs @@ -14,20 +14,21 @@ use serde::{Deserialize, Serialize}; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::{debug, info}; -use crate::auth::{self, AuthError, ComputeUserInfoMaybeEndpoint, validate_password_and_exchange}; +use crate::auth::{self, ComputeUserInfoMaybeEndpoint, validate_password_and_exchange}; use crate::cache::Cached; use crate::config::AuthenticationConfig; use crate::context::RequestContext; use crate::control_plane::client::ControlPlaneClient; use crate::control_plane::errors::GetAuthInfoError; +use crate::control_plane::messages::EndpointRateLimitConfig; use crate::control_plane::{ self, AccessBlockerFlags, AuthSecret, CachedNodeInfo, ControlPlaneApi, EndpointAccessControl, RoleAccessControl, }; use crate::intern::EndpointIdInt; -use crate::pglb::connect_compute::ComputeConnectBackend; use crate::pqproto::BeMessage; use crate::proxy::NeonOptions; +use crate::proxy::wake_compute::WakeComputeBackend; use crate::rate_limiter::EndpointRateLimiter; use crate::stream::Stream; use crate::types::{EndpointCacheKey, EndpointId, RoleName}; @@ -168,11 +169,8 @@ impl ComputeUserInfo { #[cfg_attr(test, derive(Debug))] pub(crate) enum ComputeCredentialKeys { - #[cfg(any(test, feature = "testing"))] - Password(Vec), AuthKeys(AuthKeys), JwtPayload(Vec), - None, } impl TryFrom for ComputeUserInfo { @@ -232,11 +230,8 @@ async fn auth_quirks( config.is_vpc_acccess_proxy, )?; - let endpoint = EndpointIdInt::from(&info.endpoint); - let rate_limit_config = None; - if !endpoint_rate_limiter.check(endpoint, rate_limit_config, 1) { - return Err(AuthError::too_many_connections()); - } + access_controls.connection_attempt_rate_limit(ctx, &info.endpoint, &endpoint_rate_limiter)?; + let role_access = api .get_role_access_control(ctx, &info.endpoint, &info.user) .await?; @@ -350,15 +345,13 @@ impl<'a> Backend<'a, ComputeUserInfoMaybeEndpoint> { Err(e) => { // The password could have been changed, so we invalidate the cache. // We should only invalidate the cache if the TTL might have expired. 
- if e.is_password_failed() { - #[allow(irrefutable_let_patterns)] - if let ControlPlaneClient::ProxyV1(api) = &*api { - if let Some(ep) = &user_info.endpoint_id { - api.caches - .project_info - .maybe_invalidate_role_secret(ep, &user_info.user); - } - } + if e.is_password_failed() + && let ControlPlaneClient::ProxyV1(api) = &*api + && let Some(ep) = &user_info.endpoint_id + { + api.caches + .project_info + .maybe_invalidate_role_secret(ep, &user_info.user); } Err(e) @@ -403,29 +396,23 @@ impl Backend<'_, ComputeUserInfo> { allowed_ips: Arc::new(vec![]), allowed_vpce: Arc::new(vec![]), flags: AccessBlockerFlags::default(), + rate_limits: EndpointRateLimitConfig::default(), }), } } } #[async_trait::async_trait] -impl ComputeConnectBackend for Backend<'_, ComputeCredentials> { +impl WakeComputeBackend for Backend<'_, ComputeUserInfo> { async fn wake_compute( &self, ctx: &RequestContext, ) -> Result { match self { - Self::ControlPlane(api, creds) => api.wake_compute(ctx, &creds.info).await, + Self::ControlPlane(api, info) => api.wake_compute(ctx, info).await, Self::Local(local) => Ok(Cached::new_uncached(local.node_info.clone())), } } - - fn get_keys(&self) -> &ComputeCredentialKeys { - match self { - Self::ControlPlane(_, creds) => &creds.keys, - Self::Local(_) => &ComputeCredentialKeys::None, - } - } } #[cfg(test)] @@ -448,6 +435,7 @@ mod tests { use crate::auth::{ComputeUserInfoMaybeEndpoint, IpPattern}; use crate::config::AuthenticationConfig; use crate::context::RequestContext; + use crate::control_plane::messages::EndpointRateLimitConfig; use crate::control_plane::{ self, AccessBlockerFlags, CachedNodeInfo, EndpointAccessControl, RoleAccessControl, }; @@ -486,6 +474,7 @@ mod tests { allowed_ips: Arc::new(self.ips.clone()), allowed_vpce: Arc::new(self.vpc_endpoint_ids.clone()), flags: self.access_blocker_flags, + rate_limits: EndpointRateLimitConfig::default(), }) } diff --git a/proxy/src/auth/flow.rs b/proxy/src/auth/flow.rs index 8fbc4577e9..c825d5bf4b 100644 --- a/proxy/src/auth/flow.rs +++ b/proxy/src/auth/flow.rs @@ -169,13 +169,6 @@ pub(crate) async fn validate_password_and_exchange( secret: AuthSecret, ) -> super::Result> { match secret { - #[cfg(any(test, feature = "testing"))] - AuthSecret::Md5(_) => { - // test only - Ok(sasl::Outcome::Success(ComputeCredentialKeys::Password( - password.to_owned(), - ))) - } // perform scram authentication as both client and server to validate the keys AuthSecret::Scram(scram_secret) => { let outcome = crate::scram::exchange(pool, endpoint, &scram_secret, password).await?; diff --git a/proxy/src/batch.rs b/proxy/src/batch.rs new file mode 100644 index 0000000000..33e08797f2 --- /dev/null +++ b/proxy/src/batch.rs @@ -0,0 +1,180 @@ +//! Batch processing system based on intrusive linked lists. +//! +//! Enqueuing a batch job requires no allocations, with +//! direct support for cancelling jobs early. +use std::collections::BTreeMap; +use std::pin::pin; +use std::sync::Mutex; + +use scopeguard::ScopeGuard; +use tokio::sync::oneshot::error::TryRecvError; + +use crate::ext::LockExt; + +pub trait QueueProcessing: Send + 'static { + type Req: Send + 'static; + type Res: Send; + + /// Get the desired batch size. + fn batch_size(&self, queue_size: usize) -> usize; + + /// This applies a full batch of events. + /// Must respond with a full batch of replies. + /// + /// If this apply can error, it's expected that errors be forwarded to each Self::Res. + /// + /// Batching does not need to happen atomically. 
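+ ///
+ /// As a usage sketch only (the `Echo` processor below is hypothetical and the
+ /// exact `call` signature is abbreviated here), a processor and its shared
+ /// queue could look like this:
+ ///
+ /// ```ignore
+ /// struct Echo;
+ ///
+ /// impl QueueProcessing for Echo {
+ ///     type Req = u32;
+ ///     type Res = u32;
+ ///
+ ///     fn batch_size(&self, _queue_size: usize) -> usize {
+ ///         16
+ ///     }
+ ///
+ ///     // Echo every request back as its own response, in order.
+ ///     fn apply(&mut self, req: Vec<u32>) -> impl std::future::Future<Output = Vec<u32>> + Send {
+ ///         std::future::ready(req)
+ ///     }
+ /// }
+ ///
+ /// // All callers share one BatchQueue; whichever caller takes the processor
+ /// // lock becomes the leader, drains a batch and fans the responses back out
+ /// // to the waiting callers. (Inside an async context.)
+ /// let queue = BatchQueue::new(Echo);
+ /// let res = queue.call(42, std::future::pending::<()>()).await;
+ /// ```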
+ fn apply(&mut self, req: Vec<Self::Req>) -> impl Future<Output = Vec<Self::Res>> + Send; +} + +pub struct BatchQueue<P: QueueProcessing> { + processor: tokio::sync::Mutex<P>, + inner: Mutex<BatchQueueInner<P>>, +} + +struct BatchJob<P: QueueProcessing> { + req: P::Req, + res: tokio::sync::oneshot::Sender<P::Res>, +} + +impl<P: QueueProcessing> BatchQueue<P>
{ + pub fn new(p: P) -> Self { + Self { + processor: tokio::sync::Mutex::new(p), + inner: Mutex::new(BatchQueueInner { + version: 0, + queue: BTreeMap::new(), + }), + } + } + + /// Perform a single request-response process, this may be batched internally. + /// + /// This function is not cancel safe. + pub async fn call( + &self, + req: P::Req, + cancelled: impl Future, + ) -> Result { + let (id, mut rx) = self.inner.lock_propagate_poison().register_job(req); + + let mut cancelled = pin!(cancelled); + let resp = loop { + // try become the leader, or try wait for success. + let mut processor = tokio::select! { + // try become leader. + p = self.processor.lock() => p, + // wait for success. + resp = &mut rx => break resp.ok(), + // wait for cancellation. + cancel = cancelled.as_mut() => { + let mut inner = self.inner.lock_propagate_poison(); + if inner.queue.remove(&id).is_some() { + tracing::warn!("batched task cancelled before completion"); + } + return Err(cancel); + }, + }; + + tracing::debug!(id, "batch: became leader"); + let (reqs, resps) = self.inner.lock_propagate_poison().get_batch(&processor); + + // snitch incase the task gets cancelled. + let cancel_safety = scopeguard::guard((), |()| { + if !std::thread::panicking() { + tracing::error!( + id, + "batch: leader cancelled, despite not being cancellation safe" + ); + } + }); + + // apply a batch. + // if this is cancelled, jobs will not be completed and will panic. + let values = processor.apply(reqs).await; + + // good: we didn't get cancelled. + ScopeGuard::into_inner(cancel_safety); + + if values.len() != resps.len() { + tracing::error!( + "batch: invalid response size, expected={}, got={}", + resps.len(), + values.len() + ); + } + + // send response values. + for (tx, value) in std::iter::zip(resps, values) { + if tx.send(value).is_err() { + // receiver hung up but that's fine. + } + } + + match rx.try_recv() { + Ok(resp) => break Some(resp), + Err(TryRecvError::Closed) => break None, + // edge case - there was a race condition where + // we became the leader but were not in the batch. + // + // Example: + // thread 1: register job id=1 + // thread 2: register job id=2 + // thread 2: processor.lock().await + // thread 1: processor.lock().await + // thread 2: becomes leader, batch_size=1, jobs=[1]. + Err(TryRecvError::Empty) => {} + } + }; + + tracing::debug!(id, "batch: job completed"); + + Ok(resp.expect("no response found. batch processer should not panic")) + } +} + +struct BatchQueueInner { + version: u64, + queue: BTreeMap>, +} + +impl BatchQueueInner

{ + fn register_job(&mut self, req: P::Req) -> (u64, tokio::sync::oneshot::Receiver) { + let (tx, rx) = tokio::sync::oneshot::channel(); + + let id = self.version; + + // Overflow concern: + // This is a u64, and we might enqueue 2^16 tasks per second. + // This gives us 2^48 seconds (9 million years). + // Even if this does overflow, it will not break, but some + // jobs with the higher version might never get prioritised. + self.version += 1; + + self.queue.insert(id, BatchJob { req, res: tx }); + + tracing::debug!(id, "batch: registered job in the queue"); + + (id, rx) + } + + fn get_batch(&mut self, p: &P) -> (Vec, Vec>) { + let batch_size = p.batch_size(self.queue.len()); + let mut reqs = Vec::with_capacity(batch_size); + let mut resps = Vec::with_capacity(batch_size); + let mut ids = Vec::with_capacity(batch_size); + + while reqs.len() < batch_size { + let Some((id, job)) = self.queue.pop_first() else { + break; + }; + reqs.push(job.req); + resps.push(job.res); + ids.push(id); + } + + tracing::debug!(ids=?ids, "batch: acquired jobs"); + + (reqs, resps) + } +} diff --git a/proxy/src/binary/local_proxy.rs b/proxy/src/binary/local_proxy.rs index ba10fce7b4..401203d48c 100644 --- a/proxy/src/binary/local_proxy.rs +++ b/proxy/src/binary/local_proxy.rs @@ -1,43 +1,37 @@ use std::net::SocketAddr; use std::pin::pin; -use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use anyhow::{Context, bail, ensure}; +use anyhow::bail; use arc_swap::ArcSwapOption; -use camino::{Utf8Path, Utf8PathBuf}; +use camino::Utf8PathBuf; use clap::Parser; -use compute_api::spec::LocalProxySpec; use futures::future::Either; -use thiserror::Error; use tokio::net::TcpListener; use tokio::sync::Notify; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, info}; use utils::sentry_init::init_sentry; use utils::{pid_file, project_build_tag, project_git_version}; use crate::auth::backend::jwt::JwkCache; -use crate::auth::backend::local::{JWKS_ROLE_MAP, LocalBackend}; +use crate::auth::backend::local::LocalBackend; use crate::auth::{self}; use crate::cancellation::CancellationHandler; use crate::config::{ self, AuthenticationConfig, ComputeConfig, HttpConfig, ProxyConfig, RetryConfig, + refresh_config_loop, }; use crate::control_plane::locks::ApiLocks; -use crate::control_plane::messages::{EndpointJwksResponse, JwksSettings}; -use crate::ext::TaskExt; use crate::http::health_server::AppMetrics; -use crate::intern::RoleNameInt; use crate::metrics::{Metrics, ThreadPoolMetrics}; use crate::rate_limiter::{EndpointRateLimiter, LeakyBucketConfig, RateBucketInfo}; use crate::scram::threadpool::ThreadPool; use crate::serverless::cancel_set::CancelSet; use crate::serverless::{self, GlobalConnPoolOptions}; use crate::tls::client_config::compute_client_config_with_root_certs; -use crate::types::RoleName; use crate::url::ApiUrl; project_git_version!(GIT_VERSION); @@ -82,6 +76,11 @@ struct LocalProxyCliArgs { /// Path of the local proxy PID file #[clap(long, default_value = "./local_proxy.pid")] pid_path: Utf8PathBuf, + /// Disable pg_session_jwt extension installation + /// This is useful for testing the local proxy with vanilla postgres. 
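+    /// Example invocation (flag name assumed from clap's default kebab-case mapping;
+    /// illustrative only): `local_proxy --disable-pg-session-jwt`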
+ #[clap(long, default_value = "false")] + #[cfg(feature = "testing")] + disable_pg_session_jwt: bool, } #[derive(clap::Args, Clone, Copy, Debug)] @@ -201,7 +200,7 @@ pub async fn run() -> anyhow::Result<()> { auth_backend, http_listener, shutdown.clone(), - Arc::new(CancellationHandler::new(&config.connect_to_compute, None)), + Arc::new(CancellationHandler::new(&config.connect_to_compute)), endpoint_rate_limiter, ); @@ -279,10 +278,11 @@ fn build_config(args: &LocalProxyCliArgs) -> anyhow::Result<&'static ProxyConfig }, proxy_protocol_v2: config::ProxyProtocolV2::Rejected, handshake_timeout: Duration::from_secs(10), - region: "local".into(), wake_compute_retry_config: RetryConfig::parse(RetryConfig::WAKE_COMPUTE_DEFAULT_VALUES)?, connect_compute_locks, connect_to_compute: compute_config, + #[cfg(feature = "testing")] + disable_pg_session_jwt: args.disable_pg_session_jwt, }))) } @@ -294,132 +294,3 @@ fn build_auth_backend(args: &LocalProxyCliArgs) -> &'static auth::Backend<'stati Box::leak(Box::new(auth_backend)) } - -#[derive(Error, Debug)] -enum RefreshConfigError { - #[error(transparent)] - Read(#[from] std::io::Error), - #[error(transparent)] - Parse(#[from] serde_json::Error), - #[error(transparent)] - Validate(anyhow::Error), - #[error(transparent)] - Tls(anyhow::Error), -} - -async fn refresh_config_loop(config: &ProxyConfig, path: Utf8PathBuf, rx: Arc) { - let mut init = true; - loop { - rx.notified().await; - - match refresh_config_inner(config, &path).await { - Ok(()) => {} - // don't log for file not found errors if this is the first time we are checking - // for computes that don't use local_proxy, this is not an error. - Err(RefreshConfigError::Read(e)) - if init && e.kind() == std::io::ErrorKind::NotFound => - { - debug!(error=?e, ?path, "could not read config file"); - } - Err(RefreshConfigError::Tls(e)) => { - error!(error=?e, ?path, "could not read TLS certificates"); - } - Err(e) => { - error!(error=?e, ?path, "could not read config file"); - } - } - - init = false; - } -} - -async fn refresh_config_inner( - config: &ProxyConfig, - path: &Utf8Path, -) -> Result<(), RefreshConfigError> { - let bytes = tokio::fs::read(&path).await?; - let data: LocalProxySpec = serde_json::from_slice(&bytes)?; - - let mut jwks_set = vec![]; - - fn parse_jwks_settings(jwks: compute_api::spec::JwksSettings) -> anyhow::Result { - let mut jwks_url = url::Url::from_str(&jwks.jwks_url).context("parsing JWKS url")?; - - ensure!( - jwks_url.has_authority() - && (jwks_url.scheme() == "http" || jwks_url.scheme() == "https"), - "Invalid JWKS url. Must be HTTP", - ); - - ensure!( - jwks_url.host().is_some_and(|h| h != url::Host::Domain("")), - "Invalid JWKS url. No domain listed", - ); - - // clear username, password and ports - jwks_url - .set_username("") - .expect("url can be a base and has a valid host and is not a file. should not error"); - jwks_url - .set_password(None) - .expect("url can be a base and has a valid host and is not a file. should not error"); - // local testing is hard if we need to have a specific restricted port - if cfg!(not(feature = "testing")) { - jwks_url.set_port(None).expect( - "url can be a base and has a valid host and is not a file. should not error", - ); - } - - // clear query params - jwks_url.set_fragment(None); - jwks_url.query_pairs_mut().clear().finish(); - - if jwks_url.scheme() != "https" { - // local testing is hard if we need to set up https support. 
- if cfg!(not(feature = "testing")) { - jwks_url - .set_scheme("https") - .expect("should not error to set the scheme to https if it was http"); - } else { - warn!(scheme = jwks_url.scheme(), "JWKS url is not HTTPS"); - } - } - - Ok(JwksSettings { - id: jwks.id, - jwks_url, - _provider_name: jwks.provider_name, - jwt_audience: jwks.jwt_audience, - role_names: jwks - .role_names - .into_iter() - .map(RoleName::from) - .map(|s| RoleNameInt::from(&s)) - .collect(), - }) - } - - for jwks in data.jwks.into_iter().flatten() { - jwks_set.push(parse_jwks_settings(jwks).map_err(RefreshConfigError::Validate)?); - } - - info!("successfully loaded new config"); - JWKS_ROLE_MAP.store(Some(Arc::new(EndpointJwksResponse { jwks: jwks_set }))); - - if let Some(tls_config) = data.tls { - let tls_config = tokio::task::spawn_blocking(move || { - crate::tls::server_config::configure_tls( - tls_config.key_path.as_ref(), - tls_config.cert_path.as_ref(), - None, - false, - ) - }) - .await - .propagate_task_panic() - .map_err(RefreshConfigError::Tls)?; - config.tls_config.store(Some(Arc::new(tls_config))); - } - - Ok(()) -} diff --git a/proxy/src/binary/pg_sni_router.rs b/proxy/src/binary/pg_sni_router.rs index a4f517fead..4ac8b6a995 100644 --- a/proxy/src/binary/pg_sni_router.rs +++ b/proxy/src/binary/pg_sni_router.rs @@ -4,6 +4,7 @@ //! This allows connecting to pods/services running in the same Kubernetes cluster from //! the outside. Similar to an ingress controller for HTTPS. +use std::io; use std::net::SocketAddr; use std::path::Path; use std::sync::Arc; @@ -26,12 +27,12 @@ use utils::sentry_init::init_sentry; use crate::context::RequestContext; use crate::metrics::{Metrics, ThreadPoolMetrics}; +use crate::pglb::TlsRequired; use crate::pqproto::FeStartupPacket; use crate::protocol2::ConnectionInfo; -use crate::proxy::{ - ErrorSource, TlsRequired, copy_bidirectional_client_compute, run_until_cancelled, -}; +use crate::proxy::{ErrorSource, copy_bidirectional_client_compute}; use crate::stream::{PqStream, Stream}; +use crate::util::run_until_cancelled; project_git_version!(GIT_VERSION); @@ -229,7 +230,6 @@ pub(super) async fn task_main( .set_nodelay(true) .context("failed to set socket option")?; - info!(%peer_addr, "serving"); let ctx = RequestContext::new( session_id, ConnectionInfo { @@ -237,11 +237,18 @@ pub(super) async fn task_main( extra: None, }, crate::metrics::Protocol::SniRouter, - "sni", ); handle_client(ctx, dest_suffix, tls_config, compute_tls_config, socket).await } .unwrap_or_else(|e| { + if let Some(FirstMessage(io_error)) = e.downcast_ref() { + // this is noisy. if we get EOF on the very first message that's likely + // just NLB doing a healthcheck. + if io_error.kind() == io::ErrorKind::UnexpectedEof { + return; + } + } + // Acknowledge that the task has finished with an error. 
error!("per-client task finished with an error: {e:#}"); }) @@ -258,12 +265,19 @@ pub(super) async fn task_main( Ok(()) } +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +struct FirstMessage(io::Error); + async fn ssl_handshake( ctx: &RequestContext, raw_stream: S, tls_config: Arc, ) -> anyhow::Result> { - let (mut stream, msg) = PqStream::parse_startup(Stream::from_raw(raw_stream)).await?; + let (mut stream, msg) = PqStream::parse_startup(Stream::from_raw(raw_stream)) + .await + .map_err(FirstMessage)?; + match msg { FeStartupPacket::SslRequest { direct: None } => { let raw = stream.accept_tls().await?; diff --git a/proxy/src/binary/proxy.rs b/proxy/src/binary/proxy.rs index 757c1e988b..c10678dc68 100644 --- a/proxy/src/binary/proxy.rs +++ b/proxy/src/binary/proxy.rs @@ -10,18 +10,29 @@ use std::time::Duration; use anyhow::Context; use anyhow::{bail, ensure}; use arc_swap::ArcSwapOption; +#[cfg(any(test, feature = "testing"))] +use camino::Utf8PathBuf; use futures::future::Either; +use itertools::{Itertools, Position}; +use rand::{Rng, thread_rng}; use remote_storage::RemoteStorageConfig; use tokio::net::TcpListener; +#[cfg(any(test, feature = "testing"))] +use tokio::sync::Notify; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; -use tracing::{Instrument, info, warn}; +use tracing::{Instrument, error, info, warn}; use utils::sentry_init::init_sentry; use utils::{project_build_tag, project_git_version}; use crate::auth::backend::jwt::JwkCache; +#[cfg(any(test, feature = "testing"))] +use crate::auth::backend::local::LocalBackend; use crate::auth::backend::{ConsoleRedirectBackend, MaybeOwned}; -use crate::cancellation::{CancellationHandler, handle_cancel_messages}; +use crate::batch::BatchQueue; +use crate::cancellation::{CancellationHandler, CancellationProcessor}; +#[cfg(any(test, feature = "testing"))] +use crate::config::refresh_config_loop; use crate::config::{ self, AuthenticationConfig, CacheOptions, ComputeConfig, HttpConfig, ProjectInfoCacheOptions, ProxyConfig, ProxyProtocolV2, remote_storage_from_toml, @@ -57,6 +68,9 @@ enum AuthBackendType { #[cfg(any(test, feature = "testing"))] Postgres, + + #[cfg(any(test, feature = "testing"))] + Local, } /// Neon proxy/router @@ -71,6 +85,10 @@ struct ProxyCliArgs { proxy: SocketAddr, #[clap(value_enum, long, default_value_t = AuthBackendType::ConsoleRedirect)] auth_backend: AuthBackendType, + /// Path of the local proxy config file (used for local-file auth backend) + #[clap(long, default_value = "./local_proxy.json")] + #[cfg(any(test, feature = "testing"))] + config_path: Utf8PathBuf, /// listen for management callback connection on ip:port #[clap(short, long, default_value = "127.0.0.1:7000")] mgmt: SocketAddr, @@ -120,12 +138,6 @@ struct ProxyCliArgs { /// timeout for the TLS handshake #[clap(long, default_value = "15s", value_parser = humantime::parse_duration)] handshake_timeout: tokio::time::Duration, - /// http endpoint to receive periodic metric updates - #[clap(long)] - metric_collection_endpoint: Option, - /// how often metrics should be sent to a collection endpoint - #[clap(long)] - metric_collection_interval: Option, /// cache for `wake_compute` api method (use `size=0` to disable) #[clap(long, default_value = config::CacheOptions::CACHE_DEFAULT_OPTIONS)] wake_compute_cache: String, @@ -152,40 +164,31 @@ struct ProxyCliArgs { /// Wake compute rate limiter max number of requests per second. 
#[clap(long, default_values_t = RateBucketInfo::DEFAULT_SET)] wake_compute_limit: Vec, - /// Redis rate limiter max number of requests per second. - #[clap(long, default_values_t = RateBucketInfo::DEFAULT_REDIS_SET)] - redis_rps_limit: Vec, /// Cancellation channel size (max queue size for redis kv client) #[clap(long, default_value_t = 1024)] cancellation_ch_size: usize, /// Cancellation ops batch size for redis #[clap(long, default_value_t = 8)] cancellation_batch_size: usize, - /// cache for `allowed_ips` (use `size=0` to disable) - #[clap(long, default_value = config::CacheOptions::CACHE_DEFAULT_OPTIONS)] - allowed_ips_cache: String, - /// cache for `role_secret` (use `size=0` to disable) - #[clap(long, default_value = config::CacheOptions::CACHE_DEFAULT_OPTIONS)] - role_secret_cache: String, - /// redis url for notifications (if empty, redis_host:port will be used for both notifications and streaming connections) - #[clap(long)] - redis_notifications: Option, - /// what from the available authentications type to use for the regional redis we have. Supported are "irsa" and "plain". + /// redis url for plain authentication + #[clap(long, alias("redis-notifications"))] + redis_plain: Option, + /// what from the available authentications type to use for redis. Supported are "irsa" and "plain". #[clap(long, default_value = "irsa")] redis_auth_type: String, - /// redis host for streaming connections (might be different from the notifications host) + /// redis host for irsa authentication #[clap(long)] redis_host: Option, - /// redis port for streaming connections (might be different from the notifications host) + /// redis port for irsa authentication #[clap(long)] redis_port: Option, - /// redis cluster name, used in aws elasticache + /// redis cluster name for irsa authentication #[clap(long)] redis_cluster_name: Option, - /// redis user_id, used in aws elasticache + /// redis user_id for irsa authentication #[clap(long)] redis_user_id: Option, - /// aws region to retrieve credentials + /// aws region for irsa authentication #[clap(long, default_value_t = String::new())] aws_region: String, /// cache for `project_info` (use `size=0` to disable) @@ -197,6 +200,12 @@ struct ProxyCliArgs { #[clap(flatten)] parquet_upload: ParquetUploadArgs, + /// http endpoint to receive periodic metric updates + #[clap(long)] + metric_collection_endpoint: Option, + /// how often metrics should be sent to a collection endpoint + #[clap(long)] + metric_collection_interval: Option, /// interval for backup metric collection #[clap(long, default_value = "10m", value_parser = humantime::parse_duration)] metric_backup_collection_interval: std::time::Duration, @@ -209,6 +218,7 @@ struct ProxyCliArgs { /// Size of each event is no more than 400 bytes, so 2**22 is about 200MB before the compression. 
#[clap(long, default_value = "4194304")] metric_backup_collection_chunk_size: usize, + /// Whether to retry the connection to the compute node #[clap(long, default_value = config::RetryConfig::CONNECT_TO_COMPUTE_DEFAULT_VALUES)] connect_to_compute_retry: String, @@ -231,6 +241,14 @@ struct ProxyCliArgs { #[clap(flatten)] pg_sni_router: PgSniRouterArgs, + + /// if this is not local proxy, this toggles whether we accept Postgres REST requests + #[clap(long, default_value_t = false, value_parser = clap::builder::BoolishValueParser::new(), action = clap::ArgAction::Set)] + is_rest_broker: bool, + + /// cache for `db_schema_cache` introspection (use `size=0` to disable) + #[clap(long, default_value = "size=1000,ttl=1h")] + db_schema_cache: String, } #[derive(clap::Args, Clone, Copy, Debug)] @@ -314,7 +332,7 @@ pub async fn run() -> anyhow::Result<()> { let jemalloc = match crate::jemalloc::MetricRecorder::new() { Ok(t) => Some(t), Err(e) => { - tracing::error!(error = ?e, "could not start jemalloc metrics loop"); + error!(error = ?e, "could not start jemalloc metrics loop"); None } }; @@ -328,7 +346,7 @@ pub async fn run() -> anyhow::Result<()> { Either::Right(auth_backend) => info!("Authentication backend: {auth_backend:?}"), } info!("Using region: {}", args.aws_region); - let (regional_redis_client, redis_notifications_client) = configure_redis(&args).await?; + let redis_client = configure_redis(&args).await?; // Check that we can bind to address before further initialization info!("Starting http on {}", args.http); @@ -383,20 +401,7 @@ pub async fn run() -> anyhow::Result<()> { let cancellation_token = CancellationToken::new(); - let redis_rps_limit = Vec::leak(args.redis_rps_limit.clone()); - RateBucketInfo::validate(redis_rps_limit)?; - - let redis_kv_client = regional_redis_client - .as_ref() - .map(|redis_publisher| RedisKVClient::new(redis_publisher.clone(), redis_rps_limit)); - - // channel size should be higher than redis client limit to avoid blocking - let cancel_ch_size = args.cancellation_ch_size; - let (tx_cancel, rx_cancel) = tokio::sync::mpsc::channel(cancel_ch_size); - let cancellation_handler = Arc::new(CancellationHandler::new( - &config.connect_to_compute, - Some(tx_cancel), - )); + let cancellation_handler = Arc::new(CancellationHandler::new(&config.connect_to_compute)); let endpoint_rate_limiter = Arc::new(EndpointRateLimiter::new_with_shards( RateBucketInfo::to_leaky_bucket(&args.endpoint_rps_limit) @@ -404,13 +409,15 @@ pub async fn run() -> anyhow::Result<()> { 64, )); + #[cfg(any(test, feature = "testing"))] + let refresh_config_notify = Arc::new(Notify::new()); // client facing tasks. 
these will exit on error or on cancellation // cancellation returns Ok(()) let mut client_tasks = JoinSet::new(); match auth_backend { Either::Left(auth_backend) => { if let Some(proxy_listener) = proxy_listener { - client_tasks.spawn(crate::proxy::task_main( + client_tasks.spawn(crate::pglb::task_main( config, auth_backend, proxy_listener, @@ -430,6 +437,17 @@ pub async fn run() -> anyhow::Result<()> { endpoint_rate_limiter.clone(), )); } + + // if auth backend is local, we need to load the config file + #[cfg(any(test, feature = "testing"))] + if let auth::Backend::Local(_) = &auth_backend { + refresh_config_notify.notify_one(); + tokio::spawn(refresh_config_loop( + config, + args.config_path, + refresh_config_notify.clone(), + )); + } } Either::Right(auth_backend) => { if let Some(proxy_listener) = proxy_listener { @@ -475,11 +493,18 @@ pub async fn run() -> anyhow::Result<()> { client_tasks.spawn(crate::context::parquet::worker( cancellation_token.clone(), args.parquet_upload, + args.region, )); // maintenance tasks. these never return unless there's an error let mut maintenance_tasks = JoinSet::new(); - maintenance_tasks.spawn(crate::signals::handle(cancellation_token.clone(), || {})); + + maintenance_tasks.spawn(crate::signals::handle(cancellation_token.clone(), { + move || { + #[cfg(any(test, feature = "testing"))] + refresh_config_notify.notify_one(); + } + })); maintenance_tasks.spawn(http::health_server::task_main( http_listener, AppMetrics { @@ -495,60 +520,51 @@ pub async fn run() -> anyhow::Result<()> { maintenance_tasks.spawn(usage_metrics::task_main(metrics_config)); } - #[cfg_attr(not(any(test, feature = "testing")), expect(irrefutable_let_patterns))] - if let Either::Left(auth::Backend::ControlPlane(api, ())) = &auth_backend { - if let crate::control_plane::client::ControlPlaneClient::ProxyV1(api) = &**api { - match (redis_notifications_client, regional_redis_client.clone()) { - (None, None) => {} - (client1, client2) => { - let cache = api.caches.project_info.clone(); - if let Some(client) = client1 { - maintenance_tasks.spawn(notifications::task_main( - client, - cache.clone(), - args.region.clone(), - )); + if let Either::Left(auth::Backend::ControlPlane(api, ())) = &auth_backend + && let crate::control_plane::client::ControlPlaneClient::ProxyV1(api) = &**api + && let Some(client) = redis_client + { + // project info cache and invalidation of that cache. + let cache = api.caches.project_info.clone(); + maintenance_tasks.spawn(notifications::task_main(client.clone(), cache.clone())); + maintenance_tasks.spawn(async move { cache.clone().gc_worker().await }); + + // Try to connect to Redis 3 times with 1 + (0..0.1) second interval. + // This prevents immediate exit and pod restart, + // which can cause hammering of the redis in case of connection issues. 
+ // cancellation key management + let mut redis_kv_client = RedisKVClient::new(client.clone()); + for attempt in (0..3).with_position() { + match redis_kv_client.try_connect().await { + Ok(()) => { + info!("Connected to Redis KV client"); + cancellation_handler.init_tx(BatchQueue::new(CancellationProcessor { + client: redis_kv_client, + batch_size: args.cancellation_batch_size, + })); + + break; + } + Err(e) => { + error!("Failed to connect to Redis KV client: {e}"); + if matches!(attempt, Position::Last(_)) { + bail!( + "Failed to connect to Redis KV client after {} attempts", + attempt.into_inner() + ); } - if let Some(client) = client2 { - maintenance_tasks.spawn(notifications::task_main( - client, - cache.clone(), - args.region.clone(), - )); - } - maintenance_tasks.spawn(async move { cache.clone().gc_worker().await }); + let jitter = thread_rng().gen_range(0..100); + tokio::time::sleep(Duration::from_millis(1000 + jitter)).await; } } - - if let Some(mut redis_kv_client) = redis_kv_client { - maintenance_tasks.spawn(async move { - redis_kv_client.try_connect().await?; - handle_cancel_messages( - &mut redis_kv_client, - rx_cancel, - args.cancellation_batch_size, - ) - .await?; - - drop(redis_kv_client); - - // `handle_cancel_messages` was terminated due to the tx_cancel - // being dropped. this is not worthy of an error, and this task can only return `Err`, - // so let's wait forever instead. - std::future::pending().await - }); - } - - if let Some(regional_redis_client) = regional_redis_client { - let cache = api.caches.endpoints_cache.clone(); - let con = regional_redis_client; - let span = tracing::info_span!("endpoints_cache"); - maintenance_tasks.spawn( - async move { cache.do_read(con, cancellation_token.clone()).await } - .instrument(span), - ); - } } + + // listen for notifications of new projects/endpoints/branches + let cache = api.caches.endpoints_cache.clone(); + let span = tracing::info_span!("endpoints_cache"); + maintenance_tasks.spawn( + async move { cache.do_read(client, cancellation_token.clone()).await }.instrument(span), + ); } let maintenance = loop { @@ -673,10 +689,11 @@ fn build_config(args: &ProxyCliArgs) -> anyhow::Result<&'static ProxyConfig> { authentication_config, proxy_protocol_v2: args.proxy_protocol_v2, handshake_timeout: args.handshake_timeout, - region: args.region.clone(), wake_compute_retry_config: config::RetryConfig::parse(&args.wake_compute_retry)?, connect_compute_locks, connect_to_compute: compute_config, + #[cfg(feature = "testing")] + disable_pg_session_jwt: false, }; let config = Box::leak(Box::new(config)); @@ -830,26 +847,36 @@ fn build_auth_backend( Ok(Either::Right(config)) } + + #[cfg(any(test, feature = "testing"))] + AuthBackendType::Local => { + let postgres: SocketAddr = "127.0.0.1:7432".parse()?; + let compute_ctl: ApiUrl = "http://127.0.0.1:3081/".parse()?; + let auth_backend = crate::auth::Backend::Local( + crate::auth::backend::MaybeOwned::Owned(LocalBackend::new(postgres, compute_ctl)), + ); + + let config = Box::leak(Box::new(auth_backend)); + + Ok(Either::Left(config)) + } } } async fn configure_redis( args: &ProxyCliArgs, -) -> anyhow::Result<( - Option, - Option, -)> { +) -> anyhow::Result> { // TODO: untangle the config args - let regional_redis_client = match (args.redis_auth_type.as_str(), &args.redis_notifications) { - ("plain", redis_url) => match redis_url { + let redis_client = match &*args.redis_auth_type { + "plain" => match &args.redis_plain { None => { - bail!("plain auth requires redis_notifications to be set"); 
+ bail!("plain auth requires redis_plain to be set"); } Some(url) => { Some(ConnectionWithCredentialsProvider::new_with_static_credentials(url.clone())) } }, - ("irsa", _) => match (&args.redis_host, args.redis_port) { + "irsa" => match (&args.redis_host, args.redis_port) { (Some(host), Some(port)) => Some( ConnectionWithCredentialsProvider::new_with_credentials_provider( host.clone(), @@ -873,18 +900,12 @@ async fn configure_redis( bail!("redis-host and redis-port must be specified together"); } }, - _ => { - bail!("unknown auth type given"); + auth_type => { + bail!("unknown auth type {auth_type:?} given") } }; - let redis_notifications_client = if let Some(url) = &args.redis_notifications { - Some(ConnectionWithCredentialsProvider::new_with_static_credentials(&**url)) - } else { - regional_redis_client.clone() - }; - - Ok((regional_redis_client, redis_notifications_client)) + Ok(redis_client) } #[cfg(test)] diff --git a/proxy/src/cache/project_info.rs b/proxy/src/cache/project_info.rs index 81c88e3ddd..d37c107323 100644 --- a/proxy/src/cache/project_info.rs +++ b/proxy/src/cache/project_info.rs @@ -18,6 +18,7 @@ use crate::types::{EndpointId, RoleName}; #[async_trait] pub(crate) trait ProjectInfoCache { + fn invalidate_endpoint_access(&self, endpoint_id: EndpointIdInt); fn invalidate_endpoint_access_for_project(&self, project_id: ProjectIdInt); fn invalidate_endpoint_access_for_org(&self, account_id: AccountIdInt); fn invalidate_role_secret_for_project(&self, project_id: ProjectIdInt, role_name: RoleNameInt); @@ -100,6 +101,13 @@ pub struct ProjectInfoCacheImpl { #[async_trait] impl ProjectInfoCache for ProjectInfoCacheImpl { + fn invalidate_endpoint_access(&self, endpoint_id: EndpointIdInt) { + info!("invalidating endpoint access for `{endpoint_id}`"); + if let Some(mut endpoint_info) = self.cache.get_mut(&endpoint_id) { + endpoint_info.invalidate_endpoint(); + } + } + fn invalidate_endpoint_access_for_project(&self, project_id: ProjectIdInt) { info!("invalidating endpoint access for project `{project_id}`"); let endpoints = self @@ -356,6 +364,7 @@ mod tests { use std::sync::Arc; use super::*; + use crate::control_plane::messages::EndpointRateLimitConfig; use crate::control_plane::{AccessBlockerFlags, AuthSecret}; use crate::scram::ServerSecret; use crate::types::ProjectId; @@ -391,6 +400,7 @@ mod tests { allowed_ips: allowed_ips.clone(), allowed_vpce: Arc::new(vec![]), flags: AccessBlockerFlags::default(), + rate_limits: EndpointRateLimitConfig::default(), }, RoleAccessControl { secret: secret1.clone(), @@ -406,6 +416,7 @@ mod tests { allowed_ips: allowed_ips.clone(), allowed_vpce: Arc::new(vec![]), flags: AccessBlockerFlags::default(), + rate_limits: EndpointRateLimitConfig::default(), }, RoleAccessControl { secret: secret2.clone(), @@ -431,6 +442,7 @@ mod tests { allowed_ips: allowed_ips.clone(), allowed_vpce: Arc::new(vec![]), flags: AccessBlockerFlags::default(), + rate_limits: EndpointRateLimitConfig::default(), }, RoleAccessControl { secret: secret3.clone(), diff --git a/proxy/src/cache/timed_lru.rs b/proxy/src/cache/timed_lru.rs index 7cfe5100ea..183e1ea449 100644 --- a/proxy/src/cache/timed_lru.rs +++ b/proxy/src/cache/timed_lru.rs @@ -30,7 +30,7 @@ use super::{Cache, timed_lru}; /// /// * There's an API for immediate invalidation (removal) of a cache entry; /// It's useful in case we know for sure that the entry is no longer correct. -/// See [`timed_lru::LookupInfo`] & [`timed_lru::Cached`] for more information. +/// See [`timed_lru::Cached`] for more information. 
/// /// * Expired entries are kept in the cache, until they are evicted by the LRU policy, /// or by a successful lookup (i.e. the entry hasn't expired yet). @@ -54,7 +54,7 @@ pub(crate) struct TimedLru { impl Cache for TimedLru { type Key = K; type Value = V; - type LookupInfo = LookupInfo; + type LookupInfo = Key; fn invalidate(&self, info: &Self::LookupInfo) { self.invalidate_raw(info); @@ -87,30 +87,24 @@ impl TimedLru { /// Drop an entry from the cache if it's outdated. #[tracing::instrument(level = "debug", fields(cache = self.name), skip_all)] - fn invalidate_raw(&self, info: &LookupInfo) { - let now = Instant::now(); - + fn invalidate_raw(&self, key: &K) { // Do costly things before taking the lock. let mut cache = self.cache.lock(); - let raw_entry = match cache.raw_entry_mut().from_key(&info.key) { + let entry = match cache.raw_entry_mut().from_key(key) { RawEntryMut::Vacant(_) => return, - RawEntryMut::Occupied(x) => x, + RawEntryMut::Occupied(x) => x.remove(), }; - - // Remove the entry if it was created prior to lookup timestamp. - let entry = raw_entry.get(); - let (created_at, expires_at) = (entry.created_at, entry.expires_at); - let should_remove = created_at <= info.created_at || expires_at <= now; - - if should_remove { - raw_entry.remove(); - } - drop(cache); // drop lock before logging + + let Entry { + created_at, + expires_at, + .. + } = entry; + debug!( - created_at = format_args!("{created_at:?}"), - expires_at = format_args!("{expires_at:?}"), - entry_removed = should_remove, + ?created_at, + ?expires_at, "processed a cache entry invalidation event" ); } @@ -211,10 +205,10 @@ impl TimedLru { } pub(crate) fn insert_unit(&self, key: K, value: V) -> (Option, Cached<&Self, ()>) { - let (created_at, old) = self.insert_raw(key.clone(), value); + let (_, old) = self.insert_raw(key.clone(), value); let cached = Cached { - token: Some((self, LookupInfo { created_at, key })), + token: Some((self, key)), value: (), }; @@ -229,28 +223,9 @@ impl TimedLru { K: Borrow + Clone, Q: Hash + Eq + ?Sized, { - self.get_raw(key, |key, entry| { - let info = LookupInfo { - created_at: entry.created_at, - key: key.clone(), - }; - - Cached { - token: Some((self, info)), - value: entry.value.clone(), - } + self.get_raw(key, |key, entry| Cached { + token: Some((self, key.clone())), + value: entry.value.clone(), }) } } - -/// Lookup information for key invalidation. -pub(crate) struct LookupInfo { - /// Time of creation of a cache [`Entry`]. - /// We use this during invalidation lookups to prevent eviction of a newer - /// entry sharing the same key (it might've been inserted by a different - /// task after we got the entry we're trying to invalidate now). - created_at: Instant, - - /// Search by this key. 
- key: K, -} diff --git a/proxy/src/cancellation.rs b/proxy/src/cancellation.rs index d26641db46..74413f1a7d 100644 --- a/proxy/src/cancellation.rs +++ b/proxy/src/cancellation.rs @@ -1,19 +1,24 @@ +use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; -use std::sync::Arc; +use std::pin::pin; +use std::sync::{Arc, OnceLock}; +use std::time::Duration; -use anyhow::{Context, anyhow}; +use anyhow::anyhow; +use futures::FutureExt; use ipnet::{IpNet, Ipv4Net, Ipv6Net}; -use postgres_client::CancelToken; +use postgres_client::RawCancelToken; use postgres_client::tls::MakeTlsConnect; use redis::{Cmd, FromRedisValue, Value}; use serde::{Deserialize, Serialize}; use thiserror::Error; use tokio::net::TcpStream; -use tokio::sync::{mpsc, oneshot}; -use tracing::{debug, error, info, warn}; +use tokio::time::timeout; +use tracing::{debug, error, info}; use crate::auth::AuthError; use crate::auth::backend::ComputeUserInfo; +use crate::batch::{BatchQueue, QueueProcessing}; use crate::config::ComputeConfig; use crate::context::RequestContext; use crate::control_plane::ControlPlaneApi; @@ -24,52 +29,48 @@ use crate::pqproto::CancelKeyData; use crate::rate_limiter::LeakyBucketRateLimiter; use crate::redis::keys::KeyPrefix; use crate::redis::kv_ops::RedisKVClient; -use crate::tls::postgres_rustls::MakeRustlsConnect; type IpSubnetKey = IpNet; -const CANCEL_KEY_TTL: i64 = 1_209_600; // 2 weeks cancellation key expire time +const CANCEL_KEY_TTL: std::time::Duration = std::time::Duration::from_secs(600); +const CANCEL_KEY_REFRESH: std::time::Duration = std::time::Duration::from_secs(570); // Message types for sending through mpsc channel pub enum CancelKeyOp { StoreCancelKey { - key: String, - field: String, - value: String, - resp_tx: Option>>, - _guard: CancelChannelSizeGuard<'static>, - expire: i64, // TTL for key + key: CancelKeyData, + value: Box, + expire: std::time::Duration, }, GetCancelData { - key: String, - resp_tx: oneshot::Sender>>, - _guard: CancelChannelSizeGuard<'static>, - }, - RemoveCancelKey { - key: String, - field: String, - resp_tx: Option>>, - _guard: CancelChannelSizeGuard<'static>, + key: CancelKeyData, }, } pub struct Pipeline { inner: redis::Pipeline, - replies: Vec, + replies: usize, } impl Pipeline { fn with_capacity(n: usize) -> Self { Self { inner: redis::Pipeline::with_capacity(n), - replies: Vec::with_capacity(n), + replies: 0, } } - async fn execute(&mut self, client: &mut RedisKVClient) { - let responses = self.replies.len(); + async fn execute(self, client: &mut RedisKVClient) -> Vec> { + let responses = self.replies; let batch_size = self.inner.len(); + if !client.credentials_refreshed() { + tracing::debug!( + "Redis credentials are not refreshed. Sleeping for 5 seconds before retrying..." + ); + tokio::time::sleep(Duration::from_secs(5)).await; + } + match client.query(&self.inner).await { // for each reply, we expect that many values. Ok(Value::Array(values)) if values.len() == responses => { @@ -77,176 +78,80 @@ impl Pipeline { batch_size, responses, "successfully completed cancellation jobs", ); - for (value, reply) in std::iter::zip(values, self.replies.drain(..)) { - reply.send_value(value); - } + values.into_iter().map(Ok).collect() } Ok(value) => { error!(batch_size, ?value, "unexpected redis return value"); - for reply in self.replies.drain(..) 
{ - reply.send_err(anyhow!("incorrect response type from redis")); - } + std::iter::repeat_with(|| Err(anyhow!("incorrect response type from redis"))) + .take(responses) + .collect() } Err(err) => { - for reply in self.replies.drain(..) { - reply.send_err(anyhow!("could not send cmd to redis: {err}")); - } + std::iter::repeat_with(|| Err(anyhow!("could not send cmd to redis: {err}"))) + .take(responses) + .collect() } } - - self.inner.clear(); - self.replies.clear(); } - fn add_command_with_reply(&mut self, cmd: Cmd, reply: CancelReplyOp) { + fn add_command_with_reply(&mut self, cmd: Cmd) { self.inner.add_command(cmd); - self.replies.push(reply); + self.replies += 1; } fn add_command_no_reply(&mut self, cmd: Cmd) { self.inner.add_command(cmd).ignore(); } - - fn add_command(&mut self, cmd: Cmd, reply: Option) { - match reply { - Some(reply) => self.add_command_with_reply(cmd, reply), - None => self.add_command_no_reply(cmd), - } - } } impl CancelKeyOp { - fn register(self, pipe: &mut Pipeline) { - #[allow(clippy::used_underscore_binding)] + fn register(&self, pipe: &mut Pipeline) { match self { - CancelKeyOp::StoreCancelKey { - key, - field, - value, - resp_tx, - _guard, - expire, - } => { - let reply = - resp_tx.map(|resp_tx| CancelReplyOp::StoreCancelKey { resp_tx, _guard }); - pipe.add_command(Cmd::hset(&key, field, value), reply); - pipe.add_command_no_reply(Cmd::expire(key, expire)); + CancelKeyOp::StoreCancelKey { key, value, expire } => { + let key = KeyPrefix::Cancel(*key).build_redis_key(); + pipe.add_command_with_reply(Cmd::hset(&key, "data", &**value)); + pipe.add_command_no_reply(Cmd::expire(&key, expire.as_secs() as i64)); } - CancelKeyOp::GetCancelData { - key, - resp_tx, - _guard, - } => { - let reply = CancelReplyOp::GetCancelData { resp_tx, _guard }; - pipe.add_command_with_reply(Cmd::hgetall(key), reply); - } - CancelKeyOp::RemoveCancelKey { - key, - field, - resp_tx, - _guard, - } => { - let reply = - resp_tx.map(|resp_tx| CancelReplyOp::RemoveCancelKey { resp_tx, _guard }); - pipe.add_command(Cmd::hdel(key, field), reply); + CancelKeyOp::GetCancelData { key } => { + let key = KeyPrefix::Cancel(*key).build_redis_key(); + pipe.add_command_with_reply(Cmd::hget(key, "data")); } } } } -// Message types for sending through mpsc channel -pub enum CancelReplyOp { - StoreCancelKey { - resp_tx: oneshot::Sender>, - _guard: CancelChannelSizeGuard<'static>, - }, - GetCancelData { - resp_tx: oneshot::Sender>>, - _guard: CancelChannelSizeGuard<'static>, - }, - RemoveCancelKey { - resp_tx: oneshot::Sender>, - _guard: CancelChannelSizeGuard<'static>, - }, +pub struct CancellationProcessor { + pub client: RedisKVClient, + pub batch_size: usize, } -impl CancelReplyOp { - fn send_err(self, e: anyhow::Error) { - match self { - CancelReplyOp::StoreCancelKey { resp_tx, _guard } => { - resp_tx - .send(Err(e)) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - CancelReplyOp::GetCancelData { resp_tx, _guard } => { - resp_tx - .send(Err(e)) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - CancelReplyOp::RemoveCancelKey { resp_tx, _guard } => { - resp_tx - .send(Err(e)) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - } +impl QueueProcessing for CancellationProcessor { + type Req = (CancelChannelSizeGuard<'static>, CancelKeyOp); + type Res = anyhow::Result; + + fn batch_size(&self, _queue_size: usize) -> usize { + self.batch_size } - fn send_value(self, v: redis::Value) { - match self { - CancelReplyOp::StoreCancelKey { 
resp_tx, _guard } => { - let send = - FromRedisValue::from_owned_redis_value(v).context("could not parse value"); - resp_tx - .send(send) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - CancelReplyOp::GetCancelData { resp_tx, _guard } => { - let send = - FromRedisValue::from_owned_redis_value(v).context("could not parse value"); - resp_tx - .send(send) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - CancelReplyOp::RemoveCancelKey { resp_tx, _guard } => { - let send = - FromRedisValue::from_owned_redis_value(v).context("could not parse value"); - resp_tx - .send(send) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } + async fn apply(&mut self, batch: Vec) -> Vec { + if !self.client.credentials_refreshed() { + // this will cause a timeout for cancellation operations + tracing::debug!( + "Redis credentials are not refreshed. Sleeping for 5 seconds before retrying..." + ); + tokio::time::sleep(Duration::from_secs(5)).await; } - } -} -// Running as a separate task to accept messages through the rx channel -pub async fn handle_cancel_messages( - client: &mut RedisKVClient, - mut rx: mpsc::Receiver, - batch_size: usize, -) -> anyhow::Result<()> { - let mut batch = Vec::with_capacity(batch_size); - let mut pipeline = Pipeline::with_capacity(batch_size); - - loop { - if rx.recv_many(&mut batch, batch_size).await == 0 { - warn!("shutting down cancellation queue"); - break Ok(()); - } + let mut pipeline = Pipeline::with_capacity(batch.len()); let batch_size = batch.len(); debug!(batch_size, "running cancellation jobs"); - for msg in batch.drain(..) { - msg.register(&mut pipeline); + for (_, op) in &batch { + op.register(&mut pipeline); } - pipeline.execute(client).await; + pipeline.execute(&mut self.client).await } } @@ -257,7 +162,7 @@ pub struct CancellationHandler { compute_config: &'static ComputeConfig, // rate limiter of cancellation requests limiter: Arc>>, - tx: Option>, // send messages to the redis KV client task + tx: OnceLock>, // send messages to the redis KV client task } #[derive(Debug, Error)] @@ -297,13 +202,10 @@ impl ReportableError for CancelError { } impl CancellationHandler { - pub fn new( - compute_config: &'static ComputeConfig, - tx: Option>, - ) -> Self { + pub fn new(compute_config: &'static ComputeConfig) -> Self { Self { compute_config, - tx, + tx: OnceLock::new(), limiter: Arc::new(std::sync::Mutex::new( LeakyBucketRateLimiter::::new_with_shards( LeakyBucketRateLimiter::::DEFAULT, @@ -313,7 +215,14 @@ impl CancellationHandler { } } - pub(crate) fn get_key(self: &Arc) -> Session { + pub fn init_tx(&self, queue: BatchQueue) { + self.tx + .set(queue) + .map_err(|_| {}) + .expect("cancellation queue should be registered once"); + } + + pub(crate) fn get_key(self: Arc) -> Session { // we intentionally generate a random "backend pid" and "secret key" here. // we use the corresponding u64 as an identifier for the // actual endpoint+pid+secret for postgres/pgbouncer. 
@@ -323,83 +232,68 @@ impl CancellationHandler { let key: CancelKeyData = rand::random(); - let prefix_key: KeyPrefix = KeyPrefix::Cancel(key); - let redis_key = prefix_key.build_redis_key(); - debug!("registered new query cancellation key {key}"); Session { key, - redis_key, - cancellation_handler: Arc::clone(self), + cancellation_handler: self, } } + /// This is not cancel safe async fn get_cancel_key( &self, key: CancelKeyData, ) -> Result, CancelError> { - let prefix_key: KeyPrefix = KeyPrefix::Cancel(key); - let redis_key = prefix_key.build_redis_key(); + let guard = Metrics::get() + .proxy + .cancel_channel_size + .guard(RedisMsgKind::HGet); + let op = CancelKeyOp::GetCancelData { key }; - let (resp_tx, resp_rx) = tokio::sync::oneshot::channel(); - let op = CancelKeyOp::GetCancelData { - key: redis_key, - resp_tx, - _guard: Metrics::get() - .proxy - .cancel_channel_size - .guard(RedisMsgKind::HGetAll), - }; - - let Some(tx) = &self.tx else { + let Some(tx) = self.tx.get() else { tracing::warn!("cancellation handler is not available"); return Err(CancelError::InternalError); }; - tx.try_send(op) - .map_err(|e| { - tracing::warn!("failed to send GetCancelData for {key}: {e}"); - }) - .map_err(|()| CancelError::InternalError)?; - - let result = resp_rx.await.map_err(|e| { + const TIMEOUT: Duration = Duration::from_secs(5); + let result = timeout( + TIMEOUT, + tx.call((guard, op), std::future::pending::()), + ) + .await + .map_err(|_| { + tracing::warn!("timed out waiting to receive GetCancelData response"); + CancelError::RateLimit + })? + // cannot be cancelled + .unwrap_or_else(|x| match x {}) + .map_err(|e| { tracing::warn!("failed to receive GetCancelData response: {e}"); CancelError::InternalError })?; - let cancel_state_str: Option = match result { - Ok(mut state) => { - if state.len() == 1 { - Some(state.remove(0).1) - } else { - tracing::warn!("unexpected number of entries in cancel state: {state:?}"); - return Err(CancelError::InternalError); - } - } - Err(e) => { - tracing::warn!("failed to receive cancel state from redis: {e}"); - return Err(CancelError::InternalError); - } - }; + let cancel_state_str = String::from_owned_redis_value(result).map_err(|e| { + tracing::warn!("failed to receive GetCancelData response: {e}"); + CancelError::InternalError + })?; - let cancel_state: Option = match cancel_state_str { - Some(state) => { - let cancel_closure: CancelClosure = serde_json::from_str(&state).map_err(|e| { - tracing::warn!("failed to deserialize cancel state: {e}"); - CancelError::InternalError - })?; - Some(cancel_closure) - } - None => None, - }; - Ok(cancel_state) + let cancel_closure: CancelClosure = + serde_json::from_str(&cancel_state_str).map_err(|e| { + tracing::warn!("failed to deserialize cancel state: {e}"); + CancelError::InternalError + })?; + + Ok(Some(cancel_closure)) } + /// Try to cancel a running query for the corresponding connection. /// If the cancellation key is not found, it will be published to Redis. /// check_allowed - if true, check if the IP is allowed to cancel the query. /// Will fetch IP allowlist internally. /// /// return Result primarily for tests + /// + /// This is not cancel safe pub(crate) async fn cancel_session( &self, key: CancelKeyData, @@ -468,10 +362,10 @@ impl CancellationHandler { /// This should've been a [`std::future::Future`], but /// it's impossible to name a type of an unboxed future /// (we'd need something like `#![feature(type_alias_impl_trait)]`). 
-#[derive(Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct CancelClosure { socket_addr: SocketAddr, - cancel_token: CancelToken, + cancel_token: RawCancelToken, hostname: String, // for pg_sni router user_info: ComputeUserInfo, } @@ -479,7 +373,7 @@ pub struct CancelClosure { impl CancelClosure { pub(crate) fn new( socket_addr: SocketAddr, - cancel_token: CancelToken, + cancel_token: RawCancelToken, hostname: String, user_info: ComputeUserInfo, ) -> Self { @@ -492,15 +386,13 @@ impl CancelClosure { } /// Cancels the query running on user's compute node. pub(crate) async fn try_cancel_query( - self, + &self, compute_config: &ComputeConfig, ) -> Result<(), CancelError> { let socket = TcpStream::connect(self.socket_addr).await?; - let mut mk_tls = - crate::tls::postgres_rustls::MakeRustlsConnect::new(compute_config.tls.clone()); - let tls = >::make_tls_connect( - &mut mk_tls, + let tls = <_ as MakeTlsConnect>::make_tls_connect( + compute_config, &self.hostname, ) .map_err(|e| CancelError::IO(std::io::Error::other(e.to_string())))?; @@ -515,7 +407,6 @@ impl CancelClosure { pub(crate) struct Session { /// The user-facing key identifying this session. key: CancelKeyData, - redis_key: String, cancellation_handler: Arc, } @@ -524,60 +415,75 @@ impl Session { &self.key } - // Send the store key op to the cancellation handler and set TTL for the key - pub(crate) fn write_cancel_key( + /// Ensure the cancel key is continously refreshed, + /// but stop when the channel is dropped. + /// + /// This is not cancel safe + pub(crate) async fn maintain_cancel_key( &self, - cancel_closure: CancelClosure, - ) -> Result<(), CancelError> { - let Some(tx) = &self.cancellation_handler.tx else { + session_id: uuid::Uuid, + cancel: tokio::sync::oneshot::Receiver, + cancel_closure: &CancelClosure, + compute_config: &ComputeConfig, + ) { + let Some(tx) = self.cancellation_handler.tx.get() else { tracing::warn!("cancellation handler is not available"); - return Err(CancelError::InternalError); + // don't exit, as we only want to exit if cancelled externally. 
+ std::future::pending().await }; - let closure_json = serde_json::to_string(&cancel_closure).map_err(|e| { - tracing::warn!("failed to serialize cancel closure: {e}"); - CancelError::InternalError - })?; + let closure_json = serde_json::to_string(&cancel_closure) + .expect("serialising to json string should not fail") + .into_boxed_str(); - let op = CancelKeyOp::StoreCancelKey { - key: self.redis_key.clone(), - field: "data".to_string(), - value: closure_json, - resp_tx: None, - _guard: Metrics::get() + let mut cancel = pin!(cancel); + + loop { + let guard = Metrics::get() .proxy .cancel_channel_size - .guard(RedisMsgKind::HSet), - expire: CANCEL_KEY_TTL, - }; + .guard(RedisMsgKind::HSet); + let op = CancelKeyOp::StoreCancelKey { + key: self.key, + value: closure_json.clone(), + expire: CANCEL_KEY_TTL, + }; - let _ = tx.try_send(op).map_err(|e| { - let key = self.key; - tracing::warn!("failed to send StoreCancelKey for {key}: {e}"); - }); - Ok(()) - } + tracing::debug!( + src=%self.key, + dest=?cancel_closure.cancel_token, + "registering cancellation key" + ); - pub(crate) fn remove_cancel_key(&self) -> Result<(), CancelError> { - let Some(tx) = &self.cancellation_handler.tx else { - tracing::warn!("cancellation handler is not available"); - return Err(CancelError::InternalError); - }; + match tx.call((guard, op), cancel.as_mut()).await { + Ok(Ok(_)) => { + tracing::debug!( + src=%self.key, + dest=?cancel_closure.cancel_token, + "registered cancellation key" + ); - let op = CancelKeyOp::RemoveCancelKey { - key: self.redis_key.clone(), - field: "data".to_string(), - resp_tx: None, - _guard: Metrics::get() - .proxy - .cancel_channel_size - .guard(RedisMsgKind::HDel), - }; + // wait before continuing. + tokio::time::sleep(CANCEL_KEY_REFRESH).await; + } + // retry immediately. 
+ Ok(Err(error)) => { + tracing::warn!(?error, "error registering cancellation key"); + } + Err(Err(_cancelled)) => break, + } + } - let _ = tx.try_send(op).map_err(|e| { - let key = self.key; - tracing::warn!("failed to send RemoveCancelKey for {key}: {e}"); - }); - Ok(()) + if let Err(err) = cancel_closure + .try_cancel_query(compute_config) + .boxed() + .await + { + tracing::warn!( + ?session_id, + ?err, + "could not cancel the query in the database" + ); + } } } diff --git a/proxy/src/compute.rs b/proxy/src/compute/mod.rs similarity index 55% rename from proxy/src/compute.rs rename to proxy/src/compute/mod.rs index 2899f25129..7b9183b05e 100644 --- a/proxy/src/compute.rs +++ b/proxy/src/compute/mod.rs @@ -1,21 +1,24 @@ +mod tls; + use std::fmt::Debug; use std::io; -use std::net::SocketAddr; -use std::time::Duration; +use std::net::{IpAddr, SocketAddr}; use futures::{FutureExt, TryFutureExt}; use itertools::Itertools; +use postgres_client::config::{AuthKeys, ChannelBinding, SslMode}; +use postgres_client::maybe_tls_stream::MaybeTlsStream; use postgres_client::tls::MakeTlsConnect; -use postgres_client::{CancelToken, RawConnection}; +use postgres_client::{NoTls, RawCancelToken, RawConnection}; use postgres_protocol::message::backend::NoticeResponseBody; -use rustls::pki_types::InvalidDnsNameError; use thiserror::Error; use tokio::net::{TcpStream, lookup_host}; use tracing::{debug, error, info, warn}; -use crate::auth::backend::ComputeUserInfo; +use crate::auth::backend::{ComputeCredentialKeys, ComputeUserInfo}; use crate::auth::parse_endpoint_param; use crate::cancellation::CancelClosure; +use crate::compute::tls::TlsError; use crate::config::ComputeConfig; use crate::context::RequestContext; use crate::control_plane::client::ApiLockError; @@ -25,23 +28,58 @@ use crate::error::{ReportableError, UserFacingError}; use crate::metrics::{Metrics, NumDbConnectionsGuard}; use crate::pqproto::StartupMessageParams; use crate::proxy::neon_option; -use crate::tls::postgres_rustls::MakeRustlsConnect; use crate::types::Host; pub const COULD_NOT_CONNECT: &str = "Couldn't connect to compute node"; #[derive(Debug, Error)] -pub(crate) enum ConnectionError { +pub(crate) enum PostgresError { /// This error doesn't seem to reveal any secrets; for instance, /// `postgres_client::error::Kind` doesn't contain ip addresses and such. #[error("{COULD_NOT_CONNECT}: {0}")] Postgres(#[from] postgres_client::Error), +} - #[error("{COULD_NOT_CONNECT}: {0}")] - CouldNotConnect(#[from] io::Error), +impl UserFacingError for PostgresError { + fn to_string_client(&self) -> String { + match self { + // This helps us drop irrelevant library-specific prefixes. + // TODO: propagate severity level and other parameters. + PostgresError::Postgres(err) => match err.as_db_error() { + Some(err) => { + let msg = err.message(); + if msg.starts_with("unsupported startup parameter: ") + || msg.starts_with("unsupported startup parameter in options: ") + { + format!( + "{msg}. Please use unpooled connection or remove this parameter from the startup package. 
More details: https://neon.tech/docs/connect/connection-errors#unsupported-startup-parameter" + ) + } else { + msg.to_owned() + } + } + None => err.to_string(), + }, + } + } +} + +impl ReportableError for PostgresError { + fn get_error_kind(&self) -> crate::error::ErrorKind { + match self { + PostgresError::Postgres(e) if e.as_db_error().is_some() => { + crate::error::ErrorKind::Postgres + } + PostgresError::Postgres(_) => crate::error::ErrorKind::Compute, + } + } +} + +#[derive(Debug, Error)] +pub(crate) enum ConnectionError { #[error("{COULD_NOT_CONNECT}: {0}")] - TlsError(#[from] InvalidDnsNameError), + TlsError(#[from] TlsError), #[error("{COULD_NOT_CONNECT}: {0}")] WakeComputeError(#[from] WakeComputeError), @@ -53,27 +91,11 @@ pub(crate) enum ConnectionError { impl UserFacingError for ConnectionError { fn to_string_client(&self) -> String { match self { - // This helps us drop irrelevant library-specific prefixes. - // TODO: propagate severity level and other parameters. - ConnectionError::Postgres(err) => match err.as_db_error() { - Some(err) => { - let msg = err.message(); - - if msg.starts_with("unsupported startup parameter: ") - || msg.starts_with("unsupported startup parameter in options: ") - { - format!("{msg}. Please use unpooled connection or remove this parameter from the startup package. More details: https://neon.tech/docs/connect/connection-errors#unsupported-startup-parameter") - } else { - msg.to_owned() - } - } - None => err.to_string(), - }, ConnectionError::WakeComputeError(err) => err.to_string_client(), ConnectionError::TooManyConnectionAttempts(_) => { "Failed to acquire permit to connect to the database. Too many database connection attempts are currently ongoing.".to_owned() } - _ => COULD_NOT_CONNECT.to_owned(), + ConnectionError::TlsError(_) => COULD_NOT_CONNECT.to_owned(), } } } @@ -81,11 +103,6 @@ impl UserFacingError for ConnectionError { impl ReportableError for ConnectionError { fn get_error_kind(&self) -> crate::error::ErrorKind { match self { - ConnectionError::Postgres(e) if e.as_db_error().is_some() => { - crate::error::ErrorKind::Postgres - } - ConnectionError::Postgres(_) => crate::error::ErrorKind::Compute, - ConnectionError::CouldNotConnect(_) => crate::error::ErrorKind::Compute, ConnectionError::TlsError(_) => crate::error::ErrorKind::Compute, ConnectionError::WakeComputeError(e) => e.get_error_kind(), ConnectionError::TooManyConnectionAttempts(e) => e.get_error_kind(), @@ -96,34 +113,91 @@ impl ReportableError for ConnectionError { /// A pair of `ClientKey` & `ServerKey` for `SCRAM-SHA-256`. pub(crate) type ScramKeys = postgres_client::config::ScramKeys<32>; -/// A config for establishing a connection to compute node. -/// Eventually, `postgres_client` will be replaced with something better. -/// Newtype allows us to implement methods on top of it. #[derive(Clone)] -pub(crate) struct ConnCfg(Box); +pub enum Auth { + /// Only used during console-redirect. + Password(Vec), + /// Used by sql-over-http, ws, tcp. + Scram(Box), +} + +/// A config for authenticating to the compute node. +pub(crate) struct AuthInfo { + /// None for local-proxy, as we use trust-based localhost auth. + /// Some for sql-over-http, ws, tcp, and in most cases for console-redirect. + /// Might be None for console-redirect, but that's only a consequence of testing environments ATM. + auth: Option, + server_params: StartupMessageParams, + + channel_binding: ChannelBinding, + + /// Console redirect sets user and database, we shouldn't re-use those from the params. 
+ skip_db_user: bool, +} + +/// Contains only the data needed to establish a secure connection to compute. +#[derive(Clone)] +pub struct ConnectInfo { + pub host_addr: Option, + pub host: Host, + pub port: u16, + pub ssl_mode: SslMode, +} /// Creation and initialization routines. -impl ConnCfg { - pub(crate) fn new(host: String, port: u16) -> Self { - Self(Box::new(postgres_client::Config::new(host, port))) - } - - /// Reuse password or auth keys from the other config. - pub(crate) fn reuse_password(&mut self, other: Self) { - if let Some(password) = other.get_password() { - self.password(password); - } - - if let Some(keys) = other.get_auth_keys() { - self.auth_keys(keys); +impl AuthInfo { + pub(crate) fn for_console_redirect(db: &str, user: &str, pw: Option<&str>) -> Self { + let mut server_params = StartupMessageParams::default(); + server_params.insert("database", db); + server_params.insert("user", user); + Self { + auth: pw.map(|pw| Auth::Password(pw.as_bytes().to_owned())), + server_params, + skip_db_user: true, + // pg-sni-router is a mitm so this would fail. + channel_binding: ChannelBinding::Disable, } } - pub(crate) fn get_host(&self) -> Host { - match self.0.get_host() { - postgres_client::config::Host::Tcp(s) => s.into(), + pub(crate) fn with_auth_keys(keys: ComputeCredentialKeys) -> Self { + Self { + auth: match keys { + ComputeCredentialKeys::AuthKeys(AuthKeys::ScramSha256(auth_keys)) => { + Some(Auth::Scram(Box::new(auth_keys))) + } + ComputeCredentialKeys::JwtPayload(_) => None, + }, + server_params: StartupMessageParams::default(), + skip_db_user: false, + channel_binding: ChannelBinding::Prefer, } } +} + +impl ConnectInfo { + pub fn to_postgres_client_config(&self) -> postgres_client::Config { + let mut config = postgres_client::Config::new(self.host.to_string(), self.port); + config.ssl_mode(self.ssl_mode); + if let Some(host_addr) = self.host_addr { + config.set_host_addr(host_addr); + } + config + } +} + +impl AuthInfo { + fn enrich(&self, mut config: postgres_client::Config) -> postgres_client::Config { + match &self.auth { + Some(Auth::Scram(keys)) => config.auth_keys(AuthKeys::ScramSha256(**keys)), + Some(Auth::Password(pw)) => config.password(pw), + None => &mut config, + }; + config.channel_binding(self.channel_binding); + for (k, v) in self.server_params.iter() { + config.set_param(k, v); + } + config + } /// Apply startup message params to the connection config. pub(crate) fn set_startup_params( @@ -132,53 +206,90 @@ impl ConnCfg { arbitrary_params: bool, ) { if !arbitrary_params { - self.set_param("client_encoding", "UTF8"); + self.server_params.insert("client_encoding", "UTF8"); } for (k, v) in params.iter() { match k { // Only set `user` if it's not present in the config. // Console redirect auth flow takes username from the console's response. - "user" if self.user_is_set() => {} - "database" if self.db_is_set() => {} + "user" | "database" if self.skip_db_user => {} "options" => { if let Some(options) = filtered_options(v) { - self.set_param(k, &options); + self.server_params.insert(k, &options); } } "user" | "database" | "application_name" | "replication" => { - self.set_param(k, v); + self.server_params.insert(k, v); } // if we allow arbitrary params, then we forward them through. 
// this is a flag for a period of backwards compatibility k if arbitrary_params => { - self.set_param(k, v); + self.server_params.insert(k, v); } _ => {} } } } -} -impl std::ops::Deref for ConnCfg { - type Target = postgres_client::Config; + pub async fn authenticate( + &self, + ctx: &RequestContext, + compute: &mut ComputeConnection, + user_info: &ComputeUserInfo, + ) -> Result { + // client config with stubbed connect info. + // TODO(conrad): should we rewrite this to bypass tokio-postgres2 entirely, + // utilising pqproto.rs. + let mut tmp_config = postgres_client::Config::new(String::new(), 0); + // We have already established SSL if necessary. + tmp_config.ssl_mode(SslMode::Disable); + let tmp_config = self.enrich(tmp_config); - fn deref(&self) -> &Self::Target { - &self.0 + let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); + let connection = tmp_config + .tls_and_authenticate(&mut compute.stream, NoTls) + .await?; + drop(pause); + + let RawConnection { + stream: _, + parameters, + delayed_notice, + process_id, + secret_key, + } = connection; + + tracing::Span::current().record("pid", tracing::field::display(process_id)); + + // NB: CancelToken is supposed to hold socket_addr, but we use connect_raw. + // Yet another reason to rework the connection establishing code. + let cancel_closure = CancelClosure::new( + compute.socket_addr, + RawCancelToken { + ssl_mode: compute.ssl_mode, + process_id, + secret_key, + }, + compute.hostname.to_string(), + user_info.clone(), + ); + + Ok(PostgresSettings { + params: parameters, + cancel_closure, + delayed_notice, + }) } } -/// For now, let's make it easier to setup the config. -impl std::ops::DerefMut for ConnCfg { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl ConnCfg { - /// Establish a raw TCP connection to the compute node. - async fn connect_raw(&self, timeout: Duration) -> io::Result<(SocketAddr, TcpStream, &str)> { - use postgres_client::config::Host; +impl ConnectInfo { + /// Establish a raw TCP+TLS connection to the compute node. + async fn connect_raw( + &self, + config: &ComputeConfig, + ) -> Result<(SocketAddr, MaybeTlsStream), TlsError> { + let timeout = config.timeout; // wrap TcpStream::connect with timeout let connect_with_timeout = |addrs| { @@ -208,112 +319,84 @@ impl ConnCfg { // We can't reuse connection establishing logic from `postgres_client` here, // because it has no means for extracting the underlying socket which we // require for our business. - let port = self.0.get_port(); - let host = self.0.get_host(); + let port = self.port; + let host = &*self.host; - let host = match host { - Host::Tcp(host) => host.as_str(), - }; - - let addrs = match self.0.get_host_addr() { + let addrs = match self.host_addr { Some(addr) => vec![SocketAddr::new(addr, port)], None => lookup_host((host, port)).await?.collect(), }; match connect_once(&*addrs).await { - Ok((sockaddr, stream)) => Ok((sockaddr, stream, host)), + Ok((sockaddr, stream)) => Ok(( + sockaddr, + tls::connect_tls(stream, self.ssl_mode, config, host).await?, + )), Err(err) => { warn!("couldn't connect to compute node at {host}:{port}: {err}"); - Err(err) + Err(TlsError::Connection(err)) } } } } -type RustlsStream = >::Stream; +pub type RustlsStream = >::Stream; +pub type MaybeRustlsStream = MaybeTlsStream; -pub(crate) struct PostgresConnection { - /// Socket connected to a compute node. - pub(crate) stream: - postgres_client::maybe_tls_stream::MaybeTlsStream, +// TODO(conrad): we don't need to parse these. 
+// These are just immediately forwarded back to the client. +// We could instead stream them out instead of reading them into memory. +pub struct PostgresSettings { /// PostgreSQL connection parameters. - pub(crate) params: std::collections::HashMap, + pub params: std::collections::HashMap, /// Query cancellation token. - pub(crate) cancel_closure: CancelClosure, - /// Labels for proxy's metrics. - pub(crate) aux: MetricsAuxInfo, + pub cancel_closure: CancelClosure, /// Notices received from compute after authenticating - pub(crate) delayed_notice: Vec, - - _guage: NumDbConnectionsGuard<'static>, + pub delayed_notice: Vec, } -impl ConnCfg { +pub struct ComputeConnection { + /// Socket connected to a compute node. + pub stream: MaybeTlsStream, + /// Labels for proxy's metrics. + pub aux: MetricsAuxInfo, + pub hostname: Host, + pub ssl_mode: SslMode, + pub socket_addr: SocketAddr, + pub guage: NumDbConnectionsGuard<'static>, +} + +impl ConnectInfo { /// Connect to a corresponding compute node. - pub(crate) async fn connect( + pub async fn connect( &self, ctx: &RequestContext, - aux: MetricsAuxInfo, + aux: &MetricsAuxInfo, config: &ComputeConfig, - user_info: ComputeUserInfo, - ) -> Result { + ) -> Result { let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); - let (socket_addr, stream, host) = self.connect_raw(config.timeout).await?; + let (socket_addr, stream) = self.connect_raw(config).await?; drop(pause); - let mut mk_tls = crate::tls::postgres_rustls::MakeRustlsConnect::new(config.tls.clone()); - let tls = >::make_tls_connect( - &mut mk_tls, - host, - )?; - - // connect_raw() will not use TLS if sslmode is "disable" - let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); - let connection = self.0.connect_raw(stream, tls).await?; - drop(pause); - - let RawConnection { - stream, - parameters, - delayed_notice, - process_id, - secret_key, - } = connection; - - tracing::Span::current().record("pid", tracing::field::display(process_id)); tracing::Span::current().record("compute_id", tracing::field::display(&aux.compute_id)); - let stream = stream.into_inner(); // TODO: lots of useful info but maybe we can move it elsewhere (eg traces?) info!( cold_start_info = ctx.cold_start_info().as_str(), - "connected to compute node at {host} ({socket_addr}) sslmode={:?}, latency={}, query_id={}", - self.0.get_ssl_mode(), + "connected to compute node at {} ({socket_addr}) sslmode={:?}, latency={}, query_id={}", + self.host, + self.ssl_mode, ctx.get_proxy_latency(), ctx.get_testodrome_id().unwrap_or_default(), ); - // NB: CancelToken is supposed to hold socket_addr, but we use connect_raw. - // Yet another reason to rework the connection establishing code. 
- let cancel_closure = CancelClosure::new( - socket_addr, - CancelToken { - socket_config: None, - ssl_mode: self.0.get_ssl_mode(), - process_id, - secret_key, - }, - host.to_string(), - user_info, - ); - - let connection = PostgresConnection { + let connection = ComputeConnection { stream, - params: parameters, - delayed_notice, - cancel_closure, - aux, - _guage: Metrics::get().proxy.db_connections.guard(ctx.protocol()), + socket_addr, + hostname: self.host.clone(), + ssl_mode: self.ssl_mode, + aux: aux.clone(), + guage: Metrics::get().proxy.db_connections.guard(ctx.protocol()), }; Ok(connection) diff --git a/proxy/src/compute/tls.rs b/proxy/src/compute/tls.rs new file mode 100644 index 0000000000..000d75fca5 --- /dev/null +++ b/proxy/src/compute/tls.rs @@ -0,0 +1,63 @@ +use futures::FutureExt; +use postgres_client::config::SslMode; +use postgres_client::maybe_tls_stream::MaybeTlsStream; +use postgres_client::tls::{MakeTlsConnect, TlsConnect}; +use rustls::pki_types::InvalidDnsNameError; +use thiserror::Error; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::pqproto::request_tls; +use crate::proxy::retry::CouldRetry; + +#[derive(Debug, Error)] +pub enum TlsError { + #[error(transparent)] + Dns(#[from] InvalidDnsNameError), + #[error(transparent)] + Connection(#[from] std::io::Error), + #[error("TLS required but not provided")] + Required, +} + +impl CouldRetry for TlsError { + fn could_retry(&self) -> bool { + match self { + TlsError::Dns(_) => false, + TlsError::Connection(err) => err.could_retry(), + // perhaps compute didn't realise it supports TLS? + TlsError::Required => true, + } + } +} + +pub async fn connect_tls( + mut stream: S, + mode: SslMode, + tls: &T, + host: &str, +) -> Result, TlsError> +where + S: AsyncRead + AsyncWrite + Unpin + Send, + T: MakeTlsConnect< + S, + Error = InvalidDnsNameError, + TlsConnect: TlsConnect, + >, +{ + match mode { + SslMode::Disable => return Ok(MaybeTlsStream::Raw(stream)), + SslMode::Prefer | SslMode::Require => {} + } + + if !request_tls(&mut stream).await? 
{ + if SslMode::Require == mode { + return Err(TlsError::Required); + } + + return Ok(MaybeTlsStream::Raw(stream)); + } + + Ok(MaybeTlsStream::Tls( + tls.make_tls_connect(host)?.connect(stream).boxed().await?, + )) +} diff --git a/proxy/src/config.rs b/proxy/src/config.rs index 248584a19a..f97006e206 100644 --- a/proxy/src/config.rs +++ b/proxy/src/config.rs @@ -4,17 +4,26 @@ use std::time::Duration; use anyhow::{Context, Ok, bail, ensure}; use arc_swap::ArcSwapOption; +use camino::{Utf8Path, Utf8PathBuf}; use clap::ValueEnum; +use compute_api::spec::LocalProxySpec; use remote_storage::RemoteStorageConfig; +use thiserror::Error; +use tokio::sync::Notify; +use tracing::{debug, error, info, warn}; use crate::auth::backend::jwt::JwkCache; +use crate::auth::backend::local::JWKS_ROLE_MAP; use crate::control_plane::locks::ApiLocks; +use crate::control_plane::messages::{EndpointJwksResponse, JwksSettings}; +use crate::ext::TaskExt; +use crate::intern::RoleNameInt; use crate::rate_limiter::{RateBucketInfo, RateLimitAlgorithm, RateLimiterConfig}; use crate::scram::threadpool::ThreadPool; use crate::serverless::GlobalConnPoolOptions; use crate::serverless::cancel_set::CancelSet; pub use crate::tls::server_config::{TlsConfig, configure_tls}; -use crate::types::Host; +use crate::types::{Host, RoleName}; pub struct ProxyConfig { pub tls_config: ArcSwapOption, @@ -22,11 +31,12 @@ pub struct ProxyConfig { pub http_config: HttpConfig, pub authentication_config: AuthenticationConfig, pub proxy_protocol_v2: ProxyProtocolV2, - pub region: String, pub handshake_timeout: Duration, pub wake_compute_retry_config: RetryConfig, pub connect_compute_locks: ApiLocks, pub connect_to_compute: ComputeConfig, + #[cfg(feature = "testing")] + pub disable_pg_session_jwt: bool, } pub struct ComputeConfig { @@ -410,6 +420,135 @@ impl FromStr for ConcurrencyLockOptions { } } +#[derive(Error, Debug)] +pub(crate) enum RefreshConfigError { + #[error(transparent)] + Read(#[from] std::io::Error), + #[error(transparent)] + Parse(#[from] serde_json::Error), + #[error(transparent)] + Validate(anyhow::Error), + #[error(transparent)] + Tls(anyhow::Error), +} + +pub(crate) async fn refresh_config_loop(config: &ProxyConfig, path: Utf8PathBuf, rx: Arc) { + let mut init = true; + loop { + rx.notified().await; + + match refresh_config_inner(config, &path).await { + std::result::Result::Ok(()) => {} + // don't log for file not found errors if this is the first time we are checking + // for computes that don't use local_proxy, this is not an error. + Err(RefreshConfigError::Read(e)) + if init && e.kind() == std::io::ErrorKind::NotFound => + { + debug!(error=?e, ?path, "could not read config file"); + } + Err(RefreshConfigError::Tls(e)) => { + error!(error=?e, ?path, "could not read TLS certificates"); + } + Err(e) => { + error!(error=?e, ?path, "could not read config file"); + } + } + + init = false; + } +} + +pub(crate) async fn refresh_config_inner( + config: &ProxyConfig, + path: &Utf8Path, +) -> Result<(), RefreshConfigError> { + let bytes = tokio::fs::read(&path).await?; + let data: LocalProxySpec = serde_json::from_slice(&bytes)?; + + let mut jwks_set = vec![]; + + fn parse_jwks_settings(jwks: compute_api::spec::JwksSettings) -> anyhow::Result { + let mut jwks_url = url::Url::from_str(&jwks.jwks_url).context("parsing JWKS url")?; + + ensure!( + jwks_url.has_authority() + && (jwks_url.scheme() == "http" || jwks_url.scheme() == "https"), + "Invalid JWKS url. 
Must be HTTP", + ); + + ensure!( + jwks_url.host().is_some_and(|h| h != url::Host::Domain("")), + "Invalid JWKS url. No domain listed", + ); + + // clear username, password and ports + jwks_url + .set_username("") + .expect("url can be a base and has a valid host and is not a file. should not error"); + jwks_url + .set_password(None) + .expect("url can be a base and has a valid host and is not a file. should not error"); + // local testing is hard if we need to have a specific restricted port + if cfg!(not(feature = "testing")) { + jwks_url.set_port(None).expect( + "url can be a base and has a valid host and is not a file. should not error", + ); + } + + // clear query params + jwks_url.set_fragment(None); + jwks_url.query_pairs_mut().clear().finish(); + + if jwks_url.scheme() != "https" { + // local testing is hard if we need to set up https support. + if cfg!(not(feature = "testing")) { + jwks_url + .set_scheme("https") + .expect("should not error to set the scheme to https if it was http"); + } else { + warn!(scheme = jwks_url.scheme(), "JWKS url is not HTTPS"); + } + } + + Ok(JwksSettings { + id: jwks.id, + jwks_url, + _provider_name: jwks.provider_name, + jwt_audience: jwks.jwt_audience, + role_names: jwks + .role_names + .into_iter() + .map(RoleName::from) + .map(|s| RoleNameInt::from(&s)) + .collect(), + }) + } + + for jwks in data.jwks.into_iter().flatten() { + jwks_set.push(parse_jwks_settings(jwks).map_err(RefreshConfigError::Validate)?); + } + + info!("successfully loaded new config"); + JWKS_ROLE_MAP.store(Some(Arc::new(EndpointJwksResponse { jwks: jwks_set }))); + + if let Some(tls_config) = data.tls { + let tls_config = tokio::task::spawn_blocking(move || { + crate::tls::server_config::configure_tls( + tls_config.key_path.as_ref(), + tls_config.cert_path.as_ref(), + None, + false, + ) + }) + .await + .propagate_task_panic() + .map_err(RefreshConfigError::Tls)?; + config.tls_config.store(Some(Arc::new(tls_config))); + } + + std::result::Result::Ok(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/proxy/src/console_redirect_proxy.rs b/proxy/src/console_redirect_proxy.rs index f2484b54b8..041a56e032 100644 --- a/proxy/src/console_redirect_proxy.rs +++ b/proxy/src/console_redirect_proxy.rs @@ -11,13 +11,13 @@ use crate::config::{ProxyConfig, ProxyProtocolV2}; use crate::context::RequestContext; use crate::error::ReportableError; use crate::metrics::{Metrics, NumClientConnectionsGuard}; -use crate::pglb::connect_compute::{TcpMechanism, connect_to_compute}; +use crate::pglb::ClientRequestError; use crate::pglb::handshake::{HandshakeData, handshake}; use crate::pglb::passthrough::ProxyPassthrough; use crate::protocol2::{ConnectHeader, ConnectionInfo, read_proxy_protocol}; -use crate::proxy::{ - ClientRequestError, ErrorSource, prepare_client_connection, run_until_cancelled, -}; +use crate::proxy::connect_compute::{TcpMechanism, connect_to_compute}; +use crate::proxy::{ErrorSource, finish_client_init}; +use crate::util::run_until_cancelled; pub async fn task_main( config: &'static ProxyConfig, @@ -90,12 +90,7 @@ pub async fn task_main( } } - let ctx = RequestContext::new( - session_id, - conn_info, - crate::metrics::Protocol::Tcp, - &config.region, - ); + let ctx = RequestContext::new(session_id, conn_info, crate::metrics::Protocol::Tcp); let res = handle_client( config, @@ -121,7 +116,7 @@ pub async fn task_main( Ok(Some(p)) => { ctx.set_success(); let _disconnect = ctx.log_connect(); - match p.proxy_pass(&config.connect_to_compute).await { + match p.proxy_pass().await { 
Ok(()) => {} Err(ErrorSource::Client(e)) => { error!( @@ -210,20 +205,18 @@ pub(crate) async fn handle_client( ctx.set_db_options(params.clone()); - let (node_info, user_info, _ip_allowlist) = match backend + let (node_info, mut auth_info, user_info) = match backend .authenticate(ctx, &config.authentication_config, &mut stream) .await { Ok(auth_result) => auth_result, Err(e) => Err(stream.throw_error(e, Some(ctx)).await)?, }; + auth_info.set_startup_params(¶ms, true); - let node = connect_to_compute( + let mut node = connect_to_compute( ctx, &TcpMechanism { - user_info, - params_compat: true, - params: ¶ms, locks: &config.connect_compute_locks, }, &node_info, @@ -233,22 +226,40 @@ pub(crate) async fn handle_client( .or_else(|e| async { Err(stream.throw_error(e, Some(ctx)).await) }) .await?; - let cancellation_handler_clone = Arc::clone(&cancellation_handler); - let session = cancellation_handler_clone.get_key(); + let pg_settings = auth_info + .authenticate(ctx, &mut node, &user_info) + .or_else(|e| async { Err(stream.throw_error(e, Some(ctx)).await) }) + .await?; - session.write_cancel_key(node.cancel_closure.clone())?; + let session = cancellation_handler.get_key(); - prepare_client_connection(&node, *session.key(), &mut stream); + finish_client_init(&pg_settings, *session.key(), &mut stream); let stream = stream.flush_and_into_inner().await?; + let session_id = ctx.session_id(); + let (cancel_on_shutdown, cancel) = tokio::sync::oneshot::channel(); + tokio::spawn(async move { + session + .maintain_cancel_key( + session_id, + cancel, + &pg_settings.cancel_closure, + &config.connect_to_compute, + ) + .await; + }); + Ok(Some(ProxyPassthrough { client: stream, - aux: node.aux.clone(), + compute: node.stream, + + aux: node.aux, private_link_id: None, - compute: node, - session_id: ctx.session_id(), - cancel: session, + + _cancel_on_shutdown: cancel_on_shutdown, + _req: request_gauge, _conn: conn_gauge, + _db_conn: node.guage, })) } diff --git a/proxy/src/context/mod.rs b/proxy/src/context/mod.rs index 24268997ba..7b0549e76f 100644 --- a/proxy/src/context/mod.rs +++ b/proxy/src/context/mod.rs @@ -46,7 +46,6 @@ struct RequestContextInner { pub(crate) session_id: Uuid, pub(crate) protocol: Protocol, first_packet: chrono::DateTime, - region: &'static str, pub(crate) span: Span, // filled in as they are discovered @@ -94,7 +93,6 @@ impl Clone for RequestContext { session_id: inner.session_id, protocol: inner.protocol, first_packet: inner.first_packet, - region: inner.region, span: info_span!("background_task"), project: inner.project, @@ -124,12 +122,7 @@ impl Clone for RequestContext { } impl RequestContext { - pub fn new( - session_id: Uuid, - conn_info: ConnectionInfo, - protocol: Protocol, - region: &'static str, - ) -> Self { + pub fn new(session_id: Uuid, conn_info: ConnectionInfo, protocol: Protocol) -> Self { // TODO: be careful with long lived spans let span = info_span!( "connect_request", @@ -145,7 +138,6 @@ impl RequestContext { session_id, protocol, first_packet: Utc::now(), - region, span, project: None, @@ -179,7 +171,7 @@ impl RequestContext { let ip = IpAddr::from([127, 0, 0, 1]); let addr = SocketAddr::new(ip, 5432); let conn_info = ConnectionInfo { addr, extra: None }; - RequestContext::new(Uuid::now_v7(), conn_info, Protocol::Tcp, "test") + RequestContext::new(Uuid::now_v7(), conn_info, Protocol::Tcp) } pub(crate) fn console_application_name(&self) -> String { @@ -217,11 +209,9 @@ impl RequestContext { if let Some(options_str) = options.get("options") { // If not found 
directly, try to extract it from the options string for option in options_str.split_whitespace() { - if option.starts_with("neon_query_id:") { - if let Some(value) = option.strip_prefix("neon_query_id:") { - this.set_testodrome_id(value.into()); - break; - } + if let Some(value) = option.strip_prefix("neon_query_id:") { + this.set_testodrome_id(value.into()); + break; } } } diff --git a/proxy/src/context/parquet.rs b/proxy/src/context/parquet.rs index c9d3905abd..b55cc14532 100644 --- a/proxy/src/context/parquet.rs +++ b/proxy/src/context/parquet.rs @@ -74,7 +74,7 @@ pub(crate) const FAILED_UPLOAD_MAX_RETRIES: u32 = 10; #[derive(parquet_derive::ParquetRecordWriter)] pub(crate) struct RequestData { - region: &'static str, + region: String, protocol: &'static str, /// Must be UTC. The derive macro doesn't like the timezones timestamp: chrono::NaiveDateTime, @@ -147,7 +147,7 @@ impl From<&RequestContextInner> for RequestData { }), jwt_issuer: value.jwt_issuer.clone(), protocol: value.protocol.as_str(), - region: value.region, + region: String::new(), error: value.error_kind.as_ref().map(|e| e.to_metric_label()), success: value.success, cold_start_info: value.cold_start_info.as_str(), @@ -167,6 +167,7 @@ impl From<&RequestContextInner> for RequestData { pub async fn worker( cancellation_token: CancellationToken, config: ParquetUploadArgs, + region: String, ) -> anyhow::Result<()> { let Some(remote_storage_config) = config.parquet_upload_remote_storage else { tracing::warn!("parquet request upload: no s3 bucket configured"); @@ -232,12 +233,17 @@ pub async fn worker( .context("remote storage for disconnect events init")?; let parquet_config_disconnect = parquet_config.clone(); tokio::try_join!( - worker_inner(storage, rx, parquet_config), - worker_inner(storage_disconnect, rx_disconnect, parquet_config_disconnect) + worker_inner(storage, rx, parquet_config, ®ion), + worker_inner( + storage_disconnect, + rx_disconnect, + parquet_config_disconnect, + ®ion + ) ) .map(|_| ()) } else { - worker_inner(storage, rx, parquet_config).await + worker_inner(storage, rx, parquet_config, ®ion).await } } @@ -257,6 +263,7 @@ async fn worker_inner( storage: GenericRemoteStorage, rx: impl Stream, config: ParquetConfig, + region: &str, ) -> anyhow::Result<()> { #[cfg(any(test, feature = "testing"))] let storage = if config.test_remote_failures > 0 { @@ -277,7 +284,8 @@ async fn worker_inner( let mut last_upload = time::Instant::now(); let mut len = 0; - while let Some(row) = rx.next().await { + while let Some(mut row) = rx.next().await { + region.clone_into(&mut row.region); rows.push(row); let force = last_upload.elapsed() > config.max_duration; if rows.len() == config.rows_per_group || force { @@ -533,7 +541,7 @@ mod tests { auth_method: None, jwt_issuer: None, protocol: ["tcp", "ws", "http"][rng.gen_range(0..3)], - region: "us-east-1", + region: String::new(), error: None, success: rng.r#gen(), cold_start_info: "no", @@ -565,7 +573,9 @@ mod tests { .await .unwrap(); - worker_inner(storage, rx, config).await.unwrap(); + worker_inner(storage, rx, config, "us-east-1") + .await + .unwrap(); let mut files = WalkDir::new(tmpdir.as_std_path()) .into_iter() diff --git a/proxy/src/control_plane/client/cplane_proxy_v1.rs b/proxy/src/control_plane/client/cplane_proxy_v1.rs index da548d6b2c..fbacc97661 100644 --- a/proxy/src/control_plane/client/cplane_proxy_v1.rs +++ b/proxy/src/control_plane/client/cplane_proxy_v1.rs @@ -146,6 +146,7 @@ impl NeonControlPlaneClient { public_access_blocked: block_public_connections, 
vpc_access_blocked: block_vpc_connections, }, + rate_limits: body.rate_limits, }) } .inspect_err(|e| tracing::debug!(error = ?e)) @@ -249,10 +250,8 @@ impl NeonControlPlaneClient { info!(duration = ?start.elapsed(), "received http response"); let body = parse_body::(response.status(), response.bytes().await?)?; - // Unfortunately, ownership won't let us use `Option::ok_or` here. - let (host, port) = match parse_host_port(&body.address) { - None => return Err(WakeComputeError::BadComputeAddress(body.address)), - Some(x) => x, + let Some((host, port)) = parse_host_port(&body.address) else { + return Err(WakeComputeError::BadComputeAddress(body.address)); }; let host_addr = IpAddr::from_str(host).ok(); @@ -261,24 +260,18 @@ impl NeonControlPlaneClient { Some(_) => SslMode::Require, None => SslMode::Disable, }; - let host_name = match body.server_name { - Some(host) => host, - None => host.to_owned(), + let host = match body.server_name { + Some(host) => host.into(), + None => host.into(), }; - // Don't set anything but host and port! This config will be cached. - // We'll set username and such later using the startup message. - // TODO: add more type safety (in progress). - let mut config = compute::ConnCfg::new(host_name, port); - - if let Some(addr) = host_addr { - config.set_host_addr(addr); - } - - config.ssl_mode(ssl_mode); - let node = NodeInfo { - config, + conn_info: compute::ConnectInfo { + host_addr, + host, + port, + ssl_mode, + }, aux: body.aux, }; @@ -318,6 +311,7 @@ impl super::ControlPlaneApi for NeonControlPlaneClient { allowed_ips: Arc::new(auth_info.allowed_ips), allowed_vpce: Arc::new(auth_info.allowed_vpc_endpoint_ids), flags: auth_info.access_blocker_flags, + rate_limits: auth_info.rate_limits, }; let role_control = RoleAccessControl { secret: auth_info.secret, @@ -363,6 +357,7 @@ impl super::ControlPlaneApi for NeonControlPlaneClient { allowed_ips: Arc::new(auth_info.allowed_ips), allowed_vpce: Arc::new(auth_info.allowed_vpc_endpoint_ids), flags: auth_info.access_blocker_flags, + rate_limits: auth_info.rate_limits, }; let role_control = RoleAccessControl { secret: auth_info.secret, diff --git a/proxy/src/control_plane/client/mock.rs b/proxy/src/control_plane/client/mock.rs index ece7153fce..b84dba6b09 100644 --- a/proxy/src/control_plane/client/mock.rs +++ b/proxy/src/control_plane/client/mock.rs @@ -6,6 +6,7 @@ use std::str::FromStr; use std::sync::Arc; use futures::TryFutureExt; +use postgres_client::config::SslMode; use thiserror::Error; use tokio_postgres::Client; use tracing::{Instrument, error, info, info_span, warn}; @@ -14,19 +15,20 @@ use crate::auth::IpPattern; use crate::auth::backend::ComputeUserInfo; use crate::auth::backend::jwt::AuthRule; use crate::cache::Cached; +use crate::compute::ConnectInfo; use crate::context::RequestContext; use crate::control_plane::errors::{ ControlPlaneError, GetAuthInfoError, GetEndpointJwksError, WakeComputeError, }; -use crate::control_plane::messages::MetricsAuxInfo; +use crate::control_plane::messages::{EndpointRateLimitConfig, MetricsAuxInfo}; use crate::control_plane::{ AccessBlockerFlags, AuthInfo, AuthSecret, CachedNodeInfo, EndpointAccessControl, NodeInfo, RoleAccessControl, }; use crate::intern::RoleNameInt; +use crate::scram; use crate::types::{BranchId, EndpointId, ProjectId, RoleName}; use crate::url::ApiUrl; -use crate::{compute, scram}; #[derive(Debug, Error)] enum MockApiError { @@ -87,8 +89,7 @@ impl MockControlPlane { .await? 
{ info!("got a secret: {entry}"); // safe since it's not a prod scenario - let secret = scram::ServerSecret::parse(&entry).map(AuthSecret::Scram); - secret.or_else(|| parse_md5(&entry).map(AuthSecret::Md5)) + scram::ServerSecret::parse(&entry).map(AuthSecret::Scram) } else { warn!("user '{role}' does not exist"); None @@ -129,6 +130,7 @@ impl MockControlPlane { project_id: None, account_id: None, access_blocker_flags: AccessBlockerFlags::default(), + rate_limits: EndpointRateLimitConfig::default(), }) } @@ -170,25 +172,23 @@ impl MockControlPlane { async fn do_wake_compute(&self) -> Result { let port = self.endpoint.port().unwrap_or(5432); - let mut config = match self.endpoint.host_str() { - None => { - let mut config = compute::ConnCfg::new("localhost".to_string(), port); - config.set_host_addr(IpAddr::V4(Ipv4Addr::LOCALHOST)); - config - } - Some(host) => { - let mut config = compute::ConnCfg::new(host.to_string(), port); - if let Ok(addr) = IpAddr::from_str(host) { - config.set_host_addr(addr); - } - config - } + let conn_info = match self.endpoint.host_str() { + None => ConnectInfo { + host_addr: Some(IpAddr::V4(Ipv4Addr::LOCALHOST)), + host: "localhost".into(), + port, + ssl_mode: SslMode::Disable, + }, + Some(host) => ConnectInfo { + host_addr: IpAddr::from_str(host).ok(), + host: host.into(), + port, + ssl_mode: SslMode::Disable, + }, }; - config.ssl_mode(postgres_client::config::SslMode::Disable); - let node = NodeInfo { - config, + conn_info, aux: MetricsAuxInfo { endpoint_id: (&EndpointId::from("endpoint")).into(), project_id: (&ProjectId::from("project")).into(), @@ -234,6 +234,7 @@ impl super::ControlPlaneApi for MockControlPlane { allowed_ips: Arc::new(info.allowed_ips), allowed_vpce: Arc::new(info.allowed_vpc_endpoint_ids), flags: info.access_blocker_flags, + rate_limits: info.rate_limits, }) } @@ -266,12 +267,3 @@ impl super::ControlPlaneApi for MockControlPlane { self.do_wake_compute().map_ok(Cached::new_uncached).await } } - -fn parse_md5(input: &str) -> Option<[u8; 16]> { - let text = input.strip_prefix("md5")?; - - let mut bytes = [0u8; 16]; - hex::decode_to_slice(text, &mut bytes).ok()?; - - Some(bytes) -} diff --git a/proxy/src/control_plane/client/mod.rs b/proxy/src/control_plane/client/mod.rs index 9b9d1e25ea..2ffc589df6 100644 --- a/proxy/src/control_plane/client/mod.rs +++ b/proxy/src/control_plane/client/mod.rs @@ -10,6 +10,7 @@ use clashmap::ClashMap; use tokio::time::Instant; use tracing::{debug, info}; +use super::{EndpointAccessControl, RoleAccessControl}; use crate::auth::backend::ComputeUserInfo; use crate::auth::backend::jwt::{AuthRule, FetchAuthRules, FetchAuthRulesError}; use crate::cache::endpoints::EndpointsCache; @@ -22,8 +23,6 @@ use crate::metrics::ApiLockMetrics; use crate::rate_limiter::{DynamicLimiter, Outcome, RateLimiterConfig, Token}; use crate::types::EndpointId; -use super::{EndpointAccessControl, RoleAccessControl}; - #[non_exhaustive] #[derive(Clone)] pub enum ControlPlaneClient { @@ -214,7 +213,12 @@ impl ApiLocks { self.metrics .semaphore_acquire_seconds .observe(now.elapsed().as_secs_f64()); - debug!("acquired permit {:?}", now.elapsed().as_secs_f64()); + + if permit.is_ok() { + debug!(elapsed = ?now.elapsed(), "acquired permit"); + } else { + debug!(elapsed = ?now.elapsed(), "timed out acquiring permit"); + } Ok(WakeComputePermit { permit: permit? 
}) } diff --git a/proxy/src/control_plane/messages.rs b/proxy/src/control_plane/messages.rs index ec4554eab5..f0314f91f0 100644 --- a/proxy/src/control_plane/messages.rs +++ b/proxy/src/control_plane/messages.rs @@ -227,12 +227,35 @@ pub(crate) struct UserFacingMessage { #[derive(Deserialize)] pub(crate) struct GetEndpointAccessControl { pub(crate) role_secret: Box, - pub(crate) allowed_ips: Option>, - pub(crate) allowed_vpc_endpoint_ids: Option>, + pub(crate) project_id: Option, pub(crate) account_id: Option, + + pub(crate) allowed_ips: Option>, + pub(crate) allowed_vpc_endpoint_ids: Option>, pub(crate) block_public_connections: Option, pub(crate) block_vpc_connections: Option, + + #[serde(default)] + pub(crate) rate_limits: EndpointRateLimitConfig, +} + +#[derive(Copy, Clone, Deserialize, Default)] +pub struct EndpointRateLimitConfig { + pub connection_attempts: ConnectionAttemptsLimit, +} + +#[derive(Copy, Clone, Deserialize, Default)] +pub struct ConnectionAttemptsLimit { + pub tcp: Option, + pub ws: Option, + pub http: Option, +} + +#[derive(Copy, Clone, Deserialize)] +pub struct LeakyBucketSetting { + pub rps: f64, + pub burst: f64, } /// Response which holds compute node's `host:port` pair. diff --git a/proxy/src/control_plane/mod.rs b/proxy/src/control_plane/mod.rs index 7ff093d9dc..a8c59dad0c 100644 --- a/proxy/src/control_plane/mod.rs +++ b/proxy/src/control_plane/mod.rs @@ -11,15 +11,18 @@ pub(crate) mod errors; use std::sync::Arc; +use messages::EndpointRateLimitConfig; + +use crate::auth::backend::ComputeUserInfo; use crate::auth::backend::jwt::AuthRule; -use crate::auth::backend::{ComputeCredentialKeys, ComputeUserInfo}; use crate::auth::{AuthError, IpPattern, check_peer_addr_is_in_list}; use crate::cache::{Cached, TimedLru}; use crate::config::ComputeConfig; use crate::context::RequestContext; use crate::control_plane::messages::{ControlPlaneErrorMessage, MetricsAuxInfo}; -use crate::intern::{AccountIdInt, ProjectIdInt}; +use crate::intern::{AccountIdInt, EndpointIdInt, ProjectIdInt}; use crate::protocol2::ConnectionInfoExtra; +use crate::rate_limiter::{EndpointRateLimiter, LeakyBucketConfig}; use crate::types::{EndpointCacheKey, EndpointId, RoleName}; use crate::{compute, scram}; @@ -39,10 +42,6 @@ pub mod mgmt; /// Auth secret which is managed by the cloud. #[derive(Clone, Eq, PartialEq, Debug)] pub(crate) enum AuthSecret { - #[cfg(any(test, feature = "testing"))] - /// Md5 hash of user's password. - Md5([u8; 16]), - /// [SCRAM](crate::scram) authentication info. Scram(scram::ServerSecret), } @@ -60,16 +59,14 @@ pub(crate) struct AuthInfo { pub(crate) account_id: Option, /// Are public connections or VPC connections blocked? pub(crate) access_blocker_flags: AccessBlockerFlags, + /// The rate limits for this endpoint. + pub(crate) rate_limits: EndpointRateLimitConfig, } /// Info for establishing a connection to a compute node. -/// This is what we get after auth succeeded, but not before! #[derive(Clone)] pub(crate) struct NodeInfo { - /// Compute node connection params. - /// It's sad that we have to clone this, but this will improve - /// once we migrate to a bespoke connection logic. - pub(crate) config: compute::ConnCfg, + pub(crate) conn_info: compute::ConnectInfo, /// Labels for proxy's metrics. 
pub(crate) aux: MetricsAuxInfo, @@ -80,24 +77,8 @@ impl NodeInfo { &self, ctx: &RequestContext, config: &ComputeConfig, - user_info: ComputeUserInfo, - ) -> Result { - self.config - .connect(ctx, self.aux.clone(), config, user_info) - .await - } - - pub(crate) fn reuse_settings(&mut self, other: Self) { - self.config.reuse_password(other.config); - } - - pub(crate) fn set_keys(&mut self, keys: &ComputeCredentialKeys) { - match keys { - #[cfg(any(test, feature = "testing"))] - ComputeCredentialKeys::Password(password) => self.config.password(password), - ComputeCredentialKeys::AuthKeys(auth_keys) => self.config.auth_keys(*auth_keys), - ComputeCredentialKeys::JwtPayload(_) | ComputeCredentialKeys::None => &mut self.config, - }; + ) -> Result { + self.conn_info.connect(ctx, &self.aux, config).await } } @@ -121,6 +102,8 @@ pub struct EndpointAccessControl { pub allowed_ips: Arc>, pub allowed_vpce: Arc>, pub flags: AccessBlockerFlags, + + pub rate_limits: EndpointRateLimitConfig, } impl EndpointAccessControl { @@ -159,6 +142,36 @@ impl EndpointAccessControl { Ok(()) } + + pub fn connection_attempt_rate_limit( + &self, + ctx: &RequestContext, + endpoint: &EndpointId, + rate_limiter: &EndpointRateLimiter, + ) -> Result<(), AuthError> { + let endpoint = EndpointIdInt::from(endpoint); + + let limits = &self.rate_limits.connection_attempts; + let config = match ctx.protocol() { + crate::metrics::Protocol::Http => limits.http, + crate::metrics::Protocol::Ws => limits.ws, + crate::metrics::Protocol::Tcp => limits.tcp, + crate::metrics::Protocol::SniRouter => return Ok(()), + }; + let config = config.and_then(|config| { + if config.rps <= 0.0 || config.burst <= 0.0 { + return None; + } + + Some(LeakyBucketConfig::new(config.rps, config.burst)) + }); + + if !rate_limiter.check(endpoint, config, 1) { + return Err(AuthError::too_many_connections()); + } + + Ok(()) + } } /// This will allocate per each call, but the http requests alone diff --git a/proxy/src/error.rs b/proxy/src/error.rs index aa02b211d9..e880d63075 100644 --- a/proxy/src/error.rs +++ b/proxy/src/error.rs @@ -78,16 +78,6 @@ pub(crate) trait ReportableError: fmt::Display + Send + 'static { fn get_error_kind(&self) -> ErrorKind; } -impl ReportableError for postgres_client::error::Error { - fn get_error_kind(&self) -> ErrorKind { - if self.as_db_error().is_some() { - ErrorKind::Postgres - } else { - ErrorKind::Compute - } - } -} - /// Flattens `Result>` into `Result`. 
pub fn flatten_err(r: Result, JoinError>) -> anyhow::Result { r.context("join error").and_then(|x| x) diff --git a/proxy/src/lib.rs b/proxy/src/lib.rs index d65d056585..263d784e78 100644 --- a/proxy/src/lib.rs +++ b/proxy/src/lib.rs @@ -61,6 +61,10 @@ clippy::too_many_lines, clippy::unused_self )] +#![allow( + clippy::unsafe_derive_deserialize, + reason = "false positive: https://github.com/rust-lang/rust-clippy/issues/15120" +)] #![cfg_attr( any(test, feature = "testing"), allow( @@ -75,6 +79,7 @@ pub mod binary; mod auth; +mod batch; mod cache; mod cancellation; mod compute; @@ -106,4 +111,5 @@ mod tls; mod types; mod url; mod usage_metrics; +mod util; mod waiters; diff --git a/proxy/src/logging.rs b/proxy/src/logging.rs index a58b55a704..e608300bd2 100644 --- a/proxy/src/logging.rs +++ b/proxy/src/logging.rs @@ -52,7 +52,7 @@ pub async fn init() -> anyhow::Result { StderrWriter { stderr: std::io::stderr(), }, - &["request_id", "session_id", "conn_id"], + &["conn_id", "ep", "query_id", "request_id", "session_id"], )) } else { None @@ -271,18 +271,18 @@ where }); // In case logging fails we generate a simpler JSON object. - if let Err(err) = res { - if let Ok(mut line) = serde_json::to_vec(&serde_json::json!( { + if let Err(err) = res + && let Ok(mut line) = serde_json::to_vec(&serde_json::json!( { "timestamp": now.to_rfc3339_opts(chrono::SecondsFormat::Micros, true), "level": "ERROR", "message": format_args!("cannot log event: {err:?}"), "fields": { "event": format_args!("{event:?}"), }, - })) { - line.push(b'\n'); - self.writer.make_writer().write_all(&line).ok(); - } + })) + { + line.push(b'\n'); + self.writer.make_writer().write_all(&line).ok(); } } @@ -583,10 +583,11 @@ impl EventFormatter { THREAD_ID.with(|tid| serializer.serialize_entry("thread_id", tid))?; // TODO: tls cache? name could change - if let Some(thread_name) = std::thread::current().name() { - if !thread_name.is_empty() && thread_name != "tokio-runtime-worker" { - serializer.serialize_entry("thread_name", thread_name)?; - } + if let Some(thread_name) = std::thread::current().name() + && !thread_name.is_empty() + && thread_name != "tokio-runtime-worker" + { + serializer.serialize_entry("thread_name", thread_name)?; } if let Some(task_id) = tokio::task::try_id() { @@ -596,10 +597,10 @@ impl EventFormatter { serializer.serialize_entry("target", meta.target())?; // Skip adding module if it's the same as target. 
- if let Some(module) = meta.module_path() { - if module != meta.target() { - serializer.serialize_entry("module", module)?; - } + if let Some(module) = meta.module_path() + && module != meta.target() + { + serializer.serialize_entry("module", module)?; } if let Some(file) = meta.file() { diff --git a/proxy/src/metrics.rs b/proxy/src/metrics.rs index 4b22c912eb..7a21e4ecee 100644 --- a/proxy/src/metrics.rs +++ b/proxy/src/metrics.rs @@ -236,13 +236,6 @@ pub enum Bool { False, } -#[derive(FixedCardinalityLabel, Copy, Clone)] -#[label(singleton = "outcome")] -pub enum Outcome { - Success, - Failed, -} - #[derive(FixedCardinalityLabel, Copy, Clone)] #[label(singleton = "outcome")] pub enum CacheOutcome { @@ -610,11 +603,11 @@ pub enum RedisEventsCount { BranchCreated, ProjectCreated, CancelSession, - PasswordUpdate, - AllowedIpsUpdate, - AllowedVpcEndpointIdsUpdateForProjects, - AllowedVpcEndpointIdsUpdateForAllProjectsInOrg, - BlockPublicOrVpcAccessUpdate, + InvalidateRole, + InvalidateEndpoint, + InvalidateProject, + InvalidateProjects, + InvalidateOrg, } pub struct ThreadPoolWorkers(usize); diff --git a/proxy/src/pglb/copy_bidirectional.rs b/proxy/src/pglb/copy_bidirectional.rs index 97f8d7c6af..5e4262a323 100644 --- a/proxy/src/pglb/copy_bidirectional.rs +++ b/proxy/src/pglb/copy_bidirectional.rs @@ -90,27 +90,27 @@ where // TODO: 1 info log, with a enum label for close direction. // Early termination checks from compute to client. - if let TransferState::Done(_) = compute_to_client { - if let TransferState::Running(buf) = &client_to_compute { - info!("Compute is done, terminate client"); - // Initiate shutdown - client_to_compute = TransferState::ShuttingDown(buf.amt); - client_to_compute_result = - transfer_one_direction(cx, &mut client_to_compute, client, compute) - .map_err(ErrorSource::from_client)?; - } + if let TransferState::Done(_) = compute_to_client + && let TransferState::Running(buf) = &client_to_compute + { + info!("Compute is done, terminate client"); + // Initiate shutdown + client_to_compute = TransferState::ShuttingDown(buf.amt); + client_to_compute_result = + transfer_one_direction(cx, &mut client_to_compute, client, compute) + .map_err(ErrorSource::from_client)?; } // Early termination checks from client to compute. - if let TransferState::Done(_) = client_to_compute { - if let TransferState::Running(buf) = &compute_to_client { - info!("Client is done, terminate compute"); - // Initiate shutdown - compute_to_client = TransferState::ShuttingDown(buf.amt); - compute_to_client_result = - transfer_one_direction(cx, &mut compute_to_client, compute, client) - .map_err(ErrorSource::from_compute)?; - } + if let TransferState::Done(_) = client_to_compute + && let TransferState::Running(buf) = &compute_to_client + { + info!("Client is done, terminate compute"); + // Initiate shutdown + compute_to_client = TransferState::ShuttingDown(buf.amt); + compute_to_client_result = + transfer_one_direction(cx, &mut compute_to_client, compute, client) + .map_err(ErrorSource::from_compute)?; } // It is not a problem if ready! returns early ... 
(comment remains the same) diff --git a/proxy/src/pglb/handshake.rs b/proxy/src/pglb/handshake.rs index 6970ab8714..25a2d01b4a 100644 --- a/proxy/src/pglb/handshake.rs +++ b/proxy/src/pglb/handshake.rs @@ -8,10 +8,10 @@ use crate::config::TlsConfig; use crate::context::RequestContext; use crate::error::ReportableError; use crate::metrics::Metrics; +use crate::pglb::TlsRequired; use crate::pqproto::{ BeMessage, CancelKeyData, FeStartupPacket, ProtocolVersion, StartupMessageParams, }; -use crate::proxy::TlsRequired; use crate::stream::{PqStream, Stream, StreamUpgradeError}; use crate::tls::PG_ALPN_PROTOCOL; diff --git a/proxy/src/pglb/mod.rs b/proxy/src/pglb/mod.rs index 4b107142a7..c4cab155c5 100644 --- a/proxy/src/pglb/mod.rs +++ b/proxy/src/pglb/mod.rs @@ -1,5 +1,333 @@ -pub mod connect_compute; pub mod copy_bidirectional; pub mod handshake; pub mod inprocess; pub mod passthrough; + +use std::sync::Arc; + +use futures::FutureExt; +use smol_str::ToSmolStr; +use thiserror::Error; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio_util::sync::CancellationToken; +use tracing::{Instrument, debug, error, info, warn}; + +use crate::auth; +use crate::cancellation::{self, CancellationHandler}; +use crate::config::{ProxyConfig, ProxyProtocolV2, TlsConfig}; +use crate::context::RequestContext; +use crate::error::{ReportableError, UserFacingError}; +use crate::metrics::{Metrics, NumClientConnectionsGuard}; +pub use crate::pglb::copy_bidirectional::ErrorSource; +use crate::pglb::handshake::{HandshakeData, HandshakeError, handshake}; +use crate::pglb::passthrough::ProxyPassthrough; +use crate::protocol2::{ConnectHeader, ConnectionInfo, ConnectionInfoExtra, read_proxy_protocol}; +use crate::proxy::handle_client; +use crate::rate_limiter::EndpointRateLimiter; +use crate::stream::Stream; +use crate::util::run_until_cancelled; + +pub const ERR_INSECURE_CONNECTION: &str = "connection is insecure (try using `sslmode=require`)"; + +#[derive(Error, Debug)] +#[error("{ERR_INSECURE_CONNECTION}")] +pub struct TlsRequired; + +impl ReportableError for TlsRequired { + fn get_error_kind(&self) -> crate::error::ErrorKind { + crate::error::ErrorKind::User + } +} + +impl UserFacingError for TlsRequired {} + +pub async fn task_main( + config: &'static ProxyConfig, + auth_backend: &'static auth::Backend<'static, ()>, + listener: tokio::net::TcpListener, + cancellation_token: CancellationToken, + cancellation_handler: Arc, + endpoint_rate_limiter: Arc, +) -> anyhow::Result<()> { + scopeguard::defer! { + info!("proxy has shut down"); + } + + // When set for the server socket, the keepalive setting + // will be inherited by all accepted client sockets. 
+ socket2::SockRef::from(&listener).set_keepalive(true)?; + + let connections = tokio_util::task::task_tracker::TaskTracker::new(); + let cancellations = tokio_util::task::task_tracker::TaskTracker::new(); + + while let Some(accept_result) = + run_until_cancelled(listener.accept(), &cancellation_token).await + { + let (socket, peer_addr) = accept_result?; + + let conn_gauge = Metrics::get() + .proxy + .client_connections + .guard(crate::metrics::Protocol::Tcp); + + let session_id = uuid::Uuid::new_v4(); + let cancellation_handler = Arc::clone(&cancellation_handler); + let cancellations = cancellations.clone(); + + debug!(protocol = "tcp", %session_id, "accepted new TCP connection"); + let endpoint_rate_limiter2 = endpoint_rate_limiter.clone(); + + connections.spawn(async move { + let (socket, conn_info) = match config.proxy_protocol_v2 { + ProxyProtocolV2::Required => { + match read_proxy_protocol(socket).await { + Err(e) => { + warn!("per-client task finished with an error: {e:#}"); + return; + } + // our load balancers will not send any more data. let's just exit immediately + Ok((_socket, ConnectHeader::Local)) => { + debug!("healthcheck received"); + return; + } + Ok((socket, ConnectHeader::Proxy(info))) => (socket, info), + } + } + // ignore the header - it cannot be confused for a postgres or http connection so will + // error later. + ProxyProtocolV2::Rejected => ( + socket, + ConnectionInfo { + addr: peer_addr, + extra: None, + }, + ), + }; + + match socket.set_nodelay(true) { + Ok(()) => {} + Err(e) => { + error!( + "per-client task finished with an error: failed to set socket option: {e:#}" + ); + return; + } + } + + let ctx = RequestContext::new(session_id, conn_info, crate::metrics::Protocol::Tcp); + + let res = handle_connection( + config, + auth_backend, + &ctx, + cancellation_handler, + socket, + ClientMode::Tcp, + endpoint_rate_limiter2, + conn_gauge, + cancellations, + ) + .instrument(ctx.span()) + .boxed() + .await; + + match res { + Err(e) => { + ctx.set_error_kind(e.get_error_kind()); + warn!(parent: &ctx.span(), "per-client task finished with an error: {e:#}"); + } + Ok(None) => { + ctx.set_success(); + } + Ok(Some(p)) => { + ctx.set_success(); + let _disconnect = ctx.log_connect(); + match p.proxy_pass().await { + Ok(()) => {} + Err(ErrorSource::Client(e)) => { + warn!( + ?session_id, + "per-client task finished with an IO error from the client: {e:#}" + ); + } + Err(ErrorSource::Compute(e)) => { + error!( + ?session_id, + "per-client task finished with an IO error from the compute: {e:#}" + ); + } + } + } + } + }); + } + + connections.close(); + cancellations.close(); + drop(listener); + + // Drain connections + connections.wait().await; + cancellations.wait().await; + + Ok(()) +} + +pub(crate) enum ClientMode { + Tcp, + Websockets { hostname: Option }, +} + +/// Abstracts the logic of handling TCP vs WS clients +impl ClientMode { + pub fn allow_cleartext(&self) -> bool { + match self { + ClientMode::Tcp => false, + ClientMode::Websockets { .. } => true, + } + } + + pub fn hostname<'a, S>(&'a self, s: &'a Stream) -> Option<&'a str> { + match self { + ClientMode::Tcp => s.sni_hostname(), + ClientMode::Websockets { hostname } => hostname.as_deref(), + } + } + + pub fn handshake_tls<'a>(&self, tls: Option<&'a TlsConfig>) -> Option<&'a TlsConfig> { + match self { + ClientMode::Tcp => tls, + // TLS is None here if using websockets, because the connection is already encrypted. + ClientMode::Websockets { .. 
} => None, + } + } +} + +#[derive(Debug, Error)] +// almost all errors should be reported to the user, but there's a few cases where we cannot +// 1. Cancellation: we are not allowed to tell the client any cancellation statuses for security reasons +// 2. Handshake: handshake reports errors if it can, otherwise if the handshake fails due to protocol violation, +// we cannot be sure the client even understands our error message +// 3. PrepareClient: The client disconnected, so we can't tell them anyway... +pub(crate) enum ClientRequestError { + #[error("{0}")] + Cancellation(#[from] cancellation::CancelError), + #[error("{0}")] + Handshake(#[from] HandshakeError), + #[error("{0}")] + HandshakeTimeout(#[from] tokio::time::error::Elapsed), + #[error("{0}")] + PrepareClient(#[from] std::io::Error), + #[error("{0}")] + ReportedError(#[from] crate::stream::ReportedError), +} + +impl ReportableError for ClientRequestError { + fn get_error_kind(&self) -> crate::error::ErrorKind { + match self { + ClientRequestError::Cancellation(e) => e.get_error_kind(), + ClientRequestError::Handshake(e) => e.get_error_kind(), + ClientRequestError::HandshakeTimeout(_) => crate::error::ErrorKind::RateLimit, + ClientRequestError::ReportedError(e) => e.get_error_kind(), + ClientRequestError::PrepareClient(_) => crate::error::ErrorKind::ClientDisconnect, + } + } +} + +#[allow(clippy::too_many_arguments)] +pub(crate) async fn handle_connection( + config: &'static ProxyConfig, + auth_backend: &'static auth::Backend<'static, ()>, + ctx: &RequestContext, + cancellation_handler: Arc, + client: S, + mode: ClientMode, + endpoint_rate_limiter: Arc, + conn_gauge: NumClientConnectionsGuard<'static>, + cancellations: tokio_util::task::task_tracker::TaskTracker, +) -> Result>, ClientRequestError> { + debug!( + protocol = %ctx.protocol(), + "handling interactive connection from client" + ); + + let metrics = &Metrics::get().proxy; + let proto = ctx.protocol(); + let request_gauge = metrics.connection_requests.guard(proto); + + let tls = config.tls_config.load(); + let tls = tls.as_deref(); + + let record_handshake_error = !ctx.has_private_peer_addr(); + let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Client); + let do_handshake = handshake(ctx, client, mode.handshake_tls(tls), record_handshake_error); + + let (mut client, params) = match tokio::time::timeout(config.handshake_timeout, do_handshake) + .await?? 
+ { + HandshakeData::Startup(client, params) => (client, params), + HandshakeData::Cancel(cancel_key_data) => { + // spawn a task to cancel the session, but don't wait for it + cancellations.spawn({ + let cancellation_handler_clone = Arc::clone(&cancellation_handler); + let ctx = ctx.clone(); + let cancel_span = tracing::span!(parent: None, tracing::Level::INFO, "cancel_session", session_id = ?ctx.session_id()); + cancel_span.follows_from(tracing::Span::current()); + async move { + cancellation_handler_clone + .cancel_session( + cancel_key_data, + ctx, + config.authentication_config.ip_allowlist_check_enabled, + config.authentication_config.is_vpc_acccess_proxy, + auth_backend.get_api(), + ) + .await + .inspect_err(|e | debug!(error = ?e, "cancel_session failed")).ok(); + }.instrument(cancel_span) + }); + + return Ok(None); + } + }; + drop(pause); + + ctx.set_db_options(params.clone()); + + let common_names = tls.map(|tls| &tls.common_names); + + let (node, cancel_on_shutdown) = handle_client( + config, + auth_backend, + ctx, + cancellation_handler, + &mut client, + &mode, + endpoint_rate_limiter, + common_names, + ¶ms, + ) + .await?; + + let client = client.flush_and_into_inner().await?; + + let private_link_id = match ctx.extra() { + Some(ConnectionInfoExtra::Aws { vpce_id }) => Some(vpce_id.clone()), + Some(ConnectionInfoExtra::Azure { link_id }) => Some(link_id.to_smolstr()), + None => None, + }; + + Ok(Some(ProxyPassthrough { + client, + compute: node.stream, + + aux: node.aux, + private_link_id, + + _cancel_on_shutdown: cancel_on_shutdown, + + _req: request_gauge, + _conn: conn_gauge, + _db_conn: node.guage, + })) +} diff --git a/proxy/src/pglb/passthrough.rs b/proxy/src/pglb/passthrough.rs index 6f651d383d..d4c029f6d9 100644 --- a/proxy/src/pglb/passthrough.rs +++ b/proxy/src/pglb/passthrough.rs @@ -1,15 +1,17 @@ -use futures::FutureExt; +use std::convert::Infallible; + use smol_str::SmolStr; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::debug; use utils::measured_stream::MeasuredStream; use super::copy_bidirectional::ErrorSource; -use crate::cancellation; -use crate::compute::PostgresConnection; -use crate::config::ComputeConfig; +use crate::compute::MaybeRustlsStream; use crate::control_plane::messages::MetricsAuxInfo; -use crate::metrics::{Direction, Metrics, NumClientConnectionsGuard, NumConnectionRequestsGuard}; +use crate::metrics::{ + Direction, Metrics, NumClientConnectionsGuard, NumConnectionRequestsGuard, + NumDbConnectionsGuard, +}; use crate::stream::Stream; use crate::usage_metrics::{Ids, MetricCounterRecorder, USAGE_METRICS}; @@ -64,40 +66,20 @@ pub(crate) async fn proxy_pass( pub(crate) struct ProxyPassthrough { pub(crate) client: Stream, - pub(crate) compute: PostgresConnection, + pub(crate) compute: MaybeRustlsStream, + pub(crate) aux: MetricsAuxInfo, - pub(crate) session_id: uuid::Uuid, pub(crate) private_link_id: Option, - pub(crate) cancel: cancellation::Session, + + pub(crate) _cancel_on_shutdown: tokio::sync::oneshot::Sender, pub(crate) _req: NumConnectionRequestsGuard<'static>, pub(crate) _conn: NumClientConnectionsGuard<'static>, + pub(crate) _db_conn: NumDbConnectionsGuard<'static>, } impl ProxyPassthrough { - pub(crate) async fn proxy_pass( - self, - compute_config: &ComputeConfig, - ) -> Result<(), ErrorSource> { - let res = proxy_pass( - self.client, - self.compute.stream, - self.aux, - self.private_link_id, - ) - .await; - if let Err(err) = self - .compute - .cancel_closure - .try_cancel_query(compute_config) - .boxed() - .await - { - 
tracing::warn!(session_id = ?self.session_id, ?err, "could not cancel the query in the database"); - } - - drop(self.cancel.remove_cancel_key()); // we don't need a result. If the queue is full, we just log the error - - res + pub(crate) async fn proxy_pass(self) -> Result<(), ErrorSource> { + proxy_pass(self.client, self.compute, self.aux, self.private_link_id).await } } diff --git a/proxy/src/pqproto.rs b/proxy/src/pqproto.rs index 43074bf208..ad99eecda5 100644 --- a/proxy/src/pqproto.rs +++ b/proxy/src/pqproto.rs @@ -8,7 +8,7 @@ use std::io::{self, Cursor}; use bytes::{Buf, BufMut}; use itertools::Itertools; use rand::distributions::{Distribution, Standard}; -use tokio::io::{AsyncRead, AsyncReadExt}; +use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use zerocopy::{FromBytes, Immutable, IntoBytes, big_endian}; pub type ErrorCode = [u8; 5]; @@ -53,6 +53,28 @@ impl fmt::Debug for ProtocolVersion { } } +/// +const MAX_STARTUP_PACKET_LENGTH: usize = 10000; +const RESERVED_INVALID_MAJOR_VERSION: u16 = 1234; +/// +const CANCEL_REQUEST_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5678); +/// +const NEGOTIATE_SSL_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5679); +/// +const NEGOTIATE_GSS_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5680); + +/// This first reads the startup message header, is 8 bytes. +/// The first 4 bytes is a big-endian message length, and the next 4 bytes is a version number. +/// +/// The length value is inclusive of the header. For example, +/// an empty message will always have length 8. +#[derive(Clone, Copy, FromBytes, IntoBytes, Immutable)] +#[repr(C)] +struct StartupHeader { + len: big_endian::U32, + version: ProtocolVersion, +} + /// read the type from the stream using zerocopy. /// /// not cancel safe. @@ -66,32 +88,38 @@ macro_rules! read { }}; } +/// Returns true if TLS is supported. +/// +/// This is not cancel safe. +pub async fn request_tls(stream: &mut S) -> io::Result +where + S: AsyncRead + AsyncWrite + Unpin, +{ + let payload = StartupHeader { + len: 8.into(), + version: NEGOTIATE_SSL_CODE, + }; + stream.write_all(payload.as_bytes()).await?; + stream.flush().await?; + + // we expect back either `S` or `N` as a single byte. + let mut res = *b"0"; + stream.read_exact(&mut res).await?; + + debug_assert!( + res == *b"S" || res == *b"N", + "unexpected SSL negotiation response: {}", + char::from(res[0]), + ); + + // S for SSL. + Ok(res == *b"S") +} + pub async fn read_startup(stream: &mut S) -> io::Result where S: AsyncRead + Unpin, { - /// - const MAX_STARTUP_PACKET_LENGTH: usize = 10000; - const RESERVED_INVALID_MAJOR_VERSION: u16 = 1234; - /// - const CANCEL_REQUEST_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5678); - /// - const NEGOTIATE_SSL_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5679); - /// - const NEGOTIATE_GSS_CODE: ProtocolVersion = ProtocolVersion::new(1234, 5680); - - /// This first reads the startup message header, is 8 bytes. - /// The first 4 bytes is a big-endian message length, and the next 4 bytes is a version number. - /// - /// The length value is inclusive of the header. For example, - /// an empty message will always have length 8. 
- #[derive(Clone, Copy, FromBytes, IntoBytes, Immutable)] - #[repr(C)] - struct StartupHeader { - len: big_endian::U32, - version: ProtocolVersion, - } - let header = read!(stream => StartupHeader); // @@ -564,9 +592,8 @@ mod tests { use tokio::io::{AsyncWriteExt, duplex}; use zerocopy::IntoBytes; - use crate::pqproto::{FeStartupPacket, read_message, read_startup}; - use super::ProtocolVersion; + use crate::pqproto::{FeStartupPacket, read_message, read_startup}; #[tokio::test] async fn reject_large_startup() { diff --git a/proxy/src/pglb/connect_compute.rs b/proxy/src/proxy/connect_compute.rs similarity index 74% rename from proxy/src/pglb/connect_compute.rs rename to proxy/src/proxy/connect_compute.rs index 1d6ca5fbb3..9f642f52ab 100644 --- a/proxy/src/pglb/connect_compute.rs +++ b/proxy/src/proxy/connect_compute.rs @@ -2,26 +2,24 @@ use async_trait::async_trait; use tokio::time; use tracing::{debug, info, warn}; -use crate::auth::backend::{ComputeCredentialKeys, ComputeUserInfo}; -use crate::compute::{self, COULD_NOT_CONNECT, PostgresConnection}; +use crate::compute::{self, COULD_NOT_CONNECT, ComputeConnection}; use crate::config::{ComputeConfig, RetryConfig}; use crate::context::RequestContext; use crate::control_plane::errors::WakeComputeError; use crate::control_plane::locks::ApiLocks; -use crate::control_plane::{self, CachedNodeInfo, NodeInfo}; +use crate::control_plane::{self, NodeInfo}; use crate::error::ReportableError; use crate::metrics::{ ConnectOutcome, ConnectionFailureKind, Metrics, RetriesMetricGroup, RetryType, }; -use crate::pqproto::StartupMessageParams; use crate::proxy::retry::{CouldRetry, ShouldRetryWakeCompute, retry_after, should_retry}; -use crate::proxy::wake_compute::wake_compute; +use crate::proxy::wake_compute::{WakeComputeBackend, wake_compute}; use crate::types::Host; /// If we couldn't connect, a cached connection info might be to blame /// (e.g. the compute node's address might've changed at the wrong time). /// Invalidate the cache entry (if any) to prevent subsequent errors. -#[tracing::instrument(name = "invalidate_cache", skip_all)] +#[tracing::instrument(skip_all)] pub(crate) fn invalidate_cache(node_info: control_plane::CachedNodeInfo) -> NodeInfo { let is_cached = node_info.cached(); if is_cached { @@ -48,35 +46,16 @@ pub(crate) trait ConnectMechanism { node_info: &control_plane::CachedNodeInfo, config: &ComputeConfig, ) -> Result; - - fn update_connect_config(&self, conf: &mut compute::ConnCfg); } -#[async_trait] -pub(crate) trait ComputeConnectBackend { - async fn wake_compute( - &self, - ctx: &RequestContext, - ) -> Result; - - fn get_keys(&self) -> &ComputeCredentialKeys; -} - -pub(crate) struct TcpMechanism<'a> { - pub(crate) params_compat: bool, - - /// KV-dictionary with PostgreSQL connection params. 
- pub(crate) params: &'a StartupMessageParams, - +pub(crate) struct TcpMechanism { /// connect_to_compute concurrency lock pub(crate) locks: &'static ApiLocks, - - pub(crate) user_info: ComputeUserInfo, } #[async_trait] -impl ConnectMechanism for TcpMechanism<'_> { - type Connection = PostgresConnection; +impl ConnectMechanism for TcpMechanism { + type Connection = ComputeConnection; type ConnectError = compute::ConnectionError; type Error = compute::ConnectionError; @@ -89,20 +68,15 @@ impl ConnectMechanism for TcpMechanism<'_> { ctx: &RequestContext, node_info: &control_plane::CachedNodeInfo, config: &ComputeConfig, - ) -> Result { - let host = node_info.config.get_host(); - let permit = self.locks.get_permit(&host).await?; - permit.release_result(node_info.connect(ctx, config, self.user_info.clone()).await) - } - - fn update_connect_config(&self, config: &mut compute::ConnCfg) { - config.set_startup_params(self.params, self.params_compat); + ) -> Result { + let permit = self.locks.get_permit(&node_info.conn_info.host).await?; + permit.release_result(node_info.connect(ctx, config).await) } } /// Try to connect to the compute node, retrying if necessary. #[tracing::instrument(skip_all)] -pub(crate) async fn connect_to_compute( +pub(crate) async fn connect_to_compute( ctx: &RequestContext, mechanism: &M, user_info: &B, @@ -114,12 +88,9 @@ where M::Error: From, { let mut num_retries = 0; - let mut node_info = + let node_info = wake_compute(&mut num_retries, ctx, user_info, wake_compute_retry_config).await?; - node_info.set_keys(user_info.get_keys()); - mechanism.update_connect_config(&mut node_info.config); - // try once let err = match mechanism.connect_once(ctx, &node_info, compute).await { Ok(res) => { @@ -141,7 +112,7 @@ where let node_info = if !node_info.cached() || !err.should_retry_wake_compute() { // If we just recieved this from cplane and didn't get it from cache, we shouldn't retry. // Do not need to retrieve a new node_info, just return the old one. - if should_retry(&err, num_retries, compute.retry) { + if !should_retry(&err, num_retries, compute.retry) { Metrics::get().proxy.retries_metric.observe( RetriesMetricGroup { outcome: ConnectOutcome::Failed, @@ -155,14 +126,9 @@ where } else { // if we failed to connect, it's likely that the compute node was suspended, wake a new compute node debug!("compute node's state has likely changed; requesting a wake-up"); - let old_node_info = invalidate_cache(node_info); + invalidate_cache(node_info); // TODO: increment num_retries? - let mut node_info = - wake_compute(&mut num_retries, ctx, user_info, wake_compute_retry_config).await?; - node_info.reuse_settings(old_node_info); - - mechanism.update_connect_config(&mut node_info.config); - node_info + wake_compute(&mut num_retries, ctx, user_info, wake_compute_retry_config).await? }; // now that we have a new node, try connect to it repeatedly. 
diff --git a/proxy/src/proxy/mod.rs b/proxy/src/proxy/mod.rs index 0e138cc0c7..08c81afa04 100644 --- a/proxy/src/proxy/mod.rs +++ b/proxy/src/proxy/mod.rs @@ -1,344 +1,68 @@ #[cfg(test)] mod tests; +pub(crate) mod connect_compute; pub(crate) mod retry; pub(crate) mod wake_compute; + +use std::collections::HashSet; +use std::convert::Infallible; use std::sync::Arc; -use futures::FutureExt; use itertools::Itertools; use once_cell::sync::OnceCell; use regex::Regex; use serde::{Deserialize, Serialize}; -use smol_str::{SmolStr, ToSmolStr, format_smolstr}; -use thiserror::Error; +use smol_str::{SmolStr, format_smolstr}; use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_util::sync::CancellationToken; -use tracing::{Instrument, debug, error, info, warn}; +use tokio::sync::oneshot; +use tracing::Instrument; -use crate::cancellation::{self, CancellationHandler}; -use crate::config::{ProxyConfig, ProxyProtocolV2, TlsConfig}; +use crate::cache::Cache; +use crate::cancellation::CancellationHandler; +use crate::compute::ComputeConnection; +use crate::config::ProxyConfig; use crate::context::RequestContext; -use crate::error::{ReportableError, UserFacingError}; -use crate::metrics::{Metrics, NumClientConnectionsGuard}; -use crate::pglb::connect_compute::{TcpMechanism, connect_to_compute}; +use crate::control_plane::client::ControlPlaneClient; pub use crate::pglb::copy_bidirectional::{ErrorSource, copy_bidirectional_client_compute}; -use crate::pglb::handshake::{HandshakeData, HandshakeError, handshake}; -use crate::pglb::passthrough::ProxyPassthrough; +use crate::pglb::{ClientMode, ClientRequestError}; use crate::pqproto::{BeMessage, CancelKeyData, StartupMessageParams}; -use crate::protocol2::{ConnectHeader, ConnectionInfo, ConnectionInfoExtra, read_proxy_protocol}; +use crate::proxy::connect_compute::{TcpMechanism, connect_to_compute}; +use crate::proxy::retry::ShouldRetryWakeCompute; use crate::rate_limiter::EndpointRateLimiter; use crate::stream::{PqStream, Stream}; use crate::types::EndpointCacheKey; use crate::{auth, compute}; -const ERR_INSECURE_CONNECTION: &str = "connection is insecure (try using `sslmode=require`)"; - -#[derive(Error, Debug)] -#[error("{ERR_INSECURE_CONNECTION}")] -pub struct TlsRequired; - -impl ReportableError for TlsRequired { - fn get_error_kind(&self) -> crate::error::ErrorKind { - crate::error::ErrorKind::User - } -} - -impl UserFacingError for TlsRequired {} - -pub async fn run_until_cancelled( - f: F, - cancellation_token: &CancellationToken, -) -> Option { - match futures::future::select( - std::pin::pin!(f), - std::pin::pin!(cancellation_token.cancelled()), - ) - .await - { - futures::future::Either::Left((f, _)) => Some(f), - futures::future::Either::Right(((), _)) => None, - } -} - -pub async fn task_main( - config: &'static ProxyConfig, - auth_backend: &'static auth::Backend<'static, ()>, - listener: tokio::net::TcpListener, - cancellation_token: CancellationToken, - cancellation_handler: Arc, - endpoint_rate_limiter: Arc, -) -> anyhow::Result<()> { - scopeguard::defer! { - info!("proxy has shut down"); - } - - // When set for the server socket, the keepalive setting - // will be inherited by all accepted client sockets. 
- socket2::SockRef::from(&listener).set_keepalive(true)?; - - let connections = tokio_util::task::task_tracker::TaskTracker::new(); - let cancellations = tokio_util::task::task_tracker::TaskTracker::new(); - - while let Some(accept_result) = - run_until_cancelled(listener.accept(), &cancellation_token).await - { - let (socket, peer_addr) = accept_result?; - - let conn_gauge = Metrics::get() - .proxy - .client_connections - .guard(crate::metrics::Protocol::Tcp); - - let session_id = uuid::Uuid::new_v4(); - let cancellation_handler = Arc::clone(&cancellation_handler); - let cancellations = cancellations.clone(); - - debug!(protocol = "tcp", %session_id, "accepted new TCP connection"); - let endpoint_rate_limiter2 = endpoint_rate_limiter.clone(); - - connections.spawn(async move { - let (socket, conn_info) = match config.proxy_protocol_v2 { - ProxyProtocolV2::Required => { - match read_proxy_protocol(socket).await { - Err(e) => { - warn!("per-client task finished with an error: {e:#}"); - return; - } - // our load balancers will not send any more data. let's just exit immediately - Ok((_socket, ConnectHeader::Local)) => { - debug!("healthcheck received"); - return; - } - Ok((socket, ConnectHeader::Proxy(info))) => (socket, info), - } - } - // ignore the header - it cannot be confused for a postgres or http connection so will - // error later. - ProxyProtocolV2::Rejected => ( - socket, - ConnectionInfo { - addr: peer_addr, - extra: None, - }, - ), - }; - - match socket.set_nodelay(true) { - Ok(()) => {} - Err(e) => { - error!( - "per-client task finished with an error: failed to set socket option: {e:#}" - ); - return; - } - } - - let ctx = RequestContext::new( - session_id, - conn_info, - crate::metrics::Protocol::Tcp, - &config.region, - ); - - let res = handle_client( - config, - auth_backend, - &ctx, - cancellation_handler, - socket, - ClientMode::Tcp, - endpoint_rate_limiter2, - conn_gauge, - cancellations, - ) - .instrument(ctx.span()) - .boxed() - .await; - - match res { - Err(e) => { - ctx.set_error_kind(e.get_error_kind()); - warn!(parent: &ctx.span(), "per-client task finished with an error: {e:#}"); - } - Ok(None) => { - ctx.set_success(); - } - Ok(Some(p)) => { - ctx.set_success(); - let _disconnect = ctx.log_connect(); - match p.proxy_pass(&config.connect_to_compute).await { - Ok(()) => {} - Err(ErrorSource::Client(e)) => { - warn!( - ?session_id, - "per-client task finished with an IO error from the client: {e:#}" - ); - } - Err(ErrorSource::Compute(e)) => { - error!( - ?session_id, - "per-client task finished with an IO error from the compute: {e:#}" - ); - } - } - } - } - }); - } - - connections.close(); - cancellations.close(); - drop(listener); - - // Drain connections - connections.wait().await; - cancellations.wait().await; - - Ok(()) -} - -pub(crate) enum ClientMode { - Tcp, - Websockets { hostname: Option }, -} - -/// Abstracts the logic of handling TCP vs WS clients -impl ClientMode { - pub(crate) fn allow_cleartext(&self) -> bool { - match self { - ClientMode::Tcp => false, - ClientMode::Websockets { .. } => true, - } - } - - fn hostname<'a, S>(&'a self, s: &'a Stream) -> Option<&'a str> { - match self { - ClientMode::Tcp => s.sni_hostname(), - ClientMode::Websockets { hostname } => hostname.as_deref(), - } - } - - fn handshake_tls<'a>(&self, tls: Option<&'a TlsConfig>) -> Option<&'a TlsConfig> { - match self { - ClientMode::Tcp => tls, - // TLS is None here if using websockets, because the connection is already encrypted. - ClientMode::Websockets { .. 
} => None, - } - } -} - -#[derive(Debug, Error)] -// almost all errors should be reported to the user, but there's a few cases where we cannot -// 1. Cancellation: we are not allowed to tell the client any cancellation statuses for security reasons -// 2. Handshake: handshake reports errors if it can, otherwise if the handshake fails due to protocol violation, -// we cannot be sure the client even understands our error message -// 3. PrepareClient: The client disconnected, so we can't tell them anyway... -pub(crate) enum ClientRequestError { - #[error("{0}")] - Cancellation(#[from] cancellation::CancelError), - #[error("{0}")] - Handshake(#[from] HandshakeError), - #[error("{0}")] - HandshakeTimeout(#[from] tokio::time::error::Elapsed), - #[error("{0}")] - PrepareClient(#[from] std::io::Error), - #[error("{0}")] - ReportedError(#[from] crate::stream::ReportedError), -} - -impl ReportableError for ClientRequestError { - fn get_error_kind(&self) -> crate::error::ErrorKind { - match self { - ClientRequestError::Cancellation(e) => e.get_error_kind(), - ClientRequestError::Handshake(e) => e.get_error_kind(), - ClientRequestError::HandshakeTimeout(_) => crate::error::ErrorKind::RateLimit, - ClientRequestError::ReportedError(e) => e.get_error_kind(), - ClientRequestError::PrepareClient(_) => crate::error::ErrorKind::ClientDisconnect, - } - } -} - #[allow(clippy::too_many_arguments)] pub(crate) async fn handle_client( config: &'static ProxyConfig, auth_backend: &'static auth::Backend<'static, ()>, ctx: &RequestContext, cancellation_handler: Arc, - stream: S, - mode: ClientMode, + client: &mut PqStream>, + mode: &ClientMode, endpoint_rate_limiter: Arc, - conn_gauge: NumClientConnectionsGuard<'static>, - cancellations: tokio_util::task::task_tracker::TaskTracker, -) -> Result>, ClientRequestError> { - debug!( - protocol = %ctx.protocol(), - "handling interactive connection from client" - ); - - let metrics = &Metrics::get().proxy; - let proto = ctx.protocol(); - let request_gauge = metrics.connection_requests.guard(proto); - - let tls = config.tls_config.load(); - let tls = tls.as_deref(); - - let record_handshake_error = !ctx.has_private_peer_addr(); - let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Client); - let do_handshake = handshake(ctx, stream, mode.handshake_tls(tls), record_handshake_error); - - let (mut stream, params) = match tokio::time::timeout(config.handshake_timeout, do_handshake) - .await?? 
- { - HandshakeData::Startup(stream, params) => (stream, params), - HandshakeData::Cancel(cancel_key_data) => { - // spawn a task to cancel the session, but don't wait for it - cancellations.spawn({ - let cancellation_handler_clone = Arc::clone(&cancellation_handler); - let ctx = ctx.clone(); - let cancel_span = tracing::span!(parent: None, tracing::Level::INFO, "cancel_session", session_id = ?ctx.session_id()); - cancel_span.follows_from(tracing::Span::current()); - async move { - cancellation_handler_clone - .cancel_session( - cancel_key_data, - ctx, - config.authentication_config.ip_allowlist_check_enabled, - config.authentication_config.is_vpc_acccess_proxy, - auth_backend.get_api(), - ) - .await - .inspect_err(|e | debug!(error = ?e, "cancel_session failed")).ok(); - }.instrument(cancel_span) - }); - - return Ok(None); - } - }; - drop(pause); - - ctx.set_db_options(params.clone()); - - let hostname = mode.hostname(stream.get_ref()); - - let common_names = tls.map(|tls| &tls.common_names); - + common_names: Option<&HashSet>, + params: &StartupMessageParams, +) -> Result<(ComputeConnection, oneshot::Sender), ClientRequestError> { + let hostname = mode.hostname(client.get_ref()); // Extract credentials which we're going to use for auth. let result = auth_backend .as_ref() - .map(|()| auth::ComputeUserInfoMaybeEndpoint::parse(ctx, ¶ms, hostname, common_names)) + .map(|()| auth::ComputeUserInfoMaybeEndpoint::parse(ctx, params, hostname, common_names)) .transpose(); let user_info = match result { Ok(user_info) => user_info, - Err(e) => Err(stream.throw_error(e, Some(ctx)).await)?, + Err(e) => Err(client.throw_error(e, Some(ctx)).await)?, }; let user = user_info.get_user().to_owned(); let user_info = match user_info .authenticate( ctx, - &mut stream, + client, mode.allow_cleartext(), &config.authentication_config, endpoint_rate_limiter, @@ -351,89 +75,117 @@ pub(crate) async fn handle_client( let app = params.get("application_name"); let params_span = tracing::info_span!("", ?user, ?db, ?app); - return Err(stream + return Err(client .throw_error(e, Some(ctx)) .instrument(params_span) .await)?; } }; - let compute_user_info = match &user_info { - auth::Backend::ControlPlane(_, info) => &info.info, + let (cplane, creds) = match user_info { + auth::Backend::ControlPlane(cplane, creds) => (cplane, creds), auth::Backend::Local(_) => unreachable!("local proxy does not run tcp proxy service"), }; - let params_compat = compute_user_info - .options - .get(NeonOptions::PARAMS_COMPAT) - .is_some(); + let params_compat = creds.info.options.get(NeonOptions::PARAMS_COMPAT).is_some(); + let mut auth_info = compute::AuthInfo::with_auth_keys(creds.keys); + auth_info.set_startup_params(params, params_compat); - let res = connect_to_compute( - ctx, - &TcpMechanism { - user_info: compute_user_info.clone(), - params_compat, - params: ¶ms, - locks: &config.connect_compute_locks, - }, - &user_info, - config.wake_compute_retry_config, - &config.connect_to_compute, - ) - .await; + let mut node; + let mut attempt = 0; + let connect = TcpMechanism { + locks: &config.connect_compute_locks, + }; + let backend = auth::Backend::ControlPlane(cplane, creds.info); - let node = match res { - Ok(node) => node, - Err(e) => Err(stream.throw_error(e, Some(ctx)).await)?, + // NOTE: This is messy, but should hopefully be detangled with PGLB. + // We wanted to separate the concerns of **connect** to compute (a PGLB operation), + // from **authenticate** to compute (a NeonKeeper operation). 
+ // + // This unfortunately removed retry handling for one error case where + // the compute was cached, and we connected, but the compute cache was actually stale + // and is associated with the wrong endpoint. We detect this when the **authentication** fails. + // As such, we retry once here if the `authenticate` function fails and the error is valid to retry. + let pg_settings = loop { + attempt += 1; + + // TODO: callback to pglb + let res = connect_to_compute( + ctx, + &connect, + &backend, + config.wake_compute_retry_config, + &config.connect_to_compute, + ) + .await; + + match res { + Ok(n) => node = n, + Err(e) => return Err(client.throw_error(e, Some(ctx)).await)?, + } + + let auth::Backend::ControlPlane(cplane, user_info) = &backend else { + unreachable!("ensured above"); + }; + + let res = auth_info.authenticate(ctx, &mut node, user_info).await; + match res { + Ok(pg_settings) => break pg_settings, + Err(e) if attempt < 2 && e.should_retry_wake_compute() => { + tracing::warn!(error = ?e, "retrying wake compute"); + + #[allow(irrefutable_let_patterns)] + if let ControlPlaneClient::ProxyV1(cplane_proxy_v1) = &**cplane { + let key = user_info.endpoint_cache_key(); + cplane_proxy_v1.caches.node_info.invalidate(&key); + } + } + Err(e) => Err(client.throw_error(e, Some(ctx)).await)?, + } }; - let cancellation_handler_clone = Arc::clone(&cancellation_handler); - let session = cancellation_handler_clone.get_key(); + let session = cancellation_handler.get_key(); - session.write_cancel_key(node.cancel_closure.clone())?; - prepare_client_connection(&node, *session.key(), &mut stream); - let stream = stream.flush_and_into_inner().await?; + finish_client_init(&pg_settings, *session.key(), client); - let private_link_id = match ctx.extra() { - Some(ConnectionInfoExtra::Aws { vpce_id }) => Some(vpce_id.clone()), - Some(ConnectionInfoExtra::Azure { link_id }) => Some(link_id.to_smolstr()), - None => None, - }; + let session_id = ctx.session_id(); + let (cancel_on_shutdown, cancel) = oneshot::channel(); + tokio::spawn(async move { + session + .maintain_cancel_key( + session_id, + cancel, + &pg_settings.cancel_closure, + &config.connect_to_compute, + ) + .await; + }); - Ok(Some(ProxyPassthrough { - client: stream, - aux: node.aux.clone(), - private_link_id, - compute: node, - session_id: ctx.session_id(), - cancel: session, - _req: request_gauge, - _conn: conn_gauge, - })) + Ok((node, cancel_on_shutdown)) } /// Finish client connection initialization: confirm auth success, send params, etc. -pub(crate) fn prepare_client_connection( - node: &compute::PostgresConnection, +pub(crate) fn finish_client_init( + settings: &compute::PostgresSettings, cancel_key_data: CancelKeyData, - stream: &mut PqStream, + client: &mut PqStream, ) { // Forward all deferred notices to the client. - for notice in &node.delayed_notice { - stream.write_raw(notice.as_bytes().len(), b'N', |buf| { + for notice in &settings.delayed_notice { + client.write_raw(notice.as_bytes().len(), b'N', |buf| { buf.extend_from_slice(notice.as_bytes()); }); } // Forward all postgres connection params to the client. 
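
For reference on what finish_client_init forwards here: ParameterStatus, BackendKeyData and ReadyForQuery are small framed backend messages. A std-only sketch of the ParameterStatus and ReadyForQuery encoding (illustrating the PostgreSQL wire format, not the proxy's BeMessage writer):

/// ParameterStatus: tag 'S', then an i32 length (excluding the tag but
/// including itself), then NUL-terminated name and value strings.
fn parameter_status(name: &str, value: &str) -> Vec<u8> {
    let len = 4 + name.len() + 1 + value.len() + 1;
    let mut out = Vec::with_capacity(1 + len);
    out.push(b'S');
    out.extend_from_slice(&(len as u32).to_be_bytes());
    out.extend_from_slice(name.as_bytes());
    out.push(0);
    out.extend_from_slice(value.as_bytes());
    out.push(0);
    out
}

/// ReadyForQuery: tag 'Z', length 5, then the transaction status ('I' = idle).
fn ready_for_query() -> [u8; 6] {
    [b'Z', 0, 0, 0, 5, b'I']
}

fn main() {
    let msg = parameter_status("server_version", "16.3");
    assert_eq!(msg.len(), 1 + 24);
    assert_eq!(msg[1..5], 24u32.to_be_bytes());
    assert_eq!(ready_for_query().len(), 6);
}
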
- for (name, value) in &node.params { - stream.write_message(BeMessage::ParameterStatus { + for (name, value) in &settings.params { + client.write_message(BeMessage::ParameterStatus { name: name.as_bytes(), value: value.as_bytes(), }); } - stream.write_message(BeMessage::BackendKeyData(cancel_key_data)); - stream.write_message(BeMessage::ReadyForQuery); + client.write_message(BeMessage::BackendKeyData(cancel_key_data)); + client.write_message(BeMessage::ReadyForQuery); } #[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] @@ -443,7 +195,7 @@ impl NeonOptions { // proxy options: /// `PARAMS_COMPAT` allows opting in to forwarding all startup parameters from client to compute. - const PARAMS_COMPAT: &str = "proxy_params_compat"; + pub const PARAMS_COMPAT: &str = "proxy_params_compat"; // cplane options: diff --git a/proxy/src/proxy/retry.rs b/proxy/src/proxy/retry.rs index 01e603ec14..b06c3be72c 100644 --- a/proxy/src/proxy/retry.rs +++ b/proxy/src/proxy/retry.rs @@ -3,7 +3,7 @@ use std::io; use tokio::time; -use crate::compute; +use crate::compute::{self, PostgresError}; use crate::config::RetryConfig; pub(crate) trait CouldRetry { @@ -99,17 +99,15 @@ impl ShouldRetryWakeCompute for postgres_client::Error { impl CouldRetry for compute::ConnectionError { fn could_retry(&self) -> bool { match self { - compute::ConnectionError::Postgres(err) => err.could_retry(), - compute::ConnectionError::CouldNotConnect(err) => err.could_retry(), + compute::ConnectionError::TlsError(err) => err.could_retry(), compute::ConnectionError::WakeComputeError(err) => err.could_retry(), - _ => false, + compute::ConnectionError::TooManyConnectionAttempts(_) => false, } } } impl ShouldRetryWakeCompute for compute::ConnectionError { fn should_retry_wake_compute(&self) -> bool { match self { - compute::ConnectionError::Postgres(err) => err.should_retry_wake_compute(), // the cache entry was not checked for validity compute::ConnectionError::TooManyConnectionAttempts(_) => false, _ => true, @@ -117,6 +115,14 @@ impl ShouldRetryWakeCompute for compute::ConnectionError { } } +impl ShouldRetryWakeCompute for PostgresError { + fn should_retry_wake_compute(&self) -> bool { + match self { + PostgresError::Postgres(error) => error.should_retry_wake_compute(), + } + } +} + pub(crate) fn retry_after(num_retries: u32, config: RetryConfig) -> time::Duration { config .base_delay diff --git a/proxy/src/proxy/tests/mitm.rs b/proxy/src/proxy/tests/mitm.rs index c92ee49b8d..b09d8edc4c 100644 --- a/proxy/src/proxy/tests/mitm.rs +++ b/proxy/src/proxy/tests/mitm.rs @@ -14,6 +14,9 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt, DuplexStream}; use tokio_util::codec::{Decoder, Encoder}; use super::*; +use crate::config::TlsConfig; +use crate::context::RequestContext; +use crate::pglb::handshake::{HandshakeData, handshake}; enum Intercept { None, @@ -169,7 +172,7 @@ async fn scram_auth_disable_channel_binding() -> anyhow::Result<()> { .dbname("db") .password("password") .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await?; proxy.await? @@ -252,7 +255,7 @@ async fn connect_failure( .dbname("db") .password("password") .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) 
.await .err() .context("client shouldn't be able to connect")?; diff --git a/proxy/src/proxy/tests/mod.rs b/proxy/src/proxy/tests/mod.rs index e5db0013a7..dd89b05426 100644 --- a/proxy/src/proxy/tests/mod.rs +++ b/proxy/src/proxy/tests/mod.rs @@ -3,6 +3,7 @@ mod mitm; +use std::sync::Arc; use std::time::Duration; use anyhow::{Context, bail}; @@ -10,29 +11,31 @@ use async_trait::async_trait; use http::StatusCode; use postgres_client::config::SslMode; use postgres_client::tls::{MakeTlsConnect, NoTls}; -use retry::{ShouldRetryWakeCompute, retry_after}; use rstest::rstest; use rustls::crypto::ring; use rustls::pki_types; -use tokio::io::DuplexStream; +use tokio::io::{AsyncRead, AsyncWrite, DuplexStream}; use tracing_test::traced_test; use super::retry::CouldRetry; -use super::*; -use crate::auth::backend::{ - ComputeCredentialKeys, ComputeCredentials, ComputeUserInfo, MaybeOwned, -}; -use crate::config::{ComputeConfig, RetryConfig}; +use crate::auth::backend::{ComputeUserInfo, MaybeOwned}; +use crate::config::{ComputeConfig, RetryConfig, TlsConfig}; +use crate::context::RequestContext; use crate::control_plane::client::{ControlPlaneClient, TestControlPlaneClient}; use crate::control_plane::messages::{ControlPlaneErrorMessage, Details, MetricsAuxInfo, Status}; use crate::control_plane::{self, CachedNodeInfo, NodeInfo, NodeInfoCache}; -use crate::error::ErrorKind; -use crate::pglb::connect_compute::ConnectMechanism; +use crate::error::{ErrorKind, ReportableError}; +use crate::pglb::ERR_INSECURE_CONNECTION; +use crate::pglb::handshake::{HandshakeData, handshake}; +use crate::pqproto::BeMessage; +use crate::proxy::NeonOptions; +use crate::proxy::connect_compute::{ConnectMechanism, connect_to_compute}; +use crate::proxy::retry::{ShouldRetryWakeCompute, retry_after}; +use crate::stream::{PqStream, Stream}; use crate::tls::client_config::compute_client_config_with_certs; -use crate::tls::postgres_rustls::MakeRustlsConnect; use crate::tls::server_config::CertResolver; use crate::types::{BranchId, EndpointId, ProjectId}; -use crate::{sasl, scram}; +use crate::{auth, compute, sasl, scram}; /// Generate a set of TLS certificates: CA + server. fn generate_certs( @@ -72,13 +75,14 @@ struct ClientConfig<'a> { hostname: &'a str, } -type TlsConnect = >::TlsConnect; +type TlsConnect = >::TlsConnect; impl ClientConfig<'_> { fn make_tls_connect(self) -> anyhow::Result> { - let mut mk = MakeRustlsConnect::new(self.config); - let tls = MakeTlsConnect::::make_tls_connect(&mut mk, self.hostname)?; - Ok(tls) + Ok(crate::tls::postgres_rustls::make_tls_connect( + &self.config, + self.hostname, + )?) } } @@ -201,7 +205,7 @@ async fn handshake_tls_is_enforced_by_proxy() -> anyhow::Result<()> { .user("john_doe") .dbname("earth") .ssl_mode(SslMode::Disable) - .connect_raw(server, NoTls) + .tls_and_authenticate(server, NoTls) .await .err() // -> Option .context("client shouldn't be able to connect")?; @@ -230,7 +234,7 @@ async fn handshake_tls() -> anyhow::Result<()> { .user("john_doe") .dbname("earth") .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await?; proxy.await? @@ -247,7 +251,7 @@ async fn handshake_raw() -> anyhow::Result<()> { .dbname("earth") .set_param("options", "project=generic-project-name") .ssl_mode(SslMode::Prefer) - .connect_raw(server, NoTls) + .tls_and_authenticate(server, NoTls) .await?; proxy.await? 
@@ -295,7 +299,7 @@ async fn scram_auth_good(#[case] password: &str) -> anyhow::Result<()> { .dbname("db") .password(password) .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await?; proxy.await? @@ -319,7 +323,7 @@ async fn scram_auth_disable_channel_binding() -> anyhow::Result<()> { .dbname("db") .password("password") .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await?; proxy.await? @@ -346,7 +350,7 @@ async fn scram_auth_mock() -> anyhow::Result<()> { .dbname("db") .password(&password) // no password will match the mocked secret .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await .err() // -> Option .context("client shouldn't be able to connect")?; @@ -376,6 +380,7 @@ fn connect_compute_total_wait() { #[derive(Clone, Copy, Debug)] enum ConnectAction { Wake, + WakeCold, WakeFail, WakeRetry, Connect, @@ -497,8 +502,6 @@ impl ConnectMechanism for TestConnectMechanism { x => panic!("expecting action {x:?}, connect is called instead"), } } - - fn update_connect_config(&self, _conf: &mut compute::ConnCfg) {} } impl TestControlPlaneClient for TestConnectMechanism { @@ -508,6 +511,9 @@ impl TestControlPlaneClient for TestConnectMechanism { *counter += 1; match action { ConnectAction::Wake => Ok(helper_create_cached_node_info(self.cache)), + ConnectAction::WakeCold => Ok(CachedNodeInfo::new_uncached( + helper_create_uncached_node_info(), + )), ConnectAction::WakeFail => { let err = control_plane::errors::ControlPlaneError::Message(Box::new( ControlPlaneErrorMessage { @@ -555,9 +561,14 @@ impl TestControlPlaneClient for TestConnectMechanism { } } -fn helper_create_cached_node_info(cache: &'static NodeInfoCache) -> CachedNodeInfo { - let node = NodeInfo { - config: compute::ConnCfg::new("test".to_owned(), 5432), +fn helper_create_uncached_node_info() -> NodeInfo { + NodeInfo { + conn_info: compute::ConnectInfo { + host: "test".into(), + port: 5432, + ssl_mode: SslMode::Disable, + host_addr: None, + }, aux: MetricsAuxInfo { endpoint_id: (&EndpointId::from("endpoint")).into(), project_id: (&ProjectId::from("project")).into(), @@ -565,23 +576,24 @@ fn helper_create_cached_node_info(cache: &'static NodeInfoCache) -> CachedNodeIn compute_id: "compute".into(), cold_start_info: crate::control_plane::messages::ColdStartInfo::Warm, }, - }; + } +} + +fn helper_create_cached_node_info(cache: &'static NodeInfoCache) -> CachedNodeInfo { + let node = helper_create_uncached_node_info(); let (_, node2) = cache.insert_unit("key".into(), Ok(node.clone())); node2.map(|()| node) } fn helper_create_connect_info( mechanism: &TestConnectMechanism, -) -> auth::Backend<'static, ComputeCredentials> { +) -> auth::Backend<'static, ComputeUserInfo> { auth::Backend::ControlPlane( MaybeOwned::Owned(ControlPlaneClient::Test(Box::new(mechanism.clone()))), - ComputeCredentials { - info: ComputeUserInfo { - endpoint: "endpoint".into(), - user: "user".into(), - options: NeonOptions::parse_options_raw(""), - }, - keys: ComputeCredentialKeys::Password("password".into()), + ComputeUserInfo { + endpoint: "endpoint".into(), + user: "user".into(), + options: NeonOptions::parse_options_raw(""), }, ) } @@ -744,7 +756,7 @@ async fn fail_no_wake_skips_cache_invalidation() { let ctx = RequestContext::test(); let mech = 
TestConnectMechanism::new(vec![ ConnectAction::Wake, - ConnectAction::FailNoWake, + ConnectAction::RetryNoWake, ConnectAction::Connect, ]); let user = helper_create_connect_info(&mech); @@ -790,7 +802,7 @@ async fn retry_no_wake_skips_invalidation() { let ctx = RequestContext::test(); // Wake → RetryNoWake (retryable + NOT wakeable) - let mechanism = TestConnectMechanism::new(vec![Wake, RetryNoWake]); + let mechanism = TestConnectMechanism::new(vec![Wake, RetryNoWake, Fail]); let user_info = helper_create_connect_info(&mechanism); let cfg = config(); @@ -804,3 +816,44 @@ async fn retry_no_wake_skips_invalidation() { "invalidating stalled compute node info cache entry" )); } + +#[tokio::test] +#[traced_test] +async fn retry_no_wake_error_fast() { + let _ = env_logger::try_init(); + use ConnectAction::*; + + let ctx = RequestContext::test(); + // Wake → FailNoWake (not retryable + NOT wakeable) + let mechanism = TestConnectMechanism::new(vec![Wake, FailNoWake]); + let user_info = helper_create_connect_info(&mechanism); + let cfg = config(); + + connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg) + .await + .unwrap_err(); + mechanism.verify(); + + // Because FailNoWake has wakeable=false, we must NOT see invalidate_cache + assert!(!logs_contain( + "invalidating stalled compute node info cache entry" + )); +} + +#[tokio::test] +#[traced_test] +async fn retry_cold_wake_skips_invalidation() { + let _ = env_logger::try_init(); + use ConnectAction::*; + + let ctx = RequestContext::test(); + // WakeCold → FailNoWake (not retryable + NOT wakeable) + let mechanism = TestConnectMechanism::new(vec![WakeCold, Retry, Connect]); + let user_info = helper_create_connect_info(&mechanism); + let cfg = config(); + + connect_to_compute(&ctx, &mechanism, &user_info, cfg.retry, &cfg) + .await + .unwrap(); + mechanism.verify(); +} diff --git a/proxy/src/proxy/wake_compute.rs b/proxy/src/proxy/wake_compute.rs index 06c2da58db..b8edf9fd5c 100644 --- a/proxy/src/proxy/wake_compute.rs +++ b/proxy/src/proxy/wake_compute.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use tracing::{error, info}; use crate::config::RetryConfig; @@ -8,7 +9,6 @@ use crate::error::ReportableError; use crate::metrics::{ ConnectOutcome, ConnectionFailuresBreakdownGroup, Metrics, RetriesMetricGroup, RetryType, }; -use crate::pglb::connect_compute::ComputeConnectBackend; use crate::proxy::retry::{retry_after, should_retry}; // Use macro to retain original callsite. @@ -23,7 +23,12 @@ macro_rules! 
log_wake_compute_error { }; } -pub(crate) async fn wake_compute( +#[async_trait] +pub(crate) trait WakeComputeBackend { + async fn wake_compute(&self, ctx: &RequestContext) -> Result; +} + +pub(crate) async fn wake_compute( num_retries: &mut u32, ctx: &RequestContext, api: &B, diff --git a/proxy/src/rate_limiter/leaky_bucket.rs b/proxy/src/rate_limiter/leaky_bucket.rs index 0c79b5e92f..12b4bda0c0 100644 --- a/proxy/src/rate_limiter/leaky_bucket.rs +++ b/proxy/src/rate_limiter/leaky_bucket.rs @@ -39,7 +39,11 @@ impl LeakyBucketRateLimiter { let config = config.map_or(self.default_config, Into::into); - if self.access_count.fetch_add(1, Ordering::AcqRel) % 2048 == 0 { + if self + .access_count + .fetch_add(1, Ordering::AcqRel) + .is_multiple_of(2048) + { self.do_gc(now); } @@ -69,9 +73,8 @@ pub struct LeakyBucketConfig { pub max: f64, } -#[cfg(test)] impl LeakyBucketConfig { - pub(crate) fn new(rps: f64, max: f64) -> Self { + pub fn new(rps: f64, max: f64) -> Self { assert!(rps > 0.0, "rps must be positive"); assert!(max > 0.0, "max must be positive"); Self { rps, max } diff --git a/proxy/src/rate_limiter/limiter.rs b/proxy/src/rate_limiter/limiter.rs index 9d700c1b52..61d4636c2b 100644 --- a/proxy/src/rate_limiter/limiter.rs +++ b/proxy/src/rate_limiter/limiter.rs @@ -12,11 +12,10 @@ use rand::{Rng, SeedableRng}; use tokio::time::{Duration, Instant}; use tracing::info; +use super::LeakyBucketConfig; use crate::ext::LockExt; use crate::intern::EndpointIdInt; -use super::LeakyBucketConfig; - pub struct GlobalRateLimiter { data: Vec, info: Vec, @@ -140,12 +139,6 @@ impl RateBucketInfo { Self::new(200, Duration::from_secs(600)), ]; - // For all the sessions will be cancel key. So this limit is essentially global proxy limit. - pub const DEFAULT_REDIS_SET: [Self; 2] = [ - Self::new(100_000, Duration::from_secs(1)), - Self::new(50_000, Duration::from_secs(10)), - ]; - pub fn rps(&self) -> f64 { (self.max_rpi as f64) / self.interval.as_secs_f64() } @@ -218,7 +211,11 @@ impl BucketRateLimiter { // worst case memory usage is about: // = 2 * 2048 * 64 * (48B + 72B) // = 30MB - if self.access_count.fetch_add(1, Ordering::AcqRel) % 2048 == 0 { + if self + .access_count + .fetch_add(1, Ordering::AcqRel) + .is_multiple_of(2048) + { self.do_gc(); } diff --git a/proxy/src/redis/cancellation_publisher.rs b/proxy/src/redis/cancellation_publisher.rs deleted file mode 100644 index 6f56aeea06..0000000000 --- a/proxy/src/redis/cancellation_publisher.rs +++ /dev/null @@ -1,79 +0,0 @@ -use core::net::IpAddr; -use std::sync::Arc; - -use tokio::sync::Mutex; -use uuid::Uuid; - -use crate::pqproto::CancelKeyData; - -pub trait CancellationPublisherMut: Send + Sync + 'static { - #[allow(async_fn_in_trait)] - async fn try_publish( - &mut self, - cancel_key_data: CancelKeyData, - session_id: Uuid, - peer_addr: IpAddr, - ) -> anyhow::Result<()>; -} - -pub trait CancellationPublisher: Send + Sync + 'static { - #[allow(async_fn_in_trait)] - async fn try_publish( - &self, - cancel_key_data: CancelKeyData, - session_id: Uuid, - peer_addr: IpAddr, - ) -> anyhow::Result<()>; -} - -impl CancellationPublisher for () { - async fn try_publish( - &self, - _cancel_key_data: CancelKeyData, - _session_id: Uuid, - _peer_addr: IpAddr, - ) -> anyhow::Result<()> { - Ok(()) - } -} - -impl CancellationPublisherMut for P { - async fn try_publish( - &mut self, - cancel_key_data: CancelKeyData, - session_id: Uuid, - peer_addr: IpAddr, - ) -> anyhow::Result<()> { -

<P as CancellationPublisher>::try_publish(self, cancel_key_data, session_id, peer_addr) - .await - } -} - -impl<P: CancellationPublisher> CancellationPublisher for Option<P>
{ - async fn try_publish( - &self, - cancel_key_data: CancelKeyData, - session_id: Uuid, - peer_addr: IpAddr, - ) -> anyhow::Result<()> { - if let Some(p) = self { - p.try_publish(cancel_key_data, session_id, peer_addr).await - } else { - Ok(()) - } - } -} - -impl CancellationPublisher for Arc> { - async fn try_publish( - &self, - cancel_key_data: CancelKeyData, - session_id: Uuid, - peer_addr: IpAddr, - ) -> anyhow::Result<()> { - self.lock() - .await - .try_publish(cancel_key_data, session_id, peer_addr) - .await - } -} diff --git a/proxy/src/redis/connection_with_credentials_provider.rs b/proxy/src/redis/connection_with_credentials_provider.rs index fe656557ac..35a3fe4334 100644 --- a/proxy/src/redis/connection_with_credentials_provider.rs +++ b/proxy/src/redis/connection_with_credentials_provider.rs @@ -1,11 +1,12 @@ use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use futures::FutureExt; use redis::aio::{ConnectionLike, MultiplexedConnection}; use redis::{ConnectionInfo, IntoConnectionInfo, RedisConnectionInfo, RedisResult}; -use tokio::task::JoinHandle; -use tracing::{debug, error, info, warn}; +use tokio::task::AbortHandle; +use tracing::{error, info, warn}; use super::elasticache::CredentialsProvider; @@ -31,8 +32,9 @@ pub struct ConnectionWithCredentialsProvider { credentials: Credentials, // TODO: with more load on the connection, we should consider using a connection pool con: Option, - refresh_token_task: Option>, + refresh_token_task: Option, mutex: tokio::sync::Mutex<()>, + credentials_refreshed: Arc, } impl Clone for ConnectionWithCredentialsProvider { @@ -42,6 +44,7 @@ impl Clone for ConnectionWithCredentialsProvider { con: None, refresh_token_task: None, mutex: tokio::sync::Mutex::new(()), + credentials_refreshed: Arc::new(AtomicBool::new(false)), } } } @@ -65,6 +68,7 @@ impl ConnectionWithCredentialsProvider { con: None, refresh_token_task: None, mutex: tokio::sync::Mutex::new(()), + credentials_refreshed: Arc::new(AtomicBool::new(false)), } } @@ -78,6 +82,7 @@ impl ConnectionWithCredentialsProvider { con: None, refresh_token_task: None, mutex: tokio::sync::Mutex::new(()), + credentials_refreshed: Arc::new(AtomicBool::new(true)), } } @@ -85,6 +90,10 @@ impl ConnectionWithCredentialsProvider { redis::cmd("PING").query_async(con).await } + pub(crate) fn credentials_refreshed(&self) -> bool { + self.credentials_refreshed.load(Ordering::Relaxed) + } + pub(crate) async fn connect(&mut self) -> anyhow::Result<()> { let _guard = self.mutex.lock().await; if let Some(con) = self.con.as_mut() { @@ -112,13 +121,13 @@ impl ConnectionWithCredentialsProvider { if let Credentials::Dynamic(credentials_provider, _) = &self.credentials { let credentials_provider = credentials_provider.clone(); let con2 = con.clone(); - let f = tokio::spawn(async move { - Self::keep_connection(con2, credentials_provider) - .await - .inspect_err(|e| debug!("keep_connection failed: {e}")) - .ok(); - }); - self.refresh_token_task = Some(f); + let credentials_refreshed = self.credentials_refreshed.clone(); + let f = tokio::spawn(Self::keep_connection( + con2, + credentials_provider, + credentials_refreshed, + )); + self.refresh_token_task = Some(f.abort_handle()); } match Self::ping(&mut con).await { Ok(()) => { @@ -153,6 +162,7 @@ impl ConnectionWithCredentialsProvider { async fn get_client(&self) -> anyhow::Result { let client = redis::Client::open(self.get_connection_info().await?)?; + self.credentials_refreshed.store(true, Ordering::Relaxed); Ok(client) } @@ 
-168,16 +178,19 @@ impl ConnectionWithCredentialsProvider { async fn keep_connection( mut con: MultiplexedConnection, credentials_provider: Arc, - ) -> anyhow::Result<()> { + credentials_refreshed: Arc, + ) -> ! { loop { // The connection lives for 12h, for the sanity check we refresh it every hour. tokio::time::sleep(Duration::from_secs(60 * 60)).await; match Self::refresh_token(&mut con, credentials_provider.clone()).await { Ok(()) => { info!("Token refreshed"); + credentials_refreshed.store(true, Ordering::Relaxed); } Err(e) => { error!("Error during token refresh: {e:?}"); + credentials_refreshed.store(false, Ordering::Relaxed); } } } @@ -231,7 +244,7 @@ impl ConnectionLike for ConnectionWithCredentialsProvider { &'a mut self, cmd: &'a redis::Cmd, ) -> redis::RedisFuture<'a, redis::Value> { - (async move { self.send_packed_command(cmd).await }).boxed() + self.send_packed_command(cmd).boxed() } fn req_packed_commands<'a>( @@ -240,10 +253,10 @@ impl ConnectionLike for ConnectionWithCredentialsProvider { offset: usize, count: usize, ) -> redis::RedisFuture<'a, Vec> { - (async move { self.send_packed_commands(cmd, offset, count).await }).boxed() + self.send_packed_commands(cmd, offset, count).boxed() } fn get_db(&self) -> i64 { - 0 + self.con.as_ref().map_or(0, |c| c.get_db()) } } diff --git a/proxy/src/redis/keys.rs b/proxy/src/redis/keys.rs index 3113bad949..ffb7bc876b 100644 --- a/proxy/src/redis/keys.rs +++ b/proxy/src/redis/keys.rs @@ -1,8 +1,4 @@ -use std::io::ErrorKind; - -use anyhow::Ok; - -use crate::pqproto::{CancelKeyData, id_to_cancel_key}; +use crate::pqproto::CancelKeyData; pub mod keyspace { pub const CANCEL_PREFIX: &str = "cancel"; @@ -23,40 +19,12 @@ impl KeyPrefix { } } } - - #[allow(dead_code)] - pub(crate) fn as_str(&self) -> &'static str { - match self { - KeyPrefix::Cancel(_) => keyspace::CANCEL_PREFIX, - } - } -} - -#[allow(dead_code)] -pub(crate) fn parse_redis_key(key: &str) -> anyhow::Result { - let (prefix, key_str) = key.split_once(':').ok_or_else(|| { - anyhow::anyhow!(std::io::Error::new( - ErrorKind::InvalidData, - "missing prefix" - )) - })?; - - match prefix { - keyspace::CANCEL_PREFIX => { - let id = u64::from_str_radix(key_str, 16)?; - - Ok(KeyPrefix::Cancel(id_to_cancel_key(id))) - } - _ => Err(anyhow::anyhow!(std::io::Error::new( - ErrorKind::InvalidData, - "unknown prefix" - ))), - } } #[cfg(test)] mod tests { use super::*; + use crate::pqproto::id_to_cancel_key; #[test] fn test_build_redis_key() { @@ -65,16 +33,4 @@ mod tests { let redis_key = cancel_key.build_redis_key(); assert_eq!(redis_key, "cancel:30390000d431"); } - - #[test] - fn test_parse_redis_key() { - let redis_key = "cancel:30390000d431"; - let key: KeyPrefix = parse_redis_key(redis_key).expect("Failed to parse key"); - - let ref_key = id_to_cancel_key(12345 << 32 | 54321); - - assert_eq!(key.as_str(), KeyPrefix::Cancel(ref_key).as_str()); - let KeyPrefix::Cancel(cancel_key) = key; - assert_eq!(ref_key, cancel_key); - } } diff --git a/proxy/src/redis/kv_ops.rs b/proxy/src/redis/kv_ops.rs index f71730c533..cfdbc21839 100644 --- a/proxy/src/redis/kv_ops.rs +++ b/proxy/src/redis/kv_ops.rs @@ -1,12 +1,13 @@ +use std::time::Duration; + +use futures::FutureExt; use redis::aio::ConnectionLike; use redis::{Cmd, FromRedisValue, Pipeline, RedisResult}; use super::connection_with_credentials_provider::ConnectionWithCredentialsProvider; -use crate::rate_limiter::{GlobalRateLimiter, RateBucketInfo}; pub struct RedisKVClient { client: ConnectionWithCredentialsProvider, - limiter: GlobalRateLimiter, } 
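
RedisKVClient above loses its per-query rate limiter; the query method just below instead maps redis's RetryMethod onto a single reconnect, an immediate retry, or a short sleep before retrying. A stripped-down sketch of that dispatch, with a local enum standing in for redis::RetryMethod so the example stays self-contained:

use std::time::Duration;

/// Stand-in for redis::RetryMethod; only the variants handled in the diff.
enum RetryMethod {
    Reconnect,
    RetryImmediately,
    WaitAndRetry,
    NoRetry,
}

enum Action {
    Reconnect,
    RetryNow,
    RetryAfter(Duration),
    Fail,
}

fn plan_retry(method: RetryMethod) -> Action {
    match method {
        // The connection is broken: re-establish it, then retry the query once.
        RetryMethod::Reconnect => Action::Reconnect,
        RetryMethod::RetryImmediately => Action::RetryNow,
        // Back off a little before retrying; 100ms is an arbitrary choice.
        RetryMethod::WaitAndRetry => Action::RetryAfter(Duration::from_millis(100)),
        RetryMethod::NoRetry => Action::Fail,
    }
}

fn main() {
    assert!(matches!(plan_retry(RetryMethod::WaitAndRetry), Action::RetryAfter(_)));
    assert!(matches!(plan_retry(RetryMethod::NoRetry), Action::Fail));
}
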
#[allow(async_fn_in_trait)] @@ -27,42 +28,45 @@ impl Queryable for Cmd { } impl RedisKVClient { - pub fn new(client: ConnectionWithCredentialsProvider, info: &'static [RateBucketInfo]) -> Self { - Self { - client, - limiter: GlobalRateLimiter::new(info.into()), - } + pub fn new(client: ConnectionWithCredentialsProvider) -> Self { + Self { client } } pub async fn try_connect(&mut self) -> anyhow::Result<()> { - match self.client.connect().await { - Ok(()) => {} - Err(e) => { - tracing::error!("failed to connect to redis: {e}"); - return Err(e); - } - } - Ok(()) + self.client + .connect() + .boxed() + .await + .inspect_err(|e| tracing::error!("failed to connect to redis: {e}")) + } + + pub(crate) fn credentials_refreshed(&self) -> bool { + self.client.credentials_refreshed() } pub(crate) async fn query( &mut self, q: &impl Queryable, ) -> anyhow::Result { - if !self.limiter.check() { - tracing::info!("Rate limit exceeded. Skipping query"); - return Err(anyhow::anyhow!("Rate limit exceeded")); - } - - match q.query(&mut self.client).await { + let e = match q.query(&mut self.client).await { Ok(t) => return Ok(t), - Err(e) => { - tracing::error!("failed to run query: {e}"); + Err(e) => e, + }; + + tracing::debug!("failed to run query: {e}"); + match e.retry_method() { + redis::RetryMethod::Reconnect => { + tracing::info!("Redis client is disconnected. Reconnecting..."); + self.try_connect().await?; } + redis::RetryMethod::RetryImmediately => {} + redis::RetryMethod::WaitAndRetry => { + // somewhat arbitrary. + tokio::time::sleep(Duration::from_millis(100)).await; + } + _ => Err(e)?, } - tracing::info!("Redis client is disconnected. Reconnecting..."); - self.try_connect().await?; Ok(q.query(&mut self.client).await?) } } diff --git a/proxy/src/redis/mod.rs b/proxy/src/redis/mod.rs index 8b46a8e6ca..4f5e24ab5f 100644 --- a/proxy/src/redis/mod.rs +++ b/proxy/src/redis/mod.rs @@ -1,4 +1,3 @@ -pub mod cancellation_publisher; pub mod connection_with_credentials_provider; pub mod elasticache; pub mod keys; diff --git a/proxy/src/redis/notifications.rs b/proxy/src/redis/notifications.rs index a9d6b40603..973a4c5b02 100644 --- a/proxy/src/redis/notifications.rs +++ b/proxy/src/redis/notifications.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use futures::StreamExt; use redis::aio::PubSub; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use tokio_util::sync::CancellationToken; use super::connection_with_credentials_provider::ConnectionWithCredentialsProvider; use crate::cache::project_info::ProjectInfoCache; -use crate::intern::{AccountIdInt, ProjectIdInt, RoleNameInt}; +use crate::intern::{AccountIdInt, EndpointIdInt, ProjectIdInt, RoleNameInt}; use crate::metrics::{Metrics, RedisErrors, RedisEventsCount}; const CPLANE_CHANNEL_NAME: &str = "neondb-proxy-ws-updates"; @@ -27,42 +27,37 @@ struct NotificationHeader<'a> { topic: &'a str, } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq)] #[serde(tag = "topic", content = "data")] -pub(crate) enum Notification { +enum Notification { #[serde( - rename = "/allowed_ips_updated", + rename = "/account_settings_update", + alias = "/allowed_vpc_endpoints_updated_for_org", deserialize_with = "deserialize_json_string" )] - AllowedIpsUpdate { - allowed_ips_update: AllowedIpsUpdate, - }, + AccountSettingsUpdate(InvalidateAccount), + #[serde( - rename = "/block_public_or_vpc_access_updated", + rename = "/endpoint_settings_update", deserialize_with = "deserialize_json_string" )] - 
BlockPublicOrVpcAccessUpdated { - block_public_or_vpc_access_updated: BlockPublicOrVpcAccessUpdated, - }, + EndpointSettingsUpdate(InvalidateEndpoint), + #[serde( - rename = "/allowed_vpc_endpoints_updated_for_org", + rename = "/project_settings_update", + alias = "/allowed_ips_updated", + alias = "/block_public_or_vpc_access_updated", + alias = "/allowed_vpc_endpoints_updated_for_projects", deserialize_with = "deserialize_json_string" )] - AllowedVpcEndpointsUpdatedForOrg { - allowed_vpc_endpoints_updated_for_org: AllowedVpcEndpointsUpdatedForOrg, - }, + ProjectSettingsUpdate(InvalidateProject), + #[serde( - rename = "/allowed_vpc_endpoints_updated_for_projects", + rename = "/role_setting_update", + alias = "/password_updated", deserialize_with = "deserialize_json_string" )] - AllowedVpcEndpointsUpdatedForProjects { - allowed_vpc_endpoints_updated_for_projects: AllowedVpcEndpointsUpdatedForProjects, - }, - #[serde( - rename = "/password_updated", - deserialize_with = "deserialize_json_string" - )] - PasswordUpdate { password_update: PasswordUpdate }, + RoleSettingUpdate(InvalidateRole), #[serde( other, @@ -72,28 +67,56 @@ pub(crate) enum Notification { UnknownTopic, } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] -pub(crate) struct AllowedIpsUpdate { - project_id: ProjectIdInt, +#[derive(Clone, Debug, Deserialize, Eq, PartialEq)] +#[serde(rename_all = "snake_case")] +enum InvalidateEndpoint { + EndpointId(EndpointIdInt), + EndpointIds(Vec), +} +impl std::ops::Deref for InvalidateEndpoint { + type Target = [EndpointIdInt]; + fn deref(&self) -> &Self::Target { + match self { + Self::EndpointId(id) => std::slice::from_ref(id), + Self::EndpointIds(ids) => ids, + } + } } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] -pub(crate) struct BlockPublicOrVpcAccessUpdated { - project_id: ProjectIdInt, +#[derive(Clone, Debug, Deserialize, Eq, PartialEq)] +#[serde(rename_all = "snake_case")] +enum InvalidateProject { + ProjectId(ProjectIdInt), + ProjectIds(Vec), +} +impl std::ops::Deref for InvalidateProject { + type Target = [ProjectIdInt]; + fn deref(&self) -> &Self::Target { + match self { + Self::ProjectId(id) => std::slice::from_ref(id), + Self::ProjectIds(ids) => ids, + } + } } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] -pub(crate) struct AllowedVpcEndpointsUpdatedForOrg { - account_id: AccountIdInt, +#[derive(Clone, Debug, Deserialize, Eq, PartialEq)] +#[serde(rename_all = "snake_case")] +enum InvalidateAccount { + AccountId(AccountIdInt), + AccountIds(Vec), +} +impl std::ops::Deref for InvalidateAccount { + type Target = [AccountIdInt]; + fn deref(&self) -> &Self::Target { + match self { + Self::AccountId(id) => std::slice::from_ref(id), + Self::AccountIds(ids) => ids, + } + } } -#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] -pub(crate) struct AllowedVpcEndpointsUpdatedForProjects { - project_ids: Vec, -} - -#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] -pub(crate) struct PasswordUpdate { +#[derive(Clone, Debug, Deserialize, Eq, PartialEq)] +struct InvalidateRole { project_id: ProjectIdInt, role_name: RoleNameInt, } @@ -118,29 +141,19 @@ where struct MessageHandler { cache: Arc, - region_id: String, } impl Clone for MessageHandler { fn clone(&self) -> Self { Self { cache: self.cache.clone(), - region_id: self.region_id.clone(), } } } impl MessageHandler { - pub(crate) fn new(cache: Arc, region_id: String) -> Self { - Self { cache, region_id } - } - - pub(crate) async fn increment_active_listeners(&self) { 
- self.cache.increment_active_listeners().await; - } - - pub(crate) async fn decrement_active_listeners(&self) { - self.cache.decrement_active_listeners().await; + pub(crate) fn new(cache: Arc) -> Self { + Self { cache } } #[tracing::instrument(skip(self, msg), fields(session_id = tracing::field::Empty))] @@ -177,41 +190,29 @@ impl MessageHandler { tracing::debug!(?msg, "received a message"); match msg { - Notification::AllowedIpsUpdate { .. } - | Notification::PasswordUpdate { .. } - | Notification::BlockPublicOrVpcAccessUpdated { .. } - | Notification::AllowedVpcEndpointsUpdatedForOrg { .. } - | Notification::AllowedVpcEndpointsUpdatedForProjects { .. } => { + Notification::RoleSettingUpdate { .. } + | Notification::EndpointSettingsUpdate { .. } + | Notification::ProjectSettingsUpdate { .. } + | Notification::AccountSettingsUpdate { .. } => { invalidate_cache(self.cache.clone(), msg.clone()); - if matches!(msg, Notification::AllowedIpsUpdate { .. }) { - Metrics::get() - .proxy - .redis_events_count - .inc(RedisEventsCount::AllowedIpsUpdate); - } else if matches!(msg, Notification::PasswordUpdate { .. }) { - Metrics::get() - .proxy - .redis_events_count - .inc(RedisEventsCount::PasswordUpdate); - } else if matches!( - msg, - Notification::AllowedVpcEndpointsUpdatedForProjects { .. } - ) { - Metrics::get() - .proxy - .redis_events_count - .inc(RedisEventsCount::AllowedVpcEndpointIdsUpdateForProjects); - } else if matches!(msg, Notification::AllowedVpcEndpointsUpdatedForOrg { .. }) { - Metrics::get() - .proxy - .redis_events_count - .inc(RedisEventsCount::AllowedVpcEndpointIdsUpdateForAllProjectsInOrg); - } else if matches!(msg, Notification::BlockPublicOrVpcAccessUpdated { .. }) { - Metrics::get() - .proxy - .redis_events_count - .inc(RedisEventsCount::BlockPublicOrVpcAccessUpdate); + + let m = &Metrics::get().proxy.redis_events_count; + match msg { + Notification::RoleSettingUpdate { .. } => { + m.inc(RedisEventsCount::InvalidateRole); + } + Notification::EndpointSettingsUpdate { .. } => { + m.inc(RedisEventsCount::InvalidateEndpoint); + } + Notification::ProjectSettingsUpdate { .. } => { + m.inc(RedisEventsCount::InvalidateProject); + } + Notification::AccountSettingsUpdate { .. } => { + m.inc(RedisEventsCount::InvalidateOrg); + } + Notification::UnknownTopic => {} } + // TODO: add additional metrics for the other event types. // It might happen that the invalid entry is on the way to be cached. 
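
The consolidated Notification enum above collapses the old per-topic payload structs into invalidation payloads that accept either a single ID or a list of IDs, with serde aliases covering the legacy channel names. The single-or-many shape falls out of serde's externally tagged enum representation plus a Deref to a slice; a small sketch assuming serde and serde_json, with plain String IDs in place of the interned ID types:

use serde::Deserialize;

/// Accepts either {"endpoint_id": "ep-1"} or {"endpoint_ids": ["ep-1", "ep-2"]}.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
enum InvalidateEndpoint {
    EndpointId(String),
    EndpointIds(Vec<String>),
}

impl std::ops::Deref for InvalidateEndpoint {
    type Target = [String];
    fn deref(&self) -> &Self::Target {
        match self {
            Self::EndpointId(id) => std::slice::from_ref(id),
            Self::EndpointIds(ids) => ids,
        }
    }
}

fn main() {
    let one: InvalidateEndpoint = serde_json::from_str(r#"{"endpoint_id": "ep-1"}"#).unwrap();
    let many: InvalidateEndpoint =
        serde_json::from_str(r#"{"endpoint_ids": ["ep-1", "ep-2"]}"#).unwrap();
    // Deref to a slice lets the cache be invalidated uniformly, one ID at a time.
    assert_eq!(one.len(), 1);
    assert_eq!(many.len(), 2);
}
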
@@ -233,30 +234,23 @@ impl MessageHandler { fn invalidate_cache(cache: Arc, msg: Notification) { match msg { - Notification::AllowedIpsUpdate { - allowed_ips_update: AllowedIpsUpdate { project_id }, - } - | Notification::BlockPublicOrVpcAccessUpdated { - block_public_or_vpc_access_updated: BlockPublicOrVpcAccessUpdated { project_id }, - } => cache.invalidate_endpoint_access_for_project(project_id), - Notification::AllowedVpcEndpointsUpdatedForOrg { - allowed_vpc_endpoints_updated_for_org: AllowedVpcEndpointsUpdatedForOrg { account_id }, - } => cache.invalidate_endpoint_access_for_org(account_id), - Notification::AllowedVpcEndpointsUpdatedForProjects { - allowed_vpc_endpoints_updated_for_projects: - AllowedVpcEndpointsUpdatedForProjects { project_ids }, - } => { - for project in project_ids { - cache.invalidate_endpoint_access_for_project(project); - } - } - Notification::PasswordUpdate { - password_update: - PasswordUpdate { - project_id, - role_name, - }, - } => cache.invalidate_role_secret_for_project(project_id, role_name), + Notification::EndpointSettingsUpdate(ids) => ids + .iter() + .for_each(|&id| cache.invalidate_endpoint_access(id)), + + Notification::AccountSettingsUpdate(ids) => ids + .iter() + .for_each(|&id| cache.invalidate_endpoint_access_for_org(id)), + + Notification::ProjectSettingsUpdate(ids) => ids + .iter() + .for_each(|&id| cache.invalidate_endpoint_access_for_project(id)), + + Notification::RoleSettingUpdate(InvalidateRole { + project_id, + role_name, + }) => cache.invalidate_role_secret_for_project(project_id, role_name), + Notification::UnknownTopic => unreachable!(), } } @@ -272,7 +266,7 @@ async fn handle_messages( } let mut conn = match try_connect(&redis).await { Ok(conn) => { - handler.increment_active_listeners().await; + handler.cache.increment_active_listeners().await; conn } Err(e) => { @@ -293,11 +287,11 @@ async fn handle_messages( } } if cancellation_token.is_cancelled() { - handler.decrement_active_listeners().await; + handler.cache.decrement_active_listeners().await; return Ok(()); } } - handler.decrement_active_listeners().await; + handler.cache.decrement_active_listeners().await; } } @@ -306,12 +300,11 @@ async fn handle_messages( pub async fn task_main( redis: ConnectionWithCredentialsProvider, cache: Arc, - region_id: String, ) -> anyhow::Result where C: ProjectInfoCache + Send + Sync + 'static, { - let handler = MessageHandler::new(cache, region_id); + let handler = MessageHandler::new(cache); // 6h - 1m. // There will be 1 minute overlap between two tasks. But at least we can be sure that no message is lost. 
let mut interval = tokio::time::interval(std::time::Duration::from_secs(6 * 60 * 60 - 60)); @@ -353,11 +346,32 @@ mod tests { let result: Notification = serde_json::from_str(&text)?; assert_eq!( result, - Notification::AllowedIpsUpdate { - allowed_ips_update: AllowedIpsUpdate { - project_id: (&project_id).into() - } - } + Notification::ProjectSettingsUpdate(InvalidateProject::ProjectId((&project_id).into())) + ); + + Ok(()) + } + + #[test] + fn parse_multiple_projects() -> anyhow::Result<()> { + let project_id1: ProjectId = "new_project1".into(); + let project_id2: ProjectId = "new_project2".into(); + let data = format!("{{\"project_ids\": [\"{project_id1}\",\"{project_id2}\"]}}"); + let text = json!({ + "type": "message", + "topic": "/allowed_vpc_endpoints_updated_for_projects", + "data": data, + "extre_fields": "something" + }) + .to_string(); + + let result: Notification = serde_json::from_str(&text)?; + assert_eq!( + result, + Notification::ProjectSettingsUpdate(InvalidateProject::ProjectIds(vec![ + (&project_id1).into(), + (&project_id2).into() + ])) ); Ok(()) @@ -379,12 +393,10 @@ mod tests { let result: Notification = serde_json::from_str(&text)?; assert_eq!( result, - Notification::PasswordUpdate { - password_update: PasswordUpdate { - project_id: (&project_id).into(), - role_name: (&role_name).into(), - } - } + Notification::RoleSettingUpdate(InvalidateRole { + project_id: (&project_id).into(), + role_name: (&role_name).into(), + }) ); Ok(()) diff --git a/proxy/src/sasl/channel_binding.rs b/proxy/src/sasl/channel_binding.rs index fdd011448e..fcc262f415 100644 --- a/proxy/src/sasl/channel_binding.rs +++ b/proxy/src/sasl/channel_binding.rs @@ -1,5 +1,8 @@ //! Definition and parser for channel binding flag (a part of the `GS2` header). +use base64::Engine as _; +use base64::prelude::BASE64_STANDARD; + /// Channel binding flag (possibly with params). 
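
The base64 imports added above feed the channel-binding change just below, where the cbind-input is built with format! and encoded through the Engine trait instead of the removed base64::encode free function. A sketch of the same p=<mode>,,<cbind-data> construction, assuming the base64 crate's BASE64_STANDARD engine:

use base64::Engine as _;
use base64::prelude::BASE64_STANDARD;

/// Build the SCRAM cbind-input for a required channel-binding mode and
/// return it base64-encoded, as sent in the client-final-message.
fn encode_cbind_input(mode: &str, cbind_data: &[u8]) -> String {
    // GS2 header "p=<mode>,," followed by the raw channel-binding data.
    let mut cbind_input = format!("p={mode},,").into_bytes();
    cbind_input.extend_from_slice(cbind_data);
    BASE64_STANDARD.encode(&cbind_input)
}

fn main() {
    // Matches the unit test in the diff: "p=foo,,bar", base64-encoded.
    assert_eq!(encode_cbind_input("foo", b"bar"), BASE64_STANDARD.encode("p=foo,,bar"));
}
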
#[derive(Debug, PartialEq, Eq)] pub(crate) enum ChannelBinding { @@ -51,11 +54,9 @@ impl ChannelBinding { "eSws".into() } Self::Required(mode) => { - use std::io::Write; - let mut cbind_input = vec![]; - write!(&mut cbind_input, "p={mode},,",).unwrap(); + let mut cbind_input = format!("p={mode},,",).into_bytes(); cbind_input.extend_from_slice(get_cbind_data(mode)?); - base64::encode(&cbind_input).into() + BASE64_STANDARD.encode(&cbind_input).into() } }) } @@ -70,9 +71,9 @@ mod tests { use ChannelBinding::*; let cases = [ - (NotSupportedClient, base64::encode("n,,")), - (NotSupportedServer, base64::encode("y,,")), - (Required("foo"), base64::encode("p=foo,,bar")), + (NotSupportedClient, BASE64_STANDARD.encode("n,,")), + (NotSupportedServer, BASE64_STANDARD.encode("y,,")), + (Required("foo"), BASE64_STANDARD.encode("p=foo,,bar")), ]; for (cb, input) in cases { diff --git a/proxy/src/scram/exchange.rs b/proxy/src/scram/exchange.rs index abd5aeae5b..a0918fca9f 100644 --- a/proxy/src/scram/exchange.rs +++ b/proxy/src/scram/exchange.rs @@ -2,6 +2,8 @@ use std::convert::Infallible; +use base64::Engine as _; +use base64::prelude::BASE64_STANDARD; use hmac::{Hmac, Mac}; use sha2::Sha256; @@ -105,7 +107,7 @@ pub(crate) async fn exchange( secret: &ServerSecret, password: &[u8], ) -> sasl::Result> { - let salt = base64::decode(&secret.salt_base64)?; + let salt = BASE64_STANDARD.decode(&*secret.salt_base64)?; let client_key = derive_client_key(pool, endpoint, password, &salt, secret.iterations).await; if secret.is_password_invalid(&client_key).into() { diff --git a/proxy/src/scram/messages.rs b/proxy/src/scram/messages.rs index e071417dab..c0073917a1 100644 --- a/proxy/src/scram/messages.rs +++ b/proxy/src/scram/messages.rs @@ -3,6 +3,9 @@ use std::fmt; use std::ops::Range; +use base64::Engine as _; +use base64::prelude::BASE64_STANDARD; + use super::base64_decode_array; use super::key::{SCRAM_KEY_LEN, ScramKey}; use super::signature::SignatureBuilder; @@ -84,13 +87,20 @@ impl<'a> ClientFirstMessage<'a> { salt_base64: &str, iterations: u32, ) -> OwnedServerFirstMessage { - use std::fmt::Write; + let mut message = String::with_capacity(128); + message.push_str("r="); - let mut message = String::new(); - write!(&mut message, "r={}", self.nonce).unwrap(); - base64::encode_config_buf(nonce, base64::STANDARD, &mut message); - let combined_nonce = 2..message.len(); - write!(&mut message, ",s={salt_base64},i={iterations}").unwrap(); + // write combined nonce + let combined_nonce_start = message.len(); + message.push_str(self.nonce); + BASE64_STANDARD.encode_string(nonce, &mut message); + let combined_nonce = combined_nonce_start..message.len(); + + // write salt and iterations + message.push_str(",s="); + message.push_str(salt_base64); + message.push_str(",i="); + message.push_str(itoa::Buffer::new().format(iterations)); // This design guarantees that it's impossible to create a // server-first-message without receiving a client-first-message @@ -142,11 +152,7 @@ impl<'a> ClientFinalMessage<'a> { server_key: &ScramKey, ) -> String { let mut buf = String::from("v="); - base64::encode_config_buf( - signature_builder.build(server_key), - base64::STANDARD, - &mut buf, - ); + BASE64_STANDARD.encode_string(signature_builder.build(server_key), &mut buf); buf } @@ -251,7 +257,7 @@ mod tests { "iiYEfS3rOgn8S3rtpSdrOsHtPLWvIkdgmHxA0hf3JNOAG4dU" ); assert_eq!( - base64::encode(msg.proof), + BASE64_STANDARD.encode(msg.proof), "SRpfsIVS4Gk11w1LqQ4QvCUBZYQmqXNSDEcHqbQ3CHI=" ); } diff --git a/proxy/src/scram/mod.rs 
b/proxy/src/scram/mod.rs index 4f764c6087..5f627e062c 100644 --- a/proxy/src/scram/mod.rs +++ b/proxy/src/scram/mod.rs @@ -15,6 +15,8 @@ mod secret; mod signature; pub mod threadpool; +use base64::Engine as _; +use base64::prelude::BASE64_STANDARD; pub(crate) use exchange::{Exchange, exchange}; use hmac::{Hmac, Mac}; pub(crate) use key::ScramKey; @@ -32,7 +34,7 @@ pub(crate) const METHODS_WITHOUT_PLUS: &[&str] = &[SCRAM_SHA_256]; fn base64_decode_array(input: impl AsRef<[u8]>) -> Option<[u8; N]> { let mut bytes = [0u8; N]; - let size = base64::decode_config_slice(input, base64::STANDARD, &mut bytes).ok()?; + let size = BASE64_STANDARD.decode_slice(input, &mut bytes).ok()?; if size != N { return None; } diff --git a/proxy/src/scram/secret.rs b/proxy/src/scram/secret.rs index 8c6a08d432..0e070c2f27 100644 --- a/proxy/src/scram/secret.rs +++ b/proxy/src/scram/secret.rs @@ -1,5 +1,7 @@ //! Tools for SCRAM server secret management. +use base64::Engine as _; +use base64::prelude::BASE64_STANDARD; use subtle::{Choice, ConstantTimeEq}; use super::base64_decode_array; @@ -12,7 +14,7 @@ pub(crate) struct ServerSecret { /// Number of iterations for `PBKDF2` function. pub(crate) iterations: u32, /// Salt used to hash user's password. - pub(crate) salt_base64: String, + pub(crate) salt_base64: Box, /// Hashed `ClientKey`. pub(crate) stored_key: ScramKey, /// Used by client to verify server's signature. @@ -33,7 +35,7 @@ impl ServerSecret { let secret = ServerSecret { iterations: iterations.parse().ok()?, - salt_base64: salt.to_owned(), + salt_base64: salt.into(), stored_key: base64_decode_array(stored_key)?.into(), server_key: base64_decode_array(server_key)?.into(), doomed: false, @@ -56,7 +58,7 @@ impl ServerSecret { // iteration count 1 for our generated passwords going forward. // PG16 users can set iteration count=1 already today. 
iterations: 1, - salt_base64: base64::encode(nonce), + salt_base64: BASE64_STANDARD.encode(nonce).into_boxed_str(), stored_key: ScramKey::default(), server_key: ScramKey::default(), doomed: true, @@ -86,9 +88,9 @@ mod tests { let parsed = ServerSecret::parse(&secret).unwrap(); assert_eq!(parsed.iterations, iterations); - assert_eq!(parsed.salt_base64, salt); + assert_eq!(&*parsed.salt_base64, salt); - assert_eq!(base64::encode(parsed.stored_key), stored_key); - assert_eq!(base64::encode(parsed.server_key), server_key); + assert_eq!(BASE64_STANDARD.encode(parsed.stored_key), stored_key); + assert_eq!(BASE64_STANDARD.encode(parsed.server_key), server_key); } } diff --git a/proxy/src/scram/threadpool.rs b/proxy/src/scram/threadpool.rs index 8f1684c75b..1aa402227f 100644 --- a/proxy/src/scram/threadpool.rs +++ b/proxy/src/scram/threadpool.rs @@ -137,7 +137,7 @@ impl Future for JobSpec { let state = state.as_mut().expect("should be set on thread startup"); state.tick = state.tick.wrapping_add(1); - if state.tick % SKETCH_RESET_INTERVAL == 0 { + if state.tick.is_multiple_of(SKETCH_RESET_INTERVAL) { state.countmin.reset(); } diff --git a/proxy/src/serverless/backend.rs b/proxy/src/serverless/backend.rs index 748e0ce6f2..daa6429039 100644 --- a/proxy/src/serverless/backend.rs +++ b/proxy/src/serverless/backend.rs @@ -21,9 +21,8 @@ use super::conn_pool_lib::{Client, ConnInfo, EndpointConnPool, GlobalConnPool}; use super::http_conn_pool::{self, HttpConnPool, Send, poll_http2_client}; use super::local_conn_pool::{self, EXT_NAME, EXT_SCHEMA, EXT_VERSION, LocalConnPool}; use crate::auth::backend::local::StaticAuthRules; -use crate::auth::backend::{ComputeCredentials, ComputeUserInfo}; +use crate::auth::backend::{ComputeCredentialKeys, ComputeCredentials, ComputeUserInfo}; use crate::auth::{self, AuthError}; -use crate::compute; use crate::compute_ctl::{ ComputeCtlError, ExtensionInstallRequest, Privilege, SetRoleGrantsRequest, }; @@ -35,7 +34,7 @@ use crate::control_plane::errors::{GetAuthInfoError, WakeComputeError}; use crate::control_plane::locks::ApiLocks; use crate::error::{ErrorKind, ReportableError, UserFacingError}; use crate::intern::EndpointIdInt; -use crate::pglb::connect_compute::ConnectMechanism; +use crate::proxy::connect_compute::ConnectMechanism; use crate::proxy::retry::{CouldRetry, ShouldRetryWakeCompute}; use crate::rate_limiter::EndpointRateLimiter; use crate::types::{EndpointId, Host, LOCAL_PROXY_SUFFIX}; @@ -69,17 +68,20 @@ impl PoolingBackend { self.config.authentication_config.is_vpc_acccess_proxy, )?; - let ep = EndpointIdInt::from(&user_info.endpoint); - let rate_limit_config = None; - if !self.endpoint_rate_limiter.check(ep, rate_limit_config, 1) { - return Err(AuthError::too_many_connections()); - } + access_control.connection_attempt_rate_limit( + ctx, + &user_info.endpoint, + &self.endpoint_rate_limiter, + )?; + let role_access = backend.get_role_secret(ctx).await?; let Some(secret) = role_access.secret else { // If we don't have an authentication secret, for the http flow we can just return an error. 
info!("authentication info not found"); return Err(AuthError::password_failed(&*user_info.user)); }; + + let ep = EndpointIdInt::from(&user_info.endpoint); let auth_outcome = crate::auth::validate_password_and_exchange( &self.config.authentication_config.thread_pool, ep, @@ -113,7 +115,8 @@ impl PoolingBackend { match &self.auth_backend { crate::auth::Backend::ControlPlane(console, ()) => { - self.config + let keys = self + .config .authentication_config .jwks_cache .check_jwt( @@ -127,7 +130,7 @@ impl PoolingBackend { Ok(ComputeCredentials { info: user_info.clone(), - keys: crate::auth::backend::ComputeCredentialKeys::None, + keys, }) } crate::auth::Backend::Local(_) => { @@ -181,14 +184,15 @@ impl PoolingBackend { let conn_id = uuid::Uuid::new_v4(); tracing::Span::current().record("conn_id", display(conn_id)); info!(%conn_id, "pool: opening a new connection '{conn_info}'"); - let backend = self.auth_backend.as_ref().map(|()| keys); - crate::pglb::connect_compute::connect_to_compute( + let backend = self.auth_backend.as_ref().map(|()| keys.info); + crate::proxy::connect_compute::connect_to_compute( ctx, &TokioMechanism { conn_id, conn_info, pool: self.pool.clone(), locks: &self.config.connect_compute_locks, + keys: keys.keys, }, &backend, self.config.wake_compute_retry_config, @@ -215,18 +219,15 @@ impl PoolingBackend { let conn_id = uuid::Uuid::new_v4(); tracing::Span::current().record("conn_id", display(conn_id)); debug!(%conn_id, "pool: opening a new connection '{conn_info}'"); - let backend = self.auth_backend.as_ref().map(|()| ComputeCredentials { - info: ComputeUserInfo { - user: conn_info.user_info.user.clone(), - endpoint: EndpointId::from(format!( - "{}{LOCAL_PROXY_SUFFIX}", - conn_info.user_info.endpoint.normalize() - )), - options: conn_info.user_info.options.clone(), - }, - keys: crate::auth::backend::ComputeCredentialKeys::None, + let backend = self.auth_backend.as_ref().map(|()| ComputeUserInfo { + user: conn_info.user_info.user.clone(), + endpoint: EndpointId::from(format!( + "{}{LOCAL_PROXY_SUFFIX}", + conn_info.user_info.endpoint.normalize() + )), + options: conn_info.user_info.options.clone(), }); - crate::pglb::connect_compute::connect_to_compute( + crate::proxy::connect_compute::connect_to_compute( ctx, &HyperMechanism { conn_id, @@ -256,6 +257,7 @@ impl PoolingBackend { &self, ctx: &RequestContext, conn_info: ConnInfo, + disable_pg_session_jwt: bool, ) -> Result, HttpConnError> { if let Some(client) = self.local_pool.get(ctx, &conn_info)? 
{ return Ok(client); @@ -277,7 +279,7 @@ impl PoolingBackend { .expect("semaphore should never be closed"); // check again for race - if !self.local_pool.initialized(&conn_info) { + if !self.local_pool.initialized(&conn_info) && !disable_pg_session_jwt { local_backend .compute_ctl .install_extension(&ExtensionInstallRequest { @@ -305,24 +307,27 @@ impl PoolingBackend { tracing::Span::current().record("conn_id", display(conn_id)); info!(%conn_id, "local_pool: opening a new connection '{conn_info}'"); - let mut node_info = local_backend.node_info.clone(); - let (key, jwk) = create_random_jwk(); - let config = node_info - .config + let mut config = local_backend + .node_info + .conn_info + .to_postgres_client_config(); + config .user(&conn_info.user_info.user) - .dbname(&conn_info.dbname) - .set_param( + .dbname(&conn_info.dbname); + if !disable_pg_session_jwt { + config.set_param( "options", &format!( "-c pg_session_jwt.jwk={}", serde_json::to_string(&jwk).expect("serializing jwk to json should not fail") ), ); + } let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); - let (client, connection) = config.connect(postgres_client::NoTls).await?; + let (client, connection) = config.connect(&postgres_client::NoTls).await?; drop(pause); let pid = client.get_process_id(); @@ -336,7 +341,7 @@ impl PoolingBackend { connection, key, conn_id, - node_info.aux.clone(), + local_backend.node_info.aux.clone(), ); { @@ -344,7 +349,9 @@ impl PoolingBackend { debug!("setting up backend session state"); // initiates the auth session - if let Err(e) = client.batch_execute("select auth.init();").await { + if !disable_pg_session_jwt + && let Err(e) = client.batch_execute("select auth.init();").await + { discard.discard(); return Err(e.into()); } @@ -403,7 +410,15 @@ impl ReportableError for HttpConnError { fn get_error_kind(&self) -> ErrorKind { match self { HttpConnError::ConnectionClosedAbruptly(_) => ErrorKind::Compute, - HttpConnError::PostgresConnectionError(p) => p.get_error_kind(), + HttpConnError::PostgresConnectionError(p) => { + if p.as_db_error().is_some() { + // postgres rejected the connection + ErrorKind::Postgres + } else { + // couldn't even reach postgres + ErrorKind::Compute + } + } HttpConnError::LocalProxyConnectionError(_) => ErrorKind::Compute, HttpConnError::ComputeCtl(_) => ErrorKind::Service, HttpConnError::JwtPayloadError(_) => ErrorKind::User, @@ -495,6 +510,7 @@ struct TokioMechanism { pool: Arc>>, conn_info: ConnInfo, conn_id: uuid::Uuid, + keys: ComputeCredentialKeys, /// connect_to_compute concurrency lock locks: &'static ApiLocks, @@ -512,19 +528,20 @@ impl ConnectMechanism for TokioMechanism { node_info: &CachedNodeInfo, compute_config: &ComputeConfig, ) -> Result { - let host = node_info.config.get_host(); - let permit = self.locks.get_permit(&host).await?; + let permit = self.locks.get_permit(&node_info.conn_info.host).await?; - let mut config = (*node_info.config).clone(); + let mut config = node_info.conn_info.to_postgres_client_config(); let config = config .user(&self.conn_info.user_info.user) .dbname(&self.conn_info.dbname) .connect_timeout(compute_config.timeout); - let mk_tls = - crate::tls::postgres_rustls::MakeRustlsConnect::new(compute_config.tls.clone()); + if let ComputeCredentialKeys::AuthKeys(auth_keys) = self.keys { + config.auth_keys(auth_keys); + } + let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); - let res = config.connect(mk_tls).await; + let res = config.connect(compute_config).await; drop(pause); let (client, 
connection) = permit.release_result(res)?; @@ -548,8 +565,6 @@ impl ConnectMechanism for TokioMechanism { node_info.aux.clone(), )) } - - fn update_connect_config(&self, _config: &mut compute::ConnCfg) {} } struct HyperMechanism { @@ -573,20 +588,20 @@ impl ConnectMechanism for HyperMechanism { node_info: &CachedNodeInfo, config: &ComputeConfig, ) -> Result { - let host_addr = node_info.config.get_host_addr(); - let host = node_info.config.get_host(); - let permit = self.locks.get_permit(&host).await?; + let host_addr = node_info.conn_info.host_addr; + let host = &node_info.conn_info.host; + let permit = self.locks.get_permit(host).await?; let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); - let tls = if node_info.config.get_ssl_mode() == SslMode::Disable { + let tls = if node_info.conn_info.ssl_mode == SslMode::Disable { None } else { Some(&config.tls) }; - let port = node_info.config.get_port(); - let res = connect_http2(host_addr, &host, port, config.timeout, tls).await; + let port = node_info.conn_info.port; + let res = connect_http2(host_addr, host, port, config.timeout, tls).await; drop(pause); let (client, connection) = permit.release_result(res)?; @@ -609,8 +624,6 @@ impl ConnectMechanism for HyperMechanism { node_info.aux.clone(), )) } - - fn update_connect_config(&self, _config: &mut compute::ConnCfg) {} } async fn connect_http2( diff --git a/proxy/src/serverless/conn_pool.rs b/proxy/src/serverless/conn_pool.rs index 87176ff7d6..672e59f81f 100644 --- a/proxy/src/serverless/conn_pool.rs +++ b/proxy/src/serverless/conn_pool.rs @@ -23,12 +23,12 @@ use super::conn_pool_lib::{ Client, ClientDataEnum, ClientInnerCommon, ClientInnerExt, ConnInfo, EndpointConnPool, GlobalConnPool, }; +use crate::config::ComputeConfig; use crate::context::RequestContext; use crate::control_plane::messages::MetricsAuxInfo; use crate::metrics::Metrics; -use crate::tls::postgres_rustls::MakeRustlsConnect; -type TlsStream = >::Stream; +type TlsStream = >::Stream; #[derive(Debug, Clone)] pub(crate) struct ConnInfoWithAuth { @@ -148,11 +148,10 @@ pub(crate) fn poll_client( } // remove from connection pool - if let Some(pool) = pool.clone().upgrade() { - if pool.write().remove_client(db_user.clone(), conn_id) { + if let Some(pool) = pool.clone().upgrade() + && pool.write().remove_client(db_user.clone(), conn_id) { info!("closed connection removed"); } - } Poll::Ready(()) }).await; diff --git a/proxy/src/serverless/error.rs b/proxy/src/serverless/error.rs index 323c91baa5..786964e764 100644 --- a/proxy/src/serverless/error.rs +++ b/proxy/src/serverless/error.rs @@ -1,5 +1,93 @@ use http::StatusCode; +use http::header::HeaderName; + +use crate::auth::ComputeUserInfoParseError; +use crate::error::{ErrorKind, ReportableError, UserFacingError}; +use crate::http::ReadBodyError; pub trait HttpCodeError { fn get_http_status_code(&self) -> StatusCode; } + +#[derive(Debug, thiserror::Error)] +pub(crate) enum ConnInfoError { + #[error("invalid header: {0}")] + InvalidHeader(&'static HeaderName), + #[error("invalid connection string: {0}")] + UrlParseError(#[from] url::ParseError), + #[error("incorrect scheme")] + IncorrectScheme, + #[error("missing database name")] + MissingDbName, + #[error("invalid database name")] + InvalidDbName, + #[error("missing username")] + MissingUsername, + #[error("invalid username: {0}")] + InvalidUsername(#[from] std::string::FromUtf8Error), + #[error("missing authentication credentials: {0}")] + MissingCredentials(Credentials), + #[error("missing hostname")] + 
MissingHostname, + #[error("invalid hostname: {0}")] + InvalidEndpoint(#[from] ComputeUserInfoParseError), +} + +#[derive(Debug, thiserror::Error)] +pub(crate) enum Credentials { + #[error("required password")] + Password, + #[error("required authorization bearer token in JWT format")] + BearerJwt, +} + +impl ReportableError for ConnInfoError { + fn get_error_kind(&self) -> ErrorKind { + ErrorKind::User + } +} + +impl UserFacingError for ConnInfoError { + fn to_string_client(&self) -> String { + self.to_string() + } +} + +#[derive(Debug, thiserror::Error)] +pub(crate) enum ReadPayloadError { + #[error("could not read the HTTP request body: {0}")] + Read(#[from] hyper::Error), + #[error("request is too large (max is {limit} bytes)")] + BodyTooLarge { limit: usize }, + #[error("could not parse the HTTP request body: {0}")] + Parse(#[from] serde_json::Error), +} + +impl From> for ReadPayloadError { + fn from(value: ReadBodyError) -> Self { + match value { + ReadBodyError::BodyTooLarge { limit } => Self::BodyTooLarge { limit }, + ReadBodyError::Read(e) => Self::Read(e), + } + } +} + +impl ReportableError for ReadPayloadError { + fn get_error_kind(&self) -> ErrorKind { + match self { + ReadPayloadError::Read(_) => ErrorKind::ClientDisconnect, + ReadPayloadError::BodyTooLarge { .. } => ErrorKind::User, + ReadPayloadError::Parse(_) => ErrorKind::User, + } + } +} + +impl HttpCodeError for ReadPayloadError { + fn get_http_status_code(&self) -> StatusCode { + match self { + ReadPayloadError::Read(_) => StatusCode::BAD_REQUEST, + ReadPayloadError::BodyTooLarge { .. } => StatusCode::PAYLOAD_TOO_LARGE, + ReadPayloadError::Parse(_) => StatusCode::BAD_REQUEST, + } + } +} diff --git a/proxy/src/serverless/http_conn_pool.rs b/proxy/src/serverless/http_conn_pool.rs index 1c6574e57e..7acd816026 100644 --- a/proxy/src/serverless/http_conn_pool.rs +++ b/proxy/src/serverless/http_conn_pool.rs @@ -2,6 +2,8 @@ use std::collections::VecDeque; use std::sync::atomic::{self, AtomicUsize}; use std::sync::{Arc, Weak}; +use bytes::Bytes; +use http_body_util::combinators::BoxBody; use hyper::client::conn::http2; use hyper_util::rt::{TokioExecutor, TokioIo}; use parking_lot::RwLock; @@ -21,8 +23,9 @@ use crate::protocol2::ConnectionInfoExtra; use crate::types::EndpointCacheKey; use crate::usage_metrics::{Ids, MetricCounter, USAGE_METRICS}; -pub(crate) type Send = http2::SendRequest; -pub(crate) type Connect = http2::Connection, hyper::body::Incoming, TokioExecutor>; +pub(crate) type Send = http2::SendRequest>; +pub(crate) type Connect = + http2::Connection, BoxBody, TokioExecutor>; #[derive(Clone)] pub(crate) struct ClientDataHttp(); @@ -237,10 +240,10 @@ pub(crate) fn poll_http2_client( } // remove from connection pool - if let Some(pool) = pool.clone().upgrade() { - if pool.write().remove_conn(conn_id) { - info!("closed connection removed"); - } + if let Some(pool) = pool.clone().upgrade() + && pool.write().remove_conn(conn_id) + { + info!("closed connection removed"); } } .instrument(span), diff --git a/proxy/src/serverless/http_util.rs b/proxy/src/serverless/http_util.rs index 95a28663a5..0c91ac6835 100644 --- a/proxy/src/serverless/http_util.rs +++ b/proxy/src/serverless/http_util.rs @@ -3,11 +3,42 @@ use anyhow::Context; use bytes::Bytes; -use http::{Response, StatusCode}; +use http::header::AUTHORIZATION; +use http::{HeaderMap, HeaderName, HeaderValue, Response, StatusCode}; use http_body_util::combinators::BoxBody; use http_body_util::{BodyExt, Full}; use http_utils::error::ApiError; use serde::Serialize; +use 
url::Url; +use uuid::Uuid; + +use super::conn_pool::{AuthData, ConnInfoWithAuth}; +use super::conn_pool_lib::ConnInfo; +use super::error::{ConnInfoError, Credentials}; +use crate::auth::backend::ComputeUserInfo; +use crate::config::AuthenticationConfig; +use crate::context::RequestContext; +use crate::metrics::{Metrics, SniGroup, SniKind}; +use crate::pqproto::StartupMessageParams; +use crate::proxy::NeonOptions; +use crate::types::{DbName, EndpointId, RoleName}; + +// Common header names used across serverless modules +pub(super) static NEON_REQUEST_ID: HeaderName = HeaderName::from_static("neon-request-id"); +pub(super) static CONN_STRING: HeaderName = HeaderName::from_static("neon-connection-string"); +pub(super) static RAW_TEXT_OUTPUT: HeaderName = HeaderName::from_static("neon-raw-text-output"); +pub(super) static ARRAY_MODE: HeaderName = HeaderName::from_static("neon-array-mode"); +pub(super) static ALLOW_POOL: HeaderName = HeaderName::from_static("neon-pool-opt-in"); +pub(super) static TXN_ISOLATION_LEVEL: HeaderName = + HeaderName::from_static("neon-batch-isolation-level"); +pub(super) static TXN_READ_ONLY: HeaderName = HeaderName::from_static("neon-batch-read-only"); +pub(super) static TXN_DEFERRABLE: HeaderName = HeaderName::from_static("neon-batch-deferrable"); + +pub(crate) fn uuid_to_header_value(id: Uuid) -> HeaderValue { + let mut uuid = [0; uuid::fmt::Hyphenated::LENGTH]; + HeaderValue::from_str(id.as_hyphenated().encode_lower(&mut uuid[..])) + .expect("uuid hyphenated format should be all valid header characters") +} /// Like [`ApiError::into_response`] pub(crate) fn api_error_into_response(this: ApiError) -> Response> { @@ -107,3 +138,136 @@ pub(crate) fn json_response( .map_err(|e| ApiError::InternalServerError(e.into()))?; Ok(response) } + +pub(crate) fn get_conn_info( + config: &'static AuthenticationConfig, + ctx: &RequestContext, + connection_string: Option<&str>, + headers: &HeaderMap, +) -> Result { + let connection_url = match connection_string { + Some(connection_string) => Url::parse(connection_string)?, + None => { + let connection_string = headers + .get(&CONN_STRING) + .ok_or(ConnInfoError::InvalidHeader(&CONN_STRING))? + .to_str() + .map_err(|_| ConnInfoError::InvalidHeader(&CONN_STRING))?; + Url::parse(connection_string)? + } + }; + + let protocol = connection_url.scheme(); + if protocol != "postgres" && protocol != "postgresql" { + return Err(ConnInfoError::IncorrectScheme); + } + + let mut url_path = connection_url + .path_segments() + .ok_or(ConnInfoError::MissingDbName)?; + + let dbname: DbName = + urlencoding::decode(url_path.next().ok_or(ConnInfoError::InvalidDbName)?)?.into(); + ctx.set_dbname(dbname.clone()); + + let username = RoleName::from(urlencoding::decode(connection_url.username())?); + if username.is_empty() { + return Err(ConnInfoError::MissingUsername); + } + ctx.set_user(username.clone()); + // TODO: make sure this is right in the context of rest broker + let auth = if let Some(auth) = headers.get(&AUTHORIZATION) { + if !config.accept_jwts { + return Err(ConnInfoError::MissingCredentials(Credentials::Password)); + } + + let auth = auth + .to_str() + .map_err(|_| ConnInfoError::InvalidHeader(&AUTHORIZATION))?; + AuthData::Jwt( + auth.strip_prefix("Bearer ") + .ok_or(ConnInfoError::MissingCredentials(Credentials::BearerJwt))? 
+ .into(), + ) + } else if let Some(pass) = connection_url.password() { + // wrong credentials provided + if config.accept_jwts { + return Err(ConnInfoError::MissingCredentials(Credentials::BearerJwt)); + } + + AuthData::Password(match urlencoding::decode_binary(pass.as_bytes()) { + std::borrow::Cow::Borrowed(b) => b.into(), + std::borrow::Cow::Owned(b) => b.into(), + }) + } else if config.accept_jwts { + return Err(ConnInfoError::MissingCredentials(Credentials::BearerJwt)); + } else { + return Err(ConnInfoError::MissingCredentials(Credentials::Password)); + }; + let endpoint: EndpointId = match connection_url.host() { + Some(url::Host::Domain(hostname)) => hostname + .split_once('.') + .map_or(hostname, |(prefix, _)| prefix) + .into(), + Some(url::Host::Ipv4(_) | url::Host::Ipv6(_)) | None => { + return Err(ConnInfoError::MissingHostname); + } + }; + ctx.set_endpoint_id(endpoint.clone()); + + let pairs = connection_url.query_pairs(); + + let mut options = Option::None; + + let mut params = StartupMessageParams::default(); + params.insert("user", &username); + params.insert("database", &dbname); + for (key, value) in pairs { + params.insert(&key, &value); + if key == "options" { + options = Some(NeonOptions::parse_options_raw(&value)); + } + } + + // check the URL that was used, for metrics + { + let host_endpoint = headers + // get the host header + .get("host") + // extract the domain + .and_then(|h| { + let (host, _port) = h.to_str().ok()?.split_once(':')?; + Some(host) + }) + // get the endpoint prefix + .map(|h| h.split_once('.').map_or(h, |(prefix, _)| prefix)); + + let kind = if host_endpoint == Some(&*endpoint) { + SniKind::Sni + } else { + SniKind::NoSni + }; + + let protocol = ctx.protocol(); + Metrics::get() + .proxy + .accepted_connections_by_sni + .inc(SniGroup { protocol, kind }); + } + + ctx.set_user_agent( + headers + .get(hyper::header::USER_AGENT) + .and_then(|h| h.to_str().ok()) + .map(Into::into), + ); + + let user_info = ComputeUserInfo { + endpoint, + user: username, + options: options.unwrap_or_default(), + }; + + let conn_info = ConnInfo { user_info, dbname }; + Ok(ConnInfoWithAuth { conn_info, auth }) +} diff --git a/proxy/src/serverless/json.rs b/proxy/src/serverless/json.rs index 1afc10359f..2e67d07079 100644 --- a/proxy/src/serverless/json.rs +++ b/proxy/src/serverless/json.rs @@ -70,6 +70,34 @@ pub(crate) enum JsonConversionError { ParseJsonError(#[from] serde_json::Error), #[error("unbalanced array")] UnbalancedArray, + #[error("unbalanced quoted string")] + UnbalancedString, +} + +enum OutputMode { + Array(Vec), + Object(Map), +} + +impl OutputMode { + fn key(&mut self, key: &str) -> &mut Value { + match self { + OutputMode::Array(values) => push_entry(values, Value::Null), + OutputMode::Object(map) => map.entry(key.to_string()).or_insert(Value::Null), + } + } + + fn finish(self) -> Value { + match self { + OutputMode::Array(values) => Value::Array(values), + OutputMode::Object(map) => Value::Object(map), + } + } +} + +fn push_entry(arr: &mut Vec, t: T) -> &mut T { + arr.push(t); + arr.last_mut().expect("a value was just inserted") } // @@ -77,182 +105,277 @@ pub(crate) enum JsonConversionError { // pub(crate) fn pg_text_row_to_json( row: &Row, - columns: &[Type], raw_output: bool, array_mode: bool, ) -> Result { - let iter = row - .columns() - .iter() - .zip(columns) - .enumerate() - .map(|(i, (column, typ))| { - let name = column.name(); - let pg_value = row.as_text(i).map_err(JsonConversionError::AsTextError)?; - let json_value = if raw_output { - match 
pg_value { - Some(v) => Value::String(v.to_string()), - None => Value::Null, - } - } else { - pg_text_to_json(pg_value, typ)? - }; - Ok((name.to_string(), json_value)) - }); - - if array_mode { - // drop keys and aggregate into array - let arr = iter - .map(|r| r.map(|(_key, val)| val)) - .collect::, JsonConversionError>>()?; - Ok(Value::Array(arr)) + let mut entries = if array_mode { + OutputMode::Array(Vec::with_capacity(row.columns().len())) } else { - let obj = iter.collect::, JsonConversionError>>()?; - Ok(Value::Object(obj)) + OutputMode::Object(Map::with_capacity(row.columns().len())) + }; + + for (i, column) in row.columns().iter().enumerate() { + let pg_value = row.as_text(i).map_err(JsonConversionError::AsTextError)?; + + let value = entries.key(column.name()); + + match pg_value { + Some(v) if raw_output => *value = Value::String(v.to_string()), + Some(v) => pg_text_to_json(value, v, column.type_())?, + None => *value = Value::Null, + } } + + Ok(entries.finish()) } // // Convert postgres text-encoded value to JSON value // -fn pg_text_to_json(pg_value: Option<&str>, pg_type: &Type) -> Result { - if let Some(val) = pg_value { - if let Kind::Array(elem_type) = pg_type.kind() { - return pg_array_parse(val, elem_type); - } +fn pg_text_to_json( + output: &mut Value, + val: &str, + pg_type: &Type, +) -> Result<(), JsonConversionError> { + if let Kind::Array(elem_type) = pg_type.kind() { + // todo: we should fetch this from postgres. + let delimiter = ','; - match *pg_type { - Type::BOOL => Ok(Value::Bool(val == "t")), - Type::INT2 | Type::INT4 => { - let val = val.parse::()?; - Ok(Value::Number(serde_json::Number::from(val))) - } - Type::FLOAT4 | Type::FLOAT8 => { - let fval = val.parse::()?; - let num = serde_json::Number::from_f64(fval); - if let Some(num) = num { - Ok(Value::Number(num)) - } else { - // Pass Nan, Inf, -Inf as strings - // JS JSON.stringify() does converts them to null, but we - // want to preserve them, so we pass them as strings - Ok(Value::String(val.to_string())) - } - } - Type::JSON | Type::JSONB => Ok(serde_json::from_str(val)?), - _ => Ok(Value::String(val.to_string())), - } - } else { - Ok(Value::Null) - } -} - -// -// Parse postgres array into JSON array. -// -// This is a bit involved because we need to handle nested arrays and quoted -// values. Unlike postgres we don't check that all nested arrays have the same -// dimensions, we just return them as is. -// -fn pg_array_parse(pg_array: &str, elem_type: &Type) -> Result { - pg_array_parse_inner(pg_array, elem_type, false).map(|(v, _)| v) -} - -fn pg_array_parse_inner( - pg_array: &str, - elem_type: &Type, - nested: bool, -) -> Result<(Value, usize), JsonConversionError> { - let mut pg_array_chr = pg_array.char_indices(); - let mut level = 0; - let mut quote = false; - let mut entries: Vec = Vec::new(); - let mut entry = String::new(); - - // skip bounds decoration - if let Some('[') = pg_array.chars().next() { - for (_, c) in pg_array_chr.by_ref() { - if c == '=' { - break; - } - } + let mut array = vec![]; + pg_array_parse(&mut array, val, elem_type, delimiter)?; + *output = Value::Array(array); + return Ok(()); } - fn push_checked( - entry: &mut String, - entries: &mut Vec, - elem_type: &Type, - ) -> Result<(), JsonConversionError> { - if !entry.is_empty() { - // While in usual postgres response we get nulls as None and everything else - // as Some(&str), in arrays we get NULL as unquoted 'NULL' string (while - // string with value 'NULL' will be represented by '"NULL"'). 
So catch NULLs - // here while we have quotation info and convert them to None. - if entry == "NULL" { - entries.push(pg_text_to_json(None, elem_type)?); + match *pg_type { + Type::BOOL => *output = Value::Bool(val == "t"), + Type::INT2 | Type::INT4 => { + let val = val.parse::()?; + *output = Value::Number(serde_json::Number::from(val)); + } + Type::FLOAT4 | Type::FLOAT8 => { + let fval = val.parse::()?; + let num = serde_json::Number::from_f64(fval); + if let Some(num) = num { + *output = Value::Number(num); } else { - entries.push(pg_text_to_json(Some(entry), elem_type)?); + // Pass Nan, Inf, -Inf as strings + // JS JSON.stringify() does converts them to null, but we + // want to preserve them, so we pass them as strings + *output = Value::String(val.to_string()); } - entry.clear(); } - - Ok(()) + Type::JSON | Type::JSONB => *output = serde_json::from_str(val)?, + _ => *output = Value::String(val.to_string()), } - while let Some((mut i, mut c)) = pg_array_chr.next() { - let mut escaped = false; + Ok(()) +} - if c == '\\' { - escaped = true; - let Some(x) = pg_array_chr.next() else { - return Err(JsonConversionError::UnbalancedArray); - }; - (i, c) = x; - } - - match c { - '{' if !quote => { - level += 1; - if level > 1 { - let (res, off) = pg_array_parse_inner(&pg_array[i..], elem_type, true)?; - entries.push(res); - for _ in 0..off - 1 { - pg_array_chr.next(); - } - } - } - '}' if !quote => { - level -= 1; - if level == 0 { - push_checked(&mut entry, &mut entries, elem_type)?; - if nested { - return Ok((Value::Array(entries), i)); - } - } - } - '"' if !escaped => { - if quote { - // end of quoted string, so push it manually without any checks - // for emptiness or nulls - entries.push(pg_text_to_json(Some(&entry), elem_type)?); - entry.clear(); - } - quote = !quote; - } - ',' if !quote => { - push_checked(&mut entry, &mut entries, elem_type)?; - } - _ => { - entry.push(c); - } - } +/// Parse postgres array into JSON array. +/// +/// This is a bit involved because we need to handle nested arrays and quoted +/// values. Unlike postgres we don't check that all nested arrays have the same +/// dimensions, we just return them as is. +/// +/// +/// +/// The external text representation of an array value consists of items that are interpreted +/// according to the I/O conversion rules for the array's element type, plus decoration that +/// indicates the array structure. The decoration consists of curly braces (`{` and `}`) around +/// the array value plus delimiter characters between adjacent items. The delimiter character +/// is usually a comma (,) but can be something else: it is determined by the typdelim setting +/// for the array's element type. Among the standard data types provided in the PostgreSQL +/// distribution, all use a comma, except for type box, which uses a semicolon (;). +/// +/// In a multidimensional array, each dimension (row, plane, cube, etc.) +/// gets its own level of curly braces, and delimiters must be written between adjacent +/// curly-braced entities of the same level. +fn pg_array_parse( + elements: &mut Vec, + mut pg_array: &str, + elem: &Type, + delim: char, +) -> Result<(), JsonConversionError> { + // skip bounds decoration, eg: + // `[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}` + // technically these are significant, but we have no way to represent them in json. 
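    // Worked example, mirroring `test_pg_array_with_decoration` below: for the input
    // `[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}` with an INT2 element type, the
    // `[1:1][-2:-1][3:5]=` bounds prefix is dropped and the remainder parses to the
    // JSON value [[[1, 2, 3], [4, 5, 6]]].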
+ if let Some('[') = pg_array.chars().next() { + let Some((_bounds, array)) = pg_array.split_once('=') else { + return Err(JsonConversionError::UnbalancedArray); + }; + pg_array = array; } - if level != 0 { + // whitespace might preceed a `{`. + let pg_array = pg_array.trim_start(); + + let rest = pg_array_parse_inner(elements, pg_array, elem, delim)?; + if !rest.is_empty() { return Err(JsonConversionError::UnbalancedArray); } - Ok((Value::Array(entries), 0)) + Ok(()) +} + +/// reads a single array from the `pg_array` string and pushes each values to `elements`. +/// returns the rest of the `pg_array` string that was not read. +fn pg_array_parse_inner<'a>( + elements: &mut Vec, + mut pg_array: &'a str, + elem: &Type, + delim: char, +) -> Result<&'a str, JsonConversionError> { + // array should have a `{` prefix. + pg_array = pg_array + .strip_prefix('{') + .ok_or(JsonConversionError::UnbalancedArray)?; + + let mut q = String::new(); + + loop { + let value = push_entry(elements, Value::Null); + pg_array = pg_array_parse_item(value, &mut q, pg_array, elem, delim)?; + + // check for separator. + if let Some(next) = pg_array.strip_prefix(delim) { + // next item. + pg_array = next; + } else { + break; + } + } + + let Some(next) = pg_array.strip_prefix('}') else { + // missing `}` terminator. + return Err(JsonConversionError::UnbalancedArray); + }; + + // whitespace might follow a `}`. + Ok(next.trim_start()) +} + +/// reads a single item from the `pg_array` string. +/// returns the rest of the `pg_array` string that was not read. +/// +/// `quoted` is a scratch allocation that has no defined output. +fn pg_array_parse_item<'a>( + output: &mut Value, + quoted: &mut String, + mut pg_array: &'a str, + elem: &Type, + delim: char, +) -> Result<&'a str, JsonConversionError> { + // We are trying to parse an array item. + // This could be a new array, if this is a multi-dimentional array. + // This could be a quoted string representing `elem`. + // This could be an unquoted string representing `elem`. + + // whitespace might preceed an item. + pg_array = pg_array.trim_start(); + + if pg_array.starts_with('{') { + // nested array. + let mut nested = vec![]; + pg_array = pg_array_parse_inner(&mut nested, pg_array, elem, delim)?; + *output = Value::Array(nested); + return Ok(pg_array); + } + + if let Some(mut pg_array) = pg_array.strip_prefix('"') { + // the parsed string is un-escaped and written into quoted. + pg_array = pg_array_parse_quoted(quoted, pg_array)?; + + // we have un-escaped the string, parse it as pgtext. + pg_text_to_json(output, quoted, elem)?; + + return Ok(pg_array); + } + + // we need to parse an item. read until we find a delimiter or `}`. + let index = pg_array + .find([delim, '}']) + .ok_or(JsonConversionError::UnbalancedArray)?; + + let item; + (item, pg_array) = pg_array.split_at(index); + + // item might have trailing whitespace that we need to ignore. + let item = item.trim_end(); + + // we might have an item string: + // check for null + if item == "NULL" { + *output = Value::Null; + } else { + pg_text_to_json(output, item, elem)?; + } + + Ok(pg_array) +} + +/// reads a single quoted item from the `pg_array` string. +/// +/// Returns the rest of the `pg_array` string that was not read. +/// The output is written into `quoted`. +/// +/// The pg_array string must have a `"` terminator, but the `"` initial value +/// must have already been removed from the input. The terminator is removed. 
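/// A worked example (matching `test_pg_array_parse_text` below): if the remaining
/// input after the opening quote is `aa\"\\\,a",cha,"bbbb"}`, the unescaped value
/// written to `quoted` is `aa"\,a` and the returned rest is `,cha,"bbbb"}`.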
+fn pg_array_parse_quoted<'a>( + quoted: &mut String, + mut pg_array: &'a str, +) -> Result<&'a str, JsonConversionError> { + // The array output routine will put double quotes around element values if they are empty strings, + // contain curly braces, delimiter characters, double quotes, backslashes, or white space, + // or match the word `NULL`. Double quotes and backslashes embedded in element values will be backslash-escaped. + // For numeric data types it is safe to assume that double quotes will never appear, + // but for textual data types one should be prepared to cope with either the presence or absence of quotes. + + quoted.clear(); + + // We write to quoted in chunks terminated by an escape character. + // Eg if we have the input `foo\"bar"`, then we write `foo`, then `"`, then finally `bar`. + + loop { + // we need to parse an chunk. read until we find a '\\' or `"`. + let i = pg_array + .find(['\\', '"']) + .ok_or(JsonConversionError::UnbalancedString)?; + + let chunk: &str; + (chunk, pg_array) = pg_array + .split_at_checked(i) + .expect("i is guaranteed to be in-bounds of pg_array"); + + // push the chunk. + quoted.push_str(chunk); + + // consume the chunk_end character. + let chunk_end: char; + (chunk_end, pg_array) = + split_first_char(pg_array).expect("pg_array should start with either '\\\\' or '\"'"); + + // finished. + if chunk_end == '"' { + // whitespace might follow the '"'. + pg_array = pg_array.trim_start(); + + break Ok(pg_array); + } + + // consume the escaped character. + let escaped: char; + (escaped, pg_array) = + split_first_char(pg_array).ok_or(JsonConversionError::UnbalancedString)?; + + quoted.push(escaped); + } +} + +fn split_first_char(s: &str) -> Option<(char, &str)> { + let mut chars = s.chars(); + let c = chars.next()?; + Some((c, chars.as_str())) } #[cfg(test)] @@ -316,37 +439,33 @@ mod tests { ); } + fn pg_text_to_json(val: &str, pg_type: &Type) -> Value { + let mut v = Value::Null; + super::pg_text_to_json(&mut v, val, pg_type).unwrap(); + v + } + + fn pg_array_parse(pg_array: &str, pg_type: &Type) -> Value { + let mut array = vec![]; + super::pg_array_parse(&mut array, pg_array, pg_type, ',').unwrap(); + Value::Array(array) + } + #[test] fn test_atomic_types_parse() { + assert_eq!(pg_text_to_json("foo", &Type::TEXT), json!("foo")); + assert_eq!(pg_text_to_json("42", &Type::INT4), json!(42)); + assert_eq!(pg_text_to_json("42", &Type::INT2), json!(42)); + assert_eq!(pg_text_to_json("42", &Type::INT8), json!("42")); + assert_eq!(pg_text_to_json("42.42", &Type::FLOAT8), json!(42.42)); + assert_eq!(pg_text_to_json("42.42", &Type::FLOAT4), json!(42.42)); + assert_eq!(pg_text_to_json("NaN", &Type::FLOAT4), json!("NaN")); assert_eq!( - pg_text_to_json(Some("foo"), &Type::TEXT).unwrap(), - json!("foo") - ); - assert_eq!(pg_text_to_json(None, &Type::TEXT).unwrap(), json!(null)); - assert_eq!(pg_text_to_json(Some("42"), &Type::INT4).unwrap(), json!(42)); - assert_eq!(pg_text_to_json(Some("42"), &Type::INT2).unwrap(), json!(42)); - assert_eq!( - pg_text_to_json(Some("42"), &Type::INT8).unwrap(), - json!("42") - ); - assert_eq!( - pg_text_to_json(Some("42.42"), &Type::FLOAT8).unwrap(), - json!(42.42) - ); - assert_eq!( - pg_text_to_json(Some("42.42"), &Type::FLOAT4).unwrap(), - json!(42.42) - ); - assert_eq!( - pg_text_to_json(Some("NaN"), &Type::FLOAT4).unwrap(), - json!("NaN") - ); - assert_eq!( - pg_text_to_json(Some("Infinity"), &Type::FLOAT4).unwrap(), + pg_text_to_json("Infinity", &Type::FLOAT4), json!("Infinity") ); assert_eq!( - 
pg_text_to_json(Some("-Infinity"), &Type::FLOAT4).unwrap(), + pg_text_to_json("-Infinity", &Type::FLOAT4), json!("-Infinity") ); @@ -355,10 +474,9 @@ mod tests { .unwrap(); assert_eq!( pg_text_to_json( - Some(r#"{"s":"str","n":42,"f":4.2,"a":[null,3,"a"]}"#), + r#"{"s":"str","n":42,"f":4.2,"a":[null,3,"a"]}"#, &Type::JSONB - ) - .unwrap(), + ), json ); } @@ -366,7 +484,7 @@ mod tests { #[test] fn test_pg_array_parse_text() { fn pt(pg_arr: &str) -> Value { - pg_array_parse(pg_arr, &Type::TEXT).unwrap() + pg_array_parse(pg_arr, &Type::TEXT) } assert_eq!( pt(r#"{"aa\"\\\,a",cha,"bbbb"}"#), @@ -389,7 +507,7 @@ mod tests { #[test] fn test_pg_array_parse_bool() { fn pb(pg_arr: &str) -> Value { - pg_array_parse(pg_arr, &Type::BOOL).unwrap() + pg_array_parse(pg_arr, &Type::BOOL) } assert_eq!(pb(r#"{t,f,t}"#), json!([true, false, true])); assert_eq!(pb(r#"{{t,f,t}}"#), json!([[true, false, true]])); @@ -406,7 +524,7 @@ mod tests { #[test] fn test_pg_array_parse_numbers() { fn pn(pg_arr: &str, ty: &Type) -> Value { - pg_array_parse(pg_arr, ty).unwrap() + pg_array_parse(pg_arr, ty) } assert_eq!(pn(r#"{1,2,3}"#, &Type::INT4), json!([1, 2, 3])); assert_eq!(pn(r#"{1,2,3}"#, &Type::INT2), json!([1, 2, 3])); @@ -434,7 +552,7 @@ mod tests { #[test] fn test_pg_array_with_decoration() { fn p(pg_arr: &str) -> Value { - pg_array_parse(pg_arr, &Type::INT2).unwrap() + pg_array_parse(pg_arr, &Type::INT2) } assert_eq!( p(r#"[1:1][-2:-1][3:5]={{{1,2,3},{4,5,6}}}"#), @@ -445,7 +563,7 @@ mod tests { #[test] fn test_pg_array_parse_json() { fn pt(pg_arr: &str) -> Value { - pg_array_parse(pg_arr, &Type::JSONB).unwrap() + pg_array_parse(pg_arr, &Type::JSONB) } assert_eq!(pt(r#"{"{}"}"#), json!([{}])); assert_eq!( diff --git a/proxy/src/serverless/local_conn_pool.rs b/proxy/src/serverless/local_conn_pool.rs index bb5637cd5f..e4cbd02bfe 100644 --- a/proxy/src/serverless/local_conn_pool.rs +++ b/proxy/src/serverless/local_conn_pool.rs @@ -16,6 +16,8 @@ use std::sync::atomic::AtomicUsize; use std::task::{Poll, ready}; use std::time::Duration; +use base64::Engine as _; +use base64::prelude::BASE64_URL_SAFE_NO_PAD; use ed25519_dalek::{Signature, Signer, SigningKey}; use futures::Future; use futures::future::poll_fn; @@ -247,11 +249,10 @@ pub(crate) fn poll_client( } // remove from connection pool - if let Some(pool) = pool.clone().upgrade() { - if pool.global_pool.write().remove_client(db_user.clone(), conn_id) { + if let Some(pool) = pool.clone().upgrade() + && pool.global_pool.write().remove_client(db_user.clone(), conn_id) { info!("closed connection removed"); } - } Poll::Ready(()) }).await; @@ -346,7 +347,7 @@ fn sign_jwt(sk: &SigningKey, payload: &[u8]) -> String { jwt.push_str("eyJhbGciOiJFZERTQSJ9."); // encode the jwt payload in-place - base64::encode_config_buf(payload, base64::URL_SAFE_NO_PAD, &mut jwt); + BASE64_URL_SAFE_NO_PAD.encode_string(payload, &mut jwt); // create the signature from the encoded header || payload let sig: Signature = sk.sign(jwt.as_bytes()); @@ -354,7 +355,7 @@ fn sign_jwt(sk: &SigningKey, payload: &[u8]) -> String { jwt.push('.'); // encode the jwt signature in-place - base64::encode_config_buf(sig.to_bytes(), base64::URL_SAFE_NO_PAD, &mut jwt); + BASE64_URL_SAFE_NO_PAD.encode_string(sig.to_bytes(), &mut jwt); debug_assert_eq!( jwt.len(), diff --git a/proxy/src/serverless/mod.rs b/proxy/src/serverless/mod.rs index f6f681ac45..5b7289c53d 100644 --- a/proxy/src/serverless/mod.rs +++ b/proxy/src/serverless/mod.rs @@ -29,13 +29,13 @@ use futures::future::{Either, select}; use http::{Method, 
Response, StatusCode}; use http_body_util::combinators::BoxBody; use http_body_util::{BodyExt, Empty}; +use http_util::{NEON_REQUEST_ID, uuid_to_header_value}; use http_utils::error::ApiError; use hyper::body::Incoming; use hyper_util::rt::TokioExecutor; use hyper_util::server::conn::auto::Builder; use rand::SeedableRng; use rand::rngs::StdRng; -use sql_over_http::{NEON_REQUEST_ID, uuid_to_header_value}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::net::{TcpListener, TcpStream}; use tokio::time::timeout; @@ -50,10 +50,10 @@ use crate::context::RequestContext; use crate::ext::TaskExt; use crate::metrics::Metrics; use crate::protocol2::{ConnectHeader, ConnectionInfo, read_proxy_protocol}; -use crate::proxy::run_until_cancelled; use crate::rate_limiter::EndpointRateLimiter; use crate::serverless::backend::PoolingBackend; use crate::serverless::http_util::{api_error_into_response, json_response}; +use crate::util::run_until_cancelled; pub(crate) const SERVERLESS_DRIVER_SNI: &str = "api"; pub(crate) const AUTH_BROKER_SNI: &str = "apiauth"; @@ -417,12 +417,7 @@ async fn request_handler( if config.http_config.accept_websockets && framed_websockets::upgrade::is_upgrade_request(&request) { - let ctx = RequestContext::new( - session_id, - conn_info, - crate::metrics::Protocol::Ws, - &config.region, - ); + let ctx = RequestContext::new(session_id, conn_info, crate::metrics::Protocol::Ws); ctx.set_user_agent( request @@ -462,12 +457,7 @@ async fn request_handler( // Return the response so the spawned future can continue. Ok(response.map(|b| b.map_err(|x| match x {}).boxed())) } else if request.uri().path() == "/sql" && *request.method() == Method::POST { - let ctx = RequestContext::new( - session_id, - conn_info, - crate::metrics::Protocol::Http, - &config.region, - ); + let ctx = RequestContext::new(session_id, conn_info, crate::metrics::Protocol::Http); let span = ctx.span(); let testodrome_id = request diff --git a/proxy/src/serverless/sql_over_http.rs b/proxy/src/serverless/sql_over_http.rs index eb80ac9ad0..7a718d0280 100644 --- a/proxy/src/serverless/sql_over_http.rs +++ b/proxy/src/serverless/sql_over_http.rs @@ -11,7 +11,7 @@ use http_body_util::{BodyExt, Full}; use http_utils::error::ApiError; use hyper::body::Incoming; use hyper::http::{HeaderName, HeaderValue}; -use hyper::{HeaderMap, Request, Response, StatusCode, header}; +use hyper::{Request, Response, StatusCode, header}; use indexmap::IndexMap; use postgres_client::error::{DbError, ErrorPosition, SqlState}; use postgres_client::{ @@ -22,29 +22,27 @@ use serde_json::Value; use serde_json::value::RawValue; use tokio::time::{self, Instant}; use tokio_util::sync::CancellationToken; -use tracing::{debug, error, info}; +use tracing::{Level, debug, error, info}; use typed_json::json; -use url::Url; -use uuid::Uuid; use super::backend::{LocalProxyConnError, PoolingBackend}; -use super::conn_pool::{AuthData, ConnInfoWithAuth}; +use super::conn_pool::AuthData; use super::conn_pool_lib::{self, ConnInfo}; -use super::error::HttpCodeError; -use super::http_util::json_response; +use super::error::{ConnInfoError, HttpCodeError, ReadPayloadError}; +use super::http_util::{ + ALLOW_POOL, ARRAY_MODE, CONN_STRING, NEON_REQUEST_ID, RAW_TEXT_OUTPUT, TXN_DEFERRABLE, + TXN_ISOLATION_LEVEL, TXN_READ_ONLY, get_conn_info, json_response, uuid_to_header_value, +}; use super::json::{JsonConversionError, json_to_pg_text, pg_text_row_to_json}; -use crate::auth::backend::{ComputeCredentialKeys, ComputeUserInfo}; -use crate::auth::{ComputeUserInfoParseError, 
endpoint_sni}; -use crate::config::{AuthenticationConfig, HttpConfig, ProxyConfig, TlsConfig}; +use crate::auth::backend::ComputeCredentialKeys; +use crate::config::{HttpConfig, ProxyConfig}; use crate::context::RequestContext; use crate::error::{ErrorKind, ReportableError, UserFacingError}; -use crate::http::{ReadBodyError, read_body_with_limit}; -use crate::metrics::{HttpDirection, Metrics, SniGroup, SniKind}; -use crate::pqproto::StartupMessageParams; -use crate::proxy::{NeonOptions, run_until_cancelled}; +use crate::http::read_body_with_limit; +use crate::metrics::{HttpDirection, Metrics}; use crate::serverless::backend::HttpConnError; -use crate::types::{DbName, RoleName}; use crate::usage_metrics::{MetricCounter, MetricCounterRecorder}; +use crate::util::run_until_cancelled; #[derive(serde::Deserialize)] #[serde(rename_all = "camelCase")] @@ -69,16 +67,6 @@ enum Payload { Batch(BatchQueryData), } -pub(super) static NEON_REQUEST_ID: HeaderName = HeaderName::from_static("neon-request-id"); - -static CONN_STRING: HeaderName = HeaderName::from_static("neon-connection-string"); -static RAW_TEXT_OUTPUT: HeaderName = HeaderName::from_static("neon-raw-text-output"); -static ARRAY_MODE: HeaderName = HeaderName::from_static("neon-array-mode"); -static ALLOW_POOL: HeaderName = HeaderName::from_static("neon-pool-opt-in"); -static TXN_ISOLATION_LEVEL: HeaderName = HeaderName::from_static("neon-batch-isolation-level"); -static TXN_READ_ONLY: HeaderName = HeaderName::from_static("neon-batch-read-only"); -static TXN_DEFERRABLE: HeaderName = HeaderName::from_static("neon-batch-deferrable"); - static HEADER_VALUE_TRUE: HeaderValue = HeaderValue::from_static("true"); fn bytes_to_pg_text<'de, D>(deserializer: D) -> Result>, D::Error> @@ -90,188 +78,6 @@ where Ok(json_to_pg_text(json)) } -#[derive(Debug, thiserror::Error)] -pub(crate) enum ConnInfoError { - #[error("invalid header: {0}")] - InvalidHeader(&'static HeaderName), - #[error("invalid connection string: {0}")] - UrlParseError(#[from] url::ParseError), - #[error("incorrect scheme")] - IncorrectScheme, - #[error("missing database name")] - MissingDbName, - #[error("invalid database name")] - InvalidDbName, - #[error("missing username")] - MissingUsername, - #[error("invalid username: {0}")] - InvalidUsername(#[from] std::string::FromUtf8Error), - #[error("missing authentication credentials: {0}")] - MissingCredentials(Credentials), - #[error("missing hostname")] - MissingHostname, - #[error("invalid hostname: {0}")] - InvalidEndpoint(#[from] ComputeUserInfoParseError), - #[error("malformed endpoint")] - MalformedEndpoint, -} - -#[derive(Debug, thiserror::Error)] -pub(crate) enum Credentials { - #[error("required password")] - Password, - #[error("required authorization bearer token in JWT format")] - BearerJwt, -} - -impl ReportableError for ConnInfoError { - fn get_error_kind(&self) -> ErrorKind { - ErrorKind::User - } -} - -impl UserFacingError for ConnInfoError { - fn to_string_client(&self) -> String { - self.to_string() - } -} - -fn get_conn_info( - config: &'static AuthenticationConfig, - ctx: &RequestContext, - headers: &HeaderMap, - tls: Option<&TlsConfig>, -) -> Result { - let connection_string = headers - .get(&CONN_STRING) - .ok_or(ConnInfoError::InvalidHeader(&CONN_STRING))? 
- .to_str() - .map_err(|_| ConnInfoError::InvalidHeader(&CONN_STRING))?; - - let connection_url = Url::parse(connection_string)?; - - let protocol = connection_url.scheme(); - if protocol != "postgres" && protocol != "postgresql" { - return Err(ConnInfoError::IncorrectScheme); - } - - let mut url_path = connection_url - .path_segments() - .ok_or(ConnInfoError::MissingDbName)?; - - let dbname: DbName = - urlencoding::decode(url_path.next().ok_or(ConnInfoError::InvalidDbName)?)?.into(); - ctx.set_dbname(dbname.clone()); - - let username = RoleName::from(urlencoding::decode(connection_url.username())?); - if username.is_empty() { - return Err(ConnInfoError::MissingUsername); - } - ctx.set_user(username.clone()); - - let auth = if let Some(auth) = headers.get(&AUTHORIZATION) { - if !config.accept_jwts { - return Err(ConnInfoError::MissingCredentials(Credentials::Password)); - } - - let auth = auth - .to_str() - .map_err(|_| ConnInfoError::InvalidHeader(&AUTHORIZATION))?; - AuthData::Jwt( - auth.strip_prefix("Bearer ") - .ok_or(ConnInfoError::MissingCredentials(Credentials::BearerJwt))? - .into(), - ) - } else if let Some(pass) = connection_url.password() { - // wrong credentials provided - if config.accept_jwts { - return Err(ConnInfoError::MissingCredentials(Credentials::BearerJwt)); - } - - AuthData::Password(match urlencoding::decode_binary(pass.as_bytes()) { - std::borrow::Cow::Borrowed(b) => b.into(), - std::borrow::Cow::Owned(b) => b.into(), - }) - } else if config.accept_jwts { - return Err(ConnInfoError::MissingCredentials(Credentials::BearerJwt)); - } else { - return Err(ConnInfoError::MissingCredentials(Credentials::Password)); - }; - - let endpoint = match connection_url.host() { - Some(url::Host::Domain(hostname)) => { - if let Some(tls) = tls { - endpoint_sni(hostname, &tls.common_names).ok_or(ConnInfoError::MalformedEndpoint)? 
- } else { - hostname - .split_once('.') - .map_or(hostname, |(prefix, _)| prefix) - .into() - } - } - Some(url::Host::Ipv4(_) | url::Host::Ipv6(_)) | None => { - return Err(ConnInfoError::MissingHostname); - } - }; - ctx.set_endpoint_id(endpoint.clone()); - - let pairs = connection_url.query_pairs(); - - let mut options = Option::None; - - let mut params = StartupMessageParams::default(); - params.insert("user", &username); - params.insert("database", &dbname); - for (key, value) in pairs { - params.insert(&key, &value); - if key == "options" { - options = Some(NeonOptions::parse_options_raw(&value)); - } - } - - // check the URL that was used, for metrics - { - let host_endpoint = headers - // get the host header - .get("host") - // extract the domain - .and_then(|h| { - let (host, _port) = h.to_str().ok()?.split_once(':')?; - Some(host) - }) - // get the endpoint prefix - .map(|h| h.split_once('.').map_or(h, |(prefix, _)| prefix)); - - let kind = if host_endpoint == Some(&*endpoint) { - SniKind::Sni - } else { - SniKind::NoSni - }; - - let protocol = ctx.protocol(); - Metrics::get() - .proxy - .accepted_connections_by_sni - .inc(SniGroup { protocol, kind }); - } - - ctx.set_user_agent( - headers - .get(hyper::header::USER_AGENT) - .and_then(|h| h.to_str().ok()) - .map(Into::into), - ); - - let user_info = ComputeUserInfo { - endpoint, - user: username, - options: options.unwrap_or_default(), - }; - - let conn_info = ConnInfo { user_info, dbname }; - Ok(ConnInfoWithAuth { conn_info, auth }) -} - pub(crate) async fn handle( config: &'static ProxyConfig, ctx: RequestContext, @@ -389,12 +195,35 @@ pub(crate) async fn handle( let line = get(db_error, |db| db.line().map(|l| l.to_string())); let routine = get(db_error, |db| db.routine()); - tracing::info!( - kind=error_kind.to_metric_label(), - error=%e, - msg=message, - "forwarding error to user" - ); + match &e { + SqlOverHttpError::Postgres(e) + if e.as_db_error().is_some() && error_kind == ErrorKind::User => + { + // this error contains too much info, and it's not an error we care about. + if tracing::enabled!(Level::DEBUG) { + tracing::debug!( + kind=error_kind.to_metric_label(), + error=%e, + msg=message, + "forwarding error to user" + ); + } else { + tracing::info!( + kind = error_kind.to_metric_label(), + error = "bad query", + "forwarding error to user" + ); + } + } + _ => { + tracing::info!( + kind=error_kind.to_metric_label(), + error=%e, + msg=message, + "forwarding error to user" + ); + } + } json_response( e.get_http_status_code(), @@ -459,7 +288,15 @@ impl ReportableError for SqlOverHttpError { SqlOverHttpError::ConnInfo(e) => e.get_error_kind(), SqlOverHttpError::ResponseTooLarge(_) => ErrorKind::User, SqlOverHttpError::InvalidIsolationLevel => ErrorKind::User, - SqlOverHttpError::Postgres(p) => p.get_error_kind(), + // customer initiated SQL errors. + SqlOverHttpError::Postgres(p) => { + if p.as_db_error().is_some() { + ErrorKind::User + } else { + ErrorKind::Compute + } + } + // proxy initiated SQL errors. SqlOverHttpError::InternalPostgres(p) => { if p.as_db_error().is_some() { ErrorKind::Service @@ -467,6 +304,7 @@ impl ReportableError for SqlOverHttpError { ErrorKind::Compute } } + // postgres returned a bad row format that we couldn't parse. 
SqlOverHttpError::JsonConversion(_) => ErrorKind::Postgres, SqlOverHttpError::Cancelled(c) => c.get_error_kind(), } @@ -508,45 +346,6 @@ impl HttpCodeError for SqlOverHttpError { } } -#[derive(Debug, thiserror::Error)] -pub(crate) enum ReadPayloadError { - #[error("could not read the HTTP request body: {0}")] - Read(#[from] hyper::Error), - #[error("request is too large (max is {limit} bytes)")] - BodyTooLarge { limit: usize }, - #[error("could not parse the HTTP request body: {0}")] - Parse(#[from] serde_json::Error), -} - -impl From> for ReadPayloadError { - fn from(value: ReadBodyError) -> Self { - match value { - ReadBodyError::BodyTooLarge { limit } => Self::BodyTooLarge { limit }, - ReadBodyError::Read(e) => Self::Read(e), - } - } -} - -impl ReportableError for ReadPayloadError { - fn get_error_kind(&self) -> ErrorKind { - match self { - ReadPayloadError::Read(_) => ErrorKind::ClientDisconnect, - ReadPayloadError::BodyTooLarge { .. } => ErrorKind::User, - ReadPayloadError::Parse(_) => ErrorKind::User, - } - } -} - -impl HttpCodeError for ReadPayloadError { - fn get_http_status_code(&self) -> StatusCode { - match self { - ReadPayloadError::Read(_) => StatusCode::BAD_REQUEST, - ReadPayloadError::BodyTooLarge { .. } => StatusCode::PAYLOAD_TOO_LARGE, - ReadPayloadError::Parse(_) => StatusCode::BAD_REQUEST, - } - } -} - #[derive(Debug, thiserror::Error)] pub(crate) enum SqlOverHttpCancel { #[error("query was cancelled")] @@ -637,14 +436,7 @@ async fn handle_inner( "handling interactive connection from client" ); - let conn_info = get_conn_info( - &config.authentication_config, - ctx, - request.headers(), - // todo: race condition? - // we're unlikely to change the common names. - config.tls_config.load().as_deref(), - )?; + let conn_info = get_conn_info(&config.authentication_config, ctx, None, request.headers())?; info!( user = conn_info.conn_info.user_info.user.as_str(), "credentials" @@ -730,9 +522,17 @@ async fn handle_db_inner( ComputeCredentialKeys::JwtPayload(payload) if backend.auth_backend.is_local_proxy() => { - let mut client = backend.connect_to_local_postgres(ctx, conn_info).await?; - let (cli_inner, _dsc) = client.client_inner(); - cli_inner.set_jwt_session(&payload).await?; + #[cfg(feature = "testing")] + let disable_pg_session_jwt = config.disable_pg_session_jwt; + #[cfg(not(feature = "testing"))] + let disable_pg_session_jwt = false; + let mut client = backend + .connect_to_local_postgres(ctx, conn_info, disable_pg_session_jwt) + .await?; + if !disable_pg_session_jwt { + let (cli_inner, _dsc) = client.client_inner(); + cli_inner.set_jwt_session(&payload).await?; + } Client::Local(client) } _ => { @@ -831,12 +631,6 @@ static HEADERS_TO_FORWARD: &[&HeaderName] = &[ &TXN_DEFERRABLE, ]; -pub(crate) fn uuid_to_header_value(id: Uuid) -> HeaderValue { - let mut uuid = [0; uuid::fmt::Hyphenated::LENGTH]; - HeaderValue::from_str(id.as_hyphenated().encode_lower(&mut uuid[..])) - .expect("uuid hyphenated format should be all valid header characters") -} - async fn handle_auth_broker_inner( ctx: &RequestContext, request: Request, @@ -866,7 +660,7 @@ async fn handle_auth_broker_inner( req = req.header(&NEON_REQUEST_ID, uuid_to_header_value(ctx.session_id())); let req = req - .body(body) + .body(body.map_err(|e| e).boxed()) //TODO: is there a potential for a regression here? 
.expect("all headers and params received via hyper should be valid for request"); // todo: map body to count egress @@ -1102,7 +896,6 @@ async fn query_to_json( let columns_len = row_stream.statement.columns().len(); let mut fields = Vec::with_capacity(columns_len); - let mut types = Vec::with_capacity(columns_len); for c in row_stream.statement.columns() { fields.push(json!({ @@ -1114,8 +907,6 @@ async fn query_to_json( "dataTypeModifier": c.type_modifier(), "format": "text", })); - - types.push(c.type_().clone()); } let raw_output = parsed_headers.raw_output; @@ -1137,7 +928,7 @@ async fn query_to_json( )); } - let row = pg_text_row_to_json(&row, &types, raw_output, array_mode)?; + let row = pg_text_row_to_json(&row, raw_output, array_mode)?; rows.push(row); // assumption: parsing pg text and converting to json takes CPU time. diff --git a/proxy/src/serverless/websocket.rs b/proxy/src/serverless/websocket.rs index 8648a94869..1960709fba 100644 --- a/proxy/src/serverless/websocket.rs +++ b/proxy/src/serverless/websocket.rs @@ -17,7 +17,8 @@ use crate::config::ProxyConfig; use crate::context::RequestContext; use crate::error::ReportableError; use crate::metrics::Metrics; -use crate::proxy::{ClientMode, ErrorSource, handle_client}; +use crate::pglb::{ClientMode, handle_connection}; +use crate::proxy::ErrorSource; use crate::rate_limiter::EndpointRateLimiter; pin_project! { @@ -142,7 +143,7 @@ pub(crate) async fn serve_websocket( .client_connections .guard(crate::metrics::Protocol::Ws); - let res = Box::pin(handle_client( + let res = Box::pin(handle_connection( config, auth_backend, &ctx, @@ -167,7 +168,7 @@ pub(crate) async fn serve_websocket( Ok(Some(p)) => { ctx.set_success(); ctx.log_connect(); - match p.proxy_pass(&config.connect_to_compute).await { + match p.proxy_pass().await { Ok(()) => Ok(()), Err(ErrorSource::Client(err)) => Err(err).context("client"), Err(ErrorSource::Compute(err)) => Err(err).context("compute"), diff --git a/proxy/src/stream.rs b/proxy/src/stream.rs index c49a431c95..4e55654515 100644 --- a/proxy/src/stream.rs +++ b/proxy/src/stream.rs @@ -199,27 +199,27 @@ impl PqStream { let probe_msg; let mut msg = &*msg; - if let Some(ctx) = ctx { - if ctx.get_testodrome_id().is_some() { - let tag = match error_kind { - ErrorKind::User => "client", - ErrorKind::ClientDisconnect => "client", - ErrorKind::RateLimit => "proxy", - ErrorKind::ServiceRateLimit => "proxy", - ErrorKind::Quota => "proxy", - ErrorKind::Service => "proxy", - ErrorKind::ControlPlane => "controlplane", - ErrorKind::Postgres => "other", - ErrorKind::Compute => "compute", - }; - probe_msg = typed_json::json!({ - "tag": tag, - "msg": msg, - "cold_start_info": ctx.cold_start_info(), - }) - .to_string(); - msg = &probe_msg; - } + if let Some(ctx) = ctx + && ctx.get_testodrome_id().is_some() + { + let tag = match error_kind { + ErrorKind::User => "client", + ErrorKind::ClientDisconnect => "client", + ErrorKind::RateLimit => "proxy", + ErrorKind::ServiceRateLimit => "proxy", + ErrorKind::Quota => "proxy", + ErrorKind::Service => "proxy", + ErrorKind::ControlPlane => "controlplane", + ErrorKind::Postgres => "other", + ErrorKind::Compute => "compute", + }; + probe_msg = typed_json::json!({ + "tag": tag, + "msg": msg, + "cold_start_info": ctx.cold_start_info(), + }) + .to_string(); + msg = &probe_msg; } // TODO: either preserve the error code from postgres, or assign error codes to proxy errors. 
diff --git a/proxy/src/tls/mod.rs b/proxy/src/tls/mod.rs index 7fe71abf48..f576214255 100644 --- a/proxy/src/tls/mod.rs +++ b/proxy/src/tls/mod.rs @@ -3,6 +3,8 @@ pub mod postgres_rustls; pub mod server_config; use anyhow::Context; +use base64::Engine as _; +use base64::prelude::BASE64_STANDARD; use rustls::pki_types::CertificateDer; use sha2::{Digest, Sha256}; use tracing::{error, info}; @@ -58,7 +60,7 @@ impl TlsServerEndPoint { let oid = certificate.signature_algorithm.oid; if SHA256_OIDS.contains(&oid) { let tls_server_end_point: [u8; 32] = Sha256::new().chain_update(cert).finalize().into(); - info!(%subject, tls_server_end_point = %base64::encode(tls_server_end_point), "determined channel binding"); + info!(%subject, tls_server_end_point = %BASE64_STANDARD.encode(tls_server_end_point), "determined channel binding"); Ok(Self::Sha256(tls_server_end_point)) } else { error!(%subject, "unknown channel binding"); diff --git a/proxy/src/tls/postgres_rustls.rs b/proxy/src/tls/postgres_rustls.rs index 013b307f0b..9269ad8a06 100644 --- a/proxy/src/tls/postgres_rustls.rs +++ b/proxy/src/tls/postgres_rustls.rs @@ -2,10 +2,11 @@ use std::convert::TryFrom; use std::sync::Arc; use postgres_client::tls::MakeTlsConnect; -use rustls::ClientConfig; -use rustls::pki_types::ServerName; +use rustls::pki_types::{InvalidDnsNameError, ServerName}; use tokio::io::{AsyncRead, AsyncWrite}; +use crate::config::ComputeConfig; + mod private { use std::future::Future; use std::io; @@ -123,36 +124,27 @@ mod private { } } -/// A `MakeTlsConnect` implementation using `rustls`. -/// -/// That way you can connect to PostgreSQL using `rustls` as the TLS stack. -#[derive(Clone)] -pub struct MakeRustlsConnect { - pub config: Arc, -} - -impl MakeRustlsConnect { - /// Creates a new `MakeRustlsConnect` from the provided `ClientConfig`. 
- #[must_use] - pub fn new(config: Arc) -> Self { - Self { config } - } -} - -impl MakeTlsConnect for MakeRustlsConnect +impl MakeTlsConnect for ComputeConfig where S: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Stream = private::RustlsStream; type TlsConnect = private::RustlsConnect; - type Error = rustls::pki_types::InvalidDnsNameError; + type Error = InvalidDnsNameError; - fn make_tls_connect(&mut self, hostname: &str) -> Result { - ServerName::try_from(hostname).map(|dns_name| { - private::RustlsConnect(private::RustlsConnectData { - hostname: dns_name.to_owned(), - connector: Arc::clone(&self.config).into(), - }) - }) + fn make_tls_connect(&self, hostname: &str) -> Result { + make_tls_connect(&self.tls, hostname) } } + +pub fn make_tls_connect( + tls: &Arc, + hostname: &str, +) -> Result { + ServerName::try_from(hostname).map(|dns_name| { + private::RustlsConnect(private::RustlsConnectData { + hostname: dns_name.to_owned(), + connector: tls.clone().into(), + }) + }) +} diff --git a/proxy/src/usage_metrics.rs b/proxy/src/usage_metrics.rs index 115b958c54..c82c4865a7 100644 --- a/proxy/src/usage_metrics.rs +++ b/proxy/src/usage_metrics.rs @@ -399,7 +399,7 @@ async fn collect_metrics_iteration( fn create_remote_path_prefix(now: DateTime) -> String { format!( - "year={year:04}/month={month:02}/day={day:02}/{hour:02}:{minute:02}:{second:02}Z", + "year={year:04}/month={month:02}/day={day:02}/hour={hour:02}/{hour:02}:{minute:02}:{second:02}Z", year = now.year(), month = now.month(), day = now.day(), @@ -461,7 +461,7 @@ async fn upload_backup_events( real_now.second().into(), real_now.nanosecond(), )); - let path = format!("{path_prefix}_{id}.json.gz"); + let path = format!("{path_prefix}_{id}.ndjson.gz"); let remote_path = match RemotePath::from_string(&path) { Ok(remote_path) => remote_path, Err(e) => { @@ -471,9 +471,12 @@ async fn upload_backup_events( // TODO: This is async compression from Vec to Vec. Rewrite as byte stream. // Use sync compression in blocking threadpool. 
- let data = serde_json::to_vec(chunk).context("serialize metrics")?; let mut encoder = GzipEncoder::new(Vec::new()); - encoder.write_all(&data).await.context("compress metrics")?; + for event in chunk.events.iter() { + let data = serde_json::to_vec(event).context("serialize metrics")?; + encoder.write_all(&data).await.context("compress metrics")?; + encoder.write_all(b"\n").await.context("compress metrics")?; + } encoder.shutdown().await.context("compress metrics")?; let compressed_data: Bytes = encoder.get_ref().clone().into(); backoff::retry( @@ -499,7 +502,7 @@ async fn upload_backup_events( #[cfg(test)] mod tests { use std::fs; - use std::io::BufReader; + use std::io::{BufRead, BufReader}; use std::sync::{Arc, Mutex}; use anyhow::Error; @@ -673,11 +676,22 @@ mod tests { { let path = local_fs_path.join(&path_prefix).to_string(); if entry.path().to_str().unwrap().starts_with(&path) { - let chunk = serde_json::from_reader(flate2::bufread::GzDecoder::new( - BufReader::new(fs::File::open(entry.into_path()).unwrap()), - )) - .unwrap(); - stored_chunks.push(chunk); + let file = fs::File::open(entry.into_path()).unwrap(); + let decoder = flate2::bufread::GzDecoder::new(BufReader::new(file)); + let reader = BufReader::new(decoder); + + let mut events: Vec> = Vec::new(); + for line in reader.lines() { + let line = line.unwrap(); + let event: Event = serde_json::from_str(&line).unwrap(); + events.push(event); + } + + let report = Report { + events: Cow::Owned(events), + }; + + stored_chunks.push(report); } } storage_test_dir.close().ok(); diff --git a/proxy/src/util.rs b/proxy/src/util.rs new file mode 100644 index 0000000000..7fc2d9fbdb --- /dev/null +++ b/proxy/src/util.rs @@ -0,0 +1,14 @@ +use std::pin::pin; + +use futures::future::{Either, select}; +use tokio_util::sync::CancellationToken; + +pub async fn run_until_cancelled( + f: F, + cancellation_token: &CancellationToken, +) -> Option { + match select(pin!(f), pin!(cancellation_token.cancelled())).await { + Either::Left((f, _)) => Some(f), + Either::Right(((), _)) => None, + } +} diff --git a/pyproject.toml b/pyproject.toml index c6dfdc223c..e7e314d144 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ pytest = "^7.4.4" psycopg2-binary = "^2.9.10" typing-extensions = "^4.12.2" PyJWT = {version = "^2.1.0", extras = ["crypto"]} -requests = "^2.32.3" +requests = "^2.32.4" pytest-xdist = "^3.3.1" asyncpg = "^0.30.0" aiopg = "^1.4.0" diff --git a/rust-toolchain.toml b/rust-toolchain.toml index c48def3483..d20b46e755 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.87.0" +channel = "1.88.0" profile = "default" # The default profile includes rustc, rust-std, cargo, rust-docs, rustfmt and clippy. 
# https://rust-lang.github.io/rustup/concepts/profiles.html diff --git a/safekeeper/Cargo.toml b/safekeeper/Cargo.toml index 0a8cc415be..6955028c73 100644 --- a/safekeeper/Cargo.toml +++ b/safekeeper/Cargo.toml @@ -58,6 +58,7 @@ metrics.workspace = true pem.workspace = true postgres_backend.workspace = true postgres_ffi.workspace = true +postgres_versioninfo.workspace = true pq_proto.workspace = true remote_storage.workspace = true safekeeper_api.workspace = true diff --git a/safekeeper/client/src/mgmt_api.rs b/safekeeper/client/src/mgmt_api.rs index b364ac8e48..b4bb193a4b 100644 --- a/safekeeper/client/src/mgmt_api.rs +++ b/safekeeper/client/src/mgmt_api.rs @@ -8,8 +8,8 @@ use std::error::Error as _; use http_utils::error::HttpErrorBody; use reqwest::{IntoUrl, Method, StatusCode}; use safekeeper_api::models::{ - self, PullTimelineRequest, PullTimelineResponse, SafekeeperUtilization, TimelineCreateRequest, - TimelineStatus, + self, PullTimelineRequest, PullTimelineResponse, SafekeeperStatus, SafekeeperUtilization, + TimelineCreateRequest, TimelineStatus, }; use utils::id::{NodeId, TenantId, TimelineId}; use utils::logging::SecretString; @@ -52,7 +52,7 @@ pub trait ResponseErrorMessageExt: Sized { impl ResponseErrorMessageExt for reqwest::Response { async fn error_from_body(self) -> Result { let status = self.status(); - if !(status.is_client_error() || status.is_server_error()) { + if status.is_success() { return Ok(self); } @@ -183,6 +183,12 @@ impl Client { self.get(&uri).await } + pub async fn status(&self) -> Result { + let uri = format!("{}/v1/status", self.mgmt_api_endpoint); + let resp = self.get(&uri).await?; + resp.json().await.map_err(Error::ReceiveBody) + } + pub async fn utilization(&self) -> Result { let uri = format!("{}/v1/utilization", self.mgmt_api_endpoint); let resp = self.get(&uri).await?; diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 8d31ada24f..8fda625817 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -18,9 +18,10 @@ use metrics::set_build_info_metric; use remote_storage::RemoteStorageConfig; use safekeeper::defaults::{ DEFAULT_CONTROL_FILE_SAVE_INTERVAL, DEFAULT_EVICTION_MIN_RESIDENT, DEFAULT_HEARTBEAT_TIMEOUT, - DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_MAX_OFFLOADER_LAG_BYTES, DEFAULT_PARTIAL_BACKUP_CONCURRENCY, - DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, DEFAULT_SSL_CERT_FILE, - DEFAULT_SSL_CERT_RELOAD_PERIOD, DEFAULT_SSL_KEY_FILE, + DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_MAX_OFFLOADER_LAG_BYTES, + DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES, DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES, + DEFAULT_PARTIAL_BACKUP_CONCURRENCY, DEFAULT_PARTIAL_BACKUP_TIMEOUT, DEFAULT_PG_LISTEN_ADDR, + DEFAULT_SSL_CERT_FILE, DEFAULT_SSL_CERT_RELOAD_PERIOD, DEFAULT_SSL_KEY_FILE, }; use safekeeper::wal_backup::WalBackup; use safekeeper::{ @@ -138,6 +139,15 @@ struct Args { /// Safekeeper won't be elected for WAL offloading if it is lagging for more than this value in bytes #[arg(long, default_value_t = DEFAULT_MAX_OFFLOADER_LAG_BYTES)] max_offloader_lag: u64, + /* BEGIN_HADRON */ + /// Safekeeper will re-elect a new offloader if the current backup lagging for more than this value in bytes + #[arg(long, default_value_t = DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES)] + max_reelect_offloader_lag_bytes: u64, + /// Safekeeper will stop accepting new WALs if the timeline disk usage exceeds this value in bytes. + /// Setting this value to 0 disables the limit. 
+ #[arg(long, default_value_t = DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES)] + max_timeline_disk_usage_bytes: u64, + /* END_HADRON */ /// Number of max parallel WAL segments to be offloaded to remote storage. #[arg(long, default_value = "5")] wal_backup_parallel_jobs: usize, @@ -391,6 +401,10 @@ async fn main() -> anyhow::Result<()> { peer_recovery_enabled: args.peer_recovery, remote_storage: args.remote_storage, max_offloader_lag_bytes: args.max_offloader_lag, + /* BEGIN_HADRON */ + max_reelect_offloader_lag_bytes: args.max_reelect_offloader_lag_bytes, + max_timeline_disk_usage_bytes: args.max_timeline_disk_usage_bytes, + /* END_HADRON */ wal_backup_enabled: !args.disable_wal_backup, backup_parallel_jobs: args.wal_backup_parallel_jobs, pg_auth, diff --git a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index 1bf3e4cac1..76c2223891 100644 --- a/safekeeper/src/control_file.rs +++ b/safekeeper/src/control_file.rs @@ -17,6 +17,7 @@ use utils::crashsafe::durable_rename; use crate::control_file_upgrade::{downgrade_v10_to_v9, upgrade_control_file}; use crate::metrics::PERSIST_CONTROL_FILE_SECONDS; +use crate::metrics::WAL_DISK_IO_ERRORS; use crate::state::{EvictionState, TimelinePersistentState}; pub const SK_MAGIC: u32 = 0xcafeceefu32; @@ -192,11 +193,14 @@ impl TimelinePersistentState { impl Storage for FileStorage { /// Persists state durably to the underlying storage. async fn persist(&mut self, s: &TimelinePersistentState) -> Result<()> { + // start timer for metrics let _timer = PERSIST_CONTROL_FILE_SECONDS.start_timer(); - // write data to safekeeper.control.partial let control_partial_path = self.timeline_dir.join(CONTROL_FILE_NAME_PARTIAL); let mut control_partial = File::create(&control_partial_path).await.with_context(|| { + /* BEGIN_HADRON */ + WAL_DISK_IO_ERRORS.inc(); + /*END_HADRON */ format!( "failed to create partial control file at: {}", &control_partial_path @@ -206,20 +210,24 @@ impl Storage for FileStorage { let buf: Vec = s.write_to_buf()?; control_partial.write_all(&buf).await.with_context(|| { - format!( - "failed to write safekeeper state into control file at: {}", - control_partial_path - ) + /* BEGIN_HADRON */ + WAL_DISK_IO_ERRORS.inc(); + /*END_HADRON */ + format!("failed to write safekeeper state into control file at: {control_partial_path}") })?; control_partial.flush().await.with_context(|| { - format!( - "failed to flush safekeeper state into control file at: {}", - control_partial_path - ) + /* BEGIN_HADRON */ + WAL_DISK_IO_ERRORS.inc(); + /*END_HADRON */ + format!("failed to flush safekeeper state into control file at: {control_partial_path}") })?; let control_path = self.timeline_dir.join(CONTROL_FILE_NAME); - durable_rename(&control_partial_path, &control_path, !self.no_sync).await?; + durable_rename(&control_partial_path, &control_path, !self.no_sync) + .await + /* BEGIN_HADRON */ + .inspect_err(|_| WAL_DISK_IO_ERRORS.inc())?; + /* END_HADRON */ // update internal state self.state = s.clone(); diff --git a/safekeeper/src/control_file_upgrade.rs b/safekeeper/src/control_file_upgrade.rs index 1ad9e62f9b..555cbe457b 100644 --- a/safekeeper/src/control_file_upgrade.rs +++ b/safekeeper/src/control_file_upgrade.rs @@ -2,6 +2,7 @@ use std::vec; use anyhow::{Result, bail}; +use postgres_versioninfo::PgVersionId; use pq_proto::SystemId; use safekeeper_api::membership::{Configuration, INVALID_GENERATION}; use safekeeper_api::{ServerInfo, Term}; @@ -46,7 +47,7 @@ struct SafeKeeperStateV1 { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub 
struct ServerInfoV2 { /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: SystemId, pub tenant_id: TenantId, pub timeline_id: TimelineId, @@ -75,7 +76,7 @@ pub struct SafeKeeperStateV2 { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ServerInfoV3 { /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: SystemId, #[serde(with = "hex")] pub tenant_id: TenantId, @@ -444,13 +445,13 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result TimelinePersisten mod tests { use std::str::FromStr; + use postgres_versioninfo::PgMajorVersion; use utils::Hex; use utils::id::NodeId; @@ -563,7 +565,7 @@ mod tests { epoch: 43, }, server: ServerInfoV2 { - pg_version: 14, + pg_version: PgVersionId::from(PgMajorVersion::PG14), system_id: 0x1234567887654321, tenant_id, timeline_id, @@ -586,8 +588,8 @@ mod tests { 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // epoch 0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // pg_version - 0x0e, 0x00, 0x00, 0x00, + // pg_version = 140000 + 0xE0, 0x22, 0x02, 0x00, // system_id 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, // tenant_id @@ -626,7 +628,7 @@ mod tests { }]), }, server: ServerInfoV2 { - pg_version: 14, + pg_version: PgVersionId::from(PgMajorVersion::PG14), system_id: 0x1234567887654321, tenant_id, timeline_id, @@ -646,7 +648,7 @@ mod tests { let expected = [ 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, + 0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0xcf, 0x04, 0x80, 0x92, 0x97, 0x07, 0xee, 0x75, 0x37, 0x23, 0x37, 0xef, 0xaa, 0x5e, 0xcf, 0x96, 0x11, 0x2d, 0xed, 0x66, 0x42, 0x2a, 0xa5, 0xe9, 0x53, 0xe5, 0x44, 0x0f, 0xa5, 0x42, 0x7a, 0xc4, 0x78, 0x56, 0x34, 0x12, 0xc4, 0x7a, 0x42, 0xa5, @@ -675,7 +677,7 @@ mod tests { }]), }, server: ServerInfoV3 { - pg_version: 14, + pg_version: PgVersionId::from(PgMajorVersion::PG14), system_id: 0x1234567887654321, tenant_id, timeline_id, @@ -695,7 +697,7 @@ mod tests { let expected = [ 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, + 0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x66, 0x30, 0x34, 0x38, 0x30, 0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37, 0x32, 0x33, 0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36, @@ -731,7 +733,7 @@ mod tests { }]), }, server: ServerInfo { - pg_version: 14, + pg_version: PgVersionId::from(PgMajorVersion::PG14), system_id: 0x1234567887654321, wal_seg_size: 0x12345678, }, @@ -765,7 +767,7 @@ mod tests { 0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56, + 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x63, 0x34, 0x37, 0x61, 0x34, 0x32, 0x61, 0x35, 0x30, 0x66, 0x34, 0x34, 0x65, 0x35, 0x35, 0x33, 0x65, 0x39, 0x61, 0x35, 0x32, 0x61, 0x34, 0x32, 0x36, 0x36, 0x65, 0x64, 0x32, 0x64, 0x31, 0x31, diff --git a/safekeeper/src/handler.rs b/safekeeper/src/handler.rs index b54bee8bfb..373589a18e 100644 --- a/safekeeper/src/handler.rs +++ b/safekeeper/src/handler.rs @@ -73,7 +73,7 @@ fn parse_cmd(cmd: &str) -> anyhow::Result { let re = Regex::new(r"START_WAL_PUSH(\s+?\((.*)\))?").unwrap(); let caps = re .captures(cmd) - .context(format!("failed to parse START_WAL_PUSH command {}", cmd))?; + .context(format!("failed to parse START_WAL_PUSH command {cmd}"))?; // capture () content let options = caps.get(2).map(|m| m.as_str()).unwrap_or(""); // default values @@ -85,24 +85,20 @@ fn parse_cmd(cmd: &str) -> anyhow::Result { } let mut kvit = kvstr.split_whitespace(); let key = kvit.next().context(format!( - "failed to parse key in kv {} in command {}", - kvstr, cmd + "failed to parse key in kv {kvstr} in command {cmd}" ))?; let value = kvit.next().context(format!( - "failed to parse value in kv {} in command {}", - kvstr, cmd + "failed to parse value in kv {kvstr} in command {cmd}" ))?; let value_trimmed = value.trim_matches('\''); if key == "proto_version" { proto_version = value_trimmed.parse::().context(format!( - "failed to parse proto_version value {} in command {}", - value, cmd + "failed to parse proto_version value {value} in command {cmd}" ))?; } if key == "allow_timeline_creation" { allow_timeline_creation = value_trimmed.parse::().context(format!( - "failed to parse allow_timeline_creation value {} in command {}", - value, cmd + "failed to parse allow_timeline_creation value {value} in command {cmd}" ))?; } } @@ -118,7 +114,7 @@ fn parse_cmd(cmd: &str) -> anyhow::Result { .unwrap(); let caps = re .captures(cmd) - .context(format!("failed to parse START_REPLICATION command {}", cmd))?; + .context(format!("failed to parse START_REPLICATION command {cmd}"))?; let start_lsn = Lsn::from_str(&caps[1]).context("parse start LSN from START_REPLICATION command")?; let term = if let Some(m) = caps.get(2) { @@ -224,7 +220,7 @@ impl postgres_backend::Handler stripe_size: ShardStripeSize(stripe_size), }; self.shard = - Some(ShardIdentity::from_params(ShardNumber(number), ¶ms)); + Some(ShardIdentity::from_params(ShardNumber(number), params)); } _ => { return Err(QueryError::Other(anyhow::anyhow!( diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index b4d9cadd6d..c461c071da 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -61,6 +61,13 @@ pub mod defaults { pub const DEFAULT_HEARTBEAT_TIMEOUT: &str = "5000ms"; pub const DEFAULT_MAX_OFFLOADER_LAG_BYTES: u64 = 128 * (1 << 20); + /* BEGIN_HADRON */ + // Default leader re-elect is 0(disabled). SK will re-elect leader if the current leader is lagging this many bytes. + pub const DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES: u64 = 0; + // Default disk usage limit is 0 (disabled). It means each timeline by default can use up to this many WAL + // disk space on this SK until SK begins to reject WALs. 
+ pub const DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES: u64 = 0; + /* END_HADRON */ pub const DEFAULT_PARTIAL_BACKUP_TIMEOUT: &str = "15m"; pub const DEFAULT_CONTROL_FILE_SAVE_INTERVAL: &str = "300s"; pub const DEFAULT_PARTIAL_BACKUP_CONCURRENCY: &str = "5"; @@ -99,6 +106,10 @@ pub struct SafeKeeperConf { pub peer_recovery_enabled: bool, pub remote_storage: Option, pub max_offloader_lag_bytes: u64, + /* BEGIN_HADRON */ + pub max_reelect_offloader_lag_bytes: u64, + pub max_timeline_disk_usage_bytes: u64, + /* END_HADRON */ pub backup_parallel_jobs: usize, pub wal_backup_enabled: bool, pub pg_auth: Option>, @@ -151,6 +162,10 @@ impl SafeKeeperConf { sk_auth_token: None, heartbeat_timeout: Duration::new(5, 0), max_offloader_lag_bytes: defaults::DEFAULT_MAX_OFFLOADER_LAG_BYTES, + /* BEGIN_HADRON */ + max_reelect_offloader_lag_bytes: defaults::DEFAULT_MAX_REELECT_OFFLOADER_LAG_BYTES, + max_timeline_disk_usage_bytes: defaults::DEFAULT_MAX_TIMELINE_DISK_USAGE_BYTES, + /* END_HADRON */ current_thread_runtime: false, walsenders_keep_horizon: false, partial_backup_timeout: Duration::from_secs(0), diff --git a/safekeeper/src/metrics.rs b/safekeeper/src/metrics.rs index cb21a5f6d2..9baa80f73a 100644 --- a/safekeeper/src/metrics.rs +++ b/safekeeper/src/metrics.rs @@ -58,6 +58,25 @@ pub static FLUSH_WAL_SECONDS: Lazy = Lazy::new(|| { ) .expect("Failed to register safekeeper_flush_wal_seconds histogram") }); +/* BEGIN_HADRON */ +pub static WAL_DISK_IO_ERRORS: Lazy = Lazy::new(|| { + register_int_counter!( + "safekeeper_wal_disk_io_errors", + "Number of disk I/O errors when creating and flushing WALs and control files" + ) + .expect("Failed to register safekeeper_wal_disk_io_errors counter") +}); +pub static WAL_STORAGE_LIMIT_ERRORS: Lazy = Lazy::new(|| { + register_int_counter!( + "safekeeper_wal_storage_limit_errors", + concat!( + "Number of errors due to timeline WAL storage utilization exceeding configured limit. ", + "An increase in this metric indicates issues backing up or removing WALs." 
+ ) + ) + .expect("Failed to register safekeeper_wal_storage_limit_errors counter") +}); +/* END_HADRON */ pub static PERSIST_CONTROL_FILE_SECONDS: Lazy = Lazy::new(|| { register_histogram!( "safekeeper_persist_control_file_seconds", @@ -138,6 +157,15 @@ pub static BACKUP_ERRORS: Lazy = Lazy::new(|| { ) .expect("Failed to register safekeeper_backup_errors_total counter") }); +/* BEGIN_HADRON */ +pub static BACKUP_REELECT_LEADER_COUNT: Lazy = Lazy::new(|| { + register_int_counter!( + "safekeeper_backup_reelect_leader_total", + "Number of times the backup leader was reelected" + ) + .expect("Failed to register safekeeper_backup_reelect_leader_total counter") +}); +/* END_HADRON */ pub static BROKER_PUSH_ALL_UPDATES_SECONDS: Lazy = Lazy::new(|| { register_histogram!( "safekeeper_broker_push_update_seconds", diff --git a/safekeeper/src/pull_timeline.rs b/safekeeper/src/pull_timeline.rs index 14aef1ee5e..1c9e5bade5 100644 --- a/safekeeper/src/pull_timeline.rs +++ b/safekeeper/src/pull_timeline.rs @@ -1,5 +1,6 @@ use std::cmp::min; use std::io::{self, ErrorKind}; +use std::ops::RangeInclusive; use std::sync::Arc; use anyhow::{Context, Result, anyhow, bail}; @@ -34,7 +35,7 @@ use crate::control_file::CONTROL_FILE_NAME; use crate::state::{EvictionState, TimelinePersistentState}; use crate::timeline::{Timeline, TimelineError, WalResidentTimeline}; use crate::timelines_global_map::{create_temp_timeline_dir, validate_temp_timeline}; -use crate::wal_storage::open_wal_file; +use crate::wal_storage::{open_wal_file, wal_file_paths}; use crate::{GlobalTimelines, debug_dump, wal_backup}; /// Stream tar archive of timeline to tx. @@ -95,8 +96,8 @@ pub async fn stream_snapshot( /// State needed while streaming the snapshot. pub struct SnapshotContext { - pub from_segno: XLogSegNo, // including - pub upto_segno: XLogSegNo, // including + /// The interval of segment numbers. If None, the timeline hasn't had writes yet, so only send the control file + pub from_to_segno: Option>, pub term: Term, pub last_log_term: Term, pub flush_lsn: Lsn, @@ -174,23 +175,35 @@ pub async fn stream_snapshot_resident_guts( .await?; pausable_failpoint!("sk-snapshot-after-list-pausable"); - let tli_dir = tli.get_timeline_dir(); - info!( - "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}", - bctx.upto_segno - bctx.from_segno + 1, - bctx.from_segno, - bctx.upto_segno, - bctx.term, - bctx.last_log_term, - bctx.flush_lsn, - ); - for segno in bctx.from_segno..=bctx.upto_segno { - let (mut sf, is_partial) = open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await?; - let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size); - if is_partial { - wal_file_name.push_str(".partial"); + if let Some(from_to_segno) = &bctx.from_to_segno { + let tli_dir = tli.get_timeline_dir(); + info!( + "sending {} segments [{:#X}-{:#X}], term={}, last_log_term={}, flush_lsn={}", + from_to_segno.end() - from_to_segno.start() + 1, + from_to_segno.start(), + from_to_segno.end(), + bctx.term, + bctx.last_log_term, + bctx.flush_lsn, + ); + for segno in from_to_segno.clone() { + let Some((mut sf, is_partial)) = + open_wal_file(&tli_dir, segno, bctx.wal_seg_size).await? 
+ else { + // File is not found + let (wal_file_path, _wal_file_partial_path) = + wal_file_paths(&tli_dir, segno, bctx.wal_seg_size); + tracing::warn!("couldn't find WAL segment file {wal_file_path}"); + bail!("couldn't find WAL segment file {wal_file_path}") + }; + let mut wal_file_name = XLogFileName(PG_TLI, segno, bctx.wal_seg_size); + if is_partial { + wal_file_name.push_str(".partial"); + } + ar.append_file(&wal_file_name, &mut sf).await?; } - ar.append_file(&wal_file_name, &mut sf).await?; + } else { + info!("Not including any segments into the snapshot"); } // Do the term check before ar.finish to make archive corrupted in case of @@ -338,19 +351,26 @@ impl WalResidentTimeline { // removed further than `backup_lsn`. Since we're holding shared_state // lock and setting `wal_removal_on_hold` later, it guarantees that WAL // won't be removed until we're done. + let timeline_state = shared_state.sk.state(); let from_lsn = min( - shared_state.sk.state().remote_consistent_lsn, - shared_state.sk.state().backup_lsn, + timeline_state.remote_consistent_lsn, + timeline_state.backup_lsn, + ); + let flush_lsn = shared_state.sk.flush_lsn(); + let (send_segments, msg) = if from_lsn == Lsn::INVALID { + (false, "snapshot is called on uninitialized timeline") + } else { + (true, "timeline is initialized") + }; + tracing::info!( + remote_consistent_lsn=%timeline_state.remote_consistent_lsn, + backup_lsn=%timeline_state.backup_lsn, + %flush_lsn, + "{msg}" ); - if from_lsn == Lsn::INVALID { - // this is possible if snapshot is called before handling first - // elected message - bail!("snapshot is called on uninitialized timeline"); - } let from_segno = from_lsn.segment_number(wal_seg_size); let term = shared_state.sk.state().acceptor_state.term; let last_log_term = shared_state.sk.last_log_term(); - let flush_lsn = shared_state.sk.flush_lsn(); let upto_segno = flush_lsn.segment_number(wal_seg_size); // have some limit on max number of segments as a sanity check const MAX_ALLOWED_SEGS: u64 = 1000; @@ -376,9 +396,9 @@ impl WalResidentTimeline { drop(shared_state); let tli_copy = self.wal_residence_guard().await?; + let from_to_segno = send_segments.then_some(from_segno..=upto_segno); let bctx = SnapshotContext { - from_segno, - upto_segno, + from_to_segno, term, last_log_term, flush_lsn, diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 886cac869d..4d15fc9de3 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -9,6 +9,7 @@ use anyhow::{Context, Result, bail}; use byteorder::{LittleEndian, ReadBytesExt}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use postgres_ffi::{MAX_SEND_SIZE, TimeLineID}; +use postgres_versioninfo::{PgMajorVersion, PgVersionId}; use pq_proto::SystemId; use safekeeper_api::membership::{ INVALID_GENERATION, MemberSet, SafekeeperGeneration as Generation, SafekeeperId, @@ -29,7 +30,7 @@ use crate::{control_file, wal_storage}; pub const SK_PROTO_VERSION_2: u32 = 2; pub const SK_PROTO_VERSION_3: u32 = 3; -pub const UNKNOWN_SERVER_VERSION: u32 = 0; +pub const UNKNOWN_SERVER_VERSION: PgVersionId = PgVersionId::UNKNOWN; #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct TermLsn { @@ -64,10 +65,10 @@ impl TermHistory { for i in 0..n_entries { let term = bytes .get_u64_f() - .with_context(|| format!("TermHistory pos {} misses term", i))?; + .with_context(|| format!("TermHistory pos {i} misses term"))?; let lsn = bytes .get_u64_f() - .with_context(|| format!("TermHistory pos {} misses lsn", i))? 
+ .with_context(|| format!("TermHistory pos {i} misses lsn"))? .into(); res.push(TermLsn { term, lsn }) } @@ -121,9 +122,7 @@ impl TermHistory { if let Some(sk_th_last) = sk_th.last() { assert!( sk_th_last.lsn <= sk_wal_end, - "safekeeper term history end {:?} LSN is higher than WAL end {:?}", - sk_th_last, - sk_wal_end + "safekeeper term history end {sk_th_last:?} LSN is higher than WAL end {sk_wal_end:?}" ); } @@ -220,7 +219,7 @@ pub struct ProposerGreeting { pub timeline_id: TimelineId, pub mconf: membership::Configuration, /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: SystemId, pub wal_seg_size: u32, } @@ -231,7 +230,7 @@ pub struct ProposerGreetingV2 { /// proposer-acceptor protocol version pub protocol_version: u32, /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub proposer_id: PgUuid, pub system_id: SystemId, pub timeline_id: TimelineId, @@ -438,11 +437,11 @@ impl ProposerAcceptorMessage { for i in 0..members_len { let id = buf .get_u64_f() - .with_context(|| format!("reading member {} node_id", i))?; - let host = Self::get_cstr(buf).with_context(|| format!("reading member {} host", i))?; + .with_context(|| format!("reading member {i} node_id"))?; + let host = Self::get_cstr(buf).with_context(|| format!("reading member {i} host"))?; let pg_port = buf .get_u16_f() - .with_context(|| format!("reading member {} port", i))?; + .with_context(|| format!("reading member {i} port"))?; let sk = SafekeeperId { id: NodeId(id), host, @@ -463,12 +462,12 @@ impl ProposerAcceptorMessage { for i in 0..new_members_len { let id = buf .get_u64_f() - .with_context(|| format!("reading new member {} node_id", i))?; - let host = Self::get_cstr(buf) - .with_context(|| format!("reading new member {} host", i))?; + .with_context(|| format!("reading new member {i} node_id"))?; + let host = + Self::get_cstr(buf).with_context(|| format!("reading new member {i} host"))?; let pg_port = buf .get_u16_f() - .with_context(|| format!("reading new member {} port", i))?; + .with_context(|| format!("reading new member {i} port"))?; let sk = SafekeeperId { id: NodeId(id), host, @@ -513,7 +512,7 @@ impl ProposerAcceptorMessage { tenant_id, timeline_id, mconf, - pg_version, + pg_version: PgVersionId::from_full_pg_version(pg_version), system_id, wal_seg_size, }; @@ -963,7 +962,8 @@ where * because safekeepers parse WAL headers and the format * may change between versions. */ - if msg.pg_version / 10000 != self.state.server.pg_version / 10000 + if PgMajorVersion::try_from(msg.pg_version)? + != PgMajorVersion::try_from(self.state.server.pg_version)? && self.state.server.pg_version != UNKNOWN_SERVER_VERSION { bail!( @@ -1508,7 +1508,7 @@ mod tests { let mut vote_resp = sk.process_msg(&vote_request).await; match vote_resp.unwrap() { Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(resp.vote_given), - r => panic!("unexpected response: {:?}", r), + r => panic!("unexpected response: {r:?}"), } // reboot... 
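For readers checking the updated control-file test vectors above: switching from a bare major number to `PgVersionId` changes the serialized value from 14 to the full version id 140000, whose little-endian encoding is `E0 22 02 00`. The sketch below redoes that arithmetic with plain `u32` math; it only illustrates the encoding and does not use the real `postgres_versioninfo` types.

// Plain-u32 illustration of the PgVersionId encoding; the real code uses
// postgres_versioninfo::{PgVersionId, PgMajorVersion}.
fn main() {
    let major: u32 = 14;
    let full_version_id = major * 10_000; // PG14 => 140000
    assert_eq!(full_version_id, 140_000);

    // Little-endian bytes, matching the new expected vectors: 0xE0, 0x22, 0x02, 0x00.
    assert_eq!(full_version_id.to_le_bytes(), [0xE0, 0x22, 0x02, 0x00]);

    // The greeting check above compares major versions only, so a proposer on
    // e.g. 14.5 (140005) is still accepted against state recorded as 140000.
    let proposer: u32 = 140_005;
    assert_eq!(proposer / 10_000, full_version_id / 10_000);
}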
@@ -1523,7 +1523,7 @@ mod tests { vote_resp = sk.process_msg(&vote_request).await; match vote_resp.unwrap() { Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(!resp.vote_given), - r => panic!("unexpected response: {:?}", r), + r => panic!("unexpected response: {r:?}"), } } @@ -1750,7 +1750,7 @@ mod tests { }]), }, server: ServerInfo { - pg_version: 14, + pg_version: PgVersionId::from_full_pg_version(140000), system_id: 0x1234567887654321, wal_seg_size: 0x12345678, }, diff --git a/safekeeper/src/send_interpreted_wal.rs b/safekeeper/src/send_interpreted_wal.rs index 2b1fd7b854..3797ac39d1 100644 --- a/safekeeper/src/send_interpreted_wal.rs +++ b/safekeeper/src/send_interpreted_wal.rs @@ -8,15 +8,15 @@ use futures::StreamExt; use futures::future::Either; use pageserver_api::shard::ShardIdentity; use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend}; -use postgres_ffi::get_current_timestamp; use postgres_ffi::waldecoder::{WalDecodeError, WalStreamDecoder}; +use postgres_ffi::{PgMajorVersion, get_current_timestamp}; use pq_proto::{BeMessage, InterpretedWalRecordsBody, WalSndKeepAlive}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::sync::mpsc::error::SendError; use tokio::task::JoinHandle; use tokio::time::MissedTickBehavior; use tracing::{Instrument, error, info, info_span}; -use utils::critical; +use utils::critical_timeline; use utils::lsn::Lsn; use utils::postgres_client::{Compression, InterpretedFormat}; use wal_decoder::models::{InterpretedWalRecord, InterpretedWalRecords}; @@ -78,7 +78,7 @@ pub(crate) struct InterpretedWalReader { shard_senders: HashMap>, shard_notification_rx: Option>, state: Arc>, - pg_version: u32, + pg_version: PgMajorVersion, } /// A handle for [`InterpretedWalReader`] which allows for interacting with it @@ -258,7 +258,7 @@ impl InterpretedWalReader { start_pos: Lsn, tx: tokio::sync::mpsc::Sender, shard: ShardIdentity, - pg_version: u32, + pg_version: PgMajorVersion, appname: &Option, ) -> InterpretedWalReaderHandle { let state = Arc::new(std::sync::RwLock::new(InterpretedWalReaderState::Running { @@ -268,6 +268,8 @@ impl InterpretedWalReader { let (shard_notification_tx, shard_notification_rx) = tokio::sync::mpsc::unbounded_channel(); + let ttid = wal_stream.ttid; + let reader = InterpretedWalReader { wal_stream, shard_senders: HashMap::from([( @@ -300,7 +302,11 @@ impl InterpretedWalReader { .inspect_err(|err| match err { // TODO: we may want to differentiate these errors further. 
InterpretedWalReaderError::Decode(_) => { - critical!("failed to decode WAL record: {err:?}"); + critical_timeline!( + ttid.tenant_id, + ttid.timeline_id, + "failed to read WAL record: {err:?}" + ); } err => error!("failed to read WAL record: {err}"), }) @@ -322,7 +328,7 @@ impl InterpretedWalReader { start_pos: Lsn, tx: tokio::sync::mpsc::Sender, shard: ShardIdentity, - pg_version: u32, + pg_version: PgMajorVersion, shard_notification_rx: Option< tokio::sync::mpsc::UnboundedReceiver, >, @@ -363,9 +369,14 @@ impl InterpretedWalReader { metric.dec(); } + let ttid = self.wal_stream.ttid; match self.run_impl(start_pos).await { Err(err @ InterpretedWalReaderError::Decode(_)) => { - critical!("failed to decode WAL record: {err:?}"); + critical_timeline!( + ttid.tenant_id, + ttid.timeline_id, + "failed to decode WAL record: {err:?}" + ); } Err(err) => error!("failed to read WAL record: {err}"), Ok(()) => info!("interpreted wal reader exiting"), @@ -718,7 +729,7 @@ mod tests { use std::time::Duration; use pageserver_api::shard::{ShardIdentity, ShardStripeSize}; - use postgres_ffi::MAX_SEND_SIZE; + use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion}; use tokio::sync::mpsc::error::TryRecvError; use utils::id::{NodeId, TenantTimelineId}; use utils::lsn::Lsn; @@ -734,7 +745,7 @@ mod tests { const SIZE: usize = 8 * 1024; const MSG_COUNT: usize = 200; - const PG_VERSION: u32 = 17; + const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; const SHARD_COUNT: u8 = 2; let start_lsn = Lsn::from_str("0/149FD18").unwrap(); @@ -876,7 +887,7 @@ mod tests { const SIZE: usize = 8 * 1024; const MSG_COUNT: usize = 200; - const PG_VERSION: u32 = 17; + const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; const SHARD_COUNT: u8 = 2; let start_lsn = Lsn::from_str("0/149FD18").unwrap(); @@ -1025,7 +1036,7 @@ mod tests { const SIZE: usize = 64 * 1024; const MSG_COUNT: usize = 10; - const PG_VERSION: u32 = 17; + const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; const SHARD_COUNT: u8 = 2; const WAL_READER_BATCH_SIZE: usize = 8192; @@ -1148,7 +1159,7 @@ mod tests { const SIZE: usize = 8 * 1024; const MSG_COUNT: usize = 10; - const PG_VERSION: u32 = 17; + const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; let start_lsn = Lsn::from_str("0/149FD18").unwrap(); let env = Env::new(true).unwrap(); diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index 05f827494e..177e759db5 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -12,7 +12,7 @@ use futures::FutureExt; use itertools::Itertools; use parking_lot::Mutex; use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend, PostgresBackendReader, QueryError}; -use postgres_ffi::{MAX_SEND_SIZE, TimestampTz, get_current_timestamp}; +use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, TimestampTz, get_current_timestamp}; use pq_proto::{BeMessage, WalSndKeepAlive, XLogDataBody}; use safekeeper_api::Term; use safekeeper_api::models::{ @@ -559,7 +559,9 @@ impl SafekeeperPostgresHandler { format, compression, } => { - let pg_version = tli.tli.get_state().await.1.server.pg_version / 10000; + let pg_version = + PgMajorVersion::try_from(tli.tli.get_state().await.1.server.pg_version) + .unwrap(); let end_watch_view = end_watch.view(); let wal_residence_guard = tli.wal_residence_guard().await?; let (tx, rx) = tokio::sync::mpsc::channel::(2); diff --git a/safekeeper/src/state.rs b/safekeeper/src/state.rs index 7533005c35..32624d260d 100644 --- a/safekeeper/src/state.rs +++ b/safekeeper/src/state.rs @@ -7,8 +7,9 @@ use 
std::time::SystemTime; use anyhow::{Result, bail}; use postgres_ffi::WAL_SEGMENT_SIZE; +use postgres_versioninfo::{PgMajorVersion, PgVersionId}; use safekeeper_api::membership::Configuration; -use safekeeper_api::models::{TimelineMembershipSwitchResponse, TimelineTermBumpResponse}; +use safekeeper_api::models::TimelineTermBumpResponse; use safekeeper_api::{INITIAL_TERM, ServerInfo, Term}; use serde::{Deserialize, Serialize}; use tracing::info; @@ -82,6 +83,11 @@ pub enum EvictionState { Offloaded(Lsn), } +pub struct MembershipSwitchResult { + pub previous_conf: Configuration, + pub current_conf: Configuration, +} + impl TimelinePersistentState { /// commit_lsn is the same as start_lsn in the normal creaiton; see /// `TimelineCreateRequest` comments.` @@ -149,8 +155,8 @@ impl TimelinePersistentState { &TenantTimelineId::empty(), Configuration::empty(), ServerInfo { - pg_version: 170000, /* Postgres server version (major * 10000) */ - system_id: 0, /* Postgres system identifier */ + pg_version: PgVersionId::from(PgMajorVersion::PG17), + system_id: 0, /* Postgres system identifier */ wal_seg_size: WAL_SEGMENT_SIZE as u32, }, Lsn::INVALID, @@ -260,10 +266,7 @@ where /// Switch into membership configuration `to` if it is higher than the /// current one. - pub async fn membership_switch( - &mut self, - to: Configuration, - ) -> Result { + pub async fn membership_switch(&mut self, to: Configuration) -> Result { let before = self.mconf.clone(); // Is switch allowed? if to.generation <= self.mconf.generation { @@ -277,7 +280,7 @@ where self.finish_change(&state).await?; info!("switched membership conf to {} from {}", to, before); } - Ok(TimelineMembershipSwitchResponse { + Ok(MembershipSwitchResult { previous_conf: before, current_conf: self.mconf.clone(), }) diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 588bd4f2c9..95b5fe6d5d 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -26,7 +26,9 @@ use utils::id::{NodeId, TenantId, TenantTimelineId}; use utils::lsn::Lsn; use utils::sync::gate::Gate; -use crate::metrics::{FullTimelineInfo, MISC_OPERATION_SECONDS, WalStorageMetrics}; +use crate::metrics::{ + FullTimelineInfo, MISC_OPERATION_SECONDS, WAL_STORAGE_LIMIT_ERRORS, WalStorageMetrics, +}; use crate::rate_limit::RateLimiter; use crate::receive_wal::WalReceivers; use crate::safekeeper::{AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, TermLsn}; @@ -190,7 +192,14 @@ impl StateSK { &mut self, to: Configuration, ) -> Result { - self.state_mut().membership_switch(to).await + let result = self.state_mut().membership_switch(to).await?; + + Ok(TimelineMembershipSwitchResponse { + previous_conf: result.previous_conf, + current_conf: result.current_conf, + term: self.state().acceptor_state.term, + flush_lsn: self.flush_lsn(), + }) } /// Close open WAL files to release FDs. @@ -395,6 +404,8 @@ pub enum TimelineError { Cancelled(TenantTimelineId), #[error("Timeline {0} was not found in global map")] NotFound(TenantTimelineId), + #[error("Timeline {0} has been deleted")] + Deleted(TenantTimelineId), #[error("Timeline {0} creation is in progress")] CreationInProgress(TenantTimelineId), #[error("Timeline {0} exists on disk, but wasn't loaded on startup")] @@ -1041,6 +1052,39 @@ impl WalResidentTimeline { Ok(ss) } + // BEGIN HADRON + // Check if disk usage by WAL segment files for this timeline exceeds the configured limit. 
+ fn hadron_check_disk_usage( + &self, + shared_state_locked: &mut WriteGuardSharedState<'_>, + ) -> Result<()> { + // The disk usage is calculated based on the number of segments between `last_removed_segno` + // and the current flush LSN segment number. `last_removed_segno` is advanced after + // unneeded WAL files are physically removed from disk (see `update_wal_removal_end()` + // in `timeline_manager.rs`). + let max_timeline_disk_usage_bytes = self.conf.max_timeline_disk_usage_bytes; + if max_timeline_disk_usage_bytes > 0 { + let last_removed_segno = self.last_removed_segno.load(Ordering::Relaxed); + let flush_lsn = shared_state_locked.sk.flush_lsn(); + let wal_seg_size = shared_state_locked.sk.state().server.wal_seg_size as u64; + let current_segno = flush_lsn.segment_number(wal_seg_size as usize); + + let segno_count = current_segno - last_removed_segno; + let disk_usage_bytes = segno_count * wal_seg_size; + + if disk_usage_bytes > max_timeline_disk_usage_bytes { + WAL_STORAGE_LIMIT_ERRORS.inc(); + bail!( + "WAL storage utilization exceeds configured limit of {} bytes: current disk usage: {} bytes", + max_timeline_disk_usage_bytes, + disk_usage_bytes + ); + } + } + Ok(()) + } + // END HADRON + /// Pass arrived message to the safekeeper. pub async fn process_msg( &self, @@ -1053,6 +1097,13 @@ impl WalResidentTimeline { let mut rmsg: Option; { let mut shared_state = self.write_shared_state().await; + // BEGIN HADRON + // Errors from the `hadron_check_disk_usage()` function fail the process_msg() function, which + // gets propagated upward and terminates the entire WalAcceptor. This will cause postgres to + // disconnect from the safekeeper and reestablish another connection. Postgres will keep retrying + // safekeeper connections every second until it can successfully propose WAL to the SK again. + self.hadron_check_disk_usage(&mut shared_state)?; + // END HADRON rmsg = shared_state.sk.safekeeper().process_msg(msg).await?; // if this is AppendResponse, fill in proper hot standby feedback. 
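The `hadron_check_disk_usage` guard above boils down to one multiplication: the number of retained segments (current flush segment minus the last removed segment) times the WAL segment size, compared against `max_timeline_disk_usage_bytes`. Below is a standalone sketch of that arithmetic; the 16 MiB segment size and 10 GiB limit are illustrative assumptions, not values taken from this patch.

// Standalone sketch of the disk-usage guard added to WalResidentTimeline above.
// Segment size and limit are illustrative assumptions, not values from this patch.
fn wal_disk_usage_bytes(last_removed_segno: u64, current_segno: u64, wal_seg_size: u64) -> u64 {
    (current_segno - last_removed_segno) * wal_seg_size
}

fn main() {
    let wal_seg_size: u64 = 16 * 1024 * 1024; // assume the usual 16 MiB WAL segment
    let limit: u64 = 10 * 1024 * 1024 * 1024; // assume a 10 GiB cap (0 disables the check)

    // 600 retained segments = ~9.4 GiB: still accepted.
    assert!(wal_disk_usage_bytes(1_000, 1_600, wal_seg_size) <= limit);

    // 700 retained segments = ~10.9 GiB: process_msg would now bail.
    assert!(wal_disk_usage_bytes(1_000, 1_700, wal_seg_size) > limit);
}

With these example numbers the guard trips once more than 640 segments sit between `last_removed_segno` and the flush LSN; the resulting error terminates the WalAcceptor, and postgres reconnects and keeps retrying, as the comment added to process_msg describes.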
diff --git a/safekeeper/src/timeline_eviction.rs b/safekeeper/src/timeline_eviction.rs index e817dbf6f9..47b65a579a 100644 --- a/safekeeper/src/timeline_eviction.rs +++ b/safekeeper/src/timeline_eviction.rs @@ -342,7 +342,7 @@ where let bytes_read1 = reader1 .read(&mut buffer1[..bytes_to_read]) .await - .with_context(|| format!("failed to read from reader1 at offset {}", offset))?; + .with_context(|| format!("failed to read from reader1 at offset {offset}"))?; if bytes_read1 == 0 { anyhow::bail!("unexpected EOF from reader1 at offset {}", offset); } @@ -351,10 +351,7 @@ where .read_exact(&mut buffer2[..bytes_read1]) .await .with_context(|| { - format!( - "failed to read {} bytes from reader2 at offset {}", - bytes_read1, offset - ) + format!("failed to read {bytes_read1} bytes from reader2 at offset {offset}") })?; assert!(bytes_read2 == bytes_read1); diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 48eda92fed..a68752bfdd 100644 --- a/safekeeper/src/timeline_manager.rs +++ b/safekeeper/src/timeline_manager.rs @@ -108,7 +108,7 @@ impl std::fmt::Debug for ManagerCtlMessage { match self { ManagerCtlMessage::GuardRequest(_) => write!(f, "GuardRequest"), ManagerCtlMessage::TryGuardRequest(_) => write!(f, "TryGuardRequest"), - ManagerCtlMessage::GuardDrop(id) => write!(f, "GuardDrop({:?})", id), + ManagerCtlMessage::GuardDrop(id) => write!(f, "GuardDrop({id:?})"), ManagerCtlMessage::BackupPartialReset(_) => write!(f, "BackupPartialReset"), } } diff --git a/safekeeper/src/timelines_global_map.rs b/safekeeper/src/timelines_global_map.rs index e3f7d88f7c..a81a7298a9 100644 --- a/safekeeper/src/timelines_global_map.rs +++ b/safekeeper/src/timelines_global_map.rs @@ -78,7 +78,13 @@ impl GlobalTimelinesState { Some(GlobalMapTimeline::CreationInProgress) => { Err(TimelineError::CreationInProgress(*ttid)) } - None => Err(TimelineError::NotFound(*ttid)), + None => { + if self.has_tombstone(ttid) { + Err(TimelineError::Deleted(*ttid)) + } else { + Err(TimelineError::NotFound(*ttid)) + } + } } } @@ -141,7 +147,7 @@ impl GlobalTimelines { }; let mut tenant_count = 0; for tenants_dir_entry in std::fs::read_dir(&tenants_dir) - .with_context(|| format!("failed to list tenants dir {}", tenants_dir))? + .with_context(|| format!("failed to list tenants dir {tenants_dir}"))? { match &tenants_dir_entry { Ok(tenants_dir_entry) => { @@ -182,7 +188,7 @@ impl GlobalTimelines { let timelines_dir = get_tenant_dir(&conf, &tenant_id); for timelines_dir_entry in std::fs::read_dir(&timelines_dir) - .with_context(|| format!("failed to list timelines dir {}", timelines_dir))? + .with_context(|| format!("failed to list timelines dir {timelines_dir}"))? 
{ match &timelines_dir_entry { Ok(timeline_dir_entry) => { diff --git a/safekeeper/src/wal_backup.rs b/safekeeper/src/wal_backup.rs index 0beb272a60..7e10847a1b 100644 --- a/safekeeper/src/wal_backup.rs +++ b/safekeeper/src/wal_backup.rs @@ -26,7 +26,9 @@ use utils::id::{NodeId, TenantTimelineId}; use utils::lsn::Lsn; use utils::{backoff, pausable_failpoint}; -use crate::metrics::{BACKED_UP_SEGMENTS, BACKUP_ERRORS, WAL_BACKUP_TASKS}; +use crate::metrics::{ + BACKED_UP_SEGMENTS, BACKUP_ERRORS, BACKUP_REELECT_LEADER_COUNT, WAL_BACKUP_TASKS, +}; use crate::timeline::WalResidentTimeline; use crate::timeline_manager::{Manager, StateSnapshot}; use crate::{SafeKeeperConf, WAL_BACKUP_RUNTIME}; @@ -70,8 +72,9 @@ pub(crate) async fn update_task( need_backup: bool, state: &StateSnapshot, ) { - let (offloader, election_dbg_str) = - determine_offloader(&state.peers, state.backup_lsn, mgr.tli.ttid, &mgr.conf); + /* BEGIN_HADRON */ + let (offloader, election_dbg_str) = hadron_determine_offloader(mgr, state); + /* END_HADRON */ let elected_me = Some(mgr.conf.my_id) == offloader; let should_task_run = need_backup && elected_me; @@ -127,6 +130,70 @@ async fn shut_down_task(entry: &mut Option) { } } +/* BEGIN_HADRON */ +// On top of the neon determine_offloader, we also check if the current offloader is lagging behind too much. +// If it is, we re-elect a new offloader. This mitigates the below issue. It also helps distribute the load across SKs. +// +// We observe that the offloader fails to upload a segment due to race conditions on XLOG SWITCH and PG start streaming WALs. +// wal_backup task continously failing to upload a full segment while the segment remains partial on the disk. +// The consequence is that commit_lsn for all SKs move forward but backup_lsn stays the same. Then, all SKs run out of disk space. +// See go/sk-ood-xlog-switch for more details. +// +// To mitigate this issue, we will re-elect a new offloader if the current offloader is lagging behind too much. +// Each SK makes the decision locally but they are aware of each other's commit and backup lsns. +// +// determine_offloader will pick a SK. say SK-1. +// Each SK checks +// -- if commit_lsn - back_lsn > threshold, +// -- -- remove SK-1 from the candidate and call determine_offloader again. +// SK-1 will step down and all SKs will elect the same leader again. +// After the backup is caught up, the leader will become SK-1 again. +fn hadron_determine_offloader(mgr: &Manager, state: &StateSnapshot) -> (Option, String) { + let mut offloader: Option; + let mut election_dbg_str: String; + let caughtup_peers_count: usize; + (offloader, election_dbg_str, caughtup_peers_count) = + determine_offloader(&state.peers, state.backup_lsn, mgr.tli.ttid, &mgr.conf); + + if offloader.is_none() + || caughtup_peers_count <= 1 + || mgr.conf.max_reelect_offloader_lag_bytes == 0 + { + return (offloader, election_dbg_str); + } + + let offloader_sk_id = offloader.unwrap(); + + let backup_lag = state.commit_lsn.checked_sub(state.backup_lsn); + if backup_lag.is_none() { + info!("Backup lag is None. 
Skipping re-election."); + return (offloader, election_dbg_str); + } + + let backup_lag = backup_lag.unwrap().0; + + if backup_lag < mgr.conf.max_reelect_offloader_lag_bytes { + return (offloader, election_dbg_str); + } + + info!( + "Electing a new leader: Backup lag is too high backup lsn lag {} threshold {}: {}", + backup_lag, mgr.conf.max_reelect_offloader_lag_bytes, election_dbg_str + ); + BACKUP_REELECT_LEADER_COUNT.inc(); + // Remove the current offloader if lag is too high. + let new_peers: Vec<_> = state + .peers + .iter() + .filter(|p| p.sk_id != offloader_sk_id) + .cloned() + .collect(); + (offloader, election_dbg_str, _) = + determine_offloader(&new_peers, state.backup_lsn, mgr.tli.ttid, &mgr.conf); + (offloader, election_dbg_str) +} +/* END_HADRON */ + /// The goal is to ensure that normally only one safekeepers offloads. However, /// it is fine (and inevitable, as s3 doesn't provide CAS) that for some short /// time we have several ones as they PUT the same files. Also, @@ -141,13 +208,13 @@ fn determine_offloader( wal_backup_lsn: Lsn, ttid: TenantTimelineId, conf: &SafeKeeperConf, -) -> (Option, String) { +) -> (Option, String, usize) { // TODO: remove this once we fill newly joined safekeepers since backup_lsn. let capable_peers = alive_peers .iter() .filter(|p| p.local_start_lsn <= wal_backup_lsn); match capable_peers.clone().map(|p| p.commit_lsn).max() { - None => (None, "no connected peers to elect from".to_string()), + None => (None, "no connected peers to elect from".to_string(), 0), Some(max_commit_lsn) => { let threshold = max_commit_lsn .checked_sub(conf.max_offloader_lag_bytes) @@ -175,6 +242,7 @@ fn determine_offloader( capable_peers_dbg, caughtup_peers.len() ), + caughtup_peers.len(), ) } } @@ -346,6 +414,8 @@ async fn backup_lsn_range( anyhow::bail!("parallel_jobs must be >= 1"); } + pausable_failpoint!("backup-lsn-range-pausable"); + let remote_timeline_path = &timeline.remote_path; let start_lsn = *backup_lsn; let segments = get_segments(start_lsn, end_lsn, wal_seg_size); diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index fe0f1b3607..cdf68262dd 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -364,8 +364,7 @@ impl PartialBackup { // there should always be zero or one uploaded segment assert!( new_segments.is_empty(), - "too many uploaded segments: {:?}", - new_segments + "too many uploaded segments: {new_segments:?}" ); } diff --git a/safekeeper/src/wal_reader_stream.rs b/safekeeper/src/wal_reader_stream.rs index aab82fedb5..cba156888c 100644 --- a/safekeeper/src/wal_reader_stream.rs +++ b/safekeeper/src/wal_reader_stream.rs @@ -1,15 +1,15 @@ use std::pin::Pin; use std::task::{Context, Poll}; -use bytes::Bytes; -use futures::stream::BoxStream; -use futures::{Stream, StreamExt}; -use safekeeper_api::Term; -use utils::lsn::Lsn; - use crate::send_wal::EndWatch; use crate::timeline::WalResidentTimeline; use crate::wal_storage::WalReader; +use bytes::Bytes; +use futures::stream::BoxStream; +use futures::{Stream, StreamExt}; +use safekeeper_api::Term; +use utils::id::TenantTimelineId; +use utils::lsn::Lsn; #[derive(PartialEq, Eq, Debug)] pub(crate) struct WalBytes { @@ -37,6 +37,8 @@ struct PositionedWalReader { pub(crate) struct StreamingWalReader { stream: BoxStream<'static, WalOrReset>, start_changed_tx: tokio::sync::watch::Sender, + // HADRON: Added TenantTimelineId for instrumentation purposes. 
+ pub(crate) ttid: TenantTimelineId, } pub(crate) enum WalOrReset { @@ -63,6 +65,7 @@ impl StreamingWalReader { buffer_size: usize, ) -> Self { let (start_changed_tx, start_changed_rx) = tokio::sync::watch::channel(start); + let ttid = tli.ttid; let state = WalReaderStreamState { tli, @@ -107,6 +110,7 @@ impl StreamingWalReader { Self { stream, start_changed_tx, + ttid, } } diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 8ba3e7cc47..9864ac3229 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -9,7 +9,7 @@ use std::cmp::{max, min}; use std::future::Future; -use std::io::{self, SeekFrom}; +use std::io::{ErrorKind, SeekFrom}; use std::pin::Pin; use anyhow::{Context, Result, bail}; @@ -19,6 +19,7 @@ use futures::future::BoxFuture; use postgres_ffi::v14::xlog_utils::{IsPartialXLogFileName, IsXLogFileName, XLogFromFileName}; use postgres_ffi::waldecoder::WalStreamDecoder; use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo, dispatch_pgversion}; +use postgres_versioninfo::{PgMajorVersion, PgVersionId}; use pq_proto::SystemId; use remote_storage::RemotePath; use std::sync::Arc; @@ -30,7 +31,8 @@ use utils::id::TenantTimelineId; use utils::lsn::Lsn; use crate::metrics::{ - REMOVED_WAL_SEGMENTS, WAL_STORAGE_OPERATION_SECONDS, WalStorageMetrics, time_io_closure, + REMOVED_WAL_SEGMENTS, WAL_DISK_IO_ERRORS, WAL_STORAGE_OPERATION_SECONDS, WalStorageMetrics, + time_io_closure, }; use crate::state::TimelinePersistentState; use crate::wal_backup::{WalBackup, read_object, remote_timeline_path}; @@ -92,7 +94,7 @@ pub struct PhysicalStorage { /// Size of WAL segment in bytes. wal_seg_size: usize, - pg_version: u32, + pg_version: PgVersionId, system_id: u64, /// Written to disk, but possibly still in the cache and not fully persisted. @@ -153,8 +155,8 @@ pub struct PhysicalStorage { /// record /// /// Partial segment 002 has no WAL records, and it will be removed by the - /// next truncate_wal(). This flag will be set to true after the first - /// truncate_wal() call. + /// next truncate_wal(). This flag will be set to false after the first + /// successful truncate_wal() call. /// /// [`write_lsn`]: Self::write_lsn pending_wal_truncation: bool, @@ -180,7 +182,7 @@ impl PhysicalStorage { let write_lsn = if state.commit_lsn == Lsn(0) { Lsn(0) } else { - let version = state.server.pg_version / 10000; + let version = PgMajorVersion::try_from(state.server.pg_version).unwrap(); dispatch_pgversion!( version, @@ -201,6 +203,8 @@ impl PhysicalStorage { ttid.timeline_id, flush_lsn, state.commit_lsn, state.peer_horizon_lsn, ); if flush_lsn < state.commit_lsn { + // note: can never happen. find_end_of_wal returns provided start_lsn + // (state.commit_lsn in our case) if it doesn't find anything. bail!( "timeline {} potential data loss: flush_lsn {} by find_end_of_wal is less than commit_lsn {} from control file", ttid.timeline_id, @@ -226,7 +230,10 @@ impl PhysicalStorage { write_record_lsn: write_lsn, flush_lsn, flush_record_lsn: flush_lsn, - decoder: WalStreamDecoder::new(write_lsn, state.server.pg_version / 10000), + decoder: WalStreamDecoder::new( + write_lsn, + PgMajorVersion::try_from(state.server.pg_version).unwrap(), + ), file: None, pending_wal_truncation: true, }) @@ -287,9 +294,12 @@ impl PhysicalStorage { // half initialized segment, first bake it under tmp filename and // then rename. 
let tmp_path = self.timeline_dir.join("waltmp"); - let file = File::create(&tmp_path) - .await - .with_context(|| format!("Failed to open tmp wal file {:?}", &tmp_path))?; + let file: File = File::create(&tmp_path).await.with_context(|| { + /* BEGIN_HADRON */ + WAL_DISK_IO_ERRORS.inc(); + /* END_HADRON */ + format!("Failed to open tmp wal file {:?}", &tmp_path) + })?; fail::fail_point!("sk-zero-segment", |_| { info!("sk-zero-segment failpoint hit"); @@ -376,7 +386,11 @@ impl PhysicalStorage { let flushed = self .write_in_segment(segno, xlogoff, &buf[..bytes_write]) - .await?; + .await + /* BEGIN_HADRON */ + .inspect_err(|_| WAL_DISK_IO_ERRORS.inc())?; + /* END_HADRON */ + self.write_lsn += bytes_write as u64; if flushed { self.flush_lsn = self.write_lsn; @@ -408,7 +422,7 @@ impl Storage for PhysicalStorage { let segno = init_lsn.segment_number(self.wal_seg_size); let (mut file, _) = self.open_or_create(segno).await?; - let major_pg_version = self.pg_version / 10000; + let major_pg_version = PgMajorVersion::try_from(self.pg_version).unwrap(); let wal_seg = postgres_ffi::generate_wal_segment(segno, self.system_id, major_pg_version, init_lsn)?; file.seek(SeekFrom::Start(0)).await?; @@ -485,7 +499,11 @@ impl Storage for PhysicalStorage { } if let Some(unflushed_file) = self.file.take() { - self.fdatasync_file(&unflushed_file).await?; + self.fdatasync_file(&unflushed_file) + .await + /* BEGIN_HADRON */ + .inspect_err(|_| WAL_DISK_IO_ERRORS.inc())?; + /* END_HADRON */ self.file = Some(unflushed_file); } else { // We have unflushed data (write_lsn != flush_lsn), but no file. This @@ -654,7 +672,7 @@ pub struct WalReader { // pos is in the same segment as timeline_start_lsn. timeline_start_lsn: Lsn, // integer version number of PostgreSQL, e.g. 14; 15; 16 - pg_version: u32, + pg_version: PgMajorVersion, system_id: SystemId, timeline_start_segment: Option, } @@ -697,7 +715,7 @@ impl WalReader { wal_backup, local_start_lsn: state.local_start_lsn, timeline_start_lsn: state.timeline_start_lsn, - pg_version: state.server.pg_version / 10000, + pg_version: PgMajorVersion::try_from(state.server.pg_version).unwrap(), system_id: state.server.system_id, timeline_start_segment: None, }) @@ -790,26 +808,13 @@ impl WalReader { // Try to open local file, if we may have WAL locally if self.pos >= self.local_start_lsn { - let res = open_wal_file(&self.timeline_dir, segno, self.wal_seg_size).await; - match res { - Ok((mut file, _)) => { - file.seek(SeekFrom::Start(xlogoff as u64)).await?; - return Ok(Box::pin(file)); - } - Err(e) => { - let is_not_found = e.chain().any(|e| { - if let Some(e) = e.downcast_ref::() { - e.kind() == io::ErrorKind::NotFound - } else { - false - } - }); - if !is_not_found { - return Err(e); - } - // NotFound is expected, fall through to remote read - } - }; + let res = open_wal_file(&self.timeline_dir, segno, self.wal_seg_size).await?; + if let Some((mut file, _)) = res { + file.seek(SeekFrom::Start(xlogoff as u64)).await?; + return Ok(Box::pin(file)); + } else { + // NotFound is expected, fall through to remote read + } } // Try to open remote file, if remote reads are enabled @@ -828,26 +833,31 @@ pub(crate) async fn open_wal_file( timeline_dir: &Utf8Path, segno: XLogSegNo, wal_seg_size: usize, -) -> Result<(tokio::fs::File, bool)> { +) -> Result> { let (wal_file_path, wal_file_partial_path) = wal_file_paths(timeline_dir, segno, wal_seg_size); // First try to open the .partial file. 
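The BEGIN_HADRON/END_HADRON changes above all follow one pattern: bump a disk-IO error counter on the failure path without changing what callers see. A minimal sketch of that pattern, with a std AtomicU64 standing in for the real WAL_DISK_IO_ERRORS Prometheus counter:

    // Sketch of the "count the error, then propagate it" pattern applied with
    // inspect_err; an AtomicU64 stands in for the real metrics counter.
    use std::io::{Error, ErrorKind};
    use std::sync::atomic::{AtomicU64, Ordering};

    static WAL_DISK_IO_ERRORS: AtomicU64 = AtomicU64::new(0);

    fn write_segment(fail: bool) -> Result<usize, Error> {
        if fail {
            Err(Error::new(ErrorKind::Other, "disk full"))
        } else {
            Ok(8192)
        }
    }

    fn write_and_count(fail: bool) -> Result<usize, Error> {
        // inspect_err runs only on the error path and leaves the Result untouched,
        // so callers still see the original error after the metric is bumped.
        write_segment(fail).inspect_err(|_| {
            WAL_DISK_IO_ERRORS.fetch_add(1, Ordering::Relaxed);
        })
    }

    fn main() {
        assert!(write_and_count(false).is_ok());
        assert!(write_and_count(true).is_err());
        assert_eq!(WAL_DISK_IO_ERRORS.load(Ordering::Relaxed), 1);
    }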
let mut partial_path = wal_file_path.to_owned(); partial_path.set_extension("partial"); if let Ok(opened_file) = tokio::fs::File::open(&wal_file_partial_path).await { - return Ok((opened_file, true)); + return Ok(Some((opened_file, true))); } // If that failed, try it without the .partial extension. - let pf = tokio::fs::File::open(&wal_file_path) - .await - .with_context(|| format!("failed to open WAL file {:#}", wal_file_path)) + let pf_res = tokio::fs::File::open(&wal_file_path).await; + if let Err(e) = &pf_res { + if e.kind() == ErrorKind::NotFound { + return Ok(None); + } + } + let pf = pf_res + .with_context(|| format!("failed to open WAL file {wal_file_path:#}")) .map_err(|e| { - warn!("{}", e); + warn!("{e}"); e })?; - Ok((pf, false)) + Ok(Some((pf, false))) } /// Helper returning full path to WAL segment file and its .partial brother. diff --git a/safekeeper/tests/walproposer_sim/log.rs b/safekeeper/tests/walproposer_sim/log.rs index e2ba3282ca..cecbc859e6 100644 --- a/safekeeper/tests/walproposer_sim/log.rs +++ b/safekeeper/tests/walproposer_sim/log.rs @@ -33,7 +33,7 @@ impl FormatTime for SimClock { if let Some(clock) = clock.as_ref() { let now = clock.now(); - write!(w, "[{}]", now) + write!(w, "[{now}]") } else { write!(w, "[?]") } diff --git a/safekeeper/tests/walproposer_sim/safekeeper.rs b/safekeeper/tests/walproposer_sim/safekeeper.rs index 5fb29683f2..1f6990c682 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper.rs @@ -159,6 +159,10 @@ pub fn run_server(os: NodeOs, disk: Arc) -> Result<()> { heartbeat_timeout: Duration::from_secs(0), remote_storage: None, max_offloader_lag_bytes: 0, + /* BEGIN_HADRON */ + max_reelect_offloader_lag_bytes: 0, + max_timeline_disk_usage_bytes: 0, + /* END_HADRON */ wal_backup_enabled: false, listen_pg_addr_tenant_only: None, advertise_pg_addr: None, @@ -257,7 +261,7 @@ pub fn run_server(os: NodeOs, disk: Arc) -> Result<()> { let estr = e.to_string(); if !estr.contains("finished processing START_REPLICATION") { warn!("conn {:?} error: {:?}", connection_id, e); - panic!("unexpected error at safekeeper: {:#}", e); + panic!("unexpected error at safekeeper: {e:#}"); } conns.remove(&connection_id); break; diff --git a/safekeeper/tests/walproposer_sim/safekeeper_disk.rs b/safekeeper/tests/walproposer_sim/safekeeper_disk.rs index 94a849b5f0..029f8fab0a 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper_disk.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper_disk.rs @@ -7,8 +7,8 @@ use anyhow::Result; use bytes::{Buf, BytesMut}; use futures::future::BoxFuture; use parking_lot::Mutex; -use postgres_ffi::XLogSegNo; use postgres_ffi::waldecoder::WalStreamDecoder; +use postgres_ffi::{PgMajorVersion, XLogSegNo}; use safekeeper::metrics::WalStorageMetrics; use safekeeper::state::TimelinePersistentState; use safekeeper::{control_file, wal_storage}; @@ -142,7 +142,7 @@ impl DiskWALStorage { write_lsn, write_record_lsn: flush_lsn, flush_record_lsn: flush_lsn, - decoder: WalStreamDecoder::new(flush_lsn, 16), + decoder: WalStreamDecoder::new(flush_lsn, PgMajorVersion::PG16), unflushed_bytes: BytesMut::new(), disk, }) @@ -151,7 +151,7 @@ impl DiskWALStorage { fn find_end_of_wal(disk: Arc, start_lsn: Lsn) -> Result { let mut buf = [0; 8192]; let mut pos = start_lsn.0; - let mut decoder = WalStreamDecoder::new(start_lsn, 16); + let mut decoder = WalStreamDecoder::new(start_lsn, PgMajorVersion::PG16); let mut result = start_lsn; loop { disk.wal.lock().read(pos, &mut buf); @@ -204,7 +204,7 @@ impl 
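open_wal_file now reports a missing segment as Ok(None) instead of threading an io NotFound error through anyhow, so WalReader can fall through to a remote read without walking error chains. A synchronous sketch of that convention (the real function is async, uses tokio::fs, and also reports whether the `.partial` file was opened):

    // Sketch of the NotFound-as-None convention, using std::fs for brevity.
    use std::fs::File;
    use std::io::ErrorKind;
    use std::path::Path;

    fn open_if_exists(path: &Path) -> anyhow::Result<Option<File>> {
        match File::open(path) {
            Ok(f) => Ok(Some(f)),
            // A missing segment is an expected case: the caller falls through
            // to a remote (S3) read instead of treating it as an error.
            Err(e) if e.kind() == ErrorKind::NotFound => Ok(None),
            Err(e) => Err(anyhow::anyhow!("failed to open WAL file {path:?}: {e}")),
        }
    }

    fn main() -> anyhow::Result<()> {
        assert!(open_if_exists(Path::new("definitely-missing-segment"))?.is_none());
        Ok(())
    }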
wal_storage::Storage for DiskWALStorage { self.decoder.available(), startpos, ); - self.decoder = WalStreamDecoder::new(startpos, 16); + self.decoder = WalStreamDecoder::new(startpos, PgMajorVersion::PG16); } self.decoder.feed_bytes(buf); loop { @@ -242,7 +242,7 @@ impl wal_storage::Storage for DiskWALStorage { self.write_record_lsn = end_pos; self.flush_record_lsn = end_pos; self.unflushed_bytes.clear(); - self.decoder = WalStreamDecoder::new(end_pos, 16); + self.decoder = WalStreamDecoder::new(end_pos, PgMajorVersion::PG16); Ok(()) } diff --git a/safekeeper/tests/walproposer_sim/simulation.rs b/safekeeper/tests/walproposer_sim/simulation.rs index 70fecfbe22..edd3bf2d9e 100644 --- a/safekeeper/tests/walproposer_sim/simulation.rs +++ b/safekeeper/tests/walproposer_sim/simulation.rs @@ -217,7 +217,7 @@ impl TestConfig { ]; let server_ids = [servers[0].id, servers[1].id, servers[2].id]; - let safekeepers_addrs = server_ids.map(|id| format!("node:{}", id)).to_vec(); + let safekeepers_addrs = server_ids.map(|id| format!("node:{id}")).to_vec(); let ttid = TenantTimelineId::generate(); diff --git a/safekeeper/tests/walproposer_sim/walproposer_api.rs b/safekeeper/tests/walproposer_sim/walproposer_api.rs index 82e7a32881..29b361db7e 100644 --- a/safekeeper/tests/walproposer_sim/walproposer_api.rs +++ b/safekeeper/tests/walproposer_sim/walproposer_api.rs @@ -499,7 +499,7 @@ impl ApiImpl for SimulationApi { true } - fn finish_sync_safekeepers(&self, lsn: u64) { + fn finish_sync_safekeepers(&self, lsn: u64) -> ! { debug!("finish_sync_safekeepers, lsn={}", lsn); executor::exit(0, Lsn(lsn).to_string()); } @@ -523,7 +523,7 @@ impl ApiImpl for SimulationApi { // Voting bug when safekeeper disconnects after voting executor::exit(1, msg.to_owned()); } - panic!("unknown FATAL error from walproposer: {}", msg); + panic!("unknown FATAL error from walproposer: {msg}"); } } @@ -544,10 +544,7 @@ impl ApiImpl for SimulationApi { } } - let msg = format!( - "prop_elected;{};{};{};{}", - prop_lsn, prop_term, prev_lsn, prev_term - ); + let msg = format!("prop_elected;{prop_lsn};{prop_term};{prev_lsn};{prev_term}"); debug!(msg); self.os.log_event(msg); diff --git a/scripts/ingest_perf_test_result.py b/scripts/ingest_perf_test_result.py index 804f8a3cde..898e1ee954 100644 --- a/scripts/ingest_perf_test_result.py +++ b/scripts/ingest_perf_test_result.py @@ -26,7 +26,7 @@ CREATE TABLE IF NOT EXISTS perf_test_results ( metric_unit VARCHAR(10), metric_report_type TEXT, recorded_at_timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(), - labels JSONB with default '{}' + labels JSONB DEFAULT '{}'::jsonb ) """ diff --git a/scripts/proxy_bench_results_ingest.py b/scripts/proxy_bench_results_ingest.py new file mode 100644 index 0000000000..475d053ed2 --- /dev/null +++ b/scripts/proxy_bench_results_ingest.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python3 + +import argparse +import json +import time +from typing import Any, TypedDict, cast + +import requests + +PROMETHEUS_URL = "http://localhost:9090" +SAMPLE_INTERVAL = 1 # seconds + +DEFAULT_REVISION = "unknown" +DEFAULT_PLATFORM = "unknown" +DEFAULT_SUIT = "proxy_bench" + + +class MetricConfig(TypedDict, total=False): + name: str + promql: str + unit: str + report: str + labels: dict[str, str] + is_vector: bool + label_field: str + + +METRICS: list[MetricConfig] = [ + { + "name": "latency_p99", + "promql": 'histogram_quantile(0.99, sum(rate(proxy_compute_connection_latency_seconds_bucket{outcome="success", excluded="client_and_cplane"}[5m])) by (le))', + "unit": "s", + "report": 
"LOWER_IS_BETTER", + "labels": {}, + }, + { + "name": "error_rate", + "promql": 'sum(rate(proxy_errors_total{type!~"user|clientdisconnect|quota"}[5m])) / sum(rate(proxy_accepted_connections_total[5m]))', + "unit": "", + "report": "LOWER_IS_BETTER", + "labels": {}, + }, + { + "name": "max_memory_kb", + "promql": "max(libmetrics_maxrss_kb)", + "unit": "kB", + "report": "LOWER_IS_BETTER", + "labels": {}, + }, + { + "name": "jemalloc_active_bytes", + "promql": "sum(jemalloc_active_bytes)", + "unit": "bytes", + "report": "LOWER_IS_BETTER", + "labels": {}, + }, + { + "name": "open_connections", + "promql": "sum by (protocol) (proxy_opened_client_connections_total - proxy_closed_client_connections_total)", + "unit": "", + "report": "HIGHER_IS_BETTER", + "labels": {}, + "is_vector": True, + "label_field": "protocol", + }, +] + + +class PrometheusMetric(TypedDict): + metric: dict[str, str] + value: list[str | float] + + +class PrometheusResult(TypedDict): + result: list[PrometheusMetric] + + +class PrometheusResponse(TypedDict): + data: PrometheusResult + + +def query_prometheus(promql: str) -> PrometheusResponse: + resp = requests.get(f"{PROMETHEUS_URL}/api/v1/query", params={"query": promql}) + resp.raise_for_status() + return cast("PrometheusResponse", resp.json()) + + +def extract_scalar_metric(result_json: PrometheusResponse) -> float | None: + try: + return float(result_json["data"]["result"][0]["value"][1]) + except (IndexError, KeyError, ValueError, TypeError): + return None + + +def extract_vector_metric( + result_json: PrometheusResponse, label_field: str +) -> list[tuple[str | None, float, dict[str, str]]]: + out: list[tuple[str | None, float, dict[str, str]]] = [] + for entry in result_json["data"]["result"]: + try: + value_str = entry["value"][1] + if not isinstance(value_str, (str | float)): + continue + value = float(value_str) + except (IndexError, KeyError, ValueError, TypeError): + continue + labels = entry.get("metric", {}) + label_val = labels.get(label_field, None) + out.append((label_val, value, labels)) + return out + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Collect Prometheus metrics and output in benchmark fixture format" + ) + parser.add_argument("--revision", default=DEFAULT_REVISION) + parser.add_argument("--platform", default=DEFAULT_PLATFORM) + parser.add_argument("--suit", default=DEFAULT_SUIT) + parser.add_argument("--out", default="metrics_benchmarks.json", help="Output JSON file") + parser.add_argument( + "--interval", default=SAMPLE_INTERVAL, type=int, help="Sampling interval (s)" + ) + args = parser.parse_args() + + start_time = int(time.time()) + samples: list[dict[str, Any]] = [] + + print("Collecting metrics (Ctrl+C to stop)...") + try: + while True: + ts = int(time.time()) + for metric in METRICS: + if metric.get("is_vector", False): + # Vector (per-label, e.g. 
per-protocol) + for label_val, value, labels in extract_vector_metric( + query_prometheus(metric["promql"]), metric["label_field"] + ): + entry = { + "name": f"{metric['name']}.{label_val}" + if label_val + else metric["name"], + "value": value, + "unit": metric["unit"], + "report": metric["report"], + "labels": {**metric.get("labels", {}), **labels}, + "timestamp": ts, + } + samples.append(entry) + else: + result = extract_scalar_metric(query_prometheus(metric["promql"])) + if result is not None: + entry = { + "name": metric["name"], + "value": result, + "unit": metric["unit"], + "report": metric["report"], + "labels": metric.get("labels", {}), + "timestamp": ts, + } + samples.append(entry) + time.sleep(args.interval) + except KeyboardInterrupt: + print("Collection stopped.") + + total_duration = int(time.time()) - start_time + + # Compose output + out = { + "revision": args.revision, + "platform": args.platform, + "result": [ + { + "suit": args.suit, + "total_duration": total_duration, + "data": samples, + } + ], + } + + with open(args.out, "w") as f: + json.dump(out, f, indent=2) + print(f"Wrote metrics in fixture format to {args.out}") + + +if __name__ == "__main__": + main() diff --git a/storage_broker/benches/rps.rs b/storage_broker/benches/rps.rs index 9953ccfa91..5f3e594687 100644 --- a/storage_broker/benches/rps.rs +++ b/storage_broker/benches/rps.rs @@ -161,7 +161,7 @@ async fn publish(client: Option, n_keys: u64) { } }; let response = client.publish_safekeeper_info(Request::new(outbound)).await; - println!("pub response is {:?}", response); + println!("pub response is {response:?}"); } #[tokio::main] diff --git a/storage_broker/build.rs b/storage_broker/build.rs index 08dadeacd5..77c441dddd 100644 --- a/storage_broker/build.rs +++ b/storage_broker/build.rs @@ -6,6 +6,6 @@ fn main() -> Result<(), Box> { // the build then. Anyway, per cargo docs build script shouldn't output to // anywhere but $OUT_DIR. 
tonic_build::compile_protos("proto/broker.proto") - .unwrap_or_else(|e| panic!("failed to compile protos {:?}", e)); + .unwrap_or_else(|e| panic!("failed to compile protos {e:?}")); Ok(()) } diff --git a/storage_broker/src/lib.rs b/storage_broker/src/lib.rs index 149656a191..7d8b57380f 100644 --- a/storage_broker/src/lib.rs +++ b/storage_broker/src/lib.rs @@ -86,13 +86,9 @@ impl BrokerClientChannel { #[allow(clippy::result_large_err, reason = "TODO")] pub fn parse_proto_ttid(proto_ttid: &ProtoTenantTimelineId) -> Result { let tenant_id = TenantId::from_slice(&proto_ttid.tenant_id) - .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {}", e)))?; - let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id).map_err(|e| { - Status::new( - Code::InvalidArgument, - format!("malformed timeline_id: {}", e), - ) - })?; + .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {e}")))?; + let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id) + .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed timeline_id: {e}")))?; Ok(TenantTimelineId { tenant_id, timeline_id, diff --git a/storage_controller/Cargo.toml b/storage_controller/Cargo.toml index c41e174d9d..143f4241f4 100644 --- a/storage_controller/Cargo.toml +++ b/storage_controller/Cargo.toml @@ -20,6 +20,7 @@ camino.workspace = true chrono.workspace = true clap.workspace = true clashmap.workspace = true +compute_api.workspace = true cron.workspace = true fail.workspace = true futures.workspace = true @@ -27,6 +28,7 @@ governor.workspace = true hex.workspace = true hyper0.workspace = true humantime.workspace = true +humantime-serde.workspace = true itertools.workspace = true json-structural-diff.workspace = true lasso.workspace = true @@ -34,6 +36,7 @@ once_cell.workspace = true pageserver_api.workspace = true pageserver_client.workspace = true postgres_connection.workspace = true +posthog_client_lite.workspace = true rand.workspace = true reqwest = { workspace = true, features = ["stream"] } routerify.workspace = true diff --git a/storage_controller/migrations/2025-06-01-201442_add_lifecycle_to_nodes/down.sql b/storage_controller/migrations/2025-06-01-201442_add_lifecycle_to_nodes/down.sql new file mode 100644 index 0000000000..a09acb916b --- /dev/null +++ b/storage_controller/migrations/2025-06-01-201442_add_lifecycle_to_nodes/down.sql @@ -0,0 +1 @@ +ALTER TABLE nodes DROP COLUMN lifecycle; diff --git a/storage_controller/migrations/2025-06-01-201442_add_lifecycle_to_nodes/up.sql b/storage_controller/migrations/2025-06-01-201442_add_lifecycle_to_nodes/up.sql new file mode 100644 index 0000000000..e03a0cadba --- /dev/null +++ b/storage_controller/migrations/2025-06-01-201442_add_lifecycle_to_nodes/up.sql @@ -0,0 +1 @@ +ALTER TABLE nodes ADD COLUMN lifecycle VARCHAR NOT NULL DEFAULT 'active'; diff --git a/storage_controller/migrations/2025-06-17-082247_pageserver_grpc_addr/down.sql b/storage_controller/migrations/2025-06-17-082247_pageserver_grpc_addr/down.sql new file mode 100644 index 0000000000..f9f2ebb070 --- /dev/null +++ b/storage_controller/migrations/2025-06-17-082247_pageserver_grpc_addr/down.sql @@ -0,0 +1 @@ +ALTER TABLE nodes DROP listen_grpc_addr, listen_grpc_port; diff --git a/storage_controller/migrations/2025-06-17-082247_pageserver_grpc_addr/up.sql b/storage_controller/migrations/2025-06-17-082247_pageserver_grpc_addr/up.sql new file mode 100644 index 0000000000..8291864b16 --- /dev/null +++ 
b/storage_controller/migrations/2025-06-17-082247_pageserver_grpc_addr/up.sql @@ -0,0 +1 @@ +ALTER TABLE nodes ADD listen_grpc_addr VARCHAR NULL, ADD listen_grpc_port INTEGER NULL; diff --git a/storage_controller/migrations/2025-07-02-170751_safekeeper_default_no_pause/down.sql b/storage_controller/migrations/2025-07-02-170751_safekeeper_default_no_pause/down.sql new file mode 100644 index 0000000000..bc9b501189 --- /dev/null +++ b/storage_controller/migrations/2025-07-02-170751_safekeeper_default_no_pause/down.sql @@ -0,0 +1 @@ +ALTER TABLE safekeepers ALTER COLUMN scheduling_policy SET DEFAULT 'pause'; \ No newline at end of file diff --git a/storage_controller/migrations/2025-07-02-170751_safekeeper_default_no_pause/up.sql b/storage_controller/migrations/2025-07-02-170751_safekeeper_default_no_pause/up.sql new file mode 100644 index 0000000000..18c89bed7b --- /dev/null +++ b/storage_controller/migrations/2025-07-02-170751_safekeeper_default_no_pause/up.sql @@ -0,0 +1 @@ +ALTER TABLE safekeepers ALTER COLUMN scheduling_policy SET DEFAULT 'activating'; \ No newline at end of file diff --git a/storage_controller/src/background_node_operations.rs b/storage_controller/src/background_node_operations.rs index a630316f46..7a111f6329 100644 --- a/storage_controller/src/background_node_operations.rs +++ b/storage_controller/src/background_node_operations.rs @@ -6,6 +6,11 @@ use utils::id::NodeId; pub(crate) const MAX_RECONCILES_PER_OPERATION: usize = 64; +#[derive(Copy, Clone)] +pub(crate) struct Delete { + pub(crate) node_id: NodeId, +} + #[derive(Copy, Clone)] pub(crate) struct Drain { pub(crate) node_id: NodeId, @@ -18,6 +23,7 @@ pub(crate) struct Fill { #[derive(Copy, Clone)] pub(crate) enum Operation { + Delete(Delete), Drain(Drain), Fill(Fill), } @@ -30,6 +36,8 @@ pub(crate) enum OperationError { FinalizeError(Cow<'static, str>), #[error("Operation cancelled")] Cancelled, + #[error("Impossible constraint error: {0}")] + ImpossibleConstraint(Cow<'static, str>), } pub(crate) struct OperationHandler { @@ -38,6 +46,12 @@ pub(crate) struct OperationHandler { pub(crate) cancel: CancellationToken, } +impl Display for Delete { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "delete {}", self.node_id) + } +} + impl Display for Drain { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "drain {}", self.node_id) @@ -53,6 +67,7 @@ impl Display for Fill { impl Display for Operation { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { + Operation::Delete(op) => write!(f, "{op}"), Operation::Drain(op) => write!(f, "{op}"), Operation::Fill(op) => write!(f, "{op}"), } diff --git a/storage_controller/src/compute_hook.rs b/storage_controller/src/compute_hook.rs index 57709302e1..81f7fedac8 100644 --- a/storage_controller/src/compute_hook.rs +++ b/storage_controller/src/compute_hook.rs @@ -5,18 +5,22 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Context; -use control_plane::endpoint::{ComputeControlPlane, EndpointStatus}; +use control_plane::endpoint::{ + ComputeControlPlane, EndpointStatus, PageserverConnectionInfo, PageserverShardConnectionInfo, +}; use control_plane::local_env::LocalEnv; use futures::StreamExt; use hyper::StatusCode; +use pageserver_api::config::DEFAULT_GRPC_LISTEN_PORT; use pageserver_api::controller_api::AvailabilityZone; use pageserver_api::shard::{ShardCount, ShardNumber, ShardStripeSize, TenantShardId}; use postgres_connection::parse_host_port; +use 
safekeeper_api::membership::SafekeeperGeneration; use serde::{Deserialize, Serialize}; use tokio_util::sync::CancellationToken; use tracing::{Instrument, info_span}; use utils::backoff::{self}; -use utils::id::{NodeId, TenantId}; +use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId}; use crate::service::Config; @@ -34,7 +38,7 @@ struct UnshardedComputeHookTenant { preferred_az: Option, // Must hold this lock to send a notification. - send_lock: Arc>>, + send_lock: Arc>>, } struct ShardedComputeHookTenant { stripe_size: ShardStripeSize, @@ -47,7 +51,7 @@ struct ShardedComputeHookTenant { // Must hold this lock to send a notification. The contents represent // the last successfully sent notification, and are used to coalesce multiple // updates by only sending when there is a chance since our last successful send. - send_lock: Arc>>, + send_lock: Arc>>, } /// Represents our knowledge of the compute's state: we can update this when we get a @@ -55,9 +59,9 @@ struct ShardedComputeHookTenant { /// /// Should be wrapped in an Option<>, as we cannot always know the remote state. #[derive(PartialEq, Eq, Debug)] -struct ComputeRemoteState { +struct ComputeRemoteState { // The request body which was acked by the compute - request: ComputeHookNotifyRequest, + request: R, // Whether the cplane indicated that the state was applied to running computes, or just // persisted. In the Neon control plane, this is the difference between a 423 response (meaning @@ -65,6 +69,36 @@ struct ComputeRemoteState { applied: bool, } +type ComputeRemoteTenantState = ComputeRemoteState; +type ComputeRemoteTimelineState = ComputeRemoteState; + +/// The trait which define the handler-specific types and methods. +/// We have two implementations of this trait so far: +/// - [`ComputeHookTenant`] for tenant attach notifications ("/notify-attach") +/// - [`ComputeHookTimeline`] for safekeeper change notifications ("/notify-safekeepers") +trait ApiMethod { + /// Type of the key which identifies the resource. + /// It's either TenantId for tenant attach notifications, + /// or TenantTimelineId for safekeeper change notifications. + type Key: std::cmp::Eq + std::hash::Hash + Clone; + + type Request: serde::Serialize + std::fmt::Debug; + + const API_PATH: &'static str; + + fn maybe_send( + &self, + key: Self::Key, + lock: Option>>>, + ) -> MaybeSendResult; + + async fn notify_local( + env: &LocalEnv, + cplane: &ComputeControlPlane, + req: &Self::Request, + ) -> Result<(), NotifyError>; +} + enum ComputeHookTenant { Unsharded(UnshardedComputeHookTenant), Sharded(ShardedComputeHookTenant), @@ -95,7 +129,7 @@ impl ComputeHookTenant { } } - fn get_send_lock(&self) -> &Arc>> { + fn get_send_lock(&self) -> &Arc>> { match self { Self::Unsharded(unsharded_tenant) => &unsharded_tenant.send_lock, Self::Sharded(sharded_tenant) => &sharded_tenant.send_lock, @@ -189,19 +223,136 @@ impl ComputeHookTenant { } } +/// The state of a timeline we need to notify the compute about. +struct ComputeHookTimeline { + generation: SafekeeperGeneration, + safekeepers: Vec, + + send_lock: Arc>>, +} + +impl ComputeHookTimeline { + /// Construct a new ComputeHookTimeline with the given safekeepers and generation. + fn new(generation: SafekeeperGeneration, safekeepers: Vec) -> Self { + Self { + generation, + safekeepers, + send_lock: Arc::default(), + } + } + + /// Update the state with a new SafekeepersUpdate. + /// Noop if the update generation is not greater than the current generation. 
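The ApiMethod trait above lets notify-attach and notify-safekeepers share one delivery path by pinning down a key type, a request body type, and an API path per handler. A minimal sketch of that shape, with illustrative names and a print in place of the real PUT-with-retries:

    // Sketch of the ApiMethod-style generalization: one generic "notify" path
    // parameterized over a per-endpoint key/request pair.
    use serde::Serialize;

    trait ApiMethod {
        type Key: std::fmt::Debug;
        type Request: Serialize + std::fmt::Debug;
        const API_PATH: &'static str;
    }

    #[derive(Serialize, Debug)]
    struct NotifyAttach {
        tenant_id: String,
        shards: Vec<u32>,
    }

    struct AttachMethod;
    impl ApiMethod for AttachMethod {
        type Key = String;
        type Request = NotifyAttach;
        const API_PATH: &'static str = "notify-attach";
    }

    // Shared driver: builds the URL from the per-method path and serializes the
    // per-method request body; the real code then PUTs it with retries.
    fn notify<M: ApiMethod>(control_plane_url: &str, req: &M::Request) -> anyhow::Result<()> {
        let url = format!("{}/{}", control_plane_url.trim_end_matches('/'), M::API_PATH);
        let body = serde_json::to_string(req)?;
        println!("PUT {url}: {body}");
        Ok(())
    }

    fn main() -> anyhow::Result<()> {
        let req = NotifyAttach { tenant_id: "t1".into(), shards: vec![0, 1] };
        notify::<AttachMethod>("http://cplane/", &req)
    }

The real trait also carries maybe_send and notify_local, so the generic notify_execute/do_notify machinery never needs to know which endpoint it is serving.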
+ fn update(&mut self, sk_update: SafekeepersUpdate) { + if sk_update.generation > self.generation { + self.generation = sk_update.generation; + self.safekeepers = sk_update.safekeepers; + } + } +} + +impl ApiMethod for ComputeHookTimeline { + type Key = TenantTimelineId; + type Request = NotifySafekeepersRequest; + + const API_PATH: &'static str = "notify-safekeepers"; + + fn maybe_send( + &self, + ttid: TenantTimelineId, + lock: Option>>, + ) -> MaybeSendNotifySafekeepersResult { + let locked = match lock { + Some(already_locked) => already_locked, + None => { + // Lock order: this _must_ be only a try_lock, because we are called inside of the [`ComputeHook::timelines`] lock. + let Ok(locked) = self.send_lock.clone().try_lock_owned() else { + return MaybeSendResult::AwaitLock((ttid, self.send_lock.clone())); + }; + locked + } + }; + + if locked + .as_ref() + .is_some_and(|s| s.request.generation >= self.generation) + { + return MaybeSendResult::Noop; + } + + MaybeSendResult::Transmit(( + NotifySafekeepersRequest { + tenant_id: ttid.tenant_id, + timeline_id: ttid.timeline_id, + generation: self.generation, + safekeepers: self.safekeepers.clone(), + }, + locked, + )) + } + + async fn notify_local( + _env: &LocalEnv, + cplane: &ComputeControlPlane, + req: &NotifySafekeepersRequest, + ) -> Result<(), NotifyError> { + let NotifySafekeepersRequest { + tenant_id, + timeline_id, + generation, + safekeepers, + } = req; + + for (endpoint_name, endpoint) in &cplane.endpoints { + if endpoint.tenant_id == *tenant_id + && endpoint.timeline_id == *timeline_id + && endpoint.status() == EndpointStatus::Running + { + tracing::info!("Reconfiguring safekeepers for endpoint {endpoint_name}"); + + let safekeepers = safekeepers.iter().map(|sk| sk.id).collect::>(); + + endpoint + .reconfigure_safekeepers(safekeepers, *generation) + .await + .map_err(NotifyError::NeonLocal)?; + } + } + + Ok(()) + } +} + #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] -struct ComputeHookNotifyRequestShard { +struct NotifyAttachRequestShard { node_id: NodeId, shard_number: ShardNumber, } /// Request body that we send to the control plane to notify it of where a tenant is attached #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] -struct ComputeHookNotifyRequest { +struct NotifyAttachRequest { tenant_id: TenantId, preferred_az: Option, stripe_size: Option, - shards: Vec, + shards: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] +pub(crate) struct SafekeeperInfo { + pub id: NodeId, + /// Hostname of the safekeeper. + /// It exists for better debuggability. Might be missing. + /// Should not be used for anything else. + pub hostname: Option, +} + +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] +struct NotifySafekeepersRequest { + tenant_id: TenantId, + timeline_id: TimelineId, + generation: SafekeeperGeneration, + safekeepers: Vec, } /// Error type for attempts to call into the control plane compute notification hook @@ -233,42 +384,50 @@ pub(crate) enum NotifyError { NeonLocal(anyhow::Error), } -enum MaybeSendResult { +enum MaybeSendResult { // Please send this request while holding the lock, and if you succeed then write // the request into the lock. 
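The maybe_send logic above coalesces notifications through a per-resource send lock: try_lock only (it is called inside a synchronous map lock), compare against the last successfully sent request, and hand the guard to whoever transmits. A simplified sketch of that flow, assuming a bare generation number stands in for the request body:

    // Sketch of the coalescing send-lock: the lock guards the last request we
    // actually delivered; waiters get AwaitLock and must re-check after acquiring.
    use std::sync::Arc;
    use tokio::sync::Mutex;

    #[derive(Clone, PartialEq, Debug)]
    struct Request {
        generation: u64,
    }

    enum MaybeSend {
        Noop,
        Transmit(Request, tokio::sync::OwnedMutexGuard<Option<Request>>),
        AwaitLock(Arc<Mutex<Option<Request>>>),
    }

    fn maybe_send(desired: &Request, lock: Arc<Mutex<Option<Request>>>) -> MaybeSend {
        // Only try_lock here: in the real code this runs inside a synchronous
        // map lock, so blocking is forbidden.
        let Ok(guard) = lock.clone().try_lock_owned() else {
            return MaybeSend::AwaitLock(lock);
        };
        if guard.as_ref().is_some_and(|sent| sent.generation >= desired.generation) {
            return MaybeSend::Noop; // Something at least as new was already delivered.
        }
        MaybeSend::Transmit(desired.clone(), guard)
    }

    #[tokio::main]
    async fn main() {
        let lock = Arc::new(Mutex::new(None));
        match maybe_send(&Request { generation: 1 }, lock) {
            MaybeSend::Transmit(req, mut guard) => {
                // "Send" succeeded: record it so later duplicates become Noop.
                *guard = Some(req);
            }
            _ => unreachable!("lock was free and nothing was sent yet"),
        }
    }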
Transmit( ( - ComputeHookNotifyRequest, - tokio::sync::OwnedMutexGuard>, + R, + tokio::sync::OwnedMutexGuard>>, ), ), // Something requires sending, but you must wait for a current sender then call again - AwaitLock(Arc>>), + AwaitLock((K, Arc>>>)), // Nothing requires sending Noop, } -impl ComputeHookTenant { +type MaybeSendNotifyAttachResult = MaybeSendResult; +type MaybeSendNotifySafekeepersResult = MaybeSendResult; + +impl ApiMethod for ComputeHookTenant { + type Key = TenantId; + type Request = NotifyAttachRequest; + + const API_PATH: &'static str = "notify-attach"; + fn maybe_send( &self, tenant_id: TenantId, - lock: Option>>, - ) -> MaybeSendResult { + lock: Option>>, + ) -> MaybeSendNotifyAttachResult { let locked = match lock { Some(already_locked) => already_locked, None => { - // Lock order: this _must_ be only a try_lock, because we are called inside of the [`ComputeHook::state`] lock. + // Lock order: this _must_ be only a try_lock, because we are called inside of the [`ComputeHook::tenants`] lock. let Ok(locked) = self.get_send_lock().clone().try_lock_owned() else { - return MaybeSendResult::AwaitLock(self.get_send_lock().clone()); + return MaybeSendResult::AwaitLock((tenant_id, self.get_send_lock().clone())); }; locked } }; let request = match self { - Self::Unsharded(unsharded_tenant) => Some(ComputeHookNotifyRequest { + Self::Unsharded(unsharded_tenant) => Some(NotifyAttachRequest { tenant_id, - shards: vec![ComputeHookNotifyRequestShard { + shards: vec![NotifyAttachRequestShard { shard_number: ShardNumber(0), node_id: unsharded_tenant.node_id, }], @@ -281,12 +440,12 @@ impl ComputeHookTenant { Self::Sharded(sharded_tenant) if sharded_tenant.shards.len() == sharded_tenant.shard_count.count() as usize => { - Some(ComputeHookNotifyRequest { + Some(NotifyAttachRequest { tenant_id, shards: sharded_tenant .shards .iter() - .map(|(shard_number, node_id)| ComputeHookNotifyRequestShard { + .map(|(shard_number, node_id)| NotifyAttachRequestShard { shard_number: *shard_number, node_id: *node_id, }) @@ -331,6 +490,64 @@ impl ComputeHookTenant { } } } + + async fn notify_local( + env: &LocalEnv, + cplane: &ComputeControlPlane, + req: &NotifyAttachRequest, + ) -> Result<(), NotifyError> { + let NotifyAttachRequest { + tenant_id, + shards, + stripe_size, + preferred_az: _preferred_az, + } = req; + + for (endpoint_name, endpoint) in &cplane.endpoints { + if endpoint.tenant_id == *tenant_id && endpoint.status() == EndpointStatus::Running { + tracing::info!("Reconfiguring pageservers for endpoint {endpoint_name}"); + + let mut shard_conninfos = HashMap::new(); + for shard in shards.iter() { + let ps_conf = env + .get_pageserver_conf(shard.node_id) + .expect("Unknown pageserver"); + + let libpq_url = Some({ + let (host, port) = parse_host_port(&ps_conf.listen_pg_addr) + .expect("Unable to parse listen_pg_addr"); + let port = port.unwrap_or(5432); + format!("postgres://no_user@{host}:{port}") + }); + let grpc_url = if let Some(grpc_addr) = &ps_conf.listen_grpc_addr { + let (host, port) = + parse_host_port(grpc_addr).expect("invalid gRPC address"); + let port = port.unwrap_or(DEFAULT_GRPC_LISTEN_PORT); + Some(format!("grpc://no_user@{host}:{port}")) + } else { + None + }; + let pageserver = PageserverShardConnectionInfo { + libpq_url, + grpc_url, + }; + shard_conninfos.insert(shard.shard_number.0 as u32, pageserver); + } + + let pageserver_conninfo = PageserverConnectionInfo { + shards: shard_conninfos, + prefer_grpc: endpoint.grpc, + }; + + endpoint + 
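For local (neon_local) notifications the hook now assembles per-shard connection info: a libpq URL from listen_pg_addr with a 5432 fallback and, when configured, a gRPC URL with the default gRPC port as fallback. A sketch with a simplified parse_host_port stand-in; 51051 below is only an illustrative default, not the real DEFAULT_GRPC_LISTEN_PORT:

    // Simplified stand-in for the real parse_host_port helper (no IPv6 handling).
    fn parse_host_port(addr: &str) -> Option<(&str, Option<u16>)> {
        match addr.rsplit_once(':') {
            Some((host, port)) => Some((host, port.parse().ok())),
            None => Some((addr, None)),
        }
    }

    fn libpq_url(listen_pg_addr: &str) -> Option<String> {
        let (host, port) = parse_host_port(listen_pg_addr)?;
        Some(format!("postgres://no_user@{host}:{}", port.unwrap_or(5432)))
    }

    fn grpc_url(listen_grpc_addr: Option<&str>, default_port: u16) -> Option<String> {
        let (host, port) = parse_host_port(listen_grpc_addr?)?;
        Some(format!("grpc://no_user@{host}:{}", port.unwrap_or(default_port)))
    }

    fn main() {
        assert_eq!(
            libpq_url("127.0.0.1:64000").as_deref(),
            Some("postgres://no_user@127.0.0.1:64000")
        );
        assert_eq!(
            grpc_url(Some("127.0.0.1"), 51051).as_deref(),
            Some("grpc://no_user@127.0.0.1:51051")
        );
        // No gRPC listener configured: no gRPC URL in the shard's connection info.
        assert_eq!(grpc_url(None, 51051), None);
    }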
.reconfigure_pageservers(pageserver_conninfo, *stripe_size) + .await + .map_err(NotifyError::NeonLocal)?; + } + } + + Ok(()) + } } /// The compute hook is a destination for notifications about changes to tenant:pageserver @@ -338,14 +555,15 @@ impl ComputeHookTenant { /// the compute connection string. pub(super) struct ComputeHook { config: Config, - state: std::sync::Mutex>, + tenants: std::sync::Mutex>, + timelines: std::sync::Mutex>, authorization_header: Option, // Concurrency limiter, so that we do not overload the cloud control plane when updating // large numbers of tenants (e.g. when failing over after a node failure) api_concurrency: tokio::sync::Semaphore, - // This lock is only used in testing enviroments, to serialize calls into neon_lock + // This lock is only used in testing enviroments, to serialize calls into neon_local neon_local_lock: tokio::sync::Mutex<()>, // We share a client across all notifications to enable connection re-use etc when @@ -364,12 +582,19 @@ pub(crate) struct ShardUpdate<'a> { pub(crate) preferred_az: Option>, } +pub(crate) struct SafekeepersUpdate { + pub(crate) tenant_id: TenantId, + pub(crate) timeline_id: TimelineId, + pub(crate) generation: SafekeeperGeneration, + pub(crate) safekeepers: Vec, +} + impl ComputeHook { pub(super) fn new(config: Config) -> anyhow::Result { let authorization_header = config .control_plane_jwt_token .clone() - .map(|jwt| format!("Bearer {}", jwt)); + .map(|jwt| format!("Bearer {jwt}")); let mut client = reqwest::ClientBuilder::new().timeout(NOTIFY_REQUEST_TIMEOUT); for cert in &config.ssl_ca_certs { @@ -380,7 +605,8 @@ impl ComputeHook { .context("Failed to build http client for compute hook")?; Ok(Self { - state: Default::default(), + tenants: Default::default(), + timelines: Default::default(), config, authorization_header, neon_local_lock: Default::default(), @@ -390,10 +616,7 @@ impl ComputeHook { } /// For test environments: use neon_local's LocalEnv to update compute - async fn do_notify_local( - &self, - reconfigure_request: &ComputeHookNotifyRequest, - ) -> Result<(), NotifyError> { + async fn do_notify_local(&self, req: &M::Request) -> Result<(), NotifyError> { // neon_local updates are not safe to call concurrently, use a lock to serialize // all calls to this function let _locked = self.neon_local_lock.lock().await; @@ -413,42 +636,14 @@ impl ComputeHook { }; let cplane = ComputeControlPlane::load(env.clone()).expect("Error loading compute control plane"); - let ComputeHookNotifyRequest { - tenant_id, - shards, - stripe_size, - preferred_az: _preferred_az, - } = reconfigure_request; - let compute_pageservers = shards - .iter() - .map(|shard| { - let ps_conf = env - .get_pageserver_conf(shard.node_id) - .expect("Unknown pageserver"); - let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr) - .expect("Unable to parse listen_pg_addr"); - (pg_host, pg_port.unwrap_or(5432)) - }) - .collect::>(); - - for (endpoint_name, endpoint) in &cplane.endpoints { - if endpoint.tenant_id == *tenant_id && endpoint.status() == EndpointStatus::Running { - tracing::info!("Reconfiguring endpoint {}", endpoint_name,); - endpoint - .reconfigure(compute_pageservers.clone(), *stripe_size, None) - .await - .map_err(NotifyError::NeonLocal)?; - } - } - - Ok(()) + M::notify_local(&env, &cplane, req).await } - async fn do_notify_iteration( + async fn do_notify_iteration( &self, url: &String, - reconfigure_request: &ComputeHookNotifyRequest, + reconfigure_request: &Req, cancel: &CancellationToken, ) -> Result<(), NotifyError> { 
let req = self.client.request(reqwest::Method::PUT, url); @@ -470,9 +665,7 @@ impl ComputeHook { }; // Treat all 2xx responses as success - if response.status() >= reqwest::StatusCode::OK - && response.status() < reqwest::StatusCode::MULTIPLE_CHOICES - { + if response.status().is_success() { if response.status() != reqwest::StatusCode::OK { // Non-200 2xx response: it doesn't make sense to retry, but this is unexpected, so // log a warning. @@ -523,10 +716,10 @@ impl ComputeHook { } } - async fn do_notify( + async fn do_notify( &self, url: &String, - reconfigure_request: &ComputeHookNotifyRequest, + reconfigure_request: &R, cancel: &CancellationToken, ) -> Result<(), NotifyError> { // We hold these semaphore units across all retries, rather than only across each @@ -558,13 +751,13 @@ impl ComputeHook { } /// Synchronous phase: update the per-tenant state for the next intended notification - fn notify_prepare(&self, shard_update: ShardUpdate) -> MaybeSendResult { - let mut state_locked = self.state.lock().unwrap(); + fn notify_attach_prepare(&self, shard_update: ShardUpdate) -> MaybeSendNotifyAttachResult { + let mut tenants_locked = self.tenants.lock().unwrap(); use std::collections::hash_map::Entry; let tenant_shard_id = shard_update.tenant_shard_id; - let tenant = match state_locked.entry(tenant_shard_id.tenant_id) { + let tenant = match tenants_locked.entry(tenant_shard_id.tenant_id) { Entry::Vacant(e) => { let ShardUpdate { tenant_shard_id, @@ -588,10 +781,37 @@ impl ComputeHook { tenant.maybe_send(tenant_shard_id.tenant_id, None) } - async fn notify_execute( + fn notify_safekeepers_prepare( &self, - maybe_send_result: MaybeSendResult, - tenant_shard_id: TenantShardId, + safekeepers_update: SafekeepersUpdate, + ) -> MaybeSendNotifySafekeepersResult { + let mut timelines_locked = self.timelines.lock().unwrap(); + + let ttid = TenantTimelineId { + tenant_id: safekeepers_update.tenant_id, + timeline_id: safekeepers_update.timeline_id, + }; + + use std::collections::hash_map::Entry; + let timeline = match timelines_locked.entry(ttid) { + Entry::Vacant(e) => e.insert(ComputeHookTimeline::new( + safekeepers_update.generation, + safekeepers_update.safekeepers, + )), + Entry::Occupied(e) => { + let timeline = e.into_mut(); + timeline.update(safekeepers_update); + timeline + } + }; + + timeline.maybe_send(ttid, None) + } + + async fn notify_execute( + &self, + state: &std::sync::Mutex>, + maybe_send_result: MaybeSendResult, cancel: &CancellationToken, ) -> Result<(), NotifyError> { // Process result: we may get an update to send, or we may have to wait for a lock @@ -600,7 +820,7 @@ impl ComputeHook { MaybeSendResult::Noop => { return Ok(()); } - MaybeSendResult::AwaitLock(send_lock) => { + MaybeSendResult::AwaitLock((key, send_lock)) => { let send_locked = tokio::select! { guard = send_lock.lock_owned() => {guard}, _ = cancel.cancelled() => { @@ -611,11 +831,11 @@ impl ComputeHook { // Lock order: maybe_send is called within the `[Self::state]` lock, and takes the send lock, but here // we have acquired the send lock and take `[Self::state]` lock. This is safe because maybe_send only uses // try_lock. 
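do_notify acquires its concurrency-limiter permits once and holds them across every retry of a notification, so retries cannot multiply the number of in-flight control-plane requests. A minimal sketch of that pattern with tokio's Semaphore (the limit of 4 is illustrative, standing in for API_CONCURRENCY):

    // Sketch of holding a semaphore permit across all retries of one notification.
    use std::sync::Arc;
    use tokio::sync::Semaphore;

    async fn notify_with_limit(limiter: Arc<Semaphore>, name: String) {
        // Acquired once; dropped only when the whole notification (with retries) is done.
        let _permit = limiter.acquire().await.expect("semaphore not closed");
        println!("notifying {name}");
        // ... send request, retry on transient errors while still holding _permit ...
    }

    #[tokio::main]
    async fn main() {
        let limiter = Arc::new(Semaphore::new(4));
        let tasks: Vec<_> = (0..8)
            .map(|i| tokio::spawn(notify_with_limit(limiter.clone(), format!("tenant-{i}"))))
            .collect();
        for t in tasks {
            t.await.unwrap();
        }
    }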
- let state_locked = self.state.lock().unwrap(); - let Some(tenant) = state_locked.get(&tenant_shard_id.tenant_id) else { + let state_locked = state.lock().unwrap(); + let Some(resource_state) = state_locked.get(&key) else { return Ok(()); }; - match tenant.maybe_send(tenant_shard_id.tenant_id, Some(send_locked)) { + match resource_state.maybe_send(key, Some(send_locked)) { MaybeSendResult::AwaitLock(_) => { unreachable!("We supplied lock guard") } @@ -634,14 +854,18 @@ impl ComputeHook { .control_plane_url .as_ref() .map(|control_plane_url| { - format!("{}/notify-attach", control_plane_url.trim_end_matches('/')) + format!( + "{}/{}", + control_plane_url.trim_end_matches('/'), + M::API_PATH + ) }); // We validate this at startup let notify_url = compute_hook_url.as_ref().unwrap(); self.do_notify(notify_url, &request, cancel).await } else { - self.do_notify_local(&request).await.map_err(|e| { + self.do_notify_local::(&request).await.map_err(|e| { // This path is for testing only, so munge the error into our prod-style error type. tracing::error!("neon_local notification hook failed: {e}"); NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR) @@ -677,7 +901,7 @@ impl ComputeHook { /// Infallible synchronous fire-and-forget version of notify(), that sends its results to /// a channel. Something should consume the channel and arrange to try notifying again /// if something failed. - pub(super) fn notify_background( + pub(super) fn notify_attach_background( self: &Arc, notifications: Vec, result_tx: tokio::sync::mpsc::Sender>, @@ -686,7 +910,7 @@ impl ComputeHook { let mut maybe_sends = Vec::new(); for shard_update in notifications { let tenant_shard_id = shard_update.tenant_shard_id; - let maybe_send_result = self.notify_prepare(shard_update); + let maybe_send_result = self.notify_attach_prepare(shard_update); maybe_sends.push((tenant_shard_id, maybe_send_result)) } @@ -705,10 +929,10 @@ impl ComputeHook { async move { this - .notify_execute(maybe_send_result, tenant_shard_id, &cancel) + .notify_execute(&this.tenants, maybe_send_result, &cancel) .await.map_err(|e| (tenant_shard_id, e)) }.instrument(info_span!( - "notify_background", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug() + "notify_attach_background", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug() )) }) .buffered(API_CONCURRENCY); @@ -751,14 +975,23 @@ impl ComputeHook { /// ensuring that they eventually call again to ensure that the compute is eventually notified of /// the proper pageserver nodes for a tenant. 
#[tracing::instrument(skip_all, fields(tenant_id=%shard_update.tenant_shard_id.tenant_id, shard_id=%shard_update.tenant_shard_id.shard_slug(), node_id))] - pub(super) async fn notify<'a>( + pub(super) async fn notify_attach<'a>( &self, shard_update: ShardUpdate<'a>, cancel: &CancellationToken, ) -> Result<(), NotifyError> { - let tenant_shard_id = shard_update.tenant_shard_id; - let maybe_send_result = self.notify_prepare(shard_update); - self.notify_execute(maybe_send_result, tenant_shard_id, cancel) + let maybe_send_result = self.notify_attach_prepare(shard_update); + self.notify_execute(&self.tenants, maybe_send_result, cancel) + .await + } + + pub(super) async fn notify_safekeepers( + &self, + safekeepers_update: SafekeepersUpdate, + cancel: &CancellationToken, + ) -> Result<(), NotifyError> { + let maybe_send_result = self.notify_safekeepers_prepare(safekeepers_update); + self.notify_execute(&self.timelines, maybe_send_result, cancel) .await } @@ -774,8 +1007,8 @@ impl ComputeHook { ) { use std::collections::hash_map::Entry; - let mut state_locked = self.state.lock().unwrap(); - match state_locked.entry(tenant_shard_id.tenant_id) { + let mut tenants_locked = self.tenants.lock().unwrap(); + match tenants_locked.entry(tenant_shard_id.tenant_id) { Entry::Vacant(_) => { // This is a valid but niche case, where the tenant was previously attached // as a Secondary location and then detached, so has no previously notified diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index 2b1c0db12f..ee446ea65d 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -22,7 +22,7 @@ use pageserver_api::controller_api::{ MetadataHealthListUnhealthyResponse, MetadataHealthUpdateRequest, MetadataHealthUpdateResponse, NodeAvailability, NodeConfigureRequest, NodeRegisterRequest, SafekeeperSchedulingPolicyRequest, ShardsPreferredAzsRequest, TenantCreateRequest, TenantPolicyRequest, TenantShardMigrateRequest, - TimelineImportRequest, + TimelineImportRequest, TimelineSafekeeperMigrateRequest, }; use pageserver_api::models::{ DetachBehavior, LsnLeaseRequest, TenantConfigPatchRequest, TenantConfigRequest, @@ -34,6 +34,7 @@ use pageserver_api::upcall_api::{ PutTimelineImportStatusRequest, ReAttachRequest, TimelineImportStatusRequest, ValidateRequest, }; use pageserver_client::{BlockUnblock, mgmt_api}; + use routerify::Middleware; use tokio_util::sync::CancellationToken; use tracing::warn; @@ -635,6 +636,32 @@ async fn handle_tenant_timeline_download_heatmap_layers( json_response(StatusCode::OK, ()) } +async fn handle_tenant_timeline_safekeeper_migrate( + service: Arc, + req: Request, +) -> Result, ApiError> { + let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?; + check_permissions(&req, Scope::PageServerApi)?; + maybe_rate_limit(&req, tenant_id).await; + + let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?; + + let mut req = match maybe_forward(req).await { + ForwardOutcome::Forwarded(res) => { + return res; + } + ForwardOutcome::NotForwarded(req) => req, + }; + + let migrate_req = json_request::(&mut req).await?; + + service + .tenant_timeline_safekeeper_migrate(tenant_id, timeline_id, migrate_req) + .await?; + + json_response(StatusCode::OK, ()) +} + async fn handle_tenant_timeline_lsn_lease( service: Arc, req: Request, @@ -721,9 +748,9 @@ async fn handle_tenant_timeline_passthrough( // Callers will always pass an unsharded tenant ID. Before proxying, we must // rewrite this to a shard-aware shard zero ID. 
- let path = format!("{}", path); + let path = format!("{path}"); let tenant_str = tenant_or_shard_id.tenant_id.to_string(); - let tenant_shard_str = format!("{}", tenant_shard_id); + let tenant_shard_str = format!("{tenant_shard_id}"); let path = path.replace(&tenant_str, &tenant_shard_str); let latency = &METRICS_REGISTRY @@ -892,7 +919,7 @@ async fn handle_node_drop(req: Request) -> Result, ApiError json_response(StatusCode::OK, state.service.node_drop(node_id).await?) } -async fn handle_node_delete(req: Request) -> Result, ApiError> { +async fn handle_node_delete_old(req: Request) -> Result, ApiError> { check_permissions(&req, Scope::Admin)?; let req = match maybe_forward(req).await { @@ -904,7 +931,46 @@ async fn handle_node_delete(req: Request) -> Result, ApiErr let state = get_state(&req); let node_id: NodeId = parse_request_param(&req, "node_id")?; - json_response(StatusCode::OK, state.service.node_delete(node_id).await?) + json_response( + StatusCode::OK, + state.service.node_delete_old(node_id).await?, + ) +} + +async fn handle_tombstone_list(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let req = match maybe_forward(req).await { + ForwardOutcome::Forwarded(res) => { + return res; + } + ForwardOutcome::NotForwarded(req) => req, + }; + + let state = get_state(&req); + let mut nodes = state.service.tombstone_list().await?; + nodes.sort_by_key(|n| n.get_id()); + let api_nodes = nodes.into_iter().map(|n| n.describe()).collect::>(); + + json_response(StatusCode::OK, api_nodes) +} + +async fn handle_tombstone_delete(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let req = match maybe_forward(req).await { + ForwardOutcome::Forwarded(res) => { + return res; + } + ForwardOutcome::NotForwarded(req) => req, + }; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + json_response( + StatusCode::OK, + state.service.tombstone_delete(node_id).await?, + ) } async fn handle_node_configure(req: Request) -> Result, ApiError> { @@ -988,6 +1054,42 @@ async fn handle_get_leader(req: Request) -> Result, ApiErro json_response(StatusCode::OK, leader) } +async fn handle_node_delete(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Admin)?; + + let req = match maybe_forward(req).await { + ForwardOutcome::Forwarded(res) => { + return res; + } + ForwardOutcome::NotForwarded(req) => req, + }; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + json_response( + StatusCode::OK, + state.service.start_node_delete(node_id).await?, + ) +} + +async fn handle_cancel_node_delete(req: Request) -> Result, ApiError> { + check_permissions(&req, Scope::Infra)?; + + let req = match maybe_forward(req).await { + ForwardOutcome::Forwarded(res) => { + return res; + } + ForwardOutcome::NotForwarded(req) => req, + }; + + let state = get_state(&req); + let node_id: NodeId = parse_request_param(&req, "node_id")?; + json_response( + StatusCode::ACCEPTED, + state.service.cancel_node_delete(node_id).await?, + ) +} + async fn handle_node_drain(req: Request) -> Result, ApiError> { check_permissions(&req, Scope::Infra)?; @@ -1362,6 +1464,31 @@ async fn handle_timeline_import(req: Request) -> Result, Ap ) } +async fn handle_tenant_timeline_locate( + service: Arc, + req: Request, +) -> Result, ApiError> { + let tenant_id: TenantId = parse_request_param(&req, "tenant_id")?; + let timeline_id: TimelineId = parse_request_param(&req, "timeline_id")?; + + 
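The new PUT/DELETE pair on /control/v1/node/:node_id/delete splits node deletion into a start operation and a cancel operation. A rough sketch of the underlying shape, assuming a plain map of CancellationTokens and a sleep as a placeholder for the real drain-and-delete work (the actual service tracks this in an OperationHandler):

    // Sketch: start registers a CancellationToken and spawns the deletion;
    // cancel fires the token so the background task can stop early.
    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};
    use tokio_util::sync::CancellationToken;

    #[derive(Default)]
    struct DeleteOps {
        ongoing: Arc<Mutex<HashMap<u64, CancellationToken>>>,
    }

    impl DeleteOps {
        fn start(&self, node_id: u64) {
            let token = CancellationToken::new();
            self.ongoing.lock().unwrap().insert(node_id, token.clone());
            tokio::spawn(async move {
                tokio::select! {
                    _ = token.cancelled() => println!("delete of node {node_id} cancelled"),
                    _ = tokio::time::sleep(std::time::Duration::from_secs(3600)) => {
                        println!("node {node_id} deleted"); // placeholder for drain + delete
                    }
                }
            });
        }

        fn cancel(&self, node_id: u64) -> bool {
            match self.ongoing.lock().unwrap().remove(&node_id) {
                Some(token) => {
                    token.cancel();
                    true
                }
                None => false,
            }
        }
    }

    #[tokio::main]
    async fn main() {
        let ops = DeleteOps::default();
        ops.start(1);
        assert!(ops.cancel(1));
        tokio::task::yield_now().await; // give the spawned task a chance to observe cancellation
    }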
check_permissions(&req, Scope::Admin)?; + maybe_rate_limit(&req, tenant_id).await; + + match maybe_forward(req).await { + ForwardOutcome::Forwarded(res) => { + return res; + } + ForwardOutcome::NotForwarded(_req) => {} + }; + + json_response( + StatusCode::OK, + service + .tenant_timeline_locate(tenant_id, timeline_id) + .await?, + ) +} + async fn handle_tenants_dump(req: Request) -> Result, ApiError> { check_permissions(&req, Scope::Admin)?; @@ -1478,7 +1605,7 @@ async fn handle_ready(req: Request) -> Result, ApiError> { impl From for ApiError { fn from(value: ReconcileError) -> Self { - ApiError::Conflict(format!("Reconciliation error: {}", value)) + ApiError::Conflict(format!("Reconciliation error: {value}")) } } @@ -2009,10 +2136,10 @@ pub fn make_router( router .data(Arc::new(HttpState::new(service, auth, build_info))) + // Non-prefixed generic endpoints (status, metrics, profiling) .get("/metrics", |r| { named_request_span(r, measured_metrics_handler, RequestName("metrics")) }) - // Non-prefixed generic endpoints (status, metrics, profiling) .get("/status", |r| { named_request_span(r, handle_status, RequestName("status")) }) @@ -2062,6 +2189,20 @@ pub fn make_router( .post("/debug/v1/node/:node_id/drop", |r| { named_request_span(r, handle_node_drop, RequestName("debug_v1_node_drop")) }) + .delete("/debug/v1/tombstone/:node_id", |r| { + named_request_span( + r, + handle_tombstone_delete, + RequestName("debug_v1_tombstone_delete"), + ) + }) + .get("/debug/v1/tombstone", |r| { + named_request_span( + r, + handle_tombstone_list, + RequestName("debug_v1_tombstone_list"), + ) + }) .post("/debug/v1/tenant/:tenant_id/import", |r| { named_request_span( r, @@ -2089,6 +2230,16 @@ pub fn make_router( ) }, ) + .get( + "/debug/v1/tenant/:tenant_id/timeline/:timeline_id/locate", + |r| { + tenant_service_handler( + r, + handle_tenant_timeline_locate, + RequestName("v1_tenant_timeline_locate"), + ) + }, + ) .get("/debug/v1/scheduler", |r| { named_request_span(r, handle_scheduler_dump, RequestName("debug_v1_scheduler")) }) @@ -2109,8 +2260,14 @@ pub fn make_router( .post("/control/v1/node", |r| { named_request_span(r, handle_node_register, RequestName("control_v1_node")) }) + // This endpoint is deprecated and will be removed in a future version. + // Use PUT /control/v1/node/:node_id/delete instead. 
.delete("/control/v1/node/:node_id", |r| { - named_request_span(r, handle_node_delete, RequestName("control_v1_node_delete")) + named_request_span( + r, + handle_node_delete_old, + RequestName("control_v1_node_delete"), + ) }) .get("/control/v1/node", |r| { named_request_span(r, handle_node_list, RequestName("control_v1_node")) @@ -2135,6 +2292,20 @@ pub fn make_router( .get("/control/v1/leader", |r| { named_request_span(r, handle_get_leader, RequestName("control_v1_get_leader")) }) + .put("/control/v1/node/:node_id/delete", |r| { + named_request_span( + r, + handle_node_delete, + RequestName("control_v1_start_node_delete"), + ) + }) + .delete("/control/v1/node/:node_id/delete", |r| { + named_request_span( + r, + handle_cancel_node_delete, + RequestName("control_v1_cancel_node_delete"), + ) + }) .put("/control/v1/node/:node_id/drain", |r| { named_request_span(r, handle_node_drain, RequestName("control_v1_node_drain")) }) @@ -2373,6 +2544,16 @@ pub fn make_router( ) }, ) + .post( + "/v1/tenant/:tenant_id/timeline/:timeline_id/safekeeper_migrate", + |r| { + tenant_service_handler( + r, + handle_tenant_timeline_safekeeper_migrate, + RequestName("v1_tenant_timeline_safekeeper_migrate"), + ) + }, + ) // LSN lease passthrough to all shards .post( "/v1/tenant/:tenant_id/timeline/:timeline_id/lsn_lease", diff --git a/storage_controller/src/lib.rs b/storage_controller/src/lib.rs index a9ec511431..36e3c5dc6c 100644 --- a/storage_controller/src/lib.rs +++ b/storage_controller/src/lib.rs @@ -6,13 +6,13 @@ extern crate hyper0 as hyper; mod auth; mod background_node_operations; mod compute_hook; -mod drain_utils; mod heartbeater; pub mod http; mod id_lock_map; mod leadership; pub mod metrics; mod node; +mod operation_utils; mod pageserver_client; mod peer_client; pub mod persistence; diff --git a/storage_controller/src/main.rs b/storage_controller/src/main.rs index 2eea2f9d10..2a851dc25b 100644 --- a/storage_controller/src/main.rs +++ b/storage_controller/src/main.rs @@ -5,17 +5,20 @@ use std::time::Duration; use anyhow::{Context, anyhow}; use camino::Utf8PathBuf; -use clap::Parser; + +use clap::{ArgAction, Parser}; use futures::future::OptionFuture; use http_utils::tls_certs::ReloadingCertificateResolver; use hyper0::Uri; use metrics::BuildInfo; use metrics::launch_timestamp::LaunchTimestamp; +use pageserver_api::config::PostHogConfig; use reqwest::Certificate; use storage_controller::http::make_router; use storage_controller::metrics::preinitialize_metrics; use storage_controller::persistence::Persistence; use storage_controller::service::chaos_injector::ChaosInjector; +use storage_controller::service::feature_flag::FeatureFlagService; use storage_controller::service::{ Config, HEARTBEAT_INTERVAL_DEFAULT, LONG_RECONCILE_THRESHOLD_DEFAULT, MAX_OFFLINE_INTERVAL_DEFAULT, MAX_WARMING_UP_INTERVAL_DEFAULT, @@ -207,6 +210,18 @@ struct Cli { /// the compute notification directly (instead of via control plane). #[arg(long, default_value = "false")] use_local_compute_notifications: bool, + + /// Number of safekeepers to choose for a timeline when creating it. + /// Safekeepers will be choosen from different availability zones. + /// This option exists primarily for testing purposes. + #[arg(long, default_value = "3", value_parser = clap::builder::RangedU64ValueParser::::new().range(1..))] + timeline_safekeeper_count: usize, + + /// When set, actively checks and initiates heatmap downloads/uploads during reconciliation. 
+ /// This speed up migrations by avoiding the default wait for the heatmap download interval. + /// Primarily useful for testing to reduce test execution time. + #[arg(long, default_value = "false", action=ArgAction::Set)] + kick_secondary_downloads: bool, } enum StrictMode { @@ -236,6 +251,8 @@ struct Secrets { peer_jwt_token: Option, } +const POSTHOG_CONFIG_ENV: &str = "POSTHOG_CONFIG"; + impl Secrets { const DATABASE_URL_ENV: &'static str = "DATABASE_URL"; const PAGESERVER_JWT_TOKEN_ENV: &'static str = "PAGESERVER_JWT_TOKEN"; @@ -371,6 +388,11 @@ async fn async_main() -> anyhow::Result<()> { StrictMode::Strict if args.use_local_compute_notifications => { anyhow::bail!("`--use-local-compute-notifications` is only permitted in `--dev` mode"); } + StrictMode::Strict if args.timeline_safekeeper_count < 3 => { + anyhow::bail!( + "Running with less than 3 safekeepers per timeline is only permitted in `--dev` mode" + ); + } StrictMode::Strict => { tracing::info!("Starting in strict mode: configuration is OK.") } @@ -388,6 +410,18 @@ async fn async_main() -> anyhow::Result<()> { None => Vec::new(), }; + let posthog_config = if let Ok(json) = std::env::var(POSTHOG_CONFIG_ENV) { + let res: Result = serde_json::from_str(&json); + if let Ok(config) = res { + Some(config) + } else { + tracing::warn!("Invalid posthog config: {json}"); + None + } + } else { + None + }; + let config = Config { pageserver_jwt_token: secrets.pageserver_jwt_token, safekeeper_jwt_token: secrets.safekeeper_jwt_token, @@ -433,6 +467,9 @@ async fn async_main() -> anyhow::Result<()> { ssl_ca_certs, timelines_onto_safekeepers: args.timelines_onto_safekeepers, use_local_compute_notifications: args.use_local_compute_notifications, + timeline_safekeeper_count: args.timeline_safekeeper_count, + posthog_config: posthog_config.clone(), + kick_secondary_downloads: args.kick_secondary_downloads, }; // Validate that we can connect to the database @@ -513,6 +550,29 @@ async fn async_main() -> anyhow::Result<()> { ) }); + let feature_flag_task = if let Some(posthog_config) = posthog_config { + let service = service.clone(); + let cancel = CancellationToken::new(); + let cancel_bg = cancel.clone(); + let task = tokio::task::spawn( + async move { + match FeatureFlagService::new(service, posthog_config) { + Ok(feature_flag_service) => { + let feature_flag_service = Arc::new(feature_flag_service); + feature_flag_service.run(cancel_bg).await + } + Err(e) => { + tracing::warn!("Failed to create feature flag service: {}", e); + } + }; + } + .instrument(tracing::info_span!("feature_flag_service")), + ); + Some((task, cancel)) + } else { + None + }; + // Wait until we receive a signal let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt())?; let mut sigquit = tokio::signal::unix::signal(SignalKind::quit())?; @@ -560,6 +620,12 @@ async fn async_main() -> anyhow::Result<()> { chaos_jh.await.ok(); } + // If we were running the feature flag service, stop that so that we're not calling into Service while it shuts down + if let Some((feature_flag_task, feature_flag_cancel)) = feature_flag_task { + feature_flag_cancel.cancel(); + feature_flag_task.await.ok(); + } + service.shutdown().await; tracing::info!("Service shutdown complete"); diff --git a/storage_controller/src/metrics.rs b/storage_controller/src/metrics.rs index ccdbcad139..8738386968 100644 --- a/storage_controller/src/metrics.rs +++ b/storage_controller/src/metrics.rs @@ -76,6 +76,9 @@ pub(crate) struct StorageControllerMetricGroup { /// How many shards would like to reconcile 
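The PostHog integration reads its configuration as JSON from the POSTHOG_CONFIG environment variable and logs-and-ignores anything that fails to parse rather than refusing to start. A sketch of that handling; the struct fields below are placeholders, not the real PostHogConfig:

    // Sketch of env-var JSON config loading with a warn-and-ignore fallback.
    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    struct PostHogConfig {
        api_key: String,      // placeholder field
        host: Option<String>, // placeholder field
    }

    fn posthog_config_from_env(var: &str) -> Option<PostHogConfig> {
        let json = std::env::var(var).ok()?;
        match serde_json::from_str::<PostHogConfig>(&json) {
            Ok(cfg) => Some(cfg),
            Err(e) => {
                // Invalid config disables the feature instead of failing startup.
                eprintln!("Invalid posthog config ({e}): {json}");
                None
            }
        }
    }

    fn main() {
        match posthog_config_from_env("POSTHOG_CONFIG") {
            Some(cfg) => println!("feature flags enabled: {cfg:?}"),
            None => println!("no valid POSTHOG_CONFIG in the environment"),
        }
    }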
but were blocked by concurrency limits pub(crate) storage_controller_pending_reconciles: measured::Gauge, + /// How many shards keep failing to reconcile and will be ignored when deciding whether to run optimizations + pub(crate) storage_controller_keep_failing_reconciles: measured::Gauge, + /// HTTP request status counters for handled requests pub(crate) storage_controller_http_request_status: measured::CounterVec, @@ -97,7 +100,7 @@ pub(crate) struct StorageControllerMetricGroup { /// Count of HTTP requests to the safekeeper that resulted in an error, /// broken down by the safekeeper node id, request name and method pub(crate) storage_controller_safekeeper_request_error: - measured::CounterVec, + measured::CounterVec, /// Latency of HTTP requests to the pageserver, broken down by pageserver /// node id, request name and method. This include both successful and unsuccessful @@ -111,7 +114,7 @@ pub(crate) struct StorageControllerMetricGroup { /// requests. #[metric(metadata = histogram::Thresholds::exponential_buckets(0.1, 2.0))] pub(crate) storage_controller_safekeeper_request_latency: - measured::HistogramVec, + measured::HistogramVec, /// Count of pass-through HTTP requests to the pageserver that resulted in an error, /// broken down by the pageserver node id, request name and method @@ -136,16 +139,17 @@ pub(crate) struct StorageControllerMetricGroup { pub(crate) storage_controller_leadership_status: measured::GaugeVec, - /// HTTP request status counters for handled requests + /// Indicator of stuck (long-running) reconciles, broken down by tenant, shard and sequence. + /// The metric is automatically removed once the reconciliation completes. pub(crate) storage_controller_reconcile_long_running: measured::CounterVec, /// Indicator of safekeeper reconciler queue depth, broken down by safekeeper, excluding ongoing reconciles. - pub(crate) storage_controller_safkeeper_reconciles_queued: + pub(crate) storage_controller_safekeeper_reconciles_queued: measured::GaugeVec, /// Indicator of completed safekeeper reconciles, broken down by safekeeper.
- pub(crate) storage_controller_safkeeper_reconciles_complete: + pub(crate) storage_controller_safekeeper_reconciles_complete: measured::CounterVec, } @@ -218,6 +222,16 @@ pub(crate) struct PageserverRequestLabelGroup<'a> { pub(crate) method: Method, } +#[derive(measured::LabelGroup, Clone)] +#[label(set = SafekeeperRequestLabelGroupSet)] +pub(crate) struct SafekeeperRequestLabelGroup<'a> { + #[label(dynamic_with = lasso::ThreadedRodeo, default)] + pub(crate) safekeeper_id: &'a str, + #[label(dynamic_with = lasso::ThreadedRodeo, default)] + pub(crate) path: &'a str, + pub(crate) method: Method, +} + #[derive(measured::LabelGroup)] #[label(set = DatabaseQueryErrorLabelGroupSet)] pub(crate) struct DatabaseQueryErrorLabelGroup { @@ -322,6 +336,7 @@ pub(crate) enum DatabaseErrorLabel { ConnectionPool, Logical, Migration, + Cas, } impl DatabaseError { @@ -332,6 +347,7 @@ impl DatabaseError { Self::ConnectionPool(_) => DatabaseErrorLabel::ConnectionPool, Self::Logical(_) => DatabaseErrorLabel::Logical, Self::Migration(_) => DatabaseErrorLabel::Migration, + Self::Cas(_) => DatabaseErrorLabel::Cas, } } } diff --git a/storage_controller/src/node.rs b/storage_controller/src/node.rs index e180c49b43..6642c72f3c 100644 --- a/storage_controller/src/node.rs +++ b/storage_controller/src/node.rs @@ -2,7 +2,7 @@ use std::str::FromStr; use std::time::Duration; use pageserver_api::controller_api::{ - AvailabilityZone, NodeAvailability, NodeDescribeResponse, NodeRegisterRequest, + AvailabilityZone, NodeAvailability, NodeDescribeResponse, NodeLifecycle, NodeRegisterRequest, NodeSchedulingPolicy, TenantLocateResponseShard, }; use pageserver_api::shard::TenantShardId; @@ -29,6 +29,7 @@ pub(crate) struct Node { availability: NodeAvailability, scheduling: NodeSchedulingPolicy, + lifecycle: NodeLifecycle, listen_http_addr: String, listen_http_port: u16, @@ -36,6 +37,8 @@ pub(crate) struct Node { listen_pg_addr: String, listen_pg_port: u16, + listen_grpc_addr: Option, + listen_grpc_port: Option, availability_zone_id: AvailabilityZone, @@ -99,8 +102,8 @@ impl Node { self.id == register_req.node_id && self.listen_http_addr == register_req.listen_http_addr && self.listen_http_port == register_req.listen_http_port - // Note: listen_https_port may change. See [`Self::need_update`] for mode details. - // && self.listen_https_port == register_req.listen_https_port + // Note: HTTPS and gRPC addresses may change, to allow for migrations. See + // [`Self::need_update`] for more details. && self.listen_pg_addr == register_req.listen_pg_addr && self.listen_pg_port == register_req.listen_pg_port && self.availability_zone_id == register_req.availability_zone_id @@ -108,9 +111,10 @@ impl Node { // Do we need to update an existing record in DB on this registration request? pub(crate) fn need_update(&self, register_req: &NodeRegisterRequest) -> bool { - // listen_https_port is checked here because it may change during migration to https. - // After migration, this check may be moved to registration_match. + // These are checked here, since they may change before we're fully migrated. 
self.listen_https_port != register_req.listen_https_port + || self.listen_grpc_addr != register_req.listen_grpc_addr + || self.listen_grpc_port != register_req.listen_grpc_port } /// For a shard located on this node, populate a response object @@ -124,6 +128,8 @@ impl Node { listen_https_port: self.listen_https_port, listen_pg_addr: self.listen_pg_addr.clone(), listen_pg_port: self.listen_pg_port, + listen_grpc_addr: self.listen_grpc_addr.clone(), + listen_grpc_port: self.listen_grpc_port, } } @@ -195,6 +201,7 @@ impl Node { match self.scheduling { NodeSchedulingPolicy::Active => MaySchedule::Yes(utilization), + NodeSchedulingPolicy::Deleting => MaySchedule::No, NodeSchedulingPolicy::Draining => MaySchedule::No, NodeSchedulingPolicy::Filling => MaySchedule::Yes(utilization), NodeSchedulingPolicy::Pause => MaySchedule::No, @@ -210,6 +217,8 @@ impl Node { listen_https_port: Option, listen_pg_addr: String, listen_pg_port: u16, + listen_grpc_addr: Option, + listen_grpc_port: Option, availability_zone_id: AvailabilityZone, use_https: bool, ) -> anyhow::Result { @@ -220,6 +229,10 @@ impl Node { ); } + if listen_grpc_addr.is_some() != listen_grpc_port.is_some() { + anyhow::bail!("cannot create node {id}: must specify both gRPC address and port"); + } + Ok(Self { id, listen_http_addr, @@ -227,7 +240,10 @@ impl Node { listen_https_port, listen_pg_addr, listen_pg_port, + listen_grpc_addr, + listen_grpc_port, scheduling: NodeSchedulingPolicy::Active, + lifecycle: NodeLifecycle::Active, availability: NodeAvailability::Offline, availability_zone_id, use_https, @@ -239,11 +255,14 @@ impl Node { NodePersistence { node_id: self.id.0 as i64, scheduling_policy: self.scheduling.into(), + lifecycle: self.lifecycle.into(), listen_http_addr: self.listen_http_addr.clone(), listen_http_port: self.listen_http_port as i32, listen_https_port: self.listen_https_port.map(|x| x as i32), listen_pg_addr: self.listen_pg_addr.clone(), listen_pg_port: self.listen_pg_port as i32, + listen_grpc_addr: self.listen_grpc_addr.clone(), + listen_grpc_port: self.listen_grpc_port.map(|port| port as i32), availability_zone_id: self.availability_zone_id.0.clone(), } } @@ -257,17 +276,27 @@ impl Node { ); } + if np.listen_grpc_addr.is_some() != np.listen_grpc_port.is_some() { + anyhow::bail!( + "can't load node {}: must specify both gRPC address and port", + np.node_id + ); + } + Ok(Self { id: NodeId(np.node_id as u64), // At startup we consider a node offline until proven otherwise. 
availability: NodeAvailability::Offline, scheduling: NodeSchedulingPolicy::from_str(&np.scheduling_policy) .expect("Bad scheduling policy in DB"), + lifecycle: NodeLifecycle::from_str(&np.lifecycle).expect("Bad lifecycle in DB"), listen_http_addr: np.listen_http_addr, listen_http_port: np.listen_http_port as u16, listen_https_port: np.listen_https_port.map(|x| x as u16), listen_pg_addr: np.listen_pg_addr, listen_pg_port: np.listen_pg_port as u16, + listen_grpc_addr: np.listen_grpc_addr, + listen_grpc_port: np.listen_grpc_port.map(|port| port as u16), availability_zone_id: AvailabilityZone(np.availability_zone_id), use_https, cancel: CancellationToken::new(), @@ -357,6 +386,8 @@ impl Node { listen_https_port: self.listen_https_port, listen_pg_addr: self.listen_pg_addr.clone(), listen_pg_port: self.listen_pg_port, + listen_grpc_addr: self.listen_grpc_addr.clone(), + listen_grpc_port: self.listen_grpc_port, } } } diff --git a/storage_controller/src/drain_utils.rs b/storage_controller/src/operation_utils.rs similarity index 61% rename from storage_controller/src/drain_utils.rs rename to storage_controller/src/operation_utils.rs index bd4b8ba38f..af86010ab7 100644 --- a/storage_controller/src/drain_utils.rs +++ b/storage_controller/src/operation_utils.rs @@ -10,67 +10,23 @@ use crate::node::Node; use crate::scheduler::Scheduler; use crate::tenant_shard::TenantShard; -pub(crate) struct TenantShardIterator { - tenants_accessor: F, - inspected_all_shards: bool, - last_inspected_shard: Option, -} - -/// A simple iterator which can be used in tandem with [`crate::service::Service`] -/// to iterate over all known tenant shard ids without holding the lock on the -/// service state at all times. -impl TenantShardIterator -where - F: Fn(Option) -> Option, -{ - pub(crate) fn new(tenants_accessor: F) -> Self { - Self { - tenants_accessor, - inspected_all_shards: false, - last_inspected_shard: None, - } - } - - /// Returns the next tenant shard id if one exists - pub(crate) fn next(&mut self) -> Option { - if self.inspected_all_shards { - return None; - } - - match (self.tenants_accessor)(self.last_inspected_shard) { - Some(tid) => { - self.last_inspected_shard = Some(tid); - Some(tid) - } - None => { - self.inspected_all_shards = true; - None - } - } - } - - /// Returns true when the end of the iterator is reached and false otherwise - pub(crate) fn finished(&self) -> bool { - self.inspected_all_shards - } -} - /// Check that the state of the node being drained is as expected: -/// node is present in memory and scheduling policy is set to [`NodeSchedulingPolicy::Draining`] +/// node is present in memory and scheduling policy is set to expected_policy pub(crate) fn validate_node_state( node_id: &NodeId, nodes: Arc>, + expected_policy: NodeSchedulingPolicy, ) -> Result<(), OperationError> { let node = nodes.get(node_id).ok_or(OperationError::NodeStateChanged( - format!("node {} was removed", node_id).into(), + format!("node {node_id} was removed").into(), ))?; let current_policy = node.get_scheduling(); - if !matches!(current_policy, NodeSchedulingPolicy::Draining) { + if current_policy != expected_policy { // TODO(vlad): maybe cancel pending reconciles before erroring out. 
need to think // about it return Err(OperationError::NodeStateChanged( - format!("node {} changed state to {:?}", node_id, current_policy).into(), + format!("node {node_id} changed state to {current_policy:?}").into(), )); } @@ -145,7 +101,7 @@ impl TenantShardDrain { if !nodes.contains_key(&destination) { return Err(OperationError::NodeStateChanged( - format!("node {} was removed", destination).into(), + format!("node {destination} was removed").into(), )); } @@ -182,55 +138,3 @@ impl TenantShardDrain { } } } - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use utils::id::TenantId; - use utils::shard::{ShardCount, ShardNumber, TenantShardId}; - - use super::TenantShardIterator; - - #[test] - fn test_tenant_shard_iterator() { - let tenant_id = TenantId::generate(); - let shard_count = ShardCount(8); - - let mut tenant_shards = Vec::default(); - for i in 0..shard_count.0 { - tenant_shards.push(( - TenantShardId { - tenant_id, - shard_number: ShardNumber(i), - shard_count, - }, - (), - )) - } - - let tenant_shards = Arc::new(tenant_shards); - - let mut tid_iter = TenantShardIterator::new({ - let tenants = tenant_shards.clone(); - move |last_inspected_shard: Option| { - let entry = match last_inspected_shard { - Some(skip_past) => { - let mut cursor = tenants.iter().skip_while(|(tid, _)| *tid != skip_past); - cursor.nth(1) - } - None => tenants.first(), - }; - - entry.map(|(tid, _)| tid).copied() - } - }); - - let mut iterated_over = Vec::default(); - while let Some(tid) = tid_iter.next() { - iterated_over.push((tid, ())); - } - - assert_eq!(iterated_over, *tenant_shards); - } -} diff --git a/storage_controller/src/pageserver_client.rs b/storage_controller/src/pageserver_client.rs index 817409e112..d6fe173eb3 100644 --- a/storage_controller/src/pageserver_client.rs +++ b/storage_controller/src/pageserver_client.rs @@ -376,4 +376,13 @@ impl PageserverClient { .await ) } + + pub(crate) async fn update_feature_flag_spec(&self, spec: String) -> Result<()> { + measured_request!( + "update_feature_flag_spec", + crate::metrics::Method::Post, + &self.node_id_label, + self.inner.update_feature_flag_spec(spec).await + ) + } } diff --git a/storage_controller/src/persistence.rs b/storage_controller/src/persistence.rs index 052c0f02eb..ed9a268064 100644 --- a/storage_controller/src/persistence.rs +++ b/storage_controller/src/persistence.rs @@ -19,7 +19,7 @@ use futures::FutureExt; use futures::future::BoxFuture; use itertools::Itertools; use pageserver_api::controller_api::{ - AvailabilityZone, MetadataHealthRecord, NodeSchedulingPolicy, PlacementPolicy, + AvailabilityZone, MetadataHealthRecord, NodeLifecycle, NodeSchedulingPolicy, PlacementPolicy, SafekeeperDescribeResponse, ShardSchedulingPolicy, SkSchedulingPolicy, }; use pageserver_api::models::{ShardImportStatus, TenantConfig}; @@ -29,6 +29,7 @@ use pageserver_api::shard::{ use rustls::client::WebPkiServerVerifier; use rustls::client::danger::{ServerCertVerified, ServerCertVerifier}; use rustls::crypto::ring; +use safekeeper_api::membership::SafekeeperGeneration; use scoped_futures::ScopedBoxFuture; use serde::{Deserialize, Serialize}; use utils::generation::Generation; @@ -94,6 +95,8 @@ pub(crate) enum DatabaseError { Logical(String), #[error("Migration error: {0}")] Migration(String), + #[error("CAS error: {0}")] + Cas(String), } #[derive(measured::FixedCardinalityLabel, Copy, Clone)] @@ -102,6 +105,7 @@ pub(crate) enum DatabaseOperation { UpdateNode, DeleteNode, ListNodes, + ListTombstones, BeginShardSplit, CompleteShardSplit, 
AbortShardSplit, @@ -125,6 +129,7 @@ pub(crate) enum DatabaseOperation { UpdateLeader, SetPreferredAzs, InsertTimeline, + UpdateTimelineMembership, GetTimeline, InsertTimelineReconcile, RemoveTimelineReconcile, @@ -357,6 +362,8 @@ impl Persistence { } /// When a node is first registered, persist it before using it for anything + /// If the provided node_id already exists, this returns an error. + /// The common case is a node that was marked for deletion trying to register again. pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> { let np = &node.to_persistent(); self.with_measured_conn(DatabaseOperation::InsertNode, move |conn| { @@ -373,19 +380,41 @@ impl Persistence { /// At startup, populate the list of nodes which our shards may be placed on pub(crate) async fn list_nodes(&self) -> DatabaseResult> { - let nodes: Vec = self + use crate::schema::nodes::dsl::*; + + let result: Vec = self .with_measured_conn(DatabaseOperation::ListNodes, move |conn| { Box::pin(async move { Ok(crate::schema::nodes::table + .filter(lifecycle.ne(String::from(NodeLifecycle::Deleted))) .load::(conn) .await?) }) }) .await?; - tracing::info!("list_nodes: loaded {} nodes", nodes.len()); + tracing::info!("list_nodes: loaded {} nodes", result.len()); - Ok(nodes) + Ok(result) + } + + pub(crate) async fn list_tombstones(&self) -> DatabaseResult> { + use crate::schema::nodes::dsl::*; + + let result: Vec = self + .with_measured_conn(DatabaseOperation::ListTombstones, move |conn| { + Box::pin(async move { + Ok(crate::schema::nodes::table + .filter(lifecycle.eq(String::from(NodeLifecycle::Deleted))) + .load::(conn) + .await?) + }) + }) + .await?; + + tracing::info!("list_tombstones: loaded {} nodes", result.len()); + + Ok(result) } pub(crate) async fn update_node( @@ -404,6 +433,7 @@ impl Persistence { Box::pin(async move { let updated = diesel::update(nodes) .filter(node_id.eq(input_node_id.0 as i64)) + .filter(lifecycle.ne(String::from(NodeLifecycle::Deleted))) .set(values) .execute(conn) .await?; @@ -447,6 +477,55 @@ impl Persistence { .await } + /// A tombstone is a special state where the node is not deleted from the database, + /// but it is not available for use. + /// The main reason for it is to prevent a flaky node from registering again. + pub(crate) async fn set_tombstone(&self, del_node_id: NodeId) -> DatabaseResult<()> { + use crate::schema::nodes::dsl::*; + self.update_node( + del_node_id, + lifecycle.eq(String::from(NodeLifecycle::Deleted)), + ) + .await + } + + pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> { + use crate::schema::nodes::dsl::*; + self.with_measured_conn(DatabaseOperation::DeleteNode, move |conn| { + Box::pin(async move { + // You can hard delete a node only if it has a tombstone. + // So we need to check that the node's lifecycle is set to deleted. + let node_to_delete = nodes + .filter(node_id.eq(del_node_id.0 as i64)) + .first::(conn) + .await + .optional()?; + + if let Some(np) = node_to_delete { + let lc = NodeLifecycle::from_str(&np.lifecycle).map_err(|e| { + DatabaseError::Logical(format!( + "Node {del_node_id} has invalid lifecycle: {e}" + )) + })?; + + if lc != NodeLifecycle::Deleted { + return Err(DatabaseError::Logical(format!( + "Node {del_node_id} was not soft deleted before, cannot hard delete it" + ))); + } + + diesel::delete(nodes) + .filter(node_id.eq(del_node_id.0 as i64)) + .execute(conn) + .await?; + } + + Ok(()) + }) + }) + .await + } + + /// At startup, load the high level state for shards, such as their config + policy.
This will /// be enriched at runtime with state discovered on pageservers. /// @@ -543,21 +622,6 @@ impl Persistence { .await } - pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> { - use crate::schema::nodes::dsl::*; - self.with_measured_conn(DatabaseOperation::DeleteNode, move |conn| { - Box::pin(async move { - diesel::delete(nodes) - .filter(node_id.eq(del_node_id.0 as i64)) - .execute(conn) - .await?; - - Ok(()) - }) - }) - .await - } - /// When a tenant invokes the /re-attach API, this function is responsible for doing an efficient /// batched increment of the generations of all tenants whose generation_pageserver is equal to /// the node that called /re-attach. @@ -571,6 +635,24 @@ impl Persistence { let updated = self .with_measured_conn(DatabaseOperation::ReAttach, move |conn| { Box::pin(async move { + let node: Option = nodes + .filter(node_id.eq(input_node_id.0 as i64)) + .first::(conn) + .await + .optional()?; + + // Check if the node is not marked as deleted + match node { + Some(node) if matches!(NodeLifecycle::from_str(&node.lifecycle), Ok(NodeLifecycle::Deleted)) => { + return Err(DatabaseError::Logical(format!( + "Node {input_node_id} is marked as deleted, re-attach is not allowed" + ))); + } + _ => { + // go through + } + }; + let rows_updated = diesel::update(tenant_shards) .filter(generation_pageserver.eq(input_node_id.0 as i64)) .set(generation.eq(generation + 1)) @@ -587,21 +669,23 @@ impl Persistence { .load(conn) .await?; - // If the node went through a drain and restart phase before re-attaching, - // then reset it's node scheduling policy to active. - diesel::update(nodes) - .filter(node_id.eq(input_node_id.0 as i64)) - .filter( - scheduling_policy - .eq(String::from(NodeSchedulingPolicy::PauseForRestart)) - .or(scheduling_policy - .eq(String::from(NodeSchedulingPolicy::Draining))) - .or(scheduling_policy - .eq(String::from(NodeSchedulingPolicy::Filling))), - ) - .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active))) - .execute(conn) - .await?; + if let Some(node) = node { + let old_scheduling_policy = + NodeSchedulingPolicy::from_str(&node.scheduling_policy).unwrap(); + let new_scheduling_policy = match old_scheduling_policy { + NodeSchedulingPolicy::Active => NodeSchedulingPolicy::Active, + NodeSchedulingPolicy::PauseForRestart => NodeSchedulingPolicy::Active, + NodeSchedulingPolicy::Draining => NodeSchedulingPolicy::Active, + NodeSchedulingPolicy::Filling => NodeSchedulingPolicy::Active, + NodeSchedulingPolicy::Pause => NodeSchedulingPolicy::Pause, + NodeSchedulingPolicy::Deleting => NodeSchedulingPolicy::Pause, + }; + diesel::update(nodes) + .filter(node_id.eq(input_node_id.0 as i64)) + .set(scheduling_policy.eq(String::from(new_scheduling_policy))) + .execute(conn) + .await?; + } Ok(updated) }) @@ -927,7 +1011,7 @@ impl Persistence { .execute(conn).await?; if u8::try_from(updated) .map_err(|_| DatabaseError::Logical( - format!("Overflow existing shard count {} while splitting", updated)) + format!("Overflow existing shard count {updated} while splitting")) )? != old_shard_count.count() { // Perhaps a deletion or another split raced with this attempt to split, mutating // the parent shards that we intend to split. In this case the split request should fail. 
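An aside on the re-attach hunk above: the scheduling-policy reset can be read as a pure transition function. The sketch below is illustrative only (the helper name is invented; the variants mirror NodeSchedulingPolicy as used elsewhere in this diff):

use pageserver_api::controller_api::NodeSchedulingPolicy;

// Sketch only: the scheduling policy a node ends up with after /re-attach,
// mirroring the match arms in the hunk above.
fn policy_after_reattach(old: NodeSchedulingPolicy) -> NodeSchedulingPolicy {
    use NodeSchedulingPolicy::*;
    match old {
        // A node that restarted mid drain/fill/restart goes back to Active.
        Active | PauseForRestart | Draining | Filling => Active,
        // An operator-paused node stays paused; a node mid-deletion is parked in Pause.
        Pause | Deleting => Pause,
    }
}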
@@ -1267,8 +1351,7 @@ impl Persistence { if inserted_updated != 1 { return Err(DatabaseError::Logical(format!( - "unexpected number of rows ({})", - inserted_updated + "unexpected number of rows ({inserted_updated})" ))); } @@ -1312,6 +1395,48 @@ impl Persistence { .await } + /// Activate the given safekeeper, ensuring that there is no TOCTOU. + /// Returns `Some` if the safekeeper has indeed been activating (or already active). Other states return `None`. + pub(crate) async fn activate_safekeeper(&self, id_: i64) -> Result, DatabaseError> { + use crate::schema::safekeepers::dsl::*; + + self.with_conn(move |conn| { + Box::pin(async move { + #[derive(Insertable, AsChangeset)] + #[diesel(table_name = crate::schema::safekeepers)] + struct UpdateSkSchedulingPolicy<'a> { + id: i64, + scheduling_policy: &'a str, + } + let scheduling_policy_active = String::from(SkSchedulingPolicy::Active); + let scheduling_policy_activating = String::from(SkSchedulingPolicy::Activating); + + let rows_affected = diesel::update( + safekeepers.filter(id.eq(id_)).filter( + scheduling_policy + .eq(scheduling_policy_activating) + .or(scheduling_policy.eq(&scheduling_policy_active)), + ), + ) + .set(scheduling_policy.eq(&scheduling_policy_active)) + .execute(conn) + .await?; + + if rows_affected == 0 { + return Ok(Some(())); + } + if rows_affected != 1 { + return Err(DatabaseError::Logical(format!( + "unexpected number of rows ({rows_affected})", + ))); + } + + Ok(Some(())) + }) + }) + .await + } + /// Persist timeline. Returns if the timeline was newly inserted. If it wasn't, we haven't done any writes. pub(crate) async fn insert_timeline(&self, entry: TimelinePersistence) -> DatabaseResult { use crate::schema::timelines; @@ -1330,8 +1455,57 @@ impl Persistence { 0 => Ok(false), 1 => Ok(true), _ => Err(DatabaseError::Logical(format!( - "unexpected number of rows ({})", - inserted_updated + "unexpected number of rows ({inserted_updated})" + ))), + } + }) + }) + .await + } + + /// Update timeline membership configuration in the database. + /// Perform a compare-and-swap (CAS) operation on the timeline's generation. + /// The `new_generation` must be the next (+1) generation after the one in the database. + pub(crate) async fn update_timeline_membership( + &self, + tenant_id: TenantId, + timeline_id: TimelineId, + new_generation: SafekeeperGeneration, + sk_set: &[NodeId], + new_sk_set: Option<&[NodeId]>, + ) -> DatabaseResult<()> { + use crate::schema::timelines::dsl; + + let prev_generation = new_generation.previous().unwrap(); + + let tenant_id = &tenant_id; + let timeline_id = &timeline_id; + self.with_measured_conn(DatabaseOperation::UpdateTimelineMembership, move |conn| { + Box::pin(async move { + let updated = diesel::update(dsl::timelines) + .filter(dsl::tenant_id.eq(&tenant_id.to_string())) + .filter(dsl::timeline_id.eq(&timeline_id.to_string())) + .filter(dsl::generation.eq(prev_generation.into_inner() as i32)) + .set(( + dsl::generation.eq(new_generation.into_inner() as i32), + dsl::sk_set.eq(sk_set.iter().map(|id| id.0 as i64).collect::>()), + dsl::new_sk_set.eq(new_sk_set + .map(|set| set.iter().map(|id| id.0 as i64).collect::>())), + )) + .execute(conn) + .await?; + + match updated { + 0 => { + // TODO(diko): It makes sense to select the current generation + // and include it in the error message for better debuggability. 
+ Err(DatabaseError::Cas( + "Failed to update membership configuration".to_string(), + )) + } + 1 => Ok(()), + _ => Err(DatabaseError::Logical(format!( + "unexpected number of rows ({updated})" ))), } }) @@ -1400,8 +1574,7 @@ impl Persistence { 0 => Ok(()), 1 => Ok(()), _ => Err(DatabaseError::Logical(format!( - "unexpected number of rows ({})", - updated + "unexpected number of rows ({updated})" ))), } }) @@ -1494,8 +1667,7 @@ impl Persistence { 0 => Ok(false), 1 => Ok(true), _ => Err(DatabaseError::Logical(format!( - "unexpected number of rows ({})", - inserted_updated + "unexpected number of rows ({inserted_updated})" ))), } }) @@ -2048,6 +2220,9 @@ pub(crate) struct NodePersistence { pub(crate) listen_pg_port: i32, pub(crate) availability_zone_id: String, pub(crate) listen_https_port: Option, + pub(crate) lifecycle: String, + pub(crate) listen_grpc_addr: Option, + pub(crate) listen_grpc_port: Option, } /// Tenant metadata health status that are stored durably. diff --git a/storage_controller/src/reconciler.rs b/storage_controller/src/reconciler.rs index b03a6dae04..a2fba0fa56 100644 --- a/storage_controller/src/reconciler.rs +++ b/storage_controller/src/reconciler.rs @@ -65,7 +65,7 @@ pub(super) struct Reconciler { pub(crate) compute_hook: Arc, /// To avoid stalling if the cloud control plane is unavailable, we may proceed - /// past failures in [`ComputeHook::notify`], but we _must_ remember that we failed + /// past failures in [`ComputeHook::notify_attach`], but we _must_ remember that we failed /// so that we can set [`crate::tenant_shard::TenantShard::pending_compute_notification`] to ensure a later retry. pub(crate) compute_notify_failure: bool, @@ -856,6 +856,7 @@ impl Reconciler { &self.shard, &self.config, &self.placement_policy, + self.intent.secondary.len(), ); match self.observed.locations.get(&node.get_id()) { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => { @@ -1022,7 +1023,7 @@ impl Reconciler { if let Some(node) = &self.intent.attached { let result = self .compute_hook - .notify( + .notify_attach( compute_hook::ShardUpdate { tenant_shard_id: self.tenant_shard_id, node_id: node.get_id(), @@ -1235,11 +1236,11 @@ pub(crate) fn attached_location_conf( shard: &ShardIdentity, config: &TenantConfig, policy: &PlacementPolicy, + secondary_count: usize, ) -> LocationConfig { let has_secondaries = match policy { - PlacementPolicy::Attached(0) | PlacementPolicy::Detached | PlacementPolicy::Secondary => { - false - } + PlacementPolicy::Detached | PlacementPolicy::Secondary => false, + PlacementPolicy::Attached(0) => secondary_count > 0, PlacementPolicy::Attached(_) => true, }; diff --git a/storage_controller/src/safekeeper.rs b/storage_controller/src/safekeeper.rs index 5a13ef750e..91154f4fa3 100644 --- a/storage_controller/src/safekeeper.rs +++ b/storage_controller/src/safekeeper.rs @@ -2,6 +2,7 @@ use std::time::Duration; use pageserver_api::controller_api::{SafekeeperDescribeResponse, SkSchedulingPolicy}; use reqwest::StatusCode; +use safekeeper_api::membership::SafekeeperId; use safekeeper_client::mgmt_api; use tokio_util::sync::CancellationToken; use utils::backoff; @@ -92,6 +93,13 @@ impl Safekeeper { pub(crate) fn has_https_port(&self) -> bool { self.listen_https_port.is_some() } + pub(crate) fn get_safekeeper_id(&self) -> SafekeeperId { + SafekeeperId { + id: self.id, + host: self.skp.host.clone(), + pg_port: self.skp.port as u16, + } + } /// Perform an operation (which is given a [`SafekeeperClient`]) with retries #[allow(clippy::too_many_arguments)] pub(crate) 
async fn with_client_retries( diff --git a/storage_controller/src/safekeeper_client.rs b/storage_controller/src/safekeeper_client.rs index 1f3ea96d96..47a785e7d3 100644 --- a/storage_controller/src/safekeeper_client.rs +++ b/storage_controller/src/safekeeper_client.rs @@ -5,7 +5,7 @@ use safekeeper_client::mgmt_api::{Client, Result}; use utils::id::{NodeId, TenantId, TimelineId}; use utils::logging::SecretString; -use crate::metrics::PageserverRequestLabelGroup; +use crate::metrics::SafekeeperRequestLabelGroup; /// Thin wrapper around [`safekeeper_client::mgmt_api::Client`]. It allows the storage /// controller to collect metrics in a non-intrusive manner. @@ -19,8 +19,8 @@ pub(crate) struct SafekeeperClient { macro_rules! measured_request { ($name:literal, $method:expr, $node_id: expr, $invoke:expr) => {{ - let labels = PageserverRequestLabelGroup { - pageserver_id: $node_id, + let labels = SafekeeperRequestLabelGroup { + safekeeper_id: $node_id, path: $name, method: $method, }; @@ -35,7 +35,7 @@ macro_rules! measured_request { if res.is_err() { let error_counters = &crate::metrics::METRICS_REGISTRY .metrics_group - .storage_controller_pageserver_request_error; + .storage_controller_safekeeper_request_error; error_counters.inc(labels) } @@ -56,6 +56,10 @@ impl SafekeeperClient { } } + pub(crate) fn node_id_label(&self) -> &str { + &self.node_id_label + } + pub(crate) async fn create_timeline( &self, req: &TimelineCreateRequest, diff --git a/storage_controller/src/scheduler.rs b/storage_controller/src/scheduler.rs index 773373391e..b86b4dfab1 100644 --- a/storage_controller/src/scheduler.rs +++ b/storage_controller/src/scheduler.rs @@ -23,7 +23,7 @@ pub enum ScheduleError { impl From for ApiError { fn from(value: ScheduleError) -> Self { - ApiError::Conflict(format!("Scheduling error: {}", value)) + ApiError::Conflict(format!("Scheduling error: {value}")) } } @@ -825,6 +825,7 @@ impl Scheduler { struct AzScore { home_shard_count: usize, scheduleable: bool, + node_count: usize, } let mut azs: HashMap<&AvailabilityZone, AzScore> = HashMap::new(); @@ -832,6 +833,7 @@ impl Scheduler { let az = azs.entry(&node.az).or_default(); az.home_shard_count += node.home_shard_count; az.scheduleable |= matches!(node.may_schedule, MaySchedule::Yes(_)); + az.node_count += 1; } // If any AZs are schedulable, then filter out the non-schedulable ones (i.e. AZs where @@ -840,10 +842,20 @@ impl Scheduler { azs.retain(|_, i| i.scheduleable); } + // We will multiply up shard counts by the max node count for scoring, before dividing + // by per-node max node count, to get a normalized score that doesn't collapse to zero + // when the absolute shard count is less than the node count. + let max_node_count = azs.values().map(|i| i.node_count).max().unwrap_or(0); + // Find the AZ with the lowest number of shards currently allocated Some( azs.into_iter() - .min_by_key(|i| (i.1.home_shard_count, i.0)) + .min_by_key(|i| { + ( + (i.1.home_shard_count * max_node_count) / i.1.node_count, + i.0, + ) + }) .unwrap() .0 .clone(), @@ -891,7 +903,7 @@ impl Scheduler { /// rigorously updating them on every change. 
pub(crate) fn update_metrics(&self) { for (node_id, node) in &self.nodes { - let node_id_str = format!("{}", node_id); + let node_id_str = format!("{node_id}"); let label_group = NodeLabelGroup { az: &node.az.0, node_id: &node_id_str, @@ -945,6 +957,8 @@ pub(crate) mod test_utils { None, format!("pghost-{i}"), 5432 + i as u16, + Some(format!("grpchost-{i}")), + Some(51051 + i as u16), az_iter .next() .cloned() @@ -1312,7 +1326,7 @@ mod tests { .map(|(node_id, node)| (node_id, node.home_shard_count)) .collect::>(); node_home_counts.sort_by_key(|i| i.0); - eprintln!("Selected {}, vs nodes {:?}", preferred_az, node_home_counts); + eprintln!("Selected {preferred_az}, vs nodes {node_home_counts:?}"); let tenant_shard_id = TenantShardId { tenant_id: TenantId::generate(), diff --git a/storage_controller/src/schema.rs b/storage_controller/src/schema.rs index 20be9bb5ca..312f7e0b0e 100644 --- a/storage_controller/src/schema.rs +++ b/storage_controller/src/schema.rs @@ -33,6 +33,9 @@ diesel::table! { listen_pg_port -> Int4, availability_zone_id -> Varchar, listen_https_port -> Nullable, + lifecycle -> Varchar, + listen_grpc_addr -> Nullable, + listen_grpc_port -> Nullable, } } diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 790797bae2..9360225396 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -1,7 +1,8 @@ pub mod chaos_injector; -mod context_iterator; +pub mod feature_flag; pub(crate) mod safekeeper_reconciler; mod safekeeper_service; +mod tenant_shard_iterator; use std::borrow::Cow; use std::cmp::Ordering; @@ -15,7 +16,6 @@ use std::sync::{Arc, OnceLock}; use std::time::{Duration, Instant, SystemTime}; use anyhow::Context; -use context_iterator::TenantShardContextIterator; use control_plane::storage_controller::{ AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse, }; @@ -25,12 +25,13 @@ use futures::stream::FuturesUnordered; use http_utils::error::ApiError; use hyper::Uri; use itertools::Itertools; +use pageserver_api::config::PostHogConfig; use pageserver_api::controller_api::{ AvailabilityZone, MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability, NodeRegisterRequest, NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy, ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse, - TenantCreateRequest, TenantCreateResponse, TenantCreateResponseShard, TenantDescribeResponse, - TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest, + SkSchedulingPolicy, TenantCreateRequest, TenantCreateResponse, TenantCreateResponseShard, + TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest, TenantShardMigrateRequest, TenantShardMigrateResponse, }; use pageserver_api::models::{ @@ -53,6 +54,7 @@ use pageserver_client::{BlockUnblock, mgmt_api}; use reqwest::{Certificate, StatusCode}; use safekeeper_api::models::SafekeeperUtilization; use safekeeper_reconciler::SafekeeperReconcilers; +use tenant_shard_iterator::{TenantShardExclusiveIterator, create_shared_shard_iterator}; use tokio::sync::TryAcquireError; use tokio::sync::mpsc::error::TrySendError; use tokio_util::sync::CancellationToken; @@ -66,10 +68,9 @@ use utils::sync::gate::{Gate, GateGuard}; use utils::{failpoint_support, pausable_failpoint}; use crate::background_node_operations::{ - Drain, Fill, MAX_RECONCILES_PER_OPERATION, Operation, OperationError, OperationHandler, + Delete, Drain, Fill, MAX_RECONCILES_PER_OPERATION, Operation, OperationError, 
OperationHandler, }; use crate::compute_hook::{self, ComputeHook, NotifyError}; -use crate::drain_utils::{self, TenantShardDrain, TenantShardIterator}; use crate::heartbeater::{Heartbeater, PageserverState, SafekeeperState}; use crate::id_lock_map::{ IdLockMap, TracingExclusiveGuard, trace_exclusive_lock, trace_shared_lock, @@ -77,6 +78,7 @@ use crate::leadership::Leadership; use crate::metrics; use crate::node::{AvailabilityTransition, Node}; +use crate::operation_utils::{self, TenantShardDrain}; use crate::pageserver_client::PageserverClient; use crate::peer_client::GlobalObservedState; use crate::persistence::split_state::SplitState; @@ -103,7 +105,7 @@ use crate::timeline_import::{ TimelineImportFinalizeError, TimelineImportState, UpcallClient, }; -const WAITER_FILL_DRAIN_POLL_TIMEOUT: Duration = Duration::from_millis(500); +const WAITER_OPERATION_POLL_TIMEOUT: Duration = Duration::from_millis(500); // For operations that should be quick, like attaching a new tenant const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5); @@ -159,6 +161,7 @@ enum TenantOperations { DropDetached, DownloadHeatmapLayers, TimelineLsnLease, + TimelineSafekeeperMigrate, } #[derive(Clone, strum_macros::Display)] @@ -166,6 +169,7 @@ enum NodeOperations { Register, Configure, Delete, + DeleteTombstone, } /// The leadership status for the storage controller process. @@ -206,6 +210,10 @@ pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128; pub const PRIORITY_RECONCILER_CONCURRENCY_DEFAULT: usize = 256; pub const SAFEKEEPER_RECONCILER_CONCURRENCY_DEFAULT: usize = 32; +// Number of consecutive reconciliation errors that occurred for one shard, +// after which the shard is ignored when deciding whether to run optimizations. +const MAX_CONSECUTIVE_RECONCILIATION_ERRORS: usize = 5; + // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately. // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly // than they're being pushed onto the queue. @@ -259,7 +267,7 @@ fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError { // Presume errors receiving body are connectivity/availability issues except for decoding errors let src_str = err.source().map(|e| e.to_string()).unwrap_or_default(); ApiError::ResourceUnavailable( - format!("{node} error receiving error body: {err} {}", src_str).into(), + format!("{node} error receiving error body: {err} {src_str}").into(), ) } mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => { @@ -465,6 +473,16 @@ pub struct Config { pub timelines_onto_safekeepers: bool, pub use_local_compute_notifications: bool, + + /// Number of safekeepers to choose for a timeline when creating it. + /// Safekeepers will be chosen from different availability zones. + pub timeline_safekeeper_count: usize, + + /// PostHog integration config + pub posthog_config: Option, + + /// When set, actively checks and initiates heatmap downloads/uploads.
+ pub kick_secondary_downloads: bool, } impl From for ApiError { @@ -478,6 +496,7 @@ impl From for ApiError { DatabaseError::Logical(reason) | DatabaseError::Migration(reason) => { ApiError::InternalServerError(anyhow::anyhow!(reason)) } + DatabaseError::Cas(reason) => ApiError::Conflict(reason), } } } @@ -562,7 +581,9 @@ impl From for ApiError { impl From for ApiError { fn from(value: OperationError) -> Self { match value { - OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => { + OperationError::NodeStateChanged(err) + | OperationError::FinalizeError(err) + | OperationError::ImpossibleConstraint(err) => { ApiError::InternalServerError(anyhow::anyhow!(err)) } OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()), @@ -663,7 +684,7 @@ impl std::fmt::Display for StopReconciliationsReason { Self::ShuttingDown => "Shutting down", Self::SteppingDown => "Stepping down", }; - write!(writer, "{}", s) + write!(writer, "{s}") } } @@ -687,6 +708,36 @@ struct ShardMutationLocations { #[derive(Default, Clone)] struct TenantMutationLocations(BTreeMap); +struct ReconcileAllResult { + spawned_reconciles: usize, + keep_failing_reconciles: usize, + has_delayed_reconciles: bool, +} + +impl ReconcileAllResult { + fn new( + spawned_reconciles: usize, + keep_failing_reconciles: usize, + has_delayed_reconciles: bool, + ) -> Self { + assert!( + spawned_reconciles >= keep_failing_reconciles, + "It is impossible to have more keep-failing reconciles than spawned reconciles" + ); + Self { + spawned_reconciles, + keep_failing_reconciles, + has_delayed_reconciles, + } + } + + /// We can run optimizations only if we don't have any delayed reconciles and + /// all spawned reconciles are also keep-failing reconciles. + fn can_run_optimizations(&self) -> bool { + !self.has_delayed_reconciles && self.spawned_reconciles == self.keep_failing_reconciles + } +} + impl Service { pub fn get_config(&self) -> &Config { &self.config @@ -863,18 +914,18 @@ impl Service { // Emit compute hook notifications for all tenants which are already stably attached. Other tenants // will emit compute hook notifications when they reconcile. // - // Ordering: our calls to notify_background synchronously establish a relative order for these notifications vs. any later + // Ordering: our calls to notify_attach_background synchronously establish a relative order for these notifications vs. any later // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later // calls will be correctly ordered wrt these. // - // Concurrency: we call notify_background for all tenants, which will create O(N) tokio tasks, but almost all of them + // Concurrency: we call notify_attach_background for all tenants, which will create O(N) tokio tasks, but almost all of them // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so very cheap until they get that semaphore // unit and start doing I/O. tracing::info!( "Sending {} compute notifications", compute_notifications.len() ); - self.compute_hook.notify_background( + self.compute_hook.notify_attach_background( compute_notifications, bg_compute_notify_result_tx.clone(), &self.cancel, @@ -884,7 +935,7 @@ impl Service { // which require it: under normal circumstances this should only include tenants that were in some // transient state before we restarted, or any tenants whose compute hooks failed above. 
tracing::info!("Checking for shards in need of reconciliation..."); - let reconcile_tasks = self.reconcile_all(); + let reconcile_all_result = self.reconcile_all(); // We will not wait for these reconciliation tasks to run here: we're now done with startup and // normal operations may proceed. @@ -932,8 +983,9 @@ impl Service { } } + let spawned_reconciles = reconcile_all_result.spawned_reconciles; tracing::info!( - "Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)" + "Startup complete, spawned {spawned_reconciles} reconciliation tasks ({shard_count} shards total)" ); } @@ -1107,7 +1159,8 @@ impl Service { observed } - /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers. + /// Used during [`Self::startup_reconcile`] and shard splits: detach a list of unknown-to-us + /// tenants from pageservers. /// /// This is safe to run in the background, because if we don't have this TenantShardId in our map of /// tenants, then it is probably something incompletely deleted before: we will not fight with any @@ -1183,8 +1236,8 @@ impl Service { while !self.reconcilers_cancel.is_cancelled() { tokio::select! { _ = interval.tick() => { - let reconciles_spawned = self.reconcile_all(); - if reconciles_spawned == 0 { + let reconcile_all_result = self.reconcile_all(); + if reconcile_all_result.can_run_optimizations() { // Run optimizer only when we didn't find any other work to do self.optimize_all().await; } @@ -1198,7 +1251,7 @@ impl Service { } /// Heartbeat all storage nodes once in a while. #[instrument(skip_all)] - async fn spawn_heartbeat_driver(&self) { + async fn spawn_heartbeat_driver(self: &Arc) { self.startup_complete.clone().wait().await; let mut interval = tokio::time::interval(self.config.heartbeat_interval); @@ -1325,18 +1378,51 @@ impl Service { } } if let Ok(deltas) = res_sk { - let mut locked = self.inner.write().unwrap(); - let mut safekeepers = (*locked.safekeepers).clone(); - for (id, state) in deltas.0 { - let Some(sk) = safekeepers.get_mut(&id) else { - tracing::info!( - "Couldn't update safekeeper safekeeper state for id {id} from heartbeat={state:?}" - ); - continue; - }; - sk.set_availability(state); + let mut to_activate = Vec::new(); + { + let mut locked = self.inner.write().unwrap(); + let mut safekeepers = (*locked.safekeepers).clone(); + + for (id, state) in deltas.0 { + let Some(sk) = safekeepers.get_mut(&id) else { + tracing::info!( + "Couldn't update safekeeper safekeeper state for id {id} from heartbeat={state:?}" + ); + continue; + }; + if sk.scheduling_policy() == SkSchedulingPolicy::Activating + && let SafekeeperState::Available { .. 
} = state + { + to_activate.push(id); + } + sk.set_availability(state); + } + locked.safekeepers = Arc::new(safekeepers); + } + for sk_id in to_activate { + // TODO this can race with set_scheduling_policy (can create disjoint DB <-> in-memory state) + tracing::info!("Activating safekeeper {sk_id}"); + match self.persistence.activate_safekeeper(sk_id.0 as i64).await { + Ok(Some(())) => {} + Ok(None) => { + tracing::info!( + "safekeeper {sk_id} has been removed from db or has different scheduling policy than active or activating" + ); + } + Err(e) => { + tracing::warn!("couldn't apply activation of {sk_id} to db: {e}"); + continue; + } + } + if let Err(e) = self + .set_safekeeper_scheduling_policy_in_mem(sk_id, SkSchedulingPolicy::Active) + .await + { + tracing::info!("couldn't activate safekeeper {sk_id} in memory: {e}"); + continue; + } + tracing::info!("Activation of safekeeper {sk_id} done"); } - locked.safekeepers = Arc::new(safekeepers); } } } @@ -1392,6 +1478,7 @@ impl Service { match result.result { Ok(()) => { + tenant.consecutive_errors_count = 0; tenant.apply_observed_deltas(deltas); tenant.waiter.advance(result.sequence); } @@ -1410,6 +1497,8 @@ impl Service { } } + tenant.consecutive_errors_count = tenant.consecutive_errors_count.saturating_add(1); + // Ordering: populate last_error before advancing error_seq, // so that waiters will see the correct error after waiting. tenant.set_last_error(result.sequence, e); @@ -1681,6 +1770,8 @@ impl Service { None, "".to_string(), 123, + None, + None, AvailabilityZone("test_az".to_string()), false, ) @@ -2056,6 +2147,7 @@ impl Service { &tenant_shard.shard, &tenant_shard.config, &PlacementPolicy::Attached(0), + tenant_shard.intent.get_secondary().len(), )), }, )]); @@ -2265,6 +2357,7 @@ impl Service { // fail, and start from scratch, so it doesn't make sense for us to try and preserve // the stale/multi states at this point. 
mode: LocationConfigMode::AttachedSingle, + stripe_size: shard.shard.stripe_size, }); shard.generation = std::cmp::max(shard.generation, Some(new_gen)); @@ -2298,6 +2391,7 @@ impl Service { id: *tenant_shard_id, r#gen: None, mode: LocationConfigMode::Secondary, + stripe_size: shard.shard.stripe_size, }); // We must not update observed, because we have no guarantee that our @@ -2322,6 +2416,7 @@ impl Service { NodeSchedulingPolicy::PauseForRestart | NodeSchedulingPolicy::Draining | NodeSchedulingPolicy::Filling + | NodeSchedulingPolicy::Deleting ); let mut new_nodes = (**nodes).clone(); @@ -2563,7 +2658,7 @@ impl Service { .do_initial_shard_scheduling( tenant_shard_id, initial_generation, - &create_req.shard_parameters, + create_req.shard_parameters, create_req.config.clone(), placement_policy.clone(), preferred_az_id.as_ref(), @@ -2620,7 +2715,7 @@ impl Service { &self, tenant_shard_id: TenantShardId, initial_generation: Option, - shard_params: &ShardParameters, + shard_params: ShardParameters, config: TenantConfig, placement_policy: PlacementPolicy, preferred_az_id: Option<&AvailabilityZone>, @@ -5264,7 +5359,7 @@ impl Service { shard_params, result .iter() - .map(|s| format!("{:?}", s)) + .map(|s| format!("{s:?}")) .collect::>() .join(",") ); @@ -5595,7 +5690,15 @@ impl Service { for parent_id in parent_ids { let child_ids = parent_id.split(new_shard_count); - let (pageserver, generation, policy, parent_ident, config, preferred_az) = { + let ( + pageserver, + generation, + policy, + parent_ident, + config, + preferred_az, + secondary_count, + ) = { let mut old_state = tenants .remove(&parent_id) .expect("It was present, we just split it"); @@ -5615,6 +5718,7 @@ impl Service { old_state.shard, old_state.config.clone(), old_state.preferred_az().cloned(), + old_state.intent.get_secondary().len(), ) }; @@ -5636,6 +5740,7 @@ impl Service { &child_shard, &config, &policy, + secondary_count, )), }, ); @@ -6177,7 +6282,7 @@ impl Service { }, ) .await - .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?; + .map_err(|e| ApiError::Conflict(format!("Failed to split {parent_id}: {e}")))?; fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict( "failpoint".to_string() @@ -6194,7 +6299,7 @@ impl Service { response .new_shards .iter() - .map(|s| format!("{:?}", s)) + .map(|s| format!("{s:?}")) .collect::>() .join(",") ); @@ -6210,7 +6315,11 @@ impl Service { } } - pausable_failpoint!("shard-split-pre-complete"); + fail::fail_point!("shard-split-pre-complete", |_| Err(ApiError::Conflict( + "failpoint".to_string() + ))); + + pausable_failpoint!("shard-split-pre-complete-pause"); // TODO: if the pageserver restarted concurrently with our split API call, // the actual generation of the child shard might differ from the generation @@ -6232,12 +6341,21 @@ impl Service { let (response, child_locations, waiters) = self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size); + // Notify all page servers to detach and clean up the old shards because they will no longer + // be needed. This is best-effort: if it fails, it will be cleaned up on a subsequent + // Pageserver re-attach/startup. 
+ let shards_to_cleanup = targets + .iter() + .map(|target| (target.parent_id, target.node.get_id())) + .collect(); + self.cleanup_locations(shards_to_cleanup).await; + // Send compute notifications for all the new shards let mut failed_notifications = Vec::new(); for (child_id, child_ps, stripe_size) in child_locations { if let Err(e) = self .compute_hook - .notify( + .notify_attach( compute_hook::ShardUpdate { tenant_shard_id: child_id, node_id: child_ps, @@ -6634,6 +6752,8 @@ impl Service { /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a /// tenant with a very high generation number so that it will see the existing data. + /// It does not create timelines on safekeepers, because they might already exist on some + /// safekeeper set. So, the timelines are not storcon-managed after the import. pub(crate) async fn tenant_import( &self, tenant_id: TenantId, @@ -6909,7 +7029,7 @@ impl Service { /// detaching or deleting it on pageservers. We do not try and re-schedule any /// tenants that were on this node. pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> { - self.persistence.delete_node(node_id).await?; + self.persistence.set_tombstone(node_id).await?; let mut locked = self.inner.write().unwrap(); @@ -6938,7 +7058,7 @@ impl Service { /// If a node has any work on it, it will be rescheduled: this is "clean" in the sense /// that we don't leave any bad state behind in the storage controller, but unclean /// in the sense that we are not carefully draining the node. - pub(crate) async fn node_delete(&self, node_id: NodeId) -> Result<(), ApiError> { + pub(crate) async fn node_delete_old(&self, node_id: NodeId) -> Result<(), ApiError> { let _node_lock = trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await; @@ -6972,7 +7092,7 @@ impl Service { } for (_tenant_id, mut schedule_context, shards) in - TenantShardContextIterator::new(tenants, ScheduleMode::Normal) + TenantShardExclusiveIterator::new(tenants, ScheduleMode::Normal) { for shard in shards { if shard.deref_node(node_id) { @@ -7033,9 +7153,175 @@ impl Service { // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node // that exists. - // 2. Actually delete the node from the database and from in-memory state + // 2. Actually delete the node from in-memory state and set tombstone to the database + // for preventing the node to register again. 
tracing::info!("Deleting node from database"); - self.persistence.delete_node(node_id).await?; + self.persistence.set_tombstone(node_id).await?; + + Ok(()) + } + + pub(crate) async fn delete_node( + self: &Arc, + node_id: NodeId, + policy_on_start: NodeSchedulingPolicy, + cancel: CancellationToken, + ) -> Result<(), OperationError> { + let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal).build(); + + let mut waiters: Vec = Vec::new(); + let mut tid_iter = create_shared_shard_iterator(self.clone()); + + while !tid_iter.finished() { + if cancel.is_cancelled() { + match self + .node_configure(node_id, None, Some(policy_on_start)) + .await + { + Ok(()) => return Err(OperationError::Cancelled), + Err(err) => { + return Err(OperationError::FinalizeError( + format!( + "Failed to finalise delete cancel of {} by setting scheduling policy to {}: {}", + node_id, String::from(policy_on_start), err + ) + .into(), + )); + } + } + } + + operation_utils::validate_node_state( + &node_id, + self.inner.read().unwrap().nodes.clone(), + NodeSchedulingPolicy::Deleting, + )?; + + while waiters.len() < MAX_RECONCILES_PER_OPERATION { + let tid = match tid_iter.next() { + Some(tid) => tid, + None => { + break; + } + }; + + let mut locked = self.inner.write().unwrap(); + let (nodes, tenants, scheduler) = locked.parts_mut(); + + let tenant_shard = match tenants.get_mut(&tid) { + Some(tenant_shard) => tenant_shard, + None => { + // Tenant shard was deleted by another operation. Skip it. + continue; + } + }; + + match tenant_shard.get_scheduling_policy() { + ShardSchedulingPolicy::Active | ShardSchedulingPolicy::Essential => { + // A migration during delete is classed as 'essential' because it is required to + // uphold our availability goals for the tenant: this shard is elegible for migration. + } + ShardSchedulingPolicy::Pause | ShardSchedulingPolicy::Stop => { + // If we have been asked to avoid rescheduling this shard, then do not migrate it during a deletion + tracing::warn!( + tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(), + "Skip migration during deletion because shard scheduling policy {:?} disallows it", + tenant_shard.get_scheduling_policy(), + ); + continue; + } + } + + if tenant_shard.deref_node(node_id) { + // TODO(ephemeralsad): we should process all shards in a tenant at once, so + // we can avoid settling the tenant unevenly. 
+ let mut schedule_context = ScheduleContext::new(ScheduleMode::Normal); + if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) { + tracing::error!( + "Refusing to delete node, shard {} can't be rescheduled: {e}", + tenant_shard.tenant_shard_id + ); + return Err(OperationError::ImpossibleConstraint(e.to_string().into())); + } else { + tracing::info!( + "Rescheduled shard {} away from node during deletion", + tenant_shard.tenant_shard_id + ) + } + + let waiter = self.maybe_configured_reconcile_shard( + tenant_shard, + nodes, + reconciler_config, + ); + if let Some(some) = waiter { + waiters.push(some); + } + } + } + + waiters = self + .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT) + .await; + + failpoint_support::sleep_millis_async!("sleepy-delete-loop", &cancel); + } + + while !waiters.is_empty() { + if cancel.is_cancelled() { + match self + .node_configure(node_id, None, Some(policy_on_start)) + .await + { + Ok(()) => return Err(OperationError::Cancelled), + Err(err) => { + return Err(OperationError::FinalizeError( + format!( + "Failed to finalise drain cancel of {} by setting scheduling policy to {}: {}", + node_id, String::from(policy_on_start), err + ) + .into(), + )); + } + } + } + + tracing::info!("Awaiting {} pending delete reconciliations", waiters.len()); + + waiters = self + .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT) + .await; + } + + self.persistence + .set_tombstone(node_id) + .await + .map_err(|e| OperationError::FinalizeError(e.to_string().into()))?; + + { + let mut locked = self.inner.write().unwrap(); + let (nodes, _, scheduler) = locked.parts_mut(); + + scheduler.node_remove(node_id); + + let mut nodes_mut = (**nodes).clone(); + if let Some(mut removed_node) = nodes_mut.remove(&node_id) { + // Ensure that any reconciler holding an Arc<> to this node will + // drop out when trying to RPC to it (setting Offline state sets the + // cancellation token on the Node object). + removed_node.set_availability(NodeAvailability::Offline); + } + *nodes = Arc::new(nodes_mut); + + metrics::METRICS_REGISTRY + .metrics_group + .storage_controller_pageserver_nodes + .set(nodes.len() as i64); + metrics::METRICS_REGISTRY + .metrics_group + .storage_controller_https_pageserver_nodes + .set(nodes.values().filter(|n| n.has_https_port()).count() as i64); + } Ok(()) } @@ -7054,6 +7340,34 @@ impl Service { Ok(nodes) } + pub(crate) async fn tombstone_list(&self) -> Result, ApiError> { + self.persistence + .list_tombstones() + .await? + .into_iter() + .map(|np| Node::from_persistent(np, false)) + .collect::, _>>() + .map_err(ApiError::InternalServerError) + } + + pub(crate) async fn tombstone_delete(&self, node_id: NodeId) -> Result<(), ApiError> { + let _node_lock = trace_exclusive_lock( + &self.node_op_locks, + node_id, + NodeOperations::DeleteTombstone, + ) + .await; + + if matches!(self.get_node(node_id).await, Err(ApiError::NotFound(_))) { + self.persistence.delete_node(node_id).await?; + Ok(()) + } else { + Err(ApiError::Conflict(format!( + "Node {node_id} is in use, consider using tombstone API first" + ))) + } + } + pub(crate) async fn get_node(&self, node_id: NodeId) -> Result { self.inner .read() @@ -7205,6 +7519,12 @@ impl Service { )); } + if register_req.listen_grpc_addr.is_some() != register_req.listen_grpc_port.is_some() { + return Err(ApiError::BadRequest(anyhow::anyhow!( + "must specify both gRPC address and port" + ))); + } + // Ordering: we must persist the new node _before_ adding it to in-memory state. 
// This ensures that before we use it for anything or expose it via any external // API, it is guaranteed to be available after a restart. @@ -7215,6 +7535,8 @@ impl Service { register_req.listen_https_port, register_req.listen_pg_addr, register_req.listen_pg_port, + register_req.listen_grpc_addr, + register_req.listen_grpc_port, register_req.availability_zone_id.clone(), self.config.use_https_pageserver_api, ); @@ -7224,7 +7546,25 @@ impl Service { }; match registration_status { - RegistrationStatus::New => self.persistence.insert_node(&new_node).await?, + RegistrationStatus::New => { + self.persistence.insert_node(&new_node).await.map_err(|e| { + if matches!( + e, + crate::persistence::DatabaseError::Query( + diesel::result::Error::DatabaseError( + diesel::result::DatabaseErrorKind::UniqueViolation, + _, + ) + ) + ) { + // The node can be deleted via the tombstone API and not show up in the list of nodes. + // If you see this error, check tombstones first. + ApiError::Conflict(format!("Node {} already exists", new_node.get_id())) + } else { + ApiError::from(e) + } + })?; + } RegistrationStatus::NeedUpdate => { self.persistence .update_node_on_registration( @@ -7374,7 +7714,7 @@ impl Service { let mut tenants_affected: usize = 0; for (_tenant_id, mut schedule_context, shards) in - TenantShardContextIterator::new(tenants, ScheduleMode::Normal) + TenantShardExclusiveIterator::new(tenants, ScheduleMode::Normal) { for tenant_shard in shards { let tenant_shard_id = tenant_shard.tenant_shard_id; @@ -7545,6 +7885,142 @@ impl Service { self.node_configure(node_id, availability, scheduling).await } + pub(crate) async fn start_node_delete( + self: &Arc, + node_id: NodeId, + ) -> Result<(), ApiError> { + let (ongoing_op, node_policy, schedulable_nodes_count) = { + let locked = self.inner.read().unwrap(); + let nodes = &locked.nodes; + let node = nodes.get(&node_id).ok_or(ApiError::NotFound( + anyhow::anyhow!("Node {} not registered", node_id).into(), + ))?; + let schedulable_nodes_count = nodes + .iter() + .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_))) + .count(); + + ( + locked + .ongoing_operation + .as_ref() + .map(|ongoing| ongoing.operation), + node.get_scheduling(), + schedulable_nodes_count, + ) + }; + + if let Some(ongoing) = ongoing_op { + return Err(ApiError::PreconditionFailed( + format!("Background operation already ongoing for node: {ongoing}").into(), + )); + } + + if schedulable_nodes_count == 0 { + return Err(ApiError::PreconditionFailed( + "No other schedulable nodes to move shards".into(), + )); + } + + match node_policy { + NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => { + self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Deleting)) + .await?; + + let cancel = self.cancel.child_token(); + let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?; + let policy_on_start = node_policy; + + self.inner.write().unwrap().ongoing_operation = Some(OperationHandler { + operation: Operation::Delete(Delete { node_id }), + cancel: cancel.clone(), + }); + + let span = tracing::info_span!(parent: None, "delete_node", %node_id); + + tokio::task::spawn( + { + let service = self.clone(); + let cancel = cancel.clone(); + async move { + let _gate_guard = gate_guard; + + scopeguard::defer!
{ + let prev = service.inner.write().unwrap().ongoing_operation.take(); + + if let Some(Operation::Delete(removed_delete)) = prev.map(|h| h.operation) { + assert_eq!(removed_delete.node_id, node_id, "We always take the same operation"); + } else { + panic!("We always remove the same operation") + } + } + + tracing::info!("Delete background operation starting"); + let res = service + .delete_node(node_id, policy_on_start, cancel) + .await; + match res { + Ok(()) => { + tracing::info!( + "Delete background operation completed successfully" + ); + } + Err(OperationError::Cancelled) => { + tracing::info!("Delete background operation was cancelled"); + } + Err(err) => { + tracing::error!( + "Delete background operation encountered: {err}" + ) + } + } + } + } + .instrument(span), + ); + } + NodeSchedulingPolicy::Deleting => { + return Err(ApiError::Conflict(format!( + "Node {node_id} has delete in progress" + ))); + } + policy => { + return Err(ApiError::PreconditionFailed( + format!("Node {node_id} cannot be deleted due to {policy:?} policy").into(), + )); + } + } + + Ok(()) + } + + pub(crate) async fn cancel_node_delete( + self: &Arc, + node_id: NodeId, + ) -> Result<(), ApiError> { + { + let locked = self.inner.read().unwrap(); + let nodes = &locked.nodes; + nodes.get(&node_id).ok_or(ApiError::NotFound( + anyhow::anyhow!("Node {} not registered", node_id).into(), + ))?; + } + + if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() { + if let Operation::Delete(delete) = op_handler.operation { + if delete.node_id == node_id { + tracing::info!("Cancelling background delete operation for node {node_id}"); + op_handler.cancel.cancel(); + return Ok(()); + } + } + } + + Err(ApiError::PreconditionFailed( + format!("Node {node_id} has no delete in progress").into(), + )) + } + pub(crate) async fn start_node_drain( self: &Arc, node_id: NodeId, @@ -7573,7 +8049,7 @@ impl Service { if let Some(ongoing) = ongoing_op { return Err(ApiError::PreconditionFailed( - format!("Background operation already ongoing for node: {}", ongoing).into(), + format!("Background operation already ongoing for node: {ongoing}").into(), )); } @@ -7704,7 +8180,7 @@ impl Service { if let Some(ongoing) = ongoing_op { return Err(ApiError::PreconditionFailed( - format!("Background operation already ongoing for node: {}", ongoing).into(), + format!("Background operation already ongoing for node: {ongoing}").into(), )); } @@ -7925,7 +8401,7 @@ impl Service { /// Returns how many reconciliation tasks were started, or `1` if no reconciles were /// spawned but some _would_ have been spawned if `reconciler_concurrency` units where /// available. A return value of 0 indicates that everything is fully reconciled already. - fn reconcile_all(&self) -> usize { + fn reconcile_all(&self) -> ReconcileAllResult { let mut locked = self.inner.write().unwrap(); let (nodes, tenants, scheduler) = locked.parts_mut(); let pageservers = nodes.clone(); @@ -7933,13 +8409,16 @@ impl Service { // This function is an efficient place to update lazy statistics, since we are walking // all tenants. let mut pending_reconciles = 0; + let mut keep_failing_reconciles = 0; let mut az_violations = 0; // If we find any tenants to drop from memory, stash them to offload after // we're done traversing the map of tenants. 
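The start/cancel pair above hinges on a single "ongoing operation" slot: starting stores a handle carrying a child cancellation token, a scopeguard clears the slot however the spawned task exits, and the cancel endpoint only fires the token. A condensed sketch of that shape, assuming the same crates this patch already uses (tokio, tokio_util, scopeguard) but with simplified, hypothetical types:

    use std::sync::{Arc, Mutex};
    use tokio_util::sync::CancellationToken;

    // Hypothetical stand-in for the service's ongoing-operation slot.
    struct OngoingOp {
        name: &'static str,
        cancel: CancellationToken,
    }

    #[derive(Default)]
    struct State {
        ongoing: Mutex<Option<OngoingOp>>,
    }

    async fn do_the_work() { /* migrate shards away, then tombstone the node */ }

    fn start_background_op(state: Arc<State>) -> Result<(), String> {
        let cancel = CancellationToken::new();
        {
            let mut slot = state.ongoing.lock().unwrap();
            if slot.is_some() {
                return Err("another background operation is already running".into());
            }
            *slot = Some(OngoingOp { name: "delete", cancel: cancel.clone() });
        }

        tokio::spawn({
            let state = state.clone();
            async move {
                // Clear the slot however the task ends: success, error or cancellation.
                scopeguard::defer! {
                    state.ongoing.lock().unwrap().take();
                }
                tokio::select! {
                    _ = cancel.cancelled() => { /* roll the scheduling policy back here */ }
                    _ = do_the_work() => {}
                }
            }
        });
        Ok(())
    }

    fn cancel_background_op(state: &State) -> bool {
        // The cancel endpoint only signals the token; the task cleans up after itself.
        match state.ongoing.lock().unwrap().as_ref() {
            Some(op) if op.name == "delete" => { op.cancel.cancel(); true }
            _ => false,
        }
    }

Because cleanup lives in the scopeguard rather than in the cancel handler, a cancelled, failed or completed delete all leave the slot empty for the next operation.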
let mut drop_detached_tenants = Vec::new(); - let mut reconciles_spawned = 0; + let mut spawned_reconciles = 0; + let mut has_delayed_reconciles = false; + for shard in tenants.values_mut() { // Accumulate scheduling statistics if let (Some(attached), Some(preferred)) = @@ -7959,18 +8438,32 @@ impl Service { // If there is something delayed, then return a nonzero count so that // callers like reconcile_all_now do not incorrectly get the impression // that the system is in a quiescent state. - reconciles_spawned = std::cmp::max(1, reconciles_spawned); + has_delayed_reconciles = true; pending_reconciles += 1; continue; } // Eventual consistency: if an earlier reconcile job failed, and the shard is still // dirty, spawn another one + let consecutive_errors_count = shard.consecutive_errors_count; if self .maybe_reconcile_shard(shard, &pageservers, ReconcilerPriority::Normal) .is_some() { - reconciles_spawned += 1; + spawned_reconciles += 1; + + // Count shards that keep failing. We still want to reconcile them + // to avoid a situation where a shard is stuck. + // But we don't want to consider them when deciding to run optimizations. + if consecutive_errors_count >= MAX_CONSECUTIVE_RECONCILIATION_ERRORS { + tracing::warn!( + tenant_id=%shard.tenant_shard_id.tenant_id, + shard_id=%shard.tenant_shard_id.shard_slug(), + "Shard reconciliation keeps failing: {} consecutive errors", + consecutive_errors_count + ); + keep_failing_reconciles += 1; + } } else if shard.delayed_reconcile { // Shard wanted to reconcile but for some reason couldn't. pending_reconciles += 1; @@ -8009,7 +8502,16 @@ impl Service { .storage_controller_pending_reconciles .set(pending_reconciles as i64); - reconciles_spawned + metrics::METRICS_REGISTRY + .metrics_group + .storage_controller_keep_failing_reconciles + .set(keep_failing_reconciles as i64); + + ReconcileAllResult::new( + spawned_reconciles, + keep_failing_reconciles, + has_delayed_reconciles, + ) } /// `optimize` in this context means identifying shards which have valid scheduled locations, but @@ -8095,7 +8597,7 @@ impl Service { // to ignore the utilisation component of the score. for (_tenant_id, schedule_context, shards) in - TenantShardContextIterator::new(tenants, ScheduleMode::Speculative) + TenantShardExclusiveIterator::new(tenants, ScheduleMode::Speculative) { for shard in shards { if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS { @@ -8265,7 +8767,6 @@ impl Service { "Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}" ); - #[cfg(feature = "testing")] if progress.heatmap_mtime.is_none() { // No heatmap might mean the attached location has never uploaded one, or that // the secondary download hasn't happened yet. This is relatively unusual in the field, @@ -8290,8 +8791,12 @@ impl Service { /// happens on multi-minute timescales in the field, which is fine because optimisation is meant /// to be a lazy background thing. However, when testing, it is not practical to wait around, so /// we have this helper to move things along faster. - #[cfg(feature = "testing")] async fn kick_secondary_download(&self, tenant_shard_id: TenantShardId) { + if !self.config.kick_secondary_downloads { + // No-op if the kick_secondary_downloads functionality is not configured + return; + } + let (attached_node, secondaries) = { let locked = self.inner.read().unwrap(); let Some(shard) = locked.tenants.get(&tenant_shard_id) else { @@ -8679,13 +9184,13 @@ impl Service { /// also wait for any generated Reconcilers to complete.
Calling this until it returns zero should /// put the system into a quiescent state where future background reconciliations won't do anything. pub(crate) async fn reconcile_all_now(&self) -> Result { - let reconciles_spawned = self.reconcile_all(); - let reconciles_spawned = if reconciles_spawned == 0 { + let reconcile_all_result = self.reconcile_all(); + let mut spawned_reconciles = reconcile_all_result.spawned_reconciles; + if reconcile_all_result.can_run_optimizations() { // Only optimize when we are otherwise idle - self.optimize_all().await - } else { - reconciles_spawned - }; + let optimization_reconciles = self.optimize_all().await; + spawned_reconciles += optimization_reconciles; + } let waiters = { let mut waiters = Vec::new(); @@ -8701,25 +9206,32 @@ impl Service { let waiter_count = waiters.len(); match self.await_waiters(waiters, RECONCILE_TIMEOUT).await { Ok(()) => {} - Err(ReconcileWaitError::Failed(_, reconcile_error)) - if matches!(*reconcile_error, ReconcileError::Cancel) => - { - // Ignore reconciler cancel errors: this reconciler might have shut down - // because some other change superceded it. We will return a nonzero number, - // so the caller knows they might have to call again to quiesce the system. - } Err(e) => { - return Err(e); + if let ReconcileWaitError::Failed(_, reconcile_error) = &e { + match **reconcile_error { + ReconcileError::Cancel + | ReconcileError::Remote(mgmt_api::Error::Cancelled) => { + // Ignore reconciler cancel errors: this reconciler might have shut down + // because some other change superceded it. We will return a nonzero number, + // so the caller knows they might have to call again to quiesce the system. + } + _ => { + return Err(e); + } + } + } else { + return Err(e); + } } }; tracing::info!( "{} reconciles in reconcile_all, {} waiters", - reconciles_spawned, + spawned_reconciles, waiter_count ); - Ok(std::cmp::max(waiter_count, reconciles_spawned)) + Ok(std::cmp::max(waiter_count, spawned_reconciles)) } async fn stop_reconciliations(&self, reason: StopReconciliationsReason) { @@ -8763,7 +9275,7 @@ impl Service { let nodes = self.inner.read().unwrap().nodes.clone(); let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError( StatusCode::NOT_FOUND, - format!("Node with id {} not found", secondary), + format!("Node with id {secondary} not found"), ))?; match node @@ -8812,25 +9324,7 @@ impl Service { let mut waiters = Vec::new(); - let mut tid_iter = TenantShardIterator::new({ - let service = self.clone(); - move |last_inspected_shard: Option| { - let locked = &service.inner.read().unwrap(); - let tenants = &locked.tenants; - let entry = match last_inspected_shard { - Some(skip_past) => { - // Skip to the last seen tenant shard id - let mut cursor = tenants.iter().skip_while(|(tid, _)| **tid != skip_past); - - // Skip past the last seen - cursor.nth(1) - } - None => tenants.first_key_value(), - }; - - entry.map(|(tid, _)| tid).copied() - } - }); + let mut tid_iter = create_shared_shard_iterator(self.clone()); while !tid_iter.finished() { if cancel.is_cancelled() { @@ -8842,8 +9336,7 @@ impl Service { Err(err) => { return Err(OperationError::FinalizeError( format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", - node_id, err + "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}" ) .into(), )); @@ -8851,7 +9344,11 @@ impl Service { } } - drain_utils::validate_node_state(&node_id, self.inner.read().unwrap().nodes.clone())?; + 
operation_utils::validate_node_state( + &node_id, + self.inner.read().unwrap().nodes.clone(), + NodeSchedulingPolicy::Draining, + )?; while waiters.len() < MAX_RECONCILES_PER_OPERATION { let tid = match tid_iter.next() { @@ -8931,7 +9428,7 @@ impl Service { } waiters = self - .await_waiters_remainder(waiters, WAITER_FILL_DRAIN_POLL_TIMEOUT) + .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT) .await; failpoint_support::sleep_millis_async!("sleepy-drain-loop", &cancel); @@ -8947,8 +9444,7 @@ impl Service { Err(err) => { return Err(OperationError::FinalizeError( format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", - node_id, err + "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}" ) .into(), )); @@ -9158,8 +9654,7 @@ impl Service { Err(err) => { return Err(OperationError::FinalizeError( format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", - node_id, err + "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}" ) .into(), )); @@ -9227,7 +9722,7 @@ impl Service { } waiters = self - .await_waiters_remainder(waiters, WAITER_FILL_DRAIN_POLL_TIMEOUT) + .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT) .await; } @@ -9241,8 +9736,7 @@ impl Service { Err(err) => { return Err(OperationError::FinalizeError( format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", - node_id, err + "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}" ) .into(), )); diff --git a/storage_controller/src/service/chaos_injector.rs b/storage_controller/src/service/chaos_injector.rs index 9c7a9e3798..4087de200a 100644 --- a/storage_controller/src/service/chaos_injector.rs +++ b/storage_controller/src/service/chaos_injector.rs @@ -107,7 +107,7 @@ impl ChaosInjector { // - Skip shards doing a graceful migration already, so that we allow these to run to // completion rather than only exercising the first part and then cancelling with // some other chaos. 
- !matches!(shard.get_scheduling_policy(), ShardSchedulingPolicy::Active) + matches!(shard.get_scheduling_policy(), ShardSchedulingPolicy::Active) && shard.get_preferred_node().is_none() } diff --git a/storage_controller/src/service/feature_flag.rs b/storage_controller/src/service/feature_flag.rs new file mode 100644 index 0000000000..f44bf046b9 --- /dev/null +++ b/storage_controller/src/service/feature_flag.rs @@ -0,0 +1,111 @@ +use std::{sync::Arc, time::Duration}; + +use futures::StreamExt; +use pageserver_api::config::PostHogConfig; +use pageserver_client::mgmt_api; +use posthog_client_lite::PostHogClient; +use reqwest::StatusCode; +use tokio::time::MissedTickBehavior; +use tokio_util::sync::CancellationToken; + +use crate::{pageserver_client::PageserverClient, service::Service}; + +pub struct FeatureFlagService { + service: Arc, + config: PostHogConfig, + client: PostHogClient, + http_client: reqwest::Client, +} + +const DEFAULT_POSTHOG_REFRESH_INTERVAL: Duration = Duration::from_secs(30); + +impl FeatureFlagService { + pub fn new(service: Arc, config: PostHogConfig) -> Result { + let client = PostHogClient::new(config.clone().try_into_posthog_config()?); + Ok(Self { + service, + config, + client, + http_client: reqwest::Client::new(), + }) + } + + async fn refresh(self: Arc, cancel: CancellationToken) -> Result<(), anyhow::Error> { + let nodes = { + let inner = self.service.inner.read().unwrap(); + inner.nodes.clone() + }; + + let feature_flag_spec = self.client.get_feature_flags_local_evaluation_raw().await?; + let stream = futures::stream::iter(nodes.values().cloned()).map(|node| { + let this = self.clone(); + let feature_flag_spec = feature_flag_spec.clone(); + async move { + let res = async { + let client = PageserverClient::new( + node.get_id(), + this.http_client.clone(), + node.base_url(), + // TODO: what if we rotate the token during storcon lifetime? + this.service.config.pageserver_jwt_token.as_deref(), + ); + + client.update_feature_flag_spec(feature_flag_spec).await?; + tracing::info!( + "Updated {}({}) with feature flag spec", + node.get_id(), + node.base_url() + ); + Ok::<_, mgmt_api::Error>(()) + }; + + if let Err(e) = res.await { + if let mgmt_api::Error::ApiError(status, _) = e { + if status == StatusCode::NOT_FOUND { + // This is expected during deployments where the API is not available, so we can ignore it + return; + } + } + tracing::warn!( + "Failed to update feature flag spec for {}: {e}", + node.get_id() + ); + } + } + }); + let mut stream = stream.buffer_unordered(8); + + while stream.next().await.is_some() { + if cancel.is_cancelled() { + return Ok(()); + } + } + + Ok(()) + } + + pub async fn run(self: Arc, cancel: CancellationToken) { + let refresh_interval = self + .config + .refresh_interval + .unwrap_or(DEFAULT_POSTHOG_REFRESH_INTERVAL); + let mut interval = tokio::time::interval(refresh_interval); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + tracing::info!( + "Starting feature flag service with refresh interval: {:?}", + refresh_interval + ); + loop { + tokio::select! 
{ + _ = interval.tick() => {} + _ = cancel.cancelled() => { + break; + } + } + let res = self.clone().refresh(cancel.clone()).await; + if let Err(e) = res { + tracing::error!("Failed to refresh feature flags: {e:#?}"); + } + } + } +} diff --git a/storage_controller/src/service/safekeeper_reconciler.rs b/storage_controller/src/service/safekeeper_reconciler.rs index fbf0b5c4e3..b67a679fad 100644 --- a/storage_controller/src/service/safekeeper_reconciler.rs +++ b/storage_controller/src/service/safekeeper_reconciler.rs @@ -145,7 +145,7 @@ pub(crate) async fn load_schedule_requests( } let Some(sk) = safekeepers.get(&other_node_id) else { tracing::warn!( - "couldnt find safekeeper with pending op id {other_node_id}, not pulling from it" + "couldn't find safekeeper with pending op id {other_node_id}, not pulling from it" ); return None; }; @@ -230,7 +230,7 @@ impl ReconcilerHandle { // increase it before putting into the queue. let queued_gauge = &METRICS_REGISTRY .metrics_group - .storage_controller_safkeeper_reconciles_queued; + .storage_controller_safekeeper_reconciles_queued; let label_group = SafekeeperReconcilerLabelGroup { sk_az: &sk_az, sk_node_id: &sk_node_id, @@ -306,7 +306,7 @@ impl SafekeeperReconciler { let queued_gauge = &METRICS_REGISTRY .metrics_group - .storage_controller_safkeeper_reconciles_queued; + .storage_controller_safekeeper_reconciles_queued; queued_gauge.set( SafekeeperReconcilerLabelGroup { sk_az: &req.safekeeper.skp.availability_zone_id, @@ -547,7 +547,7 @@ impl SafekeeperReconcilerInner { let complete_counter = &METRICS_REGISTRY .metrics_group - .storage_controller_safkeeper_reconciles_complete; + .storage_controller_safekeeper_reconciles_complete; complete_counter.inc(SafekeeperReconcilerLabelGroup { sk_az: &req.safekeeper.skp.availability_zone_id, sk_node_id: &req.safekeeper.get_id().to_string(), diff --git a/storage_controller/src/service/safekeeper_service.rs b/storage_controller/src/service/safekeeper_service.rs index 1f673fe445..90ea48dd7b 100644 --- a/storage_controller/src/service/safekeeper_service.rs +++ b/storage_controller/src/service/safekeeper_service.rs @@ -4,20 +4,31 @@ use std::sync::Arc; use std::time::Duration; use super::safekeeper_reconciler::ScheduleRequest; +use crate::compute_hook; use crate::heartbeater::SafekeeperState; +use crate::id_lock_map::trace_shared_lock; use crate::metrics; use crate::persistence::{ DatabaseError, SafekeeperTimelineOpKind, TimelinePendingOpPersistence, TimelinePersistence, }; use crate::safekeeper::Safekeeper; +use crate::safekeeper_client::SafekeeperClient; +use crate::service::TenantOperations; use crate::timeline_import::TimelineImportFinalizeError; use anyhow::Context; use http_utils::error::ApiError; use pageserver_api::controller_api::{ SafekeeperDescribeResponse, SkSchedulingPolicy, TimelineImportRequest, + TimelineSafekeeperMigrateRequest, }; use pageserver_api::models::{SafekeeperInfo, SafekeepersInfo, TimelineInfo}; -use safekeeper_api::membership::{MemberSet, SafekeeperId}; +use safekeeper_api::PgVersionId; +use safekeeper_api::membership::{self, MemberSet, SafekeeperGeneration}; +use safekeeper_api::models::{ + PullTimelineRequest, TimelineMembershipSwitchRequest, TimelineMembershipSwitchResponse, +}; +use safekeeper_api::{INITIAL_TERM, Term}; +use safekeeper_client::mgmt_api; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use utils::id::{NodeId, TenantId, TimelineId}; @@ -26,7 +37,41 @@ use utils::lsn::Lsn; use super::Service; +#[derive(serde::Serialize, serde::Deserialize, Clone)] 
+pub struct TimelineLocateResponse { + pub generation: SafekeeperGeneration, + pub sk_set: Vec, + pub new_sk_set: Option>, +} + impl Service { + fn make_member_set(safekeepers: &[Safekeeper]) -> Result { + let members = safekeepers + .iter() + .map(|sk| sk.get_safekeeper_id()) + .collect::>(); + + MemberSet::new(members).map_err(ApiError::InternalServerError) + } + + fn get_safekeepers(&self, ids: &[i64]) -> Result, ApiError> { + let safekeepers = { + let locked = self.inner.read().unwrap(); + locked.safekeepers.clone() + }; + + ids.iter() + .map(|&id| { + let node_id = NodeId(id as u64); + safekeepers.get(&node_id).cloned().ok_or_else(|| { + ApiError::InternalServerError(anyhow::anyhow!( + "safekeeper {node_id} is not registered" + )) + }) + }) + .collect::, _>>() + } + /// Timeline creation on safekeepers /// /// Returns `Ok(left)` if the timeline has been created on a quorum of safekeepers, @@ -36,38 +81,12 @@ impl Service { &self, tenant_id: TenantId, timeline_id: TimelineId, - pg_version: u32, + pg_version: PgVersionId, timeline_persistence: &TimelinePersistence, ) -> Result, ApiError> { - // If quorum is reached, return if we are outside of a specified timeout - let jwt = self - .config - .safekeeper_jwt_token - .clone() - .map(SecretString::from); - let mut joinset = JoinSet::new(); + let safekeepers = self.get_safekeepers(&timeline_persistence.sk_set)?; - // Prepare membership::Configuration from choosen safekeepers. - let safekeepers = { - let locked = self.inner.read().unwrap(); - locked.safekeepers.clone() - }; - - let mut members = Vec::new(); - for sk_id in timeline_persistence.sk_set.iter() { - let sk_id = NodeId(*sk_id as u64); - let Some(safekeeper) = safekeepers.get(&sk_id) else { - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "couldn't find entry for safekeeper with id {sk_id}" - )))?; - }; - members.push(SafekeeperId { - id: sk_id, - host: safekeeper.skp.host.clone(), - pg_port: safekeeper.skp.port as u16, - }); - } - let mset = MemberSet::new(members).map_err(ApiError::InternalServerError)?; + let mset = Self::make_member_set(&safekeepers)?; let mconf = safekeeper_api::membership::Configuration::new(mset); let req = safekeeper_api::models::TimelineCreateRequest { @@ -80,107 +99,168 @@ impl Service { timeline_id, wal_seg_size: None, }; + const SK_CREATE_TIMELINE_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30); - for sk in timeline_persistence.sk_set.iter() { - let sk_id = NodeId(*sk as u64); - let safekeepers = safekeepers.clone(); + + let results = self + .tenant_timeline_safekeeper_op_quorum( + &safekeepers, + move |client| { + let req = req.clone(); + async move { client.create_timeline(&req).await } + }, + SK_CREATE_TIMELINE_RECONCILE_TIMEOUT, + ) + .await?; + + Ok(results + .into_iter() + .enumerate() + .filter_map(|(idx, res)| { + if res.is_ok() { + None // Success, don't return this safekeeper + } else { + Some(safekeepers[idx].get_id()) // Failure, return this safekeeper + } + }) + .collect::>()) + } + + /// Perform an operation on a list of safekeepers in parallel with retries. + /// + /// Return the results of the operation on each safekeeper in the input order. 
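Before the implementation below, here is a condensed, generic sketch of the fan-out shape it uses: spawn one task per safekeeper, pre-fill the result vector with a timeout error per slot, and fill slots in input order as responses arrive until the deadline fires. The names here (fan_out, the String peer and error types) are illustrative only, not the controller's real signatures:

    use tokio::task::JoinSet;
    use tokio::time::{timeout_at, Duration, Instant};

    async fn fan_out<T, F, Fut>(peers: Vec<String>, op: F, budget: Duration) -> Vec<Result<T, String>>
    where
        T: Send + 'static,
        F: Fn(String) -> Fut,
        Fut: std::future::Future<Output = Result<T, String>> + Send + 'static,
    {
        let mut join_set = JoinSet::new();
        for (idx, peer) in peers.iter().cloned().enumerate() {
            let fut = op(peer);
            join_set.spawn(async move { (idx, fut.await) });
        }

        // One slot per peer, in input order; slots never answered keep this error.
        let mut results: Vec<Result<T, String>> =
            peers.iter().map(|_| Err("timed out".to_string())).collect();

        let deadline = Instant::now() + budget;
        loop {
            match timeout_at(deadline, join_set.join_next()).await {
                Ok(Some(Ok((idx, res)))) => results[idx] = res,
                Ok(Some(Err(_join_err))) => {} // task panicked; keep the error slot
                Ok(None) => break,             // every task has reported
                Err(_elapsed) => break,        // overall budget exhausted
            }
        }
        results
    }

Keeping results indexed by input position is what lets callers later pair each response with the safekeeper it came from.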
+ async fn tenant_timeline_safekeeper_op( + &self, + safekeepers: &[Safekeeper], + op: O, + timeout: Duration, + ) -> Result>, ApiError> + where + O: FnMut(SafekeeperClient) -> F + Send + 'static, + O: Clone, + F: std::future::Future> + Send + 'static, + T: Sync + Send + 'static, + { + let jwt = self + .config + .safekeeper_jwt_token + .clone() + .map(SecretString::from); + let mut joinset = JoinSet::new(); + + for (idx, sk) in safekeepers.iter().enumerate() { + let sk = sk.clone(); let http_client = self.http_client.clone(); let jwt = jwt.clone(); - let req = req.clone(); + let op = op.clone(); joinset.spawn(async move { - // Unwrap is fine as we already would have returned error above - let sk_p = safekeepers.get(&sk_id).unwrap(); - let res = sk_p + let res = sk .with_client_retries( - |client| { - let req = req.clone(); - async move { client.create_timeline(&req).await } - }, + op, &http_client, &jwt, 3, 3, - SK_CREATE_TIMELINE_RECONCILE_TIMEOUT, + // TODO(diko): This is a wrong timeout. + // It should be scaled to the retry count. + timeout, &CancellationToken::new(), ) .await; - (sk_id, sk_p.skp.host.clone(), res) + (idx, res) }); } + + // Initialize results with timeout errors in case we never get a response. + let mut results: Vec> = safekeepers + .iter() + .map(|_| { + Err(mgmt_api::Error::Timeout( + "safekeeper operation timed out".to_string(), + )) + }) + .collect(); + // After we have built the joinset, we now wait for the tasks to complete, // but with a specified timeout to make sure we return swiftly, either with // a failure or success. - let reconcile_deadline = tokio::time::Instant::now() + SK_CREATE_TIMELINE_RECONCILE_TIMEOUT; + let reconcile_deadline = tokio::time::Instant::now() + timeout; // Wait until all tasks finish or timeout is hit, whichever occurs // first. - let mut reconcile_results = Vec::new(); + let mut result_count = 0; loop { if let Ok(res) = tokio::time::timeout_at(reconcile_deadline, joinset.join_next()).await { let Some(res) = res else { break }; match res { - Ok(res) => { + Ok((idx, res)) => { + let sk = &safekeepers[idx]; tracing::info!( "response from safekeeper id:{} at {}: {:?}", - res.0, - res.1, - res.2 + sk.get_id(), + sk.skp.host, + // Only print errors, as there is no Debug trait for T. + res.as_ref().map(|_| ()), ); - reconcile_results.push(res); + results[idx] = res; + result_count += 1; } Err(join_err) => { tracing::info!("join_err for task in joinset: {join_err}"); } } } else { - tracing::info!( - "timeout for creation call after {} responses", - reconcile_results.len() - ); + tracing::info!("timeout for operation call after {result_count} responses",); break; } } - // Now check now if quorum was reached in reconcile_results. 
- let total_result_count = reconcile_results.len(); - let remaining = reconcile_results - .into_iter() - .filter_map(|res| res.2.is_err().then_some(res.0)) - .collect::>(); - tracing::info!( - "Got {} non-successful responses from initial creation request of total {total_result_count} responses", - remaining.len() - ); - let target_sk_count = timeline_persistence.sk_set.len(); - let quorum_size = match target_sk_count { - 0 => { - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "timeline configured without any safekeepers", - ))); - } - 1 | 2 => { - #[cfg(feature = "testing")] - { - // In test settings, it is allowed to have one or two safekeepers - target_sk_count - } - #[cfg(not(feature = "testing"))] - { - // The region is misconfigured: we need at least three safekeepers to be configured - // in order to schedule work to them - tracing::warn!( - "couldn't find at least 3 safekeepers for timeline, found: {:?}", - timeline_persistence.sk_set - ); - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "couldn't find at least 3 safekeepers to put timeline to" - ))); - } - } - _ => target_sk_count / 2 + 1, - }; - let success_count = target_sk_count - remaining.len(); + Ok(results) + } + + /// Perform an operation on a list of safekeepers in parallel with retries, + /// and validates that we reach a quorum of successful responses. + /// + /// Return the results of the operation on each safekeeper in the input order. + /// It's guaranteed that at least a quorum of the responses are successful. + async fn tenant_timeline_safekeeper_op_quorum( + &self, + safekeepers: &[Safekeeper], + op: O, + timeout: Duration, + ) -> Result>, ApiError> + where + O: FnMut(SafekeeperClient) -> F, + O: Clone + Send + 'static, + F: std::future::Future> + Send + 'static, + T: Sync + Send + 'static, + { + let target_sk_count = safekeepers.len(); + + if target_sk_count == 0 { + return Err(ApiError::InternalServerError(anyhow::anyhow!( + "timeline configured without any safekeepers" + ))); + } + + if target_sk_count < self.config.timeline_safekeeper_count { + tracing::warn!( + "running a quorum operation with {} safekeepers, which is less than configured {} safekeepers per timeline", + target_sk_count, + self.config.timeline_safekeeper_count + ); + } + + let results = self + .tenant_timeline_safekeeper_op(safekeepers, op, timeout) + .await?; + + // Now check if quorum was reached in results. + + let quorum_size = target_sk_count / 2 + 1; + + let success_count = results.iter().filter(|res| res.is_ok()).count(); if success_count < quorum_size { // Failure return Err(ApiError::InternalServerError(anyhow::anyhow!( @@ -188,7 +268,7 @@ impl Service { ))); } - Ok(remaining) + Ok(results) } /// Create timeline in controller database and on safekeepers. @@ -211,7 +291,7 @@ impl Service { read_only: bool, ) -> Result { let timeline_id = timeline_info.timeline_id; - let pg_version = timeline_info.pg_version * 10000; + let pg_version = PgVersionId::from(timeline_info.pg_version); // Initially start_lsn is determined by last_record_lsn in pageserver // response as it does initdb. However, later we persist it and in sk // creation calls replace with the value from the timeline row if it @@ -396,6 +476,38 @@ impl Service { Ok(()) } + /// Locate safekeepers for a timeline. + /// Return the generation, sk_set and new_sk_set if present. + /// If the timeline is not storcon-managed, return NotFound. 
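The quorum check used above is plain majority arithmetic; a tiny, self-contained version (generic over any result type, not tied to the controller's API):

    /// Strict majority: more than half of the member set must have succeeded.
    fn reached_quorum<T, E>(results: &[Result<T, E>]) -> bool {
        let quorum_size = results.len() / 2 + 1;
        results.iter().filter(|r| r.is_ok()).count() >= quorum_size
    }

    // 2 successes out of 3 is a quorum; 2 out of 4 is not:
    // assert!(reached_quorum::<(), ()>(&[Ok(()), Ok(()), Err(())]));
    // assert!(!reached_quorum::<(), ()>(&[Ok(()), Ok(()), Err(()), Err(())]));

Note that an empty slice yields false here, which matches the explicit "timeline configured without any safekeepers" error path above rather than silently passing.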
+ pub(crate) async fn tenant_timeline_locate( + &self, + tenant_id: TenantId, + timeline_id: TimelineId, + ) -> Result { + let timeline = self + .persistence + .get_timeline(tenant_id, timeline_id) + .await?; + + let Some(timeline) = timeline else { + return Err(ApiError::NotFound( + anyhow::anyhow!("Timeline {}/{} not found", tenant_id, timeline_id).into(), + )); + }; + + Ok(TimelineLocateResponse { + generation: SafekeeperGeneration::new(timeline.generation as u32), + sk_set: timeline + .sk_set + .iter() + .map(|id| NodeId(*id as u64)) + .collect(), + new_sk_set: timeline + .new_sk_set + .map(|sk_set| sk_set.iter().map(|id| NodeId(*id as u64)).collect()), + }) + } + /// Perform timeline deletion on safekeepers. Will return success: we persist the deletion into the reconciler. pub(super) async fn tenant_timeline_delete_safekeepers( self: &Arc, @@ -569,7 +681,8 @@ impl Service { Ok(()) } - /// Choose safekeepers for the new timeline: 3 in different azs. + /// Choose safekeepers for the new timeline in different azs. + /// 3 are choosen by default, but may be configured via config (for testing). pub(crate) async fn safekeepers_for_new_timeline( &self, ) -> Result, ApiError> { @@ -612,18 +725,8 @@ impl Service { ) }); // Number of safekeepers in different AZs we are looking for - let wanted_count = match all_safekeepers.len() { - 0 => { - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "couldn't find any active safekeeper for new timeline", - ))); - } - // Have laxer requirements on testig mode as we don't want to - // spin up three safekeepers for every single test - #[cfg(feature = "testing")] - 1 | 2 => all_safekeepers.len(), - _ => 3, - }; + let wanted_count = self.config.timeline_safekeeper_count; + let mut sks = Vec::new(); let mut azs = HashSet::new(); for (_sk_util, sk_info, az_id) in all_safekeepers.iter() { @@ -702,7 +805,7 @@ impl Service { Safekeeper::from_persistence( crate::persistence::SafekeeperPersistence::from_upsert( record, - SkSchedulingPolicy::Pause, + SkSchedulingPolicy::Activating, ), CancellationToken::new(), use_https, @@ -743,27 +846,518 @@ impl Service { .await?; let node_id = NodeId(id as u64); // After the change has been persisted successfully, update the in-memory state - { - let mut locked = self.inner.write().unwrap(); - let mut safekeepers = (*locked.safekeepers).clone(); - let sk = safekeepers - .get_mut(&node_id) - .ok_or(DatabaseError::Logical("Not found".to_string()))?; - sk.set_scheduling_policy(scheduling_policy); + self.set_safekeeper_scheduling_policy_in_mem(node_id, scheduling_policy) + .await + } - match scheduling_policy { - SkSchedulingPolicy::Active => { - locked - .safekeeper_reconcilers - .start_reconciler(node_id, self); - } - SkSchedulingPolicy::Decomissioned | SkSchedulingPolicy::Pause => { - locked.safekeeper_reconcilers.stop_reconciler(node_id); - } + pub(crate) async fn set_safekeeper_scheduling_policy_in_mem( + self: &Arc, + node_id: NodeId, + scheduling_policy: SkSchedulingPolicy, + ) -> Result<(), DatabaseError> { + let mut locked = self.inner.write().unwrap(); + let mut safekeepers = (*locked.safekeepers).clone(); + let sk = safekeepers + .get_mut(&node_id) + .ok_or(DatabaseError::Logical("Not found".to_string()))?; + sk.set_scheduling_policy(scheduling_policy); + + match scheduling_policy { + SkSchedulingPolicy::Active => { + locked + .safekeeper_reconcilers + .start_reconciler(node_id, self); + } + SkSchedulingPolicy::Decomissioned + | SkSchedulingPolicy::Pause + | SkSchedulingPolicy::Activating => { + 
locked.safekeeper_reconcilers.stop_reconciler(node_id); } - - locked.safekeepers = Arc::new(safekeepers); } + + locked.safekeepers = Arc::new(safekeepers); Ok(()) } + + /// Call `switch_timeline_membership` on all safekeepers with retries + /// till the quorum of successful responses is reached. + /// + /// If min_position is not None, validates that majority of safekeepers + /// reached at least min_position. + /// + /// Return responses from safekeepers in the input order. + async fn tenant_timeline_set_membership_quorum( + self: &Arc, + tenant_id: TenantId, + timeline_id: TimelineId, + safekeepers: &[Safekeeper], + config: &membership::Configuration, + min_position: Option<(Term, Lsn)>, + ) -> Result>, ApiError> { + let req = TimelineMembershipSwitchRequest { + mconf: config.clone(), + }; + + const SK_SET_MEM_TIMELINE_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30); + + let results = self + .tenant_timeline_safekeeper_op_quorum( + safekeepers, + move |client| { + let req = req.clone(); + async move { + let mut res = client + .switch_timeline_membership(tenant_id, timeline_id, &req) + .await; + + // If min_position is not reached, map the response to an error, + // so it isn't counted toward the quorum. + if let Some(min_position) = min_position { + if let Ok(ok_res) = &res { + if (ok_res.term, ok_res.flush_lsn) < min_position { + // Use Error::Timeout to make this error retriable. + res = Err(mgmt_api::Error::Timeout( + format!( + "safekeeper {} returned position {:?} which is less than minimum required position {:?}", + client.node_id_label(), + (ok_res.term, ok_res.flush_lsn), + min_position + ) + )); + } + } + } + + res + } + }, + SK_SET_MEM_TIMELINE_RECONCILE_TIMEOUT, + ) + .await?; + + for res in results.iter().flatten() { + if res.current_conf.generation > config.generation { + // Antoher switch_membership raced us. + return Err(ApiError::Conflict(format!( + "received configuration with generation {} from safekeeper, but expected {}", + res.current_conf.generation, config.generation + ))); + } else if res.current_conf.generation < config.generation { + // Note: should never happen. + // If we get a response, it should be at least the sent generation. + tracing::error!( + "received configuration with generation {} from safekeeper, but expected {}", + res.current_conf.generation, + config.generation + ); + return Err(ApiError::InternalServerError(anyhow::anyhow!( + "received configuration with generation {} from safekeeper, but expected {}", + res.current_conf.generation, + config.generation + ))); + } + } + + Ok(results) + } + + /// Pull timeline to to_safekeepers from from_safekeepers with retries. + /// + /// Returns Ok(()) only if all the pull_timeline requests were successful. + async fn tenant_timeline_pull_from_peers( + self: &Arc, + tenant_id: TenantId, + timeline_id: TimelineId, + to_safekeepers: &[Safekeeper], + from_safekeepers: &[Safekeeper], + ) -> Result<(), ApiError> { + let http_hosts = from_safekeepers + .iter() + .map(|sk| sk.base_url()) + .collect::>(); + + tracing::info!( + "pulling timeline to {:?} from {:?}", + to_safekeepers + .iter() + .map(|sk| sk.get_id()) + .collect::>(), + from_safekeepers + .iter() + .map(|sk| sk.get_id()) + .collect::>() + ); + + // TODO(diko): need to pass mconf/generation with the request + // to properly handle tombstones. Ignore tombstones for now. + // Worst case: we leave a timeline on a safekeeper which is not in the current set. 
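The pull-from-peers step here and the later exclude step are both set differences over node ids: pull the timeline onto nodes that are in the new set but not the current one, and eventually exclude nodes that are in the current set but not the new one. A small sketch of just that bookkeeping, using plain u64 ids instead of NodeId:

    use std::collections::HashSet;

    // Returns (pull_to, exclude): nodes to pull the timeline onto, and nodes to
    // remove it from once the new configuration is committed.
    fn migration_deltas(cur: &[u64], new: &[u64]) -> (Vec<u64>, Vec<u64>) {
        let cur_set: HashSet<u64> = cur.iter().copied().collect();
        let new_set: HashSet<u64> = new.iter().copied().collect();

        let pull_to = new.iter().copied().filter(|id| !cur_set.contains(id)).collect();
        let exclude = cur.iter().copied().filter(|id| !new_set.contains(id)).collect();
        (pull_to, exclude)
    }

    // migration_deltas(&[1, 2, 3], &[2, 3, 4]) == (vec![4], vec![1])

Nodes present in both sets need neither a pull nor an exclude, which is exactly what the filters in the surrounding code express.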
+ let req = PullTimelineRequest { + tenant_id, + timeline_id, + http_hosts, + ignore_tombstone: Some(true), + }; + + const SK_PULL_TIMELINE_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30); + + let responses = self + .tenant_timeline_safekeeper_op( + to_safekeepers, + move |client| { + let req = req.clone(); + async move { client.pull_timeline(&req).await } + }, + SK_PULL_TIMELINE_RECONCILE_TIMEOUT, + ) + .await?; + + if let Some((idx, err)) = responses + .iter() + .enumerate() + .find_map(|(idx, res)| Some((idx, res.as_ref().err()?))) + { + let sk_id = to_safekeepers[idx].get_id(); + return Err(ApiError::InternalServerError(anyhow::anyhow!( + "pull_timeline to {sk_id} failed: {err}", + ))); + } + + Ok(()) + } + + /// Exclude a timeline from safekeepers in parallel with retries. + /// If an exclude request is unsuccessful, it will be added to + /// the reconciler, and after that the function will succeed. + async fn tenant_timeline_safekeeper_exclude( + self: &Arc, + tenant_id: TenantId, + timeline_id: TimelineId, + safekeepers: &[Safekeeper], + config: &membership::Configuration, + ) -> Result<(), ApiError> { + let req = TimelineMembershipSwitchRequest { + mconf: config.clone(), + }; + + const SK_EXCLUDE_TIMELINE_TIMEOUT: Duration = Duration::from_secs(30); + + let results = self + .tenant_timeline_safekeeper_op( + safekeepers, + move |client| { + let req = req.clone(); + async move { client.exclude_timeline(tenant_id, timeline_id, &req).await } + }, + SK_EXCLUDE_TIMELINE_TIMEOUT, + ) + .await?; + + let mut reconcile_requests = Vec::new(); + + for (idx, res) in results.iter().enumerate() { + if res.is_err() { + let sk_id = safekeepers[idx].skp.id; + let pending_op = TimelinePendingOpPersistence { + tenant_id: tenant_id.to_string(), + timeline_id: timeline_id.to_string(), + generation: config.generation.into_inner() as i32, + op_kind: SafekeeperTimelineOpKind::Exclude, + sk_id, + }; + tracing::info!("writing pending exclude op for sk id {sk_id}"); + self.persistence.insert_pending_op(pending_op).await?; + + let req = ScheduleRequest { + safekeeper: Box::new(safekeepers[idx].clone()), + host_list: Vec::new(), + tenant_id, + timeline_id: Some(timeline_id), + generation: config.generation.into_inner(), + kind: SafekeeperTimelineOpKind::Exclude, + }; + reconcile_requests.push(req); + } + } + + if !reconcile_requests.is_empty() { + let locked = self.inner.read().unwrap(); + for req in reconcile_requests { + locked.safekeeper_reconcilers.schedule_request(req); + } + } + + Ok(()) + } + + /// Migrate timeline safekeeper set to a new set. + /// + /// This function implements an algorithm from RFC-035. + /// + pub(crate) async fn tenant_timeline_safekeeper_migrate( + self: &Arc, + tenant_id: TenantId, + timeline_id: TimelineId, + req: TimelineSafekeeperMigrateRequest, + ) -> Result<(), ApiError> { + let all_safekeepers = self.inner.read().unwrap().safekeepers.clone(); + + let new_sk_set = req.new_sk_set; + + for sk_id in new_sk_set.iter() { + if !all_safekeepers.contains_key(sk_id) { + return Err(ApiError::BadRequest(anyhow::anyhow!( + "safekeeper {sk_id} does not exist" + ))); + } + } + + // TODO(diko): per-tenant lock is too wide. Consider introducing per-timeline locks. + let _tenant_lock = trace_shared_lock( + &self.tenant_op_locks, + tenant_id, + TenantOperations::TimelineSafekeeperMigrate, + ) + .await; + + // 1. Fetch current timeline configuration from the configuration storage. 
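The migration body that follows is easier to read with the RFC-035 state machine in mind: a timeline is either in a single configuration (new_sk_set is None) or in a joint one (new_sk_set is set), and every persisted change bumps the generation. A schematic sketch of that state, using plain integers rather than the controller's persistence types:

    // Schematic view of the persisted membership state driven by the steps below.
    struct TimelineMembership {
        generation: u32,
        sk_set: Vec<u64>,
        new_sk_set: Option<Vec<u64>>, // Some(_) while a migration is in flight
    }

    impl TimelineMembership {
        // Step 3: enter the joint configuration (generation + 1, remember the target set).
        fn start_migration(&mut self, target: Vec<u64>) {
            self.generation += 1;
            self.new_sk_set = Some(target);
        }

        // Step 8: commit the new configuration (generation + 1, target becomes the only set).
        fn commit_migration(&mut self) {
            if let Some(target) = self.new_sk_set.take() {
                self.generation += 1;
                self.sk_set = target;
            }
        }
    }

A retried migration re-enters at the joint state without bumping the generation again, which is why the code below only increments it when no new_sk_set is persisted yet.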
+ + let timeline = self + .persistence + .get_timeline(tenant_id, timeline_id) + .await?; + + let Some(timeline) = timeline else { + return Err(ApiError::NotFound( + anyhow::anyhow!( + "timeline {tenant_id}/{timeline_id} doesn't exist in timelines table" + ) + .into(), + )); + }; + + let cur_sk_set = timeline + .sk_set + .iter() + .map(|&id| NodeId(id as u64)) + .collect::>(); + + tracing::info!( + ?cur_sk_set, + ?new_sk_set, + "Migrating timeline to new safekeeper set", + ); + + let mut generation = SafekeeperGeneration::new(timeline.generation as u32); + + if let Some(ref presistent_new_sk_set) = timeline.new_sk_set { + // 2. If it is already joint one and new_set is different from desired_set refuse to change. + if presistent_new_sk_set + .iter() + .map(|&id| NodeId(id as u64)) + .ne(new_sk_set.iter().cloned()) + { + tracing::info!( + ?presistent_new_sk_set, + ?new_sk_set, + "different new safekeeper set is already set in the database", + ); + return Err(ApiError::Conflict(format!( + "the timeline is already migrating to a different safekeeper set: {presistent_new_sk_set:?}" + ))); + } + // It it is the same new_sk_set, we can continue the migration (retry). + } else { + // 3. No active migration yet. + // Increment current generation and put desired_set to new_sk_set. + generation = generation.next(); + + self.persistence + .update_timeline_membership( + tenant_id, + timeline_id, + generation, + &cur_sk_set, + Some(&new_sk_set), + ) + .await?; + } + + let cur_safekeepers = self.get_safekeepers(&timeline.sk_set)?; + let cur_sk_member_set = Self::make_member_set(&cur_safekeepers)?; + + let new_sk_set_i64 = new_sk_set.iter().map(|id| id.0 as i64).collect::>(); + let new_safekeepers = self.get_safekeepers(&new_sk_set_i64)?; + let new_sk_member_set = Self::make_member_set(&new_safekeepers)?; + + let joint_config = membership::Configuration { + generation, + members: cur_sk_member_set, + new_members: Some(new_sk_member_set.clone()), + }; + + // 4. Call PUT configuration on safekeepers from the current set, + // delivering them joint_conf. + + // Notify cplane/compute about the membership change BEFORE changing the membership on safekeepers. + // This way the compute will know about new safekeepers from joint_config before we require to + // collect a quorum from them. + self.cplane_notify_safekeepers(tenant_id, timeline_id, &joint_config) + .await?; + + let results = self + .tenant_timeline_set_membership_quorum( + tenant_id, + timeline_id, + &cur_safekeepers, + &joint_config, + None, // no min position + ) + .await?; + + let mut sync_position = (INITIAL_TERM, Lsn::INVALID); + for res in results.into_iter().flatten() { + let sk_position = (res.term, res.flush_lsn); + if sync_position < sk_position { + sync_position = sk_position; + } + } + + tracing::info!( + %generation, + ?sync_position, + "safekeepers set membership updated", + ); + + // 5. Initialize timeline on safekeeper(s) from new_sk_set where it doesn't exist yet + // by doing pull_timeline from the majority of the current set. + + // Filter out safekeepers which are already in the current set. + let from_ids: HashSet = cur_safekeepers.iter().map(|sk| sk.get_id()).collect(); + let pull_to_safekeepers = new_safekeepers + .iter() + .filter(|sk| !from_ids.contains(&sk.get_id())) + .cloned() + .collect::>(); + + self.tenant_timeline_pull_from_peers( + tenant_id, + timeline_id, + &pull_to_safekeepers, + &cur_safekeepers, + ) + .await?; + + // 6. Call POST bump_term(sync_term) on safekeepers from the new set. 
Success on majority is enough. + + // TODO(diko): do we need to bump timeline term? + + // 7. Repeatedly call PUT configuration on safekeepers from the new set, + // delivering them joint_conf and collecting their positions. + + tracing::info!(?sync_position, "waiting for safekeepers to sync position"); + + self.tenant_timeline_set_membership_quorum( + tenant_id, + timeline_id, + &new_safekeepers, + &joint_config, + Some(sync_position), + ) + .await?; + + // 8. Create new_conf: Configuration incrementing joint_conf generation and + // having new safekeeper set as sk_set and None new_sk_set. + + let generation = generation.next(); + + let new_conf = membership::Configuration { + generation, + members: new_sk_member_set, + new_members: None, + }; + + self.persistence + .update_timeline_membership(tenant_id, timeline_id, generation, &new_sk_set, None) + .await?; + + // TODO(diko): at this point we have already updated the timeline in the database, + // but we still need to notify safekeepers and cplane about the new configuration, + // and put delition of the timeline from the old safekeepers into the reconciler. + // Ideally it should be done atomically, but now it's not. + // Worst case: the timeline is not deleted from old safekeepers, + // the compute may require both quorums till the migration is retried and completed. + + self.tenant_timeline_set_membership_quorum( + tenant_id, + timeline_id, + &new_safekeepers, + &new_conf, + None, // no min position + ) + .await?; + + let new_ids: HashSet = new_safekeepers.iter().map(|sk| sk.get_id()).collect(); + let exclude_safekeepers = cur_safekeepers + .into_iter() + .filter(|sk| !new_ids.contains(&sk.get_id())) + .collect::>(); + self.tenant_timeline_safekeeper_exclude( + tenant_id, + timeline_id, + &exclude_safekeepers, + &new_conf, + ) + .await?; + + // Notify cplane/compute about the membership change AFTER changing the membership on safekeepers. + // This way the compute will stop talking to excluded safekeepers only after we stop requiring to + // collect a quorum from them. + self.cplane_notify_safekeepers(tenant_id, timeline_id, &new_conf) + .await?; + + Ok(()) + } + + /// Notify cplane about safekeeper membership change. + /// The cplane will receive a joint set of safekeepers as a safekeeper list. 
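A condensed sketch of the deduplication the notification below performs, so the compute sees each safekeeper once even while a node appears in both member sets of a joint configuration (simplified types, not the real membership structs):

    use std::collections::HashSet;

    #[derive(Clone)]
    struct MemberInfo {
        id: u64,
        host: String,
    }

    fn joint_safekeeper_list(
        members: &[MemberInfo],
        new_members: Option<&[MemberInfo]>,
    ) -> Vec<MemberInfo> {
        // Union of both sets, keeping the first occurrence of each node id.
        let mut seen = HashSet::new();
        members
            .iter()
            .chain(new_members.into_iter().flatten())
            .filter(|m| seen.insert(m.id))
            .cloned()
            .collect()
    }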
+ async fn cplane_notify_safekeepers( + &self, + tenant_id: TenantId, + timeline_id: TimelineId, + mconf: &membership::Configuration, + ) -> Result<(), ApiError> { + let mut safekeepers = Vec::new(); + let mut ids: HashSet<_> = HashSet::new(); + + for member in mconf + .members + .m + .iter() + .chain(mconf.new_members.iter().flat_map(|m| m.m.iter())) + { + if ids.insert(member.id) { + safekeepers.push(compute_hook::SafekeeperInfo { + id: member.id, + hostname: Some(member.host.clone()), + }); + } + } + + self.compute_hook + .notify_safekeepers( + compute_hook::SafekeepersUpdate { + tenant_id, + timeline_id, + generation: mconf.generation, + safekeepers, + }, + &self.cancel, + ) + .await + .map_err(|err| { + ApiError::InternalServerError(anyhow::anyhow!( + "failed to notify cplane about safekeeper membership change: {err}" + )) + }) + } } diff --git a/storage_controller/src/service/context_iterator.rs b/storage_controller/src/service/tenant_shard_iterator.rs similarity index 52% rename from storage_controller/src/service/context_iterator.rs rename to storage_controller/src/service/tenant_shard_iterator.rs index c4784e5e36..576b94b3a4 100644 --- a/storage_controller/src/service/context_iterator.rs +++ b/storage_controller/src/service/tenant_shard_iterator.rs @@ -1,4 +1,5 @@ use std::collections::BTreeMap; +use std::sync::Arc; use utils::id::TenantId; use utils::shard::TenantShardId; @@ -6,16 +7,21 @@ use utils::shard::TenantShardId; use crate::scheduler::{ScheduleContext, ScheduleMode}; use crate::tenant_shard::TenantShard; +use super::Service; + +/// Exclusive iterator over all tenant shards. +/// It is used to iterate over consistent tenants state at specific point in time. +/// /// When making scheduling decisions, it is useful to have the ScheduleContext for a whole /// tenant while considering the individual shards within it. This iterator is a helper /// that gathers all the shards in a tenant and then yields them together with a ScheduleContext /// for the tenant. -pub(super) struct TenantShardContextIterator<'a> { +pub(super) struct TenantShardExclusiveIterator<'a> { schedule_mode: ScheduleMode, inner: std::collections::btree_map::IterMut<'a, TenantShardId, TenantShard>, } -impl<'a> TenantShardContextIterator<'a> { +impl<'a> TenantShardExclusiveIterator<'a> { pub(super) fn new( tenants: &'a mut BTreeMap, schedule_mode: ScheduleMode, @@ -27,7 +33,7 @@ impl<'a> TenantShardContextIterator<'a> { } } -impl<'a> Iterator for TenantShardContextIterator<'a> { +impl<'a> Iterator for TenantShardExclusiveIterator<'a> { type Item = (TenantId, ScheduleContext, Vec<&'a mut TenantShard>); fn next(&mut self) -> Option { @@ -52,13 +58,93 @@ impl<'a> Iterator for TenantShardContextIterator<'a> { } } +/// Shared iterator over all tenant shards. +/// It is used to iterate over all tenants without blocking another code, working with tenants +/// +/// A simple iterator which can be used in tandem with [`crate::service::Service`] +/// to iterate over all known tenant shard ids without holding the lock on the +/// service state at all times. 
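The idea behind the shared iterator below can be shown with a plain map behind a mutex: every step re-acquires the lock, resumes strictly after the last key seen, and never holds the lock between steps. This sketch uses u32 keys instead of TenantShardId, which is why range() suffices where the real accessor uses skip_while:

    use std::collections::BTreeMap;
    use std::sync::{Arc, Mutex};

    // Resume strictly after the last key we saw; tolerate the map changing between calls.
    fn next_after(map: &Arc<Mutex<BTreeMap<u32, String>>>, last: Option<u32>) -> Option<u32> {
        let guard = map.lock().unwrap();
        match last {
            Some(last) => guard.range(last + 1..).next().map(|(k, _)| *k),
            None => guard.keys().next().copied(),
        }
    }

    // Usage: walk every key without holding the lock across iterations.
    // let mut cursor = None;
    // while let Some(key) = next_after(&shared_map, cursor) {
    //     cursor = Some(key);
    //     /* inspect the shard for `key` here */
    // }

Because the lock is dropped between steps, shards inserted or removed mid-walk are simply seen or skipped, which is the trade-off the exclusive iterator avoids by holding the whole map mutably.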
+pub(crate) struct TenantShardSharedIterator { + tenants_accessor: F, + inspected_all_shards: bool, + last_inspected_shard: Option, +} + +impl TenantShardSharedIterator +where + F: Fn(Option) -> Option, +{ + pub(crate) fn new(tenants_accessor: F) -> Self { + Self { + tenants_accessor, + inspected_all_shards: false, + last_inspected_shard: None, + } + } + + pub(crate) fn finished(&self) -> bool { + self.inspected_all_shards + } +} + +impl Iterator for TenantShardSharedIterator +where + F: Fn(Option) -> Option, +{ + // TODO(ephemeralsad): consider adding schedule context to the iterator + type Item = TenantShardId; + + /// Returns the next tenant shard id if one exists + fn next(&mut self) -> Option { + if self.inspected_all_shards { + return None; + } + + match (self.tenants_accessor)(self.last_inspected_shard) { + Some(tid) => { + self.last_inspected_shard = Some(tid); + Some(tid) + } + None => { + self.inspected_all_shards = true; + None + } + } + } +} + +pub(crate) fn create_shared_shard_iterator( + service: Arc, +) -> TenantShardSharedIterator) -> Option> { + let tenants_accessor = move |last_inspected_shard: Option| { + let locked = &service.inner.read().unwrap(); + let tenants = &locked.tenants; + let entry = match last_inspected_shard { + Some(skip_past) => { + // Skip to the last seen tenant shard id + let mut cursor = tenants.iter().skip_while(|(tid, _)| **tid != skip_past); + + // Skip past the last seen + cursor.nth(1) + } + None => tenants.first_key_value(), + }; + + entry.map(|(tid, _)| tid).copied() + }; + + TenantShardSharedIterator::new(tenants_accessor) +} + #[cfg(test)] mod tests { use std::collections::BTreeMap; use std::str::FromStr; + use std::sync::Arc; use pageserver_api::controller_api::PlacementPolicy; - use utils::shard::{ShardCount, ShardNumber}; + use utils::id::TenantId; + use utils::shard::{ShardCount, ShardNumber, TenantShardId}; use super::*; use crate::scheduler::test_utils::make_test_nodes; @@ -66,7 +152,7 @@ mod tests { use crate::tenant_shard::tests::make_test_tenant_with_id; #[test] - fn test_context_iterator() { + fn test_exclusive_shard_iterator() { // Hand-crafted tenant IDs to ensure they appear in the expected order when put into // a btreemap & iterated let mut t_1_shards = make_test_tenant_with_id( @@ -106,7 +192,7 @@ mod tests { shard.schedule(&mut scheduler, &mut context).unwrap(); } - let mut iter = TenantShardContextIterator::new(&mut tenants, ScheduleMode::Speculative); + let mut iter = TenantShardExclusiveIterator::new(&mut tenants, ScheduleMode::Speculative); let (tenant_id, context, shards) = iter.next().unwrap(); assert_eq!(tenant_id, t1_id); assert_eq!(shards[0].tenant_shard_id.shard_number, ShardNumber(0)); @@ -132,4 +218,46 @@ mod tests { shard.intent.clear(&mut scheduler); } } + + #[test] + fn test_shared_shard_iterator() { + let tenant_id = TenantId::generate(); + let shard_count = ShardCount(8); + + let mut tenant_shards = Vec::default(); + for i in 0..shard_count.0 { + tenant_shards.push(( + TenantShardId { + tenant_id, + shard_number: ShardNumber(i), + shard_count, + }, + (), + )) + } + + let tenant_shards = Arc::new(tenant_shards); + + let tid_iter = TenantShardSharedIterator::new({ + let tenants = tenant_shards.clone(); + move |last_inspected_shard: Option| { + let entry = match last_inspected_shard { + Some(skip_past) => { + let mut cursor = tenants.iter().skip_while(|(tid, _)| *tid != skip_past); + cursor.nth(1) + } + None => tenants.first(), + }; + + entry.map(|(tid, _)| tid).copied() + } + }); + + let mut iterated_over = 
Vec::default(); + for tid in tid_iter { + iterated_over.push((tid, ())); + } + + assert_eq!(iterated_over, *tenant_shards); + } } diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index c7b2628ec4..0bfca5385e 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -131,6 +131,15 @@ pub(crate) struct TenantShard { #[serde(serialize_with = "read_last_error")] pub(crate) last_error: std::sync::Arc>>>, + /// Number of consecutive reconciliation errors that have occurred for this shard. + /// + /// When this count reaches MAX_CONSECUTIVE_RECONCILIATION_ERRORS, the tenant shard + /// will be counted as keep-failing in `reconcile_all` calculations. This allows + /// optimizations to run even when some shards keep failing. + /// + /// The counter is reset to 0 after a successful reconciliation. + pub(crate) consecutive_errors_count: usize, + /// If we have a pending compute notification that for some reason we weren't able to send, /// set this to true. If this is set, calls to [`Self::get_reconcile_needed`] will return Yes /// and trigger a Reconciler run. This is the mechanism by which compute notifications are included in the scope @@ -594,6 +603,7 @@ impl TenantShard { waiter: Arc::new(SeqWait::new(Sequence(0))), error_waiter: Arc::new(SeqWait::new(Sequence(0))), last_error: Arc::default(), + consecutive_errors_count: 0, pending_compute_notification: false, scheduling_policy: ShardSchedulingPolicy::default(), preferred_node: None, @@ -1184,11 +1194,19 @@ impl TenantShard { for secondary in self.intent.get_secondary() { // Make sure we don't try to migrate a secondary to our attached location: this case happens // easily in environments without multiple AZs. - let exclude = match self.intent.attached { + let mut exclude = match self.intent.attached { Some(attached) => vec![attached], None => vec![], }; + // Exclude all other secondaries from the scheduling process to avoid replacing + // one existing secondary with another existing secondary. + for another_secondary in self.intent.secondary.iter() { + if another_secondary != secondary { + exclude.push(*another_secondary); + } + } + let replacement = match &self.policy { PlacementPolicy::Attached(_) => { // Secondaries for an attached shard should be scheduled using `SecondaryShardTag` @@ -1348,28 +1366,19 @@ impl TenantShard { /// Reconciliation may still be needed for other aspects of state such as secondaries (see [`Self::dirty`]): this /// funciton should not be used to decide whether to reconcile. pub(crate) fn stably_attached(&self) -> Option { - if let Some(attach_intent) = self.intent.attached { - match self.observed.locations.get(&attach_intent) { - Some(loc) => match &loc.conf { - Some(conf) => match conf.mode { - LocationConfigMode::AttachedMulti - | LocationConfigMode::AttachedSingle - | LocationConfigMode::AttachedStale => { - // Our intent and observed state agree that this node is in an attached state. - Some(attach_intent) - } - // Our observed config is not an attached state - _ => None, - }, - // Our observed state is None, i.e. in flux - None => None, - }, - // We have no observed state for this node - None => None, - } - } else { - // Our intent is not to attach - None + // We have an intent to attach for this node + let attach_intent = self.intent.attached?; + // We have an observed state for this node + let location = self.observed.locations.get(&attach_intent)?; + // Our observed state is not None, i.e.
not in flux + let location_config = location.conf.as_ref()?; + + // Check if our intent and observed state agree that this node is in an attached state. + match location_config.mode { + LocationConfigMode::AttachedMulti + | LocationConfigMode::AttachedSingle + | LocationConfigMode::AttachedStale => Some(attach_intent), + _ => None, } } @@ -1382,8 +1391,13 @@ impl TenantShard { .generation .expect("Attempted to enter attached state without a generation"); - let wanted_conf = - attached_location_conf(generation, &self.shard, &self.config, &self.policy); + let wanted_conf = attached_location_conf( + generation, + &self.shard, + &self.config, + &self.policy, + self.intent.get_secondary().len(), + ); match self.observed.locations.get(&node_id) { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {} Some(_) | None => { @@ -1855,6 +1869,7 @@ impl TenantShard { waiter: Arc::new(SeqWait::new(Sequence::initial())), error_waiter: Arc::new(SeqWait::new(Sequence::initial())), last_error: Arc::default(), + consecutive_errors_count: 0, pending_compute_notification: false, delayed_reconcile: false, scheduling_policy: serde_json::from_str(&tsp.scheduling_policy).unwrap(), @@ -3004,21 +3019,18 @@ pub(crate) mod tests { if attachments_in_wrong_az > 0 { violations.push(format!( - "{} attachments scheduled to the incorrect AZ", - attachments_in_wrong_az + "{attachments_in_wrong_az} attachments scheduled to the incorrect AZ" )); } if secondaries_in_wrong_az > 0 { violations.push(format!( - "{} secondaries scheduled to the incorrect AZ", - secondaries_in_wrong_az + "{secondaries_in_wrong_az} secondaries scheduled to the incorrect AZ" )); } eprintln!( - "attachments_in_wrong_az={} secondaries_in_wrong_az={}", - attachments_in_wrong_az, secondaries_in_wrong_az + "attachments_in_wrong_az={attachments_in_wrong_az} secondaries_in_wrong_az={secondaries_in_wrong_az}" ); for (node_id, stats) in &node_stats { diff --git a/storage_controller/src/timeline_import.rs b/storage_controller/src/timeline_import.rs index eb50819d02..e88bce4c82 100644 --- a/storage_controller/src/timeline_import.rs +++ b/storage_controller/src/timeline_import.rs @@ -195,7 +195,7 @@ impl UpcallClient { let authorization_header = config .control_plane_jwt_token .clone() - .map(|jwt| format!("Bearer {}", jwt)); + .map(|jwt| format!("Bearer {jwt}")); let client = reqwest::ClientBuilder::new() .timeout(IMPORT_COMPLETE_REQUEST_TIMEOUT) diff --git a/storage_scrubber/src/checks.rs b/storage_scrubber/src/checks.rs index 865f0908f9..774418f237 100644 --- a/storage_scrubber/src/checks.rs +++ b/storage_scrubber/src/checks.rs @@ -146,7 +146,7 @@ pub(crate) async fn branch_cleanup_and_check_errors( for (layer, metadata) in index_part.layer_metadata { if metadata.file_size == 0 { result.errors.push(format!( - "index_part.json contains a layer {} that has 0 size in its layer metadata", layer, + "index_part.json contains a layer {layer} that has 0 size in its layer metadata", )) } diff --git a/storage_scrubber/src/lib.rs b/storage_scrubber/src/lib.rs index 25a157f108..d3ed5a8357 100644 --- a/storage_scrubber/src/lib.rs +++ b/storage_scrubber/src/lib.rs @@ -123,7 +123,7 @@ impl S3Target { pub fn with_sub_segment(&self, new_segment: &str) -> Self { let mut new_self = self.clone(); if new_self.prefix_in_bucket.is_empty() { - new_self.prefix_in_bucket = format!("/{}/", new_segment); + new_self.prefix_in_bucket = format!("/{new_segment}/"); } else { if new_self.prefix_in_bucket.ends_with('/') { new_self.prefix_in_bucket.pop(); diff --git 
a/storage_scrubber/src/scan_safekeeper_metadata.rs b/storage_scrubber/src/scan_safekeeper_metadata.rs index f10d758097..cf0a3d19e9 100644 --- a/storage_scrubber/src/scan_safekeeper_metadata.rs +++ b/storage_scrubber/src/scan_safekeeper_metadata.rs @@ -265,7 +265,7 @@ async fn load_timelines_from_db( // so spawn it off to run on its own. tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); @@ -274,7 +274,7 @@ async fn load_timelines_from_db( "and tenant_id in ({})", tenant_ids .iter() - .map(|t| format!("'{}'", t)) + .map(|t| format!("'{t}'")) .collect::>() .join(", ") ) diff --git a/test_runner/fixtures/endpoint/http.py b/test_runner/fixtures/endpoint/http.py index 4b4b98aa6c..294c52321b 100644 --- a/test_runner/fixtures/endpoint/http.py +++ b/test_runner/fixtures/endpoint/http.py @@ -24,7 +24,7 @@ The value to place in the `aud` claim. @final class ComputeClaimsScope(StrEnum): - ADMIN = "admin" + ADMIN = "compute_ctl:admin" @final @@ -57,6 +57,8 @@ class EndpointHttpClient(requests.Session): self.auth = BearerAuth(jwt) self.mount("http://", HTTPAdapter()) + self.prewarm_url = f"http://localhost:{external_port}/lfc/prewarm" + self.offload_url = f"http://localhost:{external_port}/lfc/offload" def dbs_and_roles(self): res = self.get(f"http://localhost:{self.external_port}/dbs_and_roles", auth=self.auth) @@ -64,31 +66,39 @@ class EndpointHttpClient(requests.Session): return res.json() def prewarm_lfc_status(self) -> dict[str, str]: - res = self.get(f"http://localhost:{self.external_port}/lfc/prewarm") + res = self.get(self.prewarm_url) res.raise_for_status() json: dict[str, str] = res.json() return json - def prewarm_lfc(self): - self.post(f"http://localhost:{self.external_port}/lfc/prewarm").raise_for_status() + def prewarm_lfc(self, from_endpoint_id: str | None = None): + params = {"from_endpoint": from_endpoint_id} if from_endpoint_id else dict() + self.post(self.prewarm_url, params=params).raise_for_status() + self.prewarm_lfc_wait() + def prewarm_lfc_wait(self): def prewarmed(): json = self.prewarm_lfc_status() status, err = json["status"], json.get("error") - assert status == "completed", f"{status}, error {err}" + assert status == "completed", f"{status}, {err=}" - wait_until(prewarmed) + wait_until(prewarmed, timeout=60) + + def offload_lfc_status(self) -> dict[str, str]: + res = self.get(self.offload_url) + res.raise_for_status() + json: dict[str, str] = res.json() + return json def offload_lfc(self): - url = f"http://localhost:{self.external_port}/lfc/offload" - self.post(url).raise_for_status() + self.post(self.offload_url).raise_for_status() + self.offload_lfc_wait() + def offload_lfc_wait(self): def offloaded(): - res = self.get(url) - res.raise_for_status() - json = res.json() + json = self.offload_lfc_status() status, err = json["status"], json.get("error") - assert status == "completed", f"{status}, error {err}" + assert status == "completed", f"{status}, {err=}" wait_until(offloaded) diff --git a/test_runner/fixtures/neon_api.py b/test_runner/fixtures/neon_api.py index 0cf5945458..9d85b9a332 100644 --- a/test_runner/fixtures/neon_api.py +++ b/test_runner/fixtures/neon_api.py @@ -129,6 +129,18 @@ class NeonAPI: return cast("dict[str, Any]", resp.json()) + def get_project_limits(self, project_id: str) -> dict[str, Any]: + resp = self.__request( + "GET", + f"/projects/{project_id}/limits", + headers={ + "Accept": "application/json", + "Content-Type": "application/json", + }, + ) + + 
return cast("dict[str, Any]", resp.json()) + def delete_project( self, project_id: str, diff --git a/test_runner/fixtures/neon_cli.py b/test_runner/fixtures/neon_cli.py index bb07e2b6d1..1abd3396e4 100644 --- a/test_runner/fixtures/neon_cli.py +++ b/test_runner/fixtures/neon_cli.py @@ -497,6 +497,7 @@ class NeonLocalCli(AbstractNeonCli): tenant_id: TenantId, pg_version: PgVersion, endpoint_id: str | None = None, + grpc: bool | None = None, hot_standby: bool = False, lsn: Lsn | None = None, pageserver_id: int | None = None, @@ -521,6 +522,8 @@ class NeonLocalCli(AbstractNeonCli): args.extend(["--external-http-port", str(external_http_port)]) if internal_http_port is not None: args.extend(["--internal-http-port", str(internal_http_port)]) + if grpc: + args.append("--grpc") if endpoint_id is not None: args.append(endpoint_id) if hot_standby: @@ -564,6 +567,9 @@ class NeonLocalCli(AbstractNeonCli): basebackup_request_tries: int | None = None, timeout: str | None = None, env: dict[str, str] | None = None, + dev: bool = False, + autoprewarm: bool = False, + offload_lfc_interval_seconds: int | None = None, ) -> subprocess.CompletedProcess[str]: args = [ "endpoint", @@ -589,6 +595,12 @@ class NeonLocalCli(AbstractNeonCli): args.extend(["--create-test-user"]) if timeout is not None: args.extend(["--start-timeout", str(timeout)]) + if autoprewarm: + args.extend(["--autoprewarm"]) + if offload_lfc_interval_seconds is not None: + args.extend(["--offload-lfc-interval-seconds", str(offload_lfc_interval_seconds)]) + if dev: + args.extend(["--dev"]) res = self.raw_cli(args, extra_env_vars) res.check_returncode() @@ -617,7 +629,7 @@ class NeonLocalCli(AbstractNeonCli): destroy=False, check_return_code=True, mode: str | None = None, - ) -> subprocess.CompletedProcess[str]: + ) -> tuple[Lsn | None, subprocess.CompletedProcess[str]]: args = [ "endpoint", "stop", @@ -629,7 +641,11 @@ class NeonLocalCli(AbstractNeonCli): if endpoint_id is not None: args.append(endpoint_id) - return self.raw_cli(args, check_return_code=check_return_code) + proc = self.raw_cli(args, check_return_code=check_return_code) + log.debug(f"endpoint stop stdout: {proc.stdout}") + lsn_str = proc.stdout.split()[-1] + lsn: Lsn | None = None if lsn_str == "null" else Lsn(lsn_str) + return lsn, proc def mappings_map_branch( self, name: str, tenant_id: TenantId, timeline_id: TimelineId diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index db3f080261..1a21c1d8e3 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -453,6 +453,7 @@ class NeonEnvBuilder: pageserver_get_vectored_concurrent_io: str | None = None, pageserver_tracing_config: PageserverTracingConfig | None = None, pageserver_import_config: PageserverImportConfig | None = None, + storcon_kick_secondary_downloads: bool | None = True, ): self.repo_dir = repo_dir self.rust_log_override = rust_log_override @@ -489,7 +490,9 @@ class NeonEnvBuilder: self.config_init_force: str | None = None self.top_output_dir = top_output_dir self.control_plane_hooks_api: str | None = None - self.storage_controller_config: dict[Any, Any] | None = None + self.storage_controller_config: dict[Any, Any] | None = { + "timelines_onto_safekeepers": True, + } # Flag to enable https listener in pageserver, generate local ssl certs, # and force storage controller to use https for pageserver api. 
@@ -512,6 +515,8 @@ class NeonEnvBuilder:
         self.pageserver_tracing_config = pageserver_tracing_config
         self.pageserver_import_config = pageserver_import_config
 
+        self.storcon_kick_secondary_downloads = storcon_kick_secondary_downloads
+
         self.pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None = (
             pageserver_default_tenant_config_compaction_algorithm
         )
@@ -719,15 +724,21 @@ class NeonEnvBuilder:
             shutil.copytree(storcon_db_from_dir, storcon_db_to_dir, ignore=ignore_postgres_log)
             assert not (storcon_db_to_dir / "postgres.log").exists()
+
         # NB: neon_local rewrites postgresql.conf on each start based on neon_local config. No need to patch it.
-        # However, in this new NeonEnv, the pageservers listen on different ports, and the storage controller
-        # will currently reject re-attach requests from them because the NodeMetadata isn't identical.
+        # However, in this new NeonEnv, the pageservers and safekeepers listen on different ports, and the storage
+        # controller will currently reject re-attach requests from them because the NodeMetadata isn't identical.
         # So, from_repo_dir patches up the the storcon database.
         patch_script_path = self.repo_dir / "storage_controller_db.startup.sql"
         assert not patch_script_path.exists()
         patch_script = ""
+
         for ps in self.env.pageservers:
-            patch_script += f"UPDATE nodes SET listen_http_port={ps.service_port.http}, listen_pg_port={ps.service_port.pg} WHERE node_id = '{ps.id}';"
+            patch_script += f"UPDATE nodes SET listen_http_port={ps.service_port.http}, listen_pg_port={ps.service_port.pg} WHERE node_id = '{ps.id}';\n"
+
+        for sk in self.env.safekeepers:
+            patch_script += f"UPDATE safekeepers SET http_port={sk.port.http}, port={sk.port.pg} WHERE id = '{sk.id}';\n"
+
         patch_script_path.write_text(patch_script)
 
         # Update the config with info about tenants and timelines
@@ -1210,6 +1221,13 @@ class NeonEnv:
             storage_controller_config = storage_controller_config or {}
             storage_controller_config["use_https_safekeeper_api"] = True
 
+        # TODO(diko): uncomment when timeline_safekeeper_count option is in the release branch,
+        # so the compat tests will not fail because of its presence.
+ # if config.num_safekeepers < 3: + # storage_controller_config = storage_controller_config or {} + # if "timeline_safekeeper_count" not in storage_controller_config: + # storage_controller_config["timeline_safekeeper_count"] = config.num_safekeepers + if storage_controller_config is not None: cfg["storage_controller"] = storage_controller_config @@ -1219,6 +1237,14 @@ class NeonEnv: else: cfg["storage_controller"] = {"use_local_compute_notifications": False} + if config.storcon_kick_secondary_downloads is not None: + # Configure whether storage controller should actively kick off secondary downloads + if "storage_controller" not in cfg: + cfg["storage_controller"] = {} + cfg["storage_controller"]["kick_secondary_downloads"] = ( + config.storcon_kick_secondary_downloads + ) + # Create config for pageserver http_auth_type = "NeonJWT" if config.auth_enabled else "Trust" pg_auth_type = "NeonJWT" if config.auth_enabled else "Trust" @@ -1228,6 +1254,7 @@ class NeonEnv: ): pageserver_port = PageserverPort( pg=self.port_distributor.get_port(), + grpc=self.port_distributor.get_port(), http=self.port_distributor.get_port(), https=self.port_distributor.get_port() if config.use_https_pageserver_api else None, ) @@ -1243,13 +1270,14 @@ class NeonEnv: ps_cfg: dict[str, Any] = { "id": ps_id, "listen_pg_addr": f"localhost:{pageserver_port.pg}", + "listen_grpc_addr": f"localhost:{pageserver_port.grpc}", "listen_http_addr": f"localhost:{pageserver_port.http}", "listen_https_addr": f"localhost:{pageserver_port.https}" if config.use_https_pageserver_api else None, "pg_auth_type": pg_auth_type, - "http_auth_type": http_auth_type, "grpc_auth_type": grpc_auth_type, + "http_auth_type": http_auth_type, "availability_zone": availability_zone, # Disable pageserver disk syncs in tests: when running tests concurrently, this avoids # the pageserver taking a long time to start up due to syncfs flushing other tests' data @@ -1762,6 +1790,7 @@ def neon_env_builder( @dataclass class PageserverPort: pg: int + grpc: int http: int https: int | None = None @@ -1838,6 +1867,7 @@ class PageserverSchedulingPolicy(StrEnum): FILLING = "Filling" PAUSE = "Pause" PAUSE_FOR_RESTART = "PauseForRestart" + DELETING = "Deleting" class StorageControllerLeadershipStatus(StrEnum): @@ -2046,14 +2076,38 @@ class NeonStorageController(MetricsGetter, LogUtils): headers=self.headers(TokenScope.ADMIN), ) - def node_delete(self, node_id): - log.info(f"node_delete({node_id})") + def node_delete_old(self, node_id): + log.info(f"node_delete_old({node_id})") self.request( "DELETE", f"{self.api}/control/v1/node/{node_id}", headers=self.headers(TokenScope.ADMIN), ) + def node_delete(self, node_id): + log.info(f"node_delete({node_id})") + self.request( + "PUT", + f"{self.api}/control/v1/node/{node_id}/delete", + headers=self.headers(TokenScope.ADMIN), + ) + + def cancel_node_delete(self, node_id): + log.info(f"cancel_node_delete({node_id})") + self.request( + "DELETE", + f"{self.api}/control/v1/node/{node_id}/delete", + headers=self.headers(TokenScope.ADMIN), + ) + + def tombstone_delete(self, node_id): + log.info(f"tombstone_delete({node_id})") + self.request( + "DELETE", + f"{self.api}/debug/v1/tombstone/{node_id}", + headers=self.headers(TokenScope.ADMIN), + ) + def node_drain(self, node_id): log.info(f"node_drain({node_id})") self.request( @@ -2110,6 +2164,14 @@ class NeonStorageController(MetricsGetter, LogUtils): ) return response.json() + def tombstone_list(self): + response = self.request( + "GET", + f"{self.api}/debug/v1/tombstone", + 
headers=self.headers(TokenScope.ADMIN), + ) + return response.json() + def tenant_shard_dump(self): """ Debug listing API: dumps the internal map of tenant shards @@ -2194,6 +2256,21 @@ class NeonStorageController(MetricsGetter, LogUtils): response.raise_for_status() log.info(f"timeline_create success: {response.json()}") + def migrate_safekeepers( + self, + tenant_id: TenantId, + timeline_id: TimelineId, + new_sk_set: list[int], + ): + response = self.request( + "POST", + f"{self.api}/v1/tenant/{tenant_id}/timeline/{timeline_id}/safekeeper_migrate", + json={"new_sk_set": new_sk_set}, + headers=self.headers(TokenScope.PAGE_SERVER_API), + ) + response.raise_for_status() + log.info(f"migrate_safekeepers success: {response.json()}") + def locate(self, tenant_id: TenantId) -> list[dict[str, Any]]: """ :return: list of {"shard_id": "", "node_id": int, "listen_pg_addr": str, "listen_pg_port": int, "listen_http_addr": str, "listen_http_port": int} @@ -2207,6 +2284,17 @@ class NeonStorageController(MetricsGetter, LogUtils): shards: list[dict[str, Any]] = body["shards"] return shards + def timeline_locate(self, tenant_id: TenantId, timeline_id: TimelineId): + """ + :return: dict {"generation": int, "sk_set": [int], "new_sk_set": [int]} + """ + response = self.request( + "GET", + f"{self.api}/debug/v1/tenant/{tenant_id}/timeline/{timeline_id}/locate", + headers=self.headers(TokenScope.ADMIN), + ) + return response.json() + def tenant_describe(self, tenant_id: TenantId): """ :return: list of {"shard_id": "", "node_id": int, "listen_pg_addr": str, "listen_pg_port": int, "listen_http_addr: str, "listen_http_port: int, preferred_az_id: str} @@ -2333,6 +2421,7 @@ class NeonStorageController(MetricsGetter, LogUtils): delay_max = max_interval while n > 0: n = self.reconcile_all() + if n == 0: break elif time.time() - start_at > timeout_secs: @@ -4030,6 +4119,16 @@ def static_proxy( "CREATE TABLE neon_control_plane.endpoints (endpoint_id VARCHAR(255) PRIMARY KEY, allowed_ips VARCHAR(255))" ) + vanilla_pg.stop() + vanilla_pg.edit_hba( + [ + "local all all trust", + "host all all 127.0.0.1/32 scram-sha-256", + "host all all ::1/128 scram-sha-256", + ] + ) + vanilla_pg.start() + proxy_port = port_distributor.get_port() mgmt_port = port_distributor.get_port() http_port = port_distributor.get_port() @@ -4155,6 +4254,8 @@ class Endpoint(PgProtocol, LogUtils): self._running = threading.Semaphore(0) self.__jwt: str | None = None + self.terminate_flush_lsn: Lsn | None = None + def http_client(self, retries: Retry | None = None) -> EndpointHttpClient: assert self.__jwt is not None return EndpointHttpClient( @@ -4167,6 +4268,7 @@ class Endpoint(PgProtocol, LogUtils): self, branch_name: str, endpoint_id: str | None = None, + grpc: bool | None = None, hot_standby: bool = False, lsn: Lsn | None = None, config_lines: list[str] | None = None, @@ -4191,6 +4293,7 @@ class Endpoint(PgProtocol, LogUtils): endpoint_id=self.endpoint_id, tenant_id=self.tenant_id, lsn=lsn, + grpc=grpc, hot_standby=hot_standby, pg_port=self.pg_port, external_http_port=self.external_http_port, @@ -4206,7 +4309,18 @@ class Endpoint(PgProtocol, LogUtils): # set small 'max_replication_write_lag' to enable backpressure # and make tests more stable. - config_lines = ["max_replication_write_lag=15MB"] + config_lines + config_lines += ["max_replication_write_lag=15MB"] + + # If gRPC is enabled, use the new communicator too. + # + # NB: the communicator is enabled by default, so force it to false otherwise. 
+ # + # XXX: By checking for None, we enable the new communicator for all tests + # by default + if grpc or grpc is None: + config_lines += [f"neon.enable_new_communicator=on"] + else: + config_lines += [f"neon.enable_new_communicator=off"] # Delete file cache if it exists (and we're recreating the endpoint) if USE_LFC: @@ -4259,6 +4373,8 @@ class Endpoint(PgProtocol, LogUtils): basebackup_request_tries: int | None = None, timeout: str | None = None, env: dict[str, str] | None = None, + autoprewarm: bool = False, + offload_lfc_interval_seconds: int | None = None, ) -> Self: """ Start the Postgres instance. @@ -4283,6 +4399,8 @@ class Endpoint(PgProtocol, LogUtils): basebackup_request_tries=basebackup_request_tries, timeout=timeout, env=env, + autoprewarm=autoprewarm, + offload_lfc_interval_seconds=offload_lfc_interval_seconds, ) self._running.release(1) self.log_config_value("shared_buffers") @@ -4457,9 +4575,10 @@ class Endpoint(PgProtocol, LogUtils): running = self._running.acquire(blocking=False) if running: assert self.endpoint_id is not None - self.env.neon_cli.endpoint_stop( + lsn, _ = self.env.neon_cli.endpoint_stop( self.endpoint_id, check_return_code=self.check_stop_result, mode=mode ) + self.terminate_flush_lsn = lsn if sks_wait_walreceiver_gone is not None: for sk in sks_wait_walreceiver_gone[0]: @@ -4477,9 +4596,10 @@ class Endpoint(PgProtocol, LogUtils): running = self._running.acquire(blocking=False) if running: assert self.endpoint_id is not None - self.env.neon_cli.endpoint_stop( + lsn, _ = self.env.neon_cli.endpoint_stop( self.endpoint_id, True, check_return_code=self.check_stop_result, mode=mode ) + self.terminate_flush_lsn = lsn self.endpoint_id = None return self @@ -4488,6 +4608,7 @@ class Endpoint(PgProtocol, LogUtils): self, branch_name: str, endpoint_id: str | None = None, + grpc: bool | None = None, hot_standby: bool = False, lsn: Lsn | None = None, config_lines: list[str] | None = None, @@ -4495,6 +4616,8 @@ class Endpoint(PgProtocol, LogUtils): pageserver_id: int | None = None, allow_multiple: bool = False, basebackup_request_tries: int | None = None, + autoprewarm: bool = False, + offload_lfc_interval_seconds: int | None = None, ) -> Self: """ Create an endpoint, apply config, and start Postgres. 
@@ -4505,6 +4628,7 @@ class Endpoint(PgProtocol, LogUtils): branch_name=branch_name, endpoint_id=endpoint_id, config_lines=config_lines, + grpc=grpc, hot_standby=hot_standby, lsn=lsn, pageserver_id=pageserver_id, @@ -4514,6 +4638,8 @@ class Endpoint(PgProtocol, LogUtils): pageserver_id=pageserver_id, allow_multiple=allow_multiple, basebackup_request_tries=basebackup_request_tries, + autoprewarm=autoprewarm, + offload_lfc_interval_seconds=offload_lfc_interval_seconds, ) return self @@ -4592,11 +4718,14 @@ class EndpointFactory: endpoint_id: str | None = None, tenant_id: TenantId | None = None, lsn: Lsn | None = None, + grpc: bool | None = None, hot_standby: bool = False, config_lines: list[str] | None = None, remote_ext_base_url: str | None = None, pageserver_id: int | None = None, basebackup_request_tries: int | None = None, + autoprewarm: bool = False, + offload_lfc_interval_seconds: int | None = None, ) -> Endpoint: ep = Endpoint( self.env, @@ -4611,12 +4740,15 @@ class EndpointFactory: return ep.create_start( branch_name=branch_name, endpoint_id=endpoint_id, + grpc=grpc, hot_standby=hot_standby, config_lines=config_lines, lsn=lsn, remote_ext_base_url=remote_ext_base_url, pageserver_id=pageserver_id, basebackup_request_tries=basebackup_request_tries, + autoprewarm=autoprewarm, + offload_lfc_interval_seconds=offload_lfc_interval_seconds, ) def create( @@ -4625,6 +4757,7 @@ class EndpointFactory: endpoint_id: str | None = None, tenant_id: TenantId | None = None, lsn: Lsn | None = None, + grpc: bool | None = None, hot_standby: bool = False, config_lines: list[str] | None = None, pageserver_id: int | None = None, @@ -4647,6 +4780,7 @@ class EndpointFactory: branch_name=branch_name, endpoint_id=endpoint_id, lsn=lsn, + grpc=grpc, hot_standby=hot_standby, config_lines=config_lines, pageserver_id=pageserver_id, @@ -4671,6 +4805,7 @@ class EndpointFactory: self, origin: Endpoint, endpoint_id: str | None = None, + grpc: bool | None = None, config_lines: list[str] | None = None, ) -> Endpoint: branch_name = origin.branch_name @@ -4682,6 +4817,7 @@ class EndpointFactory: endpoint_id=endpoint_id, tenant_id=origin.tenant_id, lsn=None, + grpc=grpc, hot_standby=True, config_lines=config_lines, ) @@ -4690,6 +4826,7 @@ class EndpointFactory: self, origin: Endpoint, endpoint_id: str | None = None, + grpc: bool | None = None, config_lines: list[str] | None = None, ) -> Endpoint: branch_name = origin.branch_name @@ -4701,6 +4838,7 @@ class EndpointFactory: endpoint_id=endpoint_id, tenant_id=origin.tenant_id, lsn=None, + grpc=grpc, hot_standby=True, config_lines=config_lines, ) @@ -4852,6 +4990,9 @@ class Safekeeper(LogUtils): log.info(f"finished pulling timeline from {src_ids} to {self.id}") return res + def safekeeper_id(self) -> SafekeeperId: + return SafekeeperId(self.id, "localhost", self.port.pg_tenant_only) + @property def data_dir(self) -> Path: return self.env.repo_dir / "safekeepers" / f"sk{self.id}" @@ -5237,6 +5378,7 @@ SKIP_FILES = frozenset( "postmaster.pid", "pg_control", "pg_dynshmem", + ".metrics.socket", ) ) diff --git a/test_runner/fixtures/pageserver/allowed_errors.py b/test_runner/fixtures/pageserver/allowed_errors.py index 9b564f0a60..6a715c4b93 100755 --- a/test_runner/fixtures/pageserver/allowed_errors.py +++ b/test_runner/fixtures/pageserver/allowed_errors.py @@ -111,6 +111,7 @@ DEFAULT_PAGESERVER_ALLOWED_ERRORS = ( ".*stalling layer flushes for compaction backpressure.*", ".*layer roll waiting for flush due to compaction backpressure.*", ".*BatchSpanProcessor.*", + ".*No broker 
updates received for a while.*", *( [ r".*your platform is not a supported production platform, ignoing request for O_DIRECT; this could hide alignment bugs.*" diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index c29192c25c..d9037f2d08 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -1219,3 +1219,31 @@ class PageserverHttpClient(requests.Session, MetricsGetter): ) self.verbose_error(res) return res.json() + + def force_override_feature_flag(self, flag: str, value: str | None = None): + if value is None: + res = self.delete( + f"http://localhost:{self.port}/v1/feature_flag/{flag}", + ) + else: + res = self.put( + f"http://localhost:{self.port}/v1/feature_flag/{flag}", + params={"value": value}, + ) + self.verbose_error(res) + + def evaluate_feature_flag_boolean(self, tenant_id: TenantId, flag: str) -> Any: + res = self.get( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/feature_flag/{flag}", + params={"as": "boolean"}, + ) + self.verbose_error(res) + return res.json() + + def evaluate_feature_flag_multivariate(self, tenant_id: TenantId, flag: str) -> Any: + res = self.get( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/feature_flag/{flag}", + params={"as": "multivariate"}, + ) + self.verbose_error(res) + return res.json() diff --git a/test_runner/performance/large_synthetic_oltp/grow_action_blocks.sql b/test_runner/performance/large_synthetic_oltp/grow_action_blocks.sql new file mode 100644 index 0000000000..0860b76331 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_action_blocks.sql @@ -0,0 +1,22 @@ +-- add 100000 rows or approximately 11 MB to the action_blocks table +-- takes about 1 second +INSERT INTO workflows.action_blocks ( + id, + uuid, + created_at, + status, + function_signature, + reference_id, + blocking, + run_synchronously +) +SELECT + id, + uuid_generate_v4(), + now() - (random() * interval '100 days'), -- Random date within the last 100 days + 'CONDITIONS_NOT_MET', + 'function_signature_' || id, -- Create a unique function signature using id + CASE WHEN random() > 0.5 THEN 'reference_' || id ELSE NULL END, -- 50% chance of being NULL + true, + CASE WHEN random() > 0.5 THEN true ELSE false END -- Random boolean value +FROM generate_series(1, 100000) AS id; \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_action_kwargs.sql b/test_runner/performance/large_synthetic_oltp/grow_action_kwargs.sql new file mode 100644 index 0000000000..8a2b7c398a --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_action_kwargs.sql @@ -0,0 +1,11 @@ +-- add 100000 rows or approximately 10 MB to the action_kwargs table +-- takes about 5 minutes +INSERT INTO workflows.action_kwargs (created_at, key, uuid, value_id, state_value_id, action_block_id) +SELECT + now(), -- Using the default value for `created_at` + 'key_' || gs.id, -- Generating a unique key based on the id + uuid_generate_v4(), -- Generating a new UUID for each row + CASE WHEN gs.id % 2 = 0 THEN gs.id ELSE NULL END, -- Setting value_id for even ids + CASE WHEN gs.id % 2 <> 0 THEN gs.id ELSE NULL END, -- Setting state_value_id for odd ids + 1 -- Setting action_block_id as 1 for simplicity +FROM generate_series(1, 100000) AS gs(id); \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_device_fingerprint_event.sql b/test_runner/performance/large_synthetic_oltp/grow_device_fingerprint_event.sql new file mode 
100644 index 0000000000..1ef38451b7 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_device_fingerprint_event.sql @@ -0,0 +1,56 @@ +-- add 100000 rows or approx. 30 MB to the device_fingerprint_event table +-- takes about 4 minutes +INSERT INTO authentication.device_fingerprint_event ( + uuid, + created_at, + identity_uuid, + fingerprint_request_id, + fingerprint_id, + confidence_score, + ip_address, + url, + client_referrer, + last_seen_at, + raw_fingerprint_response, + session_uuid, + fingerprint_response, + browser_version, + browser_name, + device, + operating_system, + operating_system_version, + user_agent, + ip_address_location_city, + ip_address_location_region, + ip_address_location_country_code, + ip_address_location_latitude, + ip_address_location_longitude, + is_incognito +) +SELECT + gen_random_uuid(), -- Generates a random UUID for primary key + now() - (random() * interval '10 days'), -- Random timestamp within the last 10 days + gen_random_uuid(), -- Random UUID for identity + md5(gs::text), -- Simulates unique fingerprint request ID using `md5` hash of series number + md5((gs + 10000)::text), -- Simulates unique fingerprint ID + round(CAST(random() AS numeric), 2), -- Generates a random score between 0 and 1, cast `random()` to numeric + '192.168.' || (random() * 255)::int || '.' || (random() * 255)::int, -- Random IP address + 'https://example.com/' || (gs % 1000), -- Random URL with series number suffix + CASE WHEN random() < 0.5 THEN NULL ELSE 'https://referrer.com/' || (gs % 100)::text END, -- Random referrer, 50% chance of being NULL + now() - (random() * interval '5 days'), -- Last seen timestamp within the last 5 days + NULL, -- Keeping raw_fingerprint_response NULL for simplicity + CASE WHEN random() < 0.3 THEN gen_random_uuid() ELSE NULL END, -- Session UUID, 30% chance of NULL + NULL, -- Keeping fingerprint_response NULL for simplicity + CASE WHEN random() < 0.5 THEN '93.0' ELSE '92.0' END, -- Random browser version + CASE WHEN random() < 0.5 THEN 'Firefox' ELSE 'Chrome' END, -- Random browser name + CASE WHEN random() < 0.5 THEN 'Desktop' ELSE 'Mobile' END, -- Random device type + 'Windows', -- Static value for operating system + '10.0', -- Static value for operating system version + 'Mozilla/5.0', -- Static value for user agent + 'City ' || (gs % 1000)::text, -- Random city name + 'Region ' || (gs % 100)::text, -- Random region name + 'US', -- Static country code + random() * 180 - 90, -- Random latitude between -90 and 90 + random() * 360 - 180, -- Random longitude between -180 and 180 + random() < 0.1 -- 10% chance of being incognito +FROM generate_series(1, 100000) AS gs; \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_edges.sql b/test_runner/performance/large_synthetic_oltp/grow_edges.sql new file mode 100644 index 0000000000..17f289fe5b --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_edges.sql @@ -0,0 +1,10 @@ +-- add 100000 rows or approximately 11 MB to the edges table +-- takes about 1 minute +INSERT INTO workflows.edges (created_at, workflow_id, uuid, from_vertex_id, to_vertex_id) +SELECT + now() - (random() * interval '365 days'), -- Random `created_at` timestamp in the last year + (random() * 100)::int + 1, -- Random `workflow_id` between 1 and 100 + uuid_generate_v4(), -- Generate a new UUID for each row + (random() * 100000)::bigint + 1, -- Random `from_vertex_id` between 1 and 100,000 + (random() * 100000)::bigint + 1 -- Random `to_vertex_id` between 1 and 
100,000 +FROM generate_series(1, 100000) AS gs; -- Generate 100,000 sequential IDs \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_hotel_rate_mapping.sql b/test_runner/performance/large_synthetic_oltp/grow_hotel_rate_mapping.sql new file mode 100644 index 0000000000..1e79f94eab --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_hotel_rate_mapping.sql @@ -0,0 +1,21 @@ +-- add 100000 rows or approximately 10 MB to the hotel_rate_mapping table +-- takes about 1 second +INSERT INTO booking_inventory.hotel_rate_mapping ( + uuid, + created_at, + updated_at, + hotel_rate_id, + remote_id, + source +) +SELECT + uuid_generate_v4(), -- Unique UUID for each row + now(), -- Created at timestamp + now(), -- Updated at timestamp + 'rate_' || gs AS hotel_rate_id, -- Unique hotel_rate_id + 'remote_' || gs AS remote_id, -- Unique remote_id + CASE WHEN gs % 3 = 0 THEN 'source_1' + WHEN gs % 3 = 1 THEN 'source_2' + ELSE 'source_3' + END AS source -- Distributing sources among three options +FROM generate_series(1, 100000) AS gs; \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_ocr_pipeline_results_version.sql b/test_runner/performance/large_synthetic_oltp/grow_ocr_pipeline_results_version.sql new file mode 100644 index 0000000000..21ebac74d2 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_ocr_pipeline_results_version.sql @@ -0,0 +1,31 @@ +-- add 100000 rows or approximately 20 MB to the ocr_pipeline_results_version table +-- takes about 1 second +INSERT INTO ocr.ocr_pipeline_results_version ( + id, transaction_id, operation_type, created_at, updated_at, s3_filename, completed_at, result, + end_transaction_id, pipeline_type, is_async, callback, callback_kwargs, input, error, file_type, s3_bucket_name, pipeline_kwargs +) +SELECT + gs.aid, -- id + gs.aid, -- transaction_id (same as id for simplicity) + (gs.aid % 5)::smallint + 1, -- operation_type (cyclic values from 1 to 5) + now() - interval '1 day' * (random() * 30), -- created_at (random timestamp within the last 30 days) + now() - interval '1 day' * (random() * 30), -- updated_at (random timestamp within the last 30 days) + 's3_file_' || gs.aid || '.txt', -- s3_filename (synthetic filename) + now() - interval '1 day' * (random() * 30), -- completed_at (random timestamp within the last 30 days) + '{}'::jsonb, -- result (empty JSON object) + NULL, -- end_transaction_id (NULL) + CASE (gs.aid % 3) -- pipeline_type (cyclic text values) + WHEN 0 THEN 'OCR' + WHEN 1 THEN 'PDF' + ELSE 'Image' + END, + gs.aid % 2 = 0, -- is_async (alternating between true and false) + 'http://callback/' || gs.aid, -- callback (synthetic URL) + '{}'::jsonb, -- callback_kwargs (empty JSON object) + 'Input text ' || gs.aid, -- input (synthetic input text) + NULL, -- error (NULL) + 'pdf', -- file_type (default to 'pdf') + 'bucket_' || gs.aid % 10, -- s3_bucket_name (synthetic bucket names) + '{}'::jsonb -- pipeline_kwargs (empty JSON object) +FROM + generate_series(1, 100000) AS gs(aid); \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_priceline_raw_response.sql b/test_runner/performance/large_synthetic_oltp/grow_priceline_raw_response.sql new file mode 100644 index 0000000000..28c4f1a7fb --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_priceline_raw_response.sql @@ -0,0 +1,18 @@ +-- add 100000 rows or approx. 
20 MB to the priceline_raw_response table +-- takes about 20 seconds +INSERT INTO booking_inventory.priceline_raw_response ( + uuid, created_at, updated_at, url, base_url, path, method, params, request, response +) +SELECT + gen_random_uuid(), -- Generate random UUIDs + now() - (random() * interval '30 days'), -- Random creation time within the past 30 days + now() - (random() * interval '30 days'), -- Random update time within the past 30 days + 'https://example.com/resource/' || gs, -- Construct a unique URL for each row + 'https://example.com', -- Base URL for all rows + '/resource/' || gs, -- Path for each row + CASE WHEN gs % 2 = 0 THEN 'GET' ELSE 'POST' END, -- Alternate between GET and POST methods + 'id=' || gs, -- Simple parameter pattern for each row + '{}'::jsonb, -- Empty JSON object for request + jsonb_build_object('status', 'success', 'data', gs) -- Construct a valid JSON response +FROM + generate_series(1, 100000) AS gs; \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_relabled_transactions.sql b/test_runner/performance/large_synthetic_oltp/grow_relabled_transactions.sql new file mode 100644 index 0000000000..0b1aa2d2bd --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_relabled_transactions.sql @@ -0,0 +1,26 @@ +-- add 100000 rows or approx. 1 MB to the relabeled_transactions table +-- takes about 1 second +INSERT INTO heron.relabeled_transactions ( + id, + created_at, + universal_transaction_id, + raw_result, + category, + category_confidence, + merchant, + batch_id +) +SELECT + gs.aid AS id, + now() - (gs.aid % 1000) * interval '1 second' AS created_at, + 'txn_' || gs.aid AS universal_transaction_id, + '{}'::jsonb AS raw_result, + CASE WHEN gs.aid % 5 = 0 THEN 'grocery' + WHEN gs.aid % 5 = 1 THEN 'electronics' + WHEN gs.aid % 5 = 2 THEN 'clothing' + WHEN gs.aid % 5 = 3 THEN 'utilities' + ELSE NULL END AS category, + ROUND(RANDOM()::numeric, 2) AS category_confidence, + CASE WHEN gs.aid % 2 = 0 THEN 'Merchant_' || gs.aid % 20 ELSE NULL END AS merchant, + gs.aid % 100 + 1 AS batch_id +FROM generate_series(1, 100000) AS gs(aid); \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_state_values.sql b/test_runner/performance/large_synthetic_oltp/grow_state_values.sql new file mode 100644 index 0000000000..8a8ce146be --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_state_values.sql @@ -0,0 +1,9 @@ +-- add 100000 rows or approx.10 MB to the state_values table +-- takes about 14 seconds +INSERT INTO workflows.state_values (key, workflow_id, state_type, value_id) +SELECT + 'key_' || gs::text, -- Key: Generate as 'key_1', 'key_2', etc. + (gs - 1) / 1000 + 1, -- workflow_id: Distribute over a range (1000 workflows) + 'STATIC', -- state_type: Use constant 'STATIC' as defined in schema + gs::bigint -- value_id: Use the same as the series value +FROM generate_series(1, 100000) AS gs; -- Generate 100,000 rows \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_values.sql b/test_runner/performance/large_synthetic_oltp/grow_values.sql new file mode 100644 index 0000000000..3afdafdf86 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_values.sql @@ -0,0 +1,30 @@ +-- add 100000 rows or approx. 
24 MB to the values table +-- takes about 126 seconds +INSERT INTO workflows.values ( + id, + type, + int_value, + string_value, + child_type, + bool_value, + uuid, + numeric_value, + workflow_id, + jsonb_value, + parent_value_id +) +SELECT + gs AS id, + 'TYPE_A' AS type, + CASE WHEN selector = 1 THEN gs ELSE NULL END AS int_value, + CASE WHEN selector = 2 THEN 'string_value_' || gs::text ELSE NULL END AS string_value, + 'CHILD_TYPE_A' AS child_type, -- Always non-null + CASE WHEN selector = 3 THEN (gs % 2 = 0) ELSE NULL END AS bool_value, + uuid_generate_v4() AS uuid, -- Always non-null + CASE WHEN selector = 4 THEN gs * 1.0 ELSE NULL END AS numeric_value, + (array[1, 2, 3, 4, 5])[gs % 5 + 1] AS workflow_id, -- Use only existing workflow IDs + CASE WHEN selector = 5 THEN ('{"key":' || gs::text || '}')::jsonb ELSE NULL END AS jsonb_value, + (gs % 100) + 1 AS parent_value_id -- Always non-null +FROM + generate_series(1, 100000) AS gs, + (SELECT floor(random() * 5 + 1)::int AS selector) AS s; \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/grow_vertices.sql b/test_runner/performance/large_synthetic_oltp/grow_vertices.sql new file mode 100644 index 0000000000..87a2410e8a --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/grow_vertices.sql @@ -0,0 +1,26 @@ +-- add 100000 rows or approx. 18 MB to the vertices table +-- takes about 90 seconds +INSERT INTO workflows.vertices( + uuid, + created_at, + condition_block_id, + operator, + has_been_visited, + reference_id, + workflow_id, + meta_data, + -- id, + action_block_id +) +SELECT + uuid_generate_v4() AS uuid, + now() AS created_at, + CASE WHEN (gs % 2 = 0) THEN gs % 10 ELSE NULL END AS condition_block_id, -- Every alternate row has a condition_block_id + 'operator_' || (gs % 10) AS operator, -- Cyclical operator values (e.g., operator_0, operator_1) + false AS has_been_visited, + 'ref_' || gs AS reference_id, -- Unique reference_id for each row + (gs % 1000) + 1 AS workflow_id, -- Random workflow_id values between 1 and 1000 + '{}'::jsonb AS meta_data, -- Empty JSON metadata + -- gs AS id, -- default from sequence to get unique ID + CASE WHEN (gs % 2 = 1) THEN gs ELSE NULL END AS action_block_id -- Complementary to condition_block_id +FROM generate_series(1, 100000) AS gs; \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/update_accounting_coding_body_tracking_category_selection.sql b/test_runner/performance/large_synthetic_oltp/update_accounting_coding_body_tracking_category_selection.sql new file mode 100644 index 0000000000..78688fc8ba --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_accounting_coding_body_tracking_category_selection.sql @@ -0,0 +1,9 @@ +-- update approximately 2000 rows or 200 kb in the accounting_coding_body_tracking_category_selection table +-- takes about 1 second +UPDATE accounting.accounting_coding_body_tracking_category_selection +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM accounting.accounting_coding_body_tracking_category_selection + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_action_blocks.sql b/test_runner/performance/large_synthetic_oltp/update_action_blocks.sql new file mode 100644 index 0000000000..ad1ee6c749 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_action_blocks.sql @@ -0,0 +1,9 @@ +-- update approximately 9000 rows or 1 MB in the action_blocks table +-- takes about 1 second +UPDATE 
workflows.action_blocks +SET run_synchronously = NOT run_synchronously +WHERE ctid in ( + SELECT ctid + FROM workflows.action_blocks + TABLESAMPLE SYSTEM (0.001) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_action_kwargs.sql b/test_runner/performance/large_synthetic_oltp/update_action_kwargs.sql new file mode 100644 index 0000000000..b939c0ff2d --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_action_kwargs.sql @@ -0,0 +1,9 @@ +-- update approximately 5000 rows or 1 MB in the action_kwargs table +-- takes about 1 second +UPDATE workflows.action_kwargs +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM workflows.action_kwargs + TABLESAMPLE SYSTEM (0.0002) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_denormalized_approval_workflow.sql b/test_runner/performance/large_synthetic_oltp/update_denormalized_approval_workflow.sql new file mode 100644 index 0000000000..671ddbc2d4 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_denormalized_approval_workflow.sql @@ -0,0 +1,10 @@ +-- update approximately 3000 rows or 500 KB in the denormalized_approval_workflow table +-- takes about 1 second +UPDATE approvals_v2.denormalized_approval_workflow +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM approvals_v2.denormalized_approval_workflow + TABLESAMPLE SYSTEM (0.0005) +); + diff --git a/test_runner/performance/large_synthetic_oltp/update_device_fingerprint_event.sql b/test_runner/performance/large_synthetic_oltp/update_device_fingerprint_event.sql new file mode 100644 index 0000000000..20baf12887 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_device_fingerprint_event.sql @@ -0,0 +1,9 @@ +-- update approximately 2000 rows or 1 MB in the device_fingerprint_event table +-- takes about 5 seconds +UPDATE authentication.device_fingerprint_event +SET is_incognito = NOT is_incognito +WHERE ctid in ( + SELECT ctid + FROM authentication.device_fingerprint_event + TABLESAMPLE SYSTEM (0.001) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_edges.sql b/test_runner/performance/large_synthetic_oltp/update_edges.sql new file mode 100644 index 0000000000..d79da78de3 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_edges.sql @@ -0,0 +1,9 @@ +-- update approximately 4000 rows or 600 kb in the edges table +-- takes about 1 second +UPDATE workflows.edges +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM workflows.edges + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_heron_transaction_enriched_log.sql b/test_runner/performance/large_synthetic_oltp/update_heron_transaction_enriched_log.sql new file mode 100644 index 0000000000..5bcc885736 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_heron_transaction_enriched_log.sql @@ -0,0 +1,9 @@ +-- update approximately 10000 rows or 200 KB in the heron_transaction_enriched_log table +-- takes about 1 minutes +UPDATE heron.heron_transaction_enriched_log +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM heron.heron_transaction_enriched_log + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_heron_transaction_enrichment_requests.sql b/test_runner/performance/large_synthetic_oltp/update_heron_transaction_enrichment_requests.sql new file mode 100644 index 0000000000..02cf0ca420 --- /dev/null +++ 
b/test_runner/performance/large_synthetic_oltp/update_heron_transaction_enrichment_requests.sql @@ -0,0 +1,9 @@ +-- update approximately 4000 rows or 1 MB in the heron_transaction_enrichment_requests table +-- takes about 2 minutes +UPDATE heron.heron_transaction_enrichment_requests +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM heron.heron_transaction_enrichment_requests + TABLESAMPLE SYSTEM (0.0002) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_hotel_rate_mapping.sql b/test_runner/performance/large_synthetic_oltp/update_hotel_rate_mapping.sql new file mode 100644 index 0000000000..3210b6dff8 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_hotel_rate_mapping.sql @@ -0,0 +1,9 @@ +-- update approximately 6000 rows or 600 kb in the hotel_rate_mapping table +-- takes about 1 second +UPDATE booking_inventory.hotel_rate_mapping +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM booking_inventory.hotel_rate_mapping + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_incoming_webhooks.sql b/test_runner/performance/large_synthetic_oltp/update_incoming_webhooks.sql new file mode 100644 index 0000000000..ea284eb47c --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_incoming_webhooks.sql @@ -0,0 +1,9 @@ +-- update approximately 2000 rows or 1 MB in the incoming_webhooks table +-- takes about 5 seconds +UPDATE webhook.incoming_webhooks +SET is_body_encrypted = NOT is_body_encrypted +WHERE ctid in ( + SELECT ctid + FROM webhook.incoming_webhooks + TABLESAMPLE SYSTEM (0.0002) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_manual_transaction.sql b/test_runner/performance/large_synthetic_oltp/update_manual_transaction.sql new file mode 100644 index 0000000000..190bc625e2 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_manual_transaction.sql @@ -0,0 +1,9 @@ +-- update approximately 1000 rows or 200 kb in the manual_transaction table +-- takes about 2 seconds +UPDATE banking.manual_transaction +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM banking.manual_transaction + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_ml_receipt_matching_log.sql b/test_runner/performance/large_synthetic_oltp/update_ml_receipt_matching_log.sql new file mode 100644 index 0000000000..810021b09d --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_ml_receipt_matching_log.sql @@ -0,0 +1,9 @@ +-- update approximately 1000 rows or 100 kb in the ml_receipt_matching_log table +-- takes about 1 second +UPDATE receipt.ml_receipt_matching_log +SET is_shadow_mode = NOT is_shadow_mode +WHERE ctid in ( + SELECT ctid + FROM receipt.ml_receipt_matching_log + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_ocr_pipeine_results_version.sql b/test_runner/performance/large_synthetic_oltp/update_ocr_pipeine_results_version.sql new file mode 100644 index 0000000000..a1da8fdb07 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_ocr_pipeine_results_version.sql @@ -0,0 +1,9 @@ +-- update approximately 2000 rows or 400 kb in the ocr_pipeline_results_version table +-- takes about 1 second +UPDATE ocr.ocr_pipeline_results_version +SET is_async = NOT is_async +WHERE ctid in ( + SELECT ctid + FROM ocr.ocr_pipeline_results_version + TABLESAMPLE SYSTEM (0.0005) +); diff --git 
a/test_runner/performance/large_synthetic_oltp/update_orc_pipeline_step_results.sql b/test_runner/performance/large_synthetic_oltp/update_orc_pipeline_step_results.sql new file mode 100644 index 0000000000..b7bb4932bd --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_orc_pipeline_step_results.sql @@ -0,0 +1,9 @@ +-- update approximately 3000 rows or 1 MB in the ocr_pipeline_step_results table +-- takes about 11 seconds +UPDATE ocr.ocr_pipeline_step_results +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM ocr.ocr_pipeline_step_results + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_orc_pipeline_step_results_version.sql b/test_runner/performance/large_synthetic_oltp/update_orc_pipeline_step_results_version.sql new file mode 100644 index 0000000000..83e9765d22 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_orc_pipeline_step_results_version.sql @@ -0,0 +1,9 @@ +-- update approximately 5000 rows or 1 MB in the ocr_pipeline_step_results_version table +-- takes about 40 seconds +UPDATE ocr.ocr_pipeline_step_results_version +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM ocr.ocr_pipeline_step_results_version + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_priceline_raw_response.sql b/test_runner/performance/large_synthetic_oltp/update_priceline_raw_response.sql new file mode 100644 index 0000000000..a434c6cb63 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_priceline_raw_response.sql @@ -0,0 +1,9 @@ +-- update approximately 5000 rows or 1 MB in the priceline_raw_response table +-- takes about 1 second +UPDATE booking_inventory.priceline_raw_response +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM booking_inventory.priceline_raw_response + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_quickbooks_transactions.sql b/test_runner/performance/large_synthetic_oltp/update_quickbooks_transactions.sql new file mode 100644 index 0000000000..a783246c4c --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_quickbooks_transactions.sql @@ -0,0 +1,9 @@ +-- update approximately 5000 rows or 1 MB in the quickbooks_transactions table +-- takes about 30 seconds +UPDATE accounting.quickbooks_transactions +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM accounting.quickbooks_transactions + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_raw_finicity_transaction.sql b/test_runner/performance/large_synthetic_oltp/update_raw_finicity_transaction.sql new file mode 100644 index 0000000000..91fb1bc789 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_raw_finicity_transaction.sql @@ -0,0 +1,15 @@ +-- update approximately 6000 rows or 600 kb in the raw_finicity_transaction table +-- takes about 1 second +UPDATE banking.raw_finicity_transaction +SET raw_data = + jsonb_set( + raw_data, + '{updated}', + to_jsonb(now()), + true + ) +WHERE ctid IN ( + SELECT ctid + FROM banking.raw_finicity_transaction + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_relabeled_transactions.sql b/test_runner/performance/large_synthetic_oltp/update_relabeled_transactions.sql new file mode 100644 index 0000000000..87b402f9e7 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_relabeled_transactions.sql @@ -0,0 +1,9 @@ +-- 
update approximately 8000 rows or 1 MB in the relabeled_transactions table +-- takes about 1 second +UPDATE heron.relabeled_transactions +SET created_at = now() +WHERE ctid in ( + SELECT ctid + FROM heron.relabeled_transactions + TABLESAMPLE SYSTEM (0.0005) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_state_values.sql b/test_runner/performance/large_synthetic_oltp/update_state_values.sql new file mode 100644 index 0000000000..2365ea3d6b --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_state_values.sql @@ -0,0 +1,9 @@ +-- update approximately 8000 rows or 1 MB in the state_values table +-- takes about 2 minutes +UPDATE workflows.state_values +SET state_type = now()::text +WHERE ctid in ( + SELECT ctid + FROM workflows.state_values + TABLESAMPLE SYSTEM (0.0002) +); \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/update_stripe_authorization_event_log.sql b/test_runner/performance/large_synthetic_oltp/update_stripe_authorization_event_log.sql new file mode 100644 index 0000000000..5328db9fb8 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_stripe_authorization_event_log.sql @@ -0,0 +1,9 @@ +-- update approximately 4000 rows or 1 MB in the stripe_authorization_event_log table +-- takes about 5 minutes +UPDATE stripe.stripe_authorization_event_log +SET approved = NOT approved +WHERE ctid in ( + SELECT ctid + FROM stripe.stripe_authorization_event_log + TABLESAMPLE SYSTEM (0.0002) +); \ No newline at end of file diff --git a/test_runner/performance/large_synthetic_oltp/update_transaction.sql b/test_runner/performance/large_synthetic_oltp/update_transaction.sql new file mode 100644 index 0000000000..83bec52065 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_transaction.sql @@ -0,0 +1,9 @@ +-- update approximately 2000 rows or 301 MB in the transaction table +-- takes about 90 seconds +UPDATE transaction.transaction +SET is_last = NOT is_last +WHERE ctid in ( + SELECT ctid + FROM transaction.transaction + TABLESAMPLE SYSTEM (0.0002) +); diff --git a/test_runner/performance/large_synthetic_oltp/update_values.sql b/test_runner/performance/large_synthetic_oltp/update_values.sql new file mode 100644 index 0000000000..e5d576dae5 --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_values.sql @@ -0,0 +1,9 @@ +-- update approximately 2500 rows or 1 MB in the values table +-- takes about 3 minutes +UPDATE workflows.values +SET bool_value = NOT bool_value +WHERE ctid in ( + SELECT ctid + FROM workflows.values + TABLESAMPLE SYSTEM (0.0002) +) AND bool_value IS NOT NULL; diff --git a/test_runner/performance/large_synthetic_oltp/update_vertices.sql b/test_runner/performance/large_synthetic_oltp/update_vertices.sql new file mode 100644 index 0000000000..714c38965b --- /dev/null +++ b/test_runner/performance/large_synthetic_oltp/update_vertices.sql @@ -0,0 +1,9 @@ +-- update approximately 10000 rows or 2 MB in the vertices table +-- takes about 1 minute +UPDATE workflows.vertices +SET has_been_visited = NOT has_been_visited +WHERE ctid in ( + SELECT ctid + FROM workflows.vertices + TABLESAMPLE SYSTEM (0.0002) +); \ No newline at end of file diff --git a/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py b/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py index 8af52dcbd0..25dfd5277c 100644 --- a/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py +++ 
b/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py @@ -146,8 +146,6 @@ def run_benchmark(env: NeonEnv, pg_bin: PgBin, record, duration_secs: int): ps_http.base_url, "--page-service-connstring", env.pageserver.connstr(password=None), - "--gzip-probability", - "1", "--runtime", f"{duration_secs}s", # don't specify the targets explicitly, let pagebench auto-discover them diff --git a/test_runner/performance/test_perf_oltp_large_tenant.py b/test_runner/performance/test_perf_oltp_large_tenant.py index b45394d627..bd00f6b65f 100644 --- a/test_runner/performance/test_perf_oltp_large_tenant.py +++ b/test_runner/performance/test_perf_oltp_large_tenant.py @@ -31,7 +31,9 @@ def get_custom_scripts( return rv -def run_test_pgbench(env: PgCompare, custom_scripts: str, duration: int): +def run_test_pgbench( + env: PgCompare, custom_scripts: str, duration: int, clients: int = 500, jobs: int = 100 +): password = env.pg.default_options.get("password", None) options = env.pg.default_options.get("options", "") # drop password from the connection string by passing password=None and set password separately @@ -46,8 +48,8 @@ def run_test_pgbench(env: PgCompare, custom_scripts: str, duration: int): "-n", # no explicit vacuum before the test - we want to rely on auto-vacuum "-M", "prepared", - "--client=500", - "--jobs=100", + f"--client={clients}", + f"--jobs={jobs}", f"-T{duration}", "-P60", # progress every minute "--progress-timestamp", @@ -164,6 +166,12 @@ def test_perf_oltp_large_tenant_pgbench( run_test_pgbench(remote_compare, custom_scripts, duration) +@pytest.mark.parametrize("duration", get_durations_matrix()) +@pytest.mark.remote_cluster +def test_perf_oltp_large_tenant_growth(remote_compare: PgCompare, duration: int): + run_test_pgbench(remote_compare, " ".join(get_custom_scripts()), duration, 35, 35) + + @pytest.mark.remote_cluster def test_perf_oltp_large_tenant_maintenance(remote_compare: PgCompare): # run analyze, vacuum, re-index after the test and measure and report its duration diff --git a/test_runner/performance/test_sharding_autosplit.py b/test_runner/performance/test_sharding_autosplit.py index 04bebae92f..0bb210db23 100644 --- a/test_runner/performance/test_sharding_autosplit.py +++ b/test_runner/performance/test_sharding_autosplit.py @@ -62,7 +62,8 @@ def test_sharding_autosplit(neon_env_builder: NeonEnvBuilder, pg_bin: PgBin): ps.allowed_errors.extend( [ # We shut down pageservers while they might have some compaction work going on - ".*Compaction failed.*shutting down.*" + ".*Compaction failed.*shutting down.*", + ".*flush task cancelled.*", ] ) diff --git a/test_runner/random_ops/test_random_ops.py b/test_runner/random_ops/test_random_ops.py index 645c9b7b9d..d3815c40bb 100644 --- a/test_runner/random_ops/test_random_ops.py +++ b/test_runner/random_ops/test_random_ops.py @@ -45,6 +45,8 @@ class NeonEndpoint: if self.branch.connect_env: self.connect_env = self.branch.connect_env.copy() self.connect_env["PGHOST"] = self.host + if self.type == "read_only": + self.project.read_only_endpoints_total += 1 def delete(self): self.project.delete_endpoint(self.id) @@ -228,8 +230,13 @@ class NeonProject: self.benchmarks: dict[str, subprocess.Popen[Any]] = {} self.restore_num: int = 0 self.restart_pgbench_on_console_errors: bool = False + self.limits: dict[str, Any] = self.get_limits()["limits"] + self.read_only_endpoints_total: int = 0 - def delete(self): + def get_limits(self) -> dict[str, Any]: + return self.neon_api.get_project_limits(self.id) + + def delete(self) -> 
None: self.neon_api.delete_project(self.id) def create_branch(self, parent_id: str | None = None) -> NeonBranch | None: @@ -282,6 +289,7 @@ class NeonProject: self.neon_api.delete_endpoint(self.id, endpoint_id) self.endpoints[endpoint_id].branch.endpoints.pop(endpoint_id) self.endpoints.pop(endpoint_id) + self.read_only_endpoints_total -= 1 self.wait() def start_benchmark(self, target: str, clients: int = 10) -> subprocess.Popen[Any]: @@ -369,49 +377,64 @@ def setup_class( print(f"::warning::Retried on 524 error {neon_api.retries524} times") if neon_api.retries4xx > 0: print(f"::warning::Retried on 4xx error {neon_api.retries4xx} times") - log.info("Removing the project") + log.info("Removing the project %s", project.id) project.delete() -def do_action(project: NeonProject, action: str) -> None: +def do_action(project: NeonProject, action: str) -> bool: """ Runs the action """ log.info("Action: %s", action) if action == "new_branch": log.info("Trying to create a new branch") + if 0 <= project.limits["max_branches"] <= len(project.branches): + log.info( + "Maximum branch limit exceeded (%s of %s)", + len(project.branches), + project.limits["max_branches"], + ) + return False parent = project.branches[ random.choice(list(set(project.branches.keys()) - project.reset_branches)) ] log.info("Parent: %s", parent) child = parent.create_child_branch() if child is None: - return + return False log.info("Created branch %s", child) child.start_benchmark() elif action == "delete_branch": if project.leaf_branches: - target = random.choice(list(project.leaf_branches.values())) + target: NeonBranch = random.choice(list(project.leaf_branches.values())) log.info("Trying to delete branch %s", target) target.delete() else: log.info("Leaf branches not found, skipping") + return False elif action == "new_ro_endpoint": + if 0 <= project.limits["max_read_only_endpoints"] <= project.read_only_endpoints_total: + log.info( + "Maximum read only endpoint limit exceeded (%s of %s)", + project.read_only_endpoints_total, + project.limits["max_read_only_endpoints"], + ) + return False ep = random.choice( [br for br in project.branches.values() if br.id not in project.reset_branches] ).create_ro_endpoint() log.info("Created the RO endpoint with id %s branch: %s", ep.id, ep.branch.id) ep.start_benchmark() elif action == "delete_ro_endpoint": + if project.read_only_endpoints_total == 0: + log.info("no read_only endpoints present, skipping") + return False ro_endpoints: list[NeonEndpoint] = [ endpoint for endpoint in project.endpoints.values() if endpoint.type == "read_only" ] - if ro_endpoints: - target_ep: NeonEndpoint = random.choice(ro_endpoints) - target_ep.delete() - log.info("endpoint %s deleted", target_ep.id) - else: - log.info("no read_only endpoints present, skipping") + target_ep: NeonEndpoint = random.choice(ro_endpoints) + target_ep.delete() + log.info("endpoint %s deleted", target_ep.id) elif action == "restore_random_time": if project.leaf_branches: br: NeonBranch = random.choice(list(project.leaf_branches.values())) @@ -419,8 +442,10 @@ def do_action(project: NeonProject, action: str) -> None: br.restore_random_time() else: log.info("No leaf branches found") + return False else: raise ValueError(f"The action {action} is unknown") + return True @pytest.mark.timeout(7200) @@ -457,8 +482,9 @@ def test_api_random( pg_bin.run(["pgbench", "-i", "-I", "dtGvp", "-s100"], env=project.main_branch.connect_env) for _ in range(num_operations): log.info("Starting action #%s", _ + 1) - do_action( + while not do_action( 
project, random.choices([a[0] for a in ACTIONS], weights=[w[1] for w in ACTIONS])[0] - ) + ): + log.info("Retrying...") project.check_all_benchmarks() assert True diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index dc44fc77db..7788faceb4 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -184,7 +184,7 @@ def test_fully_custom_config(positive_env: NeonEnv): "timeline_offloading": False, "rel_size_v2_enabled": True, "relsize_snapshot_cache_capacity": 10000, - "gc_compaction_enabled": True, + "gc_compaction_enabled": False, "gc_compaction_verification": False, "gc_compaction_initial_threshold_kb": 1024000, "gc_compaction_ratio_percent": 200, diff --git a/test_runner/regress/test_basebackup.py b/test_runner/regress/test_basebackup.py index 2d42be4051..d1b10ec85d 100644 --- a/test_runner/regress/test_basebackup.py +++ b/test_runner/regress/test_basebackup.py @@ -69,6 +69,11 @@ def test_basebackup_cache(neon_env_builder: NeonEnvBuilder): ).value == i + 1 ) + # There should be only one basebackup file in the cache. + assert metrics.query_one("pageserver_basebackup_cache_entries_total").value == 1 + # The size of one basebackup for new DB is ~20KB. + size_bytes = metrics.query_one("pageserver_basebackup_cache_size_bytes").value + assert 10 * 1024 <= size_bytes <= 100 * 1024 wait_until(check_metrics) diff --git a/test_runner/regress/test_branching.py b/test_runner/regress/test_branching.py index 9ce618b2ad..011c6896bd 100644 --- a/test_runner/regress/test_branching.py +++ b/test_runner/regress/test_branching.py @@ -11,6 +11,7 @@ from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log from fixtures.pageserver.http import PageserverApiException from fixtures.pageserver.utils import wait_until_tenant_active +from fixtures.safekeeper.http import MembershipConfiguration, TimelineCreateRequest from fixtures.utils import query_scalar from performance.test_perf_pgbench import get_scales_matrix from requests import RequestException @@ -164,6 +165,19 @@ def test_cannot_create_endpoint_on_non_uploaded_timeline(neon_env_builder: NeonE ps_http.configure_failpoints(("before-upload-index-pausable", "pause")) env.pageserver.tenant_create(env.initial_tenant) + sk = env.safekeepers[0] + assert sk + sk.http_client().timeline_create( + TimelineCreateRequest( + env.initial_tenant, + env.initial_timeline, + MembershipConfiguration(generation=1, members=[sk.safekeeper_id()], new_members=None), + int(env.pg_version) * 10000, + Lsn(0), + None, + ) + ) + initial_branch = "initial_branch" def start_creating_timeline(): @@ -402,6 +416,8 @@ def test_duplicate_creation(neon_env_builder: NeonEnvBuilder): # timeline creation (uploads). mask it out here to avoid flakyness. 
del success_result["remote_consistent_lsn_visible"] del repeat_result["remote_consistent_lsn_visible"] + del success_result["walreceiver_status"] + del repeat_result["walreceiver_status"] assert repeat_result == success_result finally: env.pageserver.stop(immediate=True) diff --git a/test_runner/regress/test_compatibility.py b/test_runner/regress/test_compatibility.py index 784afbba82..a4d2bf8d9b 100644 --- a/test_runner/regress/test_compatibility.py +++ b/test_runner/regress/test_compatibility.py @@ -18,6 +18,8 @@ from fixtures.neon_fixtures import ( NeonEnv, NeonEnvBuilder, PgBin, + Safekeeper, + StorageControllerApiException, flush_ep_to_pageserver, ) from fixtures.pageserver.http import PageserverApiException @@ -26,6 +28,7 @@ from fixtures.pageserver.utils import ( ) from fixtures.pg_version import PgVersion from fixtures.remote_storage import RemoteStorageKind, S3Storage, s3_storage +from fixtures.safekeeper.http import MembershipConfiguration from fixtures.workload import Workload if TYPE_CHECKING: @@ -73,6 +76,7 @@ if TYPE_CHECKING: # export CHECK_ONDISK_DATA_COMPATIBILITY=true # export COMPATIBILITY_NEON_BIN=neon_previous/target/${BUILD_TYPE} # export COMPATIBILITY_POSTGRES_DISTRIB_DIR=neon_previous/pg_install +# export COMPATIBILITY_SNAPSHOT_DIR=test_output/compatibility_snapshot_pgv${DEFAULT_PG_VERSION} # # # Build previous version of binaries and store them somewhere: # rm -rf pg_install target @@ -99,6 +103,7 @@ if TYPE_CHECKING: # export CHECK_ONDISK_DATA_COMPATIBILITY=true # export COMPATIBILITY_NEON_BIN=neon_previous/target/${BUILD_TYPE} # export COMPATIBILITY_POSTGRES_DISTRIB_DIR=neon_previous/pg_install +# export COMPATIBILITY_SNAPSHOT_DIR=test_output/compatibility_snapshot_pgv${DEFAULT_PG_VERSION} # export NEON_BIN=target/${BUILD_TYPE} # export POSTGRES_DISTRIB_DIR=pg_install # @@ -125,6 +130,12 @@ check_ondisk_data_compatibility_if_enabled = pytest.mark.skipif( reason="CHECK_ONDISK_DATA_COMPATIBILITY env is not set", ) +skip_old_debug_versions = pytest.mark.skipif( + os.getenv("BUILD_TYPE", "debug") == "debug" + and os.getenv("DEFAULT_PG_VERSION") in [PgVersion.V14, PgVersion.V15, PgVersion.V16], + reason="compatibility snapshots not available for old versions of debug builds", +) + @pytest.mark.xdist_group("compatibility") @pytest.mark.order(before="test_forward_compatibility") @@ -195,6 +206,7 @@ ingest_lag_log_line = ".*ingesting record with timestamp lagging more than wait_ @check_ondisk_data_compatibility_if_enabled +@skip_old_debug_versions @pytest.mark.xdist_group("compatibility") @pytest.mark.order(after="test_create_snapshot") def test_backward_compatibility( @@ -222,6 +234,7 @@ def test_backward_compatibility( @check_ondisk_data_compatibility_if_enabled +@skip_old_debug_versions @pytest.mark.xdist_group("compatibility") @pytest.mark.order(after="test_create_snapshot") def test_forward_compatibility( @@ -291,7 +304,20 @@ def test_forward_compatibility( def check_neon_works(env: NeonEnv, test_output_dir: Path, sql_dump_path: Path, repo_dir: Path): ep = env.endpoints.create("main") ep_env = {"LD_LIBRARY_PATH": str(env.pg_distrib_dir / f"v{env.pg_version}/lib")} - ep.start(env=ep_env) + + # If the compatibility snapshot was created with --timelines-onto-safekeepers=false, + # we should not pass safekeeper_generation to the endpoint because the compute + # will not be able to start. + # Zero generation is INVALID_GENERATION. 
+ generation = 0 + try: + res = env.storage_controller.timeline_locate(env.initial_tenant, env.initial_timeline) + generation = res["generation"] + except StorageControllerApiException as e: + if e.status_code != 404 or not re.search(r"Timeline .* not found", str(e)): + raise e + + ep.start(env=ep_env, safekeeper_generation=generation) connstr = ep.connstr() @@ -341,7 +367,7 @@ def check_neon_works(env: NeonEnv, test_output_dir: Path, sql_dump_path: Path, r ) # Timeline exists again: restart the endpoint - ep.start(env=ep_env) + ep.start(env=ep_env, safekeeper_generation=generation) pg_bin.run_capture( ["pg_dumpall", f"--dbname={connstr}", f"--file={test_output_dir / 'dump-from-wal.sql'}"] @@ -542,6 +568,24 @@ def test_historic_storage_formats( # All our artifacts should contain at least one timeline assert len(timelines) > 0 + # Import tenant does not create the timeline on safekeepers, + # because it is a debug handler and the timeline may have already been + # created on some set of safekeepers. + # Create the timeline on safekeepers manually. + # TODO(diko): when we have the script/storcon handler to migrate + # the timeline to storcon, we can replace this code with it. + mconf = MembershipConfiguration( + generation=1, + members=Safekeeper.sks_to_safekeeper_ids([env.safekeepers[0]]), + new_members=None, + ) + members_sks = Safekeeper.mconf_sks(env, mconf) + + for timeline in timelines: + Safekeeper.create_timeline( + dataset.tenant_id, timeline["timeline_id"], env.pageserver, mconf, members_sks + ) + # TODO: ensure that the snapshots we're importing contain a sensible variety of content, at the very # least they should include a mixture of deltas and image layers. Preferably they should also # contain some "exotic" stuff like aux files from logical replication. 
@@ -573,6 +617,7 @@ def test_historic_storage_formats( @check_ondisk_data_compatibility_if_enabled +@skip_old_debug_versions @pytest.mark.xdist_group("compatibility") @pytest.mark.parametrize( **fixtures.utils.allpairs_versions(), diff --git a/test_runner/regress/test_compute_metrics.py b/test_runner/regress/test_compute_metrics.py index c751a3e7cc..d1e61e597c 100644 --- a/test_runner/regress/test_compute_metrics.py +++ b/test_runner/regress/test_compute_metrics.py @@ -418,7 +418,7 @@ def test_sql_exporter_metrics_e2e( pg_user = conn_options["user"] pg_dbname = conn_options["dbname"] pg_application_name = f"sql_exporter{stem_suffix}" - connstr = f"postgresql://{pg_user}@{pg_host}:{pg_port}/{pg_dbname}?sslmode=disable&application_name={pg_application_name}" + connstr = f"postgresql://{pg_user}@{pg_host}:{pg_port}/{pg_dbname}?sslmode=disable&application_name={pg_application_name}&pgaudit.log=none" def escape_go_filepath_match_characters(s: str) -> str: """ diff --git a/test_runner/regress/test_compute_reconfigure.py b/test_runner/regress/test_compute_reconfigure.py index b533d45b1e..cc792333ba 100644 --- a/test_runner/regress/test_compute_reconfigure.py +++ b/test_runner/regress/test_compute_reconfigure.py @@ -9,6 +9,8 @@ from fixtures.utils import wait_until if TYPE_CHECKING: from fixtures.neon_fixtures import NeonEnv +from fixtures.log_helper import log + def test_compute_reconfigure(neon_simple_env: NeonEnv): """ @@ -85,3 +87,57 @@ def test_compute_reconfigure(neon_simple_env: NeonEnv): samples = metrics.query_all("compute_ctl_up", {"build_tag": build_tag}) assert len(samples) == 1 assert samples[0].value == 1 + + +def test_compute_safekeeper_connstrings_duplicate(neon_simple_env: NeonEnv): + """ + Test that we catch duplicate entries in neon.safekeepers. + """ + env = neon_simple_env + + endpoint = env.endpoints.create_start("main") + + # grab the current value of neon.safekeepers + sk_list = [] + with endpoint.cursor() as cursor: + cursor.execute("SHOW neon.safekeepers;") + row = cursor.fetchone() + assert row is not None + + log.info(f' initial neon.safekeepers: "{row}"') + + # build a safekeepers list with a duplicate + sk_list.append(row[0]) + sk_list.append(row[0]) + + safekeepers = ",".join(sk_list) + log.info(f'reconfigure neon.safekeepers: "{safekeepers}"') + + # introduce duplicate entry in neon.safekeepers, on purpose + endpoint.respec_deep( + **{ + "spec": { + "skip_pg_catalog_updates": True, + "cluster": { + "settings": [ + { + "name": "neon.safekeepers", + "vartype": "string", + "value": safekeepers, + } + ] + }, + }, + } + ) + + try: + endpoint.reconfigure() + + # Check that in logs we see that it was actually reconfigured, + # not restarted or something else. 
+ endpoint.log_contains("INFO request{method=POST uri=/configure") + + except Exception as e: + # we expect a failure here + log.info(f"RAISED: {e}") diff --git a/test_runner/regress/test_feature_flag.py b/test_runner/regress/test_feature_flag.py new file mode 100644 index 0000000000..2712d13dcc --- /dev/null +++ b/test_runner/regress/test_feature_flag.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from fixtures.utils import run_only_on_default_postgres + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + + +@run_only_on_default_postgres("Pageserver-only test only needs to run on one version") +def test_feature_flag(neon_env_builder: NeonEnvBuilder): + env = neon_env_builder.init_start() + env.pageserver.http_client().force_override_feature_flag("test-feature-flag", "true") + assert env.pageserver.http_client().evaluate_feature_flag_boolean( + env.initial_tenant, "test-feature-flag" + )["result"]["Ok"] + assert ( + env.pageserver.http_client().evaluate_feature_flag_multivariate( + env.initial_tenant, "test-feature-flag" + )["result"]["Ok"] + == "true" + ) + + env.pageserver.http_client().force_override_feature_flag("test-feature-flag", "false") + assert ( + env.pageserver.http_client().evaluate_feature_flag_boolean( + env.initial_tenant, "test-feature-flag" + )["result"]["Err"] + == "No condition group is matched" + ) + assert ( + env.pageserver.http_client().evaluate_feature_flag_multivariate( + env.initial_tenant, "test-feature-flag" + )["result"]["Ok"] + == "false" + ) + + env.pageserver.http_client().force_override_feature_flag("test-feature-flag", None) + assert ( + "Err" + in env.pageserver.http_client().evaluate_feature_flag_boolean( + env.initial_tenant, "test-feature-flag" + )["result"] + ) + assert ( + "Err" + in env.pageserver.http_client().evaluate_feature_flag_multivariate( + env.initial_tenant, "test-feature-flag" + )["result"] + ) diff --git a/test_runner/regress/test_import.py b/test_runner/regress/test_import.py index 55737c35f0..e1070a81e6 100644 --- a/test_runner/regress/test_import.py +++ b/test_runner/regress/test_import.py @@ -87,6 +87,9 @@ def test_import_from_vanilla(test_output_dir, pg_bin, vanilla_pg, neon_env_build # Set up pageserver for import neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.LOCAL_FS) + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": True, + } env = neon_env_builder.init_start() env.pageserver.tenant_create(tenant) diff --git a/test_runner/regress/test_lfc_prewarm.py b/test_runner/regress/test_lfc_prewarm.py index 82e1e9fcba..1fa1ead034 100644 --- a/test_runner/regress/test_lfc_prewarm.py +++ b/test_runner/regress/test_lfc_prewarm.py @@ -1,34 +1,38 @@ import random import threading -import time -from enum import Enum +from enum import StrEnum +from time import sleep +from typing import Any import pytest from fixtures.endpoint.http import EndpointHttpClient from fixtures.log_helper import log from fixtures.neon_fixtures import NeonEnv -from fixtures.utils import USE_LFC +from fixtures.utils import USE_LFC, wait_until from prometheus_client.parser import text_string_to_metric_families as prom_parse_impl +from psycopg2.extensions import cursor as Cursor -class LfcQueryMethod(Enum): - COMPUTE_CTL = False - POSTGRES = True +class PrewarmMethod(StrEnum): + POSTGRES = "postgres" + COMPUTE_CTL = "compute-ctl" + AUTOPREWARM = "autoprewarm" -PREWARM_LABEL = "compute_ctl_lfc_prewarm_requests_total" -OFFLOAD_LABEL = 
"compute_ctl_lfc_offload_requests_total" -QUERY_OPTIONS = LfcQueryMethod.POSTGRES, LfcQueryMethod.COMPUTE_CTL +PREWARM_LABEL = "compute_ctl_lfc_prewarms_total" +OFFLOAD_LABEL = "compute_ctl_lfc_offloads_total" +METHOD_VALUES = [e for e in PrewarmMethod] +METHOD_IDS = [e.value for e in PrewarmMethod] -def check_pinned_entries(cur): +def check_pinned_entries(cur: Cursor): # some LFC buffer can be temporary locked by autovacuum or background writer for _ in range(10): cur.execute("select lfc_value from neon_lfc_stats where lfc_key='file_cache_chunks_pinned'") n_pinned = cur.fetchall()[0][0] if n_pinned == 0: break - time.sleep(1) + sleep(1) assert n_pinned == 0 @@ -41,25 +45,72 @@ def prom_parse(client: EndpointHttpClient) -> dict[str, float]: } +def offload_lfc(method: PrewarmMethod, client: EndpointHttpClient, cur: Cursor) -> Any: + if method == PrewarmMethod.AUTOPREWARM: + client.offload_lfc_wait() + elif method == PrewarmMethod.COMPUTE_CTL: + status = client.prewarm_lfc_status() + assert status["status"] == "not_prewarmed" + assert "error" not in status + client.offload_lfc() + assert client.prewarm_lfc_status()["status"] == "not_prewarmed" + assert prom_parse(client) == {OFFLOAD_LABEL: 1, PREWARM_LABEL: 0} + elif method == PrewarmMethod.POSTGRES: + cur.execute("select get_local_cache_state()") + return cur.fetchall()[0][0] + else: + raise AssertionError(f"{method} not in PrewarmMethod") + + +def prewarm_endpoint( + method: PrewarmMethod, client: EndpointHttpClient, cur: Cursor, lfc_state: str | None +): + if method == PrewarmMethod.AUTOPREWARM: + client.prewarm_lfc_wait() + elif method == PrewarmMethod.COMPUTE_CTL: + client.prewarm_lfc() + elif method == PrewarmMethod.POSTGRES: + cur.execute("select prewarm_local_cache(%s)", (lfc_state,)) + + +def check_prewarmed( + method: PrewarmMethod, client: EndpointHttpClient, desired_status: dict[str, str | int] +): + if method == PrewarmMethod.AUTOPREWARM: + assert client.prewarm_lfc_status() == desired_status + assert prom_parse(client)[PREWARM_LABEL] == 1 + elif method == PrewarmMethod.COMPUTE_CTL: + assert client.prewarm_lfc_status() == desired_status + assert prom_parse(client) == {OFFLOAD_LABEL: 0, PREWARM_LABEL: 1} + + @pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") -@pytest.mark.parametrize("query", QUERY_OPTIONS, ids=["postgres", "compute-ctl"]) -def test_lfc_prewarm(neon_simple_env: NeonEnv, query: LfcQueryMethod): +@pytest.mark.parametrize("method", METHOD_VALUES, ids=METHOD_IDS) +def test_lfc_prewarm(neon_simple_env: NeonEnv, method: PrewarmMethod): env = neon_simple_env n_records = 1000000 - endpoint = env.endpoints.create_start( - branch_name="main", - config_lines=[ - "autovacuum = off", - "shared_buffers=1MB", - "neon.max_file_cache_size=1GB", - "neon.file_cache_size_limit=1GB", - "neon.file_cache_prewarm_limit=1000", - ], - ) + cfg = [ + "autovacuum = off", + "shared_buffers=1MB", + "neon.max_file_cache_size=1GB", + "neon.file_cache_size_limit=1GB", + "neon.file_cache_prewarm_limit=1000", + ] + offload_secs = 2 + + if method == PrewarmMethod.AUTOPREWARM: + endpoint = env.endpoints.create_start( + branch_name="main", + config_lines=cfg, + autoprewarm=True, + offload_lfc_interval_seconds=offload_secs, + ) + else: + endpoint = env.endpoints.create_start(branch_name="main", config_lines=cfg) pg_conn = endpoint.connect() pg_cur = pg_conn.cursor() - pg_cur.execute("create extension neon version '1.6'") + pg_cur.execute("create extension neon") pg_cur.execute("create database lfc") lfc_conn = 
endpoint.connect(dbname="lfc") @@ -69,34 +120,21 @@ def test_lfc_prewarm(neon_simple_env: NeonEnv, query: LfcQueryMethod): lfc_cur.execute(f"insert into t (pk) values (generate_series(1,{n_records}))") log.info(f"Inserted {n_records} rows") - http_client = endpoint.http_client() - if query is LfcQueryMethod.COMPUTE_CTL: - status = http_client.prewarm_lfc_status() - assert status["status"] == "not_prewarmed" - assert "error" not in status - http_client.offload_lfc() - assert http_client.prewarm_lfc_status()["status"] == "not_prewarmed" - assert prom_parse(http_client) == {OFFLOAD_LABEL: 1, PREWARM_LABEL: 0} - else: - pg_cur.execute("select get_local_cache_state()") - lfc_state = pg_cur.fetchall()[0][0] + client = endpoint.http_client() + lfc_state = offload_lfc(method, client, pg_cur) endpoint.stop() - endpoint.start() + if method == PrewarmMethod.AUTOPREWARM: + endpoint.start(autoprewarm=True, offload_lfc_interval_seconds=offload_secs) + else: + endpoint.start() - # wait until compute_ctl completes downgrade of extension to default version - time.sleep(1) pg_conn = endpoint.connect() pg_cur = pg_conn.cursor() - pg_cur.execute("alter extension neon update to '1.6'") lfc_conn = endpoint.connect(dbname="lfc") lfc_cur = lfc_conn.cursor() - - if query is LfcQueryMethod.COMPUTE_CTL: - http_client.prewarm_lfc() - else: - pg_cur.execute("select prewarm_local_cache(%s)", (lfc_state,)) + prewarm_endpoint(method, client, pg_cur, lfc_state) pg_cur.execute("select lfc_value from neon_lfc_stats where lfc_key='file_cache_used_pages'") lfc_used_pages = pg_cur.fetchall()[0][0] @@ -114,37 +152,36 @@ def test_lfc_prewarm(neon_simple_env: NeonEnv, query: LfcQueryMethod): and prewarm_info[1] > 0 and prewarm_info[0] == prewarm_info[1] + prewarm_info[2] ) - lfc_cur.execute("select sum(pk) from t") assert lfc_cur.fetchall()[0][0] == n_records * (n_records + 1) / 2 check_pinned_entries(pg_cur) - desired = {"status": "completed", "total": total, "prewarmed": prewarmed, "skipped": skipped} - if query is LfcQueryMethod.COMPUTE_CTL: - assert http_client.prewarm_lfc_status() == desired - assert prom_parse(http_client) == {OFFLOAD_LABEL: 0, PREWARM_LABEL: 1} + check_prewarmed(method, client, desired) + + +# autoprewarm isn't needed as we prewarm manually +WORKLOAD_VALUES = METHOD_VALUES[:-1] +WORKLOAD_IDS = METHOD_IDS[:-1] @pytest.mark.skipif(not USE_LFC, reason="LFC is disabled, skipping") -@pytest.mark.parametrize("query", QUERY_OPTIONS, ids=["postgres", "compute-ctl"]) -def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, query: LfcQueryMethod): +@pytest.mark.parametrize("method", WORKLOAD_VALUES, ids=WORKLOAD_IDS) +def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, method: PrewarmMethod): env = neon_simple_env n_records = 10000 n_threads = 4 - endpoint = env.endpoints.create_start( - branch_name="main", - config_lines=[ - "shared_buffers=1MB", - "neon.max_file_cache_size=1GB", - "neon.file_cache_size_limit=1GB", - "neon.file_cache_prewarm_limit=1000000", - ], - ) + cfg = [ + "shared_buffers=1MB", + "neon.max_file_cache_size=1GB", + "neon.file_cache_size_limit=1GB", + "neon.file_cache_prewarm_limit=1000000", + ] + endpoint = env.endpoints.create_start(branch_name="main", config_lines=cfg) pg_conn = endpoint.connect() pg_cur = pg_conn.cursor() - pg_cur.execute("create extension neon version '1.6'") + pg_cur.execute("create extension neon") pg_cur.execute("CREATE DATABASE lfc") lfc_conn = endpoint.connect(dbname="lfc") @@ -157,12 +194,7 @@ def test_lfc_prewarm_under_workload(neon_simple_env: 
NeonEnv, query: LfcQueryMet log.info(f"Inserted {n_records} rows") http_client = endpoint.http_client() - if query is LfcQueryMethod.COMPUTE_CTL: - http_client.offload_lfc() - else: - pg_cur.execute("select get_local_cache_state()") - lfc_state = pg_cur.fetchall()[0][0] - + lfc_state = offload_lfc(method, http_client, pg_cur) running = True n_prewarms = 0 @@ -173,8 +205,8 @@ def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, query: LfcQueryMet while running: src = random.randint(1, n_records) dst = random.randint(1, n_records) - lfc_cur.execute("update accounts set balance=balance-100 where id=%s", (src,)) - lfc_cur.execute("update accounts set balance=balance+100 where id=%s", (dst,)) + lfc_cur.execute(f"update accounts set balance=balance-100 where id={src}") + lfc_cur.execute(f"update accounts set balance=balance+100 where id={dst}") n_transfers += 1 log.info(f"Number of transfers: {n_transfers}") @@ -186,12 +218,7 @@ def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, query: LfcQueryMet pg_cur.execute("select pg_reload_conf()") pg_cur.execute("alter system set neon.file_cache_size_limit='1GB'") pg_cur.execute("select pg_reload_conf()") - - if query is LfcQueryMethod.COMPUTE_CTL: - http_client.prewarm_lfc() - else: - pg_cur.execute("select prewarm_local_cache(%s)", (lfc_state,)) - + prewarm_endpoint(method, http_client, pg_cur, lfc_state) nonlocal n_prewarms n_prewarms += 1 log.info(f"Number of prewarms: {n_prewarms}") @@ -205,7 +232,10 @@ def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, query: LfcQueryMet prewarm_thread = threading.Thread(target=prewarm) prewarm_thread.start() - time.sleep(20) + def prewarmed(): + assert n_prewarms > 5 + + wait_until(prewarmed) running = False for t in workload_threads: @@ -217,5 +247,5 @@ def test_lfc_prewarm_under_workload(neon_simple_env: NeonEnv, query: LfcQueryMet assert total_balance == 0 check_pinned_entries(pg_cur) - if query is LfcQueryMethod.COMPUTE_CTL: + if method != PrewarmMethod.POSTGRES: assert prom_parse(http_client) == {OFFLOAD_LABEL: 1, PREWARM_LABEL: n_prewarms} diff --git a/test_runner/regress/test_neon_extension.py b/test_runner/regress/test_neon_extension.py index e79ab458ca..6bcd15d463 100644 --- a/test_runner/regress/test_neon_extension.py +++ b/test_runner/regress/test_neon_extension.py @@ -29,7 +29,7 @@ def test_neon_extension(neon_env_builder: NeonEnvBuilder): # IMPORTANT: # If the version has changed, the test should be updated. # Ensure that the default version is also updated in the neon.control file - assert cur.fetchone() == ("1.5",) + assert cur.fetchone() == ("1.6",) cur.execute("SELECT * from neon.NEON_STAT_FILE_CACHE") res = cur.fetchall() log.info(res) @@ -53,10 +53,10 @@ def test_neon_extension_compatibility(neon_env_builder: NeonEnvBuilder): # IMPORTANT: # If the version has changed, the test should be updated. 
# Ensure that the default version is also updated in the neon.control file - assert cur.fetchone() == ("1.5",) + assert cur.fetchone() == ("1.6",) cur.execute("SELECT * from neon.NEON_STAT_FILE_CACHE") - all_versions = ["1.5", "1.4", "1.3", "1.2", "1.1", "1.0"] - current_version = "1.5" + all_versions = ["1.6", "1.5", "1.4", "1.3", "1.2", "1.1", "1.0"] + current_version = "1.6" for idx, begin_version in enumerate(all_versions): for target_version in all_versions[idx + 1 :]: if current_version != begin_version: diff --git a/test_runner/regress/test_normal_work.py b/test_runner/regress/test_normal_work.py index 44590ea4b9..b815fee702 100644 --- a/test_runner/regress/test_normal_work.py +++ b/test_runner/regress/test_normal_work.py @@ -17,7 +17,7 @@ def check_tenant( config_lines = [ f"neon.safekeeper_proto_version = {safekeeper_proto_version}", ] - endpoint = env.endpoints.create_start("main", tenant_id=tenant_id, config_lines=config_lines) + endpoint = env.endpoints.create_start("main", tenant_id=tenant_id, config_lines=config_lines, grpc=True) # we rely upon autocommit after each statement res_1 = endpoint.safe_psql_many( queries=[ @@ -64,6 +64,11 @@ def test_normal_work( """ neon_env_builder.num_safekeepers = num_safekeepers + + if safekeeper_proto_version == 2: + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": False, + } env = neon_env_builder.init_start() pageserver_http = env.pageserver.http_client() diff --git a/test_runner/regress/test_pageserver_metric_collection.py b/test_runner/regress/test_pageserver_metric_collection.py index 474258c9eb..52c33687ae 100644 --- a/test_runner/regress/test_pageserver_metric_collection.py +++ b/test_runner/regress/test_pageserver_metric_collection.py @@ -180,7 +180,7 @@ def test_metric_collection( httpserver.check() # Check that at least one bucket output object is present, and that all - # can be decompressed and decoded. + # can be decompressed and decoded as NDJSON. bucket_dumps = {} assert isinstance(env.pageserver_remote_storage, LocalFsStorage) for dirpath, _dirs, files in os.walk(env.pageserver_remote_storage.root): @@ -188,7 +188,13 @@ def test_metric_collection( file_path = os.path.join(dirpath, file) log.info(file_path) if file.endswith(".gz"): - bucket_dumps[file_path] = json.load(gzip.open(file_path)) + events = [] + with gzip.open(file_path, "rt") as f: + for line in f: + line = line.strip() + if line: + events.append(json.loads(line)) + bucket_dumps[file_path] = {"events": events} assert len(bucket_dumps) >= 1 assert all("events" in data for data in bucket_dumps.values()) diff --git a/test_runner/regress/test_pageserver_restarts_under_workload.py b/test_runner/regress/test_pageserver_restarts_under_workload.py index 9f19c887a4..6b33b3e046 100644 --- a/test_runner/regress/test_pageserver_restarts_under_workload.py +++ b/test_runner/regress/test_pageserver_restarts_under_workload.py @@ -16,7 +16,7 @@ if TYPE_CHECKING: # Test restarting page server, while safekeeper and compute node keep # running. 
-def test_pageserver_restarts_under_worload(neon_simple_env: NeonEnv, pg_bin: PgBin): +def test_pageserver_restarts_under_workload(neon_simple_env: NeonEnv, pg_bin: PgBin): env = neon_simple_env env.create_branch("test_pageserver_restarts") endpoint = env.endpoints.create_start("test_pageserver_restarts") @@ -28,7 +28,11 @@ def test_pageserver_restarts_under_worload(neon_simple_env: NeonEnv, pg_bin: PgB pg_bin.run_capture(["pgbench", "-i", "-I", "dtGvp", f"-s{scale}", connstr]) pg_bin.run_capture(["pgbench", f"-T{n_restarts}", connstr]) - thread = threading.Thread(target=run_pgbench, args=(endpoint.connstr(),), daemon=True) + thread = threading.Thread( + target=run_pgbench, + args=(endpoint.connstr(options="-cstatement_timeout=360s"),), + daemon=True, + ) thread.start() for _ in range(n_restarts): diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index 474002353b..728241b465 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -173,7 +173,11 @@ def test_pg_regress( (runpath / "testtablespace").mkdir(parents=True) # Compute all the file locations that pg_regress will need. - build_path = pg_distrib_dir / f"build/{env.pg_version.v_prefixed}/src/test/regress" + # + # XXX: We assume that the `build` directory is a sibling of the + # pg_distrib_dir. That is the default when you check out the + # repository; `build` and `pg_install` are created side by side. + build_path = pg_distrib_dir / f"../build/{env.pg_version.v_prefixed}/src/test/regress" src_path = base_dir / f"vendor/postgres-{env.pg_version.v_prefixed}/src/test/regress" bindir = pg_distrib_dir / f"v{env.pg_version}/bin" schedule = src_path / "parallel_schedule" @@ -250,7 +254,11 @@ def test_isolation( (runpath / "testtablespace").mkdir(parents=True) # Compute all the file locations that pg_isolation_regress will need. - build_path = pg_distrib_dir / f"build/{env.pg_version.v_prefixed}/src/test/isolation" + # + # XXX: We assume that the `build` directory is a sibling of the + # pg_distrib_dir. That is the default when you check out the + # repository; `build` and `pg_install` are created side by side. + build_path = pg_distrib_dir / f"../build/{env.pg_version.v_prefixed}/src/test/isolation" src_path = base_dir / f"vendor/postgres-{env.pg_version.v_prefixed}/src/test/isolation" bindir = pg_distrib_dir / f"v{env.pg_version}/bin" schedule = src_path / "isolation_schedule" @@ -306,13 +314,7 @@ def test_sql_regress( ) # Connect to postgres and create a database called "regression". - endpoint = env.endpoints.create_start( - "main", - config_lines=[ - # Enable the test mode, so that we don't need to patch the test cases. - "neon.regress_test_mode = true", - ], - ) + endpoint = env.endpoints.create_start("main") endpoint.safe_psql(f"CREATE DATABASE {DBNAME}") # Create some local directories for pg_regress to run in. @@ -320,8 +322,11 @@ def test_sql_regress( (runpath / "testtablespace").mkdir(parents=True) # Compute all the file locations that pg_regress will need. - # This test runs neon specific tests - build_path = pg_distrib_dir / f"build/v{env.pg_version}/src/test/regress" + # + # XXX: We assume that the `build` directory is a sibling of the + # pg_distrib_dir. That is the default when you check out the + # repository; `build` and `pg_install` are created side by side. 
+ build_path = pg_distrib_dir / f"../build/{env.pg_version.v_prefixed}/src/test/regress" src_path = base_dir / "test_runner/sql_regress" bindir = pg_distrib_dir / f"v{env.pg_version}/bin" schedule = src_path / "parallel_schedule" diff --git a/test_runner/regress/test_proxy_allowed_ips.py b/test_runner/regress/test_proxy_allowed_ips.py index 7384326385..5ac74585b9 100644 --- a/test_runner/regress/test_proxy_allowed_ips.py +++ b/test_runner/regress/test_proxy_allowed_ips.py @@ -19,11 +19,15 @@ TABLE_NAME = "neon_control_plane.endpoints" async def test_proxy_psql_allowed_ips(static_proxy: NeonProxy, vanilla_pg: VanillaPostgres): # Shouldn't be able to connect to this project vanilla_pg.safe_psql( - f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('private-project', '8.8.8.8')" + f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('private-project', '8.8.8.8')", + user="proxy", + password="password", ) # Should be able to connect to this project vanilla_pg.safe_psql( - f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('generic-project', '::1,127.0.0.1')" + f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('generic-project', '::1,127.0.0.1')", + user="proxy", + password="password", ) def check_cannot_connect(**kwargs): @@ -60,7 +64,9 @@ async def test_proxy_http_allowed_ips(static_proxy: NeonProxy, vanilla_pg: Vanil # Shouldn't be able to connect to this project vanilla_pg.safe_psql( - f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('proxy', '8.8.8.8')" + f"INSERT INTO {TABLE_NAME} (endpoint_id, allowed_ips) VALUES ('proxy', '8.8.8.8')", + user="proxy", + password="password", ) def query(status: int, query: str, *args): @@ -75,6 +81,8 @@ async def test_proxy_http_allowed_ips(static_proxy: NeonProxy, vanilla_pg: Vanil query(400, "select 1;") # ip address is not allowed # Should be able to connect to this project vanilla_pg.safe_psql( - f"UPDATE {TABLE_NAME} SET allowed_ips = '8.8.8.8,127.0.0.1' WHERE endpoint_id = 'proxy'" + f"UPDATE {TABLE_NAME} SET allowed_ips = '8.8.8.8,127.0.0.1' WHERE endpoint_id = 'proxy'", + user="proxy", + password="password", ) query(200, "select 1;") # should work now diff --git a/test_runner/regress/test_replica_promotes.py b/test_runner/regress/test_replica_promotes.py index e378d37635..4486901bae 100644 --- a/test_runner/regress/test_replica_promotes.py +++ b/test_runner/regress/test_replica_promotes.py @@ -4,13 +4,25 @@ File with secondary->primary promotion testing. This far, only contains a test that we don't break and that the data is persisted. 
""" +from typing import cast + import psycopg2 +from fixtures.common_types import Lsn from fixtures.log_helper import log from fixtures.neon_fixtures import Endpoint, NeonEnv, wait_replica_caughtup from fixtures.pg_version import PgVersion from pytest import raises +def stop_and_check_lsn(ep: Endpoint, expected_lsn: Lsn | None): + ep.stop(mode="immediate-terminate") + lsn = ep.terminate_flush_lsn + if expected_lsn is not None: + assert lsn >= expected_lsn, f"{expected_lsn=} < {lsn=}" + else: + assert lsn == expected_lsn, f"{expected_lsn=} != {lsn=}" + + def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): """ Test that a replica safely promotes, and can commit data updates which @@ -37,7 +49,9 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): pg_current_wal_flush_lsn() """ ) - log.info(f"Primary: Current LSN after workload is {primary_cur.fetchone()}") + lsn_triple = cast("tuple[str, str, str]", primary_cur.fetchone()) + log.info(f"Primary: Current LSN after workload is {lsn_triple}") + expected_primary_lsn: Lsn = Lsn(lsn_triple[2]) primary_cur.execute("show neon.safekeepers") safekeepers = primary_cur.fetchall()[0][0] @@ -57,7 +71,7 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): secondary_cur.execute("select count(*) from t") assert secondary_cur.fetchone() == (100,) - primary.stop_and_destroy(mode="immediate") + stop_and_check_lsn(primary, expected_primary_lsn) # Reconnect to the secondary to make sure we get a read-write connection promo_conn = secondary.connect() @@ -109,9 +123,10 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): # wait_for_last_flush_lsn(env, secondary, env.initial_tenant, env.initial_timeline) - secondary.stop_and_destroy() + # secondaries don't sync safekeepers on finish so LSN will be None + stop_and_check_lsn(secondary, None) - primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary") + primary = env.endpoints.create_start(branch_name="main", endpoint_id="primary2") with primary.connect() as new_primary: new_primary_cur = new_primary.cursor() @@ -122,7 +137,9 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): pg_current_wal_flush_lsn() """ ) - log.info(f"New primary: Boot LSN is {new_primary_cur.fetchone()}") + lsn_triple = cast("tuple[str, str, str]", new_primary_cur.fetchone()) + expected_primary_lsn = Lsn(lsn_triple[2]) + log.info(f"New primary: Boot LSN is {lsn_triple}") new_primary_cur.execute("select count(*) from t") assert new_primary_cur.fetchone() == (200,) @@ -130,4 +147,4 @@ def test_replica_promotes(neon_simple_env: NeonEnv, pg_version: PgVersion): new_primary_cur.execute("select count(*) from t") assert new_primary_cur.fetchone() == (300,) - primary.stop(mode="immediate") + stop_and_check_lsn(primary, expected_primary_lsn) diff --git a/test_runner/regress/test_s3_restore.py b/test_runner/regress/test_s3_restore.py index 082808f9ff..2d7be1f9d1 100644 --- a/test_runner/regress/test_s3_restore.py +++ b/test_runner/regress/test_s3_restore.py @@ -74,7 +74,7 @@ def test_tenant_s3_restore( last_flush_lsn = Lsn(endpoint.safe_psql("SELECT pg_current_wal_flush_lsn()")[0][0]) last_flush_lsns.append(last_flush_lsn) ps_http.timeline_checkpoint(tenant_id, timeline_id) - wait_for_upload(ps_http, tenant_id, timeline_id, last_flush_lsn) + wait_for_upload(ps_http, tenant_id, timeline_id, last_flush_lsn, timeout=60) log.info(f"{timeline} timeline {timeline_id} {last_flush_lsn=}") parent = timeline diff --git 
a/test_runner/regress/test_safekeeper_deletion.py b/test_runner/regress/test_safekeeper_deletion.py index b681a86103..bc79969e9a 100644 --- a/test_runner/regress/test_safekeeper_deletion.py +++ b/test_runner/regress/test_safekeeper_deletion.py @@ -30,6 +30,7 @@ def test_safekeeper_delete_timeline(neon_env_builder: NeonEnvBuilder, auth_enabl env.pageserver.allowed_errors.extend( [ ".*Timeline .* was not found in global map.*", + ".*Timeline .* has been deleted.*", ".*Timeline .* was cancelled and cannot be used anymore.*", ] ) @@ -198,6 +199,7 @@ def test_safekeeper_delete_timeline_under_load(neon_env_builder: NeonEnvBuilder) env.pageserver.allowed_errors.extend( [ ".*Timeline.*was cancelled.*", + ".*Timeline.*has been deleted.*", ".*Timeline.*was not found.*", ] ) diff --git a/test_runner/regress/test_safekeeper_migration.py b/test_runner/regress/test_safekeeper_migration.py new file mode 100644 index 0000000000..057371175c --- /dev/null +++ b/test_runner/regress/test_safekeeper_migration.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + + +def test_safekeeper_migration_simple(neon_env_builder: NeonEnvBuilder): + """ + Simple safekeeper migration test. + Creates 3 safekeepers. The timeline is configured to use only one safekeeper. + 1. Go through all safekeepers, migrate the timeline to it. + 2. Stop the other safekeepers. Validate that the insert is successful. + 3. Start the other safekeepers again and go to the next safekeeper. + 4. Validate that the table contains all inserted values. + """ + neon_env_builder.num_safekeepers = 3 + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": True, + "timeline_safekeeper_count": 1, + } + env = neon_env_builder.init_start() + # TODO(diko): pageserver spams with various errors during safekeeper migration. + # Fix the code so it handles the migration better. + env.pageserver.allowed_errors.extend( + [ + ".*Timeline .* was cancelled and cannot be used anymore.*", + ".*Timeline .* has been deleted.*", + ".*wal receiver task finished with an error.*", + ] + ) + + ep = env.endpoints.create("main", tenant_id=env.initial_tenant) + + mconf = env.storage_controller.timeline_locate(env.initial_tenant, env.initial_timeline) + assert mconf["new_sk_set"] is None + assert len(mconf["sk_set"]) == 1 + assert mconf["generation"] == 1 + + ep.start(safekeeper_generation=1, safekeepers=mconf["sk_set"]) + ep.safe_psql("CREATE EXTENSION neon_test_utils;") + ep.safe_psql("CREATE TABLE t(a int)") + + for active_sk in range(1, 4): + env.storage_controller.migrate_safekeepers( + env.initial_tenant, env.initial_timeline, [active_sk] + ) + + other_sks = [sk for sk in range(1, 4) if sk != active_sk] + + for sk in other_sks: + env.safekeepers[sk - 1].stop() + + ep.safe_psql(f"INSERT INTO t VALUES ({active_sk})") + + for sk in other_sks: + env.safekeepers[sk - 1].start() + + ep.clear_buffers() + + assert ep.safe_psql("SELECT * FROM t") == [(i,) for i in range(1, 4)] + + # 1 initial generation + 2 migrations on each loop iteration. + expected_gen = 1 + 2 * 3 + + mconf = env.storage_controller.timeline_locate(env.initial_tenant, env.initial_timeline) + assert mconf["generation"] == expected_gen + + assert ep.safe_psql("SHOW neon.safekeepers")[0][0].startswith(f"g#{expected_gen}:") + + # Restart and check again to make sure data is persistent. 
+ ep.stop() + ep.start(safekeeper_generation=1, safekeepers=[3]) + + assert ep.safe_psql("SELECT * FROM t") == [(i,) for i in range(1, 4)] diff --git a/test_runner/regress/test_sharding.py b/test_runner/regress/test_sharding.py index 4c9887fb92..93c621f564 100644 --- a/test_runner/regress/test_sharding.py +++ b/test_runner/regress/test_sharding.py @@ -1337,7 +1337,7 @@ def test_sharding_split_failures( # Create bystander tenants with various shard counts. They should not be affected by the aborted # splits. Regression test for https://github.com/neondatabase/cloud/issues/28589. bystanders = {} # id → shard_count - for bystander_shard_count in [1, 2, 4, 8]: + for bystander_shard_count in [1, 2, 4]: id, _ = env.create_tenant(shard_count=bystander_shard_count) bystanders[id] = bystander_shard_count @@ -1358,6 +1358,8 @@ def test_sharding_split_failures( ".*Reconcile error.*Cancelled.*", # While parent shard's client is stopped during split, flush loop updating LSNs will emit this warning ".*Failed to schedule metadata upload after updating disk_consistent_lsn.*", + # We didn't identify a secondary to remove. + ".*Keeping extra secondaries.*", ] ) @@ -1388,51 +1390,36 @@ def test_sharding_split_failures( with pytest.raises(failure.expect_exception()): env.storage_controller.tenant_shard_split(tenant_id, shard_count=4) + def assert_shard_count(shard_count: int, exclude_ps_id: int | None = None) -> None: + secondary_count = 0 + attached_count = 0 + log.info(f"Iterating over {len(env.pageservers)} pageservers to check shard count") + for ps in env.pageservers: + if exclude_ps_id is not None and ps.id == exclude_ps_id: + continue + + locations = ps.http_client().tenant_list_locations()["tenant_shards"] + for loc in locations: + tenant_shard_id = TenantShardId.parse(loc[0]) + if tenant_shard_id.tenant_id != tenant_id: + continue # skip bystanders + log.info(f"Shard {tenant_shard_id} seen on node {ps.id} in mode {loc[1]['mode']}") + assert tenant_shard_id.shard_count == shard_count + if loc[1]["mode"] == "Secondary": + secondary_count += 1 + else: + attached_count += 1 + assert secondary_count == shard_count + assert attached_count == shard_count + # We expect that the overall operation will fail, but some split requests # will have succeeded: the net result should be to return to a clean state, including # detaching any child shards. 
def assert_rolled_back(exclude_ps_id=None) -> None: - secondary_count = 0 - attached_count = 0 - for ps in env.pageservers: - if exclude_ps_id is not None and ps.id == exclude_ps_id: - continue - - locations = ps.http_client().tenant_list_locations()["tenant_shards"] - for loc in locations: - tenant_shard_id = TenantShardId.parse(loc[0]) - if tenant_shard_id.tenant_id != tenant_id: - continue # skip bystanders - log.info(f"Shard {tenant_shard_id} seen on node {ps.id} in mode {loc[1]['mode']}") - assert tenant_shard_id.shard_count == initial_shard_count - if loc[1]["mode"] == "Secondary": - secondary_count += 1 - else: - attached_count += 1 - - assert secondary_count == initial_shard_count - assert attached_count == initial_shard_count + assert_shard_count(initial_shard_count, exclude_ps_id) def assert_split_done(exclude_ps_id: int | None = None) -> None: - secondary_count = 0 - attached_count = 0 - for ps in env.pageservers: - if exclude_ps_id is not None and ps.id == exclude_ps_id: - continue - - locations = ps.http_client().tenant_list_locations()["tenant_shards"] - for loc in locations: - tenant_shard_id = TenantShardId.parse(loc[0]) - if tenant_shard_id.tenant_id != tenant_id: - continue # skip bystanders - log.info(f"Shard {tenant_shard_id} seen on node {ps.id} in mode {loc[1]['mode']}") - assert tenant_shard_id.shard_count == split_shard_count - if loc[1]["mode"] == "Secondary": - secondary_count += 1 - else: - attached_count += 1 - assert attached_count == split_shard_count - assert secondary_count == split_shard_count + assert_shard_count(split_shard_count, exclude_ps_id) def finish_split(): # Having failed+rolled back, we should be able to split again @@ -1468,6 +1455,7 @@ def test_sharding_split_failures( # The split should appear to be rolled back from the point of view of all pageservers # apart from the one that is offline + env.storage_controller.reconcile_until_idle(timeout_secs=60, max_interval=2) wait_until(lambda: assert_rolled_back(exclude_ps_id=failure.pageserver_id)) finish_split() @@ -1482,6 +1470,7 @@ def test_sharding_split_failures( log.info("Clearing failure...") failure.clear(env) + env.storage_controller.reconcile_until_idle(timeout_secs=60, max_interval=2) wait_until(assert_rolled_back) # Having rolled back, the tenant should be working @@ -1836,3 +1825,90 @@ def test_sharding_gc( shard_gc_cutoff_lsn = Lsn(shard_index["metadata_bytes"]["latest_gc_cutoff_lsn"]) log.info(f"Shard {shard_number} cutoff LSN: {shard_gc_cutoff_lsn}") assert shard_gc_cutoff_lsn == shard_0_gc_cutoff_lsn + + +def test_split_ps_delete_old_shard_after_commit(neon_env_builder: NeonEnvBuilder): + """ + Check that PageServer only deletes old shards after the split is committed such that it doesn't + have to download a lot of files during abort. + """ + DBNAME = "regression" + + init_shard_count = 4 + neon_env_builder.num_pageservers = init_shard_count + stripe_size = 32 + + env = neon_env_builder.init_start( + initial_tenant_shard_count=init_shard_count, initial_tenant_shard_stripe_size=stripe_size + ) + + env.storage_controller.allowed_errors.extend( + [ + # All split failures log a warning when they enqueue the abort operation + ".*Enqueuing background abort.*", + # Tolerate any error logs that mention a failpoint + ".*failpoint.*", + ] + ) + + endpoint = env.endpoints.create("main") + endpoint.respec(skip_pg_catalog_updates=False) + endpoint.start() + + # Write some initial data. 
+ endpoint.safe_psql(f"CREATE DATABASE {DBNAME}") + endpoint.safe_psql("CREATE TABLE usertable ( YCSB_KEY INT, FIELD0 TEXT);") + + for _ in range(1000): + endpoint.safe_psql( + "INSERT INTO usertable SELECT random(), repeat('a', 1000);", log_query=False + ) + + # Record how many bytes we've downloaded before the split. + def collect_downloaded_bytes() -> list[float | None]: + downloaded_bytes = [] + for page_server in env.pageservers: + metric = page_server.http_client().get_metric_value( + "pageserver_remote_ondemand_downloaded_bytes_total" + ) + downloaded_bytes.append(metric) + return downloaded_bytes + + downloaded_bytes_before = collect_downloaded_bytes() + + # Attempt to split the tenant, but fail the split before it completes. + env.storage_controller.configure_failpoints(("shard-split-pre-complete", "return(1)")) + with pytest.raises(StorageControllerApiException): + env.storage_controller.tenant_shard_split(env.initial_tenant, shard_count=16) + + # Wait until split is aborted. + def check_split_is_aborted(): + tenants = env.storage_controller.tenant_list() + assert len(tenants) == 1 + shards = tenants[0]["shards"] + assert len(shards) == 4 + for shard in shards: + assert not shard["is_splitting"] + assert not shard["is_reconciling"] + + # Make sure all new shards have been deleted. + valid_shards = 0 + for ps in env.pageservers: + for tenant_dir in os.listdir(ps.workdir / "tenants"): + try: + tenant_shard_id = TenantShardId.parse(tenant_dir) + valid_shards += 1 + assert tenant_shard_id.shard_count == 4 + except ValueError: + log.info(f"{tenant_dir} is not valid tenant shard id") + assert valid_shards >= 4 + + wait_until(check_split_is_aborted) + + endpoint.safe_psql("SELECT count(*) from usertable;", log_query=False) + + # Make sure we didn't download anything following the aborted split. 
+ downloaded_bytes_after = collect_downloaded_bytes() + + assert downloaded_bytes_before == downloaded_bytes_after + endpoint.stop_and_destroy() diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 346ef0951d..8471ab9f57 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -88,6 +88,12 @@ def test_storage_controller_smoke( neon_env_builder.control_plane_hooks_api = compute_reconfigure_listener.control_plane_hooks_api env = neon_env_builder.init_configs() + # These bubble up from safekeepers + for ps in env.pageservers: + ps.allowed_errors.extend( + [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"] + ) + # Start services by hand so that we can skip a pageserver (this will start + register later) env.broker.start() env.storage_controller.start() @@ -983,6 +989,102 @@ def test_storage_controller_compute_hook_retry( ) +@run_only_on_default_postgres("postgres behavior is not relevant") +def test_storage_controller_compute_hook_keep_failing( + httpserver: HTTPServer, + neon_env_builder: NeonEnvBuilder, + httpserver_listen_address: ListenAddress, +): + neon_env_builder.num_pageservers = 4 + neon_env_builder.storage_controller_config = {"use_local_compute_notifications": False} + (host, port) = httpserver_listen_address + neon_env_builder.control_plane_hooks_api = f"http://{host}:{port}" + + # Set up CP handler for compute notifications + status_by_tenant: dict[TenantId, int] = {} + + def handler(request: Request): + notify_request = request.json + assert notify_request is not None + status = status_by_tenant[TenantId(notify_request["tenant_id"])] + log.info(f"Notify request[{status}]: {notify_request}") + return Response(status=status) + + httpserver.expect_request("/notify-attach", method="PUT").respond_with_handler(handler) + + # Run neon environment + env = neon_env_builder.init_configs() + env.start() + + # Create two tenants: + # - The first tenant is banned by CP and contains only one shard + # - The second tenant is allowed by CP and contains four shards + banned_tenant = TenantId.generate() + status_by_tenant[banned_tenant] = 200 # we will ban this tenant later + env.create_tenant(banned_tenant, placement_policy='{"Attached": 1}') + + shard_count = 4 + allowed_tenant = TenantId.generate() + status_by_tenant[allowed_tenant] = 200 + env.create_tenant(allowed_tenant, shard_count=shard_count, placement_policy='{"Attached": 1}') + + # Find the pageserver of the banned tenant + banned_tenant_ps = env.get_tenant_pageserver(banned_tenant) + assert banned_tenant_ps is not None + alive_pageservers = [p for p in env.pageservers if p.id != banned_tenant_ps.id] + + # Stop pageserver and ban tenant to trigger failed reconciliation + status_by_tenant[banned_tenant] = 423 + banned_tenant_ps.stop() + env.storage_controller.allowed_errors.append(NOTIFY_BLOCKED_LOG) + env.storage_controller.allowed_errors.extend(NOTIFY_FAILURE_LOGS) + env.storage_controller.allowed_errors.append(".*Shard reconciliation is keep-failing.*") + env.storage_controller.node_configure(banned_tenant_ps.id, {"availability": "Offline"}) + + # Migrate all allowed tenant shards to the first alive pageserver + # to trigger storage controller optimizations due to affinity rules + for shard_number in range(shard_count): + env.storage_controller.tenant_shard_migrate( + TenantShardId(allowed_tenant, shard_number, shard_count), + alive_pageservers[0].id, + 
config=StorageControllerMigrationConfig(prewarm=False, override_scheduler=True), + ) + + # Make some reconcile_all calls to trigger optimizations + # RECONCILE_COUNT must be greater than storcon's MAX_CONSECUTIVE_RECONCILIATION_ERRORS + RECONCILE_COUNT = 12 + for i in range(RECONCILE_COUNT): + try: + n = env.storage_controller.reconcile_all() + log.info(f"Reconciliation attempt {i} finished with success: {n}") + except StorageControllerApiException as e: + assert "Control plane tenant busy" in str(e) + log.info(f"Reconciliation attempt {i} finished with failure") + + banned_descr = env.storage_controller.tenant_describe(banned_tenant) + assert banned_descr["shards"][0]["is_pending_compute_notification"] is True + time.sleep(2) + + # Check that the allowed tenant shards are optimized due to affinity rules + locations = alive_pageservers[0].http_client().tenant_list_locations()["tenant_shards"] + not_optimized_shard_count = 0 + for loc in locations: + tsi = TenantShardId.parse(loc[0]) + if tsi.tenant_id != allowed_tenant: + continue + if loc[1]["mode"] == "AttachedSingle": + not_optimized_shard_count += 1 + log.info(f"Shard {tsi} seen in mode {loc[1]['mode']}") + + assert not_optimized_shard_count < shard_count, "At least one shard should be optimized" + + # Unban the tenant and run reconciliations + status_by_tenant[banned_tenant] = 200 + env.storage_controller.reconcile_all() + banned_descr = env.storage_controller.tenant_describe(banned_tenant) + assert banned_descr["shards"][0]["is_pending_compute_notification"] is False + + @run_only_on_default_postgres("this test doesn't start an endpoint") def test_storage_controller_compute_hook_revert( httpserver: HTTPServer, @@ -2516,7 +2618,7 @@ def test_storage_controller_node_deletion( wait_until(assert_shards_migrated) log.info(f"Deleting pageserver {victim.id}") - env.storage_controller.node_delete(victim.id) + env.storage_controller.node_delete_old(victim.id) if not while_offline: @@ -2551,6 +2653,60 @@ def test_storage_controller_node_deletion( env.storage_controller.consistency_check() +def test_storage_controller_node_delete_cancellation(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_pageservers = 3 + neon_env_builder.num_azs = 3 + env = neon_env_builder.init_configs() + env.start() + + tenant_count = 12 + shard_count_per_tenant = 16 + tenant_ids = [] + + for _ in range(0, tenant_count): + tid = TenantId.generate() + tenant_ids.append(tid) + env.create_tenant( + tid, placement_policy='{"Attached":1}', shard_count=shard_count_per_tenant + ) + + # Sanity check: initial creations should not leave the system in an unstable scheduling state + assert env.storage_controller.reconcile_all() == 0 + + nodes = env.storage_controller.node_list() + assert len(nodes) == 3 + + env.storage_controller.configure_failpoints(("sleepy-delete-loop", "return(10000)")) + + ps_id_to_delete = env.pageservers[0].id + + env.storage_controller.warm_up_all_secondaries() + env.storage_controller.retryable_node_operation( + lambda ps_id: env.storage_controller.node_delete(ps_id), + ps_id_to_delete, + max_attempts=3, + backoff=2, + ) + + env.storage_controller.poll_node_status( + ps_id_to_delete, + PageserverAvailability.ACTIVE, + PageserverSchedulingPolicy.DELETING, + max_attempts=6, + backoff=2, + ) + + env.storage_controller.cancel_node_delete(ps_id_to_delete) + + env.storage_controller.poll_node_status( + ps_id_to_delete, + PageserverAvailability.ACTIVE, + PageserverSchedulingPolicy.ACTIVE, + max_attempts=6, + backoff=2, + ) + + 
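+    # Note: the sleepy-delete-loop failpoint keeps the delete loop from draining the
+    # node, so the cancellation lands while the node still holds its shards and its
+    # scheduling policy simply flips back from Deleting to Active.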
@pytest.mark.parametrize("shard_count", [None, 2]) def test_storage_controller_metadata_health( neon_env_builder: NeonEnvBuilder, @@ -2956,7 +3112,7 @@ def test_storage_controller_leadership_transfer_during_split( env.storage_controller.allowed_errors.extend( [".*Unexpected child shard count.*", ".*Enqueuing background abort.*"] ) - pause_failpoint = "shard-split-pre-complete" + pause_failpoint = "shard-split-pre-complete-pause" env.storage_controller.configure_failpoints((pause_failpoint, "pause")) split_fut = executor.submit( @@ -3003,7 +3159,7 @@ def test_storage_controller_leadership_transfer_during_split( env.storage_controller.request( "PUT", f"http://127.0.0.1:{storage_controller_1_port}/debug/v1/failpoints", - json=[{"name": "shard-split-pre-complete", "actions": "off"}], + json=[{"name": pause_failpoint, "actions": "off"}], headers=env.storage_controller.headers(TokenScope.ADMIN), ) @@ -3093,6 +3249,58 @@ def test_storage_controller_ps_restarted_during_drain(neon_env_builder: NeonEnvB wait_until(reconfigure_node_again) +def test_ps_unavailable_after_delete(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_pageservers = 3 + + env = neon_env_builder.init_start() + + def assert_nodes_count(n: int): + nodes = env.storage_controller.node_list() + assert len(nodes) == n + + # Nodes count must remain the same before deletion + assert_nodes_count(3) + + ps = env.pageservers[0] + env.storage_controller.node_delete_old(ps.id) + + # After deletion, the node count must be reduced + assert_nodes_count(2) + + # Running pageserver CLI init in a separate thread + with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: + log.info("Restarting tombstoned pageserver...") + ps.stop() + ps_start_fut = executor.submit(lambda: ps.start(await_active=False)) + + # After deleted pageserver restart, the node count must remain the same + assert_nodes_count(2) + + tombstones = env.storage_controller.tombstone_list() + assert len(tombstones) == 1 and tombstones[0]["id"] == ps.id + + env.storage_controller.tombstone_delete(ps.id) + + tombstones = env.storage_controller.tombstone_list() + assert len(tombstones) == 0 + + # Wait for the pageserver start operation to complete. + # If it fails with an exception, we try restarting the pageserver since the failure + # may be due to the storage controller refusing to register the node. + # However, if we get a TimeoutError that means the pageserver is completely hung, + # which is an unexpected failure mode that we'll let propagate up. 
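+    # (Registration is refused while a tombstone for the node still exists; we deleted
+    # the tombstone above, so one restart should be enough for the node to register.)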
+ try: + ps_start_fut.result(timeout=20) + except TimeoutError: + raise + except Exception: + log.info("Restarting deleted pageserver...") + ps.restart() + + # Finally, the node can be registered again after tombstone is deleted + wait_until(lambda: assert_nodes_count(3)) + + def test_storage_controller_timeline_crud_race(neon_env_builder: NeonEnvBuilder): """ The storage controller is meant to handle the case where a timeline CRUD operation races @@ -3403,7 +3611,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): assert target.get_safekeeper(fake_id) is None - assert len(target.get_safekeepers()) == 0 + start_sks = target.get_safekeepers() sk_0 = env.safekeepers[0] @@ -3425,7 +3633,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): inserted = target.get_safekeeper(fake_id) assert inserted is not None - assert target.get_safekeepers() == [inserted] + assert target.get_safekeepers() == start_sks + [inserted] assert eq_safekeeper_records(body, inserted) # error out if pk is changed (unexpected) @@ -3437,7 +3645,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): assert exc.value.status_code == 400 inserted_again = target.get_safekeeper(fake_id) - assert target.get_safekeepers() == [inserted_again] + assert target.get_safekeepers() == start_sks + [inserted_again] assert inserted_again is not None assert eq_safekeeper_records(inserted, inserted_again) @@ -3446,7 +3654,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): body["version"] += 1 target.on_safekeeper_deploy(fake_id, body) inserted_now = target.get_safekeeper(fake_id) - assert target.get_safekeepers() == [inserted_now] + assert target.get_safekeepers() == start_sks + [inserted_now] assert inserted_now is not None assert eq_safekeeper_records(body, inserted_now) @@ -3455,7 +3663,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): body["https_port"] = 123 target.on_safekeeper_deploy(fake_id, body) inserted_now = target.get_safekeeper(fake_id) - assert target.get_safekeepers() == [inserted_now] + assert target.get_safekeepers() == start_sks + [inserted_now] assert inserted_now is not None assert eq_safekeeper_records(body, inserted_now) env.storage_controller.consistency_check() @@ -3464,7 +3672,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): body["https_port"] = None target.on_safekeeper_deploy(fake_id, body) inserted_now = target.get_safekeeper(fake_id) - assert target.get_safekeepers() == [inserted_now] + assert target.get_safekeepers() == start_sks + [inserted_now] assert inserted_now is not None assert eq_safekeeper_records(body, inserted_now) env.storage_controller.consistency_check() @@ -3472,18 +3680,21 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): # some small tests for the scheduling policy querying and returning APIs newest_info = target.get_safekeeper(inserted["id"]) assert newest_info - assert newest_info["scheduling_policy"] == "Pause" - target.safekeeper_scheduling_policy(inserted["id"], "Active") - newest_info = target.get_safekeeper(inserted["id"]) - assert newest_info - assert newest_info["scheduling_policy"] == "Active" - # Ensure idempotency - target.safekeeper_scheduling_policy(inserted["id"], "Active") - newest_info = target.get_safekeeper(inserted["id"]) - assert newest_info - assert newest_info["scheduling_policy"] == "Active" - # change back to paused again + assert ( + 
newest_info["scheduling_policy"] == "Activating" + or newest_info["scheduling_policy"] == "Active" + ) target.safekeeper_scheduling_policy(inserted["id"], "Pause") + newest_info = target.get_safekeeper(inserted["id"]) + assert newest_info + assert newest_info["scheduling_policy"] == "Pause" + # Ensure idempotency + target.safekeeper_scheduling_policy(inserted["id"], "Pause") + newest_info = target.get_safekeeper(inserted["id"]) + assert newest_info + assert newest_info["scheduling_policy"] == "Pause" + # change back to active again + target.safekeeper_scheduling_policy(inserted["id"], "Active") def storcon_heartbeat(): assert env.storage_controller.log_contains( @@ -3496,6 +3707,57 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): target.safekeeper_scheduling_policy(inserted["id"], "Decomissioned") +@run_only_on_default_postgres("this is like a 'unit test' against storcon db") +def test_safekeeper_activating_to_active(neon_env_builder: NeonEnvBuilder): + env = neon_env_builder.init_configs() + env.start() + + fake_id = 5 + + target = env.storage_controller + + assert target.get_safekeeper(fake_id) is None + + start_sks = target.get_safekeepers() + + sk_0 = env.safekeepers[0] + + body = { + "active": True, + "id": fake_id, + "created_at": "2023-10-25T09:11:25Z", + "updated_at": "2024-08-28T11:32:43Z", + "region_id": "aws-eu-central-1", + "host": "localhost", + "port": sk_0.port.pg, + "http_port": sk_0.port.http, + "https_port": None, + "version": 5957, + "availability_zone_id": "eu-central-1a", + } + + target.on_safekeeper_deploy(fake_id, body) + + inserted = target.get_safekeeper(fake_id) + assert inserted is not None + assert target.get_safekeepers() == start_sks + [inserted] + assert eq_safekeeper_records(body, inserted) + + def safekeeper_is_active(): + newest_info = target.get_safekeeper(inserted["id"]) + assert newest_info + assert newest_info["scheduling_policy"] == "Active" + + wait_until(safekeeper_is_active) + + target.safekeeper_scheduling_policy(inserted["id"], "Activating") + + wait_until(safekeeper_is_active) + + # Now decomission it + target.safekeeper_scheduling_policy(inserted["id"], "Decomissioned") + + def eq_safekeeper_records(a: dict[str, Any], b: dict[str, Any]) -> bool: compared = [dict(a), dict(b)] @@ -3583,6 +3845,11 @@ def test_timeline_delete_mid_live_migration(neon_env_builder: NeonEnvBuilder, mi env = neon_env_builder.init_configs() env.start() + for ps in env.pageservers: + ps.allowed_errors.extend( + [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"] + ) + tenant_id = TenantId.generate() timeline_id = TimelineId.generate() env.storage_controller.tenant_create(tenant_id, placement_policy={"Attached": 1}) @@ -4105,13 +4372,20 @@ class DeletionSubject(Enum): TENANT = "tenant" +class EmptyTimeline(Enum): + EMPTY = "empty" + NONEMPTY = "nonempty" + + @run_only_on_default_postgres("PG version is not interesting here") @pytest.mark.parametrize("restart_storcon", [RestartStorcon.RESTART, RestartStorcon.ONLINE]) @pytest.mark.parametrize("deletetion_subject", [DeletionSubject.TENANT, DeletionSubject.TIMELINE]) +@pytest.mark.parametrize("empty_timeline", [EmptyTimeline.EMPTY, EmptyTimeline.NONEMPTY]) def test_storcon_create_delete_sk_down( neon_env_builder: NeonEnvBuilder, restart_storcon: RestartStorcon, deletetion_subject: DeletionSubject, + empty_timeline: EmptyTimeline, ): """ Test that the storcon can create and delete tenants and timelines with a safekeeper being down. 
@@ -4163,10 +4437,11 @@ def test_storcon_create_delete_sk_down( ep.start(safekeeper_generation=1, safekeepers=[1, 2, 3]) ep.safe_psql("CREATE TABLE IF NOT EXISTS t(key int, value text)") - with env.endpoints.create("child_of_main", tenant_id=tenant_id) as ep: - # endpoint should start. - ep.start(safekeeper_generation=1, safekeepers=[1, 2, 3]) - ep.safe_psql("CREATE TABLE IF NOT EXISTS t(key int, value text)") + if empty_timeline == EmptyTimeline.NONEMPTY: + with env.endpoints.create("child_of_main", tenant_id=tenant_id) as ep: + # endpoint should start. + ep.start(safekeeper_generation=1, safekeepers=[1, 2, 3]) + ep.safe_psql("CREATE TABLE IF NOT EXISTS t(key int, value text)") env.storage_controller.assert_log_contains("writing pending op for sk id 1") env.safekeepers[0].start() @@ -4373,6 +4648,53 @@ def test_storage_controller_graceful_migration(neon_env_builder: NeonEnvBuilder, assert initial_ps.http_client().tenant_list_locations()["tenant_shards"] == [] +def test_attached_0_graceful_migration(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_pageservers = 4 + neon_env_builder.num_azs = 2 + + neon_env_builder.storcon_kick_secondary_downloads = False + + env = neon_env_builder.init_start() + + # It is default, but we want to ensure that there are no secondary locations requested + env.storage_controller.tenant_policy_update(env.initial_tenant, {"placement": {"Attached": 0}}) + env.storage_controller.reconcile_until_idle() + + desc = env.storage_controller.tenant_describe(env.initial_tenant)["shards"][0] + src_ps_id = desc["node_attached"] + src_ps = env.get_pageserver(src_ps_id) + src_az = desc["preferred_az_id"] + + # There must be no secondary locations with Attached(0) placement policy + assert len(desc["node_secondary"]) == 0 + + # Migrate tenant shard to the same AZ node + dst_ps = [ps for ps in env.pageservers if ps.id != src_ps_id and ps.az_id == src_az][0] + + env.storage_controller.tenant_shard_migrate( + TenantShardId(env.initial_tenant, 0, 0), + dst_ps.id, + config=StorageControllerMigrationConfig(prewarm=True), + ) + + def tenant_shard_migrated(): + src_locations = src_ps.http_client().tenant_list_locations()["tenant_shards"] + assert len(src_locations) == 0 + log.info(f"Tenant shard migrated from {src_ps.id}") + dst_locations = dst_ps.http_client().tenant_list_locations()["tenant_shards"] + assert len(dst_locations) == 1 + assert dst_locations[0][1]["mode"] == "AttachedSingle" + log.info(f"Tenant shard migrated to {dst_ps.id}") + + # After all we expect that tenant shard exists only on dst node. + # We wait so long because [`DEFAULT_HEATMAP_PERIOD`] and [`DEFAULT_DOWNLOAD_INTERVAL`] + # are set to 60 seconds by default. + # + # TODO: we should consider making these configurable, so the test can run faster. 
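+    # With both intervals at their 60 second defaults, the 180s timeout allows roughly
+    # three full heatmap-upload/secondary-download cycles before giving up.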
+ wait_until(tenant_shard_migrated, timeout=180, interval=5, status_interval=10) + log.info("Tenant shard migrated successfully") + + @run_only_on_default_postgres("this is like a 'unit test' against storcon db") def test_storage_controller_migrate_with_pageserver_restart( neon_env_builder: NeonEnvBuilder, make_httpserver diff --git a/test_runner/regress/test_storage_scrubber.py b/test_runner/regress/test_storage_scrubber.py index 03cd133ccb..e29cb801d5 100644 --- a/test_runner/regress/test_storage_scrubber.py +++ b/test_runner/regress/test_storage_scrubber.py @@ -341,6 +341,11 @@ def test_scrubber_physical_gc_timeline_deletion(neon_env_builder: NeonEnvBuilder env = neon_env_builder.init_configs() env.start() + for ps in env.pageservers: + ps.allowed_errors.extend( + [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"] + ) + tenant_id = TenantId.generate() timeline_id = TimelineId.generate() env.create_tenant( diff --git a/test_runner/regress/test_tenant_delete.py b/test_runner/regress/test_tenant_delete.py index 8379908631..a0ff9a3ae2 100644 --- a/test_runner/regress/test_tenant_delete.py +++ b/test_runner/regress/test_tenant_delete.py @@ -430,6 +430,7 @@ def test_tenant_delete_stale_shards(neon_env_builder: NeonEnvBuilder, pg_bin: Pg workload.init() workload.write_rows(256) workload.validate() + workload.stop() assert_prefix_not_empty( neon_env_builder.pageserver_remote_storage, diff --git a/test_runner/regress/test_tenants.py b/test_runner/regress/test_tenants.py index d08692500f..c54dd8b38d 100644 --- a/test_runner/regress/test_tenants.py +++ b/test_runner/regress/test_tenants.py @@ -12,7 +12,7 @@ from typing import TYPE_CHECKING import pytest import requests -from fixtures.common_types import Lsn, TenantId, TimelineId +from fixtures.common_types import Lsn, TenantId, TimelineArchivalState, TimelineId from fixtures.log_helper import log from fixtures.metrics import ( PAGESERVER_GLOBAL_METRICS, @@ -299,6 +299,65 @@ def test_pageserver_metrics_removed_after_detach(neon_env_builder: NeonEnvBuilde assert post_detach_samples == set() +def test_pageserver_metrics_removed_after_offload(neon_env_builder: NeonEnvBuilder): + """Tests that when a timeline is offloaded, the tenant specific metrics are not left behind""" + + neon_env_builder.enable_pageserver_remote_storage(RemoteStorageKind.MOCK_S3) + + neon_env_builder.num_safekeepers = 3 + + env = neon_env_builder.init_start() + tenant_1, _ = env.create_tenant() + + timeline_1 = env.create_timeline("test_metrics_removed_after_offload_1", tenant_id=tenant_1) + timeline_2 = env.create_timeline("test_metrics_removed_after_offload_2", tenant_id=tenant_1) + + endpoint_tenant1 = env.endpoints.create_start( + "test_metrics_removed_after_offload_1", tenant_id=tenant_1 + ) + endpoint_tenant2 = env.endpoints.create_start( + "test_metrics_removed_after_offload_2", tenant_id=tenant_1 + ) + + for endpoint in [endpoint_tenant1, endpoint_tenant2]: + with closing(endpoint.connect()) as conn: + with conn.cursor() as cur: + cur.execute("CREATE TABLE t(key int primary key, value text)") + cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'") + cur.execute("SELECT sum(key) FROM t") + assert cur.fetchone() == (5000050000,) + endpoint.stop() + + def get_ps_metric_samples_for_timeline( + tenant_id: TenantId, timeline_id: TimelineId + ) -> list[Sample]: + ps_metrics = env.pageserver.http_client().get_metrics() + samples = [] + for metric_name in ps_metrics.metrics: + for sample in ps_metrics.query_all( + 
name=metric_name, + filter={"tenant_id": str(tenant_id), "timeline_id": str(timeline_id)}, + ): + samples.append(sample) + return samples + + for timeline in [timeline_1, timeline_2]: + pre_offload_samples = set( + [x.name for x in get_ps_metric_samples_for_timeline(tenant_1, timeline)] + ) + assert len(pre_offload_samples) > 0, f"expected at least one sample for {timeline}" + env.pageserver.http_client().timeline_archival_config( + tenant_1, + timeline, + state=TimelineArchivalState.ARCHIVED, + ) + env.pageserver.http_client().timeline_offload(tenant_1, timeline) + post_offload_samples = set( + [x.name for x in get_ps_metric_samples_for_timeline(tenant_1, timeline)] + ) + assert post_offload_samples == set() + + def test_pageserver_with_empty_tenants(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_start() diff --git a/test_runner/regress/test_timeline_archive.py b/test_runner/regress/test_timeline_archive.py index 8d46ef8306..41286a2adc 100644 --- a/test_runner/regress/test_timeline_archive.py +++ b/test_runner/regress/test_timeline_archive.py @@ -896,6 +896,134 @@ def test_timeline_retain_lsn( assert sum == pre_branch_sum +def test_timeline_offload_delete_race(neon_env_builder: NeonEnvBuilder): + """ + Regression test for https://github.com/neondatabase/cloud/issues/30406 + """ + remote_storage_kind = s3_storage() + neon_env_builder.enable_pageserver_remote_storage(remote_storage_kind) + neon_env_builder.num_pageservers = 2 + + env = neon_env_builder.init_start() + + # Turn off gc and compaction loops: we want to issue them manually for better reliability + tenant_id, root_timeline_id = env.create_tenant( + conf={ + "gc_period": "0s", + "compaction_period": "0s", + "checkpoint_distance": f"{1024**2}", + } + ) + + origin_ps = env.get_tenant_pageserver(tenant_id) + assert origin_ps + origin_ps.allowed_errors.extend( + [ + ".*Timed out waiting for deletion queue flush.*", + ".*Timed out waiting for flush to remote storage.*", + ] + ) + origin_ps_http = origin_ps.http_client() + + # We are not sharding this tenant + tenant_shard_id = TenantShardId(tenant_id, 0, 0) + + # Create a branch and archive it + child_timeline_id = env.create_branch("test_archived_branch_persisted", tenant_id) + + with env.endpoints.create_start( + "test_archived_branch_persisted", tenant_id=tenant_id + ) as endpoint: + endpoint.safe_psql_many( + [ + "CREATE TABLE foo(key serial primary key, t text default 'data_content')", + "INSERT INTO foo SELECT FROM generate_series(1,512)", + ] + ) + last_flush_lsn_upload(env, endpoint, tenant_id, child_timeline_id) + + assert_prefix_not_empty( + neon_env_builder.pageserver_remote_storage, + prefix=f"tenants/{str(tenant_id)}/", + ) + assert_prefix_not_empty( + neon_env_builder.pageserver_remote_storage, + prefix=f"tenants/{str(tenant_id)}/tenant-manifest", + ) + + origin_ps_http.timeline_archival_config( + tenant_id, + child_timeline_id, + state=TimelineArchivalState.ARCHIVED, + ) + + def timeline_offloaded_api(timeline_id: TimelineId) -> bool: + return any( + timeline["timeline_id"] == str(timeline_id) + for timeline in origin_ps_http.timeline_and_offloaded_list( + tenant_id=tenant_id + ).offloaded + ) + + def child_offloaded(): + origin_ps_http.timeline_offload(tenant_id=tenant_id, timeline_id=child_timeline_id) + assert timeline_offloaded_api(child_timeline_id) + + wait_until(child_offloaded) + + # Delete the timeline from the origin pageserver, holding up the deletion queue so that it doesn't finish + failpoint_deletion_queue = 
"deletion-queue-before-execute-pause" + origin_ps_http.configure_failpoints((failpoint_deletion_queue, "pause")) + origin_ps_http.timeline_delete(tenant_id, child_timeline_id) + + dest_ps = [ps for ps in env.pageservers if ps.id != origin_ps.id][0] + assert dest_ps + log.info(f"Migrating {tenant_id} {origin_ps.id}->{dest_ps.id}") + env.storage_controller.tenant_shard_migrate(tenant_shard_id, dest_ps_id=dest_ps.id) + + log.info("unstuck the DELETE") + origin_ps_http.configure_failpoints((failpoint_deletion_queue, "off")) + + def child_prefix_empty(): + assert_prefix_empty( + neon_env_builder.pageserver_remote_storage, + prefix=f"tenants/{str(tenant_id)}/{str(child_timeline_id)}/", + ) + + wait_until(child_prefix_empty) + + dest_ps_http = dest_ps.http_client() + + # We can't use timeline_delete_wait_completed here as timeline status will return 404, but we want to return 404 from the deletion endpoint + def timeline_is_missing(): + data = None + try: + data = dest_ps_http.timeline_delete(tenant_id, child_timeline_id) + log.info(f"timeline delete {data}") + except PageserverApiException as e: + log.debug(e) + if e.status_code == 404: + return + + raise RuntimeError(f"Timeline exists {data}") + + wait_until(timeline_is_missing) + # (dest_ps_http, tenant_id, child_timeline_id) + + # + # Now ensure that scrubber doesn't have anything to clean up. + # + + # Sleep some amount larger than min_age_secs + time.sleep(3) + + # Ensure that min_age_secs has a deletion impeding effect + gc_summary = env.storage_scrubber.pageserver_physical_gc(min_age_secs=1, mode="full") + assert gc_summary["remote_storage_errors"] == 0 + assert gc_summary["indices_deleted"] == 0 + assert gc_summary["tenant_manifests_deleted"] == 0 + + def test_timeline_offload_generations(neon_env_builder: NeonEnvBuilder): """ Test for scrubber deleting old generations of manifests diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py index f0810270b1..c0f163db32 100644 --- a/test_runner/regress/test_timeline_detach_ancestor.py +++ b/test_runner/regress/test_timeline_detach_ancestor.py @@ -21,7 +21,10 @@ from fixtures.neon_fixtures import ( last_flush_lsn_upload, wait_for_last_flush_lsn, ) -from fixtures.pageserver.http import HistoricLayerInfo, PageserverApiException +from fixtures.pageserver.http import ( + HistoricLayerInfo, + PageserverApiException, +) from fixtures.pageserver.utils import wait_for_last_record_lsn, wait_timeline_detail_404 from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind from fixtures.utils import assert_pageserver_backups_equal, skip_in_debug_build, wait_until @@ -321,7 +324,7 @@ def test_ancestor_detach_reparents_earlier(neon_env_builder: NeonEnvBuilder): # it is to be in line with the deletion timestamp.. well, almost. 
when = original_ancestor[2][:26] when_ts = datetime.datetime.fromisoformat(when).replace(tzinfo=datetime.UTC) - now = datetime.datetime.utcnow().replace(tzinfo=datetime.UTC) + now = datetime.datetime.now(datetime.UTC) assert when_ts < now assert len(lineage.get("reparenting_history", [])) == 0 elif expected_ancestor == timeline_id: @@ -413,6 +416,7 @@ def test_ancestor_detach_behavior_v2(neon_env_builder: NeonEnvBuilder, snapshots "read_only": True, }, ) + sk = env.safekeepers[0] assert sk with pytest.raises(requests.exceptions.HTTPError, match="Not Found"): @@ -454,19 +458,20 @@ def test_ancestor_detach_behavior_v2(neon_env_builder: NeonEnvBuilder, snapshots env.pageserver.quiesce_tenants() - # checking the ancestor after is much faster than waiting for the endpoint not start + # checking the ancestor after is much faster than waiting for the endpoint to start expected_result = [ - ("main", env.initial_timeline, None, 24576, 1), - ("after", after, env.initial_timeline, 24576, 1), - ("snapshot_branchpoint_old", snapshot_branchpoint_old, env.initial_timeline, 8192, 1), - ("snapshot_branchpoint", snapshot_branchpoint, env.initial_timeline, 16384, 1), - ("branch_to_detach", branch_to_detach, None, 16384, 1), - ("earlier", earlier, env.initial_timeline, 0, 1), + # (branch_name, queried_timeline, expected_ancestor, rows, starts, read_only) + ("main", env.initial_timeline, None, 24576, 1, False), + ("after", after, env.initial_timeline, 24576, 1, False), + ("snapshot_branchpoint_old", snapshot_branchpoint_old, env.initial_timeline, 8192, 1, True), + ("snapshot_branchpoint", snapshot_branchpoint, env.initial_timeline, 16384, 1, False), + ("branch_to_detach", branch_to_detach, None, 16384, 1, False), + ("earlier", earlier, env.initial_timeline, 0, 1, False), ] assert isinstance(env.pageserver_remote_storage, LocalFsStorage) - for branch_name, queried_timeline, expected_ancestor, _, _ in expected_result: + for branch_name, queried_timeline, expected_ancestor, _, _, _ in expected_result: details = client.timeline_detail(env.initial_tenant, queried_timeline) ancestor_timeline_id = details["ancestor_timeline_id"] if expected_ancestor is None: @@ -504,8 +509,19 @@ def test_ancestor_detach_behavior_v2(neon_env_builder: NeonEnvBuilder, snapshots assert len(lineage.get("original_ancestor", [])) == 0 assert len(lineage.get("reparenting_history", [])) == 0 - for name, _, _, rows, starts in expected_result: - with env.endpoints.create_start(name, tenant_id=env.initial_tenant) as ep: + for branch_name, queried_timeline, _, rows, starts, read_only in expected_result: + last_record_lsn = None + if read_only: + # specifying the lsn makes the endpoint read-only and not connect to safekeepers + details = client.timeline_detail(env.initial_tenant, queried_timeline) + last_record_lsn = Lsn(details["last_record_lsn"]) + + log.info(f"reading data from branch {branch_name} at {last_record_lsn}") + with env.endpoints.create( + branch_name, + lsn=last_record_lsn, + ) as ep: + ep.start(safekeeper_generation=1) assert ep.safe_psql("SELECT count(*) FROM foo;")[0][0] == rows assert ep.safe_psql(f"SELECT count(*) FROM audit WHERE starts = {starts}")[0][0] == 1 @@ -1088,6 +1104,9 @@ def test_timeline_detach_ancestor_interrupted_by_deletion( for ps in env.pageservers: ps.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS) + ps.allowed_errors.extend( + [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"] + ) pageservers = dict((int(p.id), p) for p in env.pageservers) @@ -1209,6 +1228,9 @@ def 
test_sharded_tad_interleaved_after_partial_success(neon_env_builder: NeonEnv for ps in env.pageservers: ps.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS) + ps.allowed_errors.extend( + [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"] + ) pageservers = dict((int(p.id), p) for p in env.pageservers) @@ -1867,6 +1889,31 @@ def test_timeline_detach_with_aux_files_with_detach_v1( assert set(http.list_aux_files(env.initial_tenant, branch_timeline_id, lsn1).keys()) == set([]) +def test_detach_ancestors_with_no_writes( + neon_env_builder: NeonEnvBuilder, +): + env = neon_env_builder.init_start() + + endpoint = env.endpoints.create_start("main", tenant_id=env.initial_tenant) + wait_for_last_flush_lsn(env, endpoint, env.initial_tenant, env.initial_timeline) + endpoint.safe_psql( + "SELECT pg_create_logical_replication_slot('test_slot_parent_1', 'pgoutput')" + ) + wait_for_last_flush_lsn(env, endpoint, env.initial_tenant, env.initial_timeline) + endpoint.stop() + + for i in range(0, 5): + if i == 0: + ancestor_name = "main" + else: + ancestor_name = f"b{i}" + + tlid = env.create_branch(f"b{i + 1}", ancestor_branch_name=ancestor_name) + + client = env.pageserver.http_client() + client.detach_ancestor(tenant_id=env.initial_tenant, timeline_id=tlid) + + # TODO: # - branch near existing L1 boundary, image layers? # - investigate: why are layers started at uneven lsn? not just after branching, but in general. diff --git a/test_runner/regress/test_timeline_gc_blocking.py b/test_runner/regress/test_timeline_gc_blocking.py index 9a710f5b80..daba8019b6 100644 --- a/test_runner/regress/test_timeline_gc_blocking.py +++ b/test_runner/regress/test_timeline_gc_blocking.py @@ -24,6 +24,10 @@ def test_gc_blocking_by_timeline(neon_env_builder: NeonEnvBuilder, sharded: bool initial_tenant_conf={"gc_period": "1s", "lsn_lease_length": "0s"}, initial_tenant_shard_count=2 if sharded else None, ) + for ps in env.pageservers: + ps.allowed_errors.extend( + [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"] + ) if sharded: http = env.storage_controller.pageserver_api() diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py index 6a7c7a8bef..22e6d2e1c3 100644 --- a/test_runner/regress/test_wal_acceptor.py +++ b/test_runner/regress/test_wal_acceptor.py @@ -229,7 +229,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder): # Test timeline_list endpoint. http_cli = env.safekeepers[0].http_client() - assert len(http_cli.timeline_list()) == 3 + assert len(http_cli.timeline_list()) == 4 # Check that dead minority doesn't prevent the commits: execute insert n_inserts @@ -433,6 +433,7 @@ def test_wal_backup(neon_env_builder: NeonEnvBuilder): env.pageserver.allowed_errors.extend( [ ".*Timeline .* was not found in global map.*", + ".*Timeline .* has been deleted.*", ".*Timeline .* was cancelled and cannot be used anymore.*", ] ) @@ -739,8 +740,8 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool): env = neon_env_builder.init_start() tenant_id = env.initial_tenant - timeline_id = env.create_branch("test_timeline_status") - endpoint = env.endpoints.create_start("test_timeline_status") + timeline_id = env.initial_timeline + endpoint = env.endpoints.create_start("main") wa = env.safekeepers[0] @@ -1291,6 +1292,12 @@ def test_lagging_sk(neon_env_builder: NeonEnvBuilder): # it works without compute at all. 
def test_peer_recovery(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 3 + + # timelines should be created the old way + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": False, + } + env = neon_env_builder.init_start() tenant_id = env.initial_tenant @@ -1531,6 +1538,11 @@ def test_safekeeper_without_pageserver( def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder): + # timelines should be created the old way manually until we have migration support + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": False, + } + def execute_payload(endpoint: Endpoint): with closing(endpoint.connect()) as conn: with conn.cursor() as cur: @@ -1660,6 +1672,15 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder, live_sk_change: bool): res = env.safekeepers[3].pull_timeline( [env.safekeepers[0], env.safekeepers[2]], tenant_id, timeline_id ) + sk_id_1 = env.safekeepers[0].safekeeper_id() + sk_id_3 = env.safekeepers[2].safekeeper_id() + sk_id_4 = env.safekeepers[3].safekeeper_id() + new_conf = MembershipConfiguration( + generation=2, members=[sk_id_1, sk_id_3, sk_id_4], new_members=None + ) + for i in [0, 2, 3]: + env.safekeepers[i].http_client().membership_switch(tenant_id, timeline_id, new_conf) + log.info("Finished pulling timeline") log.info(res) @@ -1704,13 +1725,15 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 3 neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) env = neon_env_builder.init_start() - tenant_id = env.initial_tenant - timeline_id = env.initial_timeline (src_sk, dst_sk) = (env.safekeepers[0], env.safekeepers[2]) + dst_sk.stop() + + [tenant_id, timeline_id] = env.create_tenant() + log.info("use only first 2 safekeepers, 3rd will be seeded") - endpoint = env.endpoints.create("main") + endpoint = env.endpoints.create("main", tenant_id=tenant_id) endpoint.active_safekeepers = [1, 2] endpoint.start() endpoint.safe_psql("create table t(key int, value text)") @@ -1722,6 +1745,7 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder): src_http = src_sk.http_client() # run pull_timeline which will halt before downloading files src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause")) + dst_sk.start() pt_handle = PropagatingThread( target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id) ) @@ -1781,23 +1805,27 @@ def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage()) env = neon_env_builder.init_start() tenant_id = env.initial_tenant - timeline_id = env.initial_timeline (src_sk, dst_sk) = (env.safekeepers[0], env.safekeepers[2]) + dst_sk.stop() + src_http = src_sk.http_client() + src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause")) + + timeline_id = env.create_branch("pull_timeline_term_changes") + + # run pull_timeline which will halt before downloading files log.info("use only first 2 safekeepers, 3rd will be seeded") - ep = env.endpoints.create("main") + ep = env.endpoints.create("pull_timeline_term_changes") ep.active_safekeepers = [1, 2] ep.start() ep.safe_psql("create table t(key int, value text)") ep.safe_psql("insert into t select generate_series(1, 1000), 'pear'") - src_http = src_sk.http_client() - # run pull_timeline which will halt before downloading files - src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause")) pt_handle = PropagatingThread( 
target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id) ) + dst_sk.start() pt_handle.start() src_sk.wait_until_paused("sk-snapshot-after-list-pausable") @@ -1806,7 +1834,7 @@ def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder): # restart compute to bump term ep.stop() - ep = env.endpoints.create("main") + ep = env.endpoints.create("pull_timeline_term_changes") ep.active_safekeepers = [1, 2] ep.start() ep.safe_psql("insert into t select generate_series(1, 100), 'pear'") @@ -1928,12 +1956,18 @@ def test_pull_timeline_while_evicted(neon_env_builder: NeonEnvBuilder): @run_only_on_default_postgres("tests only safekeeper API") def test_membership_api(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 1 + # timelines should be created the old way + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": False, + } + env = neon_env_builder.init_start() # These are expected after timeline deletion on safekeepers. env.pageserver.allowed_errors.extend( [ ".*Timeline .* was not found in global map.*", + ".*Timeline .* has been deleted.*", ".*Timeline .* was cancelled and cannot be used anymore.*", ] ) @@ -2007,6 +2041,12 @@ def test_explicit_timeline_creation(neon_env_builder: NeonEnvBuilder): created manually, later storcon will do that. """ neon_env_builder.num_safekeepers = 3 + + # timelines should be created the old way manually + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": False, + } + env = neon_env_builder.init_start() tenant_id = env.initial_tenant @@ -2062,7 +2102,7 @@ def test_idle_reconnections(neon_env_builder: NeonEnvBuilder): env = neon_env_builder.init_start() tenant_id = env.initial_tenant - timeline_id = env.create_branch("test_idle_reconnections") + timeline_id = env.initial_timeline def collect_stats() -> dict[str, float]: # we need to collect safekeeper_pg_queries_received_total metric from all safekeepers @@ -2093,7 +2133,7 @@ def test_idle_reconnections(neon_env_builder: NeonEnvBuilder): collect_stats() - endpoint = env.endpoints.create_start("test_idle_reconnections") + endpoint = env.endpoints.create_start("main") # just write something to the timeline endpoint.safe_psql("create table t(i int)") collect_stats() @@ -2700,3 +2740,85 @@ def test_pull_timeline_partial_segment_integrity(neon_env_builder: NeonEnvBuilde raise Exception("Uneviction did not happen on source safekeeper yet") wait_until(unevicted) + + +def test_timeline_disk_usage_limit(neon_env_builder: NeonEnvBuilder): + """ + Test that the timeline disk usage circuit breaker works as expected. We test that: + 1. The circuit breaker kicks in when the timeline's disk usage exceeds the configured limit, + and it causes writes to hang. + 2. The hanging writes unblock when the issue resolves (by restarting the safekeeper in the + test to simulate a more realistic production troubleshooting scenario). + 3. We can continue to write as normal after the issue resolves. + 4. There is no data corruption throughout the test. 
+ """ + # Set up environment with a very small disk usage limit (1KB) + neon_env_builder.num_safekeepers = 1 + remote_storage_kind = s3_storage() + neon_env_builder.enable_safekeeper_remote_storage(remote_storage_kind) + + # Set a very small disk usage limit (1KB) + neon_env_builder.safekeeper_extra_opts = ["--max-timeline-disk-usage-bytes=1024"] + + env = neon_env_builder.init_start() + + # Create a timeline and endpoint + env.create_branch("test_timeline_disk_usage_limit") + endpoint = env.endpoints.create_start("test_timeline_disk_usage_limit") + + # Get the safekeeper + sk = env.safekeepers[0] + + # Inject a failpoint to stop WAL backup + with sk.http_client() as http_cli: + http_cli.configure_failpoints([("backup-lsn-range-pausable", "pause")]) + + # Write some data that will exceed the 1KB limit. While the failpoint is active, this operation + # will hang as Postgres encounters safekeeper-returned errors and retries. + def run_hanging_insert(): + with closing(endpoint.connect()) as bg_conn: + with bg_conn.cursor() as bg_cur: + # This should generate more than 1KB of WAL + bg_cur.execute("create table t(key int, value text)") + bg_cur.execute("insert into t select generate_series(1,2000), 'payload'") + + # Start the inserts in a background thread + bg_thread = threading.Thread(target=run_hanging_insert) + bg_thread.start() + + # Wait for the error message to appear in the compute log + def error_logged(): + return endpoint.log_contains("WAL storage utilization exceeds configured limit") is not None + + wait_until(error_logged) + log.info("Found expected error message in compute log, resuming.") + + # Sanity check that the hanging insert is indeed still hanging. Otherwise means the circuit breaker we + # implemented didn't work as expected. + time.sleep(2) + assert bg_thread.is_alive(), ( + "The hanging insert somehow unblocked without resolving the disk usage issue!" + ) + + log.info("Restarting the safekeeper to resume WAL backup.") + # Restart the safekeeper with defaults to both clear the failpoint and resume the larger disk usage limit. + for sk in env.safekeepers: + sk.stop().start(extra_opts=[]) + + # The hanging insert will now complete. Join the background thread so that we can + # verify that the insert completed successfully. 
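+    # The generous join timeout covers the safekeeper restart plus however long the
+    # compute's retry loop needs to flush the WAL backlog accumulated while blocked.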
+ bg_thread.join(timeout=120) + assert not bg_thread.is_alive(), "Hanging insert did not complete after safekeeper restart" + log.info("Hanging insert unblocked.") + + # Verify we can continue to write as normal + with closing(endpoint.connect()) as conn: + with conn.cursor() as cur: + cur.execute("insert into t select generate_series(2001,3000), 'payload'") + + # Sanity check data correctness + with closing(endpoint.connect()) as conn: + with conn.cursor() as cur: + cur.execute("select count(*) from t") + # 2000 rows from first insert + 1000 from last insert + assert cur.fetchone() == (3000,) diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py index d8a7dc2a2b..1bad387a90 100644 --- a/test_runner/regress/test_wal_acceptor_async.py +++ b/test_runner/regress/test_wal_acceptor_async.py @@ -590,6 +590,13 @@ async def run_wal_truncation(env: NeonEnv, safekeeper_proto_version: int): @pytest.mark.parametrize("safekeeper_proto_version", [2, 3]) def test_wal_truncation(neon_env_builder: NeonEnvBuilder, safekeeper_proto_version: int): neon_env_builder.num_safekeepers = 3 + if safekeeper_proto_version == 2: + # On the legacy protocol, we don't support generations, which are part of + # `timelines_onto_safekeepers` + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": False, + } + env = neon_env_builder.init_start() asyncio.run(run_wal_truncation(env, safekeeper_proto_version)) @@ -713,6 +720,11 @@ async def run_quorum_sanity(env: NeonEnv): # we don't. def test_quorum_sanity(neon_env_builder: NeonEnvBuilder): neon_env_builder.num_safekeepers = 4 + + # The test fails basically always on the new mode. + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": False, + } env = neon_env_builder.init_start() asyncio.run(run_quorum_sanity(env)) diff --git a/test_runner/regress/test_wal_receiver.py b/test_runner/regress/test_wal_receiver.py index 0252b590cc..72fc58d761 100644 --- a/test_runner/regress/test_wal_receiver.py +++ b/test_runner/regress/test_wal_receiver.py @@ -13,43 +13,6 @@ if TYPE_CHECKING: from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder -# Checks that pageserver's walreceiver state is printed in the logs during WAL wait timeout. -# Ensures that walreceiver does not run without any data inserted and only starts after the insertion. -def test_pageserver_lsn_wait_error_start(neon_env_builder: NeonEnvBuilder): - # Trigger WAL wait timeout faster - neon_env_builder.pageserver_config_override = "wait_lsn_timeout = '1s'" - env = neon_env_builder.init_start() - env.pageserver.http_client() - - # In this test we force 'Timed out while waiting for WAL record error' while - # fetching basebackup and don't want any retries. 
- os.environ["NEON_COMPUTE_TESTING_BASEBACKUP_RETRIES"] = "1" - - tenant_id, timeline_id = env.create_tenant() - expected_timeout_error = f"Timed out while waiting for WAL record at LSN {future_lsn} to arrive" - env.pageserver.allowed_errors.append(f".*{expected_timeout_error}.*") - - try: - trigger_wait_lsn_timeout(env, tenant_id) - except Exception as e: - exception_string = str(e) - assert expected_timeout_error in exception_string, "Should time out during waiting for WAL" - assert "WalReceiver status: Not active" in exception_string, ( - "Walreceiver should not be active before any data writes" - ) - - insert_test_elements(env, tenant_id, start=0, count=1_000) - try: - trigger_wait_lsn_timeout(env, tenant_id) - except Exception as e: - exception_string = str(e) - assert expected_timeout_error in exception_string, "Should time out during waiting for WAL" - assert "WalReceiver status: Not active" not in exception_string, ( - "Should not be inactive anymore after INSERTs are made" - ) - assert "WalReceiver status" in exception_string, "But still should have some other status" - - # Checks that all active safekeepers are shown in pageserver's walreceiver state printed on WAL wait timeout. # Kills one of the safekeepers and ensures that only the active ones are printed in the state. def test_pageserver_lsn_wait_error_safekeeper_stop(neon_env_builder: NeonEnvBuilder): diff --git a/test_runner/sql_regress/expected/neon-event-triggers.out b/test_runner/sql_regress/expected/neon-event-triggers.out new file mode 100644 index 0000000000..3a62e67316 --- /dev/null +++ b/test_runner/sql_regress/expected/neon-event-triggers.out @@ -0,0 +1,90 @@ +create or replace function admin_proc() + returns event_trigger + language plpgsql as +$$ +begin + raise notice 'admin event trigger is executed for %', current_user; +end; +$$; +create role neon_superuser; +create role neon_admin login inherit createrole createdb in role neon_superuser; +grant create on schema public to neon_admin; +create database neondb with owner neon_admin; +grant all privileges on database neondb to neon_superuser; +create role neon_user; +grant create on schema public to neon_user; +create event trigger on_ddl1 on ddl_command_end +execute procedure admin_proc(); +set role neon_user; +-- check that non-privileged user can not change neon.event_triggers +set neon.event_triggers to false; +ERROR: permission denied to set neon.event_triggers +DETAIL: Only "neon_superuser" is allowed to set the GUC +-- Non-privileged neon user should not be able to create event trigers +create event trigger on_ddl2 on ddl_command_end +execute procedure admin_proc(); +ERROR: permission denied to create event trigger "on_ddl2" +HINT: Must be superuser to create an event trigger. 
+set role neon_admin; +-- neon_superuser should be able to create event trigers +create or replace function neon_proc() + returns event_trigger + language plpgsql as +$$ +begin + raise notice 'neon event trigger is executed for %', current_user; +end; +$$; +NOTICE: admin event trigger is executed for neon_admin +create event trigger on_ddl2 on ddl_command_end +execute procedure neon_proc(); +\c neondb neon_admin +create or replace function neondb_proc() + returns event_trigger + language plpgsql as +$$ +begin + raise notice 'neondb event trigger is executed for %', current_user; +end; +$$; +create or replace function neondb_secdef_proc() + returns event_trigger + language plpgsql + SECURITY DEFINER +as +$$ +begin + raise notice 'neondb secdef event trigger is executed for %', current_user; +end; +$$; +-- neon_admin (neon_superuser member) should be able to create event triggers +create event trigger on_ddl3 on ddl_command_end +execute procedure neondb_proc(); +create event trigger on_ddl4 on ddl_command_end +execute procedure neondb_secdef_proc(); +-- Check that event trigger is fired for neon_admin +create table t1(x integer); +NOTICE: neondb event trigger is executed for neon_admin +NOTICE: neondb secdef event trigger is executed for neon_admin +-- Check that event trigger can be skipped +set neon.event_triggers to false; +create table t2(x integer); +WARNING: Skipping Event Trigger: neon.event_triggers is false +WARNING: Skipping Event Trigger: neon.event_triggers is false +\c regression cloud_admin +-- Check that event triggers are not fired for superuser +create table t3(x integer); +NOTICE: admin event trigger is executed for cloud_admin +WARNING: Skipping Event Trigger +DETAIL: Event Trigger function "neon_proc" is owned by non-superuser role "neon_admin", and current_user "cloud_admin" is superuser +\c neondb cloud_admin +-- Check that user-defined event triggers are not fired for superuser +create table t4(x integer); +WARNING: Skipping Event Trigger +DETAIL: Event Trigger function "neondb_proc" is owned by non-superuser role "neon_admin", and current_user "cloud_admin" is superuser +WARNING: Skipping Event Trigger +DETAIL: Event Trigger function "neondb_secdef_proc" is owned by non-superuser role "neon_admin", and current_user "cloud_admin" is superuser +\c neondb neon_admin +-- Check that neon_admin can drop event triggers +drop event trigger on_ddl3; +drop event trigger on_ddl4; diff --git a/test_runner/sql_regress/expected/neon-subxacts.out b/test_runner/sql_regress/expected/neon-subxacts.out new file mode 100644 index 0000000000..5ed8cfcac9 --- /dev/null +++ b/test_runner/sql_regress/expected/neon-subxacts.out @@ -0,0 +1,21 @@ +DO $$ +DECLARE +i numeric; +BEGIN + create role somebody; + FOR i IN 1..1000000 LOOP + BEGIN + IF i % 1000 = 0 THEN + alter role somebody password 'welcome'; + ELSE + PERFORM 1; + END IF; + EXCEPTION WHEN OTHERS THEN + RAISE WARNING 'error'; + END; + IF I = 1000000 THEN + PERFORM pg_log_backend_memory_contexts(pg_backend_pid()); + END IF; + END LOOP; +END; +$$; diff --git a/test_runner/sql_regress/parallel_schedule b/test_runner/sql_regress/parallel_schedule index d9508d1c90..0ce9f0e28f 100644 --- a/test_runner/sql_regress/parallel_schedule +++ b/test_runner/sql_regress/parallel_schedule @@ -9,3 +9,5 @@ test: neon-rel-truncate test: neon-clog test: neon-test-utils test: neon-vacuum-full +test: neon-event-triggers +test: neon-subxacts diff --git a/test_runner/sql_regress/sql/neon-event-triggers.sql b/test_runner/sql_regress/sql/neon-event-triggers.sql 
new file mode 100644 index 0000000000..75365455dc --- /dev/null +++ b/test_runner/sql_regress/sql/neon-event-triggers.sql @@ -0,0 +1,96 @@ +create or replace function admin_proc() + returns event_trigger + language plpgsql as +$$ +begin + raise notice 'admin event trigger is executed for %', current_user; +end; +$$; + +create role neon_superuser; +create role neon_admin login inherit createrole createdb in role neon_superuser; +grant create on schema public to neon_admin; +create database neondb with owner neon_admin; +grant all privileges on database neondb to neon_superuser; + +create role neon_user; +grant create on schema public to neon_user; + +create event trigger on_ddl1 on ddl_command_end +execute procedure admin_proc(); + +set role neon_user; + +-- check that non-privileged user can not change neon.event_triggers +set neon.event_triggers to false; + +-- Non-privileged neon user should not be able to create event trigers +create event trigger on_ddl2 on ddl_command_end +execute procedure admin_proc(); + +set role neon_admin; + +-- neon_superuser should be able to create event trigers +create or replace function neon_proc() + returns event_trigger + language plpgsql as +$$ +begin + raise notice 'neon event trigger is executed for %', current_user; +end; +$$; + +create event trigger on_ddl2 on ddl_command_end +execute procedure neon_proc(); + +\c neondb neon_admin + +create or replace function neondb_proc() + returns event_trigger + language plpgsql as +$$ +begin + raise notice 'neondb event trigger is executed for %', current_user; +end; +$$; + +create or replace function neondb_secdef_proc() + returns event_trigger + language plpgsql + SECURITY DEFINER +as +$$ +begin + raise notice 'neondb secdef event trigger is executed for %', current_user; +end; +$$; + +-- neon_admin (neon_superuser member) should be able to create event triggers +create event trigger on_ddl3 on ddl_command_end +execute procedure neondb_proc(); + +create event trigger on_ddl4 on ddl_command_end +execute procedure neondb_secdef_proc(); + +-- Check that event trigger is fired for neon_admin +create table t1(x integer); + +-- Check that event trigger can be skipped +set neon.event_triggers to false; +create table t2(x integer); + +\c regression cloud_admin + +-- Check that event triggers are not fired for superuser +create table t3(x integer); + +\c neondb cloud_admin + +-- Check that user-defined event triggers are not fired for superuser +create table t4(x integer); + +\c neondb neon_admin + +-- Check that neon_admin can drop event triggers +drop event trigger on_ddl3; +drop event trigger on_ddl4; diff --git a/test_runner/sql_regress/sql/neon-subxacts.sql b/test_runner/sql_regress/sql/neon-subxacts.sql new file mode 100644 index 0000000000..5ed8cfcac9 --- /dev/null +++ b/test_runner/sql_regress/sql/neon-subxacts.sql @@ -0,0 +1,21 @@ +DO $$ +DECLARE +i numeric; +BEGIN + create role somebody; + FOR i IN 1..1000000 LOOP + BEGIN + IF i % 1000 = 0 THEN + alter role somebody password 'welcome'; + ELSE + PERFORM 1; + END IF; + EXCEPTION WHEN OTHERS THEN + RAISE WARNING 'error'; + END; + IF I = 1000000 THEN + PERFORM pg_log_backend_memory_contexts(pg_backend_pid()); + END IF; + END LOOP; +END; +$$; diff --git a/vendor/postgres-v14 b/vendor/postgres-v14 index 6770bc2513..9085654ee8 160000 --- a/vendor/postgres-v14 +++ b/vendor/postgres-v14 @@ -1 +1 @@ -Subproject commit 6770bc251301ef40c66f7ecb731741dc435b5051 +Subproject commit 9085654ee8022d5cc4ca719380a1dc53e5e3246f diff --git a/vendor/revisions.json 
b/vendor/revisions.json index 12d5499ddb..b260698c86 100644 --- a/vendor/revisions.json +++ b/vendor/revisions.json @@ -13,6 +13,6 @@ ], "v14": [ "14.18", - "6770bc251301ef40c66f7ecb731741dc435b5051" + "9085654ee8022d5cc4ca719380a1dc53e5e3246f" ] } diff --git a/workspace_hack/Cargo.toml b/workspace_hack/Cargo.toml index 2b07889871..fb10e27d2a 100644 --- a/workspace_hack/Cargo.toml +++ b/workspace_hack/Cargo.toml @@ -20,8 +20,7 @@ anstream = { version = "0.6" } anyhow = { version = "1", features = ["backtrace"] } axum = { version = "0.8", features = ["ws"] } axum-core = { version = "0.5", default-features = false, features = ["tracing"] } -base64-594e8ee84c453af0 = { package = "base64", version = "0.13", features = ["alloc"] } -base64-647d43efb71741da = { package = "base64", version = "0.21" } +base64 = { version = "0.21" } base64ct = { version = "1", default-features = false, features = ["std"] } bytes = { version = "1", features = ["serde"] } camino = { version = "1", default-features = false, features = ["serde1"] } @@ -98,7 +97,7 @@ time = { version = "0.3", features = ["macros", "serde-well-known"] } tokio = { version = "1", features = ["full", "test-util"] } tokio-rustls = { version = "0.26", default-features = false, features = ["logging", "ring", "tls12"] } tokio-stream = { version = "0.1", features = ["net"] } -tokio-util = { version = "0.7", features = ["codec", "compat", "io", "rt"] } +tokio-util = { version = "0.7", features = ["codec", "compat", "io-util", "rt"] } toml_edit = { version = "0.22", features = ["serde"] } tower = { version = "0.5", default-features = false, features = ["balance", "buffer", "limit", "log"] } tracing = { version = "0.1", features = ["log"] } @@ -114,10 +113,13 @@ zstd-sys = { version = "2", default-features = false, features = ["legacy", "std [build-dependencies] ahash = { version = "0.8" } +anstream = { version = "0.6" } anyhow = { version = "1", features = ["backtrace"] } bytes = { version = "1", features = ["serde"] } cc = { version = "1", default-features = false, features = ["parallel"] } chrono = { version = "0.4", default-features = false, features = ["clock", "serde", "wasmbind"] } +clap = { version = "4", features = ["derive", "env", "string"] } +clap_builder = { version = "4", default-features = false, features = ["color", "env", "help", "std", "string", "suggestions", "usage"] } either = { version = "1" } getrandom = { version = "0.2", default-features = false, features = ["std"] } half = { version = "2", default-features = false, features = ["num-traits"] } @@ -145,6 +147,7 @@ regex = { version = "1" } regex-automata = { version = "0.4", default-features = false, features = ["dfa-onepass", "hybrid", "meta", "nfa-backtrack", "perf-inline", "perf-literal", "unicode"] } regex-syntax = { version = "0.8" } serde = { version = "1", features = ["alloc", "derive"] } +serde_json = { version = "1", features = ["alloc", "raw_value"] } syn = { version = "2", features = ["extra-traits", "fold", "full", "visit", "visit-mut"] } time-macros = { version = "0.2", default-features = false, features = ["formatting", "parsing", "serde"] } toml_edit = { version = "0.22", features = ["serde"] }