diff --git a/.github/workflows/_build-and-test-locally.yml b/.github/workflows/_build-and-test-locally.yml index 3f66f41ef2..e2203a38ec 100644 --- a/.github/workflows/_build-and-test-locally.yml +++ b/.github/workflows/_build-and-test-locally.yml @@ -104,11 +104,10 @@ jobs: # Set some environment variables used by all the steps. # - # CARGO_FLAGS is extra options to pass to "cargo build", "cargo test" etc. - # It also includes --features, if any + # CARGO_FLAGS is extra options to pass to all "cargo" subcommands. # - # CARGO_FEATURES is passed to "cargo metadata". It is separate from CARGO_FLAGS, - # because "cargo metadata" doesn't accept --release or --debug options + # CARGO_PROFILE is passed to "cargo build", "cargo test" etc, but not to + # "cargo metadata", because it doesn't accept --release or --debug options. # # We run tests with addtional features, that are turned off by default (e.g. in release builds), see # corresponding Cargo.toml files for their descriptions. @@ -117,16 +116,16 @@ jobs: ARCH: ${{ inputs.arch }} SANITIZERS: ${{ inputs.sanitizers }} run: | - CARGO_FEATURES="--features testing" + CARGO_FLAGS="--locked --features testing" if [[ $BUILD_TYPE == "debug" && $ARCH == 'x64' ]]; then cov_prefix="scripts/coverage --profraw-prefix=$GITHUB_JOB --dir=/tmp/coverage run" - CARGO_FLAGS="--locked" + CARGO_PROFILE="" elif [[ $BUILD_TYPE == "debug" ]]; then cov_prefix="" - CARGO_FLAGS="--locked" + CARGO_PROFILE="" elif [[ $BUILD_TYPE == "release" ]]; then cov_prefix="" - CARGO_FLAGS="--locked --release" + CARGO_PROFILE="--release" fi if [[ $SANITIZERS == 'enabled' ]]; then make_vars="WITH_SANITIZERS=yes" @@ -136,8 +135,8 @@ jobs: { echo "cov_prefix=${cov_prefix}" echo "make_vars=${make_vars}" - echo "CARGO_FEATURES=${CARGO_FEATURES}" echo "CARGO_FLAGS=${CARGO_FLAGS}" + echo "CARGO_PROFILE=${CARGO_PROFILE}" echo "CARGO_HOME=${GITHUB_WORKSPACE}/.cargo" } >> $GITHUB_ENV @@ -189,34 +188,18 @@ jobs: path: pg_install/v17 key: v1-${{ runner.os }}-${{ runner.arch }}-${{ inputs.build-type }}-pg-${{ steps.pg_v17_rev.outputs.pg_rev }}-bookworm-${{ hashFiles('Makefile', 'build-tools.Dockerfile') }} - - name: Build postgres v14 - if: steps.cache_pg_14.outputs.cache-hit != 'true' - run: mold -run make ${make_vars} postgres-v14 -j$(nproc) - - - name: Build postgres v15 - if: steps.cache_pg_15.outputs.cache-hit != 'true' - run: mold -run make ${make_vars} postgres-v15 -j$(nproc) - - - name: Build postgres v16 - if: steps.cache_pg_16.outputs.cache-hit != 'true' - run: mold -run make ${make_vars} postgres-v16 -j$(nproc) - - - name: Build postgres v17 - if: steps.cache_pg_17.outputs.cache-hit != 'true' - run: mold -run make ${make_vars} postgres-v17 -j$(nproc) - - - name: Build neon extensions - run: mold -run make ${make_vars} neon-pg-ext -j$(nproc) + - name: Build all + # Note: the Makefile picks up BUILD_TYPE and CARGO_PROFILE from the env variables + run: mold -run make ${make_vars} all -j$(nproc) CARGO_BUILD_FLAGS="$CARGO_FLAGS" - name: Build walproposer-lib run: mold -run make ${make_vars} walproposer-lib -j$(nproc) - - name: Run cargo build - env: - WITH_TESTS: ${{ inputs.sanitizers != 'enabled' && '--tests' || '' }} + - name: Build unit tests + if: inputs.sanitizers != 'enabled' run: | export ASAN_OPTIONS=detect_leaks=0 - ${cov_prefix} mold -run cargo build $CARGO_FLAGS $CARGO_FEATURES --bins ${WITH_TESTS} + ${cov_prefix} mold -run cargo build $CARGO_FLAGS $CARGO_PROFILE --tests # Do install *before* running rust tests because they might recompile the # binaries with different 
features/flags. @@ -228,7 +211,7 @@ jobs: # Install target binaries mkdir -p /tmp/neon/bin/ binaries=$( - ${cov_prefix} cargo metadata $CARGO_FEATURES --format-version=1 --no-deps | + ${cov_prefix} cargo metadata $CARGO_FLAGS --format-version=1 --no-deps | jq -r '.packages[].targets[] | select(.kind | index("bin")) | .name' ) for bin in $binaries; do @@ -245,7 +228,7 @@ jobs: mkdir -p /tmp/neon/test_bin/ test_exe_paths=$( - ${cov_prefix} cargo test $CARGO_FLAGS $CARGO_FEATURES --message-format=json --no-run | + ${cov_prefix} cargo test $CARGO_FLAGS $CARGO_PROFILE --message-format=json --no-run | jq -r '.executable | select(. != null)' ) for bin in $test_exe_paths; do @@ -279,10 +262,10 @@ jobs: export LD_LIBRARY_PATH #nextest does not yet support running doctests - ${cov_prefix} cargo test --doc $CARGO_FLAGS $CARGO_FEATURES + ${cov_prefix} cargo test --doc $CARGO_FLAGS $CARGO_PROFILE # run all non-pageserver tests - ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E '!package(pageserver)' + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_PROFILE -E '!package(pageserver)' # run pageserver tests # (When developing new pageserver features gated by config fields, we commonly make the rust @@ -291,13 +274,13 @@ jobs: # pageserver tests from non-pageserver tests cuts down the time it takes for this CI step.) NEON_PAGESERVER_UNIT_TEST_VIRTUAL_FILE_IOENGINE=tokio-epoll-uring \ ${cov_prefix} \ - cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(pageserver)' + cargo nextest run $CARGO_FLAGS $CARGO_PROFILE -E 'package(pageserver)' # Run separate tests for real S3 export ENABLE_REAL_S3_REMOTE_STORAGE=nonempty export REMOTE_STORAGE_S3_BUCKET=neon-github-ci-tests export REMOTE_STORAGE_S3_REGION=eu-central-1 - ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(remote_storage)' -E 'test(test_real_s3)' + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_PROFILE -E 'package(remote_storage)' -E 'test(test_real_s3)' # Run separate tests for real Azure Blob Storage # XXX: replace region with `eu-central-1`-like region @@ -306,17 +289,17 @@ jobs: export AZURE_STORAGE_ACCESS_KEY="${{ secrets.AZURE_STORAGE_ACCESS_KEY_DEV }}" export REMOTE_STORAGE_AZURE_CONTAINER="${{ vars.REMOTE_STORAGE_AZURE_CONTAINER }}" export REMOTE_STORAGE_AZURE_REGION="${{ vars.REMOTE_STORAGE_AZURE_REGION }}" - ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_FEATURES -E 'package(remote_storage)' -E 'test(test_real_azure)' + ${cov_prefix} cargo nextest run $CARGO_FLAGS $CARGO_PROFILE -E 'package(remote_storage)' -E 'test(test_real_azure)' - name: Install postgres binaries run: | # Use tar to copy files matching the pattern, preserving the paths in the destionation tar c \ pg_install/v* \ - pg_install/build/*/src/test/regress/*.so \ - pg_install/build/*/src/test/regress/pg_regress \ - pg_install/build/*/src/test/isolation/isolationtester \ - pg_install/build/*/src/test/isolation/pg_isolation_regress \ + build/*/src/test/regress/*.so \ + build/*/src/test/regress/pg_regress \ + build/*/src/test/isolation/isolationtester \ + build/*/src/test/isolation/pg_isolation_regress \ | tar x -C /tmp/neon - name: Upload Neon artifact diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml index 0f7fa3e813..160c3d05bc 100644 --- a/.github/workflows/build-macos.yml +++ b/.github/workflows/build-macos.yml @@ -110,7 +110,7 @@ jobs: build-walproposer-lib: if: | - inputs.pg_versions != '[]' || inputs.rebuild_everything || + contains(inputs.pg_versions, 'v17') || 
inputs.rebuild_everything || contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos') || contains(github.event.pull_request.labels.*.name, 'run-extra-build-*') || github.ref_name == 'main' @@ -144,7 +144,7 @@ jobs: id: cache_walproposer_lib uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 with: - path: pg_install/build/walproposer-lib + path: build/walproposer-lib key: v1-${{ runner.os }}-${{ runner.arch }}-${{ env.BUILD_TYPE }}-walproposer_lib-v17-${{ steps.pg_rev.outputs.pg_rev }}-${{ hashFiles('Makefile') }} - name: Checkout submodule vendor/postgres-v17 @@ -169,11 +169,11 @@ jobs: run: make walproposer-lib -j$(sysctl -n hw.ncpu) - - name: Upload "pg_install/build/walproposer-lib" artifact + - name: Upload "build/walproposer-lib" artifact uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 with: - name: pg_install--build--walproposer-lib - path: pg_install/build/walproposer-lib + name: build--walproposer-lib + path: build/walproposer-lib # The artifact is supposed to be used by the next job in the same workflow, # so there’s no need to store it for too long. retention-days: 1 @@ -226,11 +226,11 @@ jobs: name: pg_install--v17 path: pg_install/v17 - - name: Download "pg_install/build/walproposer-lib" artifact + - name: Download "build/walproposer-lib" artifact uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 with: - name: pg_install--build--walproposer-lib - path: pg_install/build/walproposer-lib + name: build--walproposer-lib + path: build/walproposer-lib # `actions/download-artifact` doesn't preserve permissions: # https://github.com/actions/download-artifact?tab=readme-ov-file#permission-loss diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 7faaed49c1..94f768719f 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -670,7 +670,7 @@ jobs: ghcr.io/neondatabase/neon:${{ needs.meta.outputs.build-tag }}-bookworm-arm64 compute-node-image-arch: - needs: [ check-permissions, build-build-tools-image, meta ] + needs: [ check-permissions, meta ] if: ${{ contains(fromJSON('["push-main", "pr", "compute-rc-pr"]'), needs.meta.outputs.run-kind) }} permissions: id-token: write # aws-actions/configure-aws-credentials @@ -743,7 +743,6 @@ jobs: GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }} PG_VERSION=${{ matrix.version.pg }} BUILD_TAG=${{ needs.meta.outputs.release-tag || needs.meta.outputs.build-tag }} - TAG=${{ needs.build-build-tools-image.outputs.image-tag }}-${{ matrix.version.debian }} DEBIAN_VERSION=${{ matrix.version.debian }} provenance: false push: true @@ -763,7 +762,6 @@ jobs: GIT_VERSION=${{ github.event.pull_request.head.sha || github.sha }} PG_VERSION=${{ matrix.version.pg }} BUILD_TAG=${{ needs.meta.outputs.release-tag || needs.meta.outputs.build-tag }} - TAG=${{ needs.build-build-tools-image.outputs.image-tag }}-${{ matrix.version.debian }} DEBIAN_VERSION=${{ matrix.version.debian }} provenance: false push: true diff --git a/.github/workflows/proxy-benchmark.yml b/.github/workflows/proxy-benchmark.yml new file mode 100644 index 0000000000..75ecacaced --- /dev/null +++ b/.github/workflows/proxy-benchmark.yml @@ -0,0 +1,83 @@ +name: Periodic proxy performance test on unit-perf hetzner runner + +on: + push: # TODO: remove after testing + branches: + - test-proxy-bench # Runs on pushes to branches starting with test-proxy-bench + # schedule: + # * is a special character in YAML so you 
have to quote this string + # ┌───────────── minute (0 - 59) + # │ ┌───────────── hour (0 - 23) + # │ │ ┌───────────── day of the month (1 - 31) + # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) + # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) + # - cron: '0 5 * * *' # Runs at 5 UTC once a day + workflow_dispatch: # adds an ability to run this manually + +defaults: + run: + shell: bash -euo pipefail {0} + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: false + +permissions: + contents: read + +jobs: + run_periodic_proxybench_test: + permissions: + id-token: write # aws-actions/configure-aws-credentials + statuses: write + contents: write + pull-requests: write + runs-on: [self-hosted, unit-perf] + timeout-minutes: 60 # 1h timeout + container: + image: ghcr.io/neondatabase/build-tools:pinned-bookworm + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + options: --init + steps: + - name: Checkout proxy-bench Repo + uses: actions/checkout@v4 + with: + repository: neondatabase/proxy-bench + path: proxy-bench + + - name: Set up the environment which depends on $RUNNER_TEMP on nvme drive + id: set-env + shell: bash -euxo pipefail {0} + run: | + PROXY_BENCH_PATH=$(realpath ./proxy-bench) + { + echo "PROXY_BENCH_PATH=$PROXY_BENCH_PATH" + echo "NEON_DIR=${RUNNER_TEMP}/neon" + echo "TEST_OUTPUT=${PROXY_BENCH_PATH}/test_output" + echo "" + } >> "$GITHUB_ENV" + + - name: Run proxy-bench + run: ./${PROXY_BENCH_PATH}/run.sh + + - name: Ingest Bench Results # neon repo script + if: success() + run: | + mkdir -p $TEST_OUTPUT + python $NEON_DIR/scripts/proxy_bench_results_ingest.py --out $TEST_OUTPUT + + - name: Push Metrics to Proxy perf database + if: success() + env: + PERF_TEST_RESULT_CONNSTR: "${{ secrets.PROXY_TEST_RESULT_CONNSTR }}" + REPORT_FROM: $TEST_OUTPUT + run: $NEON_DIR/scripts/generate_and_push_perf_report.sh + + - name: Docker cleanup + run: docker compose down + + - name: Notify Failure + if: failure() + run: echo "Proxy bench job failed" && exit 1 \ No newline at end of file diff --git a/.gitignore b/.gitignore index e2e26e0eda..8fc776e1ad 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ /artifact_cache +/build /pg_install /target /tmp_check diff --git a/Cargo.lock b/Cargo.lock index 50a8cac8a0..e8047c03f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1318,6 +1318,7 @@ dependencies = [ "p256 0.13.2", "postgres", "postgres_initdb", + "postgres_versioninfo", "regex", "remote_storage", "reqwest", @@ -4296,6 +4297,7 @@ dependencies = [ "tokio-util", "tonic 0.13.1", "tracing", + "url", "utils", "workspace_hack", ] @@ -4375,6 +4377,7 @@ dependencies = [ "postgres_backend", "postgres_connection", "postgres_ffi", + "postgres_ffi_types", "postgres_initdb", "posthog_client_lite", "pprof", @@ -4444,7 +4447,8 @@ dependencies = [ "nix 0.30.1", "once_cell", "postgres_backend", - "postgres_ffi", + "postgres_ffi_types", + "postgres_versioninfo", "rand 0.8.5", "remote_storage", "reqwest", @@ -4468,6 +4472,7 @@ dependencies = [ "futures", "http-utils", "pageserver_api", + "postgres_versioninfo", "reqwest", "serde", "thiserror 1.0.69", @@ -4512,6 +4517,8 @@ dependencies = [ "pageserver_api", "postgres_ffi", "prost 0.13.5", + "strum", + "strum_macros", "thiserror 1.0.69", "tokio", "tonic 0.13.1", @@ -4933,6 +4940,8 @@ dependencies = [ "memoffset 0.9.0", "once_cell", "postgres", + "postgres_ffi_types", + "postgres_versioninfo", "pprof", "regex", "serde", @@ -4941,17 +4950,37 @@ dependencies = [ "utils", ] +[[package]] +name = 
"postgres_ffi_types" +version = "0.1.0" +dependencies = [ + "thiserror 1.0.69", + "workspace_hack", +] + [[package]] name = "postgres_initdb" version = "0.1.0" dependencies = [ "anyhow", "camino", + "postgres_versioninfo", "thiserror 1.0.69", "tokio", "workspace_hack", ] +[[package]] +name = "postgres_versioninfo" +version = "0.1.0" +dependencies = [ + "anyhow", + "serde", + "serde_repr", + "thiserror 1.0.69", + "workspace_hack", +] + [[package]] name = "posthog_client_lite" version = "0.1.0" @@ -6145,6 +6174,7 @@ dependencies = [ "postgres-protocol", "postgres_backend", "postgres_ffi", + "postgres_versioninfo", "pprof", "pq_proto", "rand 0.8.5", @@ -6189,6 +6219,7 @@ dependencies = [ "const_format", "pageserver_api", "postgres_ffi", + "postgres_versioninfo", "pq_proto", "serde", "serde_json", @@ -6512,6 +6543,17 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "serde_spanned" version = "0.6.6" @@ -6838,6 +6880,7 @@ dependencies = [ "hex", "http-utils", "humantime", + "humantime-serde", "hyper 0.14.30", "itertools 0.10.5", "json-structural-diff", @@ -6848,6 +6891,7 @@ dependencies = [ "pageserver_api", "pageserver_client", "postgres_connection", + "posthog_client_lite", "rand 0.8.5", "regex", "reqwest", @@ -7645,6 +7689,7 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", + "flate2", "h2 0.4.4", "http 1.1.0", "http-body 1.0.0", @@ -7664,6 +7709,7 @@ dependencies = [ "tower-layer", "tower-service", "tracing", + "zstd", ] [[package]] @@ -8243,6 +8289,7 @@ dependencies = [ "futures", "pageserver_api", "postgres_ffi", + "postgres_ffi_types", "pprof", "prost 0.13.5", "remote_storage", diff --git a/Cargo.toml b/Cargo.toml index 597d607393..9a52c2bcc9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,6 +22,8 @@ members = [ "libs/http-utils", "libs/pageserver_api", "libs/postgres_ffi", + "libs/postgres_ffi_types", + "libs/postgres_versioninfo", "libs/safekeeper_api", "libs/desim", "libs/neon-shmem", @@ -173,6 +175,7 @@ serde_json = "1" serde_path_to_error = "0.1" serde_with = { version = "3", features = [ "base64" ] } serde_assert = "0.5.0" +serde_repr = "0.1.20" sha2 = "0.10.2" signal-hook = "0.3" smallvec = "1.11" @@ -199,7 +202,7 @@ tokio-tar = "0.3" tokio-util = { version = "0.7.10", features = ["io", "rt"] } toml = "0.8" toml_edit = "0.22" -tonic = { version = "0.13.1", default-features = false, features = ["channel", "codegen", "prost", "router", "server", "tls-ring", "tls-native-roots"] } +tonic = { version = "0.13.1", default-features = false, features = ["channel", "codegen", "gzip", "prost", "router", "server", "tls-ring", "tls-native-roots", "zstd"] } tonic-reflection = { version = "0.13.1", features = ["server"] } tower = { version = "0.5.2", default-features = false } tower-http = { version = "0.6.2", features = ["auth", "request-id", "trace"] } @@ -261,6 +264,8 @@ pageserver_page_api = { path = "./pageserver/page_api" } postgres_backend = { version = "0.1", path = "./libs/postgres_backend/" } postgres_connection = { version = "0.1", path = "./libs/postgres_connection/" } postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" } +postgres_ffi_types = { version = "0.1", path = "./libs/postgres_ffi_types/" } +postgres_versioninfo = { version = "0.1", path = "./libs/postgres_versioninfo/" } 
postgres_initdb = { path = "./libs/postgres_initdb" } posthog_client_lite = { version = "0.1", path = "./libs/posthog_client_lite" } pq_proto = { version = "0.1", path = "./libs/pq_proto/" } diff --git a/Dockerfile b/Dockerfile index 0b7ef491fd..69657067de 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,8 +5,6 @@ ARG REPOSITORY=ghcr.io/neondatabase ARG IMAGE=build-tools ARG TAG=pinned -ARG DEFAULT_PG_VERSION=17 -ARG STABLE_PG_VERSION=16 ARG DEBIAN_VERSION=bookworm ARG DEBIAN_FLAVOR=${DEBIAN_VERSION}-slim @@ -47,7 +45,6 @@ COPY --chown=nonroot scripts/ninstall.sh scripts/ninstall.sh ENV BUILD_TYPE=release RUN set -e \ && mold -run make -j $(nproc) -s neon-pg-ext \ - && rm -rf pg_install/build \ && tar -C pg_install -czf /home/nonroot/postgres_install.tar.gz . # Prepare cargo-chef recipe @@ -63,14 +60,11 @@ FROM $REPOSITORY/$IMAGE:$TAG AS build WORKDIR /home/nonroot ARG GIT_VERSION=local ARG BUILD_TAG -ARG STABLE_PG_VERSION COPY --from=pg-build /home/nonroot/pg_install/v14/include/postgresql/server pg_install/v14/include/postgresql/server COPY --from=pg-build /home/nonroot/pg_install/v15/include/postgresql/server pg_install/v15/include/postgresql/server COPY --from=pg-build /home/nonroot/pg_install/v16/include/postgresql/server pg_install/v16/include/postgresql/server COPY --from=pg-build /home/nonroot/pg_install/v17/include/postgresql/server pg_install/v17/include/postgresql/server -COPY --from=pg-build /home/nonroot/pg_install/v16/lib pg_install/v16/lib -COPY --from=pg-build /home/nonroot/pg_install/v17/lib pg_install/v17/lib COPY --from=plan /home/nonroot/recipe.json recipe.json ARG ADDITIONAL_RUSTFLAGS="" @@ -97,7 +91,6 @@ RUN set -e \ # Build final image # FROM $BASE_IMAGE_SHA -ARG DEFAULT_PG_VERSION WORKDIR /data RUN set -e \ @@ -107,8 +100,6 @@ RUN set -e \ libreadline-dev \ libseccomp-dev \ ca-certificates \ - # System postgres for use with client libraries (e.g. in storage controller) - postgresql-15 \ openssl \ unzip \ curl \ diff --git a/Makefile b/Makefile index 0911465fb8..d39b9b68c8 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,18 @@ ROOT_PROJECT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) -# Where to install Postgres, default is ./pg_install, maybe useful for package managers +# Where to install Postgres, default is ./pg_install, maybe useful for package +# managers. POSTGRES_INSTALL_DIR ?= $(ROOT_PROJECT_DIR)/pg_install/ +# CARGO_BUILD_FLAGS: Extra flags to pass to `cargo build`. `--locked` +# and `--features testing` are popular examples. +# +# CARGO_PROFILE: You can also set this to override the cargo profile to +# use. By default, it is derived from BUILD_TYPE. + +# All intermediate build artifacts are stored here. 
+BUILD_DIR := build + ICU_PREFIX_DIR := /usr/local/icu # @@ -16,12 +26,12 @@ ifeq ($(BUILD_TYPE),release) PG_CONFIGURE_OPTS = --enable-debug --with-openssl PG_CFLAGS += -O2 -g3 $(CFLAGS) PG_LDFLAGS = $(LDFLAGS) - # Unfortunately, `--profile=...` is a nightly feature - CARGO_BUILD_FLAGS += --release + CARGO_PROFILE ?= --profile=release else ifeq ($(BUILD_TYPE),debug) PG_CONFIGURE_OPTS = --enable-debug --with-openssl --enable-cassert --enable-depend PG_CFLAGS += -O0 -g3 $(CFLAGS) PG_LDFLAGS = $(LDFLAGS) + CARGO_PROFILE ?= --profile=dev else $(error Bad build type '$(BUILD_TYPE)', see Makefile for options) endif @@ -93,7 +103,7 @@ all: neon postgres neon-pg-ext .PHONY: neon neon: postgres-headers walproposer-lib cargo-target-dir +@echo "Compiling Neon" - $(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS) + $(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS) $(CARGO_PROFILE) .PHONY: cargo-target-dir cargo-target-dir: # https://github.com/rust-lang/cargo/issues/14281 @@ -104,21 +114,20 @@ cargo-target-dir: # Some rules are duplicated for Postgres v14 and 15. We may want to refactor # to avoid the duplication in the future, but it's tolerable for now. # -$(POSTGRES_INSTALL_DIR)/build/%/config.status: - - mkdir -p $(POSTGRES_INSTALL_DIR) - test -e $(POSTGRES_INSTALL_DIR)/CACHEDIR.TAG || echo "$(CACHEDIR_TAG_CONTENTS)" > $(POSTGRES_INSTALL_DIR)/CACHEDIR.TAG +$(BUILD_DIR)/%/config.status: + mkdir -p $(BUILD_DIR) + test -e $(BUILD_DIR)/CACHEDIR.TAG || echo "$(CACHEDIR_TAG_CONTENTS)" > $(BUILD_DIR)/CACHEDIR.TAG +@echo "Configuring Postgres $* build" @test -s $(ROOT_PROJECT_DIR)/vendor/postgres-$*/configure || { \ echo "\nPostgres submodule not found in $(ROOT_PROJECT_DIR)/vendor/postgres-$*/, execute "; \ echo "'git submodule update --init --recursive --depth 2 --progress .' in project root.\n"; \ exit 1; } - mkdir -p $(POSTGRES_INSTALL_DIR)/build/$* + mkdir -p $(BUILD_DIR)/$* VERSION=$*; \ EXTRA_VERSION=$$(cd $(ROOT_PROJECT_DIR)/vendor/postgres-$$VERSION && git rev-parse HEAD); \ - (cd $(POSTGRES_INSTALL_DIR)/build/$$VERSION && \ + (cd $(BUILD_DIR)/$$VERSION && \ env PATH="$(EXTRA_PATH_OVERRIDES):$$PATH" $(ROOT_PROJECT_DIR)/vendor/postgres-$$VERSION/configure \ CFLAGS='$(PG_CFLAGS)' LDFLAGS='$(PG_LDFLAGS)' \ $(PG_CONFIGURE_OPTS) --with-extra-version=" ($$EXTRA_VERSION)" \ @@ -130,96 +139,52 @@ $(POSTGRES_INSTALL_DIR)/build/%/config.status: # the "build-all-versions" entry points) where direct mention of PostgreSQL # versions is used. 
.PHONY: postgres-configure-v17 -postgres-configure-v17: $(POSTGRES_INSTALL_DIR)/build/v17/config.status +postgres-configure-v17: $(BUILD_DIR)/v17/config.status .PHONY: postgres-configure-v16 -postgres-configure-v16: $(POSTGRES_INSTALL_DIR)/build/v16/config.status +postgres-configure-v16: $(BUILD_DIR)/v16/config.status .PHONY: postgres-configure-v15 -postgres-configure-v15: $(POSTGRES_INSTALL_DIR)/build/v15/config.status +postgres-configure-v15: $(BUILD_DIR)/v15/config.status .PHONY: postgres-configure-v14 -postgres-configure-v14: $(POSTGRES_INSTALL_DIR)/build/v14/config.status +postgres-configure-v14: $(BUILD_DIR)/v14/config.status # Install the PostgreSQL header files into $(POSTGRES_INSTALL_DIR)//include .PHONY: postgres-headers-% postgres-headers-%: postgres-configure-% +@echo "Installing PostgreSQL $* headers" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/include MAKELEVEL=0 install + $(MAKE) -C $(BUILD_DIR)/$*/src/include MAKELEVEL=0 install # Compile and install PostgreSQL .PHONY: postgres-% postgres-%: postgres-configure-% \ postgres-headers-% # to prevent `make install` conflicts with neon's `postgres-headers` +@echo "Compiling PostgreSQL $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 install - +@echo "Compiling libpq $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/interfaces/libpq install + $(MAKE) -C $(BUILD_DIR)/$* MAKELEVEL=0 install +@echo "Compiling pg_prewarm $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_prewarm install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pg_prewarm install +@echo "Compiling pg_buffercache $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pg_buffercache install +@echo "Compiling pg_visibility $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_visibility install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pg_visibility install +@echo "Compiling pageinspect $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pageinspect install +@echo "Compiling pg_trgm $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_trgm install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/pg_trgm install +@echo "Compiling amcheck $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/amcheck install + $(MAKE) -C $(BUILD_DIR)/$*/contrib/amcheck install +@echo "Compiling test_decoding $*" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/test_decoding install - -.PHONY: postgres-clean-% -postgres-clean-%: - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 clean - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pg_buffercache clean - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/contrib/pageinspect clean - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/interfaces/libpq clean + $(MAKE) -C $(BUILD_DIR)/$*/contrib/test_decoding install .PHONY: postgres-check-% postgres-check-%: postgres-% - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$* MAKELEVEL=0 check + $(MAKE) -C $(BUILD_DIR)/$* MAKELEVEL=0 check .PHONY: neon-pg-ext-% neon-pg-ext-%: postgres-% - +@echo "Compiling neon $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-$* + +@echo "Compiling neon-specific Postgres extensions for $*" + mkdir -p $(BUILD_DIR)/pgxn-$* $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install - +@echo "Compiling neon_walredo $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* - $(MAKE) 
PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile install - +@echo "Compiling neon_rmgr $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-rmgr-$* - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-rmgr-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_rmgr/Makefile install - +@echo "Compiling neon_test_utils $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install - +@echo "Compiling neon_utils $*" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile install - -.PHONY: neon-pg-clean-ext-% -neon-pg-clean-ext-%: - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile clean - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile clean - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-utils-$* \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon_utils/Makefile clean + -C $(BUILD_DIR)/pgxn-$*\ + -f $(ROOT_PROJECT_DIR)/pgxn/Makefile install # Build walproposer as a static library. walproposer source code is located # in the pgxn/neon directory. 
@@ -233,15 +198,15 @@ neon-pg-clean-ext-%: .PHONY: walproposer-lib walproposer-lib: neon-pg-ext-v17 +@echo "Compiling walproposer-lib" - mkdir -p $(POSTGRES_INSTALL_DIR)/build/walproposer-lib + mkdir -p $(BUILD_DIR)/walproposer-lib $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v17/bin/pg_config COPT='$(COPT)' \ - -C $(POSTGRES_INSTALL_DIR)/build/walproposer-lib \ + -C $(BUILD_DIR)/walproposer-lib \ -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile walproposer-lib - cp $(POSTGRES_INSTALL_DIR)/v17/lib/libpgport.a $(POSTGRES_INSTALL_DIR)/build/walproposer-lib - cp $(POSTGRES_INSTALL_DIR)/v17/lib/libpgcommon.a $(POSTGRES_INSTALL_DIR)/build/walproposer-lib - $(AR) d $(POSTGRES_INSTALL_DIR)/build/walproposer-lib/libpgport.a \ + cp $(POSTGRES_INSTALL_DIR)/v17/lib/libpgport.a $(BUILD_DIR)/walproposer-lib + cp $(POSTGRES_INSTALL_DIR)/v17/lib/libpgcommon.a $(BUILD_DIR)/walproposer-lib + $(AR) d $(BUILD_DIR)/walproposer-lib/libpgport.a \ pg_strong_random.o - $(AR) d $(POSTGRES_INSTALL_DIR)/build/walproposer-lib/libpgcommon.a \ + $(AR) d $(BUILD_DIR)/walproposer-lib/libpgcommon.a \ checksum_helper.o \ cryptohash_openssl.o \ hmac_openssl.o \ @@ -249,16 +214,10 @@ walproposer-lib: neon-pg-ext-v17 parse_manifest.o \ scram-common.o ifeq ($(UNAME_S),Linux) - $(AR) d $(POSTGRES_INSTALL_DIR)/build/walproposer-lib/libpgcommon.a \ + $(AR) d $(BUILD_DIR)/walproposer-lib/libpgcommon.a \ pg_crc32c.o endif -.PHONY: walproposer-lib-clean -walproposer-lib-clean: - $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v17/bin/pg_config \ - -C $(POSTGRES_INSTALL_DIR)/build/walproposer-lib \ - -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean - .PHONY: neon-pg-ext neon-pg-ext: \ neon-pg-ext-v14 \ @@ -266,13 +225,6 @@ neon-pg-ext: \ neon-pg-ext-v16 \ neon-pg-ext-v17 -.PHONY: neon-pg-clean-ext -neon-pg-clean-ext: \ - neon-pg-clean-ext-v14 \ - neon-pg-clean-ext-v15 \ - neon-pg-clean-ext-v16 \ - neon-pg-clean-ext-v17 - # shorthand to build all Postgres versions .PHONY: postgres postgres: \ @@ -288,13 +240,6 @@ postgres-headers: \ postgres-headers-v16 \ postgres-headers-v17 -.PHONY: postgres-clean -postgres-clean: \ - postgres-clean-v14 \ - postgres-clean-v15 \ - postgres-clean-v16 \ - postgres-clean-v17 - .PHONY: postgres-check postgres-check: \ postgres-check-v14 \ @@ -302,12 +247,6 @@ postgres-check: \ postgres-check-v16 \ postgres-check-v17 -# This doesn't remove the effects of 'configure'. -.PHONY: clean -clean: postgres-clean neon-pg-clean-ext - $(MAKE) -C compute clean - $(CARGO_CMD_PREFIX) cargo clean - # This removes everything .PHONY: distclean distclean: @@ -320,7 +259,7 @@ fmt: postgres-%-pg-bsd-indent: postgres-% +@echo "Compiling pg_bsd_indent" - $(MAKE) -C $(POSTGRES_INSTALL_DIR)/build/$*/src/tools/pg_bsd_indent/ + $(MAKE) -C $(BUILD_DIR)/$*/src/tools/pg_bsd_indent/ # Create typedef list for the core. Note that generally it should be combined with # buildfarm one to cover platform specific stuff. @@ -339,7 +278,7 @@ postgres-%-pgindent: postgres-%-pg-bsd-indent postgres-%-typedefs.list cat $(ROOT_PROJECT_DIR)/vendor/postgres-$*/src/tools/pgindent/typedefs.list |\ cat - postgres-$*-typedefs.list | sort | uniq > postgres-$*-typedefs-full.list +@echo note: you might want to run it on selected files/dirs instead. 
- INDENT=$(POSTGRES_INSTALL_DIR)/build/$*/src/tools/pg_bsd_indent/pg_bsd_indent \ + INDENT=$(BUILD_DIR)/$*/src/tools/pg_bsd_indent/pg_bsd_indent \ $(ROOT_PROJECT_DIR)/vendor/postgres-$*/src/tools/pgindent/pgindent --typedefs postgres-$*-typedefs-full.list \ $(ROOT_PROJECT_DIR)/vendor/postgres-$*/src/ \ --excludes $(ROOT_PROJECT_DIR)/vendor/postgres-$*/src/tools/pgindent/exclude_file_patterns @@ -350,9 +289,9 @@ postgres-%-pgindent: postgres-%-pg-bsd-indent postgres-%-typedefs.list neon-pgindent: postgres-v17-pg-bsd-indent neon-pg-ext-v17 $(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v17/bin/pg_config COPT='$(COPT)' \ FIND_TYPEDEF=$(ROOT_PROJECT_DIR)/vendor/postgres-v17/src/tools/find_typedef \ - INDENT=$(POSTGRES_INSTALL_DIR)/build/v17/src/tools/pg_bsd_indent/pg_bsd_indent \ + INDENT=$(BUILD_DIR)/v17/src/tools/pg_bsd_indent/pg_bsd_indent \ PGINDENT_SCRIPT=$(ROOT_PROJECT_DIR)/vendor/postgres-v17/src/tools/pgindent/pgindent \ - -C $(POSTGRES_INSTALL_DIR)/build/neon-v17 \ + -C $(BUILD_DIR)/neon-v17 \ -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile pgindent diff --git a/compute/Makefile b/compute/Makefile index c53d040887..ef2e55f7b1 100644 --- a/compute/Makefile +++ b/compute/Makefile @@ -22,7 +22,7 @@ sql_exporter.yml: $(jsonnet_files) --output-file etc/$@ \ --tla-str collector_name=neon_collector \ --tla-str collector_file=neon_collector.yml \ - --tla-str 'connection_string=postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter' \ + --tla-str 'connection_string=postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter&pgaudit.log=none' \ etc/sql_exporter.jsonnet sql_exporter_autoscaling.yml: $(jsonnet_files) @@ -30,7 +30,7 @@ sql_exporter_autoscaling.yml: $(jsonnet_files) --output-file etc/$@ \ --tla-str collector_name=neon_collector_autoscaling \ --tla-str collector_file=neon_collector_autoscaling.yml \ - --tla-str 'connection_string=postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter_autoscaling' \ + --tla-str 'connection_string=postgresql://cloud_admin@127.0.0.1:5432/postgres?sslmode=disable&application_name=sql_exporter_autoscaling&pgaudit.log=none' \ etc/sql_exporter.jsonnet .PHONY: clean diff --git a/compute/compute-node.Dockerfile b/compute/compute-node.Dockerfile index 3390c64547..e03b6d1874 100644 --- a/compute/compute-node.Dockerfile +++ b/compute/compute-node.Dockerfile @@ -77,9 +77,6 @@ # build_and_test.yml github workflow for how that's done. 
ARG PG_VERSION -ARG REPOSITORY=ghcr.io/neondatabase -ARG IMAGE=build-tools -ARG TAG=pinned ARG BUILD_TAG ARG DEBIAN_VERSION=bookworm ARG DEBIAN_FLAVOR=${DEBIAN_VERSION}-slim @@ -150,6 +147,7 @@ RUN case $DEBIAN_VERSION in \ zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget ca-certificates pkg-config libssl-dev \ libicu-dev libxslt1-dev liblz4-dev libzstd-dev zstd curl unzip g++ \ libclang-dev \ + jsonnet \ $VERSION_INSTALLS \ && apt clean && rm -rf /var/lib/apt/lists/* && \ useradd -ms /bin/bash nonroot -b /home @@ -173,9 +171,6 @@ RUN cd postgres && \ eval $CONFIGURE_CMD && \ make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \ make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \ - # Install headers - make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \ - make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \ # Enable some of contrib extensions echo 'trusted = true' >> /usr/local/pgsql/share/extension/autoinc.control && \ echo 'trusted = true' >> /usr/local/pgsql/share/extension/dblink.control && \ @@ -1570,20 +1565,20 @@ ARG PG_VERSION WORKDIR /ext-src RUN case "${PG_VERSION}" in \ "v14") \ - export PGAUDIT_VERSION=1.6.2 \ - export PGAUDIT_CHECKSUM=1f350d70a0cbf488c0f2b485e3a5c9b11f78ad9e3cbb95ef6904afa1eb3187eb \ + export PGAUDIT_VERSION=1.6.3 \ + export PGAUDIT_CHECKSUM=37a8f5a7cc8d9188e536d15cf0fdc457fcdab2547caedb54442c37f124110919 \ ;; \ "v15") \ - export PGAUDIT_VERSION=1.7.0 \ - export PGAUDIT_CHECKSUM=8f4a73e451c88c567e516e6cba7dc1e23bc91686bb6f1f77f8f3126d428a8bd8 \ + export PGAUDIT_VERSION=1.7.1 \ + export PGAUDIT_CHECKSUM=e9c8e6e092d82b2f901d72555ce0fe7780552f35f8985573796cd7e64b09d4ec \ ;; \ "v16") \ - export PGAUDIT_VERSION=16.0 \ - export PGAUDIT_CHECKSUM=d53ef985f2d0b15ba25c512c4ce967dce07b94fd4422c95bd04c4c1a055fe738 \ + export PGAUDIT_VERSION=16.1 \ + export PGAUDIT_CHECKSUM=3bae908ab70ba0c6f51224009dbcfff1a97bd6104c6273297a64292e1b921fee \ ;; \ "v17") \ - export PGAUDIT_VERSION=17.0 \ - export PGAUDIT_CHECKSUM=7d0d08d030275d525f36cd48b38c6455f1023da863385badff0cec44965bfd8c \ + export PGAUDIT_VERSION=17.1 \ + export PGAUDIT_CHECKSUM=9c5f37504d393486cc75d2ced83f75f5899be64fa85f689d6babb833b4361e6c \ ;; \ *) \ echo "pgaudit is not supported on this PostgreSQL version" && exit 1;; \ @@ -1634,18 +1629,7 @@ FROM pg-build AS neon-ext-build ARG PG_VERSION COPY pgxn/ pgxn/ -RUN make -j $(getconf _NPROCESSORS_ONLN) \ - -C pgxn/neon \ - -s install && \ - make -j $(getconf _NPROCESSORS_ONLN) \ - -C pgxn/neon_utils \ - -s install && \ - make -j $(getconf _NPROCESSORS_ONLN) \ - -C pgxn/neon_test_utils \ - -s install && \ - make -j $(getconf _NPROCESSORS_ONLN) \ - -C pgxn/neon_rmgr \ - -s install +RUN make -j $(getconf _NPROCESSORS_ONLN) -C pgxn -s install-compute ######################################################################################### # @@ -1763,7 +1747,7 @@ COPY --chown=nonroot . . 
RUN --mount=type=cache,uid=1000,target=/home/nonroot/.cargo/registry \ --mount=type=cache,uid=1000,target=/home/nonroot/.cargo/git \ --mount=type=cache,uid=1000,target=/home/nonroot/target \ - mold -run cargo build --locked --profile release-line-debug-size-lto --bin compute_ctl --bin fast_import --bin local_proxy && \ + cargo build --locked --profile release-line-debug-size-lto --bin compute_ctl --bin fast_import --bin local_proxy && \ mkdir target-bin && \ cp target/release-line-debug-size-lto/compute_ctl \ target/release-line-debug-size-lto/fast_import \ @@ -1857,10 +1841,11 @@ RUN rm /usr/local/pgsql/lib/lib*.a # Preprocess the sql_exporter configuration files # ######################################################################################### -FROM $REPOSITORY/$IMAGE:$TAG AS sql_exporter_preprocessor +FROM build-deps AS sql_exporter_preprocessor ARG PG_VERSION USER nonroot +WORKDIR /home/nonroot COPY --chown=nonroot compute compute diff --git a/compute/vm-image-spec-bookworm.yaml b/compute/vm-image-spec-bookworm.yaml index 057099994a..267e4c83b5 100644 --- a/compute/vm-image-spec-bookworm.yaml +++ b/compute/vm-image-spec-bookworm.yaml @@ -26,7 +26,7 @@ commands: - name: postgres-exporter user: nobody sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' - name: pgbouncer-exporter user: postgres sysvInitAction: respawn @@ -59,7 +59,7 @@ files: # the rules use ALL as the hostname. Avoid the pointless lookups and the "unable to # resolve host" log messages that they generate. Defaults !fqdn - + # Allow postgres user (which is what compute_ctl runs as) to run /neonvm/bin/resize-swap # and /neonvm/bin/set-disk-quota as root without requiring entering a password (NOPASSWD), # regardless of hostname (ALL) diff --git a/compute/vm-image-spec-bullseye.yaml b/compute/vm-image-spec-bullseye.yaml index d048e20b2e..2b6e77b656 100644 --- a/compute/vm-image-spec-bullseye.yaml +++ b/compute/vm-image-spec-bullseye.yaml @@ -26,7 +26,7 @@ commands: - name: postgres-exporter user: nobody sysvInitAction: respawn - shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' + shell: 'DATA_SOURCE_NAME="user=cloud_admin sslmode=disable dbname=postgres application_name=postgres-exporter pgaudit.log=none" /bin/postgres_exporter --config.file=/etc/postgres_exporter.yml' - name: pgbouncer-exporter user: postgres sysvInitAction: respawn @@ -59,7 +59,7 @@ files: # the rules use ALL as the hostname. Avoid the pointless lookups and the "unable to # resolve host" log messages that they generate. 
Defaults !fqdn - + # Allow postgres user (which is what compute_ctl runs as) to run /neonvm/bin/resize-swap # and /neonvm/bin/set-disk-quota as root without requiring entering a password (NOPASSWD), # regardless of hostname (ALL) diff --git a/compute_tools/Cargo.toml b/compute_tools/Cargo.toml index f9da3ba700..a5879c4b7c 100644 --- a/compute_tools/Cargo.toml +++ b/compute_tools/Cargo.toml @@ -64,6 +64,7 @@ uuid.workspace = true walkdir.workspace = true x509-cert.workspace = true +postgres_versioninfo.workspace = true postgres_initdb.workspace = true compute_api.workspace = true utils.workspace = true diff --git a/compute_tools/src/bin/fast_import.rs b/compute_tools/src/bin/fast_import.rs index e65c210b23..0eca9aba53 100644 --- a/compute_tools/src/bin/fast_import.rs +++ b/compute_tools/src/bin/fast_import.rs @@ -29,7 +29,7 @@ use anyhow::{Context, bail}; use aws_config::BehaviorVersion; use camino::{Utf8Path, Utf8PathBuf}; use clap::{Parser, Subcommand}; -use compute_tools::extension_server::{PostgresMajorVersion, get_pg_version}; +use compute_tools::extension_server::get_pg_version; use nix::unistd::Pid; use std::ops::Not; use tracing::{Instrument, error, info, info_span, warn}; @@ -179,12 +179,8 @@ impl PostgresProcess { .await .context("create pgdata directory")?; - let pg_version = match get_pg_version(self.pgbin.as_ref()) { - PostgresMajorVersion::V14 => 14, - PostgresMajorVersion::V15 => 15, - PostgresMajorVersion::V16 => 16, - PostgresMajorVersion::V17 => 17, - }; + let pg_version = get_pg_version(self.pgbin.as_ref()); + postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs { superuser: initdb_user, locale: DEFAULT_LOCALE, // XXX: this shouldn't be hard-coded, @@ -486,10 +482,8 @@ async fn cmd_pgdata( }; let superuser = "cloud_admin"; - let destination_connstring = format!( - "host=localhost port={} user={} dbname=neondb", - pg_port, superuser - ); + let destination_connstring = + format!("host=localhost port={pg_port} user={superuser} dbname=neondb"); let pgdata_dir = workdir.join("pgdata"); let mut proc = PostgresProcess::new(pgdata_dir.clone(), pg_bin_dir.clone(), pg_lib_dir.clone()); diff --git a/compute_tools/src/bin/fast_import/s3_uri.rs b/compute_tools/src/bin/fast_import/s3_uri.rs index cf4dab7c02..e1a85c73e7 100644 --- a/compute_tools/src/bin/fast_import/s3_uri.rs +++ b/compute_tools/src/bin/fast_import/s3_uri.rs @@ -69,7 +69,7 @@ impl clap::builder::TypedValueParser for S3Uri { S3Uri::from_str(value_str).map_err(|e| { clap::Error::raw( clap::error::ErrorKind::InvalidValue, - format!("Failed to parse S3 URI: {}", e), + format!("Failed to parse S3 URI: {e}"), ) }) } diff --git a/compute_tools/src/catalog.rs b/compute_tools/src/catalog.rs index 082ba62b8e..bc9f64075a 100644 --- a/compute_tools/src/catalog.rs +++ b/compute_tools/src/catalog.rs @@ -22,7 +22,7 @@ pub async fn get_dbs_and_roles(compute: &Arc) -> anyhow::Result { let mut lines = stderr_reader.lines(); if let Some(line) = lines.next_line().await? { - if line.contains(&format!("FATAL: database \"{}\" does not exist", dbname)) { + if line.contains(&format!("FATAL: database \"{dbname}\" does not exist")) { return Err(SchemaDumpError::DatabaseDoesNotExist); } warn!("pg_dump stderr: {}", line) diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs index 7a7f2dfedc..70b2d28bf2 100644 --- a/compute_tools/src/compute.rs +++ b/compute_tools/src/compute.rs @@ -250,8 +250,7 @@ impl ParsedSpec { // duplicate entry? 
if current == previous { return Err(format!( - "duplicate entry in safekeeper_connstrings: {}!", - current, + "duplicate entry in safekeeper_connstrings: {current}!", )); } @@ -406,9 +405,11 @@ impl ComputeNode { // that can affect `compute_ctl` and prevent it from properly configuring the database schema. // Unset them via connection string options before connecting to the database. // N.B. keep it in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`. - const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0"; + const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path=public -c statement_timeout=0 -c pgaudit.log=none"; let options = match conn_conf.get_options() { - Some(options) => format!("{} {}", options, EXTRA_OPTIONS), + // Allow the control plane to override any options set by the + // compute + Some(options) => format!("{EXTRA_OPTIONS} {options}"), None => EXTRA_OPTIONS.to_string(), }; conn_conf.options(&options); @@ -1125,7 +1126,7 @@ impl ComputeNode { let sk_configs = sk_connstrs.into_iter().map(|connstr| { // Format connstr let id = connstr.clone(); - let connstr = format!("postgresql://no_user@{}", connstr); + let connstr = format!("postgresql://no_user@{connstr}"); let options = format!( "-c timeline_id={} tenant_id={}", pspec.timeline_id, pspec.tenant_id @@ -1488,7 +1489,7 @@ impl ComputeNode { let (mut client, connection) = conf.connect(NoTls).await?; tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); @@ -1631,7 +1632,7 @@ impl ComputeNode { Ok((mut client, connection)) => { tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); if let Err(e) = handle_migrations(&mut client).await { @@ -1935,7 +1936,7 @@ impl ComputeNode { let (client, connection) = connect_result.unwrap(); tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); let result = client @@ -2104,7 +2105,7 @@ LIMIT 100", db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } Ok(()) @@ -2131,7 +2132,7 @@ LIMIT 100", let version: Option = db_client .query_opt(version_query, &[&ext_name]) .await - .with_context(|| format!("Failed to execute query: {}", version_query))? + .with_context(|| format!("Failed to execute query: {version_query}"))? .map(|row| row.get(0)); // sanitize the inputs as postgres idents. @@ -2146,14 +2147,14 @@ LIMIT 100", db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } else { let query = format!("CREATE EXTENSION IF NOT EXISTS {ext_name} WITH VERSION {quoted_version}"); db_client .simple_query(&query) .await - .with_context(|| format!("Failed to execute query: {}", query))?; + .with_context(|| format!("Failed to execute query: {query}"))?; } Ok(ext_version) diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs index 933b30134f..169de5c963 100644 --- a/compute_tools/src/config.rs +++ b/compute_tools/src/config.rs @@ -51,7 +51,7 @@ pub fn write_postgres_conf( // Write the postgresql.conf content from the spec file as is. 
if let Some(conf) = &spec.cluster.postgresql_conf { - writeln!(file, "{}", conf)?; + writeln!(file, "{conf}")?; } // Add options for connecting to storage @@ -70,7 +70,7 @@ pub fn write_postgres_conf( ); // If generation is given, prepend sk list with g#number: if let Some(generation) = spec.safekeepers_generation { - write!(neon_safekeepers_value, "g#{}:", generation)?; + write!(neon_safekeepers_value, "g#{generation}:")?; } neon_safekeepers_value.push_str(&spec.safekeeper_connstrings.join(",")); writeln!( @@ -109,8 +109,8 @@ pub fn write_postgres_conf( tls::update_key_path_blocking(pgdata_path, tls_config); // these are the default, but good to be explicit. - writeln!(file, "ssl_cert_file = '{}'", SERVER_CRT)?; - writeln!(file, "ssl_key_file = '{}'", SERVER_KEY)?; + writeln!(file, "ssl_cert_file = '{SERVER_CRT}'")?; + writeln!(file, "ssl_key_file = '{SERVER_KEY}'")?; } // Locales @@ -191,8 +191,7 @@ pub fn write_postgres_conf( } writeln!( file, - "shared_preload_libraries='{}{}'", - libs, extra_shared_preload_libraries + "shared_preload_libraries='{libs}{extra_shared_preload_libraries}'" )?; } else { // Typically, this should be unreacheable, @@ -244,8 +243,7 @@ pub fn write_postgres_conf( } writeln!( file, - "shared_preload_libraries='{}{}'", - libs, extra_shared_preload_libraries + "shared_preload_libraries='{libs}{extra_shared_preload_libraries}'" )?; } else { // Typically, this should be unreacheable, @@ -263,7 +261,7 @@ pub fn write_postgres_conf( } } - writeln!(file, "neon.extension_server_port={}", extension_server_port)?; + writeln!(file, "neon.extension_server_port={extension_server_port}")?; if spec.drop_subscriptions_before_start { writeln!(file, "neon.disable_logical_replication_subscribers=true")?; @@ -291,7 +289,7 @@ where { let path = pgdata_path.join("compute_ctl_temp_override.conf"); let mut file = File::create(path)?; - write!(file, "{}", options)?; + write!(file, "{options}")?; let res = exec(); diff --git a/compute_tools/src/extension_server.rs b/compute_tools/src/extension_server.rs index 3764bc1525..47931d5f72 100644 --- a/compute_tools/src/extension_server.rs +++ b/compute_tools/src/extension_server.rs @@ -74,9 +74,11 @@ More specifically, here is an example ext_index.json use std::path::Path; use std::str; +use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS}; use anyhow::{Context, Result, bail}; use bytes::Bytes; use compute_api::spec::RemoteExtSpec; +use postgres_versioninfo::PgMajorVersion; use regex::Regex; use remote_storage::*; use reqwest::StatusCode; @@ -86,8 +88,6 @@ use tracing::log::warn; use url::Url; use zstd::stream::read::Decoder; -use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS}; - fn get_pg_config(argument: &str, pgbin: &str) -> String { // gives the result of `pg_config [argument]` // where argument is a flag like `--version` or `--sharedir` @@ -106,7 +106,7 @@ fn get_pg_config(argument: &str, pgbin: &str) -> String { .to_string() } -pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion { +pub fn get_pg_version(pgbin: &str) -> PgMajorVersion { // pg_config --version returns a (platform specific) human readable string // such as "PostgreSQL 15.4". We parse this to v14/v15/v16 etc. 
let human_version = get_pg_config("--version", pgbin); @@ -114,25 +114,11 @@ pub fn get_pg_version(pgbin: &str) -> PostgresMajorVersion { } pub fn get_pg_version_string(pgbin: &str) -> String { - match get_pg_version(pgbin) { - PostgresMajorVersion::V14 => "v14", - PostgresMajorVersion::V15 => "v15", - PostgresMajorVersion::V16 => "v16", - PostgresMajorVersion::V17 => "v17", - } - .to_owned() + get_pg_version(pgbin).v_str() } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum PostgresMajorVersion { - V14, - V15, - V16, - V17, -} - -fn parse_pg_version(human_version: &str) -> PostgresMajorVersion { - use PostgresMajorVersion::*; +fn parse_pg_version(human_version: &str) -> PgMajorVersion { + use PgMajorVersion::*; // Normal releases have version strings like "PostgreSQL 15.4". But there // are also pre-release versions like "PostgreSQL 17devel" or "PostgreSQL // 16beta2" or "PostgreSQL 17rc1". And with the --with-extra-version @@ -143,10 +129,10 @@ fn parse_pg_version(human_version: &str) -> PostgresMajorVersion { .captures(human_version) { Some(captures) if captures.len() == 2 => match &captures["major"] { - "14" => return V14, - "15" => return V15, - "16" => return V16, - "17" => return V17, + "14" => return PG14, + "15" => return PG15, + "16" => return PG16, + "17" => return PG17, _ => {} }, _ => {} @@ -310,10 +296,7 @@ async fn download_extension_tar(remote_ext_base_url: &Url, ext_path: &str) -> Re async fn do_extension_server_request(uri: Url) -> Result { let resp = reqwest::get(uri).await.map_err(|e| { ( - format!( - "could not perform remote extensions server request: {:?}", - e - ), + format!("could not perform remote extensions server request: {e:?}"), UNKNOWN_HTTP_STATUS.to_string(), ) })?; @@ -323,7 +306,7 @@ async fn do_extension_server_request(uri: Url) -> Result match resp.bytes().await { Ok(resp) => Ok(resp), Err(e) => Err(( - format!("could not read remote extensions server response: {:?}", e), + format!("could not read remote extensions server response: {e:?}"), // It's fine to return and report error with status as 200 OK, // because we still failed to read the response. 
status.to_string(), @@ -334,10 +317,7 @@ async fn do_extension_server_request(uri: Url) -> Result Err(( - format!( - "unexpected remote extensions server response status code: {}", - status - ), + format!("unexpected remote extensions server response status code: {status}"), status.to_string(), )), } @@ -349,25 +329,25 @@ mod tests { #[test] fn test_parse_pg_version() { - use super::PostgresMajorVersion::*; - assert_eq!(parse_pg_version("PostgreSQL 15.4"), V15); - assert_eq!(parse_pg_version("PostgreSQL 15.14"), V15); + use postgres_versioninfo::PgMajorVersion::*; + assert_eq!(parse_pg_version("PostgreSQL 15.4"), PG15); + assert_eq!(parse_pg_version("PostgreSQL 15.14"), PG15); assert_eq!( parse_pg_version("PostgreSQL 15.4 (Ubuntu 15.4-0ubuntu0.23.04.1)"), - V15 + PG15 ); - assert_eq!(parse_pg_version("PostgreSQL 14.15"), V14); - assert_eq!(parse_pg_version("PostgreSQL 14.0"), V14); + assert_eq!(parse_pg_version("PostgreSQL 14.15"), PG14); + assert_eq!(parse_pg_version("PostgreSQL 14.0"), PG14); assert_eq!( parse_pg_version("PostgreSQL 14.9 (Debian 14.9-1.pgdg120+1"), - V14 + PG14 ); - assert_eq!(parse_pg_version("PostgreSQL 16devel"), V16); - assert_eq!(parse_pg_version("PostgreSQL 16beta1"), V16); - assert_eq!(parse_pg_version("PostgreSQL 16rc2"), V16); - assert_eq!(parse_pg_version("PostgreSQL 16extra"), V16); + assert_eq!(parse_pg_version("PostgreSQL 16devel"), PG16); + assert_eq!(parse_pg_version("PostgreSQL 16beta1"), PG16); + assert_eq!(parse_pg_version("PostgreSQL 16rc2"), PG16); + assert_eq!(parse_pg_version("PostgreSQL 16extra"), PG16); } #[test] diff --git a/compute_tools/src/http/routes/configure.rs b/compute_tools/src/http/routes/configure.rs index c29e3a97da..b7325d283f 100644 --- a/compute_tools/src/http/routes/configure.rs +++ b/compute_tools/src/http/routes/configure.rs @@ -65,7 +65,7 @@ pub(in crate::http) async fn configure( if state.status == ComputeStatus::Failed { let err = state.error.as_ref().map_or("unknown error", |x| x); - let msg = format!("compute configuration failed: {:?}", err); + let msg = format!("compute configuration failed: {err:?}"); return Err(msg); } } diff --git a/compute_tools/src/installed_extensions.rs b/compute_tools/src/installed_extensions.rs index d95c168a99..411e03b7ec 100644 --- a/compute_tools/src/installed_extensions.rs +++ b/compute_tools/src/installed_extensions.rs @@ -43,7 +43,7 @@ pub async fn get_installed_extensions(mut conf: Config) -> Result Result Result> { let mut client = config.connect(NoTls)?; - let cmd = format!("lease lsn {} {} {} ", tenant_shard_id, timeline_id, lsn); + let cmd = format!("lease lsn {tenant_shard_id} {timeline_id} {lsn} "); let res = client.simple_query(&cmd)?; let msg = match res.first() { Some(msg) => msg, diff --git a/compute_tools/src/pg_helpers.rs b/compute_tools/src/pg_helpers.rs index 94467a0d2f..0a3ceed2fa 100644 --- a/compute_tools/src/pg_helpers.rs +++ b/compute_tools/src/pg_helpers.rs @@ -36,9 +36,9 @@ pub fn escape_literal(s: &str) -> String { let res = s.replace('\'', "''").replace('\\', "\\\\"); if res.contains('\\') { - format!("E'{}'", res) + format!("E'{res}'") } else { - format!("'{}'", res) + format!("'{res}'") } } @@ -46,7 +46,7 @@ pub fn escape_literal(s: &str) -> String { /// with `'{}'` is not required, as it returns a ready-to-use config string. 
pub fn escape_conf_value(s: &str) -> String { let res = s.replace('\'', "''").replace('\\', "\\\\"); - format!("'{}'", res) + format!("'{res}'") } pub trait GenericOptionExt { @@ -446,7 +446,7 @@ pub async fn tune_pgbouncer( let mut pgbouncer_connstr = "host=localhost port=6432 dbname=pgbouncer user=postgres sslmode=disable".to_string(); if let Ok(pass) = std::env::var("PGBOUNCER_PASSWORD") { - pgbouncer_connstr.push_str(format!(" password={}", pass).as_str()); + pgbouncer_connstr.push_str(format!(" password={pass}").as_str()); } pgbouncer_connstr }; @@ -464,7 +464,7 @@ pub async fn tune_pgbouncer( Ok((client, connection)) => { tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); break client; diff --git a/compute_tools/src/spec.rs b/compute_tools/src/spec.rs index 4b38e6e29c..43cfbb48f7 100644 --- a/compute_tools/src/spec.rs +++ b/compute_tools/src/spec.rs @@ -23,12 +23,12 @@ fn do_control_plane_request( ) -> Result { let resp = reqwest::blocking::Client::new() .get(uri) - .header("Authorization", format!("Bearer {}", jwt)) + .header("Authorization", format!("Bearer {jwt}")) .send() .map_err(|e| { ( true, - format!("could not perform request to control plane: {:?}", e), + format!("could not perform request to control plane: {e:?}"), UNKNOWN_HTTP_STATUS.to_string(), ) })?; @@ -39,7 +39,7 @@ fn do_control_plane_request( Ok(spec_resp) => Ok(spec_resp), Err(e) => Err(( true, - format!("could not deserialize control plane response: {:?}", e), + format!("could not deserialize control plane response: {e:?}"), status.to_string(), )), }, @@ -62,7 +62,7 @@ fn do_control_plane_request( // or some internal failure happened. Doesn't make much sense to retry in this case. 
_ => Err(( false, - format!("unexpected control plane response status code: {}", status), + format!("unexpected control plane response status code: {status}"), status.to_string(), )), } diff --git a/compute_tools/src/spec_apply.rs b/compute_tools/src/spec_apply.rs index 0d1389dbad..fcd072263a 100644 --- a/compute_tools/src/spec_apply.rs +++ b/compute_tools/src/spec_apply.rs @@ -933,56 +933,53 @@ async fn get_operations<'a>( PerDatabasePhase::DeleteDBRoleReferences => { let ctx = ctx.read().await; - let operations = - spec.delta_operations - .iter() - .flatten() - .filter(|op| op.action == "delete_role") - .filter_map(move |op| { - if db.is_owned_by(&op.name) { - return None; - } - if !ctx.roles.contains_key(&op.name) { - return None; - } - let quoted = op.name.pg_quote(); - let new_owner = match &db { - DB::SystemDB => PgIdent::from("cloud_admin").pg_quote(), - DB::UserDB(db) => db.owner.pg_quote(), - }; - let (escaped_role, outer_tag) = op.name.pg_quote_dollar(); + let operations = spec + .delta_operations + .iter() + .flatten() + .filter(|op| op.action == "delete_role") + .filter_map(move |op| { + if db.is_owned_by(&op.name) { + return None; + } + if !ctx.roles.contains_key(&op.name) { + return None; + } + let quoted = op.name.pg_quote(); + let new_owner = match &db { + DB::SystemDB => PgIdent::from("cloud_admin").pg_quote(), + DB::UserDB(db) => db.owner.pg_quote(), + }; + let (escaped_role, outer_tag) = op.name.pg_quote_dollar(); - Some(vec![ - // This will reassign all dependent objects to the db owner - Operation { - query: format!( - "REASSIGN OWNED BY {} TO {}", - quoted, new_owner, - ), - comment: None, - }, - // Revoke some potentially blocking privileges (Neon-specific currently) - Operation { - query: format!( - include_str!("sql/pre_drop_role_revoke_privileges.sql"), - // N.B. this has to be properly dollar-escaped with `pg_quote_dollar()` - role_name = escaped_role, - outer_tag = outer_tag, - ), - comment: None, - }, - // This now will only drop privileges of the role - // TODO: this is obviously not 100% true because of the above case, - // there could be still some privileges that are not revoked. Maybe this - // only drops privileges that were granted *by this* role, not *to this* role, - // but this has to be checked. - Operation { - query: format!("DROP OWNED BY {}", quoted), - comment: None, - }, - ]) - }) - .flatten(); + Some(vec![ + // This will reassign all dependent objects to the db owner + Operation { + query: format!("REASSIGN OWNED BY {quoted} TO {new_owner}",), + comment: None, + }, + // Revoke some potentially blocking privileges (Neon-specific currently) + Operation { + query: format!( + include_str!("sql/pre_drop_role_revoke_privileges.sql"), + // N.B. this has to be properly dollar-escaped with `pg_quote_dollar()` + role_name = escaped_role, + outer_tag = outer_tag, + ), + comment: None, + }, + // This now will only drop privileges of the role + // TODO: this is obviously not 100% true because of the above case, + // there could be still some privileges that are not revoked. Maybe this + // only drops privileges that were granted *by this* role, not *to this* role, + // but this has to be checked. 
+ Operation { + query: format!("DROP OWNED BY {quoted}"), + comment: None, + }, + ]) + }) + .flatten(); Ok(Box::new(operations)) } diff --git a/compute_tools/src/sync_sk.rs b/compute_tools/src/sync_sk.rs index 22b7027b93..6c348644b2 100644 --- a/compute_tools/src/sync_sk.rs +++ b/compute_tools/src/sync_sk.rs @@ -27,7 +27,7 @@ pub async fn ping_safekeeper( let (client, conn) = config.connect(tokio_postgres::NoTls).await?; tokio::spawn(async move { if let Err(e) = conn.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); diff --git a/control_plane/src/bin/neon_local.rs b/control_plane/src/bin/neon_local.rs index 21f55336aa..c818d07fef 100644 --- a/control_plane/src/bin/neon_local.rs +++ b/control_plane/src/bin/neon_local.rs @@ -48,7 +48,7 @@ use postgres_connection::parse_host_port; use safekeeper_api::membership::{SafekeeperGeneration, SafekeeperId}; use safekeeper_api::{ DEFAULT_HTTP_LISTEN_PORT as DEFAULT_SAFEKEEPER_HTTP_PORT, - DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT, + DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT, PgMajorVersion, PgVersionId, }; use storage_broker::DEFAULT_LISTEN_ADDR as DEFAULT_BROKER_ADDR; use tokio::task::JoinSet; @@ -64,7 +64,7 @@ const DEFAULT_PAGESERVER_ID: NodeId = NodeId(1); const DEFAULT_BRANCH_NAME: &str = "main"; project_git_version!(GIT_VERSION); -const DEFAULT_PG_VERSION: u32 = 17; +const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; const DEFAULT_PAGESERVER_CONTROL_PLANE_API: &str = "http://127.0.0.1:1234/upcall/v1/"; @@ -169,7 +169,7 @@ struct TenantCreateCmdArgs { #[arg(default_value_t = DEFAULT_PG_VERSION)] #[clap(long, help = "Postgres version to use for the initial timeline")] - pg_version: u32, + pg_version: PgMajorVersion, #[clap( long, @@ -292,7 +292,7 @@ struct TimelineCreateCmdArgs { #[arg(default_value_t = DEFAULT_PG_VERSION)] #[clap(long, help = "Postgres version")] - pg_version: u32, + pg_version: PgMajorVersion, } #[derive(clap::Args)] @@ -324,7 +324,7 @@ struct TimelineImportCmdArgs { #[arg(default_value_t = DEFAULT_PG_VERSION)] #[clap(long, help = "Postgres version of the backup being imported")] - pg_version: u32, + pg_version: PgMajorVersion, } #[derive(clap::Subcommand)] @@ -603,7 +603,7 @@ struct EndpointCreateCmdArgs { #[arg(default_value_t = DEFAULT_PG_VERSION)] #[clap(long, help = "Postgres version")] - pg_version: u32, + pg_version: PgMajorVersion, /// Use gRPC to communicate with Pageservers, by generating grpc:// connstrings. /// @@ -919,7 +919,7 @@ fn print_timeline( br_sym = "┗━"; } - print!("{} @{}: ", br_sym, ancestor_lsn); + print!("{br_sym} @{ancestor_lsn}: "); } // Finally print a timeline id and name with new line @@ -1295,7 +1295,7 @@ async fn handle_timeline(cmd: &TimelineCmd, env: &mut local_env::LocalEnv) -> Re }, new_members: None, }; - let pg_version = args.pg_version * 10000; + let pg_version = PgVersionId::from(args.pg_version); let req = safekeeper_api::models::TimelineCreateRequest { tenant_id, timeline_id, @@ -1742,7 +1742,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> StopMode::Immediate => true, }; if let Err(e) = get_pageserver(env, args.pageserver_id)?.stop(immediate) { - eprintln!("pageserver stop failed: {}", e); + eprintln!("pageserver stop failed: {e}"); exit(1); } } @@ -1751,7 +1751,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> let pageserver = get_pageserver(env, args.pageserver_id)?; //TODO what shutdown strategy should we use here? 
if let Err(e) = pageserver.stop(false) { - eprintln!("pageserver stop failed: {}", e); + eprintln!("pageserver stop failed: {e}"); exit(1); } @@ -1768,7 +1768,7 @@ async fn handle_pageserver(subcmd: &PageserverCmd, env: &local_env::LocalEnv) -> { Ok(_) => println!("Page server is up and running"), Err(err) => { - eprintln!("Page server is not available: {}", err); + eprintln!("Page server is not available: {err}"); exit(1); } } @@ -1805,7 +1805,7 @@ async fn handle_storage_controller( }, }; if let Err(e) = svc.stop(stop_args).await { - eprintln!("stop failed: {}", e); + eprintln!("stop failed: {e}"); exit(1); } } @@ -1827,7 +1827,7 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> let safekeeper = get_safekeeper(env, args.id)?; if let Err(e) = safekeeper.start(&args.extra_opt, &args.start_timeout).await { - eprintln!("safekeeper start failed: {}", e); + eprintln!("safekeeper start failed: {e}"); exit(1); } } @@ -1839,7 +1839,7 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> StopMode::Immediate => true, }; if let Err(e) = safekeeper.stop(immediate) { - eprintln!("safekeeper stop failed: {}", e); + eprintln!("safekeeper stop failed: {e}"); exit(1); } } @@ -1852,12 +1852,12 @@ async fn handle_safekeeper(subcmd: &SafekeeperCmd, env: &local_env::LocalEnv) -> }; if let Err(e) = safekeeper.stop(immediate) { - eprintln!("safekeeper stop failed: {}", e); + eprintln!("safekeeper stop failed: {e}"); exit(1); } if let Err(e) = safekeeper.start(&args.extra_opt, &args.start_timeout).await { - eprintln!("safekeeper start failed: {}", e); + eprintln!("safekeeper start failed: {e}"); exit(1); } } @@ -2113,7 +2113,7 @@ async fn try_stop_all(env: &local_env::LocalEnv, immediate: bool) { let storage = EndpointStorage::from_env(env); if let Err(e) = storage.stop(immediate) { - eprintln!("endpoint_storage stop failed: {:#}", e); + eprintln!("endpoint_storage stop failed: {e:#}"); } for ps_conf in &env.pageservers { diff --git a/control_plane/src/endpoint.rs b/control_plane/src/endpoint.rs index ae81e7abbe..e3faa082db 100644 --- a/control_plane/src/endpoint.rs +++ b/control_plane/src/endpoint.rs @@ -67,6 +67,7 @@ use nix::sys::signal::{Signal, kill}; use pageserver_api::shard::ShardStripeSize; use pem::Pem; use reqwest::header::CONTENT_TYPE; +use safekeeper_api::PgMajorVersion; use safekeeper_api::membership::SafekeeperGeneration; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -89,7 +90,7 @@ pub struct EndpointConf { pg_port: u16, external_http_port: u16, internal_http_port: u16, - pg_version: u32, + pg_version: PgMajorVersion, grpc: bool, skip_pg_catalog_updates: bool, reconfigure_concurrency: usize, @@ -192,7 +193,7 @@ impl ComputeControlPlane { pg_port: Option, external_http_port: Option, internal_http_port: Option, - pg_version: u32, + pg_version: PgMajorVersion, mode: ComputeMode, grpc: bool, skip_pg_catalog_updates: bool, @@ -312,7 +313,7 @@ pub struct Endpoint { pub internal_http_address: SocketAddr, // postgres major version in the format: 14, 15, etc. - pg_version: u32, + pg_version: PgMajorVersion, // These are not part of the endpoint as such, but the environment // the endpoint runs in. 
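For context on the u32 → PgMajorVersion migration running through these control_plane hunks, here is a minimal sketch of the call-site shape it enables. It assumes only what the patch itself shows (the `PgMajorVersion`/`PgVersionId` re-exports from `safekeeper_api`, the ordered comparison used in the next endpoint.rs hunk, and the `From` conversion used in neon_local.rs); the wrapper function names are invented for illustration and are not part of the change.

```rust
use safekeeper_api::{PgMajorVersion, PgVersionId};

// Typed comparison instead of `pg_version >= 15` on a bare integer;
// mirrors the recovery_prefetch check in the endpoint.rs hunk below.
fn supports_recovery_prefetch(pg_version: PgMajorVersion) -> bool {
    pg_version >= PgMajorVersion::PG15
}

// The full numeric form (150000-style) is now obtained through a conversion
// rather than the old `pg_version * 10000` arithmetic in neon_local.rs.
fn wire_version(pg_version: PgMajorVersion) -> PgVersionId {
    PgVersionId::from(pg_version)
}
```

Since `DEFAULT_PG_VERSION` keeps its `default_value_t` role in the clap argument definitions, the enum presumably also carries the Display and parsing impls clap needs; the sketch above does not rely on those.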
@@ -557,7 +558,7 @@ impl Endpoint { conf.append("hot_standby", "on"); // prefetching of blocks referenced in WAL doesn't make sense for us // Neon hot standby ignores pages that are not in the shared_buffers - if self.pg_version >= 15 { + if self.pg_version >= PgMajorVersion::PG15 { conf.append("recovery_prefetch", "off"); } } @@ -846,10 +847,10 @@ impl Endpoint { // Launch compute_ctl let conn_str = self.connstr("cloud_admin", "postgres"); - println!("Starting postgres node at '{}'", conn_str); + println!("Starting postgres node at '{conn_str}'"); if create_test_user { let conn_str = self.connstr("test", "neondb"); - println!("Also at '{}'", conn_str); + println!("Also at '{conn_str}'"); } let mut cmd = Command::new(self.env.neon_distrib_dir.join("compute_ctl")); cmd.args([ @@ -948,8 +949,7 @@ impl Endpoint { Err(e) => { if Instant::now().duration_since(start_at) > start_timeout { return Err(e).context(format!( - "timed out {:?} waiting to connect to compute_ctl HTTP", - start_timeout, + "timed out {start_timeout:?} waiting to connect to compute_ctl HTTP", )); } } @@ -988,7 +988,7 @@ impl Endpoint { // reqwest does not export its error construction utility functions, so let's craft the message ourselves let url = response.url().to_owned(); let msg = match response.text().await { - Ok(err_body) => format!("Error: {}", err_body), + Ok(err_body) => format!("Error: {err_body}"), Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), }; Err(anyhow::anyhow!(msg)) @@ -1054,7 +1054,7 @@ impl Endpoint { } else { let url = response.url().to_owned(); let msg = match response.text().await { - Ok(err_body) => format!("Error: {}", err_body), + Ok(err_body) => format!("Error: {err_body}"), Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), }; Err(anyhow::anyhow!(msg)) diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index 47b77f0720..16cd2d8c08 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -12,9 +12,11 @@ use std::{env, fs}; use anyhow::{Context, bail}; use clap::ValueEnum; +use pageserver_api::config::PostHogConfig; use pem::Pem; use postgres_backend::AuthType; use reqwest::{Certificate, Url}; +use safekeeper_api::PgMajorVersion; use serde::{Deserialize, Serialize}; use utils::auth::encode_from_key_file; use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId}; @@ -209,6 +211,12 @@ pub struct NeonStorageControllerConf { pub use_https_safekeeper_api: bool, pub use_local_compute_notifications: bool, + + pub timeline_safekeeper_count: Option, + + pub posthog_config: Option, + + pub kick_secondary_downloads: Option, } impl NeonStorageControllerConf { @@ -236,9 +244,12 @@ impl Default for NeonStorageControllerConf { heartbeat_interval: Self::DEFAULT_HEARTBEAT_INTERVAL, long_reconcile_threshold: None, use_https_pageserver_api: false, - timelines_onto_safekeepers: false, + timelines_onto_safekeepers: true, use_https_safekeeper_api: false, use_local_compute_notifications: true, + timeline_safekeeper_count: None, + posthog_config: None, + kick_secondary_downloads: None, } } } @@ -254,7 +265,7 @@ impl Default for EndpointStorageConf { impl NeonBroker { pub fn client_url(&self) -> Url { let url = if let Some(addr) = self.listen_https_addr { - format!("https://{}", addr) + format!("https://{addr}") } else { format!( "http://{}", @@ -418,25 +429,21 @@ impl LocalEnv { self.pg_distrib_dir.clone() } - pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_distrib_dir(&self, pg_version: 
PgMajorVersion) -> anyhow::Result { let path = self.pg_distrib_dir.clone(); - #[allow(clippy::manual_range_patterns)] - match pg_version { - 14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))), - _ => bail!("Unsupported postgres version: {}", pg_version), - } + Ok(path.join(pg_version.v_str())) } - pub fn pg_dir(&self, pg_version: u32, dir_name: &str) -> anyhow::Result { + pub fn pg_dir(&self, pg_version: PgMajorVersion, dir_name: &str) -> anyhow::Result { Ok(self.pg_distrib_dir(pg_version)?.join(dir_name)) } - pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { self.pg_dir(pg_version, "bin") } - pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { self.pg_dir(pg_version, "lib") } @@ -727,7 +734,7 @@ impl LocalEnv { let config_toml_path = dentry.path().join("pageserver.toml"); let config_toml: PageserverConfigTomlSubset = toml_edit::de::from_str( &std::fs::read_to_string(&config_toml_path) - .with_context(|| format!("read {:?}", config_toml_path))?, + .with_context(|| format!("read {config_toml_path:?}"))?, ) .context("parse pageserver.toml")?; let identity_toml_path = dentry.path().join("identity.toml"); @@ -737,7 +744,7 @@ impl LocalEnv { } let identity_toml: IdentityTomlSubset = toml_edit::de::from_str( &std::fs::read_to_string(&identity_toml_path) - .with_context(|| format!("read {:?}", identity_toml_path))?, + .with_context(|| format!("read {identity_toml_path:?}"))?, ) .context("parse identity.toml")?; let PageserverConfigTomlSubset { diff --git a/control_plane/src/pageserver.rs b/control_plane/src/pageserver.rs index a683d2daec..3f66960edd 100644 --- a/control_plane/src/pageserver.rs +++ b/control_plane/src/pageserver.rs @@ -22,6 +22,7 @@ use pageserver_api::shard::TenantShardId; use pageserver_client::mgmt_api; use postgres_backend::AuthType; use postgres_connection::{PgConnectionConfig, parse_host_port}; +use safekeeper_api::PgMajorVersion; use utils::auth::{Claims, Scope}; use utils::id::{NodeId, TenantId, TimelineId}; use utils::lsn::Lsn; @@ -121,7 +122,7 @@ impl PageServerNode { .env .generate_auth_token(&Claims::new(None, Scope::GenerationsApi)) .unwrap(); - overrides.push(format!("control_plane_api_token='{}'", jwt_token)); + overrides.push(format!("control_plane_api_token='{jwt_token}'")); } if !conf.other.contains_key("remote_storage") { @@ -607,7 +608,7 @@ impl PageServerNode { timeline_id: TimelineId, base: (Lsn, PathBuf), pg_wal: Option<(Lsn, PathBuf)>, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<()> { // Init base reader let (start_lsn, base_tarfile_path) = base; diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index 28d369a315..da9dafd8e9 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -143,7 +143,7 @@ impl SafekeeperNode { let id_string = id.to_string(); // TODO: add availability_zone to the config. // Right now we just specify any value here and use it to check metrics in tests. 
- let availability_zone = format!("sk-{}", id_string); + let availability_zone = format!("sk-{id_string}"); let mut args = vec![ "-D".to_owned(), diff --git a/control_plane/src/storage_controller.rs b/control_plane/src/storage_controller.rs index 755d67a7ad..dea7ae2ccf 100644 --- a/control_plane/src/storage_controller.rs +++ b/control_plane/src/storage_controller.rs @@ -6,6 +6,8 @@ use std::str::FromStr; use std::sync::OnceLock; use std::time::{Duration, Instant}; +use crate::background_process; +use crate::local_env::{LocalEnv, NeonStorageControllerConf}; use camino::{Utf8Path, Utf8PathBuf}; use hyper0::Uri; use nix::unistd::Pid; @@ -22,6 +24,7 @@ use pageserver_client::mgmt_api::ResponseErrorMessageExt; use pem::Pem; use postgres_backend::AuthType; use reqwest::{Method, Response}; +use safekeeper_api::PgMajorVersion; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use tokio::process::Command; @@ -31,9 +34,6 @@ use utils::auth::{Claims, Scope, encode_from_key_file}; use utils::id::{NodeId, TenantId}; use whoami::username; -use crate::background_process; -use crate::local_env::{LocalEnv, NeonStorageControllerConf}; - pub struct StorageController { env: LocalEnv, private_key: Option, @@ -48,7 +48,7 @@ pub struct StorageController { const COMMAND: &str = "storage_controller"; -const STORAGE_CONTROLLER_POSTGRES_VERSION: u32 = 16; +const STORAGE_CONTROLLER_POSTGRES_VERSION: PgMajorVersion = PgMajorVersion::PG16; const DB_NAME: &str = "storage_controller"; @@ -167,7 +167,7 @@ impl StorageController { fn storage_controller_instance_dir(&self, instance_id: u8) -> PathBuf { self.env .base_data_dir - .join(format!("storage_controller_{}", instance_id)) + .join(format!("storage_controller_{instance_id}")) } fn pid_file(&self, instance_id: u8) -> Utf8PathBuf { @@ -184,9 +184,15 @@ impl StorageController { /// to other versions if that one isn't found. Some automated tests create circumstances /// where only one version is available in pg_distrib_dir, such as `test_remote_extensions`. async fn get_pg_dir(&self, dir_name: &str) -> anyhow::Result { - let prefer_versions = [STORAGE_CONTROLLER_POSTGRES_VERSION, 16, 15, 14]; + const PREFER_VERSIONS: [PgMajorVersion; 5] = [ + STORAGE_CONTROLLER_POSTGRES_VERSION, + PgMajorVersion::PG16, + PgMajorVersion::PG15, + PgMajorVersion::PG14, + PgMajorVersion::PG17, + ]; - for v in prefer_versions { + for v in PREFER_VERSIONS { let path = Utf8PathBuf::from_path_buf(self.env.pg_dir(v, dir_name)?).unwrap(); if tokio::fs::try_exists(&path).await? { return Ok(path); @@ -220,7 +226,7 @@ impl StorageController { "-d", DB_NAME, "-p", - &format!("{}", postgres_port), + &format!("{postgres_port}"), ]; let pg_lib_dir = self.get_pg_lib_dir().await.unwrap(); let envs = [ @@ -263,7 +269,7 @@ impl StorageController { "-h", "localhost", "-p", - &format!("{}", postgres_port), + &format!("{postgres_port}"), "-U", &username(), "-O", @@ -425,7 +431,7 @@ impl StorageController { // from `LocalEnv`'s config file (`.neon/config`). tokio::fs::write( &pg_data_path.join("postgresql.conf"), - format!("port = {}\nfsync=off\n", postgres_port), + format!("port = {postgres_port}\nfsync=off\n"), ) .await?; @@ -477,7 +483,7 @@ impl StorageController { self.setup_database(postgres_port).await?; } - let database_url = format!("postgresql://localhost:{}/{DB_NAME}", postgres_port); + let database_url = format!("postgresql://localhost:{postgres_port}/{DB_NAME}"); // We support running a startup SQL script to fiddle with the database before we launch storcon. 
// This is used by the test suite. @@ -508,7 +514,7 @@ impl StorageController { drop(client); conn.await??; - let addr = format!("{}:{}", host, listen_port); + let addr = format!("{host}:{listen_port}"); let address_for_peers = Uri::builder() .scheme(scheme) .authority(addr.clone()) @@ -557,6 +563,10 @@ impl StorageController { args.push("--use-local-compute-notifications".to_string()); } + if let Some(value) = self.config.kick_secondary_downloads { + args.push(format!("--kick-secondary-downloads={value}")); + } + if let Some(ssl_ca_file) = self.env.ssl_ca_cert_path() { args.push(format!("--ssl-ca-file={}", ssl_ca_file.to_str().unwrap())); } @@ -628,6 +638,22 @@ impl StorageController { args.push("--timelines-onto-safekeepers".to_string()); } + if let Some(sk_cnt) = self.config.timeline_safekeeper_count { + args.push(format!("--timeline-safekeeper-count={sk_cnt}")); + } + + let mut envs = vec![ + ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), + ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), + ]; + + if let Some(posthog_config) = &self.config.posthog_config { + envs.push(( + "POSTHOG_CONFIG".to_string(), + serde_json::to_string(posthog_config)?, + )); + } + println!("Starting storage controller"); background_process::start_process( @@ -635,10 +661,7 @@ impl StorageController { &instance_dir, &self.env.storage_controller_bin(), args, - vec![ - ("LD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), - ("DYLD_LIBRARY_PATH".to_owned(), pg_lib_dir.to_string()), - ], + envs, background_process::InitialPidFile::Create(self.pid_file(start_args.instance_id)), &start_args.start_timeout, || async { @@ -802,9 +825,9 @@ impl StorageController { builder = builder.json(&body) } if let Some(private_key) = &self.private_key { - println!("Getting claims for path {}", path); + println!("Getting claims for path {path}"); if let Some(required_claims) = Self::get_claims_for_path(&path)? 
{ - println!("Got claims {:?} for path {}", required_claims, path); + println!("Got claims {required_claims:?} for path {path}"); let jwt_token = encode_from_key_file(&required_claims, private_key)?; builder = builder.header( reqwest::header::AUTHORIZATION, diff --git a/control_plane/storcon_cli/src/main.rs b/control_plane/storcon_cli/src/main.rs index 507190b1e0..0036b7d0f6 100644 --- a/control_plane/storcon_cli/src/main.rs +++ b/control_plane/storcon_cli/src/main.rs @@ -649,7 +649,7 @@ async fn main() -> anyhow::Result<()> { response .new_shards .iter() - .map(|s| format!("{:?}", s)) + .map(|s| format!("{s:?}")) .collect::>() .join(",") ); @@ -771,8 +771,8 @@ async fn main() -> anyhow::Result<()> { println!("Tenant {tenant_id}"); let mut table = comfy_table::Table::new(); - table.add_row(["Policy", &format!("{:?}", policy)]); - table.add_row(["Stripe size", &format!("{:?}", stripe_size)]); + table.add_row(["Policy", &format!("{policy:?}")]); + table.add_row(["Stripe size", &format!("{stripe_size:?}")]); table.add_row(["Config", &serde_json::to_string_pretty(&config).unwrap()]); println!("{table}"); println!("Shards:"); @@ -789,7 +789,7 @@ async fn main() -> anyhow::Result<()> { let secondary = shard .node_secondary .iter() - .map(|n| format!("{}", n)) + .map(|n| format!("{n}")) .collect::>() .join(","); @@ -863,7 +863,7 @@ async fn main() -> anyhow::Result<()> { } } else { // Make it obvious to the user that since they've omitted an AZ, we're clearing it - eprintln!("Clearing preferred AZ for tenant {}", tenant_id); + eprintln!("Clearing preferred AZ for tenant {tenant_id}"); } // Construct a request that modifies all the tenant's shards @@ -1134,8 +1134,7 @@ async fn main() -> anyhow::Result<()> { Err((tenant_shard_id, from, to, error)) => { failure += 1; println!( - "Failed to migrate {} from node {} to node {}: {}", - tenant_shard_id, from, to, error + "Failed to migrate {tenant_shard_id} from node {from} to node {to}: {error}" ); } } @@ -1277,8 +1276,7 @@ async fn main() -> anyhow::Result<()> { concurrency, } => { let mut path = format!( - "/v1/tenant/{}/timeline/{}/download_heatmap_layers", - tenant_shard_id, timeline_id, + "/v1/tenant/{tenant_shard_id}/timeline/{timeline_id}/download_heatmap_layers", ); if let Some(c) = concurrency { @@ -1303,8 +1301,7 @@ async fn watch_tenant_shard( ) -> anyhow::Result<()> { if let Some(until_migrated_to) = until_migrated_to { println!( - "Waiting for tenant shard {} to be migrated to node {}", - tenant_shard_id, until_migrated_to + "Waiting for tenant shard {tenant_shard_id} to be migrated to node {until_migrated_to}" ); } @@ -1327,7 +1324,7 @@ async fn watch_tenant_shard( "attached: {} secondary: {} {}", shard .node_attached - .map(|n| format!("{}", n)) + .map(|n| format!("{n}")) .unwrap_or("none".to_string()), shard .node_secondary @@ -1341,15 +1338,12 @@ async fn watch_tenant_shard( "(reconciler idle)" } ); - println!("{}", summary); + println!("{summary}"); // Maybe drop out if we finished migration if let Some(until_migrated_to) = until_migrated_to { if shard.node_attached == Some(until_migrated_to) && !shard.is_reconciling { - println!( - "Tenant shard {} is now on node {}", - tenant_shard_id, until_migrated_to - ); + println!("Tenant shard {tenant_shard_id} is now on node {until_migrated_to}"); break; } } diff --git a/docs/rfcs/040-Endpoint-Persistent-Unlogged-Files-Storage.md b/docs/rfcs/040-Endpoint-Persistent-Unlogged-Files-Storage.md new file mode 100644 index 0000000000..182b2682af --- /dev/null +++ 
b/docs/rfcs/040-Endpoint-Persistent-Unlogged-Files-Storage.md @@ -0,0 +1,396 @@ +# Memo: Endpoint Persistent Unlogged Files Storage +Created on 2024-11-05 +Implemented on N/A + +## Summary +A design for a storage system that allows storage of files required to make +Neon's Endpoints have a better experience at or after a reboot. + +## Motivation +Several systems inside PostgreSQL (and Neon) need some persistent storage for +optimal workings across reboots and restarts, but still work without. +Examples are the query-level statistics files of `pg_stat_statements` in +`pg_stat/pg_stat_statements.stat`, and `pg_prewarm`'s `autoprewarm.blocks`. +We need a storage system that can store and manage these files for each +Endpoint, without necessarily granting users access to an unlimited storage +device. + +## Goals +- Store known files for Endpoints with reasonable persistence. + _Data loss in this service, while annoying and bad for UX, won't lose any + customer's data._ + +## Non Goals (if relevant) +- This storage system does not need branching, file versioning, or other such + features. The files are as ephemeral to the timeline of the data as the + Endpoints that host the data. +- This storage system does not need to store _all_ user files, only 'known' + user files. +- This storage system does not need to be hosted fully inside Computes. + _Instead, this will be a separate component similar to Pageserver, + SafeKeeper, the S3 proxy used for dynamically loaded extensions, etc._ + +## Impacted components +- Compute needs new code to load and store these files in its lifetime. +- Control Plane needs to consider this new storage system when signalling + the deletion of an Endpoint, Timeline, or Tenant. +- Control Plane needs to consider this new storage system when it resets + or re-assigns an endpoint's timeline/branch state. + +A new service is created: the Endpoint Persistent Unlogged Files Storage +service. This could be integrated in e.g. Pageserver or Control Plane, or a +separately hosted service. + +## Proposed implementation +Endpoint-related data files are managed by a newly designed service (which +optionally is integrated in an existing service like Pageserver or Control +Plane), which stores data directly into S3 or any blob storage of choice. + +Upon deletion of the Endpoint, or reassignment of the endpoint to a different +branch, this ephemeral data is dropped: the data stored may not match the +state of the branch's data after reassignment, and on endpoint deletion the +data won't have any use to the user. + +Compute gets credentials (JWT token with Tenant, Timeline & Endpoint claims) +which it can use to authenticate to this new service and retrieve and store +data associated with this endpoint. This limited scope reduces leaks of data +across endpoints and timeline resets, and limits the ability of endpoints to +mess with other endpoints' data. + +The path of this endpoint data in S3 is initially as follows: + + s3:/// + tenants/ + / + tenants/ + / + endpoints/ + / + pgdata/ + + +For other blob storages an equivalent or similar path can be constructed. + +### Reliability, failure modes and corner cases (if relevant) +Reliability is important, but not critical to the workings of Neon. The data +stored in this service will, when lost, reduce performance, but won't be a +cause of permanent data loss - only operational metadata is stored. + +Most, if not all, blob storage services have sufficiently high persistence +guarantees to cater our need for persistence and uptime. 
The only concern with +blob storages is that the access latency is generally higher than local disk, +but for the object types stored (cache state, ...) I don't think this will be +much of an issue. + +### Interaction/Sequence diagram (if relevant) + +In these diagrams you can replace S3 with any persistent storage device of +choice, but S3 is chosen as representative name: The well-known and short name +of AWS' blob storage. Azure Blob Storage should work too, but it has a much +longer name making it less practical for the diagrams. + +Write data: + +```http +POST /tenants//timelines//endpoints//pgdata/ +Host: epufs.svc.neon.local + +<<< + +200 OK +{ + "version": "", # opaque file version token, changes when the file contents change + "size": , +} +``` + +```mermaid +sequenceDiagram + autonumber + participant co as Compute + participant ep as EPUFS + participant s3 as Blob Storage + + co-->ep: Connect with credentials + co->>+ep: Store Unlogged Persistent File + opt is authenticated + ep->>s3: Write UPF to S3 + end + ep->>-co: OK / Failure / Auth Failure + co-->ep: Cancel connection +``` + +Read data: (optional with cache-relevant request parameters, e.g. If-Modified-Since) +```http +GET /tenants//timelines//endpoints//pgdata/ +Host: epufs.svc.neon.local + +<<< + +200 OK + + +``` + +```mermaid +sequenceDiagram + autonumber + participant co as Compute + participant ep as EPUFS + participant s3 as Blob Storage + + co->>+ep: Read Unlogged Persistent File + opt is authenticated + ep->>+s3: Request UPF from storage + s3->>-ep: Receive UPF from storage + end + ep->>-co: OK(response) / Failure(storage, auth, ...) +``` + +Compute Startup: +```mermaid +sequenceDiagram + autonumber + participant co as Compute + participant ps as Pageserver + participant ep as EPUFS + participant es as Extension server + + note over co: Bind endpoint ep-xxx + par Get basebackup + co->>+ps: Request basebackup @ LSN + ps-)ps: Construct basebackup + ps->>-co: Receive basebackup TAR @ LSN + and Get startup-critical Unlogged Persistent Files + co->>+ep: Get all UPFs of endpoint ep-xxx + ep-)ep: Retrieve and gather all UPFs + ep->>-co: TAR of UPFs + and Get startup-critical extensions + loop For every startup-critical extension + co->>es: Get critical extension + es->>co: Receive critical extension + end + end + note over co: Start compute +``` + +CPlane ops: +```http +DELETE /tenants//timelines//endpoints/ +Host: epufs.svc.neon.local + +<<< + +200 OK +{ + "tenant": "", + "timeline": "", + "endpoint": "", + "deleted": { + "files": , + "bytes": , + }, +} +``` + +```http +DELETE /tenants//timelines/ +Host: epufs.svc.neon.local + +<<< + +200 OK +{ + "tenant": "", + "timeline": "", + "deleted": { + "files": , + "bytes": , + }, +} +``` + +```http +DELETE /tenants/ +Host: epufs.svc.neon.local + +<<< + +200 OK +{ + "tenant": "", + "deleted": { + "files": , + "bytes": , + }, +} +``` + +```mermaid +sequenceDiagram + autonumber + participant cp as Control Plane + participant ep as EPUFS + participant s3 as Blob Storage + + alt Tenant deleted + cp-)ep: Tenant deleted + loop For every object associated with removed tenant + ep->>s3: Remove data of deleted tenant from Storage + end + opt + ep-)cp: Tenant cleanup complete + end + alt Timeline deleted + cp-)ep: Timeline deleted + loop For every object associated with removed timeline + ep->>s3: Remove data of deleted timeline from Storage + end + opt + ep-)cp: Timeline cleanup complete + end + else Endpoint reassigned or removed + cp->>+ep: Endpoint reassigned + loop For every object 
associated with reassigned/removed endpoint + ep->>s3: Remove data from Storage + end + ep->>-cp: Cleanup complete + end +``` + +### Scalability (if relevant) + +Provisionally: As this service is going to be part of compute startup, this +service should be able to quickly respond to all requests. Therefore this +service is deployed to every AZ we host Computes in, and Computes communicate +(generally) only to the EPUFS endpoint of the AZ they're hosted in. + +Local caching of frequently restarted endpoints' data or metadata may be +needed for best performance. However, due to the regional nature of stored +data but zonal nature of the service deployment, we should be careful when we +implement any local caching, as it is possible that computes in AZ 1 will +update data originally written and thus cached by AZ 2. Cache version tests +and invalidation is therefore required if we want to roll out caching to this +service, which is too broad a scope for an MVC. This is why caching is left +out of scope for this RFC, and should be considered separately after this RFC +is implemented. + +### Security implications (if relevant) +This service must be able to authenticate users at least by Tenant ID, +Timeline ID and Endpoint ID. This will use the existing JWT infrastructure of +Compute, which will be upgraded to the extent needed to support Timeline- and +Endpoint-based claims. + +The service requires unlimited access to (a prefix of) a blob storage bucket, +and thus must be hosted outside the Compute VM sandbox. + +A service that generates pre-signed request URLs for Compute to download the +data from that URL is likely problematic, too: Compute would be able to write +unlimited data to the bucket, or exfiltrate this signed URL to get read/write +access to specific objects in this bucket, which would still effectively give +users access to the S3 bucket (but with improved access logging). + +There may be a use case for transferring data associated with one endpoint to +another endpoint (e.g. to make one endpoint warm its caches with the state of +another endpoint), but that's not currently in scope, and specific needs may +be solved through out-of-line communication of data or pre-signed URLs. + +### Unresolved questions (if relevant) +Caching of files is not in the implementation scope of the document, but +should at some future point be considered to maximize performance. + +## Alternative implementation (if relevant) +Several ideas have come up to solve this issue: + +### Use AUXfile +One prevalent idea was to WAL-log the files using our AUXfile mechanism. + +Benefits: + ++ We already have this storage mechanism + +Demerits: + +- It isn't available on read replicas +- Additional WAL will be consumed during shutdown and after the shutdown + checkpoint, which needs PG modifications to work without panics. +- It increases the data we need to manage in our versioned storage, thus + causing higher storage costs with higher retention due to duplication at + the storage layer. 
+ +### Sign URLs for read/write operations, instead of proxying them + +Benefits: + ++ The service can be implemented with a much reduced IO budget + +Demerits: + +- Users could get access to these signed credentials +- Not all blob storage services may implement URL signing + +### Give endpoints each their own directly accessed block volume + +Benefits: + ++ Easier to integrate for PostgreSQL + +Demerits: + +- Little control on data size and contents +- Potentially problematic as we'd need to store data all across the pgdata + directory. +- EBS is not a good candidate + - Attaches in 10s of seconds, if not more; i.e. too cold to start + - Shared EBS volumes are a no-go, as you'd have to schedule the endpoint + with users of the same EBS volumes, which can't work with VM migration + - EBS storage costs are very high (>80$/kilotenant when using a + volume/tenant) + - EBS volumes can't be mounted across AZ boundaries +- Bucket per endpoint is unfeasible + - S3 buckets are priced at $20/month per 1k, which we could better spend + on developers. + - Allocating service accounts takes time (100s of ms), and service accounts + are a limited resource, too; so they're not a good candidate to allocate + on a per-endpoint basis. + - Giving credentials limited to prefix has similar issues as the pre-signed + URL approach. + - Bucket DNS lookup will fill DNS caches and put pressure on DNS lookup + much more than our current systems would. +- Volumes bound by hypervisor are unlikely + - This requires significant investment and increased software on the + hypervisor. + - It is unclear if we can attach volumes after boot, i.e. for pooled + instances. + +### Put the files into a table + +Benefits: + + + Mostly already available in PostgreSQL + +Demerits: + + - Uses WAL + - Can't be used after shutdown checkpoint + - Needs a RW endpoint, and table & catalog access to write to this data + - Gets hit with DB size limitations + - Depending on user acces: + - Inaccessible: + The user doesn't have control over database size caused by + these systems. + - Accessible: + The user can corrupt these files and cause the system to crash while + user-corrupted files are present, thus increasing on-call overhead. + +## Definition of Done (if relevant) + +This project is done if we have: + +- One S3 bucket equivalent per region, which stores this per-endpoint data. +- A new service endpoint in at least every AZ, which indirectly grants + endpoints access to the data stored for these endpoints in these buckets. +- Compute writes & reads temp-data at shutdown and startup, respectively, for + at least the pg_prewarm or lfc_prewarm state files. +- Cleanup of endpoint data is triggered when the endpoint is deleted or is + detached from its current timeline. 
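To make the API sketched in this RFC more concrete, below is a minimal, hypothetical compute-side client for the EPUFS service. It assumes only what the RFC itself sketches (the `/tenants/…/timelines/…/endpoints/…/pgdata/…` path shape, Bearer-JWT auth with tenant/timeline/endpoint claims, and the placeholder `epufs.svc.neon.local` host); the type and method names are invented for illustration and are not part of this RFC or of the patch.

```rust
use anyhow::Context;
use reqwest::Client;

/// Minimal sketch of a compute-side EPUFS client. Everything here is
/// illustrative: the base URL, token handling and error policy are assumptions
/// layered on top of the HTTP examples above, not a prescribed implementation.
struct EpufsClient {
    http: Client,
    base_url: String, // e.g. "http://epufs.svc.neon.local" (placeholder host from the examples)
    jwt: String,      // token carrying the tenant/timeline/endpoint claims
}

impl EpufsClient {
    fn file_url(&self, tenant: &str, timeline: &str, endpoint: &str, name: &str) -> String {
        format!(
            "{}/tenants/{tenant}/timelines/{timeline}/endpoints/{endpoint}/pgdata/{name}",
            self.base_url
        )
    }

    /// Store one unlogged persistent file (e.g. pg_prewarm's autoprewarm.blocks) at shutdown.
    async fn store(
        &self,
        tenant: &str,
        timeline: &str,
        endpoint: &str,
        name: &str,
        body: Vec<u8>,
    ) -> anyhow::Result<()> {
        self.http
            .post(self.file_url(tenant, timeline, endpoint, name))
            .bearer_auth(&self.jwt)
            .body(body)
            .send()
            .await?
            .error_for_status()
            .context("storing unlogged persistent file")?;
        Ok(())
    }

    /// Fetch the file back at startup; a miss simply means the compute starts cold.
    async fn load(
        &self,
        tenant: &str,
        timeline: &str,
        endpoint: &str,
        name: &str,
    ) -> anyhow::Result<Vec<u8>> {
        let resp = self
            .http
            .get(self.file_url(tenant, timeline, endpoint, name))
            .bearer_auth(&self.jwt)
            .send()
            .await?
            .error_for_status()
            .context("loading unlogged persistent file")?;
        Ok(resp.bytes().await?.to_vec())
    }
}
```

At startup such a client would be driven in parallel with the basebackup fetch, as in the "Compute Startup" diagram above, and a missing or stale file is treated as a cold start rather than an error.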
diff --git a/endpoint_storage/src/app.rs b/endpoint_storage/src/app.rs index f44efe6d7a..42431c0066 100644 --- a/endpoint_storage/src/app.rs +++ b/endpoint_storage/src/app.rs @@ -374,7 +374,7 @@ MC4CAQAwBQYDK2VwBCIEID/Drmc1AA6U/znNRWpF3zEGegOATQxfkdWxitcOMsIH let request = Request::builder() .uri(format!("/{tenant}/{timeline}/{endpoint}/sub/path/key")) .method(method) - .header("Authorization", format!("Bearer {}", token)) + .header("Authorization", format!("Bearer {token}")) .body(Body::empty()) .unwrap(); let status = ServiceExt::ready(&mut app) diff --git a/libs/compute_api/src/requests.rs b/libs/compute_api/src/requests.rs index bbab271474..745c44c05b 100644 --- a/libs/compute_api/src/requests.rs +++ b/libs/compute_api/src/requests.rs @@ -16,6 +16,7 @@ pub static COMPUTE_AUDIENCE: &str = "compute"; pub enum ComputeClaimsScope { /// An admin-scoped token allows access to all of `compute_ctl`'s authorized /// facilities. + #[serde(rename = "compute_ctl:admin")] Admin, } @@ -24,7 +25,7 @@ impl FromStr for ComputeClaimsScope { fn from_str(s: &str) -> Result { match s { - "admin" => Ok(ComputeClaimsScope::Admin), + "compute_ctl:admin" => Ok(ComputeClaimsScope::Admin), _ => Err(anyhow::anyhow!("invalid compute claims scope \"{s}\"")), } } @@ -80,3 +81,23 @@ pub struct SetRoleGrantsRequest { pub privileges: Vec, pub role: PgIdent, } + +#[cfg(test)] +mod test { + use std::str::FromStr; + + use crate::requests::ComputeClaimsScope; + + /// Confirm that whether we parse the scope by string or through serde, the + /// same values parse to the same enum variant. + #[test] + fn compute_request_scopes() { + const ADMIN_SCOPE: &str = "compute_ctl:admin"; + + let from_serde: ComputeClaimsScope = + serde_json::from_str(&format!("\"{ADMIN_SCOPE}\"")).unwrap(); + let from_str = ComputeClaimsScope::from_str(ADMIN_SCOPE).unwrap(); + + assert_eq!(from_serde, from_str); + } +} diff --git a/libs/desim/src/executor.rs b/libs/desim/src/executor.rs index 51b11ff97e..bdb9c6cd4b 100644 --- a/libs/desim/src/executor.rs +++ b/libs/desim/src/executor.rs @@ -71,7 +71,7 @@ impl Runtime { debug!("thread panicked: {:?}", e); let mut result = ctx.result.lock(); if result.0 == -1 { - *result = (256, format!("thread panicked: {:?}", e)); + *result = (256, format!("thread panicked: {e:?}")); } }); } diff --git a/libs/desim/src/proto.rs b/libs/desim/src/proto.rs index 31bc29e6a6..7c3de4ff4b 100644 --- a/libs/desim/src/proto.rs +++ b/libs/desim/src/proto.rs @@ -47,8 +47,8 @@ impl Debug for AnyMessage { match self { AnyMessage::None => write!(f, "None"), AnyMessage::InternalConnect => write!(f, "InternalConnect"), - AnyMessage::Just32(v) => write!(f, "Just32({})", v), - AnyMessage::ReplCell(v) => write!(f, "ReplCell({:?})", v), + AnyMessage::Just32(v) => write!(f, "Just32({v})"), + AnyMessage::ReplCell(v) => write!(f, "ReplCell({v:?})"), AnyMessage::Bytes(v) => write!(f, "Bytes({})", hex::encode(v)), AnyMessage::LSN(v) => write!(f, "LSN({})", Lsn(*v)), } diff --git a/libs/http-utils/src/endpoint.rs b/libs/http-utils/src/endpoint.rs index 64147f2dd0..f32ced1180 100644 --- a/libs/http-utils/src/endpoint.rs +++ b/libs/http-utils/src/endpoint.rs @@ -582,14 +582,14 @@ pub fn attach_openapi_ui( deepLinking: true, showExtensions: true, showCommonExtensions: true, - url: "{}", + url: "{spec_mount_path}", }}) window.ui = ui; }}; - "#, spec_mount_path))).unwrap()) + "#))).unwrap()) }) ) } @@ -696,7 +696,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80); let mut service = 
builder.build(remote_addr); if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await { - panic!("request service is not ready: {:?}", e); + panic!("request service is not ready: {e:?}"); } let mut req: Request = Request::default(); @@ -716,7 +716,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80); let mut service = builder.build(remote_addr); if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await { - panic!("request service is not ready: {:?}", e); + panic!("request service is not ready: {e:?}"); } let req: Request = Request::default(); diff --git a/libs/neon-shmem/src/lib.rs b/libs/neon-shmem/src/lib.rs index e1b14b1371..c689959b68 100644 --- a/libs/neon-shmem/src/lib.rs +++ b/libs/neon-shmem/src/lib.rs @@ -86,7 +86,7 @@ impl ShmemHandle { // somewhat smaller than that, because with anything close to that, you'll run out of // memory anyway. if max_size >= 1 << 48 { - panic!("max size {} too large", max_size); + panic!("max size {max_size} too large"); } if initial_size > max_size { panic!("initial size {initial_size} larger than max size {max_size}"); @@ -279,7 +279,7 @@ mod tests { fn assert_range(ptr: *const u8, expected: u8, range: Range) { for i in range { let b = unsafe { *(ptr.add(i)) }; - assert_eq!(expected, b, "unexpected byte at offset {}", i); + assert_eq!(expected, b, "unexpected byte at offset {i}"); } } diff --git a/libs/pageserver_api/Cargo.toml b/libs/pageserver_api/Cargo.toml index 25f29b8ecd..a34e065788 100644 --- a/libs/pageserver_api/Cargo.toml +++ b/libs/pageserver_api/Cargo.toml @@ -17,7 +17,8 @@ anyhow.workspace = true bytes.workspace = true byteorder.workspace = true utils.workspace = true -postgres_ffi.workspace = true +postgres_ffi_types.workspace = true +postgres_versioninfo.workspace = true enum-map.workspace = true strum.workspace = true strum_macros.workspace = true diff --git a/libs/pageserver_api/src/config.rs b/libs/pageserver_api/src/config.rs index 2d7a06a72f..7926e839cf 100644 --- a/libs/pageserver_api/src/config.rs +++ b/libs/pageserver_api/src/config.rs @@ -63,7 +63,8 @@ impl Display for NodeMetadata { } } -/// PostHog integration config. +/// PostHog integration config. This is used in pageserver, storcon, and neon_local. +/// Ensure backward compatibility when adding new fields. #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct PostHogConfig { /// PostHog project ID @@ -76,6 +77,13 @@ pub struct PostHogConfig { pub private_api_url: String, /// Public API URL pub public_api_url: String, + /// Refresh interval for the feature flag spec. + /// The storcon will push the feature flag spec to the pageserver. If the pageserver does not receive + /// the spec for `refresh_interval`, it will fetch the spec from the PostHog API. + #[serde(default)] + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(with = "humantime_serde")] + pub refresh_interval: Option, } /// `pageserver.toml` @@ -816,7 +824,7 @@ pub mod tenant_conf_defaults { // By default ingest enough WAL for two new L0 layers before checking if new image // image layers should be created. 
pub const DEFAULT_IMAGE_LAYER_CREATION_CHECK_THRESHOLD: u8 = 2; - pub const DEFAULT_GC_COMPACTION_ENABLED: bool = false; + pub const DEFAULT_GC_COMPACTION_ENABLED: bool = true; pub const DEFAULT_GC_COMPACTION_VERIFICATION: bool = true; pub const DEFAULT_GC_COMPACTION_INITIAL_THRESHOLD_KB: u64 = 5 * 1024 * 1024; // 5GB pub const DEFAULT_GC_COMPACTION_RATIO_PERCENT: u64 = 100; diff --git a/libs/pageserver_api/src/controller_api.rs b/libs/pageserver_api/src/controller_api.rs index 55495dd68e..ff18d40bfe 100644 --- a/libs/pageserver_api/src/controller_api.rs +++ b/libs/pageserver_api/src/controller_api.rs @@ -577,8 +577,7 @@ mod test { let err = serde_json::from_value::(create_request).unwrap_err(); assert!( err.to_string().contains("unknown field `unknown_field`"), - "expect unknown field `unknown_field` error, got: {}", - err + "expect unknown field `unknown_field` error, got: {err}" ); } diff --git a/libs/pageserver_api/src/key.rs b/libs/pageserver_api/src/key.rs index c14975167b..102bbee879 100644 --- a/libs/pageserver_api/src/key.rs +++ b/libs/pageserver_api/src/key.rs @@ -4,8 +4,8 @@ use std::ops::Range; use anyhow::{Result, bail}; use byteorder::{BE, ByteOrder}; use bytes::Bytes; -use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; -use postgres_ffi::{Oid, RepOriginId}; +use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; +use postgres_ffi_types::{Oid, RepOriginId}; use serde::{Deserialize, Serialize}; use utils::const_assert; @@ -194,7 +194,7 @@ impl Key { /// will be rejected on the write path. #[allow(dead_code)] pub fn is_valid_key_on_write_path_strong(&self) -> bool { - use postgres_ffi::pg_constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID}; + use postgres_ffi_types::constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID}; if !self.is_i128_representable() { return false; } diff --git a/libs/pageserver_api/src/keyspace.rs b/libs/pageserver_api/src/keyspace.rs index 79e3ef553b..10a242e13b 100644 --- a/libs/pageserver_api/src/keyspace.rs +++ b/libs/pageserver_api/src/keyspace.rs @@ -1,7 +1,6 @@ use std::ops::Range; use itertools::Itertools; -use postgres_ffi::BLCKSZ; use crate::key::Key; use crate::shard::{ShardCount, ShardIdentity}; @@ -269,9 +268,13 @@ impl KeySpace { /// Partition a key space into roughly chunks of roughly 'target_size' bytes /// in each partition. /// - pub fn partition(&self, shard_identity: &ShardIdentity, target_size: u64) -> KeyPartitioning { - // Assume that each value is 8k in size. 
- let target_nblocks = (target_size / BLCKSZ as u64) as u32; + pub fn partition( + &self, + shard_identity: &ShardIdentity, + target_size: u64, + block_size: u64, + ) -> KeyPartitioning { + let target_nblocks = (target_size / block_size) as u32; let mut parts = Vec::new(); let mut current_part = Vec::new(); @@ -331,8 +334,7 @@ impl KeySpace { std::cmp::max(range.start, prev.start) < std::cmp::min(range.end, prev.end); assert!( !overlap, - "Attempt to merge ovelapping keyspaces: {:?} overlaps {:?}", - prev, range + "Attempt to merge ovelapping keyspaces: {prev:?} overlaps {range:?}" ); } @@ -1101,7 +1103,7 @@ mod tests { // total range contains at least one shard-local page let all_nonzero = fragments.iter().all(|f| f.0 > 0); if !all_nonzero { - eprintln!("Found a zero-length fragment: {:?}", fragments); + eprintln!("Found a zero-length fragment: {fragments:?}"); } assert!(all_nonzero); } else { diff --git a/libs/pageserver_api/src/lib.rs b/libs/pageserver_api/src/lib.rs index 6c91d61508..52aed7a2c2 100644 --- a/libs/pageserver_api/src/lib.rs +++ b/libs/pageserver_api/src/lib.rs @@ -6,11 +6,9 @@ pub mod key; pub mod keyspace; pub mod models; pub mod pagestream_api; -pub mod record; pub mod reltag; pub mod shard; /// Public API types pub mod upcall_api; -pub mod value; pub mod config; diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs index 2fc32c8f49..82a3ac0eb4 100644 --- a/libs/pageserver_api/src/models.rs +++ b/libs/pageserver_api/src/models.rs @@ -11,6 +11,7 @@ use std::time::{Duration, SystemTime}; #[cfg(feature = "testing")] use camino::Utf8PathBuf; +use postgres_versioninfo::PgMajorVersion; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::serde_as; pub use utilization::PageserverUtilization; @@ -398,7 +399,7 @@ pub enum TimelineCreateRequestMode { // inherits the ancestor's pg_version. Earlier code wasn't // using a flattened enum, so, it was an accepted field, and // we continue to accept it by having it here. 
- pg_version: Option, + pg_version: Option, #[serde(default, skip_serializing_if = "std::ops::Not::not")] read_only: bool, }, @@ -410,7 +411,7 @@ pub enum TimelineCreateRequestMode { Bootstrap { #[serde(default)] existing_initdb_timeline_id: Option, - pg_version: Option, + pg_version: Option, }, } @@ -1182,7 +1183,7 @@ impl Display for ImageCompressionAlgorithm { ImageCompressionAlgorithm::Disabled => write!(f, "disabled"), ImageCompressionAlgorithm::Zstd { level } => { if let Some(level) = level { - write!(f, "zstd({})", level) + write!(f, "zstd({level})") } else { write!(f, "zstd") } @@ -1573,7 +1574,7 @@ pub struct TimelineInfo { pub last_received_msg_lsn: Option, /// the timestamp (in microseconds) of the last received message pub last_received_msg_ts: Option, - pub pg_version: u32, + pub pg_version: PgMajorVersion, pub state: TimelineState, @@ -2011,8 +2012,7 @@ mod tests { let err = serde_json::from_value::(config_request).unwrap_err(); assert!( err.to_string().contains("unknown field `unknown_field`"), - "expect unknown field `unknown_field` error, got: {}", - err + "expect unknown field `unknown_field` error, got: {err}" ); } diff --git a/libs/pageserver_api/src/pagestream_api.rs b/libs/pageserver_api/src/pagestream_api.rs index fba64c82d9..862da8268a 100644 --- a/libs/pageserver_api/src/pagestream_api.rs +++ b/libs/pageserver_api/src/pagestream_api.rs @@ -8,9 +8,15 @@ use crate::reltag::RelTag; use byteorder::{BigEndian, ReadBytesExt}; use bytes::{Buf, BufMut, Bytes, BytesMut}; -use postgres_ffi::BLCKSZ; use utils::lsn::Lsn; +/// Block size. +/// +/// XXX: We assume 8k block size in the SLRU fetch API. It's not great to hardcode +/// that in the protocol, because Postgres supports different block sizes as a compile +/// time option. +const BLCKSZ: usize = 8192; + // Wrapped in libpq CopyData #[derive(PartialEq, Eq, Debug)] pub enum PagestreamFeMessage { @@ -443,7 +449,7 @@ impl PagestreamBeMessage { Self::GetSlruSegment(resp) => { bytes.put_u8(Tag::GetSlruSegment as u8); - bytes.put_u32((resp.segment.len() / BLCKSZ as usize) as u32); + bytes.put_u32((resp.segment.len() / BLCKSZ) as u32); bytes.put(&resp.segment[..]); } @@ -520,7 +526,7 @@ impl PagestreamBeMessage { bytes.put_u64(resp.req.hdr.not_modified_since.0); bytes.put_u8(resp.req.kind); bytes.put_u32(resp.req.segno); - bytes.put_u32((resp.segment.len() / BLCKSZ as usize) as u32); + bytes.put_u32((resp.segment.len() / BLCKSZ) as u32); bytes.put(&resp.segment[..]); } @@ -662,7 +668,7 @@ impl PagestreamBeMessage { let kind = buf.read_u8()?; let segno = buf.read_u32::()?; let n_blocks = buf.read_u32::()?; - let mut segment = vec![0; n_blocks as usize * BLCKSZ as usize]; + let mut segment = vec![0; n_blocks as usize * BLCKSZ]; buf.read_exact(&mut segment)?; Self::GetSlruSegment(PagestreamGetSlruSegmentResponse { req: PagestreamGetSlruSegmentRequest { diff --git a/libs/pageserver_api/src/reltag.rs b/libs/pageserver_api/src/reltag.rs index e0dd4fdfe8..d0e37dffae 100644 --- a/libs/pageserver_api/src/reltag.rs +++ b/libs/pageserver_api/src/reltag.rs @@ -1,9 +1,9 @@ use std::cmp::Ordering; use std::fmt; -use postgres_ffi::Oid; -use postgres_ffi::pg_constants::GLOBALTABLESPACE_OID; -use postgres_ffi::relfile_utils::{MAIN_FORKNUM, forkname_to_number, forknumber_to_name}; +use postgres_ffi_types::Oid; +use postgres_ffi_types::constants::GLOBALTABLESPACE_OID; +use postgres_ffi_types::forknum::{MAIN_FORKNUM, forkname_to_number, forknumber_to_name}; use serde::{Deserialize, Serialize}; /// diff --git a/libs/pageserver_api/src/shard.rs 
b/libs/pageserver_api/src/shard.rs index feb59f5070..9c16be93e8 100644 --- a/libs/pageserver_api/src/shard.rs +++ b/libs/pageserver_api/src/shard.rs @@ -35,7 +35,7 @@ use std::hash::{Hash, Hasher}; #[doc(inline)] pub use ::utils::shard::*; -use postgres_ffi::relfile_utils::INIT_FORKNUM; +use postgres_ffi_types::forknum::INIT_FORKNUM; use serde::{Deserialize, Serialize}; use crate::key::Key; diff --git a/libs/pageserver_api/src/upcall_api.rs b/libs/pageserver_api/src/upcall_api.rs index e2de02eea0..07cada2eb1 100644 --- a/libs/pageserver_api/src/upcall_api.rs +++ b/libs/pageserver_api/src/upcall_api.rs @@ -23,22 +23,12 @@ pub struct ReAttachRequest { pub register: Option, } -fn default_mode() -> LocationConfigMode { - LocationConfigMode::AttachedSingle -} - #[derive(Serialize, Deserialize, Debug)] pub struct ReAttachResponseTenant { pub id: TenantShardId, /// Mandatory if LocationConfigMode is None or set to an Attached* mode pub r#gen: Option, - - /// Default value only for backward compat: this field should be set - #[serde(default = "default_mode")] pub mode: LocationConfigMode, - - // Default value only for backward compat: this field should be set - #[serde(default = "ShardStripeSize::default")] pub stripe_size: ShardStripeSize, } #[derive(Serialize, Deserialize)] diff --git a/libs/postgres_backend/src/lib.rs b/libs/postgres_backend/src/lib.rs index 714d8ac403..091299f842 100644 --- a/libs/postgres_backend/src/lib.rs +++ b/libs/postgres_backend/src/lib.rs @@ -939,7 +939,7 @@ impl PostgresBackendReader { FeMessage::CopyFail => Err(CopyStreamHandlerEnd::CopyFail), FeMessage::Terminate => Err(CopyStreamHandlerEnd::Terminate), _ => Err(CopyStreamHandlerEnd::from(ConnectionError::Protocol( - ProtocolError::Protocol(format!("unexpected message in COPY stream {:?}", msg)), + ProtocolError::Protocol(format!("unexpected message in COPY stream {msg:?}")), ))), }, None => Err(CopyStreamHandlerEnd::EOF), diff --git a/libs/postgres_backend/tests/simple_select.rs b/libs/postgres_backend/tests/simple_select.rs index 75ca123014..23e17799bd 100644 --- a/libs/postgres_backend/tests/simple_select.rs +++ b/libs/postgres_backend/tests/simple_select.rs @@ -61,7 +61,7 @@ async fn simple_select() { // so spawn it off to run on its own. tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); @@ -137,7 +137,7 @@ async fn simple_select_ssl() { // so spawn it off to run on its own. 
tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); diff --git a/libs/postgres_connection/src/lib.rs b/libs/postgres_connection/src/lib.rs index cd981b3729..2388303329 100644 --- a/libs/postgres_connection/src/lib.rs +++ b/libs/postgres_connection/src/lib.rs @@ -223,7 +223,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "stub.host.example:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: None }" ); } @@ -239,7 +239,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "[::1]:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Ipv6(::1), port: 123, password: None }" ); } @@ -252,7 +252,7 @@ mod tests_pg_connection_config { assert_eq!(cfg.port(), 123); assert_eq!(cfg.raw_address(), "stub.host.example:123"); assert_eq!( - format!("{:?}", cfg), + format!("{cfg:?}"), "PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: Some(REDACTED-STRING) }" ); } diff --git a/libs/postgres_ffi/Cargo.toml b/libs/postgres_ffi/Cargo.toml index b7a376841d..d4fec6cbe9 100644 --- a/libs/postgres_ffi/Cargo.toml +++ b/libs/postgres_ffi/Cargo.toml @@ -16,8 +16,10 @@ memoffset.workspace = true pprof.workspace = true thiserror.workspace = true serde.workspace = true +postgres_ffi_types.workspace = true utils.workspace = true tracing.workspace = true +postgres_versioninfo.workspace = true [dev-dependencies] env_logger.workspace = true diff --git a/libs/postgres_ffi/benches/waldecoder.rs b/libs/postgres_ffi/benches/waldecoder.rs index 2e1d62e452..b2a884c7db 100644 --- a/libs/postgres_ffi/benches/waldecoder.rs +++ b/libs/postgres_ffi/benches/waldecoder.rs @@ -4,6 +4,7 @@ use criterion::{Bencher, Criterion, criterion_group, criterion_main}; use postgres_ffi::v17::wal_generator::LogicalMessageGenerator; use postgres_ffi::v17::waldecoder_handler::WalStreamDecoderHandler; use postgres_ffi::waldecoder::WalStreamDecoder; +use postgres_versioninfo::PgMajorVersion; use pprof::criterion::{Output, PProfProfiler}; use utils::lsn::Lsn; @@ -32,7 +33,7 @@ fn bench_complete_record(c: &mut Criterion) { let value_size = LogicalMessageGenerator::make_value_size(size, PREFIX); let value = vec![1; value_size]; - let mut decoder = WalStreamDecoder::new(Lsn(0), 170000); + let mut decoder = WalStreamDecoder::new(Lsn(0), PgMajorVersion::PG17); let msg = LogicalMessageGenerator::new(PREFIX, &value) .next() .unwrap() diff --git a/libs/postgres_ffi/src/lib.rs b/libs/postgres_ffi/src/lib.rs index 05d8de4c7a..9297ac46c9 100644 --- a/libs/postgres_ffi/src/lib.rs +++ b/libs/postgres_ffi/src/lib.rs @@ -14,6 +14,8 @@ use bytes::Bytes; use utils::bin_ser::SerializeError; use utils::lsn::Lsn; +pub use postgres_versioninfo::PgMajorVersion; + macro_rules! postgres_ffi { ($version:ident) => { #[path = "."] @@ -91,21 +93,22 @@ macro_rules! 
dispatch_pgversion { $version => $code, default = $invalid_pgver_handling, pgversions = [ - 14 : v14, - 15 : v15, - 16 : v16, - 17 : v17, + $crate::PgMajorVersion::PG14 => v14, + $crate::PgMajorVersion::PG15 => v15, + $crate::PgMajorVersion::PG16 => v16, + $crate::PgMajorVersion::PG17 => v17, ] ) }; ($pgversion:expr => $code:expr, default = $default:expr, - pgversions = [$($sv:literal : $vsv:ident),+ $(,)?]) => { - match ($pgversion) { + pgversions = [$($sv:pat => $vsv:ident),+ $(,)?]) => { + match ($pgversion.clone().into()) { $($sv => { use $crate::$vsv as pgv; $code },)+ + #[allow(unreachable_patterns)] _ => { $default } @@ -179,9 +182,9 @@ macro_rules! enum_pgversion { $($variant ( $crate::$md::$t )),+ } impl self::$name { - pub fn pg_version(&self) -> u32 { + pub fn pg_version(&self) -> PgMajorVersion { enum_pgversion_dispatch!(self, $name, _ign, { - pgv::bindings::PG_MAJORVERSION_NUM + pgv::bindings::MY_PGVERSION }) } } @@ -195,15 +198,15 @@ macro_rules! enum_pgversion { }; {name = $name:ident, path = $p:ident, - typ = $t:ident, + $(typ = $t:ident,)? pgversions = [$($variant:ident : $md:ident),+ $(,)?]} => { pub enum $name { - $($variant ($crate::$md::$p::$t)),+ + $($variant $(($crate::$md::$p::$t))?),+ } impl $name { - pub fn pg_version(&self) -> u32 { + pub fn pg_version(&self) -> PgMajorVersion { enum_pgversion_dispatch!(self, $name, _ign, { - pgv::bindings::PG_MAJORVERSION_NUM + pgv::bindings::MY_PGVERSION }) } } @@ -249,22 +252,21 @@ pub use v14::xlog_utils::{ try_from_pg_timestamp, }; -pub fn bkpimage_is_compressed(bimg_info: u8, version: u32) -> bool { +pub fn bkpimage_is_compressed(bimg_info: u8, version: PgMajorVersion) -> bool { dispatch_pgversion!(version, pgv::bindings::bkpimg_is_compressed(bimg_info)) } pub fn generate_wal_segment( segno: u64, system_id: u64, - pg_version: u32, + pg_version: PgMajorVersion, lsn: Lsn, ) -> Result { assert_eq!(segno, lsn.segment_number(WAL_SEGMENT_SIZE)); dispatch_pgversion!( pg_version, - pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn), - Err(SerializeError::BadInput) + pgv::xlog_utils::generate_wal_segment(segno, system_id, lsn) ) } @@ -272,7 +274,7 @@ pub fn generate_pg_control( pg_control_bytes: &[u8], checkpoint_bytes: &[u8], lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<(Bytes, u64, bool)> { dispatch_pgversion!( pg_version, @@ -352,6 +354,7 @@ pub fn fsm_logical_to_physical(addr: BlockNumber) -> BlockNumber { pub mod waldecoder { use std::num::NonZeroU32; + use crate::PgMajorVersion; use bytes::{Buf, Bytes, BytesMut}; use thiserror::Error; use utils::lsn::Lsn; @@ -369,7 +372,7 @@ pub mod waldecoder { pub struct WalStreamDecoder { pub lsn: Lsn, - pub pg_version: u32, + pub pg_version: PgMajorVersion, pub inputbuf: BytesMut, pub state: State, } @@ -382,7 +385,7 @@ pub mod waldecoder { } impl WalStreamDecoder { - pub fn new(lsn: Lsn, pg_version: u32) -> WalStreamDecoder { + pub fn new(lsn: Lsn, pg_version: PgMajorVersion) -> WalStreamDecoder { WalStreamDecoder { lsn, pg_version, diff --git a/libs/postgres_ffi/src/pg_constants.rs b/libs/postgres_ffi/src/pg_constants.rs index b0bdd8a8da..f61b9a71c2 100644 --- a/libs/postgres_ffi/src/pg_constants.rs +++ b/libs/postgres_ffi/src/pg_constants.rs @@ -11,11 +11,7 @@ use crate::{BLCKSZ, PageHeaderData}; -// -// From pg_tablespace_d.h -// -pub const DEFAULTTABLESPACE_OID: u32 = 1663; -pub const GLOBALTABLESPACE_OID: u32 = 1664; +// Note: There are a few more widely-used constants in the postgres_ffi_types::constants crate. 
// From storage_xlog.h pub const XLOG_SMGR_CREATE: u8 = 0x10; diff --git a/libs/postgres_ffi/src/pg_constants_v14.rs b/libs/postgres_ffi/src/pg_constants_v14.rs index fe01a5df7c..fd393995db 100644 --- a/libs/postgres_ffi/src/pg_constants_v14.rs +++ b/libs/postgres_ffi/src/pg_constants_v14.rs @@ -1,3 +1,7 @@ +use crate::PgMajorVersion; + +pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG14; + pub const XLOG_DBASE_CREATE: u8 = 0x00; pub const XLOG_DBASE_DROP: u8 = 0x10; diff --git a/libs/postgres_ffi/src/pg_constants_v15.rs b/libs/postgres_ffi/src/pg_constants_v15.rs index 3cd1b7aec5..6c1e2c13de 100644 --- a/libs/postgres_ffi/src/pg_constants_v15.rs +++ b/libs/postgres_ffi/src/pg_constants_v15.rs @@ -1,3 +1,7 @@ +use crate::PgMajorVersion; + +pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG15; + pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; diff --git a/libs/postgres_ffi/src/pg_constants_v16.rs b/libs/postgres_ffi/src/pg_constants_v16.rs index 31bd5b68fd..d84db502f3 100644 --- a/libs/postgres_ffi/src/pg_constants_v16.rs +++ b/libs/postgres_ffi/src/pg_constants_v16.rs @@ -1,3 +1,7 @@ +use crate::PgMajorVersion; + +pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG16; + pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; diff --git a/libs/postgres_ffi/src/pg_constants_v17.rs b/libs/postgres_ffi/src/pg_constants_v17.rs index 2132938680..14d4b3d42f 100644 --- a/libs/postgres_ffi/src/pg_constants_v17.rs +++ b/libs/postgres_ffi/src/pg_constants_v17.rs @@ -1,3 +1,7 @@ +use crate::PgMajorVersion; + +pub const MY_PGVERSION: PgMajorVersion = PgMajorVersion::PG17; + pub const XACT_XINFO_HAS_DROPPED_STATS: u32 = 1u32 << 8; pub const XLOG_DBASE_CREATE_FILE_COPY: u8 = 0x00; diff --git a/libs/postgres_ffi/src/relfile_utils.rs b/libs/postgres_ffi/src/relfile_utils.rs index aa0e625b47..38f94b7221 100644 --- a/libs/postgres_ffi/src/relfile_utils.rs +++ b/libs/postgres_ffi/src/relfile_utils.rs @@ -4,50 +4,7 @@ use once_cell::sync::OnceCell; use regex::Regex; -// -// Fork numbers, from relpath.h -// -pub const MAIN_FORKNUM: u8 = 0; -pub const FSM_FORKNUM: u8 = 1; -pub const VISIBILITYMAP_FORKNUM: u8 = 2; -pub const INIT_FORKNUM: u8 = 3; - -#[derive(Debug, Clone, thiserror::Error, PartialEq, Eq)] -pub enum FilePathError { - #[error("invalid relation fork name")] - InvalidForkName, - #[error("invalid relation data file name")] - InvalidFileName, -} - -impl From for FilePathError { - fn from(_e: core::num::ParseIntError) -> Self { - FilePathError::InvalidFileName - } -} - -/// Convert Postgres relation file's fork suffix to fork number. -pub fn forkname_to_number(forkname: Option<&str>) -> Result { - match forkname { - // "main" is not in filenames, it's implicit if the fork name is not present - None => Ok(MAIN_FORKNUM), - Some("fsm") => Ok(FSM_FORKNUM), - Some("vm") => Ok(VISIBILITYMAP_FORKNUM), - Some("init") => Ok(INIT_FORKNUM), - Some(_) => Err(FilePathError::InvalidForkName), - } -} - -/// Convert Postgres fork number to the right suffix of the relation data file. -pub fn forknumber_to_name(forknum: u8) -> Option<&'static str> { - match forknum { - MAIN_FORKNUM => None, - FSM_FORKNUM => Some("fsm"), - VISIBILITYMAP_FORKNUM => Some("vm"), - INIT_FORKNUM => Some("init"), - _ => Some("UNKNOWN FORKNUM"), - } -} +use postgres_ffi_types::forknum::*; /// Parse a filename of a relation file. Returns (relfilenode, forknum, segno) tuple. 
/// @@ -75,7 +32,9 @@ pub fn parse_relfilename(fname: &str) -> Result<(u32, u8, u32), FilePathError> { .ok_or(FilePathError::InvalidFileName)?; let relnode_str = caps.name("relnode").unwrap().as_str(); - let relnode = relnode_str.parse::()?; + let relnode = relnode_str + .parse::() + .map_err(|_e| FilePathError::InvalidFileName)?; let forkname = caps.name("forkname").map(|f| f.as_str()); let forknum = forkname_to_number(forkname)?; @@ -84,7 +43,11 @@ pub fn parse_relfilename(fname: &str) -> Result<(u32, u8, u32), FilePathError> { let segno = if segno_match.is_none() { 0 } else { - segno_match.unwrap().as_str().parse::()? + segno_match + .unwrap() + .as_str() + .parse::() + .map_err(|_e| FilePathError::InvalidFileName)? }; Ok((relnode, forknum, segno)) diff --git a/libs/postgres_ffi/src/waldecoder_handler.rs b/libs/postgres_ffi/src/waldecoder_handler.rs index b4d50375bd..9cd40645ec 100644 --- a/libs/postgres_ffi/src/waldecoder_handler.rs +++ b/libs/postgres_ffi/src/waldecoder_handler.rs @@ -114,7 +114,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let hdr = XLogLongPageHeaderData::from_bytes(&mut self.inputbuf).map_err( |e| WalDecodeError { - msg: format!("long header deserialization failed {}", e), + msg: format!("long header deserialization failed {e}"), lsn: self.lsn, }, )?; @@ -130,7 +130,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let hdr = XLogPageHeaderData::from_bytes(&mut self.inputbuf).map_err(|e| { WalDecodeError { - msg: format!("header deserialization failed {}", e), + msg: format!("header deserialization failed {e}"), lsn: self.lsn, } })?; @@ -155,7 +155,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let xl_tot_len = (&self.inputbuf[0..4]).get_u32_le(); if (xl_tot_len as usize) < XLOG_SIZE_OF_XLOG_RECORD { return Err(WalDecodeError { - msg: format!("invalid xl_tot_len {}", xl_tot_len), + msg: format!("invalid xl_tot_len {xl_tot_len}"), lsn: self.lsn, }); } @@ -218,7 +218,7 @@ impl WalStreamDecoderHandler for WalStreamDecoder { let xlogrec = XLogRecord::from_slice(&recordbuf[0..XLOG_SIZE_OF_XLOG_RECORD]).map_err(|e| { WalDecodeError { - msg: format!("xlog record deserialization failed {}", e), + msg: format!("xlog record deserialization failed {e}"), lsn: self.lsn, } })?; diff --git a/libs/postgres_ffi/src/walrecord.rs b/libs/postgres_ffi/src/walrecord.rs index 1ccf4590a9..d593123dc0 100644 --- a/libs/postgres_ffi/src/walrecord.rs +++ b/libs/postgres_ffi/src/walrecord.rs @@ -9,8 +9,8 @@ use utils::bin_ser::DeserializeError; use utils::lsn::Lsn; use crate::{ - BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, RepOriginId, - TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants, + BLCKSZ, BlockNumber, MultiXactId, MultiXactOffset, MultiXactStatus, Oid, PgMajorVersion, + RepOriginId, TimestampTz, TransactionId, XLOG_SIZE_OF_XLOG_RECORD, XLogRecord, pg_constants, }; #[repr(C)] @@ -199,20 +199,17 @@ impl DecodedWALRecord { /// Check if this WAL record represents a legacy "copy" database creation, which populates new relations /// by reading other existing relations' data blocks. This is more complex to apply than new-style database /// creations which simply include all the desired blocks in the WAL, so we need a helper function to detect this case. 
- pub fn is_dbase_create_copy(&self, pg_version: u32) -> bool { + pub fn is_dbase_create_copy(&self, pg_version: PgMajorVersion) -> bool { if self.xl_rmid == pg_constants::RM_DBASE_ID { let info = self.xl_info & pg_constants::XLR_RMGR_INFO_MASK; match pg_version { - 14 => { + PgMajorVersion::PG14 => { // Postgres 14 database creations are always the legacy kind info == crate::v14::bindings::XLOG_DBASE_CREATE } - 15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY, - 16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY, - 17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY, - _ => { - panic!("Unsupported postgres version {pg_version}") - } + PgMajorVersion::PG15 => info == crate::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY, + PgMajorVersion::PG16 => info == crate::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY, + PgMajorVersion::PG17 => info == crate::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY, } } else { false @@ -248,7 +245,7 @@ impl DecodedWALRecord { pub fn decode_wal_record( record: Bytes, decoded: &mut DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<()> { let mut rnode_spcnode: u32 = 0; let mut rnode_dbnode: u32 = 0; @@ -1106,9 +1103,9 @@ pub struct XlClogTruncate { } impl XlClogTruncate { - pub fn decode(buf: &mut Bytes, pg_version: u32) -> XlClogTruncate { + pub fn decode(buf: &mut Bytes, pg_version: PgMajorVersion) -> XlClogTruncate { XlClogTruncate { - pageno: if pg_version < 17 { + pageno: if pg_version < PgMajorVersion::PG17 { buf.get_u32_le() } else { buf.get_u64_le() as u32 @@ -1199,7 +1196,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "HEAP2 MULTI_INSERT", pg_constants::XLOG_HEAP2_VISIBLE => "HEAP2 VISIBLE", _ => { - unknown_str = format!("HEAP2 UNKNOWN_0x{:02x}", info); + unknown_str = format!("HEAP2 UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1212,7 +1209,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "HEAP UPDATE", pg_constants::XLOG_HEAP_HOT_UPDATE => "HEAP HOT_UPDATE", _ => { - unknown_str = format!("HEAP2 UNKNOWN_0x{:02x}", info); + unknown_str = format!("HEAP2 UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1223,7 +1220,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result "XLOG FPI", pg_constants::XLOG_FPI_FOR_HINT => "XLOG FPI_FOR_HINT", _ => { - unknown_str = format!("XLOG UNKNOWN_0x{:02x}", info); + unknown_str = format!("XLOG UNKNOWN_0x{info:02x}"); &unknown_str } } @@ -1231,7 +1228,7 @@ pub fn describe_postgres_wal_record(record: &Bytes) -> Result { let info = xlogrec.xl_info & pg_constants::XLR_RMGR_INFO_MASK; - unknown_str = format!("UNKNOWN_RM_{} INFO_0x{:02x}", rmid, info); + unknown_str = format!("UNKNOWN_RM_{rmid} INFO_0x{info:02x}"); &unknown_str } }; diff --git a/libs/postgres_ffi/src/xlog_utils.rs b/libs/postgres_ffi/src/xlog_utils.rs index 14fb1f2a1f..f7b6296053 100644 --- a/libs/postgres_ffi/src/xlog_utils.rs +++ b/libs/postgres_ffi/src/xlog_utils.rs @@ -11,9 +11,9 @@ use super::super::waldecoder::WalStreamDecoder; use super::bindings::{ CheckPoint, ControlFileData, DBState_DB_SHUTDOWNED, FullTransactionId, TimeLineID, TimestampTz, XLogLongPageHeaderData, XLogPageHeaderData, XLogRecPtr, XLogRecord, XLogSegNo, XLOG_PAGE_MAGIC, + MY_PGVERSION }; use super::wal_generator::LogicalMessageGenerator; -use super::PG_MAJORVERSION; use crate::pg_constants; use crate::PG_TLI; use crate::{uint32, uint64, Oid}; @@ -233,7 +233,7 @@ pub fn find_end_of_wal( let mut result = start_lsn; let mut curr_lsn = start_lsn; let mut buf = [0u8; XLOG_BLCKSZ]; - 
let pg_version = PG_MAJORVERSION[1..3].parse::().unwrap(); + let pg_version = MY_PGVERSION; debug!("find_end_of_wal PG_VERSION: {}", pg_version); let mut decoder = WalStreamDecoder::new(start_lsn, pg_version); diff --git a/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs b/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs index 6151ce34ac..44bc4dfa95 100644 --- a/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs +++ b/libs/postgres_ffi/wal_craft/src/bin/wal_craft.rs @@ -4,6 +4,7 @@ use std::str::FromStr; use anyhow::*; use clap::{Arg, ArgMatches, Command, value_parser}; use postgres::Client; +use postgres_ffi::PgMajorVersion; use wal_craft::*; fn main() -> Result<()> { @@ -48,7 +49,7 @@ fn main() -> Result<()> { Some(("with-initdb", arg_matches)) => { let cfg = Conf { pg_version: *arg_matches - .get_one::("pg-version") + .get_one::("pg-version") .context("'pg-version' is required")?, pg_distrib_dir: arg_matches .get_one::("pg-distrib-dir") diff --git a/libs/postgres_ffi/wal_craft/src/lib.rs b/libs/postgres_ffi/wal_craft/src/lib.rs index ca9530faef..ef9e854297 100644 --- a/libs/postgres_ffi/wal_craft/src/lib.rs +++ b/libs/postgres_ffi/wal_craft/src/lib.rs @@ -9,8 +9,8 @@ use log::*; use postgres::Client; use postgres::types::PgLsn; use postgres_ffi::{ - WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, - XLOG_SIZE_OF_XLOG_SHORT_PHD, + PgMajorVersion, WAL_SEGMENT_SIZE, XLOG_BLCKSZ, XLOG_SIZE_OF_XLOG_LONG_PHD, + XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD, }; macro_rules! xlog_utils_test { @@ -29,7 +29,7 @@ macro_rules! xlog_utils_test { postgres_ffi::for_all_postgres_versions! { xlog_utils_test } pub struct Conf { - pub pg_version: u32, + pub pg_version: PgMajorVersion, pub pg_distrib_dir: PathBuf, pub datadir: PathBuf, } @@ -52,11 +52,7 @@ impl Conf { pub fn pg_distrib_dir(&self) -> anyhow::Result { let path = self.pg_distrib_dir.clone(); - #[allow(clippy::manual_range_patterns)] - match self.pg_version { - 14 | 15 | 16 | 17 => Ok(path.join(format!("v{}", self.pg_version))), - _ => bail!("Unsupported postgres version: {}", self.pg_version), - } + Ok(path.join(self.pg_version.v_str())) } fn pg_bin_dir(&self) -> anyhow::Result { diff --git a/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs b/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs index 4a33dbe25b..366aa7dbef 100644 --- a/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs +++ b/libs/postgres_ffi/wal_craft/src/xlog_utils_test.rs @@ -24,7 +24,7 @@ fn init_logging() { fn test_end_of_wal(test_name: &str) { use crate::*; - let pg_version = PG_MAJORVERSION[1..3].parse::().unwrap(); + let pg_version = MY_PGVERSION; // Craft some WAL let top_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) @@ -34,7 +34,7 @@ fn test_end_of_wal(test_name: &str) { let cfg = Conf { pg_version, pg_distrib_dir: top_path.join("pg_install"), - datadir: top_path.join(format!("test_output/{}-{PG_MAJORVERSION}", test_name)), + datadir: top_path.join(format!("test_output/{test_name}-{PG_MAJORVERSION}")), }; if cfg.datadir.exists() { fs::remove_dir_all(&cfg.datadir).unwrap(); diff --git a/libs/postgres_ffi_types/Cargo.toml b/libs/postgres_ffi_types/Cargo.toml new file mode 100644 index 0000000000..50c6fc7874 --- /dev/null +++ b/libs/postgres_ffi_types/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "postgres_ffi_types" +version = "0.1.0" +edition.workspace = true +license.workspace = true + +[dependencies] +thiserror.workspace = true +workspace_hack = { version = "0.1", path = "../../workspace_hack" } + +[dev-dependencies] 
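With Conf.pg_version now a PgMajorVersion, constructing a wal_craft configuration looks roughly like the sketch below; the paths are placeholders, and the snippet assumes pg_distrib_dir() still returns anyhow::Result<PathBuf> as before.

use std::path::PathBuf;
use postgres_ffi::PgMajorVersion; // re-exported from postgres_versioninfo

fn build_conf_sketch() -> anyhow::Result<()> {
    let cfg = wal_craft::Conf {
        pg_version: PgMajorVersion::PG17,
        pg_distrib_dir: PathBuf::from("/opt/neon/pg_install"), // placeholder path
        datadir: PathBuf::from("/tmp/wal_craft_datadir"),      // placeholder path
    };
    // No numeric range check remains: v_str() yields the canonical "v17" directory
    // name, so unsupported versions are simply unrepresentable.
    let distrib = cfg.pg_distrib_dir()?; // e.g. ".../pg_install/v17"
    println!("postgres distribution at {}", distrib.display());
    Ok(())
}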
diff --git a/libs/postgres_ffi_types/src/constants.rs b/libs/postgres_ffi_types/src/constants.rs new file mode 100644 index 0000000000..c1a004c5ab --- /dev/null +++ b/libs/postgres_ffi_types/src/constants.rs @@ -0,0 +1,8 @@ +//! Misc constants, copied from PostgreSQL headers. +//! +//! Any constants included here must be the same in all PostgreSQL versions and unlikely to change +//! in the future either! + +// From pg_tablespace_d.h +pub const DEFAULTTABLESPACE_OID: u32 = 1663; +pub const GLOBALTABLESPACE_OID: u32 = 1664; diff --git a/libs/postgres_ffi_types/src/forknum.rs b/libs/postgres_ffi_types/src/forknum.rs new file mode 100644 index 0000000000..9b225d8ce5 --- /dev/null +++ b/libs/postgres_ffi_types/src/forknum.rs @@ -0,0 +1,36 @@ +// Fork numbers, from relpath.h +pub const MAIN_FORKNUM: u8 = 0; +pub const FSM_FORKNUM: u8 = 1; +pub const VISIBILITYMAP_FORKNUM: u8 = 2; +pub const INIT_FORKNUM: u8 = 3; + +#[derive(Debug, Clone, thiserror::Error, PartialEq, Eq)] +pub enum FilePathError { + #[error("invalid relation fork name")] + InvalidForkName, + #[error("invalid relation data file name")] + InvalidFileName, +} + +/// Convert Postgres relation file's fork suffix to fork number. +pub fn forkname_to_number(forkname: Option<&str>) -> Result { + match forkname { + // "main" is not in filenames, it's implicit if the fork name is not present + None => Ok(MAIN_FORKNUM), + Some("fsm") => Ok(FSM_FORKNUM), + Some("vm") => Ok(VISIBILITYMAP_FORKNUM), + Some("init") => Ok(INIT_FORKNUM), + Some(_) => Err(FilePathError::InvalidForkName), + } +} + +/// Convert Postgres fork number to the right suffix of the relation data file. +pub fn forknumber_to_name(forknum: u8) -> Option<&'static str> { + match forknum { + MAIN_FORKNUM => None, + FSM_FORKNUM => Some("fsm"), + VISIBILITYMAP_FORKNUM => Some("vm"), + INIT_FORKNUM => Some("init"), + _ => Some("UNKNOWN FORKNUM"), + } +} diff --git a/libs/postgres_ffi_types/src/lib.rs b/libs/postgres_ffi_types/src/lib.rs new file mode 100644 index 0000000000..84ef499b9f --- /dev/null +++ b/libs/postgres_ffi_types/src/lib.rs @@ -0,0 +1,13 @@ +//! This package contains some PostgreSQL constants and datatypes that are the same in all versions +//! of PostgreSQL and unlikely to change in the future either. These could be derived from the +//! PostgreSQL headers with 'bindgen', but in order to avoid proliferating the dependency to bindgen +//! and the PostgreSQL C headers to all services, we prefer to have this small stand-alone crate for +//! them instead. +//! +//! Be mindful in what you add here, as these types are deeply ingrained in the APIs. 
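As a quick illustration of what the new crate buys downstream users: a consumer that only needs to map relation fork names can do so without the bindgen-backed postgres_ffi dependency. A minimal sketch using just the helpers added above:

use postgres_ffi_types::forknum::{
    INIT_FORKNUM, MAIN_FORKNUM, forkname_to_number, forknumber_to_name,
};

fn main() {
    // "main" forks carry no suffix in relation file names, so the fork name is None.
    assert_eq!(forkname_to_number(None), Ok(MAIN_FORKNUM));
    assert_eq!(forkname_to_number(Some("init")), Ok(INIT_FORKNUM));
    // Round-tripping a fork number back to its file-name suffix.
    assert_eq!(forknumber_to_name(INIT_FORKNUM), Some("init"));
    assert_eq!(forknumber_to_name(MAIN_FORKNUM), None);
}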
+ +pub mod constants; +pub mod forknum; + +pub type Oid = u32; +pub type RepOriginId = u16; diff --git a/libs/postgres_initdb/Cargo.toml b/libs/postgres_initdb/Cargo.toml index 1605279bce..5b3b0cd936 100644 --- a/libs/postgres_initdb/Cargo.toml +++ b/libs/postgres_initdb/Cargo.toml @@ -9,4 +9,5 @@ anyhow.workspace = true tokio.workspace = true camino.workspace = true thiserror.workspace = true +postgres_versioninfo.workspace = true workspace_hack = { version = "0.1", path = "../../workspace_hack" } diff --git a/libs/postgres_initdb/src/lib.rs b/libs/postgres_initdb/src/lib.rs index ed54696861..a0c6ebef81 100644 --- a/libs/postgres_initdb/src/lib.rs +++ b/libs/postgres_initdb/src/lib.rs @@ -7,12 +7,13 @@ use std::fmt; use camino::Utf8Path; +use postgres_versioninfo::PgMajorVersion; pub struct RunInitdbArgs<'a> { pub superuser: &'a str, pub locale: &'a str, pub initdb_bin: &'a Utf8Path, - pub pg_version: u32, + pub pg_version: PgMajorVersion, pub library_search_path: &'a Utf8Path, pub pgdata: &'a Utf8Path, } @@ -31,15 +32,15 @@ pub enum Error { impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Error::Spawn(e) => write!(f, "Error spawning command: {:?}", e), + Error::Spawn(e) => write!(f, "Error spawning command: {e:?}"), Error::Failed { status, stderr } => write!( f, "Command failed with status {:?}: {}", status, String::from_utf8_lossy(stderr) ), - Error::WaitOutput(e) => write!(f, "Error waiting for command output: {:?}", e), - Error::Other(e) => write!(f, "Error: {:?}", e), + Error::WaitOutput(e) => write!(f, "Error waiting for command output: {e:?}"), + Error::Other(e) => write!(f, "Error: {e:?}"), } } } @@ -79,12 +80,16 @@ pub async fn do_run_initdb(args: RunInitdbArgs<'_>) -> Result<(), Error> { .stderr(std::process::Stdio::piped()); // Before version 14, only the libc provide was available. - if pg_version > 14 { + if pg_version > PgMajorVersion::PG14 { // Version 17 brought with it a builtin locale provider which only provides // C and C.UTF-8. While being safer for collation purposes since it is // guaranteed to be consistent throughout a major release, it is also more // performant. - let locale_provider = if pg_version >= 17 { "builtin" } else { "libc" }; + let locale_provider = if pg_version >= PgMajorVersion::PG17 { + "builtin" + } else { + "libc" + }; initdb_command.args(["--locale-provider", locale_provider]); } diff --git a/libs/postgres_versioninfo/Cargo.toml b/libs/postgres_versioninfo/Cargo.toml new file mode 100644 index 0000000000..cc59f9698d --- /dev/null +++ b/libs/postgres_versioninfo/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "postgres_versioninfo" +version = "0.1.0" +edition = "2024" +license.workspace = true + +[dependencies] +anyhow.workspace = true +thiserror.workspace = true +serde.workspace = true +serde_repr.workspace = true +workspace_hack = { version = "0.1", path = "../../workspace_hack" } diff --git a/libs/postgres_versioninfo/src/lib.rs b/libs/postgres_versioninfo/src/lib.rs new file mode 100644 index 0000000000..286507b654 --- /dev/null +++ b/libs/postgres_versioninfo/src/lib.rs @@ -0,0 +1,175 @@ +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde_repr::{Deserialize_repr, Serialize_repr}; +use std::fmt::{Display, Formatter}; +use std::str::FromStr; + +/// An enum with one variant for each major version of PostgreSQL that we support. 
+/// +#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, Deserialize_repr, Serialize_repr)] +#[repr(u32)] +pub enum PgMajorVersion { + PG14 = 14, + PG15 = 15, + PG16 = 16, + PG17 = 17, + // !!! When you add a new PgMajorVersion, don't forget to update PgMajorVersion::ALL +} + +/// A full PostgreSQL version ID, in MMmmbb numerical format (Major/minor/bugfix) +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +#[repr(transparent)] +pub struct PgVersionId(u32); + +impl PgVersionId { + pub const UNKNOWN: PgVersionId = PgVersionId(0); + + pub fn from_full_pg_version(version: u32) -> PgVersionId { + match version { + 0 => PgVersionId(version), // unknown version + 140000..180000 => PgVersionId(version), + _ => panic!("Invalid full PostgreSQL version ID {version}"), + } + } +} + +impl Display for PgVersionId { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + u32::fmt(&self.0, f) + } +} + +impl Serialize for PgVersionId { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + u32::serialize(&self.0, serializer) + } +} + +impl<'de> Deserialize<'de> for PgVersionId { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + u32::deserialize(deserializer).map(PgVersionId) + } + + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + u32::deserialize_in_place(deserializer, &mut place.0) + } +} + +impl PgMajorVersion { + /// Get the numerical representation of the represented Major Version + pub const fn major_version_num(&self) -> u32 { + match self { + PgMajorVersion::PG14 => 14, + PgMajorVersion::PG15 => 15, + PgMajorVersion::PG16 => 16, + PgMajorVersion::PG17 => 17, + } + } + + /// Get the contents of this version's PG_VERSION file. + /// + /// The PG_VERSION file is used to determine the PostgreSQL version that currently + /// owns the data in a PostgreSQL data directory. + pub fn versionfile_string(&self) -> &'static str { + match self { + PgMajorVersion::PG14 => "14", + PgMajorVersion::PG15 => "15", + PgMajorVersion::PG16 => "16\x0A", + PgMajorVersion::PG17 => "17\x0A", + } + } + + /// Get the v{version} string of this major PostgreSQL version. + /// + /// Because this was hand-coded in various places, this was moved into a shared + /// implementation. + pub fn v_str(&self) -> String { + match self { + PgMajorVersion::PG14 => "v14", + PgMajorVersion::PG15 => "v15", + PgMajorVersion::PG16 => "v16", + PgMajorVersion::PG17 => "v17", + } + .to_string() + } + + /// All currently supported major versions of PostgreSQL. 
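A small sketch of how the two version types are meant to be used together; the version numbers are illustrative only.

use postgres_versioninfo::{PgMajorVersion, PgVersionId};

fn main() {
    // Major versions are a closed enum rather than a bare u32.
    assert_eq!(PgMajorVersion::PG16.major_version_num(), 16);
    assert_eq!(PgMajorVersion::PG16.v_str(), "v16");
    // PG_VERSION file contents differ subtly across majors (v16+ adds a trailing newline).
    assert_eq!(PgMajorVersion::PG15.versionfile_string(), "15");
    assert_eq!(PgMajorVersion::PG16.versionfile_string(), "16\n");
    // Full MMmmbb version IDs keep their own type, e.g. PostgreSQL 17.4 => 170004.
    let full = PgVersionId::from_full_pg_version(170004);
    assert_eq!(full.to_string(), "170004");
}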
+ pub const ALL: &'static [PgMajorVersion] = &[ + PgMajorVersion::PG14, + PgMajorVersion::PG15, + PgMajorVersion::PG16, + PgMajorVersion::PG17, + ]; +} + +impl Display for PgMajorVersion { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + PgMajorVersion::PG14 => "PgMajorVersion::PG14", + PgMajorVersion::PG15 => "PgMajorVersion::PG15", + PgMajorVersion::PG16 => "PgMajorVersion::PG16", + PgMajorVersion::PG17 => "PgMajorVersion::PG17", + }) + } +} + +#[derive(Debug, thiserror::Error)] +#[allow(dead_code)] +pub struct InvalidPgVersion(u32); + +impl Display for InvalidPgVersion { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "InvalidPgVersion({})", self.0) + } +} + +impl TryFrom for PgMajorVersion { + type Error = InvalidPgVersion; + + fn try_from(value: PgVersionId) -> Result { + Ok(match value.0 / 10000 { + 14 => PgMajorVersion::PG14, + 15 => PgMajorVersion::PG15, + 16 => PgMajorVersion::PG16, + 17 => PgMajorVersion::PG17, + _ => return Err(InvalidPgVersion(value.0)), + }) + } +} + +impl From for PgVersionId { + fn from(value: PgMajorVersion) -> Self { + PgVersionId((value as u32) * 10000) + } +} + +#[derive(Debug, PartialEq, Eq, thiserror::Error)] +pub struct PgMajorVersionParseError(String); + +impl Display for PgMajorVersionParseError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "PgMajorVersionParseError({})", self.0) + } +} + +impl FromStr for PgMajorVersion { + type Err = PgMajorVersionParseError; + + fn from_str(s: &str) -> Result { + Ok(match s { + "14" => PgMajorVersion::PG14, + "15" => PgMajorVersion::PG15, + "16" => PgMajorVersion::PG16, + "17" => PgMajorVersion::PG17, + _ => return Err(PgMajorVersionParseError(s.to_string())), + }) + } +} diff --git a/libs/posthog_client_lite/src/background_loop.rs b/libs/posthog_client_lite/src/background_loop.rs index 693d62efc4..08cb0d2264 100644 --- a/libs/posthog_client_lite/src/background_loop.rs +++ b/libs/posthog_client_lite/src/background_loop.rs @@ -1,17 +1,22 @@ //! A background loop that fetches feature flags from PostHog and updates the feature store. -use std::{sync::Arc, time::Duration}; +use std::{ + sync::Arc, + time::{Duration, SystemTime}, +}; use arc_swap::ArcSwap; use tokio_util::sync::CancellationToken; use tracing::{Instrument, info_span}; -use crate::{CaptureEvent, FeatureStore, PostHogClient, PostHogClientConfig}; +use crate::{ + CaptureEvent, FeatureStore, LocalEvaluationResponse, PostHogClient, PostHogClientConfig, +}; /// A background loop that fetches feature flags from PostHog and updates the feature store. pub struct FeatureResolverBackgroundLoop { posthog_client: PostHogClient, - feature_store: ArcSwap, + feature_store: ArcSwap<(SystemTime, Arc)>, cancel: CancellationToken, } @@ -19,11 +24,35 @@ impl FeatureResolverBackgroundLoop { pub fn new(config: PostHogClientConfig, shutdown_pageserver: CancellationToken) -> Self { Self { posthog_client: PostHogClient::new(config), - feature_store: ArcSwap::new(Arc::new(FeatureStore::new())), + feature_store: ArcSwap::new(Arc::new(( + SystemTime::UNIX_EPOCH, + Arc::new(FeatureStore::new()), + ))), cancel: shutdown_pageserver, } } + /// Update the feature store with a new feature flag spec bypassing the normal refresh loop. 
+ pub fn update(&self, spec: String) -> anyhow::Result<()> { + let resp: LocalEvaluationResponse = serde_json::from_str(&spec)?; + self.update_feature_store_nofail(resp, "http_propagate"); + Ok(()) + } + + fn update_feature_store_nofail(&self, resp: LocalEvaluationResponse, source: &'static str) { + let project_id = self.posthog_client.config.project_id.parse::().ok(); + match FeatureStore::new_with_flags(resp.flags, project_id) { + Ok(feature_store) => { + self.feature_store + .store(Arc::new((SystemTime::now(), Arc::new(feature_store)))); + tracing::info!("Feature flag updated from {}", source); + } + Err(e) => { + tracing::warn!("Cannot process feature flag spec from {}: {}", source, e); + } + } + } + pub fn spawn( self: Arc, handle: &tokio::runtime::Handle, @@ -36,7 +65,10 @@ impl FeatureResolverBackgroundLoop { // Main loop of updating the feature flags. handle.spawn( async move { - tracing::info!("Starting PostHog feature resolver"); + tracing::info!( + "Starting PostHog feature resolver with refresh period: {:?}", + refresh_period + ); let mut ticker = tokio::time::interval(refresh_period); ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); loop { @@ -44,6 +76,17 @@ impl FeatureResolverBackgroundLoop { _ = ticker.tick() => {} _ = cancel.cancelled() => break } + { + let last_update = this.feature_store.load().0; + if let Ok(elapsed) = last_update.elapsed() { + if elapsed < refresh_period { + tracing::debug!( + "Skipping feature flag refresh because it's too soon" + ); + continue; + } + } + } let resp = match this .posthog_client .get_feature_flags_local_evaluation() @@ -55,16 +98,7 @@ impl FeatureResolverBackgroundLoop { continue; } }; - let project_id = this.posthog_client.config.project_id.parse::().ok(); - match FeatureStore::new_with_flags(resp.flags, project_id) { - Ok(feature_store) => { - this.feature_store.store(Arc::new(feature_store)); - tracing::info!("Feature flag updated"); - } - Err(e) => { - tracing::warn!("Cannot process feature flag spec: {}", e); - } - } + this.update_feature_store_nofail(resp, "refresh_loop"); } tracing::info!("PostHog feature resolver stopped"); } @@ -89,6 +123,6 @@ impl FeatureResolverBackgroundLoop { } pub fn feature_store(&self) -> Arc { - self.feature_store.load_full() + self.feature_store.load().1.clone() } } diff --git a/libs/posthog_client_lite/src/lib.rs b/libs/posthog_client_lite/src/lib.rs index 730878fb58..d042ee2410 100644 --- a/libs/posthog_client_lite/src/lib.rs +++ b/libs/posthog_client_lite/src/lib.rs @@ -168,15 +168,13 @@ impl FeatureStore { let PostHogFlagFilterPropertyValue::String(provided) = provided else { // Left should be a string return Err(PostHogEvaluationError::Internal(format!( - "The left side of the condition is not a string: {:?}", - provided + "The left side of the condition is not a string: {provided:?}" ))); }; let PostHogFlagFilterPropertyValue::List(requested) = requested else { // Right should be a list of string return Err(PostHogEvaluationError::Internal(format!( - "The right side of the condition is not a list: {:?}", - requested + "The right side of the condition is not a list: {requested:?}" ))); }; Ok(requested.contains(provided)) @@ -185,14 +183,12 @@ impl FeatureStore { let PostHogFlagFilterPropertyValue::String(requested) = requested else { // Right should be a string return Err(PostHogEvaluationError::Internal(format!( - "The right side of the condition is not a string: {:?}", - requested + "The right side of the condition is not a string: {requested:?}" ))); }; let Ok(requested) 
= requested.parse::() else { return Err(PostHogEvaluationError::Internal(format!( - "Can not parse the right side of the condition as a number: {:?}", - requested + "Can not parse the right side of the condition as a number: {requested:?}" ))); }; // Left can either be a number or a string @@ -201,16 +197,14 @@ impl FeatureStore { PostHogFlagFilterPropertyValue::String(provided) => { let Ok(provided) = provided.parse::() else { return Err(PostHogEvaluationError::Internal(format!( - "Can not parse the left side of the condition as a number: {:?}", - provided + "Can not parse the left side of the condition as a number: {provided:?}" ))); }; provided } _ => { return Err(PostHogEvaluationError::Internal(format!( - "The left side of the condition is not a number or a string: {:?}", - provided + "The left side of the condition is not a number or a string: {provided:?}" ))); } }; @@ -218,14 +212,12 @@ impl FeatureStore { "lt" => Ok(provided < requested), "gt" => Ok(provided > requested), op => Err(PostHogEvaluationError::Internal(format!( - "Unsupported operator: {}", - op + "Unsupported operator: {op}" ))), } } _ => Err(PostHogEvaluationError::Internal(format!( - "Unsupported operator: {}", - operator + "Unsupported operator: {operator}" ))), } } @@ -373,8 +365,7 @@ impl FeatureStore { if let Some(flag_config) = self.flags.get(flag_key) { if !flag_config.active { return Err(PostHogEvaluationError::NotAvailable(format!( - "The feature flag is not active: {}", - flag_key + "The feature flag is not active: {flag_key}" ))); } let Some(ref multivariate) = flag_config.filters.multivariate else { @@ -401,8 +392,7 @@ impl FeatureStore { // This should not happen because the rollout percentage always adds up to 100, but just in case that PostHog // returned invalid spec, we return an error. return Err(PostHogEvaluationError::Internal(format!( - "Rollout percentage does not add up to 100: {}", - flag_key + "Rollout percentage does not add up to 100: {flag_key}" ))); } GroupEvaluationResult::Unmatched => continue, @@ -413,8 +403,7 @@ impl FeatureStore { } else { // The feature flag is not available yet Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } @@ -440,8 +429,7 @@ impl FeatureStore { if let Some(flag_config) = self.flags.get(flag_key) { if !flag_config.active { return Err(PostHogEvaluationError::NotAvailable(format!( - "The feature flag is not active: {}", - flag_key + "The feature flag is not active: {flag_key}" ))); } if flag_config.filters.multivariate.is_some() { @@ -456,8 +444,7 @@ impl FeatureStore { match self.evaluate_group(group, hash_on_global_rollout_percentage, properties)? 
{ GroupEvaluationResult::MatchedAndOverride(_) => { return Err(PostHogEvaluationError::Internal(format!( - "Boolean flag cannot have overrides: {}", - flag_key + "Boolean flag cannot have overrides: {flag_key}" ))); } GroupEvaluationResult::MatchedAndEvaluate => { @@ -471,8 +458,7 @@ impl FeatureStore { } else { // The feature flag is not available yet Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } @@ -483,8 +469,7 @@ impl FeatureStore { Ok(flag_config.filters.multivariate.is_none()) } else { Err(PostHogEvaluationError::NotAvailable(format!( - "Not found in the local evaluation spec: {}", - flag_key + "Not found in the local evaluation spec: {flag_key}" ))) } } @@ -559,17 +544,8 @@ impl PostHogClient { self.config.server_api_key.starts_with("phs_") } - /// Fetch the feature flag specs from the server. - /// - /// This is unfortunately an undocumented API at: - /// - - /// - - /// - /// The handling logic in [`FeatureStore`] mostly follows the Python API implementation. - /// See `_compute_flag_locally` in - pub async fn get_feature_flags_local_evaluation( - &self, - ) -> anyhow::Result { + /// Get the raw JSON spec, same as `get_feature_flags_local_evaluation` but without parsing. + pub async fn get_feature_flags_local_evaluation_raw(&self) -> anyhow::Result { // BASE_URL/api/projects/:project_id/feature_flags/local_evaluation // with bearer token of self.server_api_key // OR @@ -603,7 +579,22 @@ impl PostHogClient { body )); } - Ok(serde_json::from_str(&body)?) + Ok(body) + } + + /// Fetch the feature flag specs from the server. + /// + /// This is unfortunately an undocumented API at: + /// - + /// - + /// + /// The handling logic in [`FeatureStore`] mostly follows the Python API implementation. + /// See `_compute_flag_locally` in + pub async fn get_feature_flags_local_evaluation( + &self, + ) -> Result { + let raw = self.get_feature_flags_local_evaluation_raw().await?; + Ok(serde_json::from_str(&raw)?) } /// Capture an event. This will only be used to report the feature flag usage back to PostHog, though diff --git a/libs/pq_proto/src/lib.rs b/libs/pq_proto/src/lib.rs index e7afc64564..482dd9a298 100644 --- a/libs/pq_proto/src/lib.rs +++ b/libs/pq_proto/src/lib.rs @@ -198,7 +198,7 @@ impl fmt::Display for CancelKeyData { // This format is more compact and might work better for logs. 
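Referring back to the background-loop changes above: the new update() entry point lets a spec received over HTTP bypass the refresh loop, and the timestamp stored alongside the FeatureStore keeps the next scheduled refresh from immediately overwriting it. A minimal sketch, assuming background_loop is a public module and that resolver was constructed elsewhere:

use std::sync::Arc;
use posthog_client_lite::background_loop::FeatureResolverBackgroundLoop;

fn apply_propagated_spec(
    resolver: &Arc<FeatureResolverBackgroundLoop>,
    spec_json: String, // raw body, e.g. from get_feature_flags_local_evaluation_raw()
) -> anyhow::Result<()> {
    // Parses the LocalEvaluationResponse and swaps it into the store together with a
    // fresh SystemTime, so the next tick sees a recent update and skips its own fetch.
    resolver.update(spec_json)?;
    // Readers are unchanged: feature_store() clones the Arc<FeatureStore> half of the
    // (timestamp, store) pair.
    let _store = resolver.feature_store();
    Ok(())
}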
f.debug_tuple("CancelKeyData") - .field(&format_args!("{:x}", id)) + .field(&format_args!("{id:x}")) .finish() } } @@ -291,8 +291,7 @@ impl FeMessage { let len = (&buf[1..5]).read_u32::().unwrap(); if len < 4 { return Err(ProtocolError::Protocol(format!( - "invalid message length {}", - len + "invalid message length {len}" ))); } @@ -367,8 +366,7 @@ impl FeStartupPacket { #[allow(clippy::manual_range_contains)] if len < 8 || len > MAX_STARTUP_PACKET_LENGTH { return Err(ProtocolError::Protocol(format!( - "invalid startup packet message length {}", - len + "invalid startup packet message length {len}" ))); } diff --git a/libs/proxy/postgres-protocol2/src/authentication/sasl.rs b/libs/proxy/postgres-protocol2/src/authentication/sasl.rs index a7bf3da20a..b8304f9d8d 100644 --- a/libs/proxy/postgres-protocol2/src/authentication/sasl.rs +++ b/libs/proxy/postgres-protocol2/src/authentication/sasl.rs @@ -308,7 +308,7 @@ impl ScramSha256 { let verifier = match parsed { ServerFinalMessage::Error(e) => { - return Err(io::Error::other(format!("SCRAM error: {}", e))); + return Err(io::Error::other(format!("SCRAM error: {e}"))); } ServerFinalMessage::Verifier(verifier) => verifier, }; @@ -343,10 +343,8 @@ impl<'a> Parser<'a> { match self.it.next() { Some((_, c)) if c == target => Ok(()), Some((i, c)) => { - let m = format!( - "unexpected character at byte {}: expected `{}` but got `{}", - i, target, c - ); + let m = + format!("unexpected character at byte {i}: expected `{target}` but got `{c}"); Err(io::Error::new(io::ErrorKind::InvalidInput, m)) } None => Err(io::Error::new( @@ -412,7 +410,7 @@ impl<'a> Parser<'a> { match self.it.peek() { Some(&(i, _)) => Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unexpected trailing data at byte {}", i), + format!("unexpected trailing data at byte {i}"), )), None => Ok(()), } diff --git a/libs/proxy/postgres-protocol2/src/message/backend.rs b/libs/proxy/postgres-protocol2/src/message/backend.rs index d7eaef9509..3fc9a9335c 100644 --- a/libs/proxy/postgres-protocol2/src/message/backend.rs +++ b/libs/proxy/postgres-protocol2/src/message/backend.rs @@ -211,7 +211,7 @@ impl Message { tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unknown authentication tag `{}`", tag), + format!("unknown authentication tag `{tag}`"), )); } }, @@ -238,7 +238,7 @@ impl Message { tag => { return Err(io::Error::new( io::ErrorKind::InvalidInput, - format!("unknown message tag `{}`", tag), + format!("unknown message tag `{tag}`"), )); } }; diff --git a/libs/proxy/postgres-types2/src/lib.rs b/libs/proxy/postgres-types2/src/lib.rs index 7c9874bda3..c98c45636b 100644 --- a/libs/proxy/postgres-types2/src/lib.rs +++ b/libs/proxy/postgres-types2/src/lib.rs @@ -46,7 +46,7 @@ impl fmt::Display for Type { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self.schema() { "public" | "pg_catalog" => {} - schema => write!(fmt, "{}.", schema)?, + schema => write!(fmt, "{schema}.")?, } fmt.write_str(self.name()) } diff --git a/libs/proxy/tokio-postgres2/src/cancel_query.rs b/libs/proxy/tokio-postgres2/src/cancel_query.rs index 4c2a5ef50f..94fbf333ed 100644 --- a/libs/proxy/tokio-postgres2/src/cancel_query.rs +++ b/libs/proxy/tokio-postgres2/src/cancel_query.rs @@ -1,5 +1,3 @@ -use std::io; - use tokio::net::TcpStream; use crate::client::SocketConfig; @@ -8,7 +6,7 @@ use crate::tls::MakeTlsConnect; use crate::{Error, cancel_query_raw, connect_socket}; pub(crate) async fn cancel_query( - config: Option, + config: SocketConfig, ssl_mode: SslMode, tls: 
T, process_id: i32, @@ -17,16 +15,6 @@ pub(crate) async fn cancel_query( where T: MakeTlsConnect, { - let config = match config { - Some(config) => config, - None => { - return Err(Error::connect(io::Error::new( - io::ErrorKind::InvalidInput, - "unknown host", - ))); - } - }; - let hostname = match &config.host { Host::Tcp(host) => &**host, }; diff --git a/libs/proxy/tokio-postgres2/src/cancel_token.rs b/libs/proxy/tokio-postgres2/src/cancel_token.rs index f6526395ee..c5566b4ad9 100644 --- a/libs/proxy/tokio-postgres2/src/cancel_token.rs +++ b/libs/proxy/tokio-postgres2/src/cancel_token.rs @@ -7,11 +7,16 @@ use crate::config::SslMode; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Error, cancel_query, cancel_query_raw}; -/// The capability to request cancellation of in-progress queries on a -/// connection. -#[derive(Clone, Serialize, Deserialize)] +/// A cancellation token that allows easy cancellation of a query. +#[derive(Clone)] pub struct CancelToken { - pub socket_config: Option, + pub socket_config: SocketConfig, + pub raw: RawCancelToken, +} + +/// A raw cancellation token that allows cancellation of a query, given a fresh connection to postgres. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RawCancelToken { pub ssl_mode: SslMode, pub process_id: i32, pub secret_key: i32, @@ -36,14 +41,16 @@ impl CancelToken { { cancel_query::cancel_query( self.socket_config.clone(), - self.ssl_mode, + self.raw.ssl_mode, tls, - self.process_id, - self.secret_key, + self.raw.process_id, + self.raw.secret_key, ) .await } +} +impl RawCancelToken { /// Like `cancel_query`, but uses a stream which is already connected to the server rather than opening a new /// connection itself. pub async fn cancel_query_raw(&self, stream: S, tls: T) -> Result<(), Error> diff --git a/libs/proxy/tokio-postgres2/src/client.rs b/libs/proxy/tokio-postgres2/src/client.rs index a7edfc076a..41b22e35b6 100644 --- a/libs/proxy/tokio-postgres2/src/client.rs +++ b/libs/proxy/tokio-postgres2/src/client.rs @@ -12,6 +12,7 @@ use postgres_protocol2::message::frontend; use serde::{Deserialize, Serialize}; use tokio::sync::mpsc; +use crate::cancel_token::RawCancelToken; use crate::codec::{BackendMessages, FrontendMessage}; use crate::config::{Host, SslMode}; use crate::query::RowStream; @@ -331,10 +332,12 @@ impl Client { /// connection associated with this client. pub fn cancel_token(&self) -> CancelToken { CancelToken { - socket_config: Some(self.socket_config.clone()), - ssl_mode: self.ssl_mode, - process_id: self.process_id, - secret_key: self.secret_key, + socket_config: self.socket_config.clone(), + raw: RawCancelToken { + ssl_mode: self.ssl_mode, + process_id: self.process_id, + secret_key: self.secret_key, + }, } } diff --git a/libs/proxy/tokio-postgres2/src/config.rs b/libs/proxy/tokio-postgres2/src/config.rs index 243a5bc725..961cbc923e 100644 --- a/libs/proxy/tokio-postgres2/src/config.rs +++ b/libs/proxy/tokio-postgres2/src/config.rs @@ -12,7 +12,9 @@ use tokio::net::TcpStream; use crate::connect::connect; use crate::connect_raw::{RawConnection, connect_raw}; -use crate::tls::{MakeTlsConnect, TlsConnect}; +use crate::connect_tls::connect_tls; +use crate::maybe_tls_stream::MaybeTlsStream; +use crate::tls::{MakeTlsConnect, TlsConnect, TlsStream}; use crate::{Client, Connection, Error}; /// TLS configuration. 
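To make the CancelToken split above concrete: only RawCancelToken derives Serialize/Deserialize, so a caller that needs to hand a cancellation key to another process ships just the raw half and keeps the socket config local. A sketch, assuming client is an already connected Client; serde_json is used purely for illustration.

use tokio_postgres2::{Client, RawCancelToken};

fn stash_cancel_token(client: &Client) -> serde_json::Result<String> {
    let token = client.cancel_token();
    // Only the process id, secret key and ssl mode travel across the wire.
    serde_json::to_string(&token.raw)
}

fn restore_cancel_token(json: &str) -> serde_json::Result<RawCancelToken> {
    // The restored token can later cancel the query via cancel_query_raw() over a
    // freshly opened connection to the same server.
    serde_json::from_str(json)
}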
@@ -238,7 +240,7 @@ impl Config { connect(tls, self).await } - pub async fn connect_raw( + pub async fn tls_and_authenticate( &self, stream: S, tls: T, @@ -247,7 +249,19 @@ impl Config { S: AsyncRead + AsyncWrite + Unpin, T: TlsConnect, { - connect_raw(stream, tls, self).await + let stream = connect_tls(stream, self.ssl_mode, tls).await?; + connect_raw(stream, self).await + } + + pub async fn authenticate( + &self, + stream: MaybeTlsStream, + ) -> Result, Error> + where + S: AsyncRead + AsyncWrite + Unpin, + T: TlsStream + Unpin, + { + connect_raw(stream, self).await } } diff --git a/libs/proxy/tokio-postgres2/src/connect.rs b/libs/proxy/tokio-postgres2/src/connect.rs index f7bc863337..4a07eccf9a 100644 --- a/libs/proxy/tokio-postgres2/src/connect.rs +++ b/libs/proxy/tokio-postgres2/src/connect.rs @@ -9,6 +9,7 @@ use crate::codec::BackendMessage; use crate::config::Host; use crate::connect_raw::connect_raw; use crate::connect_socket::connect_socket; +use crate::connect_tls::connect_tls; use crate::tls::{MakeTlsConnect, TlsConnect}; use crate::{Client, Config, Connection, Error, RawConnection}; @@ -44,13 +45,14 @@ where T: TlsConnect, { let socket = connect_socket(host_addr, host, port, config.connect_timeout).await?; + let stream = connect_tls(socket, config.ssl_mode, tls).await?; let RawConnection { stream, parameters, delayed_notice, process_id, secret_key, - } = connect_raw(socket, tls, config).await?; + } = connect_raw(stream, config).await?; let socket_config = SocketConfig { host_addr, diff --git a/libs/proxy/tokio-postgres2/src/connect_raw.rs b/libs/proxy/tokio-postgres2/src/connect_raw.rs index 20dc538cf2..b89a600a2e 100644 --- a/libs/proxy/tokio-postgres2/src/connect_raw.rs +++ b/libs/proxy/tokio-postgres2/src/connect_raw.rs @@ -16,9 +16,8 @@ use tokio_util::codec::Framed; use crate::Error; use crate::codec::{BackendMessage, BackendMessages, FrontendMessage, PostgresCodec}; use crate::config::{self, AuthKeys, Config}; -use crate::connect_tls::connect_tls; use crate::maybe_tls_stream::MaybeTlsStream; -use crate::tls::{TlsConnect, TlsStream}; +use crate::tls::TlsStream; pub struct StartupStream { inner: Framed, PostgresCodec>, @@ -87,16 +86,13 @@ pub struct RawConnection { } pub async fn connect_raw( - stream: S, - tls: T, + stream: MaybeTlsStream, config: &Config, -) -> Result, Error> +) -> Result, Error> where S: AsyncRead + AsyncWrite + Unpin, - T: TlsConnect, + T: TlsStream + Unpin, { - let stream = connect_tls(stream, config.ssl_mode, tls).await?; - let mut stream = StartupStream { inner: Framed::new(stream, PostgresCodec), buf: BackendMessages::empty(), diff --git a/libs/proxy/tokio-postgres2/src/error/mod.rs b/libs/proxy/tokio-postgres2/src/error/mod.rs index 8149bceeb9..5309bce17e 100644 --- a/libs/proxy/tokio-postgres2/src/error/mod.rs +++ b/libs/proxy/tokio-postgres2/src/error/mod.rs @@ -332,10 +332,10 @@ impl fmt::Display for DbError { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{}: {}", self.severity, self.message)?; if let Some(detail) = &self.detail { - write!(fmt, "\nDETAIL: {}", detail)?; + write!(fmt, "\nDETAIL: {detail}")?; } if let Some(hint) = &self.hint { - write!(fmt, "\nHINT: {}", hint)?; + write!(fmt, "\nHINT: {hint}")?; } Ok(()) } @@ -398,9 +398,9 @@ impl fmt::Display for Error { Kind::Io => fmt.write_str("error communicating with the server")?, Kind::UnexpectedMessage => fmt.write_str("unexpected message from server")?, Kind::Tls => fmt.write_str("error performing TLS handshake")?, - Kind::ToSql(idx) => write!(fmt, "error 
serializing parameter {}", idx)?, - Kind::FromSql(idx) => write!(fmt, "error deserializing column {}", idx)?, - Kind::Column(column) => write!(fmt, "invalid column `{}`", column)?, + Kind::ToSql(idx) => write!(fmt, "error serializing parameter {idx}")?, + Kind::FromSql(idx) => write!(fmt, "error deserializing column {idx}")?, + Kind::Column(column) => write!(fmt, "invalid column `{column}`")?, Kind::Closed => fmt.write_str("connection closed")?, Kind::Db => fmt.write_str("db error")?, Kind::Parse => fmt.write_str("error parsing response from server")?, @@ -411,7 +411,7 @@ impl fmt::Display for Error { Kind::Timeout => fmt.write_str("timeout waiting for server")?, }; if let Some(ref cause) = self.0.cause { - write!(fmt, ": {}", cause)?; + write!(fmt, ": {cause}")?; } Ok(()) } diff --git a/libs/proxy/tokio-postgres2/src/lib.rs b/libs/proxy/tokio-postgres2/src/lib.rs index 9556070ed5..791c93b972 100644 --- a/libs/proxy/tokio-postgres2/src/lib.rs +++ b/libs/proxy/tokio-postgres2/src/lib.rs @@ -3,7 +3,7 @@ use postgres_protocol2::message::backend::ReadyForQueryBody; -pub use crate::cancel_token::CancelToken; +pub use crate::cancel_token::{CancelToken, RawCancelToken}; pub use crate::client::{Client, SocketConfig}; pub use crate::config::Config; pub use crate::connect_raw::RawConnection; diff --git a/libs/proxy/tokio-postgres2/src/row.rs b/libs/proxy/tokio-postgres2/src/row.rs index 5fc955eef4..36d578558f 100644 --- a/libs/proxy/tokio-postgres2/src/row.rs +++ b/libs/proxy/tokio-postgres2/src/row.rs @@ -156,7 +156,7 @@ impl Row { { match self.get_inner(&idx) { Ok(ok) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), + Err(err) => panic!("error retrieving column {idx}: {err}"), } } @@ -274,7 +274,7 @@ impl SimpleQueryRow { { match self.get_inner(&idx) { Ok(ok) => ok, - Err(err) => panic!("error retrieving column {}: {}", idx, err), + Err(err) => panic!("error retrieving column {idx}: {err}"), } } diff --git a/libs/remote_storage/src/local_fs.rs b/libs/remote_storage/src/local_fs.rs index 8320d7afdc..30690b1bdb 100644 --- a/libs/remote_storage/src/local_fs.rs +++ b/libs/remote_storage/src/local_fs.rs @@ -400,7 +400,7 @@ impl RemoteStorage for LocalFs { key }; - let relative_key = format!("{}", relative_key); + let relative_key = format!("{relative_key}"); if relative_key.contains(REMOTE_STORAGE_PREFIX_SEPARATOR) { let first_part = relative_key .split(REMOTE_STORAGE_PREFIX_SEPARATOR) @@ -594,13 +594,9 @@ impl RemoteStorage for LocalFs { let from_path = from.with_base(&self.storage_root); let to_path = to.with_base(&self.storage_root); create_target_directory(&to_path).await?; - fs::copy(&from_path, &to_path).await.with_context(|| { - format!( - "Failed to copy file from '{from_path}' to '{to_path}'", - from_path = from_path, - to_path = to_path - ) - })?; + fs::copy(&from_path, &to_path) + .await + .with_context(|| format!("Failed to copy file from '{from_path}' to '{to_path}'"))?; Ok(()) } @@ -1183,7 +1179,7 @@ mod fs_tests { .write(true) .create_new(true) .open(path)?; - write!(file_for_writing, "{}", contents)?; + write!(file_for_writing, "{contents}")?; drop(file_for_writing); let file_size = path.metadata()?.len() as usize; Ok(( diff --git a/libs/safekeeper_api/Cargo.toml b/libs/safekeeper_api/Cargo.toml index d9d080e8fe..928e583b0b 100644 --- a/libs/safekeeper_api/Cargo.toml +++ b/libs/safekeeper_api/Cargo.toml @@ -10,6 +10,7 @@ const_format.workspace = true serde.workspace = true serde_json.workspace = true postgres_ffi.workspace = true +postgres_versioninfo.workspace = 
true pq_proto.workspace = true tokio.workspace = true utils.workspace = true diff --git a/libs/safekeeper_api/src/lib.rs b/libs/safekeeper_api/src/lib.rs index fa86523ad7..ba0bfee971 100644 --- a/libs/safekeeper_api/src/lib.rs +++ b/libs/safekeeper_api/src/lib.rs @@ -8,6 +8,8 @@ pub mod membership; /// Public API types pub mod models; +pub use postgres_versioninfo::{PgMajorVersion, PgVersionId}; + /// Consensus logical timestamp. Note: it is a part of sk control file. pub type Term = u64; /// With this term timeline is created initially. It @@ -20,7 +22,7 @@ pub const INITIAL_TERM: Term = 0; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ServerInfo { /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: SystemId, pub wal_seg_size: u32, } diff --git a/libs/safekeeper_api/src/membership.rs b/libs/safekeeper_api/src/membership.rs index 3d4d17096e..1751c54f6a 100644 --- a/libs/safekeeper_api/src/membership.rs +++ b/libs/safekeeper_api/src/membership.rs @@ -193,10 +193,10 @@ mod tests { }) .unwrap(); - println!("members: {}", members); + println!("members: {members}"); let j = serde_json::to_string(&members).expect("failed to serialize"); - println!("members json: {}", j); + println!("members json: {j}"); assert_eq!( j, r#"[{"id":42,"host":"lala.org","pg_port":5432},{"id":43,"host":"bubu.org","pg_port":5432}]"# diff --git a/libs/safekeeper_api/src/models.rs b/libs/safekeeper_api/src/models.rs index fd05f6fda3..5c1ee41f7b 100644 --- a/libs/safekeeper_api/src/models.rs +++ b/libs/safekeeper_api/src/models.rs @@ -4,6 +4,7 @@ use std::net::SocketAddr; use pageserver_api::shard::ShardIdentity; use postgres_ffi::TimestampTz; +use postgres_versioninfo::PgVersionId; use serde::{Deserialize, Serialize}; use tokio::time::Instant; use utils::id::{NodeId, TenantId, TenantTimelineId, TimelineId}; @@ -23,8 +24,7 @@ pub struct TimelineCreateRequest { pub tenant_id: TenantId, pub timeline_id: TimelineId, pub mconf: Configuration, - /// In the PG_VERSION_NUM macro format, like 140017. - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: Option, // By default WAL_SEGMENT_SIZE pub wal_seg_size: Option, diff --git a/libs/utils/src/error.rs b/libs/utils/src/error.rs index 7ce203e918..6fa86916c1 100644 --- a/libs/utils/src/error.rs +++ b/libs/utils/src/error.rs @@ -41,7 +41,7 @@ pub fn report_compact_sources(e: &E) -> impl std::fmt::Dis // why is E a generic parameter here? 
hope that rustc will see through a default // Error::source implementation and leave the following out if there cannot be any // sources: - Sources(self.0.source()).try_for_each(|src| write!(f, ": {}", src)) + Sources(self.0.source()).try_for_each(|src| write!(f, ": {src}")) } } diff --git a/libs/utils/src/generation.rs b/libs/utils/src/generation.rs index b5e4a4644a..8a3bef914a 100644 --- a/libs/utils/src/generation.rs +++ b/libs/utils/src/generation.rs @@ -135,7 +135,7 @@ impl Debug for Generation { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Valid(v) => { - write!(f, "{:08x}", v) + write!(f, "{v:08x}") } Self::None => { write!(f, "") diff --git a/libs/utils/src/id.rs b/libs/utils/src/id.rs index 68cb1f0209..e3037aec21 100644 --- a/libs/utils/src/id.rs +++ b/libs/utils/src/id.rs @@ -280,7 +280,7 @@ impl TryFrom> for TimelineId { value .unwrap_or_default() .parse::() - .with_context(|| format!("Could not parse timeline id from {:?}", value)) + .with_context(|| format!("Could not parse timeline id from {value:?}")) } } diff --git a/libs/utils/src/postgres_client.rs b/libs/utils/src/postgres_client.rs index 4167839e28..7596fefe38 100644 --- a/libs/utils/src/postgres_client.rs +++ b/libs/utils/src/postgres_client.rs @@ -89,7 +89,7 @@ pub fn wal_stream_connection_config( .set_password(args.auth_token.map(|s| s.to_owned())); if let Some(availability_zone) = args.availability_zone { - connstr = connstr.extend_options([format!("availability_zone={}", availability_zone)]); + connstr = connstr.extend_options([format!("availability_zone={availability_zone}")]); } Ok(connstr) diff --git a/libs/utils/src/shard.rs b/libs/utils/src/shard.rs index c8c410a725..f2b81373e2 100644 --- a/libs/utils/src/shard.rs +++ b/libs/utils/src/shard.rs @@ -196,7 +196,7 @@ impl std::fmt::Display for TenantShardId { impl std::fmt::Debug for TenantShardId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Debug is the same as Display: the compact hex representation - write!(f, "{}", self) + write!(f, "{self}") } } @@ -284,7 +284,7 @@ impl std::fmt::Display for ShardIndex { impl std::fmt::Debug for ShardIndex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Debug is the same as Display: the compact hex representation - write!(f, "{}", self) + write!(f, "{self}") } } diff --git a/libs/utils/src/signals.rs b/libs/utils/src/signals.rs index 426bb65916..bdaa3cb665 100644 --- a/libs/utils/src/signals.rs +++ b/libs/utils/src/signals.rs @@ -29,7 +29,7 @@ impl ShutdownSignals { SIGINT => Signal::Interrupt, SIGTERM => Signal::Terminate, SIGQUIT => Signal::Quit, - other => panic!("unknown signal: {}", other), + other => panic!("unknown signal: {other}"), }; handler(signal)?; diff --git a/libs/vm_monitor/src/dispatcher.rs b/libs/vm_monitor/src/dispatcher.rs index 7b7201ab77..7bd6adc2f8 100644 --- a/libs/vm_monitor/src/dispatcher.rs +++ b/libs/vm_monitor/src/dispatcher.rs @@ -90,8 +90,7 @@ impl Dispatcher { Err(e) => { sink.send(Message::Text(Utf8Bytes::from( serde_json::to_string(&ProtocolResponse::Error(format!( - "Received protocol version range {} which does not overlap with {}", - agent_range, monitor_range + "Received protocol version range {agent_range} which does not overlap with {monitor_range}" ))) .unwrap(), ))) diff --git a/libs/vm_monitor/src/filecache.rs b/libs/vm_monitor/src/filecache.rs index bc42347e5a..55bbdea169 100644 --- a/libs/vm_monitor/src/filecache.rs +++ b/libs/vm_monitor/src/filecache.rs @@ -285,7 +285,7 @@ impl 
FileCacheState { // why we're constructing the query here. self.client .query( - &format!("ALTER SYSTEM SET neon.file_cache_size_limit = {};", num_mb), + &format!("ALTER SYSTEM SET neon.file_cache_size_limit = {num_mb};"), &[], ) .await diff --git a/libs/wal_decoder/Cargo.toml b/libs/wal_decoder/Cargo.toml index cb0ef4b00d..600ef091f5 100644 --- a/libs/wal_decoder/Cargo.toml +++ b/libs/wal_decoder/Cargo.toml @@ -14,6 +14,7 @@ bytes.workspace = true pageserver_api.workspace = true prost.workspace = true postgres_ffi.workspace = true +postgres_ffi_types.workspace = true serde.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["io-util"] } diff --git a/libs/wal_decoder/benches/bench_interpret_wal.rs b/libs/wal_decoder/benches/bench_interpret_wal.rs index ed6ba4d267..e3956eca05 100644 --- a/libs/wal_decoder/benches/bench_interpret_wal.rs +++ b/libs/wal_decoder/benches/bench_interpret_wal.rs @@ -10,7 +10,7 @@ use futures::StreamExt; use futures::stream::FuturesUnordered; use pageserver_api::shard::{ShardIdentity, ShardStripeSize}; use postgres_ffi::waldecoder::WalStreamDecoder; -use postgres_ffi::{MAX_SEND_SIZE, WAL_SEGMENT_SIZE}; +use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, WAL_SEGMENT_SIZE}; use pprof::criterion::{Output, PProfProfiler}; use remote_storage::{ DownloadOpts, GenericRemoteStorage, ListingMode, RemoteStorageConfig, RemoteStorageKind, @@ -64,7 +64,7 @@ async fn download_bench_data( let temp_dir_parent: Utf8PathBuf = env::current_dir().unwrap().try_into()?; let temp_dir = camino_tempfile::tempdir_in(temp_dir_parent)?; - eprintln!("Downloading benchmark data to {:?}", temp_dir); + eprintln!("Downloading benchmark data to {temp_dir:?}"); let listing = client .list(None, ListingMode::NoDelimiter, None, cancel) @@ -115,12 +115,12 @@ struct BenchmarkData { #[derive(Deserialize)] struct BenchmarkMetadata { - pg_version: u32, + pg_version: PgMajorVersion, start_lsn: Lsn, } async fn load_bench_data(path: &Utf8Path, input_size: usize) -> anyhow::Result { - eprintln!("Loading benchmark data from {:?}", path); + eprintln!("Loading benchmark data from {path:?}"); let mut entries = tokio::fs::read_dir(path).await?; let mut ordered_segment_paths = Vec::new(); diff --git a/libs/wal_decoder/build.rs b/libs/wal_decoder/build.rs index d5b7ad02ad..e8acb52256 100644 --- a/libs/wal_decoder/build.rs +++ b/libs/wal_decoder/build.rs @@ -6,6 +6,6 @@ fn main() -> Result<(), Box> { // the build then. Anyway, per cargo docs build script shouldn't output to // anywhere but $OUT_DIR. 
tonic_build::compile_protos("proto/interpreted_wal.proto") - .unwrap_or_else(|e| panic!("failed to compile protos {:?}", e)); + .unwrap_or_else(|e| panic!("failed to compile protos {e:?}")); Ok(()) } diff --git a/libs/wal_decoder/src/decoder.rs b/libs/wal_decoder/src/decoder.rs index cb0835e894..0843eb35bf 100644 --- a/libs/wal_decoder/src/decoder.rs +++ b/libs/wal_decoder/src/decoder.rs @@ -7,9 +7,9 @@ use bytes::{Buf, Bytes}; use pageserver_api::key::rel_block_to_key; use pageserver_api::reltag::{RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; -use postgres_ffi::pg_constants; -use postgres_ffi::relfile_utils::VISIBILITYMAP_FORKNUM; use postgres_ffi::walrecord::*; +use postgres_ffi::{PgMajorVersion, pg_constants}; +use postgres_ffi_types::forknum::VISIBILITYMAP_FORKNUM; use utils::lsn::Lsn; use crate::models::*; @@ -24,7 +24,7 @@ impl InterpretedWalRecord { buf: Bytes, shards: &[ShardIdentity], next_record_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { let mut decoded = DecodedWALRecord::default(); decode_wal_record(buf, &mut decoded, pg_version)?; @@ -78,7 +78,7 @@ impl MetadataRecord { decoded: &DecodedWALRecord, shard_records: &mut HashMap, next_record_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<()> { // Note: this doesn't actually copy the bytes since // the [`Bytes`] type implements it via a level of indirection. @@ -193,7 +193,7 @@ impl MetadataRecord { fn decode_heapam_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { // Handle VM bit updates that are implicitly part of heap records. @@ -205,7 +205,7 @@ impl MetadataRecord { let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS; match pg_version { - 14 => { + PgMajorVersion::PG14 => { if decoded.xl_rmid == pg_constants::RM_HEAP_ID { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; @@ -272,7 +272,7 @@ impl MetadataRecord { anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); } } - 15 => { + PgMajorVersion::PG15 => { if decoded.xl_rmid == pg_constants::RM_HEAP_ID { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; @@ -339,7 +339,7 @@ impl MetadataRecord { anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); } } - 16 => { + PgMajorVersion::PG16 => { if decoded.xl_rmid == pg_constants::RM_HEAP_ID { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; @@ -406,7 +406,7 @@ impl MetadataRecord { anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); } } - 17 => { + PgMajorVersion::PG17 => { if decoded.xl_rmid == pg_constants::RM_HEAP_ID { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; @@ -473,7 +473,6 @@ impl MetadataRecord { anyhow::bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid); } } - _ => {} } if new_heap_blkno.is_some() || old_heap_blkno.is_some() { @@ -500,7 +499,7 @@ impl MetadataRecord { fn decode_neonmgr_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { // Handle VM bit updates that are implicitly part of heap records. 
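Switching the decode_*_record helpers from a raw u32 to postgres_ffi::PgMajorVersion buys compile-time exhaustiveness: once the catch-all `_ => {}` arm is gone, adding a future major version forces every version match to be revisited instead of silently falling through. A minimal, self-contained sketch of the pattern, using a local stand-in enum rather than the real postgres_ffi type, shaped like the decode_neonmgr_record hunk that follows:

```rust
// Minimal sketch, not workspace code: `MajorVersion` is a local stand-in for
// postgres_ffi::PgMajorVersion, so this compiles without the workspace crates.
#[derive(Clone, Copy, Debug)]
enum MajorVersion {
    V14,
    V15,
    V16,
    V17,
}

/// Shaped like the decode_neonmgr_record change below: the Neon RMGR is only
/// decoded for v16/v17, older versions are rejected explicitly, and there is no
/// `_ => {}` arm, so adding a future variant becomes a compile error here.
fn neon_rmgr_supported(version: MajorVersion) -> Result<(), String> {
    match version {
        MajorVersion::V16 | MajorVersion::V17 => Ok(()),
        MajorVersion::V14 | MajorVersion::V15 => {
            Err(format!("Neon RMGR has no known compatibility with {version:?}"))
        }
    }
}

fn main() {
    assert!(neon_rmgr_supported(MajorVersion::V17).is_ok());
    assert!(neon_rmgr_supported(MajorVersion::V14).is_err());
}
```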
@@ -514,7 +513,7 @@ impl MetadataRecord { assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID); match pg_version { - 16 | 17 => { + PgMajorVersion::PG16 | PgMajorVersion::PG17 => { let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK; match info { @@ -574,7 +573,7 @@ impl MetadataRecord { info => anyhow::bail!("Unknown WAL record type for Neon RMGR: {}", info), } } - _ => anyhow::bail!( + PgMajorVersion::PG15 | PgMajorVersion::PG14 => anyhow::bail!( "Neon RMGR has no known compatibility with PostgreSQL version {}", pg_version ), @@ -629,116 +628,121 @@ impl MetadataRecord { fn decode_dbase_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { // TODO: Refactor this to avoid the duplication between postgres versions. let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK; tracing::debug!(%info, %pg_version, "handle RM_DBASE_ID"); - if pg_version == 14 { - if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE { - let createdb = XlCreateDatabase::decode(buf); - tracing::debug!("XLOG_DBASE_CREATE v14"); + match pg_version { + PgMajorVersion::PG14 => { + if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE { + let createdb = XlCreateDatabase::decode(buf); + tracing::debug!("XLOG_DBASE_CREATE v14"); - let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { - db_id: createdb.db_id, - tablespace_id: createdb.tablespace_id, - src_db_id: createdb.src_db_id, - src_tablespace_id: createdb.src_tablespace_id, - })); + let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { + db_id: createdb.db_id, + tablespace_id: createdb.tablespace_id, + src_db_id: createdb.src_db_id, + src_tablespace_id: createdb.src_tablespace_id, + })); - return Ok(Some(record)); - } else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP { - let dropdb = XlDropDatabase::decode(buf); + return Ok(Some(record)); + } else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP { + let dropdb = XlDropDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { - db_id: dropdb.db_id, - tablespace_ids: dropdb.tablespace_ids, - })); + let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { + db_id: dropdb.db_id, + tablespace_ids: dropdb.tablespace_ids, + })); - return Ok(Some(record)); + return Ok(Some(record)); + } } - } else if pg_version == 15 { - if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG { - tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); - } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY { - // The XLOG record was renamed between v14 and v15, - // but the record format is the same. - // So we can reuse XlCreateDatabase here. - tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); + PgMajorVersion::PG15 => { + if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG { + tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); + } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY { + // The XLOG record was renamed between v14 and v15, + // but the record format is the same. + // So we can reuse XlCreateDatabase here. 
+ tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); - let createdb = XlCreateDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { - db_id: createdb.db_id, - tablespace_id: createdb.tablespace_id, - src_db_id: createdb.src_db_id, - src_tablespace_id: createdb.src_tablespace_id, - })); + let createdb = XlCreateDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { + db_id: createdb.db_id, + tablespace_id: createdb.tablespace_id, + src_db_id: createdb.src_db_id, + src_tablespace_id: createdb.src_tablespace_id, + })); - return Ok(Some(record)); - } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP { - let dropdb = XlDropDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { - db_id: dropdb.db_id, - tablespace_ids: dropdb.tablespace_ids, - })); + return Ok(Some(record)); + } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP { + let dropdb = XlDropDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { + db_id: dropdb.db_id, + tablespace_ids: dropdb.tablespace_ids, + })); - return Ok(Some(record)); + return Ok(Some(record)); + } } - } else if pg_version == 16 { - if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG { - tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); - } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY { - // The XLOG record was renamed between v14 and v15, - // but the record format is the same. - // So we can reuse XlCreateDatabase here. - tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); + PgMajorVersion::PG16 => { + if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG { + tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); + } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY { + // The XLOG record was renamed between v14 and v15, + // but the record format is the same. + // So we can reuse XlCreateDatabase here. + tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); - let createdb = XlCreateDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { - db_id: createdb.db_id, - tablespace_id: createdb.tablespace_id, - src_db_id: createdb.src_db_id, - src_tablespace_id: createdb.src_tablespace_id, - })); + let createdb = XlCreateDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { + db_id: createdb.db_id, + tablespace_id: createdb.tablespace_id, + src_db_id: createdb.src_db_id, + src_tablespace_id: createdb.src_tablespace_id, + })); - return Ok(Some(record)); - } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP { - let dropdb = XlDropDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { - db_id: dropdb.db_id, - tablespace_ids: dropdb.tablespace_ids, - })); + return Ok(Some(record)); + } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP { + let dropdb = XlDropDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { + db_id: dropdb.db_id, + tablespace_ids: dropdb.tablespace_ids, + })); - return Ok(Some(record)); + return Ok(Some(record)); + } } - } else if pg_version == 17 { - if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_WAL_LOG { - tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); - } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY { - // The XLOG record was renamed between v14 and v15, - // but the record format is the same. 
- // So we can reuse XlCreateDatabase here. - tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); + PgMajorVersion::PG17 => { + if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_WAL_LOG { + tracing::debug!("XLOG_DBASE_CREATE_WAL_LOG: noop"); + } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_CREATE_FILE_COPY { + // The XLOG record was renamed between v14 and v15, + // but the record format is the same. + // So we can reuse XlCreateDatabase here. + tracing::debug!("XLOG_DBASE_CREATE_FILE_COPY"); - let createdb = XlCreateDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { - db_id: createdb.db_id, - tablespace_id: createdb.tablespace_id, - src_db_id: createdb.src_db_id, - src_tablespace_id: createdb.src_tablespace_id, - })); + let createdb = XlCreateDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Create(DbaseCreate { + db_id: createdb.db_id, + tablespace_id: createdb.tablespace_id, + src_db_id: createdb.src_db_id, + src_tablespace_id: createdb.src_tablespace_id, + })); - return Ok(Some(record)); - } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_DROP { - let dropdb = XlDropDatabase::decode(buf); - let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { - db_id: dropdb.db_id, - tablespace_ids: dropdb.tablespace_ids, - })); + return Ok(Some(record)); + } else if info == postgres_ffi::v17::bindings::XLOG_DBASE_DROP { + let dropdb = XlDropDatabase::decode(buf); + let record = MetadataRecord::Dbase(DbaseRecord::Drop(DbaseDrop { + db_id: dropdb.db_id, + tablespace_ids: dropdb.tablespace_ids, + })); - return Ok(Some(record)); + return Ok(Some(record)); + } } } @@ -748,12 +752,12 @@ impl MetadataRecord { fn decode_clog_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK; if info == pg_constants::CLOG_ZEROPAGE { - let pageno = if pg_version < 17 { + let pageno = if pg_version < PgMajorVersion::PG17 { buf.get_u32_le() } else { buf.get_u64_le() as u32 @@ -765,7 +769,7 @@ impl MetadataRecord { ClogZeroPage { segno, rpageno }, )))) } else { - assert!(info == pg_constants::CLOG_TRUNCATE); + assert_eq!(info, pg_constants::CLOG_TRUNCATE); let xlrec = XlClogTruncate::decode(buf, pg_version); Ok(Some(MetadataRecord::Clog(ClogRecord::Truncate( @@ -838,14 +842,14 @@ impl MetadataRecord { fn decode_multixact_record( buf: &mut Bytes, decoded: &DecodedWALRecord, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result> { let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK; if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE || info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE { - let pageno = if pg_version < 17 { + let pageno = if pg_version < PgMajorVersion::PG17 { buf.get_u32_le() } else { buf.get_u64_le() as u32 diff --git a/libs/wal_decoder/src/models.rs b/libs/wal_decoder/src/models.rs index 7e1934c6c3..94a00c0e53 100644 --- a/libs/wal_decoder/src/models.rs +++ b/libs/wal_decoder/src/models.rs @@ -25,6 +25,9 @@ //! | //! 
|--> write to KV store within the pageserver +pub mod record; +pub mod value; + use bytes::Bytes; use pageserver_api::reltag::{RelTag, SlruKind}; use postgres_ffi::walrecord::{ diff --git a/libs/pageserver_api/src/record.rs b/libs/wal_decoder/src/models/record.rs similarity index 99% rename from libs/pageserver_api/src/record.rs rename to libs/wal_decoder/src/models/record.rs index 73516c5220..51659ed904 100644 --- a/libs/pageserver_api/src/record.rs +++ b/libs/wal_decoder/src/models/record.rs @@ -128,6 +128,6 @@ pub fn describe_wal_record(rec: &NeonWalRecord) -> Result Ok(format!("{:?}", rec)), + _ => Ok(format!("{rec:?}")), } } diff --git a/libs/pageserver_api/src/value.rs b/libs/wal_decoder/src/models/value.rs similarity index 99% rename from libs/pageserver_api/src/value.rs rename to libs/wal_decoder/src/models/value.rs index e9000939c3..3b4f896a45 100644 --- a/libs/pageserver_api/src/value.rs +++ b/libs/wal_decoder/src/models/value.rs @@ -10,7 +10,7 @@ use bytes::Bytes; use serde::{Deserialize, Serialize}; -use crate::record::NeonWalRecord; +use crate::models::record::NeonWalRecord; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub enum Value { diff --git a/libs/wal_decoder/src/serialized_batch.rs b/libs/wal_decoder/src/serialized_batch.rs index b451d6d8e0..ab38ff3d73 100644 --- a/libs/wal_decoder/src/serialized_batch.rs +++ b/libs/wal_decoder/src/serialized_batch.rs @@ -1,4 +1,4 @@ -//! This module implements batch type for serialized [`pageserver_api::value::Value`] +//! This module implements batch type for serialized [`crate::models::value::Value`] //! instances. Each batch contains a raw buffer (serialized values) //! and a list of metadata for each (key, LSN) tuple present in the batch. //! @@ -10,17 +10,17 @@ use std::collections::{BTreeSet, HashMap}; use bytes::{Bytes, BytesMut}; use pageserver_api::key::{CompactKey, Key, rel_block_to_key}; use pageserver_api::keyspace::KeySpace; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::RelTag; use pageserver_api::shard::ShardIdentity; -use pageserver_api::value::Value; use postgres_ffi::walrecord::{DecodedBkpBlock, DecodedWALRecord}; -use postgres_ffi::{BLCKSZ, page_is_new, page_set_lsn, pg_constants}; +use postgres_ffi::{BLCKSZ, PgMajorVersion, page_is_new, page_set_lsn, pg_constants}; use serde::{Deserialize, Serialize}; use utils::bin_ser::BeSer; use utils::lsn::Lsn; use crate::models::InterpretedWalRecord; +use crate::models::record::NeonWalRecord; +use crate::models::value::Value; static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; BLCKSZ as usize]); @@ -139,7 +139,7 @@ impl SerializedValueBatch { decoded: DecodedWALRecord, shard_records: &mut HashMap, next_record_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result<()> { // First determine how big the buffers need to be and allocate it up-front. // This duplicates some of the work below, but it's empirically much faster. 
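For downstream code, the practical effect of moving record.rs and value.rs into wal_decoder::models is only a path change; the files themselves are 99% similar per the rename detection. A sketch of the import migration, assuming a crate inside this workspace that already depends on wal_decoder (the helper function is hypothetical and only illustrates the types in use):

```rust
// Old paths (before this change):
// use pageserver_api::record::NeonWalRecord;
// use pageserver_api::value::Value;

// New paths, matching the `pub mod record; pub mod value;` added to wal_decoder::models:
use wal_decoder::models::record::NeonWalRecord;
use wal_decoder::models::value::Value;

/// Hypothetical helper, only here to show the relocated types in use: wrap a WAL
/// record in the Value enum the same way ingest code does before batching it.
fn as_value(rec: NeonWalRecord) -> Value {
    Value::WalRecord(rec)
}
```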
@@ -267,7 +267,7 @@ impl SerializedValueBatch { fn estimate_buffer_size( decoded: &DecodedWALRecord, shard: &ShardIdentity, - pg_version: u32, + pg_version: PgMajorVersion, ) -> usize { let mut estimate: usize = 0; @@ -303,7 +303,11 @@ impl SerializedValueBatch { estimate } - fn block_is_image(decoded: &DecodedWALRecord, blk: &DecodedBkpBlock, pg_version: u32) -> bool { + fn block_is_image( + decoded: &DecodedWALRecord, + blk: &DecodedBkpBlock, + pg_version: PgMajorVersion, + ) -> bool { blk.apply_image && blk.has_image && decoded.xl_rmid == pg_constants::RM_XLOG_ID diff --git a/libs/walproposer/build.rs b/libs/walproposer/build.rs index 530ceb1327..b13c8b32b4 100644 --- a/libs/walproposer/build.rs +++ b/libs/walproposer/build.rs @@ -13,22 +13,24 @@ fn main() -> anyhow::Result<()> { // Tell cargo to invalidate the built crate whenever the wrapper changes println!("cargo:rerun-if-changed=bindgen_deps.h"); + let root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../.."); + // Finding the location of built libraries and Postgres C headers: // - if POSTGRES_INSTALL_DIR is set look into it, otherwise look into `/pg_install` // - if there's a `bin/pg_config` file use it for getting include server, otherwise use `/pg_install/{PG_MAJORVERSION}/include/postgresql/server` let pg_install_dir = if let Some(postgres_install_dir) = env::var_os("POSTGRES_INSTALL_DIR") { postgres_install_dir.into() } else { - PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../pg_install") + root_path.join("pg_install") }; let pg_install_abs = std::fs::canonicalize(pg_install_dir)?; - let walproposer_lib_dir = pg_install_abs.join("build/walproposer-lib"); + let walproposer_lib_dir = root_path.join("build/walproposer-lib"); let walproposer_lib_search_str = walproposer_lib_dir .to_str() .ok_or(anyhow!("Bad non-UTF path"))?; - let pgxn_neon = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../../pgxn/neon"); + let pgxn_neon = root_path.join("pgxn/neon"); let pgxn_neon = std::fs::canonicalize(pgxn_neon)?; let pgxn_neon = pgxn_neon.to_str().ok_or(anyhow!("Bad non-UTF path"))?; diff --git a/libs/walproposer/src/api_bindings.rs b/libs/walproposer/src/api_bindings.rs index b89f1877fd..7c6abf252e 100644 --- a/libs/walproposer/src/api_bindings.rs +++ b/libs/walproposer/src/api_bindings.rs @@ -376,7 +376,7 @@ impl Level { FATAL => Level::Fatal, PANIC => Level::Panic, WPEVENT => Level::WPEvent, - _ => panic!("unknown log level {}", elevel), + _ => panic!("unknown log level {elevel}"), } } } @@ -446,7 +446,7 @@ pub fn empty_shmem() -> crate::bindings::WalproposerShmemState { impl std::fmt::Display for Level { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } diff --git a/libs/walproposer/src/walproposer.rs b/libs/walproposer/src/walproposer.rs index c853658ddf..93bb0d5eb0 100644 --- a/libs/walproposer/src/walproposer.rs +++ b/libs/walproposer/src/walproposer.rs @@ -380,7 +380,7 @@ mod tests { } fn conn_send_query(&self, _: &mut crate::bindings::Safekeeper, query: &str) -> bool { - println!("conn_send_query: {}", query); + println!("conn_send_query: {query}"); true } @@ -399,13 +399,13 @@ mod tests { ) -> crate::bindings::PGAsyncReadResult { println!("conn_async_read"); let reply = self.next_safekeeper_reply(); - println!("conn_async_read result: {:?}", reply); + println!("conn_async_read result: {reply:?}"); vec.extend_from_slice(reply); crate::bindings::PGAsyncReadResult_PG_ASYNC_READ_SUCCESS } fn conn_blocking_write(&self, _: &mut 
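The new ConcurrentReads mode added to bench_ingest in the following hunks couples the ingest loop to a background reader task through a tokio watch channel: the writer publishes the last key of each batch, and the reader keeps re-reading a trailing window of keys until it is cancelled. A stripped-down, runnable sketch of just that handshake, with plain integers standing in for Key and the actual InMemoryLayer reads elided:

```rust
use tokio::sync::watch;
use tokio_util::sync::CancellationToken;

// Runnable sketch of the writer/reader handshake only; assumes the `tokio` and
// `tokio-util` crates, nothing from this workspace.
#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel::<Option<u64>>(None);
    let cancel = CancellationToken::new();

    let reader = tokio::spawn({
        let cancel = cancel.clone();
        async move {
            // Block until the writer has published at least one key.
            rx.wait_for(|key| key.is_some()).await.unwrap();
            let mut reads = 0u64;
            while !cancel.is_cancelled() {
                let last_key = (*rx.borrow()).expect("writer published a key");
                // Stand-in for get_values_reconstruct_data over a trailing key window.
                let _window = last_key.saturating_sub(32)..=last_key;
                reads += 1;
                tokio::task::yield_now().await;
            }
            reads
        }
    });

    // Writer: "ingest" in batches and publish the last key of each batch.
    for last_key in (0u64..1024).step_by(16) {
        tx.send(Some(last_key)).unwrap();
        tokio::task::yield_now().await;
    }

    cancel.cancel();
    println!("reader performed {} window reads", reader.await.unwrap());
}
```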
crate::bindings::Safekeeper, buf: &[u8]) -> bool { - println!("conn_blocking_write: {:?}", buf); + println!("conn_blocking_write: {buf:?}"); self.check_walproposer_msg(buf); true } @@ -456,10 +456,7 @@ mod tests { timeout_millis: i64, ) -> super::WaitResult { let data = self.wait_events.get(); - println!( - "wait_event_set, timeout_millis={}, res={:?}", - timeout_millis, data - ); + println!("wait_event_set, timeout_millis={timeout_millis}, res={data:?}"); super::WaitResult::Network(data.sk, data.event_mask) } @@ -475,7 +472,7 @@ mod tests { } fn log_internal(&self, _wp: &mut crate::bindings::WalProposer, level: Level, msg: &str) { - println!("wp_log[{}] {}", level, msg); + println!("wp_log[{level}] {msg}"); } fn after_election(&self, _wp: &mut crate::bindings::WalProposer) { diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml index 9591c729e8..8a2e2ed3be 100644 --- a/pageserver/Cargo.toml +++ b/pageserver/Cargo.toml @@ -12,6 +12,9 @@ testing = ["fail/failpoints", "pageserver_api/testing", "wal_decoder/testing", " fuzz-read-path = ["testing"] +# Enables benchmarking only APIs +benchmarking = [] + [dependencies] anyhow.workspace = true arc-swap.workspace = true @@ -56,6 +59,7 @@ pin-project-lite.workspace = true postgres_backend.workspace = true postgres_connection.workspace = true postgres_ffi.workspace = true +postgres_ffi_types.workspace = true postgres_initdb.workspace = true postgres-protocol.workspace = true postgres-types.workspace = true @@ -126,6 +130,7 @@ harness = false [[bench]] name = "bench_ingest" harness = false +required-features = ["benchmarking"] [[bench]] name = "upload_queue" diff --git a/pageserver/benches/bench_ingest.rs b/pageserver/benches/bench_ingest.rs index eaadfe14ae..438c6e235e 100644 --- a/pageserver/benches/bench_ingest.rs +++ b/pageserver/benches/bench_ingest.rs @@ -1,23 +1,30 @@ use std::env; use std::num::NonZeroUsize; +use std::sync::Arc; use bytes::Bytes; use camino::Utf8PathBuf; use criterion::{Criterion, criterion_group, criterion_main}; +use futures::stream::FuturesUnordered; use pageserver::config::PageServerConf; use pageserver::context::{DownloadBehavior, RequestContext}; +use pageserver::keyspace::KeySpace; use pageserver::l0_flush::{L0FlushConfig, L0FlushGlobalState}; use pageserver::task_mgr::TaskKind; -use pageserver::tenant::storage_layer::InMemoryLayer; +use pageserver::tenant::storage_layer::IoConcurrency; +use pageserver::tenant::storage_layer::{InMemoryLayer, ValuesReconstructState}; use pageserver::{page_cache, virtual_file}; +use pageserver_api::config::GetVectoredConcurrentIo; use pageserver_api::key::Key; use pageserver_api::models::virtual_file::IoMode; use pageserver_api::shard::TenantShardId; -use pageserver_api::value::Value; -use strum::IntoEnumIterator; +use tokio_stream::StreamExt; use tokio_util::sync::CancellationToken; use utils::bin_ser::BeSer; use utils::id::{TenantId, TimelineId}; +use utils::lsn::Lsn; +use utils::sync::gate::Gate; +use wal_decoder::models::value::Value; use wal_decoder::serialized_batch::SerializedValueBatch; // A very cheap hash for generating non-sequential keys. 
@@ -30,7 +37,7 @@ fn murmurhash32(mut h: u32) -> u32 { h } -#[derive(serde::Serialize, Clone, Copy, Debug)] +#[derive(serde::Serialize, Clone, Copy, Debug, PartialEq)] enum KeyLayout { /// Sequential unique keys Sequential, @@ -40,19 +47,30 @@ enum KeyLayout { RandomReuse(u32), } -#[derive(serde::Serialize, Clone, Copy, Debug)] +#[derive(serde::Serialize, Clone, Copy, Debug, PartialEq)] enum WriteDelta { Yes, No, } +#[derive(serde::Serialize, Clone, Copy, Debug, PartialEq)] +enum ConcurrentReads { + Yes, + No, +} + async fn ingest( conf: &'static PageServerConf, put_size: usize, put_count: usize, key_layout: KeyLayout, write_delta: WriteDelta, + concurrent_reads: ConcurrentReads, ) -> anyhow::Result<()> { + if concurrent_reads == ConcurrentReads::Yes { + assert_eq!(key_layout, KeyLayout::Sequential); + } + let mut lsn = utils::lsn::Lsn(1000); let mut key = Key::from_i128(0x0); @@ -68,16 +86,18 @@ async fn ingest( let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); - let layer = InMemoryLayer::create( - conf, - timeline_id, - tenant_shard_id, - lsn, - &gate, - &cancel, - &ctx, - ) - .await?; + let layer = Arc::new( + InMemoryLayer::create( + conf, + timeline_id, + tenant_shard_id, + lsn, + &gate, + &cancel, + &ctx, + ) + .await?, + ); let data = Value::Image(Bytes::from(vec![0u8; put_size])); let data_ser_size = data.serialized_size().unwrap() as usize; @@ -86,6 +106,61 @@ async fn ingest( pageserver::context::DownloadBehavior::Download, ); + const READ_BATCH_SIZE: u32 = 32; + let (tx, mut rx) = tokio::sync::watch::channel::>(None); + let reader_cancel = CancellationToken::new(); + let reader_handle = if concurrent_reads == ConcurrentReads::Yes { + Some(tokio::task::spawn({ + let cancel = reader_cancel.clone(); + let layer = layer.clone(); + let ctx = ctx.attached_child(); + async move { + let gate = Gate::default(); + let gate_guard = gate.enter().unwrap(); + let io_concurrency = IoConcurrency::spawn_from_conf( + GetVectoredConcurrentIo::SidecarTask, + gate_guard, + ); + + rx.wait_for(|key| key.is_some()).await.unwrap(); + + while !cancel.is_cancelled() { + let key = match *rx.borrow() { + Some(some) => some, + None => unreachable!(), + }; + + let mut start_key = key; + start_key.field6 = key.field6.saturating_sub(READ_BATCH_SIZE); + let key_range = start_key..key.next(); + + let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone()); + + layer + .get_values_reconstruct_data( + KeySpace::single(key_range), + Lsn(1)..Lsn(u64::MAX), + &mut reconstruct_state, + &ctx, + ) + .await + .unwrap(); + + let mut collect_futs = std::mem::take(&mut reconstruct_state.keys) + .into_values() + .map(|state| state.sink_pending_ios()) + .collect::>(); + while collect_futs.next().await.is_some() {} + } + + drop(io_concurrency); + gate.close().await; + } + })) + } else { + None + }; + const BATCH_SIZE: usize = 16; let mut batch = Vec::new(); @@ -113,19 +188,27 @@ async fn ingest( batch.push((key.to_compact(), lsn, data_ser_size, data.clone())); if batch.len() >= BATCH_SIZE { + let last_key = Key::from_compact(batch.last().unwrap().0); + let this_batch = std::mem::take(&mut batch); let serialized = SerializedValueBatch::from_values(this_batch); layer.put_batch(serialized, &ctx).await?; + + tx.send(Some(last_key)).unwrap(); } } if !batch.is_empty() { + let last_key = Key::from_compact(batch.last().unwrap().0); + let this_batch = std::mem::take(&mut batch); let serialized = SerializedValueBatch::from_values(this_batch); layer.put_batch(serialized, 
&ctx).await?; + + tx.send(Some(last_key)).unwrap(); } layer.freeze(lsn + 1).await; - if matches!(write_delta, WriteDelta::Yes) { + if write_delta == WriteDelta::Yes { let l0_flush_state = L0FlushGlobalState::new(L0FlushConfig::Direct { max_concurrency: NonZeroUsize::new(1).unwrap(), }); @@ -136,6 +219,11 @@ async fn ingest( tokio::fs::remove_file(path).await?; } + reader_cancel.cancel(); + if let Some(handle) = reader_handle { + handle.await.unwrap(); + } + Ok(()) } @@ -147,6 +235,7 @@ fn ingest_main( put_count: usize, key_layout: KeyLayout, write_delta: WriteDelta, + concurrent_reads: ConcurrentReads, ) { pageserver::virtual_file::set_io_mode(io_mode); @@ -156,7 +245,15 @@ fn ingest_main( .unwrap(); runtime.block_on(async move { - let r = ingest(conf, put_size, put_count, key_layout, write_delta).await; + let r = ingest( + conf, + put_size, + put_count, + key_layout, + write_delta, + concurrent_reads, + ) + .await; if let Err(e) = r { panic!("{e:?}"); } @@ -195,6 +292,7 @@ fn criterion_benchmark(c: &mut Criterion) { key_size: usize, key_layout: KeyLayout, write_delta: WriteDelta, + concurrent_reads: ConcurrentReads, } #[derive(Clone)] struct HandPickedParameters { @@ -245,7 +343,7 @@ fn criterion_benchmark(c: &mut Criterion) { ]; let exploded_parameters = { let mut out = Vec::new(); - for io_mode in IoMode::iter() { + for concurrent_reads in [ConcurrentReads::Yes, ConcurrentReads::No] { for param in expect.clone() { let HandPickedParameters { volume_mib, @@ -253,12 +351,18 @@ fn criterion_benchmark(c: &mut Criterion) { key_layout, write_delta, } = param; + + if key_layout != KeyLayout::Sequential && concurrent_reads == ConcurrentReads::Yes { + continue; + } + out.push(ExplodedParameters { - io_mode, + io_mode: IoMode::DirectRw, volume_mib, key_size, key_layout, write_delta, + concurrent_reads, }); } } @@ -272,9 +376,10 @@ fn criterion_benchmark(c: &mut Criterion) { key_size, key_layout, write_delta, + concurrent_reads, } = self; format!( - "io_mode={io_mode:?} volume_mib={volume_mib:?} key_size_bytes={key_size:?} key_layout={key_layout:?} write_delta={write_delta:?}" + "io_mode={io_mode:?} volume_mib={volume_mib:?} key_size_bytes={key_size:?} key_layout={key_layout:?} write_delta={write_delta:?} concurrent_reads={concurrent_reads:?}" ) } } @@ -287,12 +392,23 @@ fn criterion_benchmark(c: &mut Criterion) { key_size, key_layout, write_delta, + concurrent_reads, } = params; let put_count = volume_mib * 1024 * 1024 / key_size; group.throughput(criterion::Throughput::Bytes((key_size * put_count) as u64)); group.sample_size(10); group.bench_function(id, |b| { - b.iter(|| ingest_main(conf, io_mode, key_size, put_count, key_layout, write_delta)) + b.iter(|| { + ingest_main( + conf, + io_mode, + key_size, + put_count, + key_layout, + write_delta, + concurrent_reads, + ) + }) }); } } diff --git a/pageserver/benches/bench_walredo.rs b/pageserver/benches/bench_walredo.rs index 215682d90c..efb970f705 100644 --- a/pageserver/benches/bench_walredo.rs +++ b/pageserver/benches/bench_walredo.rs @@ -67,12 +67,13 @@ use once_cell::sync::Lazy; use pageserver::config::PageServerConf; use pageserver::walredo::{PostgresRedoManager, RedoAttemptType}; use pageserver_api::key::Key; -use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::TenantShardId; +use postgres_ffi::{BLCKSZ, PgMajorVersion}; use tokio::sync::Barrier; use tokio::task::JoinSet; use utils::id::TenantId; use utils::lsn::Lsn; +use wal_decoder::models::record::NeonWalRecord; fn bench(c: &mut Criterion) { macro_rules! 
bench_group { @@ -94,7 +95,7 @@ fn bench(c: &mut Criterion) { // // benchmark the protocol implementation // - let pg_version = 14; + let pg_version = PgMajorVersion::PG14; bench_group!( "ping", Arc::new(move |mgr: Arc| async move { @@ -107,7 +108,7 @@ fn bench(c: &mut Criterion) { let make_redo_work = |req: &'static Request| { Arc::new(move |mgr: Arc| async move { let page = req.execute(&mgr).await.unwrap(); - assert_eq!(page.remaining(), 8192); + assert_eq!(page.remaining(), BLCKSZ as usize); }) }; bench_group!("short", { @@ -208,7 +209,7 @@ struct Request { lsn: Lsn, base_img: Option<(Lsn, Bytes)>, records: Vec<(Lsn, NeonWalRecord)>, - pg_version: u32, + pg_version: PgMajorVersion, } impl Request { @@ -267,7 +268,7 @@ impl Request { pg_record(false, b"\xbc\0\0\0\0\0\0\0h?m\x01\0\0\0\0p\n\0\09\x08\xa3\xea\0 \x8c\0\x7f\x06\0\0\xd22\0\0\xeb\x04\0\0\0\0\0\0\xff\x02\0@\0\0another_table\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x98\x08\0\0\x02@\0\0\0\0\0\0\n\0\0\0\x02\0\0\0\0@\0\0\0\0\0\0\x05\0\0\0\0@zD\x05\0\0\0\0\0\0\0\0\0pr\x01\0\0\0\0\0\0\0\0\x01d\0\0\0\0\0\0\x04\0\0\x01\0\0\0\x02\0"), ), ], - pg_version: 14, + pg_version: PgMajorVersion::PG14, } } @@ -516,7 +517,7 @@ impl Request { (lsn!("0/16B8000"), pg_record(false, b"C\0\0\0\0\x04\0\0p\x7fk\x01\0\0\0\0\0\n\0\0\\\xc4:?\0 \x12\0\x7f\x06\0\0\xd22\0\0\0@\0\0\0\0\0\0\xff\x03\x01\0\0\x08\x01\0\0\0\x18\0\xe1\0\0\0\0\0\0\0\xe2\0\0")), (lsn!("0/16CBD68"), pg_record(false, b"@ \0\0\0\0\0\0\xc0|l\x01\0\0\0\0@\t\0\0\xdf\xb0\x1a`\0\x12\0\0\0 \0\0\x04\x7f\x06\0\0\xd22\0\0\0@\0\0\0\0\0\0\x01\x80\0\0\0\0\0\0\xff\x05\0\0\0\0\0\0\0\0\0\0\0\0\x18\0\0 \0 \x04 \0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x04\0\0\x01")), ], - pg_version: 14, + pg_version: PgMajorVersion::PG14, } } } diff --git a/pageserver/client/Cargo.toml b/pageserver/client/Cargo.toml index 970a437a42..47e2a6ddae 100644 --- a/pageserver/client/Cargo.toml +++ b/pageserver/client/Cargo.toml @@ -18,6 +18,7 @@ workspace_hack = { version = "0.1", path = "../../workspace_hack" } tokio-postgres.workspace = true tokio-stream.workspace = true tokio.workspace = true +postgres_versioninfo.workspace = true futures.workspace = true tokio-util.workspace = true anyhow.workspace = true diff --git a/pageserver/client/src/mgmt_api.rs b/pageserver/client/src/mgmt_api.rs index 219e63c9d4..af4be23b9b 100644 --- a/pageserver/client/src/mgmt_api.rs +++ b/pageserver/client/src/mgmt_api.rs @@ -7,6 +7,7 @@ use detach_ancestor::AncestorDetached; use http_utils::error::HttpErrorBody; use pageserver_api::models::*; use pageserver_api::shard::TenantShardId; +use postgres_versioninfo::PgMajorVersion; pub use reqwest::Body as ReqwestBody; use reqwest::{IntoUrl, Method, StatusCode, Url}; use utils::id::{TenantId, TimelineId}; @@ -508,11 +509,11 @@ impl Client { .expect("Cannot build URL"); path.query_pairs_mut() - .append_pair("recurse", &format!("{}", recurse)); + .append_pair("recurse", &format!("{recurse}")); if let Some(concurrency) = concurrency { path.query_pairs_mut() - .append_pair("concurrency", &format!("{}", concurrency)); + .append_pair("concurrency", &format!("{concurrency}")); } self.request(Method::POST, path, ()).await.map(|_| ()) @@ -745,9 +746,11 @@ impl Client { timeline_id: TimelineId, base_lsn: Lsn, end_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, basebackup_tarball: ReqwestBody, ) -> Result<()> { + let pg_version = pg_version.major_version_num(); + let uri = format!( "{}/v1/tenant/{tenant_id}/timeline/{timeline_id}/import_basebackup?base_lsn={base_lsn}&end_lsn={end_lsn}&pg_version={pg_version}", self.mgmt_api_endpoint, @@ -841,4 +844,13 @@ impl Client { .await .map_err(Error::ReceiveBody) } + + pub async fn update_feature_flag_spec(&self, spec: String) -> Result<()> { + let uri = format!("{}/v1/feature_flag_spec", self.mgmt_api_endpoint); + self.request(Method::POST, uri, spec) + .await? 
+ .json() + .await + .map_err(Error::ReceiveBody) + } } diff --git a/pageserver/compaction/src/simulator/draw.rs b/pageserver/compaction/src/simulator/draw.rs index 3d35d1b91e..8322fe7d6d 100644 --- a/pageserver/compaction/src/simulator/draw.rs +++ b/pageserver/compaction/src/simulator/draw.rs @@ -152,7 +152,7 @@ pub fn draw_history(history: &[LayerTraceEvent], mut output: let key_diff = key_end - key_start; if key_start >= key_end { - panic!("Invalid key range {}-{}", key_start, key_end); + panic!("Invalid key range {key_start}-{key_end}"); } let lsn_start = lsn_map.map(f.lsn_range.start); @@ -212,12 +212,12 @@ pub fn draw_history(history: &[LayerTraceEvent], mut output: )?; writeln!(svg, "")?; } - Ordering::Greater => panic!("Invalid lsn range {}-{}", lsn_start, lsn_end), + Ordering::Greater => panic!("Invalid lsn range {lsn_start}-{lsn_end}"), } files_seen.insert(f); } - writeln!(svg, "{}", EndSvg)?; + writeln!(svg, "{EndSvg}")?; let mut layer_events_str = String::new(); let mut first = true; diff --git a/pageserver/ctl/src/draw_timeline_dir.rs b/pageserver/ctl/src/draw_timeline_dir.rs index 881ebd49a7..2135d302c1 100644 --- a/pageserver/ctl/src/draw_timeline_dir.rs +++ b/pageserver/ctl/src/draw_timeline_dir.rs @@ -228,7 +228,7 @@ pub fn main() -> Result<()> { let lsn_max = lsn_map.len(); if key_start >= key_end { - panic!("Invalid key range {}-{}", key_start, key_end); + panic!("Invalid key range {key_start}-{key_end}"); } let lsn_start = *lsn_map.get(&lsnr.start).unwrap(); @@ -250,7 +250,7 @@ pub fn main() -> Result<()> { ymargin = 0.05; fill = Fill::Color(rgb(0, 0, 0)); } - Ordering::Greater => panic!("Invalid lsn range {}-{}", lsn_start, lsn_end), + Ordering::Greater => panic!("Invalid lsn range {lsn_start}-{lsn_end}"), } println!( @@ -287,10 +287,10 @@ pub fn main() -> Result<()> { ); } - println!("{}", EndSvg); + println!("{EndSvg}"); - eprintln!("num_images: {}", num_images); - eprintln!("num_deltas: {}", num_deltas); + eprintln!("num_images: {num_images}"); + eprintln!("num_deltas: {num_deltas}"); Ok(()) } diff --git a/pageserver/ctl/src/key.rs b/pageserver/ctl/src/key.rs index 600f7c412e..c4daafdfd0 100644 --- a/pageserver/ctl/src/key.rs +++ b/pageserver/ctl/src/key.rs @@ -372,7 +372,7 @@ impl std::fmt::Debug for RelTagish { f.write_char('/')?; } first = false; - write!(f, "{}", x) + write!(f, "{x}") }) } } diff --git a/pageserver/ctl/src/layer_map_analyzer.rs b/pageserver/ctl/src/layer_map_analyzer.rs index c49c8b58df..ef844fbd0f 100644 --- a/pageserver/ctl/src/layer_map_analyzer.rs +++ b/pageserver/ctl/src/layer_map_analyzer.rs @@ -224,8 +224,7 @@ pub(crate) async fn main(cmd: &AnalyzeLayerMapCmd) -> Result<()> { } } println!( - "Total delta layers {} image layers {} excess layers {}", - total_delta_layers, total_image_layers, total_excess_layers + "Total delta layers {total_delta_layers} image layers {total_image_layers} excess layers {total_excess_layers}" ); Ok(()) } diff --git a/pageserver/page_api/Cargo.toml b/pageserver/page_api/Cargo.toml index 8b13b9e1db..c5283c2b09 100644 --- a/pageserver/page_api/Cargo.toml +++ b/pageserver/page_api/Cargo.toml @@ -11,6 +11,8 @@ futures.workspace = true pageserver_api.workspace = true postgres_ffi.workspace = true prost.workspace = true +strum.workspace = true +strum_macros.workspace = true thiserror.workspace = true tokio.workspace = true tonic.workspace = true diff --git a/pageserver/page_api/proto/page_service.proto b/pageserver/page_api/proto/page_service.proto index 81953a710f..d06b2cfca5 100644 --- 
a/pageserver/page_api/proto/page_service.proto +++ b/pageserver/page_api/proto/page_service.proto @@ -110,6 +110,19 @@ message GetBaseBackupRequest { bool replica = 2; // If true, include relation files in the base backup. Mainly for debugging and tests. bool full = 3; + // Compression algorithm to use. Base backups send a compressed payload instead of using gRPC + // compression, so that we can cache compressed backups on the server. + BaseBackupCompression compression = 4; +} + +// Base backup compression algorithms. +enum BaseBackupCompression { + // Unknown algorithm. Used when clients send an unsupported algorithm. + BASE_BACKUP_COMPRESSION_UNKNOWN = 0; + // No compression. + BASE_BACKUP_COMPRESSION_NONE = 1; + // GZIP compression. + BASE_BACKUP_COMPRESSION_GZIP = 2; } // Base backup response chunk, returned as an ordered stream. diff --git a/pageserver/page_api/src/client.rs b/pageserver/page_api/src/client.rs index 057a1d4ad6..71d539ab91 100644 --- a/pageserver/page_api/src/client.rs +++ b/pageserver/page_api/src/client.rs @@ -83,6 +83,7 @@ impl Client { timeline_id: TimelineId, shard_id: ShardIndex, auth_header: Option, + compression: Option, ) -> anyhow::Result { let endpoint: tonic::transport::Endpoint = into_endpoint .try_into() @@ -90,7 +91,14 @@ impl Client { let channel = endpoint.connect().await?; let auth = AuthInterceptor::new(tenant_id, timeline_id, auth_header, shard_id) .map_err(|e| anyhow::anyhow!(e.to_string()))?; - let client = proto::PageServiceClient::with_interceptor(channel, auth); + let mut client = proto::PageServiceClient::with_interceptor(channel, auth); + + if let Some(compression) = compression { + // TODO: benchmark this (including network latency). + client = client + .accept_compressed(compression) + .send_compressed(compression); + } Ok(Self { client }) } @@ -112,7 +120,7 @@ impl Client { pub async fn get_base_backup( &mut self, req: model::GetBaseBackupRequest, - ) -> Result>, tonic::Status> { + ) -> Result> + 'static, tonic::Status> { let proto_req = proto::GetBaseBackupRequest::from(req); let response_stream: Streaming = @@ -122,7 +130,7 @@ impl Client { let domain_stream = response_stream.map(|chunk_res| { chunk_res.and_then(|proto_chunk| { proto_chunk.try_into().map_err(|e| { - tonic::Status::internal(format!("Failed to convert response chunk: {}", e)) + tonic::Status::internal(format!("Failed to convert response chunk: {e}")) }) }) }); diff --git a/pageserver/page_api/src/model.rs b/pageserver/page_api/src/model.rs index ef7f89473f..1ca89b4870 100644 --- a/pageserver/page_api/src/model.rs +++ b/pageserver/page_api/src/model.rs @@ -191,15 +191,21 @@ pub struct GetBaseBackupRequest { pub replica: bool, /// If true, include relation files in the base backup. Mainly for debugging and tests. pub full: bool, + /// Compression algorithm to use. Base backups send a compressed payload instead of using gRPC + /// compression, so that we can cache compressed backups on the server. 
+ pub compression: BaseBackupCompression, } -impl From for GetBaseBackupRequest { - fn from(pb: proto::GetBaseBackupRequest) -> Self { - Self { +impl TryFrom for GetBaseBackupRequest { + type Error = ProtocolError; + + fn try_from(pb: proto::GetBaseBackupRequest) -> Result { + Ok(Self { lsn: (pb.lsn != 0).then_some(Lsn(pb.lsn)), replica: pb.replica, full: pb.full, - } + compression: pb.compression.try_into()?, + }) } } @@ -209,10 +215,55 @@ impl From for proto::GetBaseBackupRequest { lsn: request.lsn.unwrap_or_default().0, replica: request.replica, full: request.full, + compression: request.compression.into(), } } } +/// Base backup compression algorithm. +#[derive(Clone, Copy, Debug)] +pub enum BaseBackupCompression { + None, + Gzip, +} + +impl TryFrom for BaseBackupCompression { + type Error = ProtocolError; + + fn try_from(pb: proto::BaseBackupCompression) -> Result { + match pb { + proto::BaseBackupCompression::Unknown => Err(ProtocolError::invalid("compression", pb)), + proto::BaseBackupCompression::None => Ok(Self::None), + proto::BaseBackupCompression::Gzip => Ok(Self::Gzip), + } + } +} + +impl TryFrom for BaseBackupCompression { + type Error = ProtocolError; + + fn try_from(compression: i32) -> Result { + proto::BaseBackupCompression::try_from(compression) + .map_err(|_| ProtocolError::invalid("compression", compression)) + .and_then(Self::try_from) + } +} + +impl From for proto::BaseBackupCompression { + fn from(compression: BaseBackupCompression) -> Self { + match compression { + BaseBackupCompression::None => Self::None, + BaseBackupCompression::Gzip => Self::Gzip, + } + } +} + +impl From for i32 { + fn from(compression: BaseBackupCompression) -> Self { + proto::BaseBackupCompression::from(compression).into() + } +} + pub type GetBaseBackupResponseChunk = Bytes; impl TryFrom for GetBaseBackupResponseChunk { @@ -459,7 +510,7 @@ impl GetPageResponse { /// These are effectively equivalent to gRPC statuses. However, we use a bidirectional stream /// (potentially shared by many backends), and a gRPC status response would terminate the stream so /// we send GetPageResponse messages with these codes instead. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq, strum_macros::Display)] pub enum GetPageStatusCode { /// Unknown status. For forwards compatibility: used when an older client version receives a new /// status code from a newer server version. 
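The new enum follows the usual proto3 convention: tag 0 is reserved for an UNKNOWN value, so a client that never sets the field produces a request that decodes but is rejected rather than silently defaulting to "no compression". A minimal standalone sketch of that conversion rule (the Compression and ProtocolError names below are illustrative stand-ins, not the actual page_api types):

// Minimal sketch of the "zero means unknown" convention behind the new
// BaseBackupCompression field; illustrative types only.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Compression {
    None,
    Gzip,
}

#[derive(Debug)]
struct ProtocolError(String);

impl TryFrom<i32> for Compression {
    type Error = ProtocolError;

    fn try_from(wire: i32) -> Result<Self, Self::Error> {
        match wire {
            1 => Ok(Self::None),
            2 => Ok(Self::Gzip),
            // 0 is BASE_BACKUP_COMPRESSION_UNKNOWN (the proto3 default); reject it
            // instead of guessing, along with any future value we don't know.
            other => Err(ProtocolError(format!("invalid compression: {other}"))),
        }
    }
}

fn main() {
    assert_eq!(Compression::try_from(2).unwrap(), Compression::Gzip);
    assert!(Compression::try_from(0).is_err());
}

The real TryFrom<i32> goes through proto::BaseBackupCompression::try_from first and then rejects the Unknown variant, but the effect is the same: the zero value becomes a protocol error instead of an implicit default.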
diff --git a/pageserver/pagebench/Cargo.toml b/pageserver/pagebench/Cargo.toml index 5e4af88e69..f5dfc0db25 100644 --- a/pageserver/pagebench/Cargo.toml +++ b/pageserver/pagebench/Cargo.toml @@ -25,6 +25,7 @@ tokio.workspace = true tokio-stream.workspace = true tokio-util.workspace = true tonic.workspace = true +url.workspace = true pageserver_client.workspace = true pageserver_api.workspace = true diff --git a/pageserver/pagebench/src/cmd/aux_files.rs b/pageserver/pagebench/src/cmd/aux_files.rs index 6441c047c2..43d7a73399 100644 --- a/pageserver/pagebench/src/cmd/aux_files.rs +++ b/pageserver/pagebench/src/cmd/aux_files.rs @@ -62,7 +62,7 @@ async fn main_impl(args: Args) -> anyhow::Result<()> { let tenant_shard_id = TenantShardId::unsharded(timeline.tenant_id); let timeline_id = timeline.timeline_id; - println!("operating on timeline {}", timeline); + println!("operating on timeline {timeline}"); mgmt_api_client .set_tenant_config(&TenantConfigRequest { @@ -75,8 +75,8 @@ async fn main_impl(args: Args) -> anyhow::Result<()> { let items = (0..100) .map(|id| { ( - format!("pg_logical/mappings/{:03}.{:03}", batch, id), - format!("{:08}", id), + format!("pg_logical/mappings/{batch:03}.{id:03}"), + format!("{id:08}"), ) }) .collect::>(); diff --git a/pageserver/pagebench/src/cmd/basebackup.rs b/pageserver/pagebench/src/cmd/basebackup.rs index 43ad92980c..4111d09f92 100644 --- a/pageserver/pagebench/src/cmd/basebackup.rs +++ b/pageserver/pagebench/src/cmd/basebackup.rs @@ -1,20 +1,29 @@ use std::collections::HashMap; use std::num::NonZeroUsize; use std::ops::Range; -use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; +use std::pin::Pin; +use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Instant; -use anyhow::Context; +use anyhow::anyhow; +use futures::TryStreamExt as _; use pageserver_api::shard::TenantShardId; use pageserver_client::mgmt_api::ForceAwaitLogicalSize; use pageserver_client::page_service::BasebackupRequest; +use pageserver_page_api as page_api; use rand::prelude::*; +use tokio::io::AsyncRead; use tokio::sync::Barrier; use tokio::task::JoinSet; +use tokio_util::compat::{TokioAsyncReadCompatExt as _, TokioAsyncWriteCompatExt as _}; +use tokio_util::io::StreamReader; +use tonic::async_trait; use tracing::{info, instrument}; +use url::Url; use utils::id::TenantTimelineId; use utils::lsn::Lsn; +use utils::shard::ShardIndex; use crate::util::tokio_thread_local_stats::AllThreadLocalStats; use crate::util::{request_stats, tokio_thread_local_stats}; @@ -24,14 +33,15 @@ use crate::util::{request_stats, tokio_thread_local_stats}; pub(crate) struct Args { #[clap(long, default_value = "http://localhost:9898")] mgmt_api_endpoint: String, - #[clap(long, default_value = "postgres://postgres@localhost:64000")] + /// The Pageserver to connect to. Use postgresql:// for libpq, or grpc:// for gRPC. 
+ #[clap(long, default_value = "postgresql://postgres@localhost:64000")] page_service_connstring: String, #[clap(long)] pageserver_jwt: Option, #[clap(long, default_value = "1")] num_clients: NonZeroUsize, - #[clap(long, default_value = "1.0")] - gzip_probability: f64, + #[clap(long)] + no_compression: bool, #[clap(long)] runtime: Option, #[clap(long)] @@ -146,12 +156,27 @@ async fn main_impl( let mut work_senders = HashMap::new(); let mut tasks = Vec::new(); - for tl in &timelines { + let scheme = match Url::parse(&args.page_service_connstring) { + Ok(url) => url.scheme().to_lowercase().to_string(), + Err(url::ParseError::RelativeUrlWithoutBase) => "postgresql".to_string(), + Err(err) => return Err(anyhow!("invalid connstring: {err}")), + }; + for &tl in &timelines { let (sender, receiver) = tokio::sync::mpsc::channel(1); // TODO: not sure what the implications of this are work_senders.insert(tl, sender); - tasks.push(tokio::spawn(client( - args, - *tl, + + let client: Box = match scheme.as_str() { + "postgresql" | "postgres" => Box::new( + LibpqClient::new(&args.page_service_connstring, tl, !args.no_compression).await?, + ), + "grpc" => Box::new( + GrpcClient::new(&args.page_service_connstring, tl, !args.no_compression).await?, + ), + scheme => return Err(anyhow!("invalid scheme {scheme}")), + }; + + tasks.push(tokio::spawn(run_worker( + client, Arc::clone(&start_work_barrier), receiver, Arc::clone(&all_work_done_barrier), @@ -166,13 +191,7 @@ async fn main_impl( let mut rng = rand::thread_rng(); let target = all_targets.choose(&mut rng).unwrap(); let lsn = target.lsn_range.clone().map(|r| rng.gen_range(r)); - ( - target.timeline, - Work { - lsn, - gzip: rng.gen_bool(args.gzip_probability), - }, - ) + (target.timeline, Work { lsn }) }; let sender = work_senders.get(&timeline).unwrap(); // TODO: what if this blocks? @@ -216,13 +235,11 @@ async fn main_impl( #[derive(Copy, Clone)] struct Work { lsn: Option, - gzip: bool, } #[instrument(skip_all)] -async fn client( - args: &'static Args, - timeline: TenantTimelineId, +async fn run_worker( + mut client: Box, start_work_barrier: Arc, mut work: tokio::sync::mpsc::Receiver, all_work_done_barrier: Arc, @@ -230,37 +247,14 @@ async fn client( ) { start_work_barrier.wait().await; - let client = pageserver_client::page_service::Client::new(args.page_service_connstring.clone()) - .await - .unwrap(); - - while let Some(Work { lsn, gzip }) = work.recv().await { + while let Some(Work { lsn }) = work.recv().await { let start = Instant::now(); - let copy_out_stream = client - .basebackup(&BasebackupRequest { - tenant_id: timeline.tenant_id, - timeline_id: timeline.timeline_id, - lsn, - gzip, - }) - .await - .with_context(|| format!("start basebackup for {timeline}")) - .unwrap(); + let stream = client.basebackup(lsn).await.unwrap(); - use futures::StreamExt; - let size = Arc::new(AtomicUsize::new(0)); - copy_out_stream - .for_each({ - |r| { - let size = Arc::clone(&size); - async move { - let size = Arc::clone(&size); - size.fetch_add(r.unwrap().len(), Ordering::Relaxed); - } - } - }) - .await; - info!("basebackup size is {} bytes", size.load(Ordering::Relaxed)); + let size = futures::io::copy(stream.compat(), &mut tokio::io::sink().compat_write()) + .await + .unwrap(); + info!("basebackup size is {size} bytes"); let elapsed = start.elapsed(); live_stats.inc(); STATS.with(|stats| { @@ -270,3 +264,100 @@ async fn client( all_work_done_barrier.wait().await; } + +/// A basebackup client. This allows switching out the client protocol implementation. 
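pagebench now picks the client implementation from the connstring scheme, falling back to libpq for bare connstrings that do not parse as URLs. A self-contained sketch of that detection (the helper name is mine; it needs only the url crate this diff adds to pagebench's dependencies):

use url::Url;

// Bare libpq connstrings have no scheme and fail with RelativeUrlWithoutBase,
// which is treated as "postgresql"; anything else must parse, and its scheme
// selects the client implementation (libpq vs gRPC).
fn connstring_scheme(connstring: &str) -> Result<String, String> {
    match Url::parse(connstring) {
        Ok(url) => Ok(url.scheme().to_lowercase()),
        Err(url::ParseError::RelativeUrlWithoutBase) => Ok("postgresql".to_string()),
        Err(err) => Err(format!("invalid connstring: {err}")),
    }
}

fn main() {
    assert_eq!(connstring_scheme("grpc://localhost:51051").unwrap(), "grpc");
    assert_eq!(
        connstring_scheme("postgresql://postgres@localhost:64000").unwrap(),
        "postgresql"
    );
    // A key=value libpq string has no scheme at all.
    assert_eq!(connstring_scheme("host=localhost").unwrap(), "postgresql");
}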
+#[async_trait] +trait Client: Send { + async fn basebackup( + &mut self, + lsn: Option, + ) -> anyhow::Result>>; +} + +/// A libpq-based Pageserver client. +struct LibpqClient { + inner: pageserver_client::page_service::Client, + ttid: TenantTimelineId, + compression: bool, +} + +impl LibpqClient { + async fn new( + connstring: &str, + ttid: TenantTimelineId, + compression: bool, + ) -> anyhow::Result { + Ok(Self { + inner: pageserver_client::page_service::Client::new(connstring.to_string()).await?, + ttid, + compression, + }) + } +} + +#[async_trait] +impl Client for LibpqClient { + async fn basebackup( + &mut self, + lsn: Option, + ) -> anyhow::Result>> { + let req = BasebackupRequest { + tenant_id: self.ttid.tenant_id, + timeline_id: self.ttid.timeline_id, + lsn, + gzip: self.compression, + }; + let stream = self.inner.basebackup(&req).await?; + Ok(Box::pin(StreamReader::new( + stream.map_err(std::io::Error::other), + ))) + } +} + +/// A gRPC Pageserver client. +struct GrpcClient { + inner: page_api::Client, + compression: page_api::BaseBackupCompression, +} + +impl GrpcClient { + async fn new( + connstring: &str, + ttid: TenantTimelineId, + compression: bool, + ) -> anyhow::Result { + let inner = page_api::Client::new( + connstring.to_string(), + ttid.tenant_id, + ttid.timeline_id, + ShardIndex::unsharded(), + None, + None, // NB: uses payload compression + ) + .await?; + let compression = match compression { + true => page_api::BaseBackupCompression::Gzip, + false => page_api::BaseBackupCompression::None, + }; + Ok(Self { inner, compression }) + } +} + +#[async_trait] +impl Client for GrpcClient { + async fn basebackup( + &mut self, + lsn: Option, + ) -> anyhow::Result>> { + let req = page_api::GetBaseBackupRequest { + lsn, + replica: false, + full: false, + compression: self.compression, + }; + let stream = self.inner.get_base_backup(req).await?; + Ok(Box::pin(StreamReader::new( + stream.map_err(std::io::Error::other), + ))) + } +} diff --git a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs index 3a68a77279..a297819e9b 100644 --- a/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs +++ b/pageserver/pagebench/src/cmd/getpage_latest_lsn.rs @@ -10,33 +10,31 @@ use anyhow::Context; use async_trait::async_trait; use bytes::Bytes; use camino::Utf8PathBuf; +use futures::{Stream, StreamExt as _}; use pageserver_api::key::Key; use pageserver_api::keyspace::KeySpaceAccum; use pageserver_api::pagestream_api::{PagestreamGetPageRequest, PagestreamRequest}; use pageserver_api::reltag::RelTag; use pageserver_api::shard::TenantShardId; -use pageserver_page_api::proto; +use pageserver_page_api as page_api; use rand::prelude::*; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; use tracing::info; +use url::Url; use utils::id::TenantTimelineId; use utils::lsn::Lsn; +use utils::shard::ShardIndex; use crate::util::tokio_thread_local_stats::AllThreadLocalStats; use crate::util::{request_stats, tokio_thread_local_stats}; -#[derive(clap::ValueEnum, Clone, Debug)] -enum Protocol { - Libpq, - Grpc, -} - /// GetPage@LatestLSN, uniformly distributed across the compute-accessible keyspace. #[derive(clap::Parser)] pub(crate) struct Args { #[clap(long, default_value = "http://localhost:9898")] mgmt_api_endpoint: String, + /// Pageserver connection string. Supports postgresql:// and grpc:// protocols. 
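Both client implementations hand the backup back as a boxed AsyncRead by wrapping their chunk streams in tokio_util::io::StreamReader, so run_worker can byte-count libpq CopyOut data and gRPC response chunks identically (errors are funneled through std::io::Error::other). A small self-contained sketch of that adaptor, with toy data in place of the real streams:

use bytes::Bytes;
use futures::stream;
use tokio::io::AsyncReadExt as _;
use tokio_util::io::StreamReader;

// A fallible stream of Bytes chunks becomes an AsyncRead; the real worker then
// copies it into a sink just to measure the basebackup size.
#[tokio::main]
async fn main() -> std::io::Result<()> {
    let chunks = stream::iter(vec![
        Ok::<Bytes, std::io::Error>(Bytes::from_static(b"base")),
        Ok(Bytes::from_static(b"backup")),
    ]);
    let mut reader = StreamReader::new(chunks);

    let mut buf = Vec::new();
    reader.read_to_end(&mut buf).await?;
    assert_eq!(buf, b"basebackup".to_vec());
    Ok(())
}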
#[clap(long, default_value = "postgres://postgres@localhost:64000")] page_service_connstring: String, #[clap(long)] @@ -45,8 +43,9 @@ pub(crate) struct Args { num_clients: NonZeroUsize, #[clap(long)] runtime: Option, - #[clap(long, value_enum, default_value = "libpq")] - protocol: Protocol, + /// If true, enable compression (only for gRPC). + #[clap(long)] + compression: bool, /// Each client sends requests at the given rate. /// /// If a request takes too long and we should be issuing a new request already, @@ -325,18 +324,32 @@ async fn main_impl( .unwrap(); Box::pin(async move { - let client: Box = match args.protocol { - Protocol::Libpq => Box::new( - LibpqClient::new(args.page_service_connstring.clone(), worker_id.timeline) - .await - .unwrap(), + let scheme = match Url::parse(&args.page_service_connstring) { + Ok(url) => url.scheme().to_lowercase().to_string(), + Err(url::ParseError::RelativeUrlWithoutBase) => "postgresql".to_string(), + Err(err) => panic!("invalid connstring: {err}"), + }; + let client: Box = match scheme.as_str() { + "postgresql" | "postgres" => { + assert!(!args.compression, "libpq does not support compression"); + Box::new( + LibpqClient::new(&args.page_service_connstring, worker_id.timeline) + .await + .unwrap(), + ) + } + + "grpc" => Box::new( + GrpcClient::new( + &args.page_service_connstring, + worker_id.timeline, + args.compression, + ) + .await + .unwrap(), ), - Protocol::Grpc => Box::new( - GrpcClient::new(args.page_service_connstring.clone(), worker_id.timeline) - .await - .unwrap(), - ), + scheme => panic!("unsupported scheme {scheme}"), }; run_worker(args, client, ss, cancel, rps_period, ranges, weights).await }) @@ -543,8 +556,8 @@ struct LibpqClient { } impl LibpqClient { - async fn new(connstring: String, ttid: TenantTimelineId) -> anyhow::Result { - let inner = pageserver_client::page_service::Client::new(connstring) + async fn new(connstring: &str, ttid: TenantTimelineId) -> anyhow::Result { + let inner = pageserver_client::page_service::Client::new(connstring.to_string()) .await? .pagestream(ttid.tenant_id, ttid.timeline_id) .await?; @@ -600,34 +613,36 @@ impl Client for LibpqClient { } } -/// A gRPC client using the raw, no-frills gRPC client. +/// A gRPC Pageserver client. struct GrpcClient { - req_tx: tokio::sync::mpsc::Sender, - resp_rx: tonic::Streaming, + req_tx: tokio::sync::mpsc::Sender, + resp_rx: Pin> + Send>>, } impl GrpcClient { - async fn new(connstring: String, ttid: TenantTimelineId) -> anyhow::Result { - let mut client = pageserver_page_api::proto::PageServiceClient::connect(connstring).await?; + async fn new( + connstring: &str, + ttid: TenantTimelineId, + compression: bool, + ) -> anyhow::Result { + let mut client = page_api::Client::new( + connstring.to_string(), + ttid.tenant_id, + ttid.timeline_id, + ShardIndex::unsharded(), + None, + compression.then_some(tonic::codec::CompressionEncoding::Zstd), + ) + .await?; // The channel has a buffer size of 1, since 0 is not allowed. It does not matter, since the // benchmark will control the queue depth (i.e. in-flight requests) anyway, and requests are // buffered by Tonic and the OS too. 
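As the comment above notes, the request half of the bidirectional GetPage stream is just an mpsc channel wrapped in a ReceiverStream, with queue depth governed by the benchmark rather than the channel capacity. A toy sketch of that plumbing (u64 stands in for page_api::GetPageRequest, and no real RPC is made here):

use tokio_stream::{StreamExt as _, wrappers::ReceiverStream};

#[tokio::main]
async fn main() {
    // Capacity 1 because 0 is not allowed; it does not matter for throughput.
    let (req_tx, req_rx) = tokio::sync::mpsc::channel::<u64>(1);
    let mut req_stream = ReceiverStream::new(req_rx);

    // In pagebench, req_stream is handed to client.get_pages(req_stream) and
    // req_tx is what send_get_page() pushes requests into.
    req_tx.send(42).await.unwrap();
    assert_eq!(req_stream.next().await, Some(42));
}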
let (req_tx, req_rx) = tokio::sync::mpsc::channel(1); let req_stream = tokio_stream::wrappers::ReceiverStream::new(req_rx); - let mut req = tonic::Request::new(req_stream); - let metadata = req.metadata_mut(); - metadata.insert("neon-tenant-id", ttid.tenant_id.to_string().try_into()?); - metadata.insert("neon-timeline-id", ttid.timeline_id.to_string().try_into()?); - metadata.insert("neon-shard-id", "0000".try_into()?); + let resp_rx = Box::pin(client.get_pages(req_stream).await?); - let resp = client.get_pages(req).await?; - let resp_stream = resp.into_inner(); - - Ok(Self { - req_tx, - resp_rx: resp_stream, - }) + Ok(Self { req_tx, resp_rx }) } } @@ -641,27 +656,27 @@ impl Client for GrpcClient { rel: RelTag, blks: Vec, ) -> anyhow::Result<()> { - let req = proto::GetPageRequest { + let req = page_api::GetPageRequest { request_id: req_id, - request_class: proto::GetPageClass::Normal as i32, - read_lsn: Some(proto::ReadLsn { - request_lsn: req_lsn.0, - not_modified_since_lsn: mod_lsn.0, - }), - rel: Some(rel.into()), - block_number: blks, + request_class: page_api::GetPageClass::Normal, + read_lsn: page_api::ReadLsn { + request_lsn: req_lsn, + not_modified_since_lsn: Some(mod_lsn), + }, + rel, + block_numbers: blks, }; self.req_tx.send(req).await?; Ok(()) } async fn recv_get_page(&mut self) -> anyhow::Result<(u64, Vec)> { - let resp = self.resp_rx.message().await?.unwrap(); + let resp = self.resp_rx.next().await.unwrap().unwrap(); anyhow::ensure!( - resp.status_code == proto::GetPageStatusCode::Ok as i32, + resp.status_code == page_api::GetPageStatusCode::Ok, "unexpected status code: {}", - resp.status_code + resp.status_code, ); - Ok((resp.request_id, resp.page_image)) + Ok((resp.request_id, resp.page_images)) } } diff --git a/pageserver/src/basebackup.rs b/pageserver/src/basebackup.rs index 2a0548b811..36dada1e89 100644 --- a/pageserver/src/basebackup.rs +++ b/pageserver/src/basebackup.rs @@ -14,19 +14,19 @@ use std::fmt::Write as FmtWrite; use std::time::{Instant, SystemTime}; use anyhow::{Context, anyhow}; +use async_compression::tokio::write::GzipEncoder; use bytes::{BufMut, Bytes, BytesMut}; use fail::fail_point; use pageserver_api::key::{Key, rel_block_to_key}; use pageserver_api::reltag::{RelTag, SlruKind}; -use postgres_ffi::pg_constants::{ - DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID, PG_HBA, PGDATA_SPECIAL_FILES, -}; -use postgres_ffi::relfile_utils::{INIT_FORKNUM, MAIN_FORKNUM}; +use postgres_ffi::pg_constants::{PG_HBA, PGDATA_SPECIAL_FILES}; use postgres_ffi::{ - BLCKSZ, PG_TLI, RELSEG_SIZE, WAL_SEGMENT_SIZE, XLogFileName, dispatch_pgversion, pg_constants, + BLCKSZ, PG_TLI, PgMajorVersion, RELSEG_SIZE, WAL_SEGMENT_SIZE, XLogFileName, + dispatch_pgversion, pg_constants, }; -use tokio::io; -use tokio::io::AsyncWrite; +use postgres_ffi_types::constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID}; +use postgres_ffi_types::forknum::{INIT_FORKNUM, MAIN_FORKNUM}; +use tokio::io::{self, AsyncWrite, AsyncWriteExt as _}; use tokio_tar::{Builder, EntryType, Header}; use tracing::*; use utils::lsn::Lsn; @@ -97,6 +97,7 @@ impl From for tonic::Status { /// * When working without safekeepers. In this situation it is important to match the lsn /// we are taking basebackup on with the lsn that is used in pageserver's walreceiver /// to start the replication. 
+#[allow(clippy::too_many_arguments)] pub async fn send_basebackup_tarball<'a, W>( write: &'a mut W, timeline: &'a Timeline, @@ -104,6 +105,7 @@ pub async fn send_basebackup_tarball<'a, W>( prev_lsn: Option, full_backup: bool, replica: bool, + gzip_level: Option, ctx: &'a RequestContext, ) -> Result<(), BasebackupError> where @@ -122,7 +124,7 @@ where // prev_lsn value; that happens if the timeline was just branched from // an old LSN and it doesn't have any WAL of its own yet. We will set // prev_lsn to Lsn(0) if we cannot provide the correct value. - let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn { + let (backup_prev, lsn) = if let Some(req_lsn) = req_lsn { // Backup was requested at a particular LSN. The caller should've // already checked that it's a valid LSN. @@ -143,7 +145,7 @@ where }; // Consolidate the derived and the provided prev_lsn values - let prev_lsn = if let Some(provided_prev_lsn) = prev_lsn { + let prev_record_lsn = if let Some(provided_prev_lsn) = prev_lsn { if backup_prev != Lsn(0) && backup_prev != provided_prev_lsn { return Err(BasebackupError::Server(anyhow!( "backup_prev {backup_prev} != provided_prev_lsn {provided_prev_lsn}" @@ -155,30 +157,55 @@ where }; info!( - "taking basebackup lsn={}, prev_lsn={} (full_backup={}, replica={})", - backup_lsn, prev_lsn, full_backup, replica + "taking basebackup lsn={lsn}, prev_lsn={prev_record_lsn} \ + (full_backup={full_backup}, replica={replica}, gzip={gzip_level:?})", + ); + let span = info_span!("send_tarball", backup_lsn=%lsn); + + let io_concurrency = IoConcurrency::spawn_from_conf( + timeline.conf.get_vectored_concurrent_io, + timeline + .gate + .enter() + .map_err(|_| BasebackupError::Shutdown)?, ); - let basebackup = Basebackup { - ar: Builder::new_non_terminated(write), - timeline, - lsn: backup_lsn, - prev_record_lsn: prev_lsn, - full_backup, - replica, - ctx, - io_concurrency: IoConcurrency::spawn_from_conf( - timeline.conf.get_vectored_concurrent_io, - timeline - .gate - .enter() - .map_err(|_| BasebackupError::Shutdown)?, - ), - }; - basebackup + if let Some(gzip_level) = gzip_level { + let mut encoder = GzipEncoder::with_quality(write, gzip_level); + Basebackup { + ar: Builder::new_non_terminated(&mut encoder), + timeline, + lsn, + prev_record_lsn, + full_backup, + replica, + ctx, + io_concurrency, + } .send_tarball() - .instrument(info_span!("send_tarball", backup_lsn=%backup_lsn)) - .await + .instrument(span) + .await?; + encoder + .shutdown() + .await + .map_err(|err| BasebackupError::Client(err, "gzip"))?; + } else { + Basebackup { + ar: Builder::new_non_terminated(write), + timeline, + lsn, + prev_record_lsn, + full_backup, + replica, + ctx, + io_concurrency, + } + .send_tarball() + .instrument(span) + .await?; + } + + Ok(()) } /// This is short-living object only for the time of tarball creation, @@ -372,6 +399,7 @@ where .partition( self.timeline.get_shard_identity(), self.timeline.conf.max_get_vectored_keys.get() as u64 * BLCKSZ as u64, + BLCKSZ as u64, ); let mut slru_builder = SlruSegmentsBuilder::new(&mut self.ar); @@ -619,10 +647,7 @@ where }; if spcnode == GLOBALTABLESPACE_OID { - let pg_version_str = match self.timeline.pg_version { - 14 | 15 => self.timeline.pg_version.to_string(), - ver => format!("{ver}\x0A"), - }; + let pg_version_str = self.timeline.pg_version.versionfile_string(); let header = new_tar_header("PG_VERSION", pg_version_str.len() as u64)?; self.ar .append(&header, pg_version_str.as_bytes()) @@ -669,7 +694,7 @@ where } // Append dir path for each database - let 
path = format!("base/{}", dbnode); + let path = format!("base/{dbnode}"); let header = new_tar_header_dir(&path)?; self.ar .append(&header, io::empty()) @@ -677,19 +702,16 @@ where .map_err(|e| BasebackupError::Client(e, "add_dbdir,base"))?; if let Some(img) = relmap_img { - let dst_path = format!("base/{}/PG_VERSION", dbnode); + let dst_path = format!("base/{dbnode}/PG_VERSION"); - let pg_version_str = match self.timeline.pg_version { - 14 | 15 => self.timeline.pg_version.to_string(), - ver => format!("{ver}\x0A"), - }; + let pg_version_str = self.timeline.pg_version.versionfile_string(); let header = new_tar_header(&dst_path, pg_version_str.len() as u64)?; self.ar .append(&header, pg_version_str.as_bytes()) .await .map_err(|e| BasebackupError::Client(e, "add_dbdir,base/PG_VERSION"))?; - let relmap_path = format!("base/{}/pg_filenode.map", dbnode); + let relmap_path = format!("base/{dbnode}/pg_filenode.map"); let header = new_tar_header(&relmap_path, img.len() as u64)?; self.ar .append(&header, &img[..]) @@ -713,10 +735,10 @@ where buf.extend_from_slice(&img[..]); let crc = crc32c::crc32c(&img[..]); buf.put_u32_le(crc); - let path = if self.timeline.pg_version < 17 { - format!("pg_twophase/{:>08X}", xid) + let path = if self.timeline.pg_version < PgMajorVersion::PG17 { + format!("pg_twophase/{xid:>08X}") } else { - format!("pg_twophase/{:>016X}", xid) + format!("pg_twophase/{xid:>016X}") }; let header = new_tar_header(&path, buf.len() as u64)?; self.ar @@ -768,7 +790,7 @@ where //send wal segment let segno = self.lsn.segment_number(WAL_SEGMENT_SIZE); let wal_file_name = XLogFileName(PG_TLI, segno, WAL_SEGMENT_SIZE); - let wal_file_path = format!("pg_wal/{}", wal_file_name); + let wal_file_path = format!("pg_wal/{wal_file_name}"); let header = new_tar_header(&wal_file_path, WAL_SEGMENT_SIZE as u64)?; let wal_seg = postgres_ffi::generate_wal_segment( diff --git a/pageserver/src/basebackup_cache.rs b/pageserver/src/basebackup_cache.rs index 24f6413380..69438dae7f 100644 --- a/pageserver/src/basebackup_cache.rs +++ b/pageserver/src/basebackup_cache.rs @@ -1,7 +1,6 @@ use std::{collections::HashMap, sync::Arc}; use anyhow::Context; -use async_compression::tokio::write::GzipEncoder; use camino::{Utf8Path, Utf8PathBuf}; use metrics::core::{AtomicU64, GenericCounter}; use pageserver_api::{config::BasebackupCacheConfig, models::TenantState}; @@ -594,13 +593,6 @@ impl BackgroundTask { let file = tokio::fs::File::create(entry_tmp_path).await?; let mut writer = BufWriter::new(file); - let mut encoder = GzipEncoder::with_quality( - &mut writer, - // Level::Best because compression is not on the hot path of basebackup requests. - // The decompression is almost not affected by the compression level. - async_compression::Level::Best, - ); - // We may receive a request before the WAL record is applied to the timeline. // Wait for the requested LSN to be applied. timeline @@ -613,17 +605,19 @@ impl BackgroundTask { .await?; send_basebackup_tarball( - &mut encoder, + &mut writer, timeline, Some(req_lsn), None, false, false, + // Level::Best because compression is not on the hot path of basebackup requests. + // The decompression is almost not affected by the compression level. 
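With this refactor the gzip framing lives entirely inside send_basebackup_tarball: callers pass an Option<async_compression::Level> and the function wraps the destination writer in a GzipEncoder, calling shutdown() afterwards so the gzip trailer is flushed. A self-contained sketch of that wrapper pattern, using the same async_compression and tokio APIs on a toy payload:

use async_compression::tokio::write::GzipEncoder;
use tokio::io::AsyncWriteExt as _;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut compressed = Vec::new();
    {
        let mut encoder =
            GzipEncoder::with_quality(&mut compressed, async_compression::Level::Fastest);
        encoder.write_all(b"tarball bytes go here").await?;
        // Without shutdown() the gzip footer (CRC and length) is never written.
        encoder.shutdown().await?;
    }
    assert!(compressed.starts_with(&[0x1f, 0x8b])); // gzip magic
    Ok(())
}

Level::Fastest mirrors the interactive basebackup path; the basebackup cache keeps Level::Best because it compresses off the hot path.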
+ Some(async_compression::Level::Best), &ctx, ) .await?; - encoder.shutdown().await?; writer.flush().await?; writer.into_inner().sync_all().await?; diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index 417503089a..d137d651eb 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -583,7 +583,7 @@ fn start_pageserver( deletion_queue_client, l0_flush_global_state, basebackup_prepare_sender, - feature_resolver, + feature_resolver: feature_resolver.clone(), }, shutdown_pageserver.clone(), ); @@ -715,6 +715,7 @@ fn start_pageserver( disk_usage_eviction_state, deletion_queue.new_client(), secondary_controller, + feature_resolver, ) .context("Failed to initialize router state")?, ); diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index 3492a8d966..5b51a9617b 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -11,7 +11,7 @@ use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; -use anyhow::{Context, bail, ensure}; +use anyhow::{Context, ensure}; use camino::{Utf8Path, Utf8PathBuf}; use once_cell::sync::OnceCell; use pageserver_api::config::{ @@ -22,6 +22,7 @@ use pageserver_api::models::ImageCompressionAlgorithm; use pageserver_api::shard::TenantShardId; use pem::Pem; use postgres_backend::AuthType; +use postgres_ffi::PgMajorVersion; use remote_storage::{RemotePath, RemoteStorageConfig}; use reqwest::Url; use storage_broker::Uri; @@ -338,20 +339,16 @@ impl PageServerConf { // // Postgres distribution paths // - pub fn pg_distrib_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_distrib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { let path = self.pg_distrib_dir.clone(); - #[allow(clippy::manual_range_patterns)] - match pg_version { - 14 | 15 | 16 | 17 => Ok(path.join(format!("v{pg_version}"))), - _ => bail!("Unsupported postgres version: {}", pg_version), - } + Ok(path.join(pg_version.v_str())) } - pub fn pg_bin_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_bin_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { Ok(self.pg_distrib_dir(pg_version)?.join("bin")) } - pub fn pg_lib_dir(&self, pg_version: u32) -> anyhow::Result { + pub fn pg_lib_dir(&self, pg_version: PgMajorVersion) -> anyhow::Result { Ok(self.pg_distrib_dir(pg_version)?.join("lib")) } @@ -765,4 +762,23 @@ mod tests { let result = PageServerConf::parse_and_validate(NodeId(0), config_toml, &workdir); assert_eq!(result.is_ok(), is_valid); } + + #[test] + fn test_config_posthog_config_is_valid() { + let input = r#" + control_plane_api = "http://localhost:6666" + + [posthog_config] + server_api_key = "phs_AAA" + client_api_key = "phc_BBB" + project_id = "000" + private_api_url = "https://us.posthog.com" + public_api_url = "https://us.i.posthog.com" + "#; + let config_toml = toml_edit::de::from_str::(input) + .expect("posthogconfig is valid"); + let workdir = Utf8PathBuf::from("/nonexistent"); + PageServerConf::parse_and_validate(NodeId(0), config_toml, &workdir) + .expect("parse_and_validate"); + } } diff --git a/pageserver/src/feature_resolver.rs b/pageserver/src/feature_resolver.rs index 84edd68011..92a9ef2880 100644 --- a/pageserver/src/feature_resolver.rs +++ b/pageserver/src/feature_resolver.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, sync::Arc, time::Duration}; +use arc_swap::ArcSwap; use pageserver_api::config::NodeMetadata; use posthog_client_lite::{ CaptureEvent, FeatureResolverBackgroundLoop, PostHogClientConfig, PostHogEvaluationError, @@ -12,10 +13,13 @@ 
use utils::id::TenantId; use crate::{config::PageServerConf, metrics::FEATURE_FLAG_EVALUATION}; +const DEFAULT_POSTHOG_REFRESH_INTERVAL: Duration = Duration::from_secs(600); + #[derive(Clone)] pub struct FeatureResolver { inner: Option>, internal_properties: Option>>, + force_overrides_for_testing: Arc>>, } impl FeatureResolver { @@ -23,9 +27,17 @@ impl FeatureResolver { Self { inner: None, internal_properties: None, + force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), } } + pub fn update(&self, spec: String) -> anyhow::Result<()> { + if let Some(inner) = &self.inner { + inner.update(spec)?; + } + Ok(()) + } + pub fn spawn( conf: &PageServerConf, shutdown_pageserver: CancellationToken, @@ -139,18 +151,23 @@ impl FeatureResolver { } tenants }; - // TODO: make refresh period configurable - inner - .clone() - .spawn(handle, Duration::from_secs(60), fake_tenants); + inner.clone().spawn( + handle, + posthog_config + .refresh_interval + .unwrap_or(DEFAULT_POSTHOG_REFRESH_INTERVAL), + fake_tenants, + ); Ok(FeatureResolver { inner: Some(inner), internal_properties: Some(internal_properties), + force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), }) } else { Ok(FeatureResolver { inner: None, internal_properties: None, + force_overrides_for_testing: Arc::new(ArcSwap::new(Arc::new(HashMap::new()))), }) } } @@ -190,6 +207,11 @@ impl FeatureResolver { flag_key: &str, tenant_id: TenantId, ) -> Result { + let force_overrides = self.force_overrides_for_testing.load(); + if let Some(value) = force_overrides.get(flag_key) { + return Ok(value.clone()); + } + if let Some(inner) = &self.inner { let res = inner.feature_store().evaluate_multivariate( flag_key, @@ -228,6 +250,15 @@ impl FeatureResolver { flag_key: &str, tenant_id: TenantId, ) -> Result<(), PostHogEvaluationError> { + let force_overrides = self.force_overrides_for_testing.load(); + if let Some(value) = force_overrides.get(flag_key) { + return if value == "true" { + Ok(()) + } else { + Err(PostHogEvaluationError::NoConditionGroupMatched) + }; + } + if let Some(inner) = &self.inner { let res = inner.feature_store().evaluate_boolean( flag_key, @@ -259,8 +290,22 @@ impl FeatureResolver { inner.feature_store().is_feature_flag_boolean(flag_key) } else { Err(PostHogEvaluationError::NotAvailable( - "PostHog integration is not enabled".to_string(), + "PostHog integration is not enabled, cannot auto-determine the flag type" + .to_string(), )) } } + + /// Force override a feature flag for testing. This is only for testing purposes. Assume the caller only call it + /// from a single thread so it won't race. 
+ pub fn force_override_for_testing(&self, flag_key: &str, value: Option<&str>) { + let mut force_overrides = self.force_overrides_for_testing.load().as_ref().clone(); + if let Some(value) = value { + force_overrides.insert(flag_key.to_string(), value.to_string()); + } else { + force_overrides.remove(flag_key); + } + self.force_overrides_for_testing + .store(Arc::new(force_overrides)); + } } diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 626986f580..aa9bec657c 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -41,6 +41,7 @@ use pageserver_api::models::{ TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse, }; use pageserver_api::shard::{ShardCount, TenantShardId}; +use postgres_ffi::PgMajorVersion; use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError}; use scopeguard::defer; use serde_json::json; @@ -59,6 +60,7 @@ use crate::config::PageServerConf; use crate::context; use crate::context::{DownloadBehavior, RequestContext, RequestContextBuilder}; use crate::deletion_queue::DeletionQueueClient; +use crate::feature_resolver::FeatureResolver; use crate::pgdatadir_mapping::LsnForTimestamp; use crate::task_mgr::TaskKind; use crate::tenant::config::LocationConf; @@ -107,6 +109,7 @@ pub struct State { deletion_queue_client: DeletionQueueClient, secondary_controller: SecondaryController, latest_utilization: tokio::sync::Mutex>, + feature_resolver: FeatureResolver, } impl State { @@ -120,6 +123,7 @@ impl State { disk_usage_eviction_state: Arc, deletion_queue_client: DeletionQueueClient, secondary_controller: SecondaryController, + feature_resolver: FeatureResolver, ) -> anyhow::Result { let allowlist_routes = &[ "/v1/status", @@ -140,6 +144,7 @@ impl State { deletion_queue_client, secondary_controller, latest_utilization: Default::default(), + feature_resolver, }) } } @@ -283,11 +288,11 @@ impl From for ApiError { GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => { ApiError::ShuttingDown } - GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{}", e)), + GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{e}")), GetActiveTenantError::Cancelled => ApiError::ShuttingDown, GetActiveTenantError::NotFound(gte) => gte.into(), GetActiveTenantError::WaitForActiveTimeout { .. 
} => { - ApiError::ResourceUnavailable(format!("{}", e).into()) + ApiError::ResourceUnavailable(format!("{e}").into()) } GetActiveTenantError::SwitchedTenant => { // in our HTTP handlers, this error doesn't happen @@ -1011,7 +1016,7 @@ async fn get_lsn_by_timestamp_handler( let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; let timestamp_raw = must_get_query_param(&request, "timestamp")?; let timestamp = humantime::parse_rfc3339(×tamp_raw) - .with_context(|| format!("Invalid time: {:?}", timestamp_raw)) + .with_context(|| format!("Invalid time: {timestamp_raw:?}")) .map_err(ApiError::BadRequest)?; let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp); @@ -1106,7 +1111,7 @@ async fn get_timestamp_of_lsn_handler( json_response(StatusCode::OK, time) } None => Err(ApiError::PreconditionFailed( - format!("Timestamp for lsn {} not found", lsn).into(), + format!("Timestamp for lsn {lsn} not found").into(), )), } } @@ -2417,7 +2422,7 @@ async fn timeline_offload_handler( } if let (false, reason) = timeline.can_offload() { return Err(ApiError::PreconditionFailed( - format!("Timeline::can_offload() check failed: {}", reason) .into(), + format!("Timeline::can_offload() check failed: {reason}") .into(), )); } offload_timeline(&tenant, &timeline) @@ -3381,7 +3386,7 @@ async fn put_tenant_timeline_import_basebackup( let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?; let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?; let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?; - let pg_version: u32 = must_parse_query_param(&request, "pg_version")?; + let pg_version: PgMajorVersion = must_parse_query_param(&request, "pg_version")?; check_permission(&request, Some(tenant_id))?; @@ -3675,8 +3680,8 @@ async fn tenant_evaluate_feature_flag( let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?; check_permission(&request, Some(tenant_shard_id.tenant_id))?; - let flag: String = must_parse_query_param(&request, "flag")?; - let as_type: String = must_parse_query_param(&request, "as")?; + let flag: String = parse_request_param(&request, "flag_key")?; + let as_type: Option = parse_query_param(&request, "as")?; let state = get_state(&request); @@ -3685,11 +3690,11 @@ async fn tenant_evaluate_feature_flag( .tenant_manager .get_attached_tenant_shard(tenant_shard_id)?; let properties = tenant.feature_resolver.collect_properties(tenant_shard_id.tenant_id); - if as_type == "boolean" { + if as_type.as_deref() == Some("boolean") { let result = tenant.feature_resolver.evaluate_boolean(&flag, tenant_shard_id.tenant_id); let result = result.map(|_| true).map_err(|e| e.to_string()); json_response(StatusCode::OK, json!({ "result": result, "properties": properties })) - } else if as_type == "multivariate" { + } else if as_type.as_deref() == Some("multivariate") { let result = tenant.feature_resolver.evaluate_multivariate(&flag, tenant_shard_id.tenant_id).map_err(|e| e.to_string()); json_response(StatusCode::OK, json!({ "result": result, "properties": properties })) } else { @@ -3709,6 +3714,49 @@ async fn tenant_evaluate_feature_flag( .await } +async fn force_override_feature_flag_for_testing_put( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&request, None)?; + + let flag: String = parse_request_param(&request, "flag_key")?; + let value: String = must_parse_query_param(&request, "value")?; + let state = get_state(&request); + state + .feature_resolver + 
.force_override_for_testing(&flag, Some(&value)); + json_response(StatusCode::OK, ()) +} + +async fn force_override_feature_flag_for_testing_delete( + request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&request, None)?; + + let flag: String = parse_request_param(&request, "flag_key")?; + let state = get_state(&request); + state + .feature_resolver + .force_override_for_testing(&flag, None); + json_response(StatusCode::OK, ()) +} + +async fn update_feature_flag_spec( + mut request: Request, + _cancel: CancellationToken, +) -> Result, ApiError> { + check_permission(&request, None)?; + let body = json_request(&mut request).await?; + let state = get_state(&request); + state + .feature_resolver + .update(body) + .map_err(ApiError::InternalServerError)?; + json_response(StatusCode::OK, ()) +} + /// Common functionality of all the HTTP API handlers. /// /// - Adds a tracing span to each request (by `request_span`) @@ -4085,8 +4133,17 @@ pub fn make_router( "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/activate_post_import", |r| api_handler(r, activate_post_import_handler), ) - .get("/v1/tenant/:tenant_shard_id/feature_flag", |r| { + .get("/v1/tenant/:tenant_shard_id/feature_flag/:flag_key", |r| { api_handler(r, tenant_evaluate_feature_flag) }) + .put("/v1/feature_flag/:flag_key", |r| { + testing_api_handler("force override feature flag - put", r, force_override_feature_flag_for_testing_put) + }) + .delete("/v1/feature_flag/:flag_key", |r| { + testing_api_handler("force override feature flag - delete", r, force_override_feature_flag_for_testing_delete) + }) + .post("/v1/feature_flag_spec", |r| { + api_handler(r, update_feature_flag_spec) + }) .any(handler_404)) } diff --git a/pageserver/src/import_datadir.rs b/pageserver/src/import_datadir.rs index 911449c7c5..96fe0c1078 100644 --- a/pageserver/src/import_datadir.rs +++ b/pageserver/src/import_datadir.rs @@ -520,7 +520,7 @@ async fn import_file( } if file_path.starts_with("global") { - let spcnode = postgres_ffi::pg_constants::GLOBALTABLESPACE_OID; + let spcnode = postgres_ffi_types::constants::GLOBALTABLESPACE_OID; let dbnode = 0; match file_name.as_ref() { @@ -553,7 +553,7 @@ async fn import_file( } } } else if file_path.starts_with("base") { - let spcnode = pg_constants::DEFAULTTABLESPACE_OID; + let spcnode = postgres_ffi_types::constants::DEFAULTTABLESPACE_OID; let dbnode: u32 = file_path .iter() .nth(1) diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index ae7cbf1d6b..0dd3c465e0 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -38,6 +38,7 @@ pub mod walredo; use camino::Utf8Path; use deletion_queue::DeletionQueue; +use postgres_ffi::PgMajorVersion; use tenant::mgr::{BackgroundPurges, TenantManager}; use tenant::secondary; use tracing::{info, info_span}; @@ -51,7 +52,7 @@ use tracing::{info, info_span}; /// backwards-compatible changes to the metadata format. pub const STORAGE_FORMAT_VERSION: u16 = 3; -pub const DEFAULT_PG_VERSION: u32 = 17; +pub const DEFAULT_PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; // Magic constants used to identify different kinds of files pub const IMAGE_FILE_MAGIC: u16 = 0x5A60; diff --git a/pageserver/src/metrics.rs b/pageserver/src/metrics.rs index bf54614baa..7929b094b4 100644 --- a/pageserver/src/metrics.rs +++ b/pageserver/src/metrics.rs @@ -1727,12 +1727,7 @@ impl Drop for SmgrOpTimer { impl SmgrOpFlushInProgress { /// The caller must guarantee that `socket_fd`` outlives this function. 
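The override map behind these test-only endpoints is an arc_swap::ArcSwap over a HashMap of flag name to value: evaluation paths load() it lock-free, while the single test writer clones the map, mutates the clone, and store()s it back. A standalone sketch of that copy-on-write pattern (the flag name and value are made up):

use std::collections::HashMap;
use std::sync::Arc;

use arc_swap::ArcSwap;

fn main() {
    let overrides: ArcSwap<HashMap<String, String>> = ArcSwap::new(Arc::new(HashMap::new()));

    // Writer: clone-modify-store. Not atomic against concurrent writers, which
    // is acceptable for a single-threaded, test-only knob.
    let mut next = overrides.load().as_ref().clone();
    next.insert("test-feature-flag".to_string(), "true".to_string());
    overrides.store(Arc::new(next));

    // Reader: evaluation consults the override map before asking PostHog.
    let snapshot = overrides.load();
    assert_eq!(
        snapshot.get("test-feature-flag").map(String::as_str),
        Some("true")
    );
}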
- pub(crate) async fn measure( - self, - started_at: Instant, - mut fut: Fut, - socket_fd: RawFd, - ) -> O + pub(crate) async fn measure(self, started_at: Instant, fut: Fut, socket_fd: RawFd) -> O where Fut: std::future::Future, { @@ -3426,7 +3421,7 @@ impl TimelineMetrics { pub fn dec_frozen_layer(&self, layer: &InMemoryLayer) { assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. })); let labels = self.make_frozen_layer_labels(layer); - let size = layer.try_len().expect("frozen layer should have no writer"); + let size = layer.len(); TIMELINE_LAYER_COUNT .get_metric_with_label_values(&labels) .unwrap() @@ -3441,7 +3436,7 @@ impl TimelineMetrics { pub fn inc_frozen_layer(&self, layer: &InMemoryLayer) { assert!(matches!(layer.info(), InMemoryLayerInfo::Frozen { .. })); let labels = self.make_frozen_layer_labels(layer); - let size = layer.try_len().expect("frozen layer should have no writer"); + let size = layer.len(); TIMELINE_LAYER_COUNT .get_metric_with_label_values(&labels) .unwrap() diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index 57087dc6c3..dd02947e5c 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -13,7 +13,6 @@ use std::time::{Duration, Instant, SystemTime}; use std::{io, str}; use anyhow::{Context as _, anyhow, bail}; -use async_compression::tokio::write::GzipEncoder; use bytes::{Buf as _, BufMut as _, BytesMut}; use futures::future::BoxFuture; use futures::{FutureExt, Stream}; @@ -41,7 +40,7 @@ use postgres_backend::{ AuthType, PostgresBackend, PostgresBackendReader, QueryError, is_expected_io_error, }; use postgres_ffi::BLCKSZ; -use postgres_ffi::pg_constants::DEFAULTTABLESPACE_OID; +use postgres_ffi_types::constants::DEFAULTTABLESPACE_OID; use pq_proto::framed::ConnectionError; use pq_proto::{BeMessage, FeMessage, FeStartupPacket, RowDescriptor}; use smallvec::{SmallVec, smallvec}; @@ -392,16 +391,14 @@ async fn page_service_conn_main( } else { let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id(); Err(io_error).context(format!( - "Postgres connection error for tenant_id={:?} client at peer_addr={}", - tenant_id, peer_addr + "Postgres connection error for tenant_id={tenant_id:?} client at peer_addr={peer_addr}" )) } } other => { let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id(); other.context(format!( - "Postgres query error for tenant_id={:?} client peer_addr={}", - tenant_id, peer_addr + "Postgres query error for tenant_id={tenant_id:?} client peer_addr={peer_addr}" )) } } @@ -2140,8 +2137,7 @@ impl PageServerHandler { if request_lsn < not_modified_since { return Err(PageStreamError::BadRequest( format!( - "invalid request with request LSN {} and not_modified_since {}", - request_lsn, not_modified_since, + "invalid request with request LSN {request_lsn} and not_modified_since {not_modified_since}", ) .into(), )); @@ -2616,6 +2612,7 @@ impl PageServerHandler { prev_lsn, full_backup, replica, + None, &ctx, ) .await?; @@ -2644,31 +2641,6 @@ impl PageServerHandler { .map_err(|err| { BasebackupError::Client(err, "handle_basebackup_request,cached,copy") })?; - } else if gzip { - let mut encoder = GzipEncoder::with_quality( - &mut writer, - // NOTE using fast compression because it's on the critical path - // for compute startup. For an empty database, we get - // <100KB with this method. The Level::Best compression method - // gives us <20KB, but maybe we should add basebackup caching - // on compute shutdown first. 
- async_compression::Level::Fastest, - ); - basebackup::send_basebackup_tarball( - &mut encoder, - &timeline, - lsn, - prev_lsn, - full_backup, - replica, - &ctx, - ) - .await?; - // shutdown the encoder to ensure the gzip footer is written - encoder - .shutdown() - .await - .map_err(|e| QueryError::Disconnected(ConnectionError::Io(e)))?; } else { basebackup::send_basebackup_tarball( &mut writer, @@ -2677,6 +2649,11 @@ impl PageServerHandler { prev_lsn, full_backup, replica, + // NB: using fast compression because it's on the critical path for compute + // startup. For an empty database, we get <100KB with this method. The + // Level::Best compression method gives us <20KB, but maybe we should add + // basebackup caching on compute shutdown first. + gzip.then_some(async_compression::Level::Fastest), &ctx, ) .await?; @@ -3286,7 +3263,14 @@ impl GrpcPageServiceHandler { Ok(req) })) // Run the page service. - .service(proto::PageServiceServer::new(page_service_handler)); + .service( + proto::PageServiceServer::new(page_service_handler) + // Support both gzip and zstd compression. The client decides what to use. + .accept_compressed(tonic::codec::CompressionEncoding::Gzip) + .accept_compressed(tonic::codec::CompressionEncoding::Zstd) + .send_compressed(tonic::codec::CompressionEncoding::Gzip) + .send_compressed(tonic::codec::CompressionEncoding::Zstd), + ); let server = server.add_service(page_service); // Reflection service for use with e.g. grpcurl. @@ -3532,14 +3516,14 @@ impl proto::PageService for GrpcPageServiceHandler { Ok(tonic::Response::new(resp.into())) } - // TODO: ensure clients use gzip compression for the stream. #[instrument(skip_all, fields(lsn))] async fn get_base_backup( &self, req: tonic::Request, ) -> Result, tonic::Status> { - // Send 64 KB chunks to avoid large memory allocations. - const CHUNK_SIZE: usize = 64 * 1024; + // Send chunks of 256 KB to avoid large memory allocations. pagebench basebackup shows this + // to be the sweet spot where throughput is saturated. + const CHUNK_SIZE: usize = 256 * 1024; let timeline = self.get_request_timeline(&req).await?; let ctx = self.ctx.with_scope_timeline(&timeline); @@ -3549,7 +3533,7 @@ impl proto::PageService for GrpcPageServiceHandler { if timeline.is_archived() == Some(true) { return Err(tonic::Status::failed_precondition("timeline is archived")); } - let req: page_api::GetBaseBackupRequest = req.into_inner().into(); + let req: page_api::GetBaseBackupRequest = req.into_inner().try_into()?; span_record!(lsn=?req.lsn); @@ -3575,6 +3559,15 @@ impl proto::PageService for GrpcPageServiceHandler { let span = Span::current(); let (mut simplex_read, mut simplex_write) = tokio::io::simplex(CHUNK_SIZE); let jh = tokio::spawn(async move { + let gzip_level = match req.compression { + page_api::BaseBackupCompression::None => None, + // NB: using fast compression because it's on the critical path for compute + // startup. For an empty database, we get <100KB with this method. The + // Level::Best compression method gives us <20KB, but maybe we should add + // basebackup caching on compute shutdown first. 
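The gRPC handler streams the tarball through an in-memory tokio::io::simplex pipe: a spawned task writes the (optionally gzipped) basebackup into one end while the response stream drains CHUNK_SIZE buffers from the other. A self-contained sketch of that wiring, with a toy payload in place of the tarball and a byte count in place of the response chunks:

use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    const CHUNK_SIZE: usize = 256 * 1024;
    let (mut rx, mut tx) = tokio::io::simplex(CHUNK_SIZE);

    let writer = tokio::spawn(async move {
        tx.write_all(b"pretend this is a gzipped tarball").await?;
        tx.shutdown().await // EOF for the reader
    });

    let mut buf = vec![0u8; CHUNK_SIZE];
    let mut total = 0;
    loop {
        let n = rx.read(&mut buf).await?;
        if n == 0 {
            break; // writer side finished
        }
        // In the real handler each filled buffer becomes a response chunk.
        total += n;
    }
    writer.await.expect("writer task panicked")?;
    assert_eq!(total, b"pretend this is a gzipped tarball".len());
    Ok(())
}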
+ page_api::BaseBackupCompression::Gzip => Some(async_compression::Level::Fastest), + }; + let result = basebackup::send_basebackup_tarball( &mut simplex_write, &timeline, @@ -3582,6 +3575,7 @@ impl proto::PageService for GrpcPageServiceHandler { None, req.full, req.replica, + gzip_level, &ctx, ) .instrument(span) // propagate request span diff --git a/pageserver/src/pgdatadir_mapping.rs b/pageserver/src/pgdatadir_mapping.rs index 633d62210d..09a7a8a651 100644 --- a/pageserver/src/pgdatadir_mapping.rs +++ b/pageserver/src/pgdatadir_mapping.rs @@ -23,12 +23,11 @@ use pageserver_api::key::{ }; use pageserver_api::keyspace::{KeySpaceRandomAccum, SparseKeySpace}; use pageserver_api::models::RelSizeMigration; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; -use pageserver_api::value::Value; -use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; -use postgres_ffi::{BLCKSZ, Oid, RepOriginId, TimestampTz, TransactionId}; +use postgres_ffi::{BLCKSZ, PgMajorVersion, TimestampTz, TransactionId}; +use postgres_ffi_types::forknum::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM}; +use postgres_ffi_types::{Oid, RepOriginId}; use serde::{Deserialize, Serialize}; use strum::IntoEnumIterator; use tokio_util::sync::CancellationToken; @@ -36,6 +35,8 @@ use tracing::{debug, info, info_span, trace, warn}; use utils::bin_ser::{BeSer, DeserializeError}; use utils::lsn::Lsn; use utils::pausable_failpoint; +use wal_decoder::models::record::NeonWalRecord; +use wal_decoder::models::value::Value; use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta}; use super::tenant::{PageReconstructError, Timeline}; @@ -720,6 +721,7 @@ impl Timeline { let batches = keyspace.partition( self.get_shard_identity(), self.conf.max_get_vectored_keys.get() as u64 * BLCKSZ as u64, + BLCKSZ as u64, ); let io_concurrency = IoConcurrency::spawn_from_conf( @@ -960,6 +962,7 @@ impl Timeline { let batches = keyspace.partition( self.get_shard_identity(), self.conf.max_get_vectored_keys.get() as u64 * BLCKSZ as u64, + BLCKSZ as u64, ); let io_concurrency = IoConcurrency::spawn_from_conf( @@ -1078,7 +1081,7 @@ impl Timeline { // fetch directory entry let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?; - if self.pg_version >= 17 { + if self.pg_version >= PgMajorVersion::PG17 { Ok(TwoPhaseDirectoryV17::des(&buf)?.xids) } else { Ok(TwoPhaseDirectory::des(&buf)? 
@@ -1182,7 +1185,7 @@ impl Timeline { } let origin_id = k.field6 as RepOriginId; let origin_lsn = Lsn::des(&v) - .with_context(|| format!("decode replorigin value for {}: {v:?}", origin_id))?; + .with_context(|| format!("decode replorigin value for {origin_id}: {v:?}"))?; if origin_lsn != Lsn::INVALID { result.insert(origin_id, origin_lsn); } @@ -1610,7 +1613,7 @@ impl DatadirModification<'_> { .push((DirectoryKind::Db, MetricsUpdate::Set(0))); self.put(DBDIR_KEY, Value::Image(buf.into())); - let buf = if self.tline.pg_version >= 17 { + let buf = if self.tline.pg_version >= PgMajorVersion::PG17 { TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 { xids: HashSet::new(), }) @@ -1964,7 +1967,7 @@ impl DatadirModification<'_> { ) -> Result<(), WalIngestError> { // Add it to the directory entry let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?; - let newdirbuf = if self.tline.pg_version >= 17 { + let newdirbuf = if self.tline.pg_version >= PgMajorVersion::PG17 { let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?; if !dir.xids.insert(xid) { Err(WalIngestErrorKind::FileAlreadyExists(xid))?; @@ -2380,7 +2383,7 @@ impl DatadirModification<'_> { ) -> Result<(), WalIngestError> { // Remove it from the directory entry let buf = self.get(TWOPHASEDIR_KEY, ctx).await?; - let newdirbuf = if self.tline.pg_version >= 17 { + let newdirbuf = if self.tline.pg_version >= PgMajorVersion::PG17 { let mut dir = TwoPhaseDirectoryV17::des(&buf)?; if !dir.xids.remove(&xid) { @@ -2437,8 +2440,7 @@ impl DatadirModification<'_> { if path == p { assert!( modifying_file.is_none(), - "duplicated entries found for {}", - path + "duplicated entries found for {path}" ); modifying_file = Some(content); } else { diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index cfecf5561c..c71655ce17 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -38,6 +38,7 @@ use pageserver_api::models::{ WalRedoManagerStatus, }; use pageserver_api::shard::{ShardIdentity, ShardStripeSize, TenantShardId}; +use postgres_ffi::PgMajorVersion; use remote_storage::{DownloadError, GenericRemoteStorage, TimeoutOrCancel}; use remote_timeline_client::index::GcCompactionState; use remote_timeline_client::manifest::{ @@ -496,8 +497,8 @@ impl WalRedoManager { key: pageserver_api::key::Key, lsn: Lsn, base_img: Option<(Lsn, bytes::Bytes)>, - records: Vec<(Lsn, pageserver_api::record::NeonWalRecord)>, - pg_version: u32, + records: Vec<(Lsn, wal_decoder::models::record::NeonWalRecord)>, + pg_version: PgMajorVersion, redo_attempt_type: RedoAttemptType, ) -> Result { match self { @@ -933,7 +934,7 @@ pub(crate) enum CreateTimelineParams { pub(crate) struct CreateTimelineParamsBootstrap { pub(crate) new_timeline_id: TimelineId, pub(crate) existing_initdb_timeline_id: Option, - pub(crate) pg_version: u32, + pub(crate) pg_version: PgMajorVersion, } /// NB: See comment on [`CreateTimelineIdempotency::Branch`] for why there's no `pg_version` here. @@ -971,7 +972,7 @@ pub(crate) enum CreateTimelineIdempotency { /// NB: special treatment, see comment in [`Self`]. FailWithConflict, Bootstrap { - pg_version: u32, + pg_version: PgMajorVersion, }, /// NB: branches always have the same `pg_version` as their ancestor. /// While [`pageserver_api::models::TimelineCreateRequestMode::Branch::pg_version`] @@ -1859,6 +1860,29 @@ impl TenantShard { } } + // At this point we've initialized all timelines and are tracking them. + // Now compute the layer visibility for all (not offloaded) timelines. 
+ let compute_visiblity_for = { + let timelines_accessor = self.timelines.lock().unwrap(); + let mut timelines_offloaded_accessor = self.timelines_offloaded.lock().unwrap(); + + timelines_offloaded_accessor.extend(offloaded_timelines_list.into_iter()); + + // Before activation, populate each Timeline's GcInfo with information about its children + self.initialize_gc_info(&timelines_accessor, &timelines_offloaded_accessor, None); + + timelines_accessor.values().cloned().collect::>() + }; + + for tl in compute_visiblity_for { + tl.update_layer_visibility().await.with_context(|| { + format!( + "failed initial timeline visibility computation {} for tenant {}", + tl.timeline_id, self.tenant_shard_id + ) + })?; + } + // Walk through deleted timelines, resume deletion for (timeline_id, index_part, remote_timeline_client) in timelines_to_resume_deletions { remote_timeline_client @@ -1878,10 +1902,6 @@ impl TenantShard { .context("resume_deletion") .map_err(LoadLocalTimelineError::ResumeDeletion)?; } - { - let mut offloaded_timelines_accessor = self.timelines_offloaded.lock().unwrap(); - offloaded_timelines_accessor.extend(offloaded_timelines_list.into_iter()); - } // Stash the preloaded tenant manifest, and upload a new manifest if changed. // @@ -2522,7 +2542,7 @@ impl TenantShard { self: &Arc, new_timeline_id: TimelineId, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ctx: &RequestContext, ) -> anyhow::Result<(UninitializedTimeline, RequestContext)> { anyhow::ensure!( @@ -2574,7 +2594,7 @@ impl TenantShard { self: &Arc, new_timeline_id: TimelineId, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ctx: &RequestContext, ) -> anyhow::Result> { let (uninit_tl, ctx) = self @@ -2613,7 +2633,7 @@ impl TenantShard { self: &Arc, new_timeline_id: TimelineId, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ctx: &RequestContext, in_memory_layer_desc: Vec, delta_layer_desc: Vec, @@ -2879,7 +2899,7 @@ impl TenantShard { Lsn(0), initdb_lsn, initdb_lsn, - 15, + PgMajorVersion::PG15, ); this.prepare_new_timeline( new_timeline_id, @@ -3430,7 +3450,7 @@ impl TenantShard { use pageserver_api::models::ActivatingFrom; match &*current_state { TenantState::Activating(_) | TenantState::Active | TenantState::Broken { .. } | TenantState::Stopping { .. } => { - panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {state:?}", state = current_state); + panic!("caller is responsible for calling activate() only on Loading / Attaching tenants, got {current_state:?}"); } TenantState::Attaching => { *current_state = TenantState::Activating(ActivatingFrom::Attaching); @@ -3449,9 +3469,6 @@ impl TenantShard { .values() .filter(|timeline| !(timeline.is_broken() || timeline.is_stopping())); - // Before activation, populate each Timeline's GcInfo with information about its children - self.initialize_gc_info(&timelines_accessor, &timelines_offloaded_accessor, None); - // Spawn gc and compaction loops. The loops will shut themselves // down when they notice that the tenant is inactive. 
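The new attach-time visibility pass follows the standard rule for std::sync::Mutex in async code: clone the Arc'd timelines out while holding the lock, drop the guard, and only then .await on each one. A standalone sketch of that pattern, with toy types standing in for Timeline and update_layer_visibility():

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

async fn visit_all(timelines: &Mutex<HashMap<u32, Arc<String>>>) {
    let snapshot: Vec<Arc<String>> = {
        let guard = timelines.lock().unwrap();
        guard.values().cloned().collect()
        // guard dropped here, before any await point
    };
    for tl in snapshot {
        do_async_work(&tl).await;
    }
}

async fn do_async_work(_name: &str) {
    // stand-in for Timeline::update_layer_visibility()
    tokio::task::yield_now().await;
}

#[tokio::main]
async fn main() {
    let timelines = Mutex::new(HashMap::from([(1u32, Arc::new("tl-1".to_string()))]));
    visit_all(&timelines).await;
}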
tasks::start_background_loops(self, background_jobs_can_start); @@ -5074,7 +5091,7 @@ impl TenantShard { pub(crate) async fn bootstrap_timeline_test( self: &Arc, timeline_id: TimelineId, - pg_version: u32, + pg_version: PgMajorVersion, load_existing_initdb: Option, ctx: &RequestContext, ) -> anyhow::Result> { @@ -5216,7 +5233,7 @@ impl TenantShard { async fn bootstrap_timeline( self: &Arc, timeline_id: TimelineId, - pg_version: u32, + pg_version: PgMajorVersion, load_existing_initdb: Option, ctx: &RequestContext, ) -> Result { @@ -5754,7 +5771,7 @@ impl TenantShard { async fn run_initdb( conf: &'static PageServerConf, initdb_target_dir: &Utf8Path, - pg_version: u32, + pg_version: PgMajorVersion, cancel: &CancellationToken, ) -> Result<(), InitdbError> { let initdb_bin_path = conf @@ -5836,10 +5853,10 @@ pub(crate) mod harness { use once_cell::sync::OnceCell; use pageserver_api::key::Key; use pageserver_api::models::ShardParameters; - use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::ShardIndex; use utils::id::TenantId; use utils::logging; + use wal_decoder::models::record::NeonWalRecord; use super::*; use crate::deletion_queue::mock::MockDeletionQueue; @@ -6035,7 +6052,7 @@ pub(crate) mod harness { lsn: Lsn, base_img: Option<(Lsn, Bytes)>, records: Vec<(Lsn, NeonWalRecord)>, - _pg_version: u32, + _pg_version: PgMajorVersion, _redo_attempt_type: RedoAttemptType, ) -> Result { let records_neon = records.iter().all(|r| apply_neon::can_apply_in_neon(&r.1)); @@ -6094,9 +6111,6 @@ mod tests { #[cfg(feature = "testing")] use pageserver_api::keyspace::KeySpaceRandomAccum; use pageserver_api::models::{CompactionAlgorithm, CompactionAlgorithmSettings}; - #[cfg(feature = "testing")] - use pageserver_api::record::NeonWalRecord; - use pageserver_api::value::Value; use pageserver_compaction::helpers::overlaps_with; #[cfg(feature = "testing")] use rand::SeedableRng; @@ -6117,6 +6131,9 @@ mod tests { use timeline::{CompactOptions, DeltaLayerTestDesc, VersionedKeySpaceQuery}; use utils::id::TenantId; use utils::shard::{ShardCount, ShardNumber}; + #[cfg(feature = "testing")] + use wal_decoder::models::record::NeonWalRecord; + use wal_decoder::models::value::Value; use super::*; use crate::DEFAULT_PG_VERSION; @@ -6207,7 +6224,7 @@ mod tests { async fn randomize_timeline( tenant: &Arc, new_timeline_id: TimelineId, - pg_version: u32, + pg_version: PgMajorVersion, spec: TestTimelineSpecification, random: &mut rand::rngs::StdRng, ctx: &RequestContext, @@ -6600,7 +6617,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6610,7 +6627,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6624,7 +6641,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -6634,7 +6651,7 @@ mod tests { .put( *TEST_KEY, lsn, - &Value::Image(test_img(&format!("foo at {}", lsn))), + &Value::Image(test_img(&format!("foo at {lsn}"))), ctx, ) .await?; @@ -7133,7 +7150,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), ctx, ) .await?; @@ -7421,7 +7438,7 @@ mod tests { .put( gap_at_key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", gap_at_key, current_lsn))), + 
&Value::Image(test_img(&format!("{gap_at_key} at {current_lsn}"))), &ctx, ) .await?; @@ -7460,7 +7477,7 @@ mod tests { .put( current_key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", current_key, current_lsn))), + &Value::Image(test_img(&format!("{current_key} at {current_lsn}"))), &ctx, ) .await?; @@ -7568,7 +7585,7 @@ mod tests { while key < end_key { current_lsn += 0x10; - let image_value = format!("{} at {}", child_gap_at_key, current_lsn); + let image_value = format!("{child_gap_at_key} at {current_lsn}"); let mut writer = parent_timeline.writer().await; writer @@ -7611,7 +7628,7 @@ mod tests { .put( key, current_lsn, - &Value::Image(test_img(&format!("{} at {}", key, current_lsn))), + &Value::Image(test_img(&format!("{key} at {current_lsn}"))), &ctx, ) .await?; @@ -7732,7 +7749,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7753,7 +7770,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7767,7 +7784,7 @@ mod tests { test_key.field6 = blknum as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at {last_lsn}")) ); } @@ -7813,7 +7830,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -7842,11 +7859,11 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; - println!("updating {} at {}", blknum, lsn); + println!("updating {blknum} at {lsn}"); writer.finish_write(lsn); drop(writer); updated[blknum] = lsn; @@ -7857,7 +7874,7 @@ mod tests { test_key.field6 = blknum as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at {last_lsn}")) ); } @@ -7910,11 +7927,11 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} {} at {}", idx, blknum, lsn))), + &Value::Image(test_img(&format!("{idx} {blknum} at {lsn}"))), &ctx, ) .await?; - println!("updating [{}][{}] at {}", idx, blknum, lsn); + println!("updating [{idx}][{blknum}] at {lsn}"); writer.finish_write(lsn); drop(writer); updated[idx][blknum] = lsn; @@ -8120,7 +8137,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8137,7 +8154,7 @@ mod tests { test_key.field6 = (blknum * STEP) as u32; assert_eq!( tline.get(test_key, lsn, &ctx).await?, - test_img(&format!("{} at {}", blknum, last_lsn)) + test_img(&format!("{blknum} at {last_lsn}")) ); } @@ -8174,7 +8191,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8427,7 +8444,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -8447,7 +8464,7 @@ mod tests { .put( test_key, lsn, - &Value::Image(test_img(&format!("{} at {}", blknum, lsn))), + &Value::Image(test_img(&format!("{blknum} at {lsn}"))), &ctx, ) .await?; @@ -9368,12 
+9385,7 @@ mod tests { let end_lsn = Lsn(0x100); let image_layers = (0x20..=0x90) .step_by(0x10) - .map(|n| { - ( - Lsn(n), - vec![(key, test_img(&format!("data key at {:x}", n)))], - ) - }) + .map(|n| (Lsn(n), vec![(key, test_img(&format!("data key at {n:x}")))])) .collect(); let timeline = tenant diff --git a/pageserver/src/tenant/checks.rs b/pageserver/src/tenant/checks.rs index d5b979ab2a..83d54f09de 100644 --- a/pageserver/src/tenant/checks.rs +++ b/pageserver/src/tenant/checks.rs @@ -63,8 +63,7 @@ pub fn check_valid_layermap(metadata: &[LayerName]) -> Option { && overlaps_with(&layer.key_range, &other_layer.key_range) { let err = format!( - "layer violates the layer map LSN split assumption: layer {} intersects with layer {}", - layer, other_layer + "layer violates the layer map LSN split assumption: layer {layer} intersects with layer {other_layer}" ); return Some(err); } diff --git a/pageserver/src/tenant/config.rs b/pageserver/src/tenant/config.rs index 030b43a020..c5087f7e0f 100644 --- a/pageserver/src/tenant/config.rs +++ b/pageserver/src/tenant/config.rs @@ -61,8 +61,10 @@ pub(crate) struct LocationConf { /// The detailed shard identity. This structure is already scoped within /// a TenantShardId, but we need the full ShardIdentity to enable calculating /// key->shard mappings. - // TODO(vlad): Remove this default once all configs have a shard identity on disk. - #[serde(default = "ShardIdentity::unsharded")] + /// + /// NB: we store this even for unsharded tenants, so that we agree with storcon on the intended + /// stripe size. Otherwise, a split request that does not specify a stripe size may use a + /// different default than storcon, which can lead to incorrect stripe sizes and corruption. pub(crate) shard: ShardIdentity, /// The pan-cluster tenant configuration, the same on all locations diff --git a/pageserver/src/tenant/ephemeral_file.rs b/pageserver/src/tenant/ephemeral_file.rs index 2edf22e9fd..203b5bf592 100644 --- a/pageserver/src/tenant/ephemeral_file.rs +++ b/pageserver/src/tenant/ephemeral_file.rs @@ -3,7 +3,7 @@ use std::io; use std::sync::Arc; -use std::sync::atomic::AtomicU64; +use std::sync::atomic::{AtomicU64, Ordering}; use camino::Utf8PathBuf; use num_traits::Num; @@ -18,6 +18,7 @@ use crate::assert_u64_eq_usize::{U64IsUsize, UsizeIsU64}; use crate::config::PageServerConf; use crate::context::RequestContext; use crate::page_cache; +use crate::tenant::storage_layer::inmemory_layer::GlobalResourceUnits; use crate::tenant::storage_layer::inmemory_layer::vectored_dio_read::File; use crate::virtual_file::owned_buffers_io::io_buf_aligned::IoBufAlignedMut; use crate::virtual_file::owned_buffers_io::slice::SliceMutExt; @@ -30,9 +31,13 @@ pub struct EphemeralFile { _tenant_shard_id: TenantShardId, _timeline_id: TimelineId, page_cache_file_id: page_cache::FileId, - bytes_written: u64, file: TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter, - buffered_writer: BufferedWriter, + + buffered_writer: tokio::sync::RwLock, + + bytes_written: AtomicU64, + + resource_units: std::sync::Mutex, } type BufferedWriter = owned_buffers_io::write::BufferedWriter< @@ -94,9 +99,8 @@ impl EphemeralFile { _tenant_shard_id: tenant_shard_id, _timeline_id: timeline_id, page_cache_file_id, - bytes_written: 0, file: file.clone(), - buffered_writer: BufferedWriter::new( + buffered_writer: tokio::sync::RwLock::new(BufferedWriter::new( file, 0, || IoBufferMut::with_capacity(TAIL_SZ), @@ -104,7 +108,9 @@ impl EphemeralFile { cancel.child_token(), ctx, info_span!(parent: None, 
"ephemeral_file_buffered_writer", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), timeline_id=%timeline_id, path = %filename), - ), + )), + bytes_written: AtomicU64::new(0), + resource_units: std::sync::Mutex::new(GlobalResourceUnits::new()), }) } } @@ -151,15 +157,17 @@ impl std::ops::Deref for TempVirtualFileCoOwnedByEphemeralFileAndBufferedWriter #[derive(Debug, thiserror::Error)] pub(crate) enum EphemeralFileWriteError { - #[error("{0}")] - TooLong(String), #[error("cancelled")] Cancelled, } impl EphemeralFile { pub(crate) fn len(&self) -> u64 { - self.bytes_written + // TODO(vlad): The value returned here is not always correct if + // we have more than one concurrent writer. Writes are always + // sequenced, but we could grab the buffered writer lock if we wanted + // to. + self.bytes_written.load(Ordering::Acquire) } pub(crate) fn page_cache_file_id(&self) -> page_cache::FileId { @@ -186,7 +194,7 @@ impl EphemeralFile { /// Panics if the write is short because there's no way we can recover from that. /// TODO: make upstack handle this as an error. pub(crate) async fn write_raw( - &mut self, + &self, srcbuf: &[u8], ctx: &RequestContext, ) -> Result { @@ -198,22 +206,13 @@ impl EphemeralFile { } async fn write_raw_controlled( - &mut self, + &self, srcbuf: &[u8], ctx: &RequestContext, ) -> Result<(u64, Option), EphemeralFileWriteError> { - let pos = self.bytes_written; + let mut writer = self.buffered_writer.write().await; - let new_bytes_written = pos.checked_add(srcbuf.len().into_u64()).ok_or_else(|| { - EphemeralFileWriteError::TooLong(format!( - "write would grow EphemeralFile beyond u64::MAX: len={pos} writen={srcbuf_len}", - srcbuf_len = srcbuf.len(), - )) - })?; - - // Write the payload - let (nwritten, control) = self - .buffered_writer + let (nwritten, control) = writer .write_buffered_borrowed_controlled(srcbuf, ctx) .await .map_err(|e| match e { @@ -225,43 +224,69 @@ impl EphemeralFile { "buffered writer has no short writes" ); - self.bytes_written = new_bytes_written; + // There's no realistic risk of overflow here. We won't have exabytes sized files on disk. + let pos = self + .bytes_written + .fetch_add(srcbuf.len().into_u64(), Ordering::AcqRel); + + let mut resource_units = self.resource_units.lock().unwrap(); + resource_units.maybe_publish_size(self.bytes_written.load(Ordering::Relaxed)); Ok((pos, control)) } + + pub(crate) fn tick(&self) -> Option { + let mut resource_units = self.resource_units.lock().unwrap(); + let len = self.bytes_written.load(Ordering::Relaxed); + resource_units.publish_size(len) + } } impl super::storage_layer::inmemory_layer::vectored_dio_read::File for EphemeralFile { async fn read_exact_at_eof_ok( &self, start: u64, - dst: tokio_epoll_uring::Slice, + mut dst: tokio_epoll_uring::Slice, ctx: &RequestContext, ) -> std::io::Result<(tokio_epoll_uring::Slice, usize)> { - let submitted_offset = self.buffered_writer.bytes_submitted(); + // We will fill the slice in back to front. Hence, we need + // the slice to be fully initialized. + // TODO(vlad): Is there a nicer way of doing this? + dst.as_mut_rust_slice_full_zeroed(); - let mutable = match self.buffered_writer.inspect_mutable() { - Some(mutable) => &mutable[0..mutable.pending()], - None => { - // Timeline::cancel and hence buffered writer flush was cancelled. - // Remain read-available while timeline is shutting down. 
- &[] - } - }; + let writer = self.buffered_writer.read().await; - let maybe_flushed = self.buffered_writer.inspect_maybe_flushed(); + // Read bytes written while under lock. This is a hack to deal with concurrent + // writes updating the number of bytes written. `bytes_written` is not DIO aligned + // but we may end the read there. + // + // TODO(vlad): Feels like there's a nicer path where we align the end if it + // shoots over the end of the file. + let bytes_written = self.bytes_written.load(Ordering::Acquire); let dst_cap = dst.bytes_total().into_u64(); let end = { // saturating_add is correct here because the max file size is u64::MAX, so, // if start + dst.len() > u64::MAX, then we know it will be a short read let mut end: u64 = start.saturating_add(dst_cap); - if end > self.bytes_written { - end = self.bytes_written; + if end > bytes_written { + end = bytes_written; } end }; + let submitted_offset = writer.bytes_submitted(); + let maybe_flushed = writer.inspect_maybe_flushed(); + + let mutable = match writer.inspect_mutable() { + Some(mutable) => &mutable[0..mutable.pending()], + None => { + // Timeline::cancel and hence buffered writer flush was cancelled. + // Remain read-available while timeline is shutting down. + &[] + } + }; + // inclusive, exclusive #[derive(Debug)] struct Range(N, N); @@ -306,13 +331,33 @@ impl super::storage_layer::inmemory_layer::vectored_dio_read::File for Ephemeral let mutable_range = Range(std::cmp::max(start, submitted_offset), end); - let dst = if written_range.len() > 0 { + // There are three sources from which we might have to read data: + // 1. The file itself + // 2. The buffer which contains changes currently being flushed + // 3. The buffer which contains changes yet to be flushed + // + // For better concurrency, we do them in reverse order: perform the in-memory + // reads while holding the writer lock, drop the writer lock and read from the + // file if required.
+ + let dst = if mutable_range.len() > 0 { + let offset_in_buffer = mutable_range + .0 + .checked_sub(submitted_offset) + .unwrap() + .into_usize(); + let to_copy = + &mutable[offset_in_buffer..(offset_in_buffer + mutable_range.len().into_usize())]; let bounds = dst.bounds(); - let slice = self - .file - .read_exact_at(dst.slice(0..written_range.len().into_usize()), start, ctx) - .await?; - Slice::from_buf_bounds(Slice::into_inner(slice), bounds) + let mut view = dst.slice({ + let start = + written_range.len().into_usize() + maybe_flushed_range.len().into_usize(); + let end = start.checked_add(mutable_range.len().into_usize()).unwrap(); + start..end + }); + view.as_mut_rust_slice_full_zeroed() + .copy_from_slice(to_copy); + Slice::from_buf_bounds(Slice::into_inner(view), bounds) } else { dst }; @@ -342,24 +387,15 @@ impl super::storage_layer::inmemory_layer::vectored_dio_read::File for Ephemeral dst }; - let dst = if mutable_range.len() > 0 { - let offset_in_buffer = mutable_range - .0 - .checked_sub(submitted_offset) - .unwrap() - .into_usize(); - let to_copy = - &mutable[offset_in_buffer..(offset_in_buffer + mutable_range.len().into_usize())]; + drop(writer); + + let dst = if written_range.len() > 0 { let bounds = dst.bounds(); - let mut view = dst.slice({ - let start = - written_range.len().into_usize() + maybe_flushed_range.len().into_usize(); - let end = start.checked_add(mutable_range.len().into_usize()).unwrap(); - start..end - }); - view.as_mut_rust_slice_full_zeroed() - .copy_from_slice(to_copy); - Slice::from_buf_bounds(Slice::into_inner(view), bounds) + let slice = self + .file + .read_exact_at(dst.slice(0..written_range.len().into_usize()), start, ctx) + .await?; + Slice::from_buf_bounds(Slice::into_inner(slice), bounds) } else { dst }; @@ -460,13 +496,15 @@ mod tests { let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); - let mut file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) + let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); - let mutable = file.buffered_writer.mutable(); + let writer = file.buffered_writer.read().await; + let mutable = writer.mutable(); let cap = mutable.capacity(); let align = mutable.align(); + drop(writer); let write_nbytes = cap * 2 + cap / 2; @@ -504,10 +542,11 @@ mod tests { let file_contents = std::fs::read(file.file.path()).unwrap(); assert!(file_contents == content[0..cap * 2]); - let maybe_flushed_buffer_contents = file.buffered_writer.inspect_maybe_flushed().unwrap(); + let writer = file.buffered_writer.read().await; + let maybe_flushed_buffer_contents = writer.inspect_maybe_flushed().unwrap(); assert_eq!(&maybe_flushed_buffer_contents[..], &content[cap..cap * 2]); - let mutable_buffer_contents = file.buffered_writer.mutable(); + let mutable_buffer_contents = writer.mutable(); assert_eq!(mutable_buffer_contents, &content[cap * 2..write_nbytes]); } @@ -517,12 +556,14 @@ mod tests { let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); - let mut file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) + let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); // mutable buffer and maybe_flushed buffer each has `cap` bytes. 
- let cap = file.buffered_writer.mutable().capacity(); + let writer = file.buffered_writer.read().await; + let cap = writer.mutable().capacity(); + drop(writer); let content: Vec = rand::thread_rng() .sample_iter(rand::distributions::Standard) @@ -540,12 +581,13 @@ mod tests { 2 * cap.into_u64(), "buffered writer requires one write to be flushed if we write 2.5x buffer capacity" ); + let writer = file.buffered_writer.read().await; assert_eq!( - &file.buffered_writer.inspect_maybe_flushed().unwrap()[0..cap], + &writer.inspect_maybe_flushed().unwrap()[0..cap], &content[cap..cap * 2] ); assert_eq!( - &file.buffered_writer.mutable()[0..cap / 2], + &writer.mutable()[0..cap / 2], &content[cap * 2..cap * 2 + cap / 2] ); } @@ -563,13 +605,15 @@ mod tests { let gate = utils::sync::gate::Gate::default(); let cancel = CancellationToken::new(); - let mut file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) + let file = EphemeralFile::create(conf, tenant_id, timeline_id, &gate, &cancel, &ctx) .await .unwrap(); - let mutable = file.buffered_writer.mutable(); + let writer = file.buffered_writer.read().await; + let mutable = writer.mutable(); let cap = mutable.capacity(); let align = mutable.align(); + drop(writer); let content: Vec = rand::thread_rng() .sample_iter(rand::distributions::Standard) .take(cap * 2 + cap / 2) diff --git a/pageserver/src/tenant/metadata.rs b/pageserver/src/tenant/metadata.rs index bea3128265..2f407de951 100644 --- a/pageserver/src/tenant/metadata.rs +++ b/pageserver/src/tenant/metadata.rs @@ -18,6 +18,7 @@ //! [`IndexPart`]: super::remote_timeline_client::index::IndexPart use anyhow::ensure; +use postgres_ffi::PgMajorVersion; use serde::{Deserialize, Serialize}; use utils::bin_ser::{BeSer, SerializeError}; use utils::id::TimelineId; @@ -136,7 +137,7 @@ struct TimelineMetadataBodyV2 { latest_gc_cutoff_lsn: Lsn, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -167,7 +168,7 @@ impl TimelineMetadata { ancestor_lsn: Lsn, latest_gc_cutoff_lsn: Lsn, initdb_lsn: Lsn, - pg_version: u32, + pg_version: PgMajorVersion, ) -> Self { Self { hdr: TimelineMetadataHeader { @@ -215,7 +216,7 @@ impl TimelineMetadata { ancestor_lsn: body.ancestor_lsn, latest_gc_cutoff_lsn: body.latest_gc_cutoff_lsn, initdb_lsn: body.initdb_lsn, - pg_version: 14, // All timelines created before this version had pg_version 14 + pg_version: PgMajorVersion::PG14, // All timelines created before this version had pg_version 14 }; hdr.format_version = METADATA_FORMAT_VERSION; @@ -317,7 +318,7 @@ impl TimelineMetadata { self.body.initdb_lsn } - pub fn pg_version(&self) -> u32 { + pub fn pg_version(&self) -> PgMajorVersion { self.body.pg_version } @@ -331,7 +332,7 @@ impl TimelineMetadata { Lsn::from_hex("00000000").unwrap(), Lsn::from_hex("00000000").unwrap(), Lsn::from_hex("00000000").unwrap(), - 0, + PgMajorVersion::PG14, ); let bytes = instance.to_bytes().unwrap(); Self::from_bytes(&bytes).unwrap() @@ -545,13 +546,12 @@ mod tests { Lsn(0), Lsn(0), Lsn(0), - 14, // All timelines created before this version had pg_version 14 + PgMajorVersion::PG14, // All timelines created before this version had pg_version 14 ); assert_eq!( deserialized_metadata.body, expected_metadata.body, - "Metadata of the old version {} should be upgraded to the latest version {}", - METADATA_OLD_FORMAT_VERSION, METADATA_FORMAT_VERSION + "Metadata of the old version {METADATA_OLD_FORMAT_VERSION} should be upgraded to the latest version 
{METADATA_FORMAT_VERSION}" ); } @@ -566,7 +566,7 @@ mod tests { Lsn(0), // Updating this version to 17 will cause the test to fail at the // next assert_eq!(). - 16, + PgMajorVersion::PG16, ); let expected_bytes = vec![ /* TimelineMetadataHeader */ diff --git a/pageserver/src/tenant/remote_timeline_client/index.rs b/pageserver/src/tenant/remote_timeline_client/index.rs index a5cd8989aa..6060c42cbb 100644 --- a/pageserver/src/tenant/remote_timeline_client/index.rs +++ b/pageserver/src/tenant/remote_timeline_client/index.rs @@ -427,8 +427,8 @@ impl GcBlocking { #[cfg(test)] mod tests { + use postgres_ffi::PgMajorVersion; use std::str::FromStr; - use utils::id::TimelineId; use super::*; @@ -831,7 +831,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), archived_at: None, @@ -893,7 +893,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: Some(parse_naive_datetime("2023-07-31T09:00:00.123000000")), archived_at: Some(parse_naive_datetime("2023-04-29T09:00:00.123000000")), @@ -957,7 +957,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), @@ -1033,7 +1033,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), @@ -1114,7 +1114,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), @@ -1199,7 +1199,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), @@ -1287,7 +1287,7 @@ mod tests { Lsn::INVALID, Lsn::from_str("0/1696070").unwrap(), Lsn::from_str("0/1696070").unwrap(), - 14, + PgMajorVersion::PG14, ).with_recalculated_checksum().unwrap(), deleted_at: None, lineage: Default::default(), diff --git a/pageserver/src/tenant/secondary/downloader.rs b/pageserver/src/tenant/secondary/downloader.rs index dd49c843f3..6b315dc4bc 100644 --- a/pageserver/src/tenant/secondary/downloader.rs +++ b/pageserver/src/tenant/secondary/downloader.rs @@ -1427,7 +1427,7 @@ async fn init_timeline_state( let local_meta = dentry .metadata() .await - .fatal_err(&format!("Read metadata on {}", file_path)); + .fatal_err(&format!("Read metadata on {file_path}")); let file_name = file_path.file_name().expect("created it from the dentry"); if crate::is_temporary(&file_path) diff --git a/pageserver/src/tenant/storage_layer.rs b/pageserver/src/tenant/storage_layer.rs index 9d15e7c4de..9fbb9d2438 100644 --- a/pageserver/src/tenant/storage_layer.rs +++ b/pageserver/src/tenant/storage_layer.rs @@ -34,11 +34,11 @@ pub use layer_name::{DeltaLayerName, ImageLayerName, LayerName}; use pageserver_api::config::GetVectoredConcurrentIo; use pageserver_api::key::Key; use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum}; -use 
pageserver_api::record::NeonWalRecord; -use pageserver_api::value::Value; use tracing::{Instrument, info_span, trace}; use utils::lsn::Lsn; use utils::sync::gate::GateGuard; +use wal_decoder::models::record::NeonWalRecord; +use wal_decoder::models::value::Value; use self::inmemory_layer::InMemoryLayerFileId; use super::PageReconstructError; @@ -109,7 +109,7 @@ pub(crate) enum OnDiskValue { /// Reconstruct data accumulated for a single key during a vectored get #[derive(Debug, Default)] -pub(crate) struct VectoredValueReconstructState { +pub struct VectoredValueReconstructState { pub(crate) on_disk_values: Vec<(Lsn, OnDiskValueIoWaiter)>, pub(crate) situation: ValueReconstructSituation, @@ -244,13 +244,60 @@ impl VectoredValueReconstructState { res } + + /// Benchmarking utility to await for the completion of all pending ios + /// + /// # Cancel-Safety + /// + /// Technically fine to stop polling this future, but, the IOs will still + /// be executed to completion by the sidecar task and hold on to / consume resources. + /// Better not do it to make reasoning about the system easier. + #[cfg(feature = "benchmarking")] + pub async fn sink_pending_ios(self) -> Result<(), std::io::Error> { + let mut res = Ok(()); + + // We should try hard not to bail early, so that by the time we return from this + // function, all IO for this value is done. It's not required -- we could totally + // stop polling the IO futures in the sidecar task, they need to support that, + // but just stopping to poll doesn't reduce the IO load on the disk. It's easier + // to reason about the system if we just wait for all IO to complete, even if + // we're no longer interested in the result. + // + // Revisit this when IO futures are replaced with a more sophisticated IO system + // and an IO scheduler, where we know which IOs were submitted and which ones + // just queued. Cf the comment on IoConcurrency::spawn_io. + for (_lsn, waiter) in self.on_disk_values { + let value_recv_res = waiter + .wait_completion() + // we rely on the caller to poll us to completion, so this is not a bail point + .await; + + match (&mut res, value_recv_res) { + (Err(_), _) => { + // We've already failed, no need to process more. + } + (Ok(_), Err(_wait_err)) => { + // This shouldn't happen - likely the sidecar task panicked. + unreachable!(); + } + (Ok(_), Ok(Err(err))) => { + let err: std::io::Error = err; + res = Err(err); + } + (Ok(_ok), Ok(Ok(OnDiskValue::RawImage(_img)))) => {} + (Ok(_ok), Ok(Ok(OnDiskValue::WalRecordOrImage(_buf)))) => {} + } + } + + res + } } /// Bag of data accumulated during a vectored get.. -pub(crate) struct ValuesReconstructState { +pub struct ValuesReconstructState { /// The keys will be removed after `get_vectored` completes. The caller outside `Timeline` /// should not expect to get anything from this hashmap. - pub(crate) keys: HashMap, + pub keys: HashMap, /// The keys which are already retrieved keys_done: KeySpaceRandomAccum, @@ -272,7 +319,7 @@ pub(crate) struct ValuesReconstructState { /// The desired end state is that we always do parallel IO. /// This struct and the dispatching in the impl will be removed once /// we've built enough confidence.
-pub(crate) enum IoConcurrency { +pub enum IoConcurrency { Sequential, SidecarTask { task_id: usize, @@ -317,10 +364,7 @@ impl IoConcurrency { Self::spawn(SelectedIoConcurrency::Sequential) } - pub(crate) fn spawn_from_conf( - conf: GetVectoredConcurrentIo, - gate_guard: GateGuard, - ) -> IoConcurrency { + pub fn spawn_from_conf(conf: GetVectoredConcurrentIo, gate_guard: GateGuard) -> IoConcurrency { let selected = match conf { GetVectoredConcurrentIo::Sequential => SelectedIoConcurrency::Sequential, GetVectoredConcurrentIo::SidecarTask => SelectedIoConcurrency::SidecarTask(gate_guard), @@ -425,16 +469,6 @@ impl IoConcurrency { } } - pub(crate) fn clone(&self) -> Self { - match self { - IoConcurrency::Sequential => IoConcurrency::Sequential, - IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask { - task_id: *task_id, - ios_tx: ios_tx.clone(), - }, - } - } - /// Submit an IO to be executed in the background. DEADLOCK RISK, read the full doc string. /// /// The IO is represented as an opaque future. @@ -573,6 +607,18 @@ impl IoConcurrency { } } +impl Clone for IoConcurrency { + fn clone(&self) -> Self { + match self { + IoConcurrency::Sequential => IoConcurrency::Sequential, + IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask { + task_id: *task_id, + ios_tx: ios_tx.clone(), + }, + } + } +} + /// Make noise in case the [`ValuesReconstructState`] gets dropped while /// there are still IOs in flight. /// Refer to `collect_pending_ios` for why we prefer not to do that. @@ -603,7 +649,7 @@ impl Drop for ValuesReconstructState { } impl ValuesReconstructState { - pub(crate) fn new(io_concurrency: IoConcurrency) -> Self { + pub fn new(io_concurrency: IoConcurrency) -> Self { Self { keys: HashMap::new(), keys_done: KeySpaceRandomAccum::new(), diff --git a/pageserver/src/tenant/storage_layer/batch_split_writer.rs b/pageserver/src/tenant/storage_layer/batch_split_writer.rs index 51f2e909a2..1d50a5f3a0 100644 --- a/pageserver/src/tenant/storage_layer/batch_split_writer.rs +++ b/pageserver/src/tenant/storage_layer/batch_split_writer.rs @@ -4,11 +4,11 @@ use std::sync::Arc; use bytes::Bytes; use pageserver_api::key::{KEY_SIZE, Key}; -use pageserver_api::value::Value; use tokio_util::sync::CancellationToken; use utils::id::TimelineId; use utils::lsn::Lsn; use utils::shard::TenantShardId; +use wal_decoder::models::value::Value; use super::errors::PutError; use super::layer::S3_UPLOAD_LIMIT; diff --git a/pageserver/src/tenant/storage_layer/delta_layer.rs b/pageserver/src/tenant/storage_layer/delta_layer.rs index e82a28bb4c..c2f76c859c 100644 --- a/pageserver/src/tenant/storage_layer/delta_layer.rs +++ b/pageserver/src/tenant/storage_layer/delta_layer.rs @@ -44,7 +44,6 @@ use pageserver_api::key::{DBDIR_KEY, KEY_SIZE, Key}; use pageserver_api::keyspace::KeySpace; use pageserver_api::models::ImageCompressionAlgorithm; use pageserver_api::shard::TenantShardId; -use pageserver_api::value::Value; use serde::{Deserialize, Serialize}; use tokio::sync::OnceCell; use tokio_epoll_uring::IoBuf; @@ -54,6 +53,7 @@ use utils::bin_ser::BeSer; use utils::bin_ser::SerializeError; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; +use wal_decoder::models::value::Value; use super::errors::PutError; use super::{ @@ -783,7 +783,7 @@ impl DeltaLayer { ctx, ) .await - .with_context(|| format!("Failed to open file '{}'", path))?; + .with_context(|| format!("Failed to open file '{path}'"))?; let file_id = page_cache::next_file_id(); let block_reader = 
FileBlockReader::new(&file, file_id); let summary_blk = block_reader.read_blk(0, ctx).await?; @@ -1306,7 +1306,7 @@ impl DeltaLayerInner { // is it an image or will_init walrecord? // FIXME: this could be handled by threading the BlobRef to the // VectoredReadBuilder - let will_init = pageserver_api::value::ValueBytes::will_init(&data) + let will_init = wal_decoder::models::value::ValueBytes::will_init(&data) .inspect_err(|_e| { #[cfg(feature = "testing")] tracing::error!(data=?utils::Hex(&data), err=?_e, %key, %lsn, "failed to parse will_init out of serialized value"); @@ -1369,7 +1369,7 @@ impl DeltaLayerInner { format!(" img {} bytes", img.len()) } Value::WalRecord(rec) => { - let wal_desc = pageserver_api::record::describe_wal_record(&rec)?; + let wal_desc = wal_decoder::models::record::describe_wal_record(&rec)?; format!( " rec {} bytes will_init: {} {}", buf.len(), @@ -1401,7 +1401,7 @@ impl DeltaLayerInner { match val { Value::Image(img) => { let checkpoint = CheckPoint::decode(&img)?; - println!(" CHECKPOINT: {:?}", checkpoint); + println!(" CHECKPOINT: {checkpoint:?}"); } Value::WalRecord(_rec) => { println!(" unexpected walrecord value for checkpoint key"); @@ -1622,12 +1622,6 @@ impl DeltaLayerIterator<'_> { pub(crate) mod test { use std::collections::BTreeMap; - use bytes::Bytes; - use itertools::MinMaxResult; - use pageserver_api::value::Value; - use rand::prelude::{SeedableRng, SliceRandom, StdRng}; - use rand::{Rng, RngCore}; - use super::*; use crate::DEFAULT_PG_VERSION; use crate::context::DownloadBehavior; @@ -1637,6 +1631,11 @@ pub(crate) mod test { use crate::tenant::storage_layer::{Layer, ResidentLayer}; use crate::tenant::timeline::layer_manager::LayerManagerLockHolder; use crate::tenant::{TenantShard, Timeline}; + use bytes::Bytes; + use itertools::MinMaxResult; + use postgres_ffi::PgMajorVersion; + use rand::prelude::{SeedableRng, SliceRandom, StdRng}; + use rand::{Rng, RngCore}; /// Construct an index for a fictional delta layer and and then /// traverse in order to plan vectored reads for a query. 
Finally, @@ -1988,7 +1987,7 @@ pub(crate) mod test { #[tokio::test] async fn copy_delta_prefix_smoke() { use bytes::Bytes; - use pageserver_api::record::NeonWalRecord; + use wal_decoder::models::record::NeonWalRecord; let h = crate::tenant::harness::TenantHarness::create("truncate_delta_smoke") .await @@ -1996,7 +1995,7 @@ pub(crate) mod test { let (tenant, ctx) = h.load().await; let ctx = &ctx; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, ctx) + .create_test_timeline(TimelineId::generate(), Lsn(0x10), PgMajorVersion::PG14, ctx) .await .unwrap(); let ctx = &ctx.with_scope_timeline(&timeline); diff --git a/pageserver/src/tenant/storage_layer/filter_iterator.rs b/pageserver/src/tenant/storage_layer/filter_iterator.rs index 1a330ecfc2..d345195446 100644 --- a/pageserver/src/tenant/storage_layer/filter_iterator.rs +++ b/pageserver/src/tenant/storage_layer/filter_iterator.rs @@ -4,8 +4,8 @@ use std::sync::Arc; use anyhow::bail; use pageserver_api::key::Key; use pageserver_api::keyspace::{KeySpace, SparseKeySpace}; -use pageserver_api::value::Value; use utils::lsn::Lsn; +use wal_decoder::models::value::Value; use super::PersistentLayerKey; use super::merge_iterator::{MergeIterator, MergeIteratorItem}; @@ -126,7 +126,6 @@ mod tests { #[tokio::test] async fn filter_keyspace_iterator() { use bytes::Bytes; - use pageserver_api::value::Value; let harness = TenantHarness::create("filter_iterator_filter_keyspace_iterator") .await diff --git a/pageserver/src/tenant/storage_layer/image_layer.rs b/pageserver/src/tenant/storage_layer/image_layer.rs index 740f53f928..9f76f697d3 100644 --- a/pageserver/src/tenant/storage_layer/image_layer.rs +++ b/pageserver/src/tenant/storage_layer/image_layer.rs @@ -42,7 +42,6 @@ use pageserver_api::config::MaxVectoredReadBytes; use pageserver_api::key::{DBDIR_KEY, KEY_SIZE, Key}; use pageserver_api::keyspace::KeySpace; use pageserver_api::shard::{ShardIdentity, TenantShardId}; -use pageserver_api::value::Value; use serde::{Deserialize, Serialize}; use tokio::sync::OnceCell; use tokio_stream::StreamExt; @@ -52,6 +51,7 @@ use utils::bin_ser::BeSer; use utils::bin_ser::SerializeError; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; +use wal_decoder::models::value::Value; use super::errors::PutError; use super::layer_name::ImageLayerName; @@ -272,8 +272,7 @@ impl ImageLayer { conf.timeline_path(&tenant_shard_id, &timeline_id) .join(format!( - "{fname}.{:x}.{TEMP_FILE_SUFFIX}", - filename_disambiguator + "{fname}.{filename_disambiguator:x}.{TEMP_FILE_SUFFIX}" )) } @@ -370,7 +369,7 @@ impl ImageLayer { ctx, ) .await - .with_context(|| format!("Failed to open file '{}'", path))?; + .with_context(|| format!("Failed to open file '{path}'"))?; let file_id = page_cache::next_file_id(); let block_reader = FileBlockReader::new(&file, file_id); let summary_blk = block_reader.read_blk(0, ctx).await?; @@ -1232,10 +1231,10 @@ mod test { use itertools::Itertools; use pageserver_api::key::Key; use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize}; - use pageserver_api::value::Value; use utils::generation::Generation; use utils::id::{TenantId, TimelineId}; use utils::lsn::Lsn; + use wal_decoder::models::value::Value; use super::{ImageLayerIterator, ImageLayerWriter}; use crate::DEFAULT_PG_VERSION; @@ -1475,7 +1474,7 @@ mod test { assert_eq!(l1, expect_lsn); assert_eq!(&i1, i2); } - (o1, o2) => panic!("iterators length mismatch: {:?}, {:?}", o1, o2), + (o1, o2) => panic!("iterators length mismatch: {o1:?}, 
{o2:?}"), } } } diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer.rs b/pageserver/src/tenant/storage_layer/inmemory_layer.rs index 200beba115..c4d53c6405 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer.rs @@ -70,23 +70,15 @@ pub struct InMemoryLayer { /// We use a separate lock for the index to reduce the critical section /// during which reads cannot be planned. /// - /// If you need access to both the index and the underlying file at the same time, - /// respect the following locking order to avoid deadlocks: - /// 1. [`InMemoryLayer::inner`] - /// 2. [`InMemoryLayer::index`] - /// - /// Note that the file backing [`InMemoryLayer::inner`] is append-only, - /// so it is not necessary to hold simultaneous locks on index. - /// This avoids holding index locks across IO, and is crucial for avoiding read tail latency. + /// Note that the file backing [`InMemoryLayer::file`] is append-only, + /// so it is not necessary to hold a lock on the index while reading or writing from the file. /// In particular: - /// 1. It is safe to read and release [`InMemoryLayer::index`] before locking and reading from [`InMemoryLayer::inner`]. - /// 2. It is safe to write and release [`InMemoryLayer::inner`] before locking and updating [`InMemoryLayer::index`]. + /// 1. It is safe to read and release [`InMemoryLayer::index`] before reading from [`InMemoryLayer::file`]. + /// 2. It is safe to write to [`InMemoryLayer::file`] before locking and updating [`InMemoryLayer::index`]. index: RwLock>>, - /// The above fields never change, except for `end_lsn`, which is only set once, - /// and `index` (see rationale there). - /// All other changing parts are in `inner`, and protected by a mutex. - inner: RwLock, + /// Wrapper for the actual on-disk file. Uses interior mutability for concurrent reads/writes. + file: EphemeralFile, estimated_in_mem_size: AtomicU64, } @@ -96,20 +88,10 @@ impl std::fmt::Debug for InMemoryLayer { f.debug_struct("InMemoryLayer") .field("start_lsn", &self.start_lsn) .field("end_lsn", &self.end_lsn) - .field("inner", &self.inner) .finish() } } -pub struct InMemoryLayerInner { - /// The values are stored in a serialized format in this file. - /// Each serialized Value is preceded by a 'u32' length field. - /// PerSeg::page_versions map stores offsets into this file. - file: EphemeralFile, - - resource_units: GlobalResourceUnits, -} - /// Support the same max blob length as blob_io, because ultimately /// all the InMemoryLayer contents end up being written into a delta layer, /// using the [`crate::tenant::blob_io`]. @@ -258,12 +240,6 @@ struct IndexEntryUnpacked { pos: u64, } -impl std::fmt::Debug for InMemoryLayerInner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InMemoryLayerInner").finish() - } -} - /// State shared by all in-memory (ephemeral) layers. Updated infrequently during background ticks in Timeline, /// to minimize contention. /// @@ -280,7 +256,7 @@ pub(crate) struct GlobalResources { } // Per-timeline RAII struct for its contribution to [`GlobalResources`] -struct GlobalResourceUnits { +pub(crate) struct GlobalResourceUnits { // How many dirty bytes have I added to the global dirty_bytes: this guard object is responsible // for decrementing the global counter by this many bytes when dropped. dirty_bytes: u64, @@ -292,7 +268,7 @@ impl GlobalResourceUnits { // updated when the Timeline "ticks" in the background. 
const MAX_SIZE_DRIFT: u64 = 10 * 1024 * 1024; - fn new() -> Self { + pub(crate) fn new() -> Self { GLOBAL_RESOURCES .dirty_layers .fetch_add(1, AtomicOrdering::Relaxed); @@ -304,7 +280,7 @@ impl GlobalResourceUnits { /// /// Returns the effective layer size limit that should be applied, if any, to keep /// the total number of dirty bytes below the configured maximum. - fn publish_size(&mut self, size: u64) -> Option { + pub(crate) fn publish_size(&mut self, size: u64) -> Option { let new_global_dirty_bytes = match size.cmp(&self.dirty_bytes) { Ordering::Equal => GLOBAL_RESOURCES.dirty_bytes.load(AtomicOrdering::Relaxed), Ordering::Greater => { @@ -349,7 +325,7 @@ impl GlobalResourceUnits { // Call publish_size if the input size differs from last published size by more than // the drift limit - fn maybe_publish_size(&mut self, size: u64) { + pub(crate) fn maybe_publish_size(&mut self, size: u64) { let publish = match size.cmp(&self.dirty_bytes) { Ordering::Equal => false, Ordering::Greater => size - self.dirty_bytes > Self::MAX_SIZE_DRIFT, @@ -398,8 +374,8 @@ impl InMemoryLayer { } } - pub(crate) fn try_len(&self) -> Option { - self.inner.try_read().map(|i| i.file.len()).ok() + pub(crate) fn len(&self) -> u64 { + self.file.len() } pub(crate) fn assert_writable(&self) { @@ -430,7 +406,7 @@ impl InMemoryLayer { // Look up the keys in the provided keyspace and update // the reconstruct state with whatever is found. - pub(crate) async fn get_values_reconstruct_data( + pub async fn get_values_reconstruct_data( self: &Arc, keyspace: KeySpace, lsn_range: Range, @@ -479,14 +455,13 @@ impl InMemoryLayer { } } } - drop(index); // release the lock before we spawn the IO; if it's serial-mode IO we will deadlock on the read().await below + drop(index); // release the lock before we spawn the IO let read_from = Arc::clone(self); let read_ctx = ctx.attached_child(); reconstruct_state .spawn_io(async move { - let inner = read_from.inner.read().await; let f = vectored_dio_read::execute( - &inner.file, + &read_from.file, reads .iter() .flat_map(|(_, value_reads)| value_reads.iter().map(|v| &v.read)), @@ -518,7 +493,6 @@ impl InMemoryLayer { // This is kinda forced for InMemoryLayer because we need to inner.read() anyway, // but it's less obvious for DeltaLayer and ImageLayer. So, keep this explicit // drop for consistency among all three layer types. - drop(inner); drop(read_from); }) .await; @@ -537,7 +511,7 @@ fn inmem_layer_log_display( start_lsn: Lsn, end_lsn: Lsn, ) -> std::fmt::Result { - write!(f, "timeline {} in-memory ", timeline)?; + write!(f, "timeline {timeline} in-memory ")?; inmem_layer_display(f, start_lsn, end_lsn) } @@ -549,12 +523,6 @@ impl std::fmt::Display for InMemoryLayer { } impl InMemoryLayer { - /// Get layer size. - pub async fn size(&self) -> Result { - let inner = self.inner.read().await; - Ok(inner.file.len()) - } - pub fn estimated_in_mem_size(&self) -> u64 { self.estimated_in_mem_size.load(AtomicOrdering::Relaxed) } @@ -587,10 +555,7 @@ impl InMemoryLayer { end_lsn: OnceLock::new(), opened_at: Instant::now(), index: RwLock::new(BTreeMap::new()), - inner: RwLock::new(InMemoryLayerInner { - file, - resource_units: GlobalResourceUnits::new(), - }), + file, estimated_in_mem_size: AtomicU64::new(0), }) } @@ -599,41 +564,37 @@ impl InMemoryLayer { /// /// Errors are not retryable, the [`InMemoryLayer`] must be discarded, and not be read from. /// The reason why it's not retryable is that the [`EphemeralFile`] writes are not retryable. 
+ /// + /// This method shall not be called concurrently. We enforce this property via [`crate::tenant::Timeline::write_lock`]. + /// /// TODO: it can be made retryable if we aborted the process on EphemeralFile write errors. pub async fn put_batch( &self, serialized_batch: SerializedValueBatch, ctx: &RequestContext, ) -> anyhow::Result<()> { - let (base_offset, metadata) = { - let mut inner = self.inner.write().await; - self.assert_writable(); + self.assert_writable(); - let base_offset = inner.file.len(); + let base_offset = self.file.len(); - let SerializedValueBatch { - raw, - metadata, - max_lsn: _, - len: _, - } = serialized_batch; + let SerializedValueBatch { + raw, + metadata, + max_lsn: _, + len: _, + } = serialized_batch; - // Write the batch to the file - inner.file.write_raw(&raw, ctx).await?; - let new_size = inner.file.len(); + // Write the batch to the file + self.file.write_raw(&raw, ctx).await?; + let new_size = self.file.len(); - let expected_new_len = base_offset - .checked_add(raw.len().into_u64()) - // write_raw would error if we were to overflow u64. - // also IndexEntry and higher levels in - //the code don't allow the file to grow that large - .unwrap(); - assert_eq!(new_size, expected_new_len); - - inner.resource_units.maybe_publish_size(new_size); - - (base_offset, metadata) - }; + let expected_new_len = base_offset + .checked_add(raw.len().into_u64()) + // write_raw would error if we were to overflow u64. + // also IndexEntry and higher levels in + //the code don't allow the file to grow that large + .unwrap(); + assert_eq!(new_size, expected_new_len); // Update the index with the new entries let mut index = self.index.write().await; @@ -686,10 +647,8 @@ impl InMemoryLayer { self.opened_at } - pub(crate) async fn tick(&self) -> Option { - let mut inner = self.inner.write().await; - let size = inner.file.len(); - inner.resource_units.publish_size(size) + pub(crate) fn tick(&self) -> Option { + self.file.tick() } pub(crate) async fn put_tombstones(&self, _key_ranges: &[(Range, Lsn)]) -> Result<()> { @@ -753,12 +712,6 @@ impl InMemoryLayer { gate: &utils::sync::gate::Gate, cancel: CancellationToken, ) -> Result> { - // Grab the lock in read-mode. We hold it over the I/O, but because this - // layer is not writeable anymore, no one should be trying to acquire the - // write lock on it, so we shouldn't block anyone. See the comment on - // [`InMemoryLayer::freeze`] to understand how locking between the append path - // and layer flushing works. - let inner = self.inner.read().await; let index = self.index.read().await; use l0_flush::Inner; @@ -793,7 +746,7 @@ impl InMemoryLayer { match l0_flush_global_state { l0_flush::Inner::Direct { .. 
} => { - let file_contents = inner.file.load_to_io_buf(ctx).await?; + let file_contents = self.file.load_to_io_buf(ctx).await?; let file_contents = file_contents.freeze(); for (key, vec_map) in index.iter() { diff --git a/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs b/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs index ea354fc716..27fbc6f5fb 100644 --- a/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs +++ b/pageserver/src/tenant/storage_layer/inmemory_layer/vectored_dio_read.rs @@ -380,7 +380,7 @@ impl std::fmt::Debug for LogicalReadState { write!(f, "Ongoing({:?})", BufferDebug::from(b as &dyn Buffer)) } LogicalReadState::Ok(b) => write!(f, "Ok({:?})", BufferDebug::from(b as &dyn Buffer)), - LogicalReadState::Error(e) => write!(f, "Error({:?})", e), + LogicalReadState::Error(e) => write!(f, "Error({e:?})"), LogicalReadState::Undefined => write!(f, "Undefined"), } } diff --git a/pageserver/src/tenant/storage_layer/layer.rs b/pageserver/src/tenant/storage_layer/layer.rs index 3d55972017..0be13e67a8 100644 --- a/pageserver/src/tenant/storage_layer/layer.rs +++ b/pageserver/src/tenant/storage_layer/layer.rs @@ -105,7 +105,7 @@ impl std::fmt::Display for Layer { impl std::fmt::Debug for Layer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self) + write!(f, "{self}") } } diff --git a/pageserver/src/tenant/storage_layer/layer/tests.rs b/pageserver/src/tenant/storage_layer/layer/tests.rs index 2f2ff0f273..9bdce163c9 100644 --- a/pageserver/src/tenant/storage_layer/layer/tests.rs +++ b/pageserver/src/tenant/storage_layer/layer/tests.rs @@ -1,6 +1,7 @@ use std::time::UNIX_EPOCH; use pageserver_api::key::{CONTROLFILE_KEY, Key}; +use postgres_ffi::PgMajorVersion; use tokio::task::JoinSet; use utils::completion::{self, Completion}; use utils::id::TimelineId; @@ -45,7 +46,7 @@ async fn smoke_test() { .create_test_timeline_with_layers( TimelineId::generate(), Lsn(0x10), - 14, + PgMajorVersion::PG14, &ctx, Default::default(), // in-memory layers Default::default(), @@ -256,7 +257,12 @@ async fn evict_and_wait_on_wanted_deleted() { let (tenant, ctx) = h.load().await; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); @@ -341,7 +347,12 @@ fn read_wins_pending_eviction() { let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); @@ -474,7 +485,12 @@ fn multiple_pending_evictions_scenario(name: &'static str, in_order: bool) { let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); @@ -644,7 +660,12 @@ async fn cancelled_get_or_maybe_download_does_not_cancel_eviction() { let (tenant, ctx) = h.load().await; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + 
PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); @@ -730,7 +751,12 @@ async fn evict_and_wait_does_not_wait_for_download() { let download_span = span.in_scope(|| tracing::info_span!("downloading", timeline_id = 1)); let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); let ctx = ctx.with_scope_timeline(&timeline); @@ -824,7 +850,7 @@ async fn evict_and_wait_does_not_wait_for_download() { #[tokio::test(start_paused = true)] async fn eviction_cancellation_on_drop() { use bytes::Bytes; - use pageserver_api::value::Value; + use wal_decoder::models::value::Value; // this is the runtime on which Layer spawns the blocking tasks on let handle = tokio::runtime::Handle::current(); @@ -836,7 +862,12 @@ async fn eviction_cancellation_on_drop() { let (tenant, ctx) = h.load().await; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); diff --git a/pageserver/src/tenant/storage_layer/merge_iterator.rs b/pageserver/src/tenant/storage_layer/merge_iterator.rs index ea3dea50c3..c15abcdf3f 100644 --- a/pageserver/src/tenant/storage_layer/merge_iterator.rs +++ b/pageserver/src/tenant/storage_layer/merge_iterator.rs @@ -4,8 +4,8 @@ use std::sync::Arc; use anyhow::bail; use pageserver_api::key::Key; -use pageserver_api::value::Value; use utils::lsn::Lsn; +use wal_decoder::models::value::Value; use super::delta_layer::{DeltaLayerInner, DeltaLayerIterator}; use super::image_layer::{ImageLayerInner, ImageLayerIterator}; @@ -402,9 +402,9 @@ impl<'a> MergeIterator<'a> { mod tests { use itertools::Itertools; use pageserver_api::key::Key; - #[cfg(feature = "testing")] - use pageserver_api::record::NeonWalRecord; use utils::lsn::Lsn; + #[cfg(feature = "testing")] + use wal_decoder::models::record::NeonWalRecord; use super::*; use crate::DEFAULT_PG_VERSION; @@ -436,7 +436,6 @@ mod tests { #[tokio::test] async fn merge_in_between() { use bytes::Bytes; - use pageserver_api::value::Value; let harness = TenantHarness::create("merge_iterator_merge_in_between") .await @@ -501,7 +500,6 @@ mod tests { #[tokio::test] async fn delta_merge() { use bytes::Bytes; - use pageserver_api::value::Value; let harness = TenantHarness::create("merge_iterator_delta_merge") .await @@ -578,7 +576,6 @@ mod tests { #[tokio::test] async fn delta_image_mixed_merge() { use bytes::Bytes; - use pageserver_api::value::Value; let harness = TenantHarness::create("merge_iterator_delta_image_mixed_merge") .await diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index a1969ecae6..4ca005bfd4 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -56,11 +56,9 @@ use pageserver_api::models::{ }; use pageserver_api::reltag::{BlockNumber, RelTag}; use pageserver_api::shard::{ShardIdentity, ShardIndex, ShardNumber, TenantShardId}; -#[cfg(test)] -use pageserver_api::value::Value; use postgres_connection::PgConnectionConfig; use postgres_ffi::v14::xlog_utils; -use postgres_ffi::{WAL_SEGMENT_SIZE, to_pg_timestamp}; +use postgres_ffi::{PgMajorVersion, WAL_SEGMENT_SIZE, to_pg_timestamp}; use rand::Rng; use remote_storage::DownloadError; use serde_with::serde_as; @@ -81,6 +79,8 @@ use utils::seqwait::SeqWait; use utils::simple_rcu::{Rcu, 
RcuReadGuard}; use utils::sync::gate::{Gate, GateGuard}; use utils::{completion, critical, fs_ext, pausable_failpoint}; +#[cfg(test)] +use wal_decoder::models::value::Value; use wal_decoder::serialized_batch::{SerializedValueBatch, ValueMeta}; use self::delete::DeleteTimelineFlow; @@ -178,7 +178,7 @@ pub enum LastImageLayerCreationStatus { impl std::fmt::Display for ImageLayerCreationMode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self) + write!(f, "{self:?}") } } @@ -225,7 +225,7 @@ pub struct Timeline { /// to shards, and is constant through the lifetime of this Timeline. shard_identity: ShardIdentity, - pub pg_version: u32, + pub pg_version: PgMajorVersion, /// The tuple has two elements. /// 1. `LayerFileManager` keeps track of the various physical representations of the layer files (inmem, local, remote). @@ -632,7 +632,7 @@ pub enum ReadPathLayerId { impl std::fmt::Display for ReadPathLayerId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ReadPathLayerId::PersistentLayer(key) => write!(f, "{}", key), + ReadPathLayerId::PersistentLayer(key) => write!(f, "{key}"), ReadPathLayerId::InMemoryLayer(range) => { write!(f, "in-mem {}..{}", range.start, range.end) } @@ -708,7 +708,7 @@ impl MissingKeyError { impl std::fmt::Debug for MissingKeyError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self) + write!(f, "{self}") } } @@ -721,19 +721,19 @@ impl std::fmt::Display for MissingKeyError { )?; if let Some(ref ancestor_lsn) = self.ancestor_lsn { - write!(f, ", ancestor {}", ancestor_lsn)?; + write!(f, ", ancestor {ancestor_lsn}")?; } if let Some(ref query) = self.query { - write!(f, ", query {}", query)?; + write!(f, ", query {query}")?; } if let Some(ref read_path) = self.read_path { - write!(f, "\n{}", read_path)?; + write!(f, "\n{read_path}")?; } if let Some(ref backtrace) = self.backtrace { - write!(f, "\n{}", backtrace)?; + write!(f, "\n{backtrace}")?; } Ok(()) @@ -816,7 +816,7 @@ impl From for FlushLayerError { } #[derive(thiserror::Error, Debug)] -pub(crate) enum GetVectoredError { +pub enum GetVectoredError { #[error("timeline shutting down")] Cancelled, @@ -849,7 +849,7 @@ impl From for GetVectoredError { } #[derive(thiserror::Error, Debug)] -pub(crate) enum GetReadyAncestorError { +pub enum GetReadyAncestorError { #[error("ancestor LSN wait error")] AncestorLsnTimeout(#[from] WaitLsnError), @@ -939,7 +939,7 @@ impl std::fmt::Debug for Timeline { } #[derive(thiserror::Error, Debug, Clone)] -pub(crate) enum WaitLsnError { +pub enum WaitLsnError { // Called on a timeline which is shutting down #[error("Shutdown")] Shutdown, @@ -1902,16 +1902,11 @@ impl Timeline { return; }; - let Some(current_size) = open_layer.try_len() else { - // Unexpected: since we hold the write guard, nobody else should be writing to this layer, so - // read lock to get size should always succeed. 
- tracing::warn!("Lock conflict while reading size of open layer"); - return; - }; + let current_size = open_layer.len(); let current_lsn = self.get_last_record_lsn(); - let checkpoint_distance_override = open_layer.tick().await; + let checkpoint_distance_override = open_layer.tick(); if let Some(size_override) = checkpoint_distance_override { if current_size > size_override { @@ -2918,7 +2913,7 @@ impl Timeline { shard_identity: ShardIdentity, walredo_mgr: Option>, resources: TimelineResources, - pg_version: u32, + pg_version: PgMajorVersion, state: TimelineState, attach_wal_lag_cooldown: Arc>, create_idempotency: crate::tenant::CreateTimelineIdempotency, @@ -3422,10 +3417,6 @@ impl Timeline { // TenantShard::create_timeline will wait for these uploads to happen before returning, or // on retry. - // Now that we have the full layer map, we may calculate the visibility of layers within it (a global scan) - drop(guard); // drop write lock, update_layer_visibility will take a read lock. - self.update_layer_visibility().await?; - info!( "loaded layer map with {} layers at {}, total physical size: {}", num_layers, disk_consistent_lsn, total_physical_size @@ -5211,7 +5202,11 @@ impl Timeline { } let (dense_ks, sparse_ks) = self.collect_keyspace(lsn, ctx).await?; - let dense_partitioning = dense_ks.partition(&self.shard_identity, partition_size); + let dense_partitioning = dense_ks.partition( + &self.shard_identity, + partition_size, + postgres_ffi::BLCKSZ as u64, + ); let sparse_partitioning = SparseKeyPartitioning { parts: vec![sparse_ks], }; // no partitioning for metadata keys for now @@ -5939,7 +5934,7 @@ impl Drop for Timeline { if let Ok(mut gc_info) = ancestor.gc_info.write() { if !gc_info.remove_child_not_offloaded(self.timeline_id) { tracing::error!(tenant_id = %self.tenant_shard_id.tenant_id, shard_id = %self.tenant_shard_id.shard_slug(), timeline_id = %self.timeline_id, - "Couldn't remove retain_lsn entry from offloaded timeline's parent: already removed"); + "Couldn't remove retain_lsn entry from timeline's parent on drop: already removed"); } } } @@ -6543,7 +6538,7 @@ impl Timeline { debug!("retain_lsns: {:?}", retain_lsns); - let mut layers_to_remove = Vec::new(); + let max_retain_lsn = retain_lsns.iter().max(); // Scan all layers in the timeline (remote or on-disk). // @@ -6553,108 +6548,110 @@ impl Timeline { // 3. it doesn't need to be retained for 'retain_lsns'; // 4. it does not need to be kept for LSNs holding valid leases. // 5. newer on-disk image layers cover the layer's whole key range - // - // TODO holding a write lock is too agressive and avoidable - let mut guard = self - .layers - .write(LayerManagerLockHolder::GarbageCollection) - .await; - let layers = guard.layer_map()?; - 'outer: for l in layers.iter_historic_layers() { - result.layers_total += 1; + let layers_to_remove = { + let mut layers_to_remove = Vec::new(); - // 1. Is it newer than GC horizon cutoff point? - if l.get_lsn_range().end > space_cutoff { - info!( - "keeping {} because it's newer than space_cutoff {}", - l.layer_name(), - space_cutoff, - ); - result.layers_needed_by_cutoff += 1; - continue 'outer; - } + let guard = self + .layers + .read(LayerManagerLockHolder::GarbageCollection) + .await; + let layers = guard.layer_map()?; + 'outer: for l in layers.iter_historic_layers() { + result.layers_total += 1; - // 2. It is newer than PiTR cutoff point? 
- if l.get_lsn_range().end > time_cutoff { - info!( - "keeping {} because it's newer than time_cutoff {}", - l.layer_name(), - time_cutoff, - ); - result.layers_needed_by_pitr += 1; - continue 'outer; - } - - // 3. Is it needed by a child branch? - // NOTE With that we would keep data that - // might be referenced by child branches forever. - // We can track this in child timeline GC and delete parent layers when - // they are no longer needed. This might be complicated with long inheritance chains. - // - // TODO Vec is not a great choice for `retain_lsns` - for retain_lsn in &retain_lsns { - // start_lsn is inclusive - if &l.get_lsn_range().start <= retain_lsn { - info!( - "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}", + // 1. Is it newer than GC horizon cutoff point? + if l.get_lsn_range().end > space_cutoff { + debug!( + "keeping {} because it's newer than space_cutoff {}", l.layer_name(), - retain_lsn, - l.is_incremental(), + space_cutoff, ); - result.layers_needed_by_branches += 1; + result.layers_needed_by_cutoff += 1; continue 'outer; } - } - // 4. Is there a valid lease that requires us to keep this layer? - if let Some(lsn) = &max_lsn_with_valid_lease { - // keep if layer start <= any of the lease - if &l.get_lsn_range().start <= lsn { - info!( - "keeping {} because there is a valid lease preventing GC at {}", + // 2. It is newer than PiTR cutoff point? + if l.get_lsn_range().end > time_cutoff { + debug!( + "keeping {} because it's newer than time_cutoff {}", l.layer_name(), - lsn, + time_cutoff, ); - result.layers_needed_by_leases += 1; + result.layers_needed_by_pitr += 1; continue 'outer; } + + // 3. Is it needed by a child branch? + // NOTE With that we would keep data that + // might be referenced by child branches forever. + // We can track this in child timeline GC and delete parent layers when + // they are no longer needed. This might be complicated with long inheritance chains. + if let Some(retain_lsn) = max_retain_lsn { + // start_lsn is inclusive + if &l.get_lsn_range().start <= retain_lsn { + debug!( + "keeping {} because it's still might be referenced by child branch forked at {} is_dropped: xx is_incremental: {}", + l.layer_name(), + retain_lsn, + l.is_incremental(), + ); + result.layers_needed_by_branches += 1; + continue 'outer; + } + } + + // 4. Is there a valid lease that requires us to keep this layer? + if let Some(lsn) = &max_lsn_with_valid_lease { + // keep if layer start <= any of the lease + if &l.get_lsn_range().start <= lsn { + debug!( + "keeping {} because there is a valid lease preventing GC at {}", + l.layer_name(), + lsn, + ); + result.layers_needed_by_leases += 1; + continue 'outer; + } + } + + // 5. Is there a later on-disk layer for this relation? + // + // The end-LSN is exclusive, while disk_consistent_lsn is + // inclusive. For example, if disk_consistent_lsn is 100, it is + // OK for a delta layer to have end LSN 101, but if the end LSN + // is 102, then it might not have been fully flushed to disk + // before crash. + // + // For example, imagine that the following layers exist: + // + // 1000 - image (A) + // 1000-2000 - delta (B) + // 2000 - image (C) + // 2000-3000 - delta (D) + // 3000 - image (E) + // + // If GC horizon is at 2500, we can remove layers A and B, but + // we cannot remove C, even though it's older than 2500, because + // the delta layer 2000-3000 depends on it. 
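The rewritten branch-retention check above compares each layer against only `retain_lsns.iter().max()` instead of looping over every retained LSN. The two are equivalent because the predicate is `layer.start_lsn <= retain_lsn`, which holds for some element exactly when it holds for the maximum. A small sketch of that equivalence, using a plain `u64` as a stand-in for the pageserver's `Lsn` type:

    type Lsn = u64;

    // Old form: keep the layer if any retained LSN is at or past the layer's start.
    fn keep_any(layer_start: Lsn, retain_lsns: &[Lsn]) -> bool {
        retain_lsns.iter().any(|l| layer_start <= *l)
    }

    // New form: comparing against the maximum retained LSN is sufficient.
    fn keep_max(layer_start: Lsn, retain_lsns: &[Lsn]) -> bool {
        retain_lsns.iter().max().is_some_and(|max| layer_start <= *max)
    }

    fn main() {
        let retain = [10, 25, 40];
        for start in [5, 25, 41, 100] {
            assert_eq!(keep_any(start, &retain), keep_max(start, &retain));
        }
    }
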
+ if !layers + .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff)) + { + debug!("keeping {} because it is the latest layer", l.layer_name()); + result.layers_not_updated += 1; + continue 'outer; + } + + // We didn't find any reason to keep this file, so remove it. + info!( + "garbage collecting {} is_dropped: xx is_incremental: {}", + l.layer_name(), + l.is_incremental(), + ); + layers_to_remove.push(l); } - // 5. Is there a later on-disk layer for this relation? - // - // The end-LSN is exclusive, while disk_consistent_lsn is - // inclusive. For example, if disk_consistent_lsn is 100, it is - // OK for a delta layer to have end LSN 101, but if the end LSN - // is 102, then it might not have been fully flushed to disk - // before crash. - // - // For example, imagine that the following layers exist: - // - // 1000 - image (A) - // 1000-2000 - delta (B) - // 2000 - image (C) - // 2000-3000 - delta (D) - // 3000 - image (E) - // - // If GC horizon is at 2500, we can remove layers A and B, but - // we cannot remove C, even though it's older than 2500, because - // the delta layer 2000-3000 depends on it. - if !layers - .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff)) - { - info!("keeping {} because it is the latest layer", l.layer_name()); - result.layers_not_updated += 1; - continue 'outer; - } - - // We didn't find any reason to keep this file, so remove it. - info!( - "garbage collecting {} is_dropped: xx is_incremental: {}", - l.layer_name(), - l.is_incremental(), - ); - layers_to_remove.push(l); - } + layers_to_remove + }; if !layers_to_remove.is_empty() { // Persist the new GC cutoff value before we actually remove anything. @@ -6670,15 +6667,19 @@ impl Timeline { } })?; + let mut guard = self + .layers + .write(LayerManagerLockHolder::GarbageCollection) + .await; + let gc_layers = layers_to_remove .iter() - .map(|x| guard.get_from_desc(x)) + .flat_map(|desc| guard.try_get_from_key(&desc.key()).cloned()) .collect::>(); result.layers_removed = gc_layers.len() as u64; self.remote_client.schedule_gc_update(&gc_layers)?; - guard.open_mut()?.finish_gc_timeline(&gc_layers); #[cfg(feature = "testing")] @@ -7178,9 +7179,7 @@ impl Timeline { if let Some(end) = layer_end_lsn { assert!( end <= last_record_lsn, - "advance last record lsn before inserting a layer, end_lsn={}, last_record_lsn={}", - end, - last_record_lsn, + "advance last record lsn before inserting a layer, end_lsn={end}, last_record_lsn={last_record_lsn}", ); } @@ -7366,7 +7365,7 @@ impl TimelineWriter<'_> { .tl .get_layer_for_write(at, &self.write_guard, ctx) .await?; - let initial_size = layer.size().await?; + let initial_size = layer.len(); let last_freeze_at = self.last_freeze_at.load(); self.write_guard.replace(TimelineWriterState::new( @@ -7594,11 +7593,12 @@ mod tests { use std::sync::Arc; use pageserver_api::key::Key; - use pageserver_api::value::Value; + use postgres_ffi::PgMajorVersion; use std::iter::Iterator; use tracing::Instrument; use utils::id::TimelineId; use utils::lsn::Lsn; + use wal_decoder::models::value::Value; use super::HeatMapTimeline; use crate::context::RequestContextBuilder; @@ -7668,7 +7668,7 @@ mod tests { .create_test_timeline_with_layers( TimelineId::generate(), Lsn(0x10), - 14, + PgMajorVersion::PG14, &ctx, Vec::new(), // in-memory layers delta_layers, @@ -7804,7 +7804,7 @@ mod tests { .create_test_timeline_with_layers( TimelineId::generate(), Lsn(0x10), - 14, + PgMajorVersion::PG14, &ctx, Vec::new(), // in-memory layers delta_layers, @@ 
-7864,7 +7864,12 @@ mod tests { let (tenant, ctx) = harness.load().await; let timeline = tenant - .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, &ctx) + .create_test_timeline( + TimelineId::generate(), + Lsn(0x10), + PgMajorVersion::PG14, + &ctx, + ) .await .unwrap(); diff --git a/pageserver/src/tenant/timeline/compaction.rs b/pageserver/src/tenant/timeline/compaction.rs index 5307d3836f..1b8e5f4b9c 100644 --- a/pageserver/src/tenant/timeline/compaction.rs +++ b/pageserver/src/tenant/timeline/compaction.rs @@ -29,9 +29,7 @@ use pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE; use pageserver_api::key::{KEY_SIZE, Key}; use pageserver_api::keyspace::{KeySpace, ShardedRange}; use pageserver_api::models::{CompactInfoResponse, CompactKeyRange}; -use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::{ShardCount, ShardIdentity, TenantShardId}; -use pageserver_api::value::Value; use pageserver_compaction::helpers::{fully_contains, overlaps_with}; use pageserver_compaction::interface::*; use serde::Serialize; @@ -41,6 +39,8 @@ use tracing::{Instrument, debug, error, info, info_span, trace, warn}; use utils::critical; use utils::id::TimelineId; use utils::lsn::Lsn; +use wal_decoder::models::record::NeonWalRecord; +use wal_decoder::models::value::Value; use crate::context::{AccessStatsBehavior, RequestContext, RequestContextBuilder}; use crate::page_cache; @@ -977,7 +977,7 @@ impl KeyHistoryRetention { tline .reconstruct_value(key, lsn, data, RedoAttemptType::GcCompaction) .await - .with_context(|| format!("verification failed for key {} at lsn {}", key, lsn))?; + .with_context(|| format!("verification failed for key {key} at lsn {lsn}"))?; Ok(()) } @@ -2647,15 +2647,15 @@ impl Timeline { use std::fmt::Write; let mut output = String::new(); if let Some((key, _, _)) = replay_history.first() { - write!(output, "key={} ", key).unwrap(); + write!(output, "key={key} ").unwrap(); let mut cnt = 0; for (_, lsn, val) in replay_history { if val.is_image() { - write!(output, "i@{} ", lsn).unwrap(); + write!(output, "i@{lsn} ").unwrap(); } else if val.will_init() { - write!(output, "di@{} ", lsn).unwrap(); + write!(output, "di@{lsn} ").unwrap(); } else { - write!(output, "d@{} ", lsn).unwrap(); + write!(output, "d@{lsn} ").unwrap(); } cnt += 1; if cnt >= 128 { diff --git a/pageserver/src/tenant/timeline/import_pgdata/flow.rs b/pageserver/src/tenant/timeline/import_pgdata/flow.rs index ed679a9bdc..d471e9fc69 100644 --- a/pageserver/src/tenant/timeline/import_pgdata/flow.rs +++ b/pageserver/src/tenant/timeline/import_pgdata/flow.rs @@ -36,8 +36,8 @@ use pageserver_api::keyspace::{ShardedRange, singleton_range}; use pageserver_api::models::{ShardImportProgress, ShardImportProgressV1, ShardImportStatus}; use pageserver_api::reltag::{RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; +use postgres_ffi::BLCKSZ; use postgres_ffi::relfile_utils::parse_relfilename; -use postgres_ffi::{BLCKSZ, pg_constants}; use remote_storage::RemotePath; use tokio::sync::Semaphore; use tokio_stream::StreamExt; @@ -558,7 +558,7 @@ impl PgDataDir { PgDataDirDb::new( storage, &basedir.join(dboid.to_string()), - pg_constants::DEFAULTTABLESPACE_OID, + postgres_ffi_types::constants::DEFAULTTABLESPACE_OID, dboid, &datadir_path, ) @@ -571,7 +571,7 @@ impl PgDataDir { PgDataDirDb::new( storage, &datadir_path.join("global"), - postgres_ffi::pg_constants::GLOBALTABLESPACE_OID, + postgres_ffi_types::constants::GLOBALTABLESPACE_OID, 0, &datadir_path, ) diff --git 
a/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs b/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs index bf2d9875c1..98c44313f1 100644 --- a/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs +++ b/pageserver/src/tenant/timeline/import_pgdata/importbucket_client.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use anyhow::Context; use bytes::Bytes; -use postgres_ffi::ControlFileData; +use postgres_ffi::{ControlFileData, PgMajorVersion}; use remote_storage::{ Download, DownloadError, DownloadKind, DownloadOpts, GenericRemoteStorage, Listing, ListingObject, RemotePath, RemoteStorageConfig, @@ -264,7 +264,7 @@ impl ControlFile { pub(crate) fn base_lsn(&self) -> Lsn { Lsn(self.control_file_data.checkPoint).align() } - pub(crate) fn pg_version(&self) -> u32 { + pub(crate) fn pg_version(&self) -> PgMajorVersion { self.try_pg_version() .expect("prepare() checks that try_pg_version doesn't error") } @@ -274,13 +274,14 @@ impl ControlFile { pub(crate) fn control_file_buf(&self) -> &Bytes { &self.control_file_buf } - fn try_pg_version(&self) -> anyhow::Result { + + fn try_pg_version(&self) -> anyhow::Result { Ok(match self.control_file_data.catalog_version_no { // thesea are from catversion.h - 202107181 => 14, - 202209061 => 15, - 202307071 => 16, - 202406281 => 17, + 202107181 => PgMajorVersion::PG14, + 202209061 => PgMajorVersion::PG15, + 202307071 => PgMajorVersion::PG16, + 202406281 => PgMajorVersion::PG17, catversion => { anyhow::bail!("unrecognized catalog version {catversion}") } diff --git a/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs b/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs index 343e04f5f0..6d52da1f00 100644 --- a/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs +++ b/pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs @@ -275,12 +275,20 @@ pub(super) async fn handle_walreceiver_connection( let copy_stream = replication_client.copy_both_simple(&query).await?; let mut physical_stream = pin!(ReplicationStream::new(copy_stream)); - let mut walingest = WalIngest::new(timeline.as_ref(), startpoint, &ctx) - .await - .map_err(|e| match e.kind { - crate::walingest::WalIngestErrorKind::Cancelled => WalReceiverError::Cancelled, - _ => WalReceiverError::Other(e.into()), - })?; + let walingest_future = WalIngest::new(timeline.as_ref(), startpoint, &ctx); + let walingest_res = select! { + walingest_res = walingest_future => walingest_res, + _ = cancellation.cancelled() => { + // We are doing reads in WalIngest::new, and those can hang as they come from the network. + // Timeline cancellation hits the walreceiver cancellation token before it hits the timeline global one. 
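The comment above motivates racing `WalIngest::new` against the walreceiver's cancellation signal, so that a hung network read cannot block shutdown. A generic sketch of this `tokio::select!` pattern, using `tokio_util::sync::CancellationToken` and a stand-in `slow_setup` function (both illustrative; the patch uses its own cancellation handle and error types):

    use std::time::Duration;
    use tokio_util::sync::CancellationToken;

    async fn slow_setup() -> Result<u32, &'static str> {
        // Stand-in for work that may hang on the network.
        tokio::time::sleep(Duration::from_secs(10)).await;
        Ok(42)
    }

    async fn setup_or_cancel(cancel: &CancellationToken) -> Result<u32, &'static str> {
        tokio::select! {
            res = slow_setup() => res,
            _ = cancel.cancelled() => Err("cancelled before setup completed"),
        }
    }

    #[tokio::main]
    async fn main() {
        let cancel = CancellationToken::new();
        cancel.cancel();
        assert!(setup_or_cancel(&cancel).await.is_err());
    }
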
+ debug!("Connection cancelled"); + return Err(WalReceiverError::Cancelled); + }, + }; + let mut walingest = walingest_res.map_err(|e| match e.kind { + crate::walingest::WalIngestErrorKind::Cancelled => WalReceiverError::Cancelled, + _ => WalReceiverError::Other(e.into()), + })?; let (format, compression) = match protocol { PostgresClientProtocol::Interpreted { @@ -360,8 +368,7 @@ pub(super) async fn handle_walreceiver_connection( match raw_wal_start_lsn.cmp(&expected_wal_start) { std::cmp::Ordering::Greater => { let msg = format!( - "Gap in streamed WAL: [{}, {})", - expected_wal_start, raw_wal_start_lsn + "Gap in streamed WAL: [{expected_wal_start}, {raw_wal_start_lsn})" ); critical!("{msg}"); return Err(WalReceiverError::Other(anyhow!(msg))); diff --git a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs index 090d2ece85..85ea5c4d80 100644 --- a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs +++ b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer.rs @@ -68,16 +68,9 @@ impl AlignedBuffer { assert!( begin <= end, - "range start must not be greater than end: {:?} <= {:?}", - begin, - end, - ); - assert!( - end <= len, - "range end out of bounds: {:?} <= {:?}", - end, - len, + "range start must not be greater than end: {begin:?} <= {end:?}", ); + assert!(end <= len, "range end out of bounds: {end:?} <= {len:?}",); let begin = self.range.start + begin; let end = self.range.start + end; diff --git a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs index 07f949b89e..93116ea85e 100644 --- a/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs +++ b/pageserver/src/virtual_file/owned_buffers_io/aligned_buffer/buffer_mut.rs @@ -242,10 +242,7 @@ unsafe impl bytes::BufMut for AlignedBufferMut { /// Panic with a nice error message. #[cold] fn panic_advance(idx: usize, len: usize) -> ! 
{ - panic!( - "advance out of bounds: the len is {} but advancing by {}", - len, idx - ); + panic!("advance out of bounds: the len is {len} but advancing by {idx}"); } /// Safety: [`AlignedBufferMut`] has exclusive ownership of the io buffer, diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index c1a3b79915..a597aedee3 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -28,20 +28,20 @@ use std::time::{Duration, Instant, SystemTime}; use bytes::{Buf, Bytes}; use pageserver_api::key::{Key, rel_block_to_key}; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind}; use pageserver_api::shard::ShardIdentity; -use postgres_ffi::relfile_utils::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM}; use postgres_ffi::walrecord::*; use postgres_ffi::{ - TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion, enum_pgversion_dispatch, - fsm_logical_to_physical, pg_constants, + PgMajorVersion, TimestampTz, TransactionId, dispatch_pgversion, enum_pgversion, + enum_pgversion_dispatch, fsm_logical_to_physical, pg_constants, }; +use postgres_ffi_types::forknum::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM}; use tracing::*; use utils::bin_ser::{DeserializeError, SerializeError}; use utils::lsn::Lsn; use utils::rate_limit::RateLimit; use utils::{critical, failpoint_support}; +use wal_decoder::models::record::NeonWalRecord; use wal_decoder::models::*; use crate::ZERO_PAGE; @@ -781,7 +781,7 @@ impl WalIngest { ) -> Result<(), WalIngestError> { let (xact_common, is_commit, is_prepared) = match record { XactRecord::Prepare(XactPrepare { xl_xid, data }) => { - let xid: u64 = if modification.tline.pg_version >= 17 { + let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 { self.adjust_to_full_transaction_id(xl_xid)? } else { xl_xid as u64 @@ -886,7 +886,7 @@ impl WalIngest { xl_xid, parsed.xid, lsn, ); - let xid: u64 = if modification.tline.pg_version >= 17 { + let xid: u64 = if modification.tline.pg_version >= PgMajorVersion::PG17 { self.adjust_to_full_transaction_id(parsed.xid)? } else { parsed.xid as u64 @@ -1241,7 +1241,7 @@ impl WalIngest { if xlog_checkpoint.oldestActiveXid == pg_constants::INVALID_TRANSACTION_ID && info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN { - let oldest_active_xid = if pg_version >= 17 { + let oldest_active_xid = if pg_version >= PgMajorVersion::PG17 { let mut oldest_active_full_xid = cp.nextXid.value; for xid in modification.tline.list_twophase_files(lsn, ctx).await? 
{ if xid < oldest_active_full_xid { @@ -1475,10 +1475,11 @@ impl WalIngest { const fn rate_limiter( &self, - pg_version: u32, + pg_version: PgMajorVersion, ) -> Option<&Lazy>> { - const MIN_PG_VERSION: u32 = 14; - const MAX_PG_VERSION: u32 = 17; + const MIN_PG_VERSION: u32 = PgMajorVersion::PG14.major_version_num(); + const MAX_PG_VERSION: u32 = PgMajorVersion::PG17.major_version_num(); + let pg_version = pg_version.major_version_num(); if pg_version < MIN_PG_VERSION || pg_version > MAX_PG_VERSION { return None; @@ -1603,6 +1604,7 @@ async fn get_relsize( #[cfg(test)] mod tests { use anyhow::Result; + use postgres_ffi::PgMajorVersion; use postgres_ffi::RELSEG_SIZE; use super::*; @@ -1625,7 +1627,7 @@ mod tests { #[tokio::test] async fn test_zeroed_checkpoint_decodes_correctly() -> Result<(), anyhow::Error> { - for i in 14..=16 { + for i in PgMajorVersion::ALL { dispatch_pgversion!(i, { pgv::CheckPoint::decode(&pgv::ZERO_CHECKPOINT)?; }); @@ -2108,7 +2110,7 @@ mod tests { // Check relation content for blkno in 0..relsize { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2142,7 +2144,7 @@ mod tests { for blkno in 0..1 { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2167,7 +2169,7 @@ mod tests { ); for blkno in 0..relsize { let lsn = Lsn(0x20); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2188,7 +2190,7 @@ mod tests { let lsn = Lsn(0x80); let mut m = tline.begin_modification(lsn); for blkno in 0..relsize { - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); walingest .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx) .await?; @@ -2210,7 +2212,7 @@ mod tests { // Check relation content for blkno in 0..relsize { let lsn = Lsn(0x80); - let data = format!("foo blk {} at {}", blkno, lsn); + let data = format!("foo blk {blkno} at {lsn}"); assert_eq!( tline .get_rel_page_at_lsn( @@ -2335,7 +2337,7 @@ mod tests { // 5. Grep sk logs for "restart decoder" to get startpoint // 6. Run just the decoder from this test to get the endpoint. // It's the last LSN the decoder will output. 
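The walingest changes above rely on `PgMajorVersion` being an ordered type with `PG14`..`PG17` constants, an `ALL` list, and a `const fn major_version_num()`. The real definition lives in `postgres_ffi`; the following is only one plausible shape that supports those call sites, written out to make the comparisons such as `pg_version >= PgMajorVersion::PG17` easier to read:

    // Illustrative stand-in, not the postgres_ffi definition.
    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
    pub enum PgMajorVersion {
        PG14,
        PG15,
        PG16,
        PG17,
    }

    impl PgMajorVersion {
        pub const ALL: [PgMajorVersion; 4] = [
            PgMajorVersion::PG14,
            PgMajorVersion::PG15,
            PgMajorVersion::PG16,
            PgMajorVersion::PG17,
        ];

        pub const fn major_version_num(self) -> u32 {
            match self {
                PgMajorVersion::PG14 => 14,
                PgMajorVersion::PG15 => 15,
                PgMajorVersion::PG16 => 16,
                PgMajorVersion::PG17 => 17,
            }
        }
    }

    fn main() {
        // Ordering follows declaration order, so version comparisons read naturally.
        assert!(PgMajorVersion::PG17 >= PgMajorVersion::PG14);
        assert_eq!(PgMajorVersion::PG15.major_version_num(), 15);
    }
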
- let pg_version = 15; // The test data was generated by pg15 + let pg_version = PgMajorVersion::PG15; // The test data was generated by pg15 let path = "test_data/sk_wal_segment_from_pgbench"; let wal_segment_path = format!("{path}/000000010000000000000001.zst"); let source_initdb_path = format!("{path}/{INITDB_PATH}"); @@ -2414,6 +2416,6 @@ mod tests { } let duration = started_at.elapsed(); - println!("done in {:?}", duration); + println!("done in {duration:?}"); } } diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index ed8a954369..b17b5a15f9 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -32,12 +32,13 @@ use anyhow::Context; use bytes::{Bytes, BytesMut}; use pageserver_api::key::Key; use pageserver_api::models::{WalRedoManagerProcessStatus, WalRedoManagerStatus}; -use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::TenantShardId; +use postgres_ffi::PgMajorVersion; use tracing::*; use utils::lsn::Lsn; use utils::sync::gate::GateError; use utils::sync::heavier_once_cell; +use wal_decoder::models::record::NeonWalRecord; use crate::config::PageServerConf; use crate::metrics::{ @@ -165,7 +166,7 @@ impl PostgresRedoManager { lsn: Lsn, base_img: Option<(Lsn, Bytes)>, records: Vec<(Lsn, NeonWalRecord)>, - pg_version: u32, + pg_version: PgMajorVersion, redo_attempt_type: RedoAttemptType, ) -> Result { if records.is_empty() { @@ -232,7 +233,7 @@ impl PostgresRedoManager { /// # Cancel-Safety /// /// This method is cancellation-safe. - pub async fn ping(&self, pg_version: u32) -> Result<(), Error> { + pub async fn ping(&self, pg_version: PgMajorVersion) -> Result<(), Error> { self.do_with_walredo_process(pg_version, |proc| async move { proc.ping(Duration::from_secs(1)) .await @@ -342,7 +343,7 @@ impl PostgresRedoManager { O, >( &self, - pg_version: u32, + pg_version: PgMajorVersion, closure: F, ) -> Result { let proc: Arc = match self.redo_process.get_or_init_detached().await { @@ -442,7 +443,7 @@ impl PostgresRedoManager { base_img_lsn: Lsn, records: &[(Lsn, NeonWalRecord)], wal_redo_timeout: Duration, - pg_version: u32, + pg_version: PgMajorVersion, max_retry_attempts: u32, ) -> Result { *(self.last_redo_at.lock().unwrap()) = Some(Instant::now()); @@ -571,11 +572,12 @@ mod tests { use bytes::Bytes; use pageserver_api::key::Key; - use pageserver_api::record::NeonWalRecord; use pageserver_api::shard::TenantShardId; + use postgres_ffi::PgMajorVersion; use tracing::Instrument; use utils::id::TenantId; use utils::lsn::Lsn; + use wal_decoder::models::record::NeonWalRecord; use super::PostgresRedoManager; use crate::config::PageServerConf; @@ -586,7 +588,7 @@ mod tests { let h = RedoHarness::new().unwrap(); h.manager - .ping(14) + .ping(PgMajorVersion::PG14) .instrument(h.span()) .await .expect("ping should work"); @@ -612,7 +614,7 @@ mod tests { Lsn::from_str("0/16E2408").unwrap(), None, short_records(), - 14, + PgMajorVersion::PG14, RedoAttemptType::ReadPage, ) .instrument(h.span()) @@ -641,7 +643,7 @@ mod tests { Lsn::from_str("0/16E2408").unwrap(), None, short_records(), - 14, + PgMajorVersion::PG14, RedoAttemptType::ReadPage, ) .instrument(h.span()) @@ -663,7 +665,7 @@ mod tests { Lsn::INVALID, None, short_records(), - 16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */ + PgMajorVersion::PG16, /* 16 currently produces stderr output on startup, which adds a nice extra edge */ RedoAttemptType::ReadPage, ) .instrument(h.span()) diff --git a/pageserver/src/walredo/apply_neon.rs 
b/pageserver/src/walredo/apply_neon.rs index a3840f1f6f..a525579082 100644 --- a/pageserver/src/walredo/apply_neon.rs +++ b/pageserver/src/walredo/apply_neon.rs @@ -2,16 +2,16 @@ use anyhow::Context; use byteorder::{ByteOrder, LittleEndian}; use bytes::BytesMut; use pageserver_api::key::Key; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::SlruKind; -use postgres_ffi::relfile_utils::VISIBILITYMAP_FORKNUM; use postgres_ffi::v14::nonrelfile_utils::{ mx_offset_to_flags_bitshift, mx_offset_to_flags_offset, mx_offset_to_member_offset, transaction_id_set_status, }; use postgres_ffi::{BLCKSZ, pg_constants}; +use postgres_ffi_types::forknum::VISIBILITYMAP_FORKNUM; use tracing::*; use utils::lsn::Lsn; +use wal_decoder::models::record::NeonWalRecord; /// Can this request be served by neon redo functions /// or we need to pass it to wal-redo postgres process? @@ -52,8 +52,7 @@ pub(crate) fn apply_in_neon( let (rel, _) = key.to_rel_block().context("invalid record")?; assert!( rel.forknum == VISIBILITYMAP_FORKNUM, - "TruncateVisibilityMap record on unexpected rel {}", - rel + "TruncateVisibilityMap record on unexpected rel {rel}" ); let map = &mut page[pg_constants::MAXALIGN_SIZE_OF_PAGE_HEADER_DATA..]; map[*trunc_byte + 1..].fill(0u8); @@ -78,8 +77,7 @@ pub(crate) fn apply_in_neon( let (rel, blknum) = key.to_rel_block().context("invalid record")?; assert!( rel.forknum == VISIBILITYMAP_FORKNUM, - "ClearVisibilityMapFlags record on unexpected rel {}", - rel + "ClearVisibilityMapFlags record on unexpected rel {rel}" ); if let Some(heap_blkno) = *new_heap_blkno { // Calculate the VM block and offset that corresponds to the heap block. @@ -124,8 +122,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::Clog, - "ClogSetCommitted record with unexpected key {}", - key + "ClogSetCommitted record with unexpected key {key}" ); for &xid in xids { let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; @@ -135,15 +132,11 @@ pub(crate) fn apply_in_neon( // Check that we're modifying the correct CLOG block. assert!( segno == expected_segno, - "ClogSetCommitted record for XID {} with unexpected key {}", - xid, - key + "ClogSetCommitted record for XID {xid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "ClogSetCommitted record for XID {} with unexpected key {}", - xid, - key + "ClogSetCommitted record for XID {xid} with unexpected key {key}" ); transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_COMMITTED, page); @@ -169,8 +162,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::Clog, - "ClogSetAborted record with unexpected key {}", - key + "ClogSetAborted record with unexpected key {key}" ); for &xid in xids { let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE; @@ -180,15 +172,11 @@ pub(crate) fn apply_in_neon( // Check that we're modifying the correct CLOG block. 
assert!( segno == expected_segno, - "ClogSetAborted record for XID {} with unexpected key {}", - xid, - key + "ClogSetAborted record for XID {xid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "ClogSetAborted record for XID {} with unexpected key {}", - xid, - key + "ClogSetAborted record for XID {xid} with unexpected key {key}" ); transaction_id_set_status(xid, pg_constants::TRANSACTION_STATUS_ABORTED, page); @@ -199,8 +187,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::MultiXactOffsets, - "MultixactOffsetCreate record with unexpected key {}", - key + "MultixactOffsetCreate record with unexpected key {key}" ); // Compute the block and offset to modify. // See RecordNewMultiXact in PostgreSQL sources. @@ -213,15 +200,11 @@ pub(crate) fn apply_in_neon( let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; assert!( segno == expected_segno, - "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", - mid, - key + "MultiXactOffsetsCreate record for multi-xid {mid} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "MultiXactOffsetsCreate record for multi-xid {} with unexpected key {}", - mid, - key + "MultiXactOffsetsCreate record for multi-xid {mid} with unexpected key {key}" ); LittleEndian::write_u32(&mut page[offset..offset + 4], *moff); @@ -231,8 +214,7 @@ pub(crate) fn apply_in_neon( assert_eq!( slru_kind, SlruKind::MultiXactMembers, - "MultixactMembersCreate record with unexpected key {}", - key + "MultixactMembersCreate record with unexpected key {key}" ); for (i, member) in members.iter().enumerate() { let offset = moff + i as u32; @@ -249,15 +231,11 @@ pub(crate) fn apply_in_neon( let expected_blknum = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT; assert!( segno == expected_segno, - "MultiXactMembersCreate record for offset {} with unexpected key {}", - moff, - key + "MultiXactMembersCreate record for offset {moff} with unexpected key {key}" ); assert!( blknum == expected_blknum, - "MultiXactMembersCreate record for offset {} with unexpected key {}", - moff, - key + "MultiXactMembersCreate record for offset {moff} with unexpected key {key}" ); let mut flagsval = LittleEndian::read_u32(&page[flagsoff..flagsoff + 4]); diff --git a/pageserver/src/walredo/process.rs b/pageserver/src/walredo/process.rs index 6d4a38d4ff..c8b0846480 100644 --- a/pageserver/src/walredo/process.rs +++ b/pageserver/src/walredo/process.rs @@ -10,14 +10,14 @@ use std::time::Duration; use anyhow::Context; use bytes::Bytes; -use pageserver_api::record::NeonWalRecord; use pageserver_api::reltag::RelTag; use pageserver_api::shard::TenantShardId; -use postgres_ffi::BLCKSZ; +use postgres_ffi::{BLCKSZ, PgMajorVersion}; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tracing::{Instrument, debug, error, instrument}; use utils::lsn::Lsn; use utils::poison::Poison; +use wal_decoder::models::record::NeonWalRecord; use self::no_leak_child::NoLeakChild; use crate::config::PageServerConf; @@ -54,11 +54,11 @@ impl WalRedoProcess { // // Start postgres binary in special WAL redo mode. 
// - #[instrument(skip_all,fields(pg_version=pg_version))] + #[instrument(skip_all,fields(pg_version=pg_version.major_version_num()))] pub(crate) fn launch( conf: &'static PageServerConf, tenant_shard_id: TenantShardId, - pg_version: u32, + pg_version: PgMajorVersion, ) -> anyhow::Result { crate::span::debug_assert_current_span_has_tenant_id(); diff --git a/pgxn/Makefile b/pgxn/Makefile new file mode 100644 index 0000000000..8f190668ea --- /dev/null +++ b/pgxn/Makefile @@ -0,0 +1,28 @@ +# This makefile assumes that 'pg_config' is in the path, or is passed in the +# PG_CONFIG variable. +# +# This is used in two different ways: +# +# 1. The main makefile calls this, when you invoke the `make neon-pg-ext-%` +# target. It passes PG_CONFIG pointing to pg_install/%/bin/pg_config. +# This is a VPATH build; the current directory is build/pgxn-%, and +# the path to the Makefile is passed with the -f argument. +# +# 2. compute-node.Dockerfile invokes this to build the compute extensions +# for the specific Postgres version. It relies on pg_config already +# being in $(PATH). + +srcdir = $(dir $(firstword $(MAKEFILE_LIST))) + +PG_CONFIG = pg_config + +subdirs = neon neon_rmgr neon_walredo neon_utils neon_test_utils + +.PHONY: install install-compute install-storage $(subdirs) +install: $(subdirs) +install-compute: neon neon_utils neon_test_utils neon_rmgr +install-storage: neon_rmgr neon_walredo + +$(subdirs): %: + mkdir -p $* + $(MAKE) PG_CONFIG=$(PG_CONFIG) -C $* -f $(abspath $(srcdir)/$@/Makefile) install diff --git a/pgxn/neon/file_cache.c b/pgxn/neon/file_cache.c index 45a4695495..8cfa09bc87 100644 --- a/pgxn/neon/file_cache.c +++ b/pgxn/neon/file_cache.c @@ -1295,7 +1295,8 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, if (iteration_hits != 0) { - /* chunk offset (# of pages) into the LFC file */ + /* chunk offset (# + of pages) into the LFC file */ off_t first_read_offset = (off_t) entry_offset * lfc_blocks_per_chunk; int nwrite = iov_last_used - first_block_in_chunk_read; /* offset of first IOV */ @@ -1313,16 +1314,6 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, lfc_disable("read"); return -1; } - - /* - * We successfully read the pages we know were valid when we - * started reading; now mark those pages as read - */ - for (int i = first_block_in_chunk_read; i < iov_last_used; i++) - { - if (BITMAP_ISSET(chunk_mask, i)) - BITMAP_SET(mask, buf_offset + i); - } } /* Place entry to the head of LRU list */ @@ -1340,6 +1331,15 @@ lfc_readv_select(NRelFileInfo rinfo, ForkNumber forkNum, BlockNumber blkno, { lfc_ctl->time_read += io_time_us; inc_page_cache_read_wait(io_time_us); + /* + * We successfully read the pages we know were valid when we + * started reading; now mark those pages as read + */ + for (int i = first_block_in_chunk_read; i < iov_last_used; i++) + { + if (BITMAP_ISSET(chunk_mask, i)) + BITMAP_SET(mask, buf_offset + i); + } } CriticalAssert(entry->access_count > 0); diff --git a/poetry.lock b/poetry.lock index f9b6f83366..1bc5077eb7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -746,23 +746,23 @@ xray = ["mypy-boto3-xray (>=1.26.0,<1.27.0)"] [[package]] name = "botocore" -version = "1.34.11" +version = "1.34.162" description = "Low-level, data-driven core of boto 3." 
optional = false -python-versions = ">= 3.8" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "botocore-1.34.11-py3-none-any.whl", hash = "sha256:1ff1398b6ea670e1c01ac67a33af3da854f8e700d3528289c04f319c330d8250"}, - {file = "botocore-1.34.11.tar.gz", hash = "sha256:51905c3d623c60df5dc5794387de7caf886d350180a01a3dfa762e903edb45a9"}, + {file = "botocore-1.34.162-py3-none-any.whl", hash = "sha256:2d918b02db88d27a75b48275e6fb2506e9adaaddbec1ffa6a8a0898b34e769be"}, + {file = "botocore-1.34.162.tar.gz", hash = "sha256:adc23be4fb99ad31961236342b7cbf3c0bfc62532cd02852196032e8c0d682f3"}, ] [package.dependencies] jmespath = ">=0.7.1,<2.0.0" python-dateutil = ">=2.1,<3.0.0" -urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""} +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} [package.extras] -crt = ["awscrt (==0.19.19)"] +crt = ["awscrt (==0.21.2)"] [[package]] name = "botocore-stubs" @@ -3422,20 +3422,21 @@ files = [ [[package]] name = "urllib3" -version = "1.26.19" +version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, - {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] -brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] -socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "websockets" diff --git a/proxy/src/batch.rs b/proxy/src/batch.rs new file mode 100644 index 0000000000..33e08797f2 --- /dev/null +++ b/proxy/src/batch.rs @@ -0,0 +1,180 @@ +//! Batch processing system based on intrusive linked lists. +//! +//! Enqueuing a batch job requires no allocations, with +//! direct support for cancelling jobs early. +use std::collections::BTreeMap; +use std::pin::pin; +use std::sync::Mutex; + +use scopeguard::ScopeGuard; +use tokio::sync::oneshot::error::TryRecvError; + +use crate::ext::LockExt; + +pub trait QueueProcessing: Send + 'static { + type Req: Send + 'static; + type Res: Send; + + /// Get the desired batch size. + fn batch_size(&self, queue_size: usize) -> usize; + + /// This applies a full batch of events. + /// Must respond with a full batch of replies. 
+ /// + /// If this apply can error, it's expected that errors be forwarded to each Self::Res. + /// + /// Batching does not need to happen atomically. + fn apply(&mut self, req: Vec<Self::Req>) -> impl Future<Output = Vec<Self::Res>> + Send; +} + +pub struct BatchQueue<P: QueueProcessing> { + processor: tokio::sync::Mutex<P>, + inner: Mutex<BatchQueueInner<P>>, +} + +struct BatchJob<P: QueueProcessing> { + req: P::Req, + res: tokio::sync::oneshot::Sender<P::Res>, +} + +impl<P: QueueProcessing> BatchQueue<P>
{ + pub fn new(p: P) -> Self { + Self { + processor: tokio::sync::Mutex::new(p), + inner: Mutex::new(BatchQueueInner { + version: 0, + queue: BTreeMap::new(), + }), + } + } + + /// Perform a single request-response process, this may be batched internally. + /// + /// This function is not cancel safe. + pub async fn call( + &self, + req: P::Req, + cancelled: impl Future, + ) -> Result { + let (id, mut rx) = self.inner.lock_propagate_poison().register_job(req); + + let mut cancelled = pin!(cancelled); + let resp = loop { + // try become the leader, or try wait for success. + let mut processor = tokio::select! { + // try become leader. + p = self.processor.lock() => p, + // wait for success. + resp = &mut rx => break resp.ok(), + // wait for cancellation. + cancel = cancelled.as_mut() => { + let mut inner = self.inner.lock_propagate_poison(); + if inner.queue.remove(&id).is_some() { + tracing::warn!("batched task cancelled before completion"); + } + return Err(cancel); + }, + }; + + tracing::debug!(id, "batch: became leader"); + let (reqs, resps) = self.inner.lock_propagate_poison().get_batch(&processor); + + // snitch incase the task gets cancelled. + let cancel_safety = scopeguard::guard((), |()| { + if !std::thread::panicking() { + tracing::error!( + id, + "batch: leader cancelled, despite not being cancellation safe" + ); + } + }); + + // apply a batch. + // if this is cancelled, jobs will not be completed and will panic. + let values = processor.apply(reqs).await; + + // good: we didn't get cancelled. + ScopeGuard::into_inner(cancel_safety); + + if values.len() != resps.len() { + tracing::error!( + "batch: invalid response size, expected={}, got={}", + resps.len(), + values.len() + ); + } + + // send response values. + for (tx, value) in std::iter::zip(resps, values) { + if tx.send(value).is_err() { + // receiver hung up but that's fine. + } + } + + match rx.try_recv() { + Ok(resp) => break Some(resp), + Err(TryRecvError::Closed) => break None, + // edge case - there was a race condition where + // we became the leader but were not in the batch. + // + // Example: + // thread 1: register job id=1 + // thread 2: register job id=2 + // thread 2: processor.lock().await + // thread 1: processor.lock().await + // thread 2: becomes leader, batch_size=1, jobs=[1]. + Err(TryRecvError::Empty) => {} + } + }; + + tracing::debug!(id, "batch: job completed"); + + Ok(resp.expect("no response found. batch processer should not panic")) + } +} + +struct BatchQueueInner { + version: u64, + queue: BTreeMap>, +} + +impl BatchQueueInner

{ + fn register_job(&mut self, req: P::Req) -> (u64, tokio::sync::oneshot::Receiver) { + let (tx, rx) = tokio::sync::oneshot::channel(); + + let id = self.version; + + // Overflow concern: + // This is a u64, and we might enqueue 2^16 tasks per second. + // This gives us 2^48 seconds (9 million years). + // Even if this does overflow, it will not break, but some + // jobs with the higher version might never get prioritised. + self.version += 1; + + self.queue.insert(id, BatchJob { req, res: tx }); + + tracing::debug!(id, "batch: registered job in the queue"); + + (id, rx) + } + + fn get_batch(&mut self, p: &P) -> (Vec, Vec>) { + let batch_size = p.batch_size(self.queue.len()); + let mut reqs = Vec::with_capacity(batch_size); + let mut resps = Vec::with_capacity(batch_size); + let mut ids = Vec::with_capacity(batch_size); + + while reqs.len() < batch_size { + let Some((id, job)) = self.queue.pop_first() else { + break; + }; + reqs.push(job.req); + resps.push(job.res); + ids.push(id); + } + + tracing::debug!(ids=?ids, "batch: acquired jobs"); + + (reqs, resps) + } +} diff --git a/proxy/src/binary/local_proxy.rs b/proxy/src/binary/local_proxy.rs index 8fca7ffa77..70527dc9dd 100644 --- a/proxy/src/binary/local_proxy.rs +++ b/proxy/src/binary/local_proxy.rs @@ -206,7 +206,7 @@ pub async fn run() -> anyhow::Result<()> { auth_backend, http_listener, shutdown.clone(), - Arc::new(CancellationHandler::new(&config.connect_to_compute, None)), + Arc::new(CancellationHandler::new(&config.connect_to_compute)), endpoint_rate_limiter, ); diff --git a/proxy/src/binary/proxy.rs b/proxy/src/binary/proxy.rs index ba59cecb4e..c8fba50368 100644 --- a/proxy/src/binary/proxy.rs +++ b/proxy/src/binary/proxy.rs @@ -23,7 +23,8 @@ use utils::{project_build_tag, project_git_version}; use crate::auth::backend::jwt::JwkCache; use crate::auth::backend::{ConsoleRedirectBackend, MaybeOwned}; -use crate::cancellation::{CancellationHandler, handle_cancel_messages}; +use crate::batch::BatchQueue; +use crate::cancellation::{CancellationHandler, CancellationProcessor}; use crate::config::{ self, AuthenticationConfig, CacheOptions, ComputeConfig, HttpConfig, ProjectInfoCacheOptions, ProxyConfig, ProxyProtocolV2, remote_storage_from_toml, RestConfig, @@ -396,13 +397,7 @@ pub async fn run() -> anyhow::Result<()> { .as_ref() .map(|redis_publisher| RedisKVClient::new(redis_publisher.clone(), redis_rps_limit)); - // channel size should be higher than redis client limit to avoid blocking - let cancel_ch_size = args.cancellation_ch_size; - let (tx_cancel, rx_cancel) = tokio::sync::mpsc::channel(cancel_ch_size); - let cancellation_handler = Arc::new(CancellationHandler::new( - &config.connect_to_compute, - Some(tx_cancel), - )); + let cancellation_handler = Arc::new(CancellationHandler::new(&config.connect_to_compute)); let endpoint_rate_limiter = Arc::new(EndpointRateLimiter::new_with_shards( RateBucketInfo::to_leaky_bucket(&args.endpoint_rps_limit) @@ -534,21 +529,11 @@ pub async fn run() -> anyhow::Result<()> { match redis_kv_client.try_connect().await { Ok(()) => { info!("Connected to Redis KV client"); - maintenance_tasks.spawn(async move { - handle_cancel_messages( - &mut redis_kv_client, - rx_cancel, - args.cancellation_batch_size, - ) - .await?; + cancellation_handler.init_tx(BatchQueue::new(CancellationProcessor { + client: redis_kv_client, + batch_size: args.cancellation_batch_size, + })); - drop(redis_kv_client); - - // `handle_cancel_messages` was terminated due to the tx_cancel - // being dropped. 
this is not worthy of an error, and this task can only return `Err`, - // so let's wait forever instead. - std::future::pending().await - }); break; } Err(e) => { diff --git a/proxy/src/cancellation.rs b/proxy/src/cancellation.rs index cce4c1d3a0..ffc0cf43f1 100644 --- a/proxy/src/cancellation.rs +++ b/proxy/src/cancellation.rs @@ -1,19 +1,24 @@ +use std::convert::Infallible; use std::net::{IpAddr, SocketAddr}; -use std::sync::Arc; +use std::pin::pin; +use std::sync::{Arc, OnceLock}; +use std::time::Duration; -use anyhow::{Context, anyhow}; +use anyhow::anyhow; +use futures::FutureExt; use ipnet::{IpNet, Ipv4Net, Ipv6Net}; -use postgres_client::CancelToken; +use postgres_client::RawCancelToken; use postgres_client::tls::MakeTlsConnect; use redis::{Cmd, FromRedisValue, Value}; use serde::{Deserialize, Serialize}; use thiserror::Error; use tokio::net::TcpStream; -use tokio::sync::{mpsc, oneshot}; -use tracing::{debug, error, info, warn}; +use tokio::time::timeout; +use tracing::{debug, error, info}; use crate::auth::AuthError; use crate::auth::backend::ComputeUserInfo; +use crate::batch::{BatchQueue, QueueProcessing}; use crate::config::ComputeConfig; use crate::context::RequestContext; use crate::control_plane::ControlPlaneApi; @@ -27,46 +32,36 @@ use crate::redis::kv_ops::RedisKVClient; type IpSubnetKey = IpNet; -const CANCEL_KEY_TTL: i64 = 1_209_600; // 2 weeks cancellation key expire time +const CANCEL_KEY_TTL: std::time::Duration = std::time::Duration::from_secs(600); +const CANCEL_KEY_REFRESH: std::time::Duration = std::time::Duration::from_secs(570); // Message types for sending through mpsc channel pub enum CancelKeyOp { StoreCancelKey { - key: String, - field: String, - value: String, - resp_tx: Option>>, - _guard: CancelChannelSizeGuard<'static>, - expire: i64, // TTL for key + key: CancelKeyData, + value: Box, + expire: std::time::Duration, }, GetCancelData { - key: String, - resp_tx: oneshot::Sender>>, - _guard: CancelChannelSizeGuard<'static>, - }, - RemoveCancelKey { - key: String, - field: String, - resp_tx: Option>>, - _guard: CancelChannelSizeGuard<'static>, + key: CancelKeyData, }, } pub struct Pipeline { inner: redis::Pipeline, - replies: Vec, + replies: usize, } impl Pipeline { fn with_capacity(n: usize) -> Self { Self { inner: redis::Pipeline::with_capacity(n), - replies: Vec::with_capacity(n), + replies: 0, } } - async fn execute(&mut self, client: &mut RedisKVClient) { - let responses = self.replies.len(); + async fn execute(self, client: &mut RedisKVClient) -> Vec> { + let responses = self.replies; let batch_size = self.inner.len(); match client.query(&self.inner).await { @@ -76,176 +71,72 @@ impl Pipeline { batch_size, responses, "successfully completed cancellation jobs", ); - for (value, reply) in std::iter::zip(values, self.replies.drain(..)) { - reply.send_value(value); - } + values.into_iter().map(Ok).collect() } Ok(value) => { error!(batch_size, ?value, "unexpected redis return value"); - for reply in self.replies.drain(..) { - reply.send_err(anyhow!("incorrect response type from redis")); - } + std::iter::repeat_with(|| Err(anyhow!("incorrect response type from redis"))) + .take(responses) + .collect() } Err(err) => { - for reply in self.replies.drain(..) 
{ - reply.send_err(anyhow!("could not send cmd to redis: {err}")); - } + std::iter::repeat_with(|| Err(anyhow!("could not send cmd to redis: {err}"))) + .take(responses) + .collect() } } - - self.inner.clear(); - self.replies.clear(); } - fn add_command_with_reply(&mut self, cmd: Cmd, reply: CancelReplyOp) { + fn add_command_with_reply(&mut self, cmd: Cmd) { self.inner.add_command(cmd); - self.replies.push(reply); + self.replies += 1; } fn add_command_no_reply(&mut self, cmd: Cmd) { self.inner.add_command(cmd).ignore(); } - - fn add_command(&mut self, cmd: Cmd, reply: Option) { - match reply { - Some(reply) => self.add_command_with_reply(cmd, reply), - None => self.add_command_no_reply(cmd), - } - } } impl CancelKeyOp { - fn register(self, pipe: &mut Pipeline) { - #[allow(clippy::used_underscore_binding)] + fn register(&self, pipe: &mut Pipeline) { match self { - CancelKeyOp::StoreCancelKey { - key, - field, - value, - resp_tx, - _guard, - expire, - } => { - let reply = - resp_tx.map(|resp_tx| CancelReplyOp::StoreCancelKey { resp_tx, _guard }); - pipe.add_command(Cmd::hset(&key, field, value), reply); - pipe.add_command_no_reply(Cmd::expire(key, expire)); + CancelKeyOp::StoreCancelKey { key, value, expire } => { + let key = KeyPrefix::Cancel(*key).build_redis_key(); + pipe.add_command_with_reply(Cmd::hset(&key, "data", &**value)); + pipe.add_command_no_reply(Cmd::expire(&key, expire.as_secs() as i64)); } - CancelKeyOp::GetCancelData { - key, - resp_tx, - _guard, - } => { - let reply = CancelReplyOp::GetCancelData { resp_tx, _guard }; - pipe.add_command_with_reply(Cmd::hgetall(key), reply); - } - CancelKeyOp::RemoveCancelKey { - key, - field, - resp_tx, - _guard, - } => { - let reply = - resp_tx.map(|resp_tx| CancelReplyOp::RemoveCancelKey { resp_tx, _guard }); - pipe.add_command(Cmd::hdel(key, field), reply); + CancelKeyOp::GetCancelData { key } => { + let key = KeyPrefix::Cancel(*key).build_redis_key(); + pipe.add_command_with_reply(Cmd::hget(key, "data")); } } } } -// Message types for sending through mpsc channel -pub enum CancelReplyOp { - StoreCancelKey { - resp_tx: oneshot::Sender>, - _guard: CancelChannelSizeGuard<'static>, - }, - GetCancelData { - resp_tx: oneshot::Sender>>, - _guard: CancelChannelSizeGuard<'static>, - }, - RemoveCancelKey { - resp_tx: oneshot::Sender>, - _guard: CancelChannelSizeGuard<'static>, - }, +pub struct CancellationProcessor { + pub client: RedisKVClient, + pub batch_size: usize, } -impl CancelReplyOp { - fn send_err(self, e: anyhow::Error) { - match self { - CancelReplyOp::StoreCancelKey { resp_tx, _guard } => { - resp_tx - .send(Err(e)) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - CancelReplyOp::GetCancelData { resp_tx, _guard } => { - resp_tx - .send(Err(e)) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - CancelReplyOp::RemoveCancelKey { resp_tx, _guard } => { - resp_tx - .send(Err(e)) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - } +impl QueueProcessing for CancellationProcessor { + type Req = (CancelChannelSizeGuard<'static>, CancelKeyOp); + type Res = anyhow::Result; + + fn batch_size(&self, _queue_size: usize) -> usize { + self.batch_size } - fn send_value(self, v: redis::Value) { - match self { - CancelReplyOp::StoreCancelKey { resp_tx, _guard } => { - let send = - FromRedisValue::from_owned_redis_value(v).context("could not parse value"); - resp_tx - .send(send) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - 
CancelReplyOp::GetCancelData { resp_tx, _guard } => { - let send = - FromRedisValue::from_owned_redis_value(v).context("could not parse value"); - resp_tx - .send(send) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - CancelReplyOp::RemoveCancelKey { resp_tx, _guard } => { - let send = - FromRedisValue::from_owned_redis_value(v).context("could not parse value"); - resp_tx - .send(send) - .inspect_err(|_| tracing::debug!("could not send reply")) - .ok(); - } - } - } -} - -// Running as a separate task to accept messages through the rx channel -pub async fn handle_cancel_messages( - client: &mut RedisKVClient, - mut rx: mpsc::Receiver, - batch_size: usize, -) -> anyhow::Result<()> { - let mut batch = Vec::with_capacity(batch_size); - let mut pipeline = Pipeline::with_capacity(batch_size); - - loop { - if rx.recv_many(&mut batch, batch_size).await == 0 { - warn!("shutting down cancellation queue"); - break Ok(()); - } + async fn apply(&mut self, batch: Vec) -> Vec { + let mut pipeline = Pipeline::with_capacity(batch.len()); let batch_size = batch.len(); debug!(batch_size, "running cancellation jobs"); - for msg in batch.drain(..) { - msg.register(&mut pipeline); + for (_, op) in &batch { + op.register(&mut pipeline); } - pipeline.execute(client).await; + pipeline.execute(&mut self.client).await } } @@ -256,7 +147,7 @@ pub struct CancellationHandler { compute_config: &'static ComputeConfig, // rate limiter of cancellation requests limiter: Arc>>, - tx: Option>, // send messages to the redis KV client task + tx: OnceLock>, // send messages to the redis KV client task } #[derive(Debug, Error)] @@ -296,13 +187,10 @@ impl ReportableError for CancelError { } impl CancellationHandler { - pub fn new( - compute_config: &'static ComputeConfig, - tx: Option>, - ) -> Self { + pub fn new(compute_config: &'static ComputeConfig) -> Self { Self { compute_config, - tx, + tx: OnceLock::new(), limiter: Arc::new(std::sync::Mutex::new( LeakyBucketRateLimiter::::new_with_shards( LeakyBucketRateLimiter::::DEFAULT, @@ -312,7 +200,14 @@ impl CancellationHandler { } } - pub(crate) fn get_key(self: &Arc) -> Session { + pub fn init_tx(&self, queue: BatchQueue) { + self.tx + .set(queue) + .map_err(|_| {}) + .expect("cancellation queue should be registered once"); + } + + pub(crate) fn get_key(self: Arc) -> Session { // we intentionally generate a random "backend pid" and "secret key" here. // we use the corresponding u64 as an identifier for the // actual endpoint+pid+secret for postgres/pgbouncer. 
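To make the new batching API easier to follow: `CancellationProcessor` above implements `QueueProcessing`, and every caller funnels its request through `BatchQueue::call`; whichever caller wins the `processor` lock drains a batch and applies it for everyone. A toy processor sketched against that trait, assuming the signatures reconstructed above (the `Doubler` type and the numbers are illustrative only):

    use std::convert::Infallible;

    use crate::batch::{BatchQueue, QueueProcessing};

    struct Doubler;

    impl QueueProcessing for Doubler {
        type Req = u64;
        type Res = u64;

        fn batch_size(&self, _queue_size: usize) -> usize {
            8
        }

        async fn apply(&mut self, reqs: Vec<u64>) -> Vec<u64> {
            // Must return exactly one reply per request, in order.
            reqs.into_iter().map(|r| r * 2).collect()
        }
    }

    async fn example(queue: &BatchQueue<Doubler>) {
        // `call` enqueues the request and either waits for a leader or becomes one;
        // passing a never-resolving future means the call cannot be cancelled.
        let res = queue.call(21, std::future::pending::<Infallible>()).await;
        assert!(matches!(res, Ok(42)));
    }
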
@@ -322,83 +217,68 @@ impl CancellationHandler { let key: CancelKeyData = rand::random(); - let prefix_key: KeyPrefix = KeyPrefix::Cancel(key); - let redis_key = prefix_key.build_redis_key(); - debug!("registered new query cancellation key {key}"); Session { key, - redis_key, - cancellation_handler: Arc::clone(self), + cancellation_handler: self, } } + /// This is not cancel safe async fn get_cancel_key( &self, key: CancelKeyData, ) -> Result, CancelError> { - let prefix_key: KeyPrefix = KeyPrefix::Cancel(key); - let redis_key = prefix_key.build_redis_key(); + let guard = Metrics::get() + .proxy + .cancel_channel_size + .guard(RedisMsgKind::HGet); + let op = CancelKeyOp::GetCancelData { key }; - let (resp_tx, resp_rx) = tokio::sync::oneshot::channel(); - let op = CancelKeyOp::GetCancelData { - key: redis_key, - resp_tx, - _guard: Metrics::get() - .proxy - .cancel_channel_size - .guard(RedisMsgKind::HGetAll), - }; - - let Some(tx) = &self.tx else { + let Some(tx) = self.tx.get() else { tracing::warn!("cancellation handler is not available"); return Err(CancelError::InternalError); }; - tx.try_send(op) - .map_err(|e| { - tracing::warn!("failed to send GetCancelData for {key}: {e}"); - }) - .map_err(|()| CancelError::InternalError)?; - - let result = resp_rx.await.map_err(|e| { + const TIMEOUT: Duration = Duration::from_secs(5); + let result = timeout( + TIMEOUT, + tx.call((guard, op), std::future::pending::()), + ) + .await + .map_err(|_| { + tracing::warn!("timed out waiting to receive GetCancelData response"); + CancelError::RateLimit + })? + // cannot be cancelled + .unwrap_or_else(|x| match x {}) + .map_err(|e| { tracing::warn!("failed to receive GetCancelData response: {e}"); CancelError::InternalError })?; - let cancel_state_str: Option = match result { - Ok(mut state) => { - if state.len() == 1 { - Some(state.remove(0).1) - } else { - tracing::warn!("unexpected number of entries in cancel state: {state:?}"); - return Err(CancelError::InternalError); - } - } - Err(e) => { - tracing::warn!("failed to receive cancel state from redis: {e}"); - return Err(CancelError::InternalError); - } - }; + let cancel_state_str = String::from_owned_redis_value(result).map_err(|e| { + tracing::warn!("failed to receive GetCancelData response: {e}"); + CancelError::InternalError + })?; - let cancel_state: Option = match cancel_state_str { - Some(state) => { - let cancel_closure: CancelClosure = serde_json::from_str(&state).map_err(|e| { - tracing::warn!("failed to deserialize cancel state: {e}"); - CancelError::InternalError - })?; - Some(cancel_closure) - } - None => None, - }; - Ok(cancel_state) + let cancel_closure: CancelClosure = + serde_json::from_str(&cancel_state_str).map_err(|e| { + tracing::warn!("failed to deserialize cancel state: {e}"); + CancelError::InternalError + })?; + + Ok(Some(cancel_closure)) } + /// Try to cancel a running query for the corresponding connection. /// If the cancellation key is not found, it will be published to Redis. /// check_allowed - if true, check if the IP is allowed to cancel the query. /// Will fetch IP allowlist internally. /// /// return Result primarily for tests + /// + /// This is not cancel safe pub(crate) async fn cancel_session( &self, key: CancelKeyData, @@ -467,10 +347,10 @@ impl CancellationHandler { /// This should've been a [`std::future::Future`], but /// it's impossible to name a type of an unboxed future /// (we'd need something like `#![feature(type_alias_impl_trait)]`). 
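The `unwrap_or_else(|x| match x {})` above statically discharges the error branch: when the error type is `Infallible`, an exhaustive match needs no arms and coerces to whatever type is required, so no panic path is compiled in. A self-contained illustration of the same trick:

    use std::convert::Infallible;

    fn never_fails() -> Result<u32, Infallible> {
        Ok(7)
    }

    fn main() {
        // The closure can never run; `match x {}` has no arms because Infallible has no values.
        let value: u32 = never_fails().unwrap_or_else(|x| match x {});
        assert_eq!(value, 7);
    }
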
-#[derive(Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct CancelClosure { socket_addr: SocketAddr, - cancel_token: CancelToken, + cancel_token: RawCancelToken, hostname: String, // for pg_sni router user_info: ComputeUserInfo, } @@ -478,7 +358,7 @@ pub struct CancelClosure { impl CancelClosure { pub(crate) fn new( socket_addr: SocketAddr, - cancel_token: CancelToken, + cancel_token: RawCancelToken, hostname: String, user_info: ComputeUserInfo, ) -> Self { @@ -491,7 +371,7 @@ impl CancelClosure { } /// Cancels the query running on user's compute node. pub(crate) async fn try_cancel_query( - self, + &self, compute_config: &ComputeConfig, ) -> Result<(), CancelError> { let socket = TcpStream::connect(self.socket_addr).await?; @@ -512,7 +392,6 @@ impl CancelClosure { pub(crate) struct Session { /// The user-facing key identifying this session. key: CancelKeyData, - redis_key: String, cancellation_handler: Arc, } @@ -521,60 +400,75 @@ impl Session { &self.key } - // Send the store key op to the cancellation handler and set TTL for the key - pub(crate) fn write_cancel_key( + /// Ensure the cancel key is continously refreshed, + /// but stop when the channel is dropped. + /// + /// This is not cancel safe + pub(crate) async fn maintain_cancel_key( &self, - cancel_closure: CancelClosure, - ) -> Result<(), CancelError> { - let Some(tx) = &self.cancellation_handler.tx else { + session_id: uuid::Uuid, + cancel: tokio::sync::oneshot::Receiver, + cancel_closure: &CancelClosure, + compute_config: &ComputeConfig, + ) { + let Some(tx) = self.cancellation_handler.tx.get() else { tracing::warn!("cancellation handler is not available"); - return Err(CancelError::InternalError); + // don't exit, as we only want to exit if cancelled externally. + std::future::pending().await }; - let closure_json = serde_json::to_string(&cancel_closure).map_err(|e| { - tracing::warn!("failed to serialize cancel closure: {e}"); - CancelError::InternalError - })?; + let closure_json = serde_json::to_string(&cancel_closure) + .expect("serialising to json string should not fail") + .into_boxed_str(); - let op = CancelKeyOp::StoreCancelKey { - key: self.redis_key.clone(), - field: "data".to_string(), - value: closure_json, - resp_tx: None, - _guard: Metrics::get() + let mut cancel = pin!(cancel); + + loop { + let guard = Metrics::get() .proxy .cancel_channel_size - .guard(RedisMsgKind::HSet), - expire: CANCEL_KEY_TTL, - }; + .guard(RedisMsgKind::HSet); + let op = CancelKeyOp::StoreCancelKey { + key: self.key, + value: closure_json.clone(), + expire: CANCEL_KEY_TTL, + }; - let _ = tx.try_send(op).map_err(|e| { - let key = self.key; - tracing::warn!("failed to send StoreCancelKey for {key}: {e}"); - }); - Ok(()) - } + tracing::debug!( + src=%self.key, + dest=?cancel_closure.cancel_token, + "registering cancellation key" + ); - pub(crate) fn remove_cancel_key(&self) -> Result<(), CancelError> { - let Some(tx) = &self.cancellation_handler.tx else { - tracing::warn!("cancellation handler is not available"); - return Err(CancelError::InternalError); - }; + match tx.call((guard, op), cancel.as_mut()).await { + Ok(Ok(_)) => { + tracing::debug!( + src=%self.key, + dest=?cancel_closure.cancel_token, + "registered cancellation key" + ); - let op = CancelKeyOp::RemoveCancelKey { - key: self.redis_key.clone(), - field: "data".to_string(), - resp_tx: None, - _guard: Metrics::get() - .proxy - .cancel_channel_size - .guard(RedisMsgKind::HDel), - }; + // wait before continuing. 
+ tokio::time::sleep(CANCEL_KEY_REFRESH).await; + } + // retry immediately. + Ok(Err(error)) => { + tracing::warn!(?error, "error registering cancellation key"); + } + Err(Err(_cancelled)) => break, + } + } - let _ = tx.try_send(op).map_err(|e| { - let key = self.key; - tracing::warn!("failed to send RemoveCancelKey for {key}: {e}"); - }); - Ok(()) + if let Err(err) = cancel_closure + .try_cancel_query(compute_config) + .boxed() + .await + { + tracing::warn!( + ?session_id, + ?err, + "could not cancel the query in the database" + ); + } } } diff --git a/proxy/src/compute/mod.rs b/proxy/src/compute/mod.rs index 64d6cb654f..489e220019 100644 --- a/proxy/src/compute/mod.rs +++ b/proxy/src/compute/mod.rs @@ -6,10 +6,10 @@ use std::net::{IpAddr, SocketAddr}; use futures::{FutureExt, TryFutureExt}; use itertools::Itertools; -use postgres_client::config::{AuthKeys, SslMode}; +use postgres_client::config::{AuthKeys, ChannelBinding, SslMode}; use postgres_client::maybe_tls_stream::MaybeTlsStream; use postgres_client::tls::MakeTlsConnect; -use postgres_client::{CancelToken, NoTls, RawConnection}; +use postgres_client::{NoTls, RawCancelToken, RawConnection}; use postgres_protocol::message::backend::NoticeResponseBody; use thiserror::Error; use tokio::net::{TcpStream, lookup_host}; @@ -33,12 +33,51 @@ use crate::types::Host; pub const COULD_NOT_CONNECT: &str = "Couldn't connect to compute node"; #[derive(Debug, Error)] -pub(crate) enum ConnectionError { +pub(crate) enum PostgresError { /// This error doesn't seem to reveal any secrets; for instance, /// `postgres_client::error::Kind` doesn't contain ip addresses and such. #[error("{COULD_NOT_CONNECT}: {0}")] Postgres(#[from] postgres_client::Error), +} +impl UserFacingError for PostgresError { + fn to_string_client(&self) -> String { + match self { + // This helps us drop irrelevant library-specific prefixes. + // TODO: propagate severity level and other parameters. + PostgresError::Postgres(err) => match err.as_db_error() { + Some(err) => { + let msg = err.message(); + + if msg.starts_with("unsupported startup parameter: ") + || msg.starts_with("unsupported startup parameter in options: ") + { + format!( + "{msg}. Please use unpooled connection or remove this parameter from the startup package. More details: https://neon.tech/docs/connect/connection-errors#unsupported-startup-parameter" + ) + } else { + msg.to_owned() + } + } + None => err.to_string(), + }, + } + } +} + +impl ReportableError for PostgresError { + fn get_error_kind(&self) -> crate::error::ErrorKind { + match self { + PostgresError::Postgres(e) if e.as_db_error().is_some() => { + crate::error::ErrorKind::Postgres + } + PostgresError::Postgres(_) => crate::error::ErrorKind::Compute, + } + } +} + +#[derive(Debug, Error)] +pub(crate) enum ConnectionError { #[error("{COULD_NOT_CONNECT}: {0}")] TlsError(#[from] TlsError), @@ -52,22 +91,6 @@ pub(crate) enum ConnectionError { impl UserFacingError for ConnectionError { fn to_string_client(&self) -> String { match self { - // This helps us drop irrelevant library-specific prefixes. - // TODO: propagate severity level and other parameters. - ConnectionError::Postgres(err) => match err.as_db_error() { - Some(err) => { - let msg = err.message(); - - if msg.starts_with("unsupported startup parameter: ") - || msg.starts_with("unsupported startup parameter in options: ") - { - format!("{msg}. Please use unpooled connection or remove this parameter from the startup package. 
More details: https://neon.tech/docs/connect/connection-errors#unsupported-startup-parameter") - } else { - msg.to_owned() - } - } - None => err.to_string(), - }, ConnectionError::WakeComputeError(err) => err.to_string_client(), ConnectionError::TooManyConnectionAttempts(_) => { "Failed to acquire permit to connect to the database. Too many database connection attempts are currently ongoing.".to_owned() @@ -80,10 +103,6 @@ impl UserFacingError for ConnectionError { impl ReportableError for ConnectionError { fn get_error_kind(&self) -> crate::error::ErrorKind { match self { - ConnectionError::Postgres(e) if e.as_db_error().is_some() => { - crate::error::ErrorKind::Postgres - } - ConnectionError::Postgres(_) => crate::error::ErrorKind::Compute, ConnectionError::TlsError(_) => crate::error::ErrorKind::Compute, ConnectionError::WakeComputeError(e) => e.get_error_kind(), ConnectionError::TooManyConnectionAttempts(e) => e.get_error_kind(), @@ -110,6 +129,8 @@ pub(crate) struct AuthInfo { auth: Option, server_params: StartupMessageParams, + channel_binding: ChannelBinding, + /// Console redirect sets user and database, we shouldn't re-use those from the params. skip_db_user: bool, } @@ -133,6 +154,8 @@ impl AuthInfo { auth: pw.map(|pw| Auth::Password(pw.as_bytes().to_owned())), server_params, skip_db_user: true, + // pg-sni-router is a mitm so this would fail. + channel_binding: ChannelBinding::Disable, } } @@ -146,6 +169,7 @@ impl AuthInfo { }, server_params: StartupMessageParams::default(), skip_db_user: false, + channel_binding: ChannelBinding::Prefer, } } } @@ -168,6 +192,7 @@ impl AuthInfo { Some(Auth::Password(pw)) => config.password(pw), None => &mut config, }; + config.channel_binding(self.channel_binding); for (k, v) in self.server_params.iter() { config.set_param(k, v); } @@ -206,6 +231,56 @@ impl AuthInfo { } } } + + pub async fn authenticate( + &self, + ctx: &RequestContext, + compute: &mut ComputeConnection, + user_info: ComputeUserInfo, + ) -> Result { + // client config with stubbed connect info. + // TODO(conrad): should we rewrite this to bypass tokio-postgres2 entirely, + // utilising pqproto.rs. + let mut tmp_config = postgres_client::Config::new(String::new(), 0); + // We have already established SSL if necessary. + tmp_config.ssl_mode(SslMode::Disable); + let tmp_config = self.enrich(tmp_config); + + let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); + let connection = tmp_config + .tls_and_authenticate(&mut compute.stream, NoTls) + .await?; + drop(pause); + + let RawConnection { + stream: _, + parameters, + delayed_notice, + process_id, + secret_key, + } = connection; + + tracing::Span::current().record("pid", tracing::field::display(process_id)); + + // NB: CancelToken is supposed to hold socket_addr, but we use connect_raw. + // Yet another reason to rework the connection establishing code. + let cancel_closure = CancelClosure::new( + compute.socket_addr, + RawCancelToken { + ssl_mode: compute.ssl_mode, + process_id, + secret_key, + }, + compute.hostname.to_string(), + user_info, + ); + + Ok(PostgresSettings { + params: parameters, + cancel_closure, + delayed_notice, + }) + } } impl ConnectInfo { @@ -265,53 +340,45 @@ impl ConnectInfo { } } -type RustlsStream = >::Stream; +pub type RustlsStream = >::Stream; +pub type MaybeRustlsStream = MaybeTlsStream; -pub(crate) struct PostgresConnection { - /// Socket connected to a compute node. - pub(crate) stream: MaybeTlsStream, +// TODO(conrad): we don't need to parse these. 
+// These are just immediately forwarded back to the client. +// We could instead stream them out instead of reading them into memory. +pub struct PostgresSettings { /// PostgreSQL connection parameters. - pub(crate) params: std::collections::HashMap, + pub params: std::collections::HashMap, /// Query cancellation token. - pub(crate) cancel_closure: CancelClosure, - /// Labels for proxy's metrics. - pub(crate) aux: MetricsAuxInfo, + pub cancel_closure: CancelClosure, /// Notices received from compute after authenticating - pub(crate) delayed_notice: Vec, + pub delayed_notice: Vec, +} - _guage: NumDbConnectionsGuard<'static>, +pub struct ComputeConnection { + /// Socket connected to a compute node. + pub stream: MaybeTlsStream, + /// Labels for proxy's metrics. + pub aux: MetricsAuxInfo, + pub hostname: Host, + pub ssl_mode: SslMode, + pub socket_addr: SocketAddr, + pub guage: NumDbConnectionsGuard<'static>, } impl ConnectInfo { /// Connect to a corresponding compute node. - pub(crate) async fn connect( + pub async fn connect( &self, ctx: &RequestContext, - aux: MetricsAuxInfo, - auth: &AuthInfo, + aux: &MetricsAuxInfo, config: &ComputeConfig, - user_info: ComputeUserInfo, - ) -> Result { - let mut tmp_config = auth.enrich(self.to_postgres_client_config()); - // we setup SSL early in `ConnectInfo::connect_raw`. - tmp_config.ssl_mode(SslMode::Disable); - + ) -> Result { let pause = ctx.latency_timer_pause(crate::metrics::Waiting::Compute); let (socket_addr, stream) = self.connect_raw(config).await?; - let connection = tmp_config.connect_raw(stream, NoTls).await?; drop(pause); - let RawConnection { - stream, - parameters, - delayed_notice, - process_id, - secret_key, - } = connection; - - tracing::Span::current().record("pid", tracing::field::display(process_id)); tracing::Span::current().record("compute_id", tracing::field::display(&aux.compute_id)); - let MaybeTlsStream::Raw(stream) = stream.into_inner(); // TODO: lots of useful info but maybe we can move it elsewhere (eg traces?) info!( @@ -323,27 +390,13 @@ impl ConnectInfo { ctx.get_testodrome_id().unwrap_or_default(), ); - // NB: CancelToken is supposed to hold socket_addr, but we use connect_raw. - // Yet another reason to rework the connection establishing code. 
- let cancel_closure = CancelClosure::new( - socket_addr, - CancelToken { - socket_config: None, - ssl_mode: self.ssl_mode, - process_id, - secret_key, - }, - self.host.to_string(), - user_info, - ); - - let connection = PostgresConnection { + let connection = ComputeConnection { stream, - params: parameters, - delayed_notice, - cancel_closure, - aux, - _guage: Metrics::get().proxy.db_connections.guard(ctx.protocol()), + socket_addr, + hostname: self.host.clone(), + ssl_mode: self.ssl_mode, + aux: aux.clone(), + guage: Metrics::get().proxy.db_connections.guard(ctx.protocol()), }; Ok(connection) diff --git a/proxy/src/console_redirect_proxy.rs b/proxy/src/console_redirect_proxy.rs index 5331ea41fd..113a11beab 100644 --- a/proxy/src/console_redirect_proxy.rs +++ b/proxy/src/console_redirect_proxy.rs @@ -120,7 +120,7 @@ pub async fn task_main( Ok(Some(p)) => { ctx.set_success(); let _disconnect = ctx.log_connect(); - match p.proxy_pass(&config.connect_to_compute).await { + match p.proxy_pass().await { Ok(()) => {} Err(ErrorSource::Client(e)) => { error!( @@ -218,11 +218,9 @@ pub(crate) async fn handle_client( }; auth_info.set_startup_params(¶ms, true); - let node = connect_to_compute( + let mut node = connect_to_compute( ctx, &TcpMechanism { - user_info, - auth: auth_info, locks: &config.connect_compute_locks, }, &node_info, @@ -232,22 +230,40 @@ pub(crate) async fn handle_client( .or_else(|e| async { Err(stream.throw_error(e, Some(ctx)).await) }) .await?; - let cancellation_handler_clone = Arc::clone(&cancellation_handler); - let session = cancellation_handler_clone.get_key(); + let pg_settings = auth_info + .authenticate(ctx, &mut node, user_info) + .or_else(|e| async { Err(stream.throw_error(e, Some(ctx)).await) }) + .await?; - session.write_cancel_key(node.cancel_closure.clone())?; + let session = cancellation_handler.get_key(); - prepare_client_connection(&node, *session.key(), &mut stream); + prepare_client_connection(&pg_settings, *session.key(), &mut stream); let stream = stream.flush_and_into_inner().await?; + let session_id = ctx.session_id(); + let (cancel_on_shutdown, cancel) = tokio::sync::oneshot::channel(); + tokio::spawn(async move { + session + .maintain_cancel_key( + session_id, + cancel, + &pg_settings.cancel_closure, + &config.connect_to_compute, + ) + .await; + }); + Ok(Some(ProxyPassthrough { client: stream, - aux: node.aux.clone(), + compute: node.stream, + + aux: node.aux, private_link_id: None, - compute: node, - session_id: ctx.session_id(), - cancel: session, + + _cancel_on_shutdown: cancel_on_shutdown, + _req: request_gauge, _conn: conn_gauge, + _db_conn: node.guage, })) } diff --git a/proxy/src/control_plane/mod.rs b/proxy/src/control_plane/mod.rs index ed83e98bfe..a8c59dad0c 100644 --- a/proxy/src/control_plane/mod.rs +++ b/proxy/src/control_plane/mod.rs @@ -76,13 +76,9 @@ impl NodeInfo { pub(crate) async fn connect( &self, ctx: &RequestContext, - auth: &compute::AuthInfo, config: &ComputeConfig, - user_info: ComputeUserInfo, - ) -> Result { - self.conn_info - .connect(ctx, self.aux.clone(), auth, config, user_info) - .await + ) -> Result { + self.conn_info.connect(ctx, &self.aux, config).await } } diff --git a/proxy/src/lib.rs b/proxy/src/lib.rs index 026c6aeba9..263d784e78 100644 --- a/proxy/src/lib.rs +++ b/proxy/src/lib.rs @@ -61,6 +61,10 @@ clippy::too_many_lines, clippy::unused_self )] +#![allow( + clippy::unsafe_derive_deserialize, + reason = "false positive: https://github.com/rust-lang/rust-clippy/issues/15120" +)] #![cfg_attr( any(test, feature = 
"testing"), allow( @@ -75,6 +79,7 @@ pub mod binary; mod auth; +mod batch; mod cache; mod cancellation; mod compute; diff --git a/proxy/src/pglb/passthrough.rs b/proxy/src/pglb/passthrough.rs index 6f651d383d..d4c029f6d9 100644 --- a/proxy/src/pglb/passthrough.rs +++ b/proxy/src/pglb/passthrough.rs @@ -1,15 +1,17 @@ -use futures::FutureExt; +use std::convert::Infallible; + use smol_str::SmolStr; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::debug; use utils::measured_stream::MeasuredStream; use super::copy_bidirectional::ErrorSource; -use crate::cancellation; -use crate::compute::PostgresConnection; -use crate::config::ComputeConfig; +use crate::compute::MaybeRustlsStream; use crate::control_plane::messages::MetricsAuxInfo; -use crate::metrics::{Direction, Metrics, NumClientConnectionsGuard, NumConnectionRequestsGuard}; +use crate::metrics::{ + Direction, Metrics, NumClientConnectionsGuard, NumConnectionRequestsGuard, + NumDbConnectionsGuard, +}; use crate::stream::Stream; use crate::usage_metrics::{Ids, MetricCounterRecorder, USAGE_METRICS}; @@ -64,40 +66,20 @@ pub(crate) async fn proxy_pass( pub(crate) struct ProxyPassthrough { pub(crate) client: Stream, - pub(crate) compute: PostgresConnection, + pub(crate) compute: MaybeRustlsStream, + pub(crate) aux: MetricsAuxInfo, - pub(crate) session_id: uuid::Uuid, pub(crate) private_link_id: Option, - pub(crate) cancel: cancellation::Session, + + pub(crate) _cancel_on_shutdown: tokio::sync::oneshot::Sender, pub(crate) _req: NumConnectionRequestsGuard<'static>, pub(crate) _conn: NumClientConnectionsGuard<'static>, + pub(crate) _db_conn: NumDbConnectionsGuard<'static>, } impl ProxyPassthrough { - pub(crate) async fn proxy_pass( - self, - compute_config: &ComputeConfig, - ) -> Result<(), ErrorSource> { - let res = proxy_pass( - self.client, - self.compute.stream, - self.aux, - self.private_link_id, - ) - .await; - if let Err(err) = self - .compute - .cancel_closure - .try_cancel_query(compute_config) - .boxed() - .await - { - tracing::warn!(session_id = ?self.session_id, ?err, "could not cancel the query in the database"); - } - - drop(self.cancel.remove_cancel_key()); // we don't need a result. 
If the queue is full, we just log the error - - res + pub(crate) async fn proxy_pass(self) -> Result<(), ErrorSource> { + proxy_pass(self.client, self.compute, self.aux, self.private_link_id).await } } diff --git a/proxy/src/proxy/connect_compute.rs b/proxy/src/proxy/connect_compute.rs index 92ed84f50f..aa675a439e 100644 --- a/proxy/src/proxy/connect_compute.rs +++ b/proxy/src/proxy/connect_compute.rs @@ -2,8 +2,7 @@ use async_trait::async_trait; use tokio::time; use tracing::{debug, info, warn}; -use crate::auth::backend::ComputeUserInfo; -use crate::compute::{self, AuthInfo, COULD_NOT_CONNECT, PostgresConnection}; +use crate::compute::{self, COULD_NOT_CONNECT, ComputeConnection}; use crate::config::{ComputeConfig, RetryConfig}; use crate::context::RequestContext; use crate::control_plane::errors::WakeComputeError; @@ -50,15 +49,13 @@ pub(crate) trait ConnectMechanism { } pub(crate) struct TcpMechanism { - pub(crate) auth: AuthInfo, /// connect_to_compute concurrency lock pub(crate) locks: &'static ApiLocks, - pub(crate) user_info: ComputeUserInfo, } #[async_trait] impl ConnectMechanism for TcpMechanism { - type Connection = PostgresConnection; + type Connection = ComputeConnection; type ConnectError = compute::ConnectionError; type Error = compute::ConnectionError; @@ -71,13 +68,9 @@ impl ConnectMechanism for TcpMechanism { ctx: &RequestContext, node_info: &control_plane::CachedNodeInfo, config: &ComputeConfig, - ) -> Result { + ) -> Result { let permit = self.locks.get_permit(&node_info.conn_info.host).await?; - permit.release_result( - node_info - .connect(ctx, &self.auth, config, self.user_info.clone()) - .await, - ) + permit.release_result(node_info.connect(ctx, config).await) } } diff --git a/proxy/src/proxy/mod.rs b/proxy/src/proxy/mod.rs index 4211406f6c..6947e07488 100644 --- a/proxy/src/proxy/mod.rs +++ b/proxy/src/proxy/mod.rs @@ -155,7 +155,7 @@ pub async fn task_main( Ok(Some(p)) => { ctx.set_success(); let _disconnect = ctx.log_connect(); - match p.proxy_pass(&config.connect_to_compute).await { + match p.proxy_pass().await { Ok(()) => {} Err(ErrorSource::Client(e)) => { warn!( @@ -357,28 +357,43 @@ pub(crate) async fn handle_client( let res = connect_to_compute( ctx, &TcpMechanism { - user_info: creds.info.clone(), - auth: auth_info, locks: &config.connect_compute_locks, }, - &auth::Backend::ControlPlane(cplane, creds.info), + &auth::Backend::ControlPlane(cplane, creds.info.clone()), config.wake_compute_retry_config, &config.connect_to_compute, ) .await; - let node = match res { + let mut node = match res { Ok(node) => node, Err(e) => Err(stream.throw_error(e, Some(ctx)).await)?, }; - let cancellation_handler_clone = Arc::clone(&cancellation_handler); - let session = cancellation_handler_clone.get_key(); + let pg_settings = auth_info.authenticate(ctx, &mut node, creds.info).await; + let pg_settings = match pg_settings { + Ok(pg_settings) => pg_settings, + Err(e) => Err(stream.throw_error(e, Some(ctx)).await)?, + }; - session.write_cancel_key(node.cancel_closure.clone())?; - prepare_client_connection(&node, *session.key(), &mut stream); + let session = cancellation_handler.get_key(); + + prepare_client_connection(&pg_settings, *session.key(), &mut stream); let stream = stream.flush_and_into_inner().await?; + let session_id = ctx.session_id(); + let (cancel_on_shutdown, cancel) = tokio::sync::oneshot::channel(); + tokio::spawn(async move { + session + .maintain_cancel_key( + session_id, + cancel, + &pg_settings.cancel_closure, + &config.connect_to_compute, + ) + .await; + }); 
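The spawned maintain_cancel_key task above stays alive only while the connection's ProxyPassthrough holds the _cancel_on_shutdown sender; once that sender is dropped, the receiver resolves, the refresh loop ends, and the query is cancelled on compute as a final step. A simplified sketch of that keep-alive-until-dropped shape (maintain_lease, the () payload, and the interval are illustrative, not the proxy's actual code):

    use std::time::Duration;
    use tokio::sync::oneshot;

    async fn maintain_lease(mut stop: oneshot::Receiver<()>, refresh: Duration) {
        loop {
            // Refresh the key and its TTL here, e.g. HSET + EXPIRE in Redis.

            tokio::select! {
                // Resolves (with Err) as soon as the sender half is dropped
                // by the owning connection, ending the loop.
                _ = &mut stop => break,
                // Otherwise sleep until the next refresh is due.
                _ = tokio::time::sleep(refresh) => {}
            }
        }
        // Once the connection is gone: best-effort cleanup, e.g. sending the
        // postgres cancel request to the compute node.
    }

    // Usage: the connection owner keeps `guard` alive; dropping it stops the task.
    fn spawn_maintenance() -> oneshot::Sender<()> {
        let (guard, stop) = oneshot::channel();
        tokio::spawn(maintain_lease(stop, Duration::from_secs(60)));
        guard
    }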
+ let private_link_id = match ctx.extra() { Some(ConnectionInfoExtra::Aws { vpce_id }) => Some(vpce_id.clone()), Some(ConnectionInfoExtra::Azure { link_id }) => Some(link_id.to_smolstr()), @@ -387,31 +402,34 @@ pub(crate) async fn handle_client( Ok(Some(ProxyPassthrough { client: stream, - aux: node.aux.clone(), + compute: node.stream, + + aux: node.aux, private_link_id, - compute: node, - session_id: ctx.session_id(), - cancel: session, + + _cancel_on_shutdown: cancel_on_shutdown, + _req: request_gauge, _conn: conn_gauge, + _db_conn: node.guage, })) } /// Finish client connection initialization: confirm auth success, send params, etc. pub(crate) fn prepare_client_connection( - node: &compute::PostgresConnection, + settings: &compute::PostgresSettings, cancel_key_data: CancelKeyData, stream: &mut PqStream, ) { // Forward all deferred notices to the client. - for notice in &node.delayed_notice { + for notice in &settings.delayed_notice { stream.write_raw(notice.as_bytes().len(), b'N', |buf| { buf.extend_from_slice(notice.as_bytes()); }); } // Forward all postgres connection params to the client. - for (name, value) in &node.params { + for (name, value) in &settings.params { stream.write_message(BeMessage::ParameterStatus { name: name.as_bytes(), value: value.as_bytes(), diff --git a/proxy/src/proxy/retry.rs b/proxy/src/proxy/retry.rs index 0f19944afa..e9eca95724 100644 --- a/proxy/src/proxy/retry.rs +++ b/proxy/src/proxy/retry.rs @@ -99,7 +99,6 @@ impl ShouldRetryWakeCompute for postgres_client::Error { impl CouldRetry for compute::ConnectionError { fn could_retry(&self) -> bool { match self { - compute::ConnectionError::Postgres(err) => err.could_retry(), compute::ConnectionError::TlsError(err) => err.could_retry(), compute::ConnectionError::WakeComputeError(err) => err.could_retry(), compute::ConnectionError::TooManyConnectionAttempts(_) => false, @@ -109,7 +108,6 @@ impl CouldRetry for compute::ConnectionError { impl ShouldRetryWakeCompute for compute::ConnectionError { fn should_retry_wake_compute(&self) -> bool { match self { - compute::ConnectionError::Postgres(err) => err.should_retry_wake_compute(), // the cache entry was not checked for validity compute::ConnectionError::TooManyConnectionAttempts(_) => false, _ => true, diff --git a/proxy/src/proxy/tests/mitm.rs b/proxy/src/proxy/tests/mitm.rs index c92ee49b8d..67dd0ab522 100644 --- a/proxy/src/proxy/tests/mitm.rs +++ b/proxy/src/proxy/tests/mitm.rs @@ -169,7 +169,7 @@ async fn scram_auth_disable_channel_binding() -> anyhow::Result<()> { .dbname("db") .password("password") .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await?; proxy.await? @@ -252,7 +252,7 @@ async fn connect_failure( .dbname("db") .password("password") .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) 
.await .err() .context("client shouldn't be able to connect")?; diff --git a/proxy/src/proxy/tests/mod.rs b/proxy/src/proxy/tests/mod.rs index 12de5cbc09..29a269208a 100644 --- a/proxy/src/proxy/tests/mod.rs +++ b/proxy/src/proxy/tests/mod.rs @@ -199,7 +199,7 @@ async fn handshake_tls_is_enforced_by_proxy() -> anyhow::Result<()> { .user("john_doe") .dbname("earth") .ssl_mode(SslMode::Disable) - .connect_raw(server, NoTls) + .tls_and_authenticate(server, NoTls) .await .err() // -> Option .context("client shouldn't be able to connect")?; @@ -228,7 +228,7 @@ async fn handshake_tls() -> anyhow::Result<()> { .user("john_doe") .dbname("earth") .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await?; proxy.await? @@ -245,7 +245,7 @@ async fn handshake_raw() -> anyhow::Result<()> { .dbname("earth") .set_param("options", "project=generic-project-name") .ssl_mode(SslMode::Prefer) - .connect_raw(server, NoTls) + .tls_and_authenticate(server, NoTls) .await?; proxy.await? @@ -293,7 +293,7 @@ async fn scram_auth_good(#[case] password: &str) -> anyhow::Result<()> { .dbname("db") .password(password) .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await?; proxy.await? @@ -317,7 +317,7 @@ async fn scram_auth_disable_channel_binding() -> anyhow::Result<()> { .dbname("db") .password("password") .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) .await?; proxy.await? @@ -344,7 +344,7 @@ async fn scram_auth_mock() -> anyhow::Result<()> { .dbname("db") .password(&password) // no password will match the mocked secret .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) + .tls_and_authenticate(server, client_config.make_tls_connect()?) 
.await .err() // -> Option .context("client shouldn't be able to connect")?; diff --git a/proxy/src/redis/keys.rs b/proxy/src/redis/keys.rs index 3113bad949..ffb7bc876b 100644 --- a/proxy/src/redis/keys.rs +++ b/proxy/src/redis/keys.rs @@ -1,8 +1,4 @@ -use std::io::ErrorKind; - -use anyhow::Ok; - -use crate::pqproto::{CancelKeyData, id_to_cancel_key}; +use crate::pqproto::CancelKeyData; pub mod keyspace { pub const CANCEL_PREFIX: &str = "cancel"; @@ -23,40 +19,12 @@ impl KeyPrefix { } } } - - #[allow(dead_code)] - pub(crate) fn as_str(&self) -> &'static str { - match self { - KeyPrefix::Cancel(_) => keyspace::CANCEL_PREFIX, - } - } -} - -#[allow(dead_code)] -pub(crate) fn parse_redis_key(key: &str) -> anyhow::Result { - let (prefix, key_str) = key.split_once(':').ok_or_else(|| { - anyhow::anyhow!(std::io::Error::new( - ErrorKind::InvalidData, - "missing prefix" - )) - })?; - - match prefix { - keyspace::CANCEL_PREFIX => { - let id = u64::from_str_radix(key_str, 16)?; - - Ok(KeyPrefix::Cancel(id_to_cancel_key(id))) - } - _ => Err(anyhow::anyhow!(std::io::Error::new( - ErrorKind::InvalidData, - "unknown prefix" - ))), - } } #[cfg(test)] mod tests { use super::*; + use crate::pqproto::id_to_cancel_key; #[test] fn test_build_redis_key() { @@ -65,16 +33,4 @@ mod tests { let redis_key = cancel_key.build_redis_key(); assert_eq!(redis_key, "cancel:30390000d431"); } - - #[test] - fn test_parse_redis_key() { - let redis_key = "cancel:30390000d431"; - let key: KeyPrefix = parse_redis_key(redis_key).expect("Failed to parse key"); - - let ref_key = id_to_cancel_key(12345 << 32 | 54321); - - assert_eq!(key.as_str(), KeyPrefix::Cancel(ref_key).as_str()); - let KeyPrefix::Cancel(cancel_key) = key; - assert_eq!(ref_key, cancel_key); - } } diff --git a/proxy/src/redis/kv_ops.rs b/proxy/src/redis/kv_ops.rs index f71730c533..f8d3b5cc66 100644 --- a/proxy/src/redis/kv_ops.rs +++ b/proxy/src/redis/kv_ops.rs @@ -1,3 +1,6 @@ +use std::time::Duration; + +use futures::FutureExt; use redis::aio::ConnectionLike; use redis::{Cmd, FromRedisValue, Pipeline, RedisResult}; @@ -35,14 +38,11 @@ impl RedisKVClient { } pub async fn try_connect(&mut self) -> anyhow::Result<()> { - match self.client.connect().await { - Ok(()) => {} - Err(e) => { - tracing::error!("failed to connect to redis: {e}"); - return Err(e); - } - } - Ok(()) + self.client + .connect() + .boxed() + .await + .inspect_err(|e| tracing::error!("failed to connect to redis: {e}")) } pub(crate) async fn query( @@ -54,15 +54,25 @@ impl RedisKVClient { return Err(anyhow::anyhow!("Rate limit exceeded")); } - match q.query(&mut self.client).await { + let e = match q.query(&mut self.client).await { Ok(t) => return Ok(t), - Err(e) => { - tracing::error!("failed to run query: {e}"); + Err(e) => e, + }; + + tracing::error!("failed to run query: {e}"); + match e.retry_method() { + redis::RetryMethod::Reconnect => { + tracing::info!("Redis client is disconnected. Reconnecting..."); + self.try_connect().await?; } + redis::RetryMethod::RetryImmediately => {} + redis::RetryMethod::WaitAndRetry => { + // somewhat arbitrary. + tokio::time::sleep(Duration::from_millis(100)).await; + } + _ => Err(e)?, } - tracing::info!("Redis client is disconnected. Reconnecting..."); - self.try_connect().await?; Ok(q.query(&mut self.client).await?) 
} } diff --git a/proxy/src/serverless/websocket.rs b/proxy/src/serverless/websocket.rs index 8648a94869..0d374e6df2 100644 --- a/proxy/src/serverless/websocket.rs +++ b/proxy/src/serverless/websocket.rs @@ -167,7 +167,7 @@ pub(crate) async fn serve_websocket( Ok(Some(p)) => { ctx.set_success(); ctx.log_connect(); - match p.proxy_pass(&config.connect_to_compute).await { + match p.proxy_pass().await { Ok(()) => Ok(()), Err(ErrorSource::Client(err)) => Err(err).context("client"), Err(ErrorSource::Compute(err)) => Err(err).context("compute"), diff --git a/proxy/src/usage_metrics.rs b/proxy/src/usage_metrics.rs index 115b958c54..c82c4865a7 100644 --- a/proxy/src/usage_metrics.rs +++ b/proxy/src/usage_metrics.rs @@ -399,7 +399,7 @@ async fn collect_metrics_iteration( fn create_remote_path_prefix(now: DateTime) -> String { format!( - "year={year:04}/month={month:02}/day={day:02}/{hour:02}:{minute:02}:{second:02}Z", + "year={year:04}/month={month:02}/day={day:02}/hour={hour:02}/{hour:02}:{minute:02}:{second:02}Z", year = now.year(), month = now.month(), day = now.day(), @@ -461,7 +461,7 @@ async fn upload_backup_events( real_now.second().into(), real_now.nanosecond(), )); - let path = format!("{path_prefix}_{id}.json.gz"); + let path = format!("{path_prefix}_{id}.ndjson.gz"); let remote_path = match RemotePath::from_string(&path) { Ok(remote_path) => remote_path, Err(e) => { @@ -471,9 +471,12 @@ async fn upload_backup_events( // TODO: This is async compression from Vec to Vec. Rewrite as byte stream. // Use sync compression in blocking threadpool. - let data = serde_json::to_vec(chunk).context("serialize metrics")?; let mut encoder = GzipEncoder::new(Vec::new()); - encoder.write_all(&data).await.context("compress metrics")?; + for event in chunk.events.iter() { + let data = serde_json::to_vec(event).context("serialize metrics")?; + encoder.write_all(&data).await.context("compress metrics")?; + encoder.write_all(b"\n").await.context("compress metrics")?; + } encoder.shutdown().await.context("compress metrics")?; let compressed_data: Bytes = encoder.get_ref().clone().into(); backoff::retry( @@ -499,7 +502,7 @@ async fn upload_backup_events( #[cfg(test)] mod tests { use std::fs; - use std::io::BufReader; + use std::io::{BufRead, BufReader}; use std::sync::{Arc, Mutex}; use anyhow::Error; @@ -673,11 +676,22 @@ mod tests { { let path = local_fs_path.join(&path_prefix).to_string(); if entry.path().to_str().unwrap().starts_with(&path) { - let chunk = serde_json::from_reader(flate2::bufread::GzDecoder::new( - BufReader::new(fs::File::open(entry.into_path()).unwrap()), - )) - .unwrap(); - stored_chunks.push(chunk); + let file = fs::File::open(entry.into_path()).unwrap(); + let decoder = flate2::bufread::GzDecoder::new(BufReader::new(file)); + let reader = BufReader::new(decoder); + + let mut events: Vec> = Vec::new(); + for line in reader.lines() { + let line = line.unwrap(); + let event: Event = serde_json::from_str(&line).unwrap(); + events.push(event); + } + + let report = Report { + events: Cow::Owned(events), + }; + + stored_chunks.push(report); } } storage_test_dir.close().ok(); diff --git a/safekeeper/Cargo.toml b/safekeeper/Cargo.toml index 0a8cc415be..6955028c73 100644 --- a/safekeeper/Cargo.toml +++ b/safekeeper/Cargo.toml @@ -58,6 +58,7 @@ metrics.workspace = true pem.workspace = true postgres_backend.workspace = true postgres_ffi.workspace = true +postgres_versioninfo.workspace = true pq_proto.workspace = true remote_storage.workspace = true safekeeper_api.workspace = true diff --git 
a/safekeeper/src/control_file.rs b/safekeeper/src/control_file.rs index 1bf3e4cac1..4fc62fb229 100644 --- a/safekeeper/src/control_file.rs +++ b/safekeeper/src/control_file.rs @@ -206,16 +206,10 @@ impl Storage for FileStorage { let buf: Vec = s.write_to_buf()?; control_partial.write_all(&buf).await.with_context(|| { - format!( - "failed to write safekeeper state into control file at: {}", - control_partial_path - ) + format!("failed to write safekeeper state into control file at: {control_partial_path}") })?; control_partial.flush().await.with_context(|| { - format!( - "failed to flush safekeeper state into control file at: {}", - control_partial_path - ) + format!("failed to flush safekeeper state into control file at: {control_partial_path}") })?; let control_path = self.timeline_dir.join(CONTROL_FILE_NAME); diff --git a/safekeeper/src/control_file_upgrade.rs b/safekeeper/src/control_file_upgrade.rs index 1ad9e62f9b..555cbe457b 100644 --- a/safekeeper/src/control_file_upgrade.rs +++ b/safekeeper/src/control_file_upgrade.rs @@ -2,6 +2,7 @@ use std::vec; use anyhow::{Result, bail}; +use postgres_versioninfo::PgVersionId; use pq_proto::SystemId; use safekeeper_api::membership::{Configuration, INVALID_GENERATION}; use safekeeper_api::{ServerInfo, Term}; @@ -46,7 +47,7 @@ struct SafeKeeperStateV1 { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ServerInfoV2 { /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: SystemId, pub tenant_id: TenantId, pub timeline_id: TimelineId, @@ -75,7 +76,7 @@ pub struct SafeKeeperStateV2 { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ServerInfoV3 { /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: SystemId, #[serde(with = "hex")] pub tenant_id: TenantId, @@ -444,13 +445,13 @@ pub fn upgrade_control_file(buf: &[u8], version: u32) -> Result TimelinePersisten mod tests { use std::str::FromStr; + use postgres_versioninfo::PgMajorVersion; use utils::Hex; use utils::id::NodeId; @@ -563,7 +565,7 @@ mod tests { epoch: 43, }, server: ServerInfoV2 { - pg_version: 14, + pg_version: PgVersionId::from(PgMajorVersion::PG14), system_id: 0x1234567887654321, tenant_id, timeline_id, @@ -586,8 +588,8 @@ mod tests { 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // epoch 0x2b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // pg_version - 0x0e, 0x00, 0x00, 0x00, + // pg_version = 140000 + 0xE0, 0x22, 0x02, 0x00, // system_id 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, // tenant_id @@ -626,7 +628,7 @@ mod tests { }]), }, server: ServerInfoV2 { - pg_version: 14, + pg_version: PgVersionId::from(PgMajorVersion::PG14), system_id: 0x1234567887654321, tenant_id, timeline_id, @@ -646,7 +648,7 @@ mod tests { let expected = [ 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, + 0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0xcf, 0x04, 0x80, 0x92, 0x97, 0x07, 0xee, 0x75, 0x37, 0x23, 0x37, 0xef, 0xaa, 0x5e, 0xcf, 0x96, 0x11, 0x2d, 0xed, 0x66, 0x42, 0x2a, 0xa5, 0xe9, 0x53, 0xe5, 0x44, 0x0f, 0xa5, 0x42, 0x7a, 0xc4, 0x78, 0x56, 0x34, 0x12, 0xc4, 0x7a, 0x42, 0xa5, @@ -675,7 +677,7 @@ mod tests { }]), }, server: ServerInfoV3 { - pg_version: 14, + pg_version: PgVersionId::from(PgMajorVersion::PG14), 
system_id: 0x1234567887654321, tenant_id, timeline_id, @@ -695,7 +697,7 @@ mod tests { let expected = [ 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, + 0x00, 0x00, 0x00, 0x00, 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x66, 0x30, 0x34, 0x38, 0x30, 0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37, 0x32, 0x33, 0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36, @@ -731,7 +733,7 @@ mod tests { }]), }, server: ServerInfo { - pg_version: 14, + pg_version: PgVersionId::from(PgMajorVersion::PG14), system_id: 0x1234567887654321, wal_seg_size: 0x12345678, }, @@ -765,7 +767,7 @@ mod tests { 0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x0e, 0x00, 0x00, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56, + 0xE0, 0x22, 0x02, 0x00, 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x34, 0x37, 0x61, 0x34, 0x32, 0x61, 0x35, 0x30, 0x66, 0x34, 0x34, 0x65, 0x35, 0x35, 0x33, 0x65, 0x39, 0x61, 0x35, 0x32, 0x61, 0x34, 0x32, 0x36, 0x36, 0x65, 0x64, 0x32, 0x64, 0x31, 0x31, diff --git a/safekeeper/src/handler.rs b/safekeeper/src/handler.rs index b54bee8bfb..5e7f1d8758 100644 --- a/safekeeper/src/handler.rs +++ b/safekeeper/src/handler.rs @@ -73,7 +73,7 @@ fn parse_cmd(cmd: &str) -> anyhow::Result { let re = Regex::new(r"START_WAL_PUSH(\s+?\((.*)\))?").unwrap(); let caps = re .captures(cmd) - .context(format!("failed to parse START_WAL_PUSH command {}", cmd))?; + .context(format!("failed to parse START_WAL_PUSH command {cmd}"))?; // capture () content let options = caps.get(2).map(|m| m.as_str()).unwrap_or(""); // default values @@ -85,24 +85,20 @@ fn parse_cmd(cmd: &str) -> anyhow::Result { } let mut kvit = kvstr.split_whitespace(); let key = kvit.next().context(format!( - "failed to parse key in kv {} in command {}", - kvstr, cmd + "failed to parse key in kv {kvstr} in command {cmd}" ))?; let value = kvit.next().context(format!( - "failed to parse value in kv {} in command {}", - kvstr, cmd + "failed to parse value in kv {kvstr} in command {cmd}" ))?; let value_trimmed = value.trim_matches('\''); if key == "proto_version" { proto_version = value_trimmed.parse::().context(format!( - "failed to parse proto_version value {} in command {}", - value, cmd + "failed to parse proto_version value {value} in command {cmd}" ))?; } if key == "allow_timeline_creation" { allow_timeline_creation = value_trimmed.parse::().context(format!( - "failed to parse allow_timeline_creation value {} in command {}", - value, cmd + "failed to parse allow_timeline_creation value {value} in command {cmd}" ))?; } } @@ -118,7 +114,7 @@ fn parse_cmd(cmd: &str) -> anyhow::Result { .unwrap(); let caps = re .captures(cmd) - .context(format!("failed to parse START_REPLICATION command {}", cmd))?; + .context(format!("failed to parse START_REPLICATION command {cmd}"))?; let start_lsn = Lsn::from_str(&caps[1]).context("parse start LSN from START_REPLICATION command")?; let term = if let Some(m) = caps.get(2) { diff --git 
a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index 886cac869d..4d15fc9de3 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -9,6 +9,7 @@ use anyhow::{Context, Result, bail}; use byteorder::{LittleEndian, ReadBytesExt}; use bytes::{Buf, BufMut, Bytes, BytesMut}; use postgres_ffi::{MAX_SEND_SIZE, TimeLineID}; +use postgres_versioninfo::{PgMajorVersion, PgVersionId}; use pq_proto::SystemId; use safekeeper_api::membership::{ INVALID_GENERATION, MemberSet, SafekeeperGeneration as Generation, SafekeeperId, @@ -29,7 +30,7 @@ use crate::{control_file, wal_storage}; pub const SK_PROTO_VERSION_2: u32 = 2; pub const SK_PROTO_VERSION_3: u32 = 3; -pub const UNKNOWN_SERVER_VERSION: u32 = 0; +pub const UNKNOWN_SERVER_VERSION: PgVersionId = PgVersionId::UNKNOWN; #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct TermLsn { @@ -64,10 +65,10 @@ impl TermHistory { for i in 0..n_entries { let term = bytes .get_u64_f() - .with_context(|| format!("TermHistory pos {} misses term", i))?; + .with_context(|| format!("TermHistory pos {i} misses term"))?; let lsn = bytes .get_u64_f() - .with_context(|| format!("TermHistory pos {} misses lsn", i))? + .with_context(|| format!("TermHistory pos {i} misses lsn"))? .into(); res.push(TermLsn { term, lsn }) } @@ -121,9 +122,7 @@ impl TermHistory { if let Some(sk_th_last) = sk_th.last() { assert!( sk_th_last.lsn <= sk_wal_end, - "safekeeper term history end {:?} LSN is higher than WAL end {:?}", - sk_th_last, - sk_wal_end + "safekeeper term history end {sk_th_last:?} LSN is higher than WAL end {sk_wal_end:?}" ); } @@ -220,7 +219,7 @@ pub struct ProposerGreeting { pub timeline_id: TimelineId, pub mconf: membership::Configuration, /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub system_id: SystemId, pub wal_seg_size: u32, } @@ -231,7 +230,7 @@ pub struct ProposerGreetingV2 { /// proposer-acceptor protocol version pub protocol_version: u32, /// Postgres server version - pub pg_version: u32, + pub pg_version: PgVersionId, pub proposer_id: PgUuid, pub system_id: SystemId, pub timeline_id: TimelineId, @@ -438,11 +437,11 @@ impl ProposerAcceptorMessage { for i in 0..members_len { let id = buf .get_u64_f() - .with_context(|| format!("reading member {} node_id", i))?; - let host = Self::get_cstr(buf).with_context(|| format!("reading member {} host", i))?; + .with_context(|| format!("reading member {i} node_id"))?; + let host = Self::get_cstr(buf).with_context(|| format!("reading member {i} host"))?; let pg_port = buf .get_u16_f() - .with_context(|| format!("reading member {} port", i))?; + .with_context(|| format!("reading member {i} port"))?; let sk = SafekeeperId { id: NodeId(id), host, @@ -463,12 +462,12 @@ impl ProposerAcceptorMessage { for i in 0..new_members_len { let id = buf .get_u64_f() - .with_context(|| format!("reading new member {} node_id", i))?; - let host = Self::get_cstr(buf) - .with_context(|| format!("reading new member {} host", i))?; + .with_context(|| format!("reading new member {i} node_id"))?; + let host = + Self::get_cstr(buf).with_context(|| format!("reading new member {i} host"))?; let pg_port = buf .get_u16_f() - .with_context(|| format!("reading new member {} port", i))?; + .with_context(|| format!("reading new member {i} port"))?; let sk = SafekeeperId { id: NodeId(id), host, @@ -513,7 +512,7 @@ impl ProposerAcceptorMessage { tenant_id, timeline_id, mconf, - pg_version, + pg_version: 
PgVersionId::from_full_pg_version(pg_version), system_id, wal_seg_size, }; @@ -963,7 +962,8 @@ where * because safekeepers parse WAL headers and the format * may change between versions. */ - if msg.pg_version / 10000 != self.state.server.pg_version / 10000 + if PgMajorVersion::try_from(msg.pg_version)? + != PgMajorVersion::try_from(self.state.server.pg_version)? && self.state.server.pg_version != UNKNOWN_SERVER_VERSION { bail!( @@ -1508,7 +1508,7 @@ mod tests { let mut vote_resp = sk.process_msg(&vote_request).await; match vote_resp.unwrap() { Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(resp.vote_given), - r => panic!("unexpected response: {:?}", r), + r => panic!("unexpected response: {r:?}"), } // reboot... @@ -1523,7 +1523,7 @@ mod tests { vote_resp = sk.process_msg(&vote_request).await; match vote_resp.unwrap() { Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(!resp.vote_given), - r => panic!("unexpected response: {:?}", r), + r => panic!("unexpected response: {r:?}"), } } @@ -1750,7 +1750,7 @@ mod tests { }]), }, server: ServerInfo { - pg_version: 14, + pg_version: PgVersionId::from_full_pg_version(140000), system_id: 0x1234567887654321, wal_seg_size: 0x12345678, }, diff --git a/safekeeper/src/send_interpreted_wal.rs b/safekeeper/src/send_interpreted_wal.rs index 2b1fd7b854..2192f5eab4 100644 --- a/safekeeper/src/send_interpreted_wal.rs +++ b/safekeeper/src/send_interpreted_wal.rs @@ -8,8 +8,8 @@ use futures::StreamExt; use futures::future::Either; use pageserver_api::shard::ShardIdentity; use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend}; -use postgres_ffi::get_current_timestamp; use postgres_ffi::waldecoder::{WalDecodeError, WalStreamDecoder}; +use postgres_ffi::{PgMajorVersion, get_current_timestamp}; use pq_proto::{BeMessage, InterpretedWalRecordsBody, WalSndKeepAlive}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::sync::mpsc::error::SendError; @@ -78,7 +78,7 @@ pub(crate) struct InterpretedWalReader { shard_senders: HashMap>, shard_notification_rx: Option>, state: Arc>, - pg_version: u32, + pg_version: PgMajorVersion, } /// A handle for [`InterpretedWalReader`] which allows for interacting with it @@ -258,7 +258,7 @@ impl InterpretedWalReader { start_pos: Lsn, tx: tokio::sync::mpsc::Sender, shard: ShardIdentity, - pg_version: u32, + pg_version: PgMajorVersion, appname: &Option, ) -> InterpretedWalReaderHandle { let state = Arc::new(std::sync::RwLock::new(InterpretedWalReaderState::Running { @@ -322,7 +322,7 @@ impl InterpretedWalReader { start_pos: Lsn, tx: tokio::sync::mpsc::Sender, shard: ShardIdentity, - pg_version: u32, + pg_version: PgMajorVersion, shard_notification_rx: Option< tokio::sync::mpsc::UnboundedReceiver, >, @@ -718,7 +718,7 @@ mod tests { use std::time::Duration; use pageserver_api::shard::{ShardIdentity, ShardStripeSize}; - use postgres_ffi::MAX_SEND_SIZE; + use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion}; use tokio::sync::mpsc::error::TryRecvError; use utils::id::{NodeId, TenantTimelineId}; use utils::lsn::Lsn; @@ -734,7 +734,7 @@ mod tests { const SIZE: usize = 8 * 1024; const MSG_COUNT: usize = 200; - const PG_VERSION: u32 = 17; + const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; const SHARD_COUNT: u8 = 2; let start_lsn = Lsn::from_str("0/149FD18").unwrap(); @@ -876,7 +876,7 @@ mod tests { const SIZE: usize = 8 * 1024; const MSG_COUNT: usize = 200; - const PG_VERSION: u32 = 17; + const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; const SHARD_COUNT: u8 = 2; let start_lsn = 
Lsn::from_str("0/149FD18").unwrap(); @@ -1025,7 +1025,7 @@ mod tests { const SIZE: usize = 64 * 1024; const MSG_COUNT: usize = 10; - const PG_VERSION: u32 = 17; + const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; const SHARD_COUNT: u8 = 2; const WAL_READER_BATCH_SIZE: usize = 8192; @@ -1148,7 +1148,7 @@ mod tests { const SIZE: usize = 8 * 1024; const MSG_COUNT: usize = 10; - const PG_VERSION: u32 = 17; + const PG_VERSION: PgMajorVersion = PgMajorVersion::PG17; let start_lsn = Lsn::from_str("0/149FD18").unwrap(); let env = Env::new(true).unwrap(); diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index 05f827494e..177e759db5 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -12,7 +12,7 @@ use futures::FutureExt; use itertools::Itertools; use parking_lot::Mutex; use postgres_backend::{CopyStreamHandlerEnd, PostgresBackend, PostgresBackendReader, QueryError}; -use postgres_ffi::{MAX_SEND_SIZE, TimestampTz, get_current_timestamp}; +use postgres_ffi::{MAX_SEND_SIZE, PgMajorVersion, TimestampTz, get_current_timestamp}; use pq_proto::{BeMessage, WalSndKeepAlive, XLogDataBody}; use safekeeper_api::Term; use safekeeper_api::models::{ @@ -559,7 +559,9 @@ impl SafekeeperPostgresHandler { format, compression, } => { - let pg_version = tli.tli.get_state().await.1.server.pg_version / 10000; + let pg_version = + PgMajorVersion::try_from(tli.tli.get_state().await.1.server.pg_version) + .unwrap(); let end_watch_view = end_watch.view(); let wal_residence_guard = tli.wal_residence_guard().await?; let (tx, rx) = tokio::sync::mpsc::channel::(2); diff --git a/safekeeper/src/state.rs b/safekeeper/src/state.rs index 7533005c35..b6cf73be2e 100644 --- a/safekeeper/src/state.rs +++ b/safekeeper/src/state.rs @@ -7,6 +7,7 @@ use std::time::SystemTime; use anyhow::{Result, bail}; use postgres_ffi::WAL_SEGMENT_SIZE; +use postgres_versioninfo::{PgMajorVersion, PgVersionId}; use safekeeper_api::membership::Configuration; use safekeeper_api::models::{TimelineMembershipSwitchResponse, TimelineTermBumpResponse}; use safekeeper_api::{INITIAL_TERM, ServerInfo, Term}; @@ -149,8 +150,8 @@ impl TimelinePersistentState { &TenantTimelineId::empty(), Configuration::empty(), ServerInfo { - pg_version: 170000, /* Postgres server version (major * 10000) */ - system_id: 0, /* Postgres system identifier */ + pg_version: PgVersionId::from(PgMajorVersion::PG17), + system_id: 0, /* Postgres system identifier */ wal_seg_size: WAL_SEGMENT_SIZE as u32, }, Lsn::INVALID, diff --git a/safekeeper/src/timeline_eviction.rs b/safekeeper/src/timeline_eviction.rs index e817dbf6f9..47b65a579a 100644 --- a/safekeeper/src/timeline_eviction.rs +++ b/safekeeper/src/timeline_eviction.rs @@ -342,7 +342,7 @@ where let bytes_read1 = reader1 .read(&mut buffer1[..bytes_to_read]) .await - .with_context(|| format!("failed to read from reader1 at offset {}", offset))?; + .with_context(|| format!("failed to read from reader1 at offset {offset}"))?; if bytes_read1 == 0 { anyhow::bail!("unexpected EOF from reader1 at offset {}", offset); } @@ -351,10 +351,7 @@ where .read_exact(&mut buffer2[..bytes_read1]) .await .with_context(|| { - format!( - "failed to read {} bytes from reader2 at offset {}", - bytes_read1, offset - ) + format!("failed to read {bytes_read1} bytes from reader2 at offset {offset}") })?; assert!(bytes_read2 == bytes_read1); diff --git a/safekeeper/src/timeline_manager.rs b/safekeeper/src/timeline_manager.rs index 48eda92fed..a68752bfdd 100644 --- a/safekeeper/src/timeline_manager.rs +++ 
b/safekeeper/src/timeline_manager.rs @@ -108,7 +108,7 @@ impl std::fmt::Debug for ManagerCtlMessage { match self { ManagerCtlMessage::GuardRequest(_) => write!(f, "GuardRequest"), ManagerCtlMessage::TryGuardRequest(_) => write!(f, "TryGuardRequest"), - ManagerCtlMessage::GuardDrop(id) => write!(f, "GuardDrop({:?})", id), + ManagerCtlMessage::GuardDrop(id) => write!(f, "GuardDrop({id:?})"), ManagerCtlMessage::BackupPartialReset(_) => write!(f, "BackupPartialReset"), } } diff --git a/safekeeper/src/timelines_global_map.rs b/safekeeper/src/timelines_global_map.rs index 6e41ada1b3..a81a7298a9 100644 --- a/safekeeper/src/timelines_global_map.rs +++ b/safekeeper/src/timelines_global_map.rs @@ -147,7 +147,7 @@ impl GlobalTimelines { }; let mut tenant_count = 0; for tenants_dir_entry in std::fs::read_dir(&tenants_dir) - .with_context(|| format!("failed to list tenants dir {}", tenants_dir))? + .with_context(|| format!("failed to list tenants dir {tenants_dir}"))? { match &tenants_dir_entry { Ok(tenants_dir_entry) => { @@ -188,7 +188,7 @@ impl GlobalTimelines { let timelines_dir = get_tenant_dir(&conf, &tenant_id); for timelines_dir_entry in std::fs::read_dir(&timelines_dir) - .with_context(|| format!("failed to list timelines dir {}", timelines_dir))? + .with_context(|| format!("failed to list timelines dir {timelines_dir}"))? { match &timelines_dir_entry { Ok(timeline_dir_entry) => { diff --git a/safekeeper/src/wal_backup_partial.rs b/safekeeper/src/wal_backup_partial.rs index fe0f1b3607..cdf68262dd 100644 --- a/safekeeper/src/wal_backup_partial.rs +++ b/safekeeper/src/wal_backup_partial.rs @@ -364,8 +364,7 @@ impl PartialBackup { // there should always be zero or one uploaded segment assert!( new_segments.is_empty(), - "too many uploaded segments: {:?}", - new_segments + "too many uploaded segments: {new_segments:?}" ); } diff --git a/safekeeper/src/wal_storage.rs b/safekeeper/src/wal_storage.rs index 8ba3e7cc47..da00df2dd7 100644 --- a/safekeeper/src/wal_storage.rs +++ b/safekeeper/src/wal_storage.rs @@ -19,6 +19,7 @@ use futures::future::BoxFuture; use postgres_ffi::v14::xlog_utils::{IsPartialXLogFileName, IsXLogFileName, XLogFromFileName}; use postgres_ffi::waldecoder::WalStreamDecoder; use postgres_ffi::{PG_TLI, XLogFileName, XLogSegNo, dispatch_pgversion}; +use postgres_versioninfo::{PgMajorVersion, PgVersionId}; use pq_proto::SystemId; use remote_storage::RemotePath; use std::sync::Arc; @@ -92,7 +93,7 @@ pub struct PhysicalStorage { /// Size of WAL segment in bytes. wal_seg_size: usize, - pg_version: u32, + pg_version: PgVersionId, system_id: u64, /// Written to disk, but possibly still in the cache and not fully persisted. 
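The pg_version changes above replace a bare u32 with postgres_versioninfo's PgVersionId and route major-version checks through PgMajorVersion::try_from instead of dividing by 10000 inline. A rough model of that relationship, using stand-in types rather than the real crate's definitions, shows why the control-file fixtures now encode 140000 (bytes E0 22 02 00) where they previously stored 14:

    // Stand-in definitions (not the real postgres_versioninfo API): a full
    // PgVersionId is major * 10_000, so PG14 becomes 140000 = 0x000222E0,
    // which appears little-endian in the fixtures as E0 22 02 00.
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct PgVersionId(u32);

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub enum PgMajorVersion { PG14, PG15, PG16, PG17 }

    impl PgVersionId {
        // Mirrors the `from_full_pg_version(140000)` style of construction.
        pub fn from_full_pg_version(v: u32) -> Self { PgVersionId(v) }
    }

    impl From<PgMajorVersion> for PgVersionId {
        fn from(major: PgMajorVersion) -> Self {
            let major = match major {
                PgMajorVersion::PG14 => 14,
                PgMajorVersion::PG15 => 15,
                PgMajorVersion::PG16 => 16,
                PgMajorVersion::PG17 => 17,
            };
            PgVersionId(major * 10_000)
        }
    }

    impl TryFrom<PgVersionId> for PgMajorVersion {
        type Error = String;
        fn try_from(v: PgVersionId) -> Result<Self, Self::Error> {
            match v.0 / 10_000 {
                14 => Ok(PgMajorVersion::PG14),
                15 => Ok(PgMajorVersion::PG15),
                16 => Ok(PgMajorVersion::PG16),
                17 => Ok(PgMajorVersion::PG17),
                other => Err(format!("unsupported postgres major version {other}")),
            }
        }
    }

    fn main() {
        let full = PgVersionId::from(PgMajorVersion::PG14);
        assert_eq!(full, PgVersionId::from_full_pg_version(140_000));
        assert_eq!(140_000u32.to_le_bytes(), [0xE0, 0x22, 0x02, 0x00]);
        assert_eq!(PgMajorVersion::try_from(full), Ok(PgMajorVersion::PG14));
    }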
@@ -180,7 +181,7 @@ impl PhysicalStorage { let write_lsn = if state.commit_lsn == Lsn(0) { Lsn(0) } else { - let version = state.server.pg_version / 10000; + let version = PgMajorVersion::try_from(state.server.pg_version).unwrap(); dispatch_pgversion!( version, @@ -226,7 +227,10 @@ impl PhysicalStorage { write_record_lsn: write_lsn, flush_lsn, flush_record_lsn: flush_lsn, - decoder: WalStreamDecoder::new(write_lsn, state.server.pg_version / 10000), + decoder: WalStreamDecoder::new( + write_lsn, + PgMajorVersion::try_from(state.server.pg_version).unwrap(), + ), file: None, pending_wal_truncation: true, }) @@ -408,7 +412,7 @@ impl Storage for PhysicalStorage { let segno = init_lsn.segment_number(self.wal_seg_size); let (mut file, _) = self.open_or_create(segno).await?; - let major_pg_version = self.pg_version / 10000; + let major_pg_version = PgMajorVersion::try_from(self.pg_version).unwrap(); let wal_seg = postgres_ffi::generate_wal_segment(segno, self.system_id, major_pg_version, init_lsn)?; file.seek(SeekFrom::Start(0)).await?; @@ -654,7 +658,7 @@ pub struct WalReader { // pos is in the same segment as timeline_start_lsn. timeline_start_lsn: Lsn, // integer version number of PostgreSQL, e.g. 14; 15; 16 - pg_version: u32, + pg_version: PgMajorVersion, system_id: SystemId, timeline_start_segment: Option, } @@ -697,7 +701,7 @@ impl WalReader { wal_backup, local_start_lsn: state.local_start_lsn, timeline_start_lsn: state.timeline_start_lsn, - pg_version: state.server.pg_version / 10000, + pg_version: PgMajorVersion::try_from(state.server.pg_version).unwrap(), system_id: state.server.system_id, timeline_start_segment: None, }) @@ -841,7 +845,7 @@ pub(crate) async fn open_wal_file( // If that failed, try it without the .partial extension. let pf = tokio::fs::File::open(&wal_file_path) .await - .with_context(|| format!("failed to open WAL file {:#}", wal_file_path)) + .with_context(|| format!("failed to open WAL file {wal_file_path:#}")) .map_err(|e| { warn!("{}", e); e diff --git a/safekeeper/tests/walproposer_sim/log.rs b/safekeeper/tests/walproposer_sim/log.rs index e2ba3282ca..cecbc859e6 100644 --- a/safekeeper/tests/walproposer_sim/log.rs +++ b/safekeeper/tests/walproposer_sim/log.rs @@ -33,7 +33,7 @@ impl FormatTime for SimClock { if let Some(clock) = clock.as_ref() { let now = clock.now(); - write!(w, "[{}]", now) + write!(w, "[{now}]") } else { write!(w, "[?]") } diff --git a/safekeeper/tests/walproposer_sim/safekeeper.rs b/safekeeper/tests/walproposer_sim/safekeeper.rs index 5fb29683f2..1fdf8e4949 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper.rs @@ -257,7 +257,7 @@ pub fn run_server(os: NodeOs, disk: Arc) -> Result<()> { let estr = e.to_string(); if !estr.contains("finished processing START_REPLICATION") { warn!("conn {:?} error: {:?}", connection_id, e); - panic!("unexpected error at safekeeper: {:#}", e); + panic!("unexpected error at safekeeper: {e:#}"); } conns.remove(&connection_id); break; diff --git a/safekeeper/tests/walproposer_sim/safekeeper_disk.rs b/safekeeper/tests/walproposer_sim/safekeeper_disk.rs index 94a849b5f0..029f8fab0a 100644 --- a/safekeeper/tests/walproposer_sim/safekeeper_disk.rs +++ b/safekeeper/tests/walproposer_sim/safekeeper_disk.rs @@ -7,8 +7,8 @@ use anyhow::Result; use bytes::{Buf, BytesMut}; use futures::future::BoxFuture; use parking_lot::Mutex; -use postgres_ffi::XLogSegNo; use postgres_ffi::waldecoder::WalStreamDecoder; +use postgres_ffi::{PgMajorVersion, XLogSegNo}; use 
safekeeper::metrics::WalStorageMetrics; use safekeeper::state::TimelinePersistentState; use safekeeper::{control_file, wal_storage}; @@ -142,7 +142,7 @@ impl DiskWALStorage { write_lsn, write_record_lsn: flush_lsn, flush_record_lsn: flush_lsn, - decoder: WalStreamDecoder::new(flush_lsn, 16), + decoder: WalStreamDecoder::new(flush_lsn, PgMajorVersion::PG16), unflushed_bytes: BytesMut::new(), disk, }) @@ -151,7 +151,7 @@ impl DiskWALStorage { fn find_end_of_wal(disk: Arc, start_lsn: Lsn) -> Result { let mut buf = [0; 8192]; let mut pos = start_lsn.0; - let mut decoder = WalStreamDecoder::new(start_lsn, 16); + let mut decoder = WalStreamDecoder::new(start_lsn, PgMajorVersion::PG16); let mut result = start_lsn; loop { disk.wal.lock().read(pos, &mut buf); @@ -204,7 +204,7 @@ impl wal_storage::Storage for DiskWALStorage { self.decoder.available(), startpos, ); - self.decoder = WalStreamDecoder::new(startpos, 16); + self.decoder = WalStreamDecoder::new(startpos, PgMajorVersion::PG16); } self.decoder.feed_bytes(buf); loop { @@ -242,7 +242,7 @@ impl wal_storage::Storage for DiskWALStorage { self.write_record_lsn = end_pos; self.flush_record_lsn = end_pos; self.unflushed_bytes.clear(); - self.decoder = WalStreamDecoder::new(end_pos, 16); + self.decoder = WalStreamDecoder::new(end_pos, PgMajorVersion::PG16); Ok(()) } diff --git a/safekeeper/tests/walproposer_sim/simulation.rs b/safekeeper/tests/walproposer_sim/simulation.rs index 70fecfbe22..edd3bf2d9e 100644 --- a/safekeeper/tests/walproposer_sim/simulation.rs +++ b/safekeeper/tests/walproposer_sim/simulation.rs @@ -217,7 +217,7 @@ impl TestConfig { ]; let server_ids = [servers[0].id, servers[1].id, servers[2].id]; - let safekeepers_addrs = server_ids.map(|id| format!("node:{}", id)).to_vec(); + let safekeepers_addrs = server_ids.map(|id| format!("node:{id}")).to_vec(); let ttid = TenantTimelineId::generate(); diff --git a/safekeeper/tests/walproposer_sim/walproposer_api.rs b/safekeeper/tests/walproposer_sim/walproposer_api.rs index c2604c4bdc..29b361db7e 100644 --- a/safekeeper/tests/walproposer_sim/walproposer_api.rs +++ b/safekeeper/tests/walproposer_sim/walproposer_api.rs @@ -523,7 +523,7 @@ impl ApiImpl for SimulationApi { // Voting bug when safekeeper disconnects after voting executor::exit(1, msg.to_owned()); } - panic!("unknown FATAL error from walproposer: {}", msg); + panic!("unknown FATAL error from walproposer: {msg}"); } } @@ -544,10 +544,7 @@ impl ApiImpl for SimulationApi { } } - let msg = format!( - "prop_elected;{};{};{};{}", - prop_lsn, prop_term, prev_lsn, prev_term - ); + let msg = format!("prop_elected;{prop_lsn};{prop_term};{prev_lsn};{prev_term}"); debug!(msg); self.os.log_event(msg); diff --git a/scripts/ingest_perf_test_result.py b/scripts/ingest_perf_test_result.py index 804f8a3cde..898e1ee954 100644 --- a/scripts/ingest_perf_test_result.py +++ b/scripts/ingest_perf_test_result.py @@ -26,7 +26,7 @@ CREATE TABLE IF NOT EXISTS perf_test_results ( metric_unit VARCHAR(10), metric_report_type TEXT, recorded_at_timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(), - labels JSONB with default '{}' + labels JSONB DEFAULT '{}'::jsonb ) """ diff --git a/scripts/proxy_bench_results_ingest.py b/scripts/proxy_bench_results_ingest.py new file mode 100644 index 0000000000..475d053ed2 --- /dev/null +++ b/scripts/proxy_bench_results_ingest.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python3 + +import argparse +import json +import time +from typing import Any, TypedDict, cast + +import requests + +PROMETHEUS_URL = "http://localhost:9090" 
+SAMPLE_INTERVAL = 1 # seconds + +DEFAULT_REVISION = "unknown" +DEFAULT_PLATFORM = "unknown" +DEFAULT_SUIT = "proxy_bench" + + +class MetricConfig(TypedDict, total=False): + name: str + promql: str + unit: str + report: str + labels: dict[str, str] + is_vector: bool + label_field: str + + +METRICS: list[MetricConfig] = [ + { + "name": "latency_p99", + "promql": 'histogram_quantile(0.99, sum(rate(proxy_compute_connection_latency_seconds_bucket{outcome="success", excluded="client_and_cplane"}[5m])) by (le))', + "unit": "s", + "report": "LOWER_IS_BETTER", + "labels": {}, + }, + { + "name": "error_rate", + "promql": 'sum(rate(proxy_errors_total{type!~"user|clientdisconnect|quota"}[5m])) / sum(rate(proxy_accepted_connections_total[5m]))', + "unit": "", + "report": "LOWER_IS_BETTER", + "labels": {}, + }, + { + "name": "max_memory_kb", + "promql": "max(libmetrics_maxrss_kb)", + "unit": "kB", + "report": "LOWER_IS_BETTER", + "labels": {}, + }, + { + "name": "jemalloc_active_bytes", + "promql": "sum(jemalloc_active_bytes)", + "unit": "bytes", + "report": "LOWER_IS_BETTER", + "labels": {}, + }, + { + "name": "open_connections", + "promql": "sum by (protocol) (proxy_opened_client_connections_total - proxy_closed_client_connections_total)", + "unit": "", + "report": "HIGHER_IS_BETTER", + "labels": {}, + "is_vector": True, + "label_field": "protocol", + }, +] + + +class PrometheusMetric(TypedDict): + metric: dict[str, str] + value: list[str | float] + + +class PrometheusResult(TypedDict): + result: list[PrometheusMetric] + + +class PrometheusResponse(TypedDict): + data: PrometheusResult + + +def query_prometheus(promql: str) -> PrometheusResponse: + resp = requests.get(f"{PROMETHEUS_URL}/api/v1/query", params={"query": promql}) + resp.raise_for_status() + return cast("PrometheusResponse", resp.json()) + + +def extract_scalar_metric(result_json: PrometheusResponse) -> float | None: + try: + return float(result_json["data"]["result"][0]["value"][1]) + except (IndexError, KeyError, ValueError, TypeError): + return None + + +def extract_vector_metric( + result_json: PrometheusResponse, label_field: str +) -> list[tuple[str | None, float, dict[str, str]]]: + out: list[tuple[str | None, float, dict[str, str]]] = [] + for entry in result_json["data"]["result"]: + try: + value_str = entry["value"][1] + if not isinstance(value_str, (str | float)): + continue + value = float(value_str) + except (IndexError, KeyError, ValueError, TypeError): + continue + labels = entry.get("metric", {}) + label_val = labels.get(label_field, None) + out.append((label_val, value, labels)) + return out + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Collect Prometheus metrics and output in benchmark fixture format" + ) + parser.add_argument("--revision", default=DEFAULT_REVISION) + parser.add_argument("--platform", default=DEFAULT_PLATFORM) + parser.add_argument("--suit", default=DEFAULT_SUIT) + parser.add_argument("--out", default="metrics_benchmarks.json", help="Output JSON file") + parser.add_argument( + "--interval", default=SAMPLE_INTERVAL, type=int, help="Sampling interval (s)" + ) + args = parser.parse_args() + + start_time = int(time.time()) + samples: list[dict[str, Any]] = [] + + print("Collecting metrics (Ctrl+C to stop)...") + try: + while True: + ts = int(time.time()) + for metric in METRICS: + if metric.get("is_vector", False): + # Vector (per-label, e.g. 
per-protocol) + for label_val, value, labels in extract_vector_metric( + query_prometheus(metric["promql"]), metric["label_field"] + ): + entry = { + "name": f"{metric['name']}.{label_val}" + if label_val + else metric["name"], + "value": value, + "unit": metric["unit"], + "report": metric["report"], + "labels": {**metric.get("labels", {}), **labels}, + "timestamp": ts, + } + samples.append(entry) + else: + result = extract_scalar_metric(query_prometheus(metric["promql"])) + if result is not None: + entry = { + "name": metric["name"], + "value": result, + "unit": metric["unit"], + "report": metric["report"], + "labels": metric.get("labels", {}), + "timestamp": ts, + } + samples.append(entry) + time.sleep(args.interval) + except KeyboardInterrupt: + print("Collection stopped.") + + total_duration = int(time.time()) - start_time + + # Compose output + out = { + "revision": args.revision, + "platform": args.platform, + "result": [ + { + "suit": args.suit, + "total_duration": total_duration, + "data": samples, + } + ], + } + + with open(args.out, "w") as f: + json.dump(out, f, indent=2) + print(f"Wrote metrics in fixture format to {args.out}") + + +if __name__ == "__main__": + main() diff --git a/storage_broker/benches/rps.rs b/storage_broker/benches/rps.rs index 9953ccfa91..5f3e594687 100644 --- a/storage_broker/benches/rps.rs +++ b/storage_broker/benches/rps.rs @@ -161,7 +161,7 @@ async fn publish(client: Option, n_keys: u64) { } }; let response = client.publish_safekeeper_info(Request::new(outbound)).await; - println!("pub response is {:?}", response); + println!("pub response is {response:?}"); } #[tokio::main] diff --git a/storage_broker/build.rs b/storage_broker/build.rs index 08dadeacd5..77c441dddd 100644 --- a/storage_broker/build.rs +++ b/storage_broker/build.rs @@ -6,6 +6,6 @@ fn main() -> Result<(), Box> { // the build then. Anyway, per cargo docs build script shouldn't output to // anywhere but $OUT_DIR. 
tonic_build::compile_protos("proto/broker.proto") - .unwrap_or_else(|e| panic!("failed to compile protos {:?}", e)); + .unwrap_or_else(|e| panic!("failed to compile protos {e:?}")); Ok(()) } diff --git a/storage_broker/src/lib.rs b/storage_broker/src/lib.rs index 149656a191..7d8b57380f 100644 --- a/storage_broker/src/lib.rs +++ b/storage_broker/src/lib.rs @@ -86,13 +86,9 @@ impl BrokerClientChannel { #[allow(clippy::result_large_err, reason = "TODO")] pub fn parse_proto_ttid(proto_ttid: &ProtoTenantTimelineId) -> Result { let tenant_id = TenantId::from_slice(&proto_ttid.tenant_id) - .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {}", e)))?; - let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id).map_err(|e| { - Status::new( - Code::InvalidArgument, - format!("malformed timeline_id: {}", e), - ) - })?; + .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {e}")))?; + let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id) + .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed timeline_id: {e}")))?; Ok(TenantTimelineId { tenant_id, timeline_id, diff --git a/storage_controller/Cargo.toml b/storage_controller/Cargo.toml index c41e174d9d..3a0806b3b2 100644 --- a/storage_controller/Cargo.toml +++ b/storage_controller/Cargo.toml @@ -27,6 +27,7 @@ governor.workspace = true hex.workspace = true hyper0.workspace = true humantime.workspace = true +humantime-serde.workspace = true itertools.workspace = true json-structural-diff.workspace = true lasso.workspace = true @@ -34,6 +35,7 @@ once_cell.workspace = true pageserver_api.workspace = true pageserver_client.workspace = true postgres_connection.workspace = true +posthog_client_lite.workspace = true rand.workspace = true reqwest = { workspace = true, features = ["stream"] } routerify.workspace = true diff --git a/storage_controller/src/compute_hook.rs b/storage_controller/src/compute_hook.rs index a4482a4dac..0b5569b3d6 100644 --- a/storage_controller/src/compute_hook.rs +++ b/storage_controller/src/compute_hook.rs @@ -370,7 +370,7 @@ impl ComputeHook { let authorization_header = config .control_plane_jwt_token .clone() - .map(|jwt| format!("Bearer {}", jwt)); + .map(|jwt| format!("Bearer {jwt}")); let mut client = reqwest::ClientBuilder::new().timeout(NOTIFY_REQUEST_TIMEOUT); for cert in &config.ssl_ca_certs { diff --git a/storage_controller/src/drain_utils.rs b/storage_controller/src/drain_utils.rs index bd4b8ba38f..0dae7b8147 100644 --- a/storage_controller/src/drain_utils.rs +++ b/storage_controller/src/drain_utils.rs @@ -62,7 +62,7 @@ pub(crate) fn validate_node_state( nodes: Arc>, ) -> Result<(), OperationError> { let node = nodes.get(node_id).ok_or(OperationError::NodeStateChanged( - format!("node {} was removed", node_id).into(), + format!("node {node_id} was removed").into(), ))?; let current_policy = node.get_scheduling(); @@ -70,7 +70,7 @@ pub(crate) fn validate_node_state( // TODO(vlad): maybe cancel pending reconciles before erroring out. 
need to think // about it return Err(OperationError::NodeStateChanged( - format!("node {} changed state to {:?}", node_id, current_policy).into(), + format!("node {node_id} changed state to {current_policy:?}").into(), )); } @@ -145,7 +145,7 @@ impl TenantShardDrain { if !nodes.contains_key(&destination) { return Err(OperationError::NodeStateChanged( - format!("node {} was removed", destination).into(), + format!("node {destination} was removed").into(), )); } diff --git a/storage_controller/src/http.rs b/storage_controller/src/http.rs index 346595aa11..a7e86b5224 100644 --- a/storage_controller/src/http.rs +++ b/storage_controller/src/http.rs @@ -721,9 +721,9 @@ async fn handle_tenant_timeline_passthrough( // Callers will always pass an unsharded tenant ID. Before proxying, we must // rewrite this to a shard-aware shard zero ID. - let path = format!("{}", path); + let path = format!("{path}"); let tenant_str = tenant_or_shard_id.tenant_id.to_string(); - let tenant_shard_str = format!("{}", tenant_shard_id); + let tenant_shard_str = format!("{tenant_shard_id}"); let path = path.replace(&tenant_str, &tenant_shard_str); let latency = &METRICS_REGISTRY @@ -1539,7 +1539,7 @@ async fn handle_ready(req: Request) -> Result, ApiError> { impl From for ApiError { fn from(value: ReconcileError) -> Self { - ApiError::Conflict(format!("Reconciliation error: {}", value)) + ApiError::Conflict(format!("Reconciliation error: {value}")) } } diff --git a/storage_controller/src/main.rs b/storage_controller/src/main.rs index 2eea2f9d10..296a98e620 100644 --- a/storage_controller/src/main.rs +++ b/storage_controller/src/main.rs @@ -5,17 +5,22 @@ use std::time::Duration; use anyhow::{Context, anyhow}; use camino::Utf8PathBuf; + +#[cfg(feature = "testing")] +use clap::ArgAction; use clap::Parser; use futures::future::OptionFuture; use http_utils::tls_certs::ReloadingCertificateResolver; use hyper0::Uri; use metrics::BuildInfo; use metrics::launch_timestamp::LaunchTimestamp; +use pageserver_api::config::PostHogConfig; use reqwest::Certificate; use storage_controller::http::make_router; use storage_controller::metrics::preinitialize_metrics; use storage_controller::persistence::Persistence; use storage_controller::service::chaos_injector::ChaosInjector; +use storage_controller::service::feature_flag::FeatureFlagService; use storage_controller::service::{ Config, HEARTBEAT_INTERVAL_DEFAULT, LONG_RECONCILE_THRESHOLD_DEFAULT, MAX_OFFLINE_INTERVAL_DEFAULT, MAX_WARMING_UP_INTERVAL_DEFAULT, @@ -207,6 +212,19 @@ struct Cli { /// the compute notification directly (instead of via control plane). #[arg(long, default_value = "false")] use_local_compute_notifications: bool, + + /// Number of safekeepers to choose for a timeline when creating it. + /// Safekeepers will be choosen from different availability zones. + /// This option exists primarily for testing purposes. + #[arg(long, default_value = "3", value_parser = clap::value_parser!(i64).range(1..))] + timeline_safekeeper_count: i64, + + /// When set, actively checks and initiates heatmap downloads/uploads during reconciliation. + /// This speed up migrations by avoiding the default wait for the heatmap download interval. + /// Primarily useful for testing to reduce test execution time. 
+ #[cfg(feature = "testing")] + #[arg(long, default_value = "true", action=ArgAction::Set)] + kick_secondary_downloads: bool, } enum StrictMode { @@ -236,6 +254,8 @@ struct Secrets { peer_jwt_token: Option, } +const POSTHOG_CONFIG_ENV: &str = "POSTHOG_CONFIG"; + impl Secrets { const DATABASE_URL_ENV: &'static str = "DATABASE_URL"; const PAGESERVER_JWT_TOKEN_ENV: &'static str = "PAGESERVER_JWT_TOKEN"; @@ -371,6 +391,11 @@ async fn async_main() -> anyhow::Result<()> { StrictMode::Strict if args.use_local_compute_notifications => { anyhow::bail!("`--use-local-compute-notifications` is only permitted in `--dev` mode"); } + StrictMode::Strict if args.timeline_safekeeper_count < 3 => { + anyhow::bail!( + "Running with less than 3 safekeepers per timeline is only permitted in `--dev` mode" + ); + } StrictMode::Strict => { tracing::info!("Starting in strict mode: configuration is OK.") } @@ -388,6 +413,18 @@ async fn async_main() -> anyhow::Result<()> { None => Vec::new(), }; + let posthog_config = if let Ok(json) = std::env::var(POSTHOG_CONFIG_ENV) { + let res: Result = serde_json::from_str(&json); + if let Ok(config) = res { + Some(config) + } else { + tracing::warn!("Invalid posthog config: {json}"); + None + } + } else { + None + }; + let config = Config { pageserver_jwt_token: secrets.pageserver_jwt_token, safekeeper_jwt_token: secrets.safekeeper_jwt_token, @@ -433,6 +470,10 @@ async fn async_main() -> anyhow::Result<()> { ssl_ca_certs, timelines_onto_safekeepers: args.timelines_onto_safekeepers, use_local_compute_notifications: args.use_local_compute_notifications, + timeline_safekeeper_count: args.timeline_safekeeper_count, + posthog_config: posthog_config.clone(), + #[cfg(feature = "testing")] + kick_secondary_downloads: args.kick_secondary_downloads, }; // Validate that we can connect to the database @@ -513,6 +554,23 @@ async fn async_main() -> anyhow::Result<()> { ) }); + let feature_flag_task = if let Some(posthog_config) = posthog_config { + let service = service.clone(); + let cancel = CancellationToken::new(); + let cancel_bg = cancel.clone(); + let task = tokio::task::spawn( + async move { + let feature_flag_service = FeatureFlagService::new(service, posthog_config); + let feature_flag_service = Arc::new(feature_flag_service); + feature_flag_service.run(cancel_bg).await + } + .instrument(tracing::info_span!("feature_flag_service")), + ); + Some((task, cancel)) + } else { + None + }; + // Wait until we receive a signal let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt())?; let mut sigquit = tokio::signal::unix::signal(SignalKind::quit())?; @@ -560,6 +618,12 @@ async fn async_main() -> anyhow::Result<()> { chaos_jh.await.ok(); } + // If we were running the feature flag service, stop that so that we're not calling into Service while it shuts down + if let Some((feature_flag_task, feature_flag_cancel)) = feature_flag_task { + feature_flag_cancel.cancel(); + feature_flag_task.await.ok(); + } + service.shutdown().await; tracing::info!("Service shutdown complete"); diff --git a/storage_controller/src/pageserver_client.rs b/storage_controller/src/pageserver_client.rs index 817409e112..d6fe173eb3 100644 --- a/storage_controller/src/pageserver_client.rs +++ b/storage_controller/src/pageserver_client.rs @@ -376,4 +376,13 @@ impl PageserverClient { .await ) } + + pub(crate) async fn update_feature_flag_spec(&self, spec: String) -> Result<()> { + measured_request!( + "update_feature_flag_spec", + crate::metrics::Method::Post, + &self.node_id_label, + 
self.inner.update_feature_flag_spec(spec).await + ) + } } diff --git a/storage_controller/src/persistence.rs b/storage_controller/src/persistence.rs index 4300dd32a9..2948e9019f 100644 --- a/storage_controller/src/persistence.rs +++ b/storage_controller/src/persistence.rs @@ -500,15 +500,13 @@ impl Persistence { if let Some(np) = node_to_delete { let lc = NodeLifecycle::from_str(&np.lifecycle).map_err(|e| { DatabaseError::Logical(format!( - "Node {} has invalid lifecycle: {}", - del_node_id, e + "Node {del_node_id} has invalid lifecycle: {e}" )) })?; if lc != NodeLifecycle::Deleted { return Err(DatabaseError::Logical(format!( - "Node {} was not soft deleted before, cannot hard delete it", - del_node_id + "Node {del_node_id} was not soft deleted before, cannot hard delete it" ))); } @@ -642,8 +640,7 @@ impl Persistence { .await?; if deleted_node > 0 { return Err(DatabaseError::Logical(format!( - "Node {} is marked as deleted, re-attach is not allowed", - input_node_id + "Node {input_node_id} is marked as deleted, re-attach is not allowed" ))); } @@ -1003,7 +1000,7 @@ impl Persistence { .execute(conn).await?; if u8::try_from(updated) .map_err(|_| DatabaseError::Logical( - format!("Overflow existing shard count {} while splitting", updated)) + format!("Overflow existing shard count {updated} while splitting")) )? != old_shard_count.count() { // Perhaps a deletion or another split raced with this attempt to split, mutating // the parent shards that we intend to split. In this case the split request should fail. @@ -1343,8 +1340,7 @@ impl Persistence { if inserted_updated != 1 { return Err(DatabaseError::Logical(format!( - "unexpected number of rows ({})", - inserted_updated + "unexpected number of rows ({inserted_updated})" ))); } @@ -1406,8 +1402,7 @@ impl Persistence { 0 => Ok(false), 1 => Ok(true), _ => Err(DatabaseError::Logical(format!( - "unexpected number of rows ({})", - inserted_updated + "unexpected number of rows ({inserted_updated})" ))), } }) @@ -1476,8 +1471,7 @@ impl Persistence { 0 => Ok(()), 1 => Ok(()), _ => Err(DatabaseError::Logical(format!( - "unexpected number of rows ({})", - updated + "unexpected number of rows ({updated})" ))), } }) @@ -1570,8 +1564,7 @@ impl Persistence { 0 => Ok(false), 1 => Ok(true), _ => Err(DatabaseError::Logical(format!( - "unexpected number of rows ({})", - inserted_updated + "unexpected number of rows ({inserted_updated})" ))), } }) diff --git a/storage_controller/src/reconciler.rs b/storage_controller/src/reconciler.rs index b03a6dae04..92844c9c7b 100644 --- a/storage_controller/src/reconciler.rs +++ b/storage_controller/src/reconciler.rs @@ -856,6 +856,7 @@ impl Reconciler { &self.shard, &self.config, &self.placement_policy, + self.intent.secondary.len(), ); match self.observed.locations.get(&node.get_id()) { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => { @@ -1235,11 +1236,11 @@ pub(crate) fn attached_location_conf( shard: &ShardIdentity, config: &TenantConfig, policy: &PlacementPolicy, + secondary_count: usize, ) -> LocationConfig { let has_secondaries = match policy { - PlacementPolicy::Attached(0) | PlacementPolicy::Detached | PlacementPolicy::Secondary => { - false - } + PlacementPolicy::Detached | PlacementPolicy::Secondary => false, + PlacementPolicy::Attached(0) => secondary_count > 0, PlacementPolicy::Attached(_) => true, }; diff --git a/storage_controller/src/scheduler.rs b/storage_controller/src/scheduler.rs index 3fa25443da..b86b4dfab1 100644 --- a/storage_controller/src/scheduler.rs +++ 
b/storage_controller/src/scheduler.rs @@ -23,7 +23,7 @@ pub enum ScheduleError { impl From for ApiError { fn from(value: ScheduleError) -> Self { - ApiError::Conflict(format!("Scheduling error: {}", value)) + ApiError::Conflict(format!("Scheduling error: {value}")) } } @@ -825,6 +825,7 @@ impl Scheduler { struct AzScore { home_shard_count: usize, scheduleable: bool, + node_count: usize, } let mut azs: HashMap<&AvailabilityZone, AzScore> = HashMap::new(); @@ -832,6 +833,7 @@ impl Scheduler { let az = azs.entry(&node.az).or_default(); az.home_shard_count += node.home_shard_count; az.scheduleable |= matches!(node.may_schedule, MaySchedule::Yes(_)); + az.node_count += 1; } // If any AZs are schedulable, then filter out the non-schedulable ones (i.e. AZs where @@ -840,10 +842,20 @@ impl Scheduler { azs.retain(|_, i| i.scheduleable); } + // We will multiply up shard counts by the max node count for scoring, before dividing + // by per-node max node count, to get a normalized score that doesn't collapse to zero + // when the absolute shard count is less than the node count. + let max_node_count = azs.values().map(|i| i.node_count).max().unwrap_or(0); + // Find the AZ with the lowest number of shards currently allocated Some( azs.into_iter() - .min_by_key(|i| (i.1.home_shard_count, i.0)) + .min_by_key(|i| { + ( + (i.1.home_shard_count * max_node_count) / i.1.node_count, + i.0, + ) + }) .unwrap() .0 .clone(), @@ -891,7 +903,7 @@ impl Scheduler { /// rigorously updating them on every change. pub(crate) fn update_metrics(&self) { for (node_id, node) in &self.nodes { - let node_id_str = format!("{}", node_id); + let node_id_str = format!("{node_id}"); let label_group = NodeLabelGroup { az: &node.az.0, node_id: &node_id_str, @@ -1314,7 +1326,7 @@ mod tests { .map(|(node_id, node)| (node_id, node.home_shard_count)) .collect::>(); node_home_counts.sort_by_key(|i| i.0); - eprintln!("Selected {}, vs nodes {:?}", preferred_az, node_home_counts); + eprintln!("Selected {preferred_az}, vs nodes {node_home_counts:?}"); let tenant_shard_id = TenantShardId { tenant_id: TenantId::generate(), diff --git a/storage_controller/src/service.rs b/storage_controller/src/service.rs index 14c81ccf59..b4dfd01249 100644 --- a/storage_controller/src/service.rs +++ b/storage_controller/src/service.rs @@ -1,5 +1,6 @@ pub mod chaos_injector; mod context_iterator; +pub mod feature_flag; pub(crate) mod safekeeper_reconciler; mod safekeeper_service; @@ -25,6 +26,7 @@ use futures::stream::FuturesUnordered; use http_utils::error::ApiError; use hyper::Uri; use itertools::Itertools; +use pageserver_api::config::PostHogConfig; use pageserver_api::controller_api::{ AvailabilityZone, MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability, NodeRegisterRequest, NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy, @@ -260,7 +262,7 @@ fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError { // Presume errors receiving body are connectivity/availability issues except for decoding errors let src_str = err.source().map(|e| e.to_string()).unwrap_or_default(); ApiError::ResourceUnavailable( - format!("{node} error receiving error body: {err} {}", src_str).into(), + format!("{node} error receiving error body: {err} {src_str}").into(), ) } mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => { @@ -466,6 +468,16 @@ pub struct Config { pub timelines_onto_safekeepers: bool, pub use_local_compute_notifications: bool, + + /// Number of safekeepers to choose for a timeline when creating it. 
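An aside on the `Scheduler::get_az_for_new_tenant` hunk just above: the score is now per-node load rather than the absolute home-shard count (multiplying by `max_node_count` before the integer division so small counts don't collapse to zero). A worked comparison of the old and new ordering, with invented numbers, in Python:

```python
# Two AZs, numbers invented: az1 has fewer nodes and fewer shards in absolute
# terms, but is more heavily loaded per node.
azs = {
    "az1": {"home_shard_count": 10, "node_count": 2},  # 5 shards per node
    "az2": {"home_shard_count": 12, "node_count": 4},  # 3 shards per node
}
max_node_count = max(a["node_count"] for a in azs.values())

# Old scoring: raw shard count -> picks az1 (10 < 12) despite its higher per-node load.
old_pick = min(azs, key=lambda k: (azs[k]["home_shard_count"], k))

# New scoring mirrors the integer arithmetic in scheduler.rs: multiply first,
# then divide by the AZ's node count.
new_pick = min(
    azs,
    key=lambda k: (azs[k]["home_shard_count"] * max_node_count // azs[k]["node_count"], k),
)
assert (old_pick, new_pick) == ("az1", "az2")
```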
+ /// Safekeepers will be choosen from different availability zones. + pub timeline_safekeeper_count: i64, + + /// PostHog integration config + pub posthog_config: Option, + + #[cfg(feature = "testing")] + pub kick_secondary_downloads: bool, } impl From for ApiError { @@ -664,7 +676,7 @@ impl std::fmt::Display for StopReconciliationsReason { Self::ShuttingDown => "Shutting down", Self::SteppingDown => "Stepping down", }; - write!(writer, "{}", s) + write!(writer, "{s}") } } @@ -2060,6 +2072,7 @@ impl Service { &tenant_shard.shard, &tenant_shard.config, &PlacementPolicy::Attached(0), + tenant_shard.intent.get_secondary().len(), )), }, )]); @@ -5270,7 +5283,7 @@ impl Service { shard_params, result .iter() - .map(|s| format!("{:?}", s)) + .map(|s| format!("{s:?}")) .collect::>() .join(",") ); @@ -5601,7 +5614,15 @@ impl Service { for parent_id in parent_ids { let child_ids = parent_id.split(new_shard_count); - let (pageserver, generation, policy, parent_ident, config, preferred_az) = { + let ( + pageserver, + generation, + policy, + parent_ident, + config, + preferred_az, + secondary_count, + ) = { let mut old_state = tenants .remove(&parent_id) .expect("It was present, we just split it"); @@ -5621,6 +5642,7 @@ impl Service { old_state.shard, old_state.config.clone(), old_state.preferred_az().cloned(), + old_state.intent.get_secondary().len(), ) }; @@ -5642,6 +5664,7 @@ impl Service { &child_shard, &config, &policy, + secondary_count, )), }, ); @@ -6183,7 +6206,7 @@ impl Service { }, ) .await - .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?; + .map_err(|e| ApiError::Conflict(format!("Failed to split {parent_id}: {e}")))?; fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict( "failpoint".to_string() @@ -6200,7 +6223,7 @@ impl Service { response .new_shards .iter() - .map(|s| format!("{:?}", s)) + .map(|s| format!("{s:?}")) .collect::>() .join(",") ); @@ -7099,8 +7122,7 @@ impl Service { Ok(()) } else { Err(ApiError::Conflict(format!( - "Node {} is in use, consider using tombstone API first", - node_id + "Node {node_id} is in use, consider using tombstone API first" ))) } } @@ -7650,7 +7672,7 @@ impl Service { if let Some(ongoing) = ongoing_op { return Err(ApiError::PreconditionFailed( - format!("Background operation already ongoing for node: {}", ongoing).into(), + format!("Background operation already ongoing for node: {ongoing}").into(), )); } @@ -7781,7 +7803,7 @@ impl Service { if let Some(ongoing) = ongoing_op { return Err(ApiError::PreconditionFailed( - format!("Background operation already ongoing for node: {}", ongoing).into(), + format!("Background operation already ongoing for node: {ongoing}").into(), )); } @@ -8369,6 +8391,11 @@ impl Service { /// we have this helper to move things along faster. 
#[cfg(feature = "testing")] async fn kick_secondary_download(&self, tenant_shard_id: TenantShardId) { + if !self.config.kick_secondary_downloads { + // No-op if kick_secondary_downloads functionaliuty is not configured + return; + } + let (attached_node, secondaries) = { let locked = self.inner.read().unwrap(); let Some(shard) = locked.tenants.get(&tenant_shard_id) else { @@ -8847,7 +8874,7 @@ impl Service { let nodes = self.inner.read().unwrap().nodes.clone(); let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError( StatusCode::NOT_FOUND, - format!("Node with id {} not found", secondary), + format!("Node with id {secondary} not found"), ))?; match node @@ -8926,8 +8953,7 @@ impl Service { Err(err) => { return Err(OperationError::FinalizeError( format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", - node_id, err + "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}" ) .into(), )); @@ -9031,8 +9057,7 @@ impl Service { Err(err) => { return Err(OperationError::FinalizeError( format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", - node_id, err + "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}" ) .into(), )); @@ -9242,8 +9267,7 @@ impl Service { Err(err) => { return Err(OperationError::FinalizeError( format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", - node_id, err + "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}" ) .into(), )); @@ -9325,8 +9349,7 @@ impl Service { Err(err) => { return Err(OperationError::FinalizeError( format!( - "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}", - node_id, err + "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}" ) .into(), )); diff --git a/storage_controller/src/service/feature_flag.rs b/storage_controller/src/service/feature_flag.rs new file mode 100644 index 0000000000..645eb75237 --- /dev/null +++ b/storage_controller/src/service/feature_flag.rs @@ -0,0 +1,117 @@ +use std::{sync::Arc, time::Duration}; + +use futures::StreamExt; +use pageserver_api::config::PostHogConfig; +use pageserver_client::mgmt_api; +use posthog_client_lite::{PostHogClient, PostHogClientConfig}; +use reqwest::StatusCode; +use tokio::time::MissedTickBehavior; +use tokio_util::sync::CancellationToken; + +use crate::{pageserver_client::PageserverClient, service::Service}; + +pub struct FeatureFlagService { + service: Arc, + config: PostHogConfig, + client: PostHogClient, + http_client: reqwest::Client, +} + +const DEFAULT_POSTHOG_REFRESH_INTERVAL: Duration = Duration::from_secs(30); + +impl FeatureFlagService { + pub fn new(service: Arc, config: PostHogConfig) -> Self { + let client = PostHogClient::new(PostHogClientConfig { + project_id: config.project_id.clone(), + server_api_key: config.server_api_key.clone(), + client_api_key: config.client_api_key.clone(), + private_api_url: config.private_api_url.clone(), + public_api_url: config.public_api_url.clone(), + }); + Self { + service, + config, + client, + http_client: reqwest::Client::new(), + } + } + + async fn refresh(self: Arc, cancel: CancellationToken) -> Result<(), anyhow::Error> { + let nodes = { + let inner = self.service.inner.read().unwrap(); + inner.nodes.clone() + }; + + let feature_flag_spec = self.client.get_feature_flags_local_evaluation_raw().await?; + let stream = 
futures::stream::iter(nodes.values().cloned()).map(|node| { + let this = self.clone(); + let feature_flag_spec = feature_flag_spec.clone(); + async move { + let res = async { + let client = PageserverClient::new( + node.get_id(), + this.http_client.clone(), + node.base_url(), + // TODO: what if we rotate the token during storcon lifetime? + this.service.config.pageserver_jwt_token.as_deref(), + ); + + client.update_feature_flag_spec(feature_flag_spec).await?; + tracing::info!( + "Updated {}({}) with feature flag spec", + node.get_id(), + node.base_url() + ); + Ok::<_, mgmt_api::Error>(()) + }; + + if let Err(e) = res.await { + if let mgmt_api::Error::ApiError(status, _) = e { + if status == StatusCode::NOT_FOUND { + // This is expected during deployments where the API is not available, so we can ignore it + return; + } + } + tracing::warn!( + "Failed to update feature flag spec for {}: {e}", + node.get_id() + ); + } + } + }); + let mut stream = stream.buffer_unordered(8); + + while stream.next().await.is_some() { + if cancel.is_cancelled() { + return Ok(()); + } + } + + Ok(()) + } + + pub async fn run(self: Arc, cancel: CancellationToken) { + let refresh_interval = self + .config + .refresh_interval + .unwrap_or(DEFAULT_POSTHOG_REFRESH_INTERVAL); + let mut interval = tokio::time::interval(refresh_interval); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + tracing::info!( + "Starting feature flag service with refresh interval: {:?}", + refresh_interval + ); + loop { + tokio::select! { + _ = interval.tick() => {} + _ = cancel.cancelled() => { + break; + } + } + let res = self.clone().refresh(cancel.clone()).await; + if let Err(e) = res { + tracing::error!("Failed to refresh feature flags: {e:#?}"); + } + } + } +} diff --git a/storage_controller/src/service/safekeeper_service.rs b/storage_controller/src/service/safekeeper_service.rs index 61b9ec6b6d..fec81fb661 100644 --- a/storage_controller/src/service/safekeeper_service.rs +++ b/storage_controller/src/service/safekeeper_service.rs @@ -1,3 +1,4 @@ +use std::cmp::max; use std::collections::HashSet; use std::str::FromStr; use std::sync::Arc; @@ -17,6 +18,7 @@ use pageserver_api::controller_api::{ SafekeeperDescribeResponse, SkSchedulingPolicy, TimelineImportRequest, }; use pageserver_api::models::{SafekeeperInfo, SafekeepersInfo, TimelineInfo}; +use safekeeper_api::PgVersionId; use safekeeper_api::membership::{MemberSet, SafekeeperGeneration, SafekeeperId}; use tokio::task::JoinSet; use tokio_util::sync::CancellationToken; @@ -43,7 +45,7 @@ impl Service { &self, tenant_id: TenantId, timeline_id: TimelineId, - pg_version: u32, + pg_version: PgVersionId, timeline_persistence: &TimelinePersistence, ) -> Result, ApiError> { // If quorum is reached, return if we are outside of a specified timeout @@ -218,7 +220,7 @@ impl Service { read_only: bool, ) -> Result { let timeline_id = timeline_info.timeline_id; - let pg_version = timeline_info.pg_version * 10000; + let pg_version = PgVersionId::from(timeline_info.pg_version); // Initially start_lsn is determined by last_record_lsn in pageserver // response as it does initdb. However, later we persist it and in sk // creation calls replace with the value from the timeline row if it @@ -608,7 +610,8 @@ impl Service { Ok(()) } - /// Choose safekeepers for the new timeline: 3 in different azs. + /// Choose safekeepers for the new timeline in different azs. + /// 3 are choosen by default, but may be configured via config (for testing). 
pub(crate) async fn safekeepers_for_new_timeline( &self, ) -> Result, ApiError> { @@ -651,18 +654,14 @@ impl Service { ) }); // Number of safekeepers in different AZs we are looking for - let wanted_count = match all_safekeepers.len() { - 0 => { - return Err(ApiError::InternalServerError(anyhow::anyhow!( - "couldn't find any active safekeeper for new timeline", - ))); - } - // Have laxer requirements on testig mode as we don't want to - // spin up three safekeepers for every single test - #[cfg(feature = "testing")] - 1 | 2 => all_safekeepers.len(), - _ => 3, - }; + let mut wanted_count = self.config.timeline_safekeeper_count as usize; + // TODO(diko): remove this when `timeline_safekeeper_count` option is in the release + // branch and is specified in tests/neon_local config. + if cfg!(feature = "testing") && all_safekeepers.len() < wanted_count { + // In testing mode, we can have less safekeepers than the config says + wanted_count = max(all_safekeepers.len(), 1); + } + let mut sks = Vec::new(); let mut azs = HashSet::new(); for (_sk_util, sk_info, az_id) in all_safekeepers.iter() { diff --git a/storage_controller/src/tenant_shard.rs b/storage_controller/src/tenant_shard.rs index acd18734cf..359921ecbf 100644 --- a/storage_controller/src/tenant_shard.rs +++ b/storage_controller/src/tenant_shard.rs @@ -1381,8 +1381,13 @@ impl TenantShard { .generation .expect("Attempted to enter attached state without a generation"); - let wanted_conf = - attached_location_conf(generation, &self.shard, &self.config, &self.policy); + let wanted_conf = attached_location_conf( + generation, + &self.shard, + &self.config, + &self.policy, + self.intent.get_secondary().len(), + ); match self.observed.locations.get(&node_id) { Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {} Some(_) | None => { @@ -3003,21 +3008,18 @@ pub(crate) mod tests { if attachments_in_wrong_az > 0 { violations.push(format!( - "{} attachments scheduled to the incorrect AZ", - attachments_in_wrong_az + "{attachments_in_wrong_az} attachments scheduled to the incorrect AZ" )); } if secondaries_in_wrong_az > 0 { violations.push(format!( - "{} secondaries scheduled to the incorrect AZ", - secondaries_in_wrong_az + "{secondaries_in_wrong_az} secondaries scheduled to the incorrect AZ" )); } eprintln!( - "attachments_in_wrong_az={} secondaries_in_wrong_az={}", - attachments_in_wrong_az, secondaries_in_wrong_az + "attachments_in_wrong_az={attachments_in_wrong_az} secondaries_in_wrong_az={secondaries_in_wrong_az}" ); for (node_id, stats) in &node_stats { diff --git a/storage_controller/src/timeline_import.rs b/storage_controller/src/timeline_import.rs index eb50819d02..e88bce4c82 100644 --- a/storage_controller/src/timeline_import.rs +++ b/storage_controller/src/timeline_import.rs @@ -195,7 +195,7 @@ impl UpcallClient { let authorization_header = config .control_plane_jwt_token .clone() - .map(|jwt| format!("Bearer {}", jwt)); + .map(|jwt| format!("Bearer {jwt}")); let client = reqwest::ClientBuilder::new() .timeout(IMPORT_COMPLETE_REQUEST_TIMEOUT) diff --git a/storage_scrubber/src/checks.rs b/storage_scrubber/src/checks.rs index 865f0908f9..774418f237 100644 --- a/storage_scrubber/src/checks.rs +++ b/storage_scrubber/src/checks.rs @@ -146,7 +146,7 @@ pub(crate) async fn branch_cleanup_and_check_errors( for (layer, metadata) in index_part.layer_metadata { if metadata.file_size == 0 { result.errors.push(format!( - "index_part.json contains a layer {} that has 0 size in its layer metadata", layer, + "index_part.json contains a layer 
{layer} that has 0 size in its layer metadata", )) } diff --git a/storage_scrubber/src/lib.rs b/storage_scrubber/src/lib.rs index 25a157f108..d3ed5a8357 100644 --- a/storage_scrubber/src/lib.rs +++ b/storage_scrubber/src/lib.rs @@ -123,7 +123,7 @@ impl S3Target { pub fn with_sub_segment(&self, new_segment: &str) -> Self { let mut new_self = self.clone(); if new_self.prefix_in_bucket.is_empty() { - new_self.prefix_in_bucket = format!("/{}/", new_segment); + new_self.prefix_in_bucket = format!("/{new_segment}/"); } else { if new_self.prefix_in_bucket.ends_with('/') { new_self.prefix_in_bucket.pop(); diff --git a/storage_scrubber/src/scan_safekeeper_metadata.rs b/storage_scrubber/src/scan_safekeeper_metadata.rs index f10d758097..cf0a3d19e9 100644 --- a/storage_scrubber/src/scan_safekeeper_metadata.rs +++ b/storage_scrubber/src/scan_safekeeper_metadata.rs @@ -265,7 +265,7 @@ async fn load_timelines_from_db( // so spawn it off to run on its own. tokio::spawn(async move { if let Err(e) = connection.await { - eprintln!("connection error: {}", e); + eprintln!("connection error: {e}"); } }); @@ -274,7 +274,7 @@ async fn load_timelines_from_db( "and tenant_id in ({})", tenant_ids .iter() - .map(|t| format!("'{}'", t)) + .map(|t| format!("'{t}'")) .collect::>() .join(", ") ) diff --git a/test_runner/fixtures/endpoint/http.py b/test_runner/fixtures/endpoint/http.py index e2d405227b..f5be544439 100644 --- a/test_runner/fixtures/endpoint/http.py +++ b/test_runner/fixtures/endpoint/http.py @@ -24,7 +24,7 @@ The value to place in the `aud` claim. @final class ComputeClaimsScope(StrEnum): - ADMIN = "admin" + ADMIN = "compute_ctl:admin" @final diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py index 8cf1020adb..4eb85119ca 100644 --- a/test_runner/fixtures/neon_fixtures.py +++ b/test_runner/fixtures/neon_fixtures.py @@ -453,6 +453,7 @@ class NeonEnvBuilder: pageserver_get_vectored_concurrent_io: str | None = None, pageserver_tracing_config: PageserverTracingConfig | None = None, pageserver_import_config: PageserverImportConfig | None = None, + storcon_kick_secondary_downloads: bool | None = None, ): self.repo_dir = repo_dir self.rust_log_override = rust_log_override @@ -489,7 +490,9 @@ class NeonEnvBuilder: self.config_init_force: str | None = None self.top_output_dir = top_output_dir self.control_plane_hooks_api: str | None = None - self.storage_controller_config: dict[Any, Any] | None = None + self.storage_controller_config: dict[Any, Any] | None = { + "timelines_onto_safekeepers": True, + } # Flag to enable https listener in pageserver, generate local ssl certs, # and force storage controller to use https for pageserver api. 
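Since `storage_controller_config` now defaults to `{"timelines_onto_safekeepers": True}` and the builder gained a `storcon_kick_secondary_downloads` knob, tests that need the old behaviour opt out explicitly. A sketch of both knobs (hypothetical test, mirroring how the updated regress tests further down do it):

```python
from fixtures.neon_fixtures import NeonEnvBuilder


def test_example(neon_env_builder: NeonEnvBuilder):  # hypothetical test
    # Keep creating timelines the old way, without storcon-driven creation on safekeepers.
    neon_env_builder.storage_controller_config = {
        "timelines_onto_safekeepers": False,
    }
    # Let secondary downloads follow the regular heatmap/download intervals instead of
    # being kicked by the storage controller during reconciliation.
    neon_env_builder.storcon_kick_secondary_downloads = False

    env = neon_env_builder.init_start()
    ...
```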
@@ -512,6 +515,8 @@ class NeonEnvBuilder: self.pageserver_tracing_config = pageserver_tracing_config self.pageserver_import_config = pageserver_import_config + self.storcon_kick_secondary_downloads = storcon_kick_secondary_downloads + self.pageserver_default_tenant_config_compaction_algorithm: dict[str, Any] | None = ( pageserver_default_tenant_config_compaction_algorithm ) @@ -1219,6 +1224,14 @@ class NeonEnv: else: cfg["storage_controller"] = {"use_local_compute_notifications": False} + if config.storcon_kick_secondary_downloads is not None: + # Configure whether storage controller should actively kick off secondary downloads + if "storage_controller" not in cfg: + cfg["storage_controller"] = {} + cfg["storage_controller"]["kick_secondary_downloads"] = ( + config.storcon_kick_secondary_downloads + ) + # Create config for pageserver http_auth_type = "NeonJWT" if config.auth_enabled else "Trust" pg_auth_type = "NeonJWT" if config.auth_enabled else "Trust" @@ -4909,6 +4922,9 @@ class Safekeeper(LogUtils): log.info(f"finished pulling timeline from {src_ids} to {self.id}") return res + def safekeeper_id(self) -> SafekeeperId: + return SafekeeperId(self.id, "localhost", self.port.pg_tenant_only) + @property def data_dir(self) -> Path: return self.env.repo_dir / "safekeepers" / f"sk{self.id}" diff --git a/test_runner/fixtures/pageserver/http.py b/test_runner/fixtures/pageserver/http.py index c29192c25c..d9037f2d08 100644 --- a/test_runner/fixtures/pageserver/http.py +++ b/test_runner/fixtures/pageserver/http.py @@ -1219,3 +1219,31 @@ class PageserverHttpClient(requests.Session, MetricsGetter): ) self.verbose_error(res) return res.json() + + def force_override_feature_flag(self, flag: str, value: str | None = None): + if value is None: + res = self.delete( + f"http://localhost:{self.port}/v1/feature_flag/{flag}", + ) + else: + res = self.put( + f"http://localhost:{self.port}/v1/feature_flag/{flag}", + params={"value": value}, + ) + self.verbose_error(res) + + def evaluate_feature_flag_boolean(self, tenant_id: TenantId, flag: str) -> Any: + res = self.get( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/feature_flag/{flag}", + params={"as": "boolean"}, + ) + self.verbose_error(res) + return res.json() + + def evaluate_feature_flag_multivariate(self, tenant_id: TenantId, flag: str) -> Any: + res = self.get( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/feature_flag/{flag}", + params={"as": "multivariate"}, + ) + self.verbose_error(res) + return res.json() diff --git a/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py b/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py index 8af52dcbd0..25dfd5277c 100644 --- a/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py +++ b/test_runner/performance/pageserver/pagebench/test_large_slru_basebackup.py @@ -146,8 +146,6 @@ def run_benchmark(env: NeonEnv, pg_bin: PgBin, record, duration_secs: int): ps_http.base_url, "--page-service-connstring", env.pageserver.connstr(password=None), - "--gzip-probability", - "1", "--runtime", f"{duration_secs}s", # don't specify the targets explicitly, let pagebench auto-discover them diff --git a/test_runner/regress/test_attach_tenant_config.py b/test_runner/regress/test_attach_tenant_config.py index dc44fc77db..7788faceb4 100644 --- a/test_runner/regress/test_attach_tenant_config.py +++ b/test_runner/regress/test_attach_tenant_config.py @@ -184,7 +184,7 @@ def test_fully_custom_config(positive_env: NeonEnv): "timeline_offloading": False, 
"rel_size_v2_enabled": True, "relsize_snapshot_cache_capacity": 10000, - "gc_compaction_enabled": True, + "gc_compaction_enabled": False, "gc_compaction_verification": False, "gc_compaction_initial_threshold_kb": 1024000, "gc_compaction_ratio_percent": 200, diff --git a/test_runner/regress/test_branching.py b/test_runner/regress/test_branching.py index 9ce618b2ad..920c538069 100644 --- a/test_runner/regress/test_branching.py +++ b/test_runner/regress/test_branching.py @@ -11,6 +11,7 @@ from fixtures.common_types import Lsn, TimelineId from fixtures.log_helper import log from fixtures.pageserver.http import PageserverApiException from fixtures.pageserver.utils import wait_until_tenant_active +from fixtures.safekeeper.http import MembershipConfiguration, TimelineCreateRequest from fixtures.utils import query_scalar from performance.test_perf_pgbench import get_scales_matrix from requests import RequestException @@ -164,6 +165,19 @@ def test_cannot_create_endpoint_on_non_uploaded_timeline(neon_env_builder: NeonE ps_http.configure_failpoints(("before-upload-index-pausable", "pause")) env.pageserver.tenant_create(env.initial_tenant) + sk = env.safekeepers[0] + assert sk + sk.http_client().timeline_create( + TimelineCreateRequest( + env.initial_tenant, + env.initial_timeline, + MembershipConfiguration(generation=1, members=[sk.safekeeper_id()], new_members=None), + int(env.pg_version) * 10000, + Lsn(0), + None, + ) + ) + initial_branch = "initial_branch" def start_creating_timeline(): diff --git a/test_runner/regress/test_compute_metrics.py b/test_runner/regress/test_compute_metrics.py index c751a3e7cc..d1e61e597c 100644 --- a/test_runner/regress/test_compute_metrics.py +++ b/test_runner/regress/test_compute_metrics.py @@ -418,7 +418,7 @@ def test_sql_exporter_metrics_e2e( pg_user = conn_options["user"] pg_dbname = conn_options["dbname"] pg_application_name = f"sql_exporter{stem_suffix}" - connstr = f"postgresql://{pg_user}@{pg_host}:{pg_port}/{pg_dbname}?sslmode=disable&application_name={pg_application_name}" + connstr = f"postgresql://{pg_user}@{pg_host}:{pg_port}/{pg_dbname}?sslmode=disable&application_name={pg_application_name}&pgaudit.log=none" def escape_go_filepath_match_characters(s: str) -> str: """ diff --git a/test_runner/regress/test_feature_flag.py b/test_runner/regress/test_feature_flag.py new file mode 100644 index 0000000000..2712d13dcc --- /dev/null +++ b/test_runner/regress/test_feature_flag.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from fixtures.utils import run_only_on_default_postgres + +if TYPE_CHECKING: + from fixtures.neon_fixtures import NeonEnvBuilder + + +@run_only_on_default_postgres("Pageserver-only test only needs to run on one version") +def test_feature_flag(neon_env_builder: NeonEnvBuilder): + env = neon_env_builder.init_start() + env.pageserver.http_client().force_override_feature_flag("test-feature-flag", "true") + assert env.pageserver.http_client().evaluate_feature_flag_boolean( + env.initial_tenant, "test-feature-flag" + )["result"]["Ok"] + assert ( + env.pageserver.http_client().evaluate_feature_flag_multivariate( + env.initial_tenant, "test-feature-flag" + )["result"]["Ok"] + == "true" + ) + + env.pageserver.http_client().force_override_feature_flag("test-feature-flag", "false") + assert ( + env.pageserver.http_client().evaluate_feature_flag_boolean( + env.initial_tenant, "test-feature-flag" + )["result"]["Err"] + == "No condition group is matched" + ) + assert ( + 
env.pageserver.http_client().evaluate_feature_flag_multivariate( + env.initial_tenant, "test-feature-flag" + )["result"]["Ok"] + == "false" + ) + + env.pageserver.http_client().force_override_feature_flag("test-feature-flag", None) + assert ( + "Err" + in env.pageserver.http_client().evaluate_feature_flag_boolean( + env.initial_tenant, "test-feature-flag" + )["result"] + ) + assert ( + "Err" + in env.pageserver.http_client().evaluate_feature_flag_multivariate( + env.initial_tenant, "test-feature-flag" + )["result"] + ) diff --git a/test_runner/regress/test_normal_work.py b/test_runner/regress/test_normal_work.py index 44590ea4b9..3335cf686c 100644 --- a/test_runner/regress/test_normal_work.py +++ b/test_runner/regress/test_normal_work.py @@ -64,6 +64,11 @@ def test_normal_work( """ neon_env_builder.num_safekeepers = num_safekeepers + + if safekeeper_proto_version == 2: + neon_env_builder.storage_controller_config = { + "timelines_onto_safekeepers": False, + } env = neon_env_builder.init_start() pageserver_http = env.pageserver.http_client() diff --git a/test_runner/regress/test_pg_regress.py b/test_runner/regress/test_pg_regress.py index 3695ece66b..728241b465 100644 --- a/test_runner/regress/test_pg_regress.py +++ b/test_runner/regress/test_pg_regress.py @@ -173,7 +173,11 @@ def test_pg_regress( (runpath / "testtablespace").mkdir(parents=True) # Compute all the file locations that pg_regress will need. - build_path = pg_distrib_dir / f"build/{env.pg_version.v_prefixed}/src/test/regress" + # + # XXX: We assume that the `build` directory is a sibling of the + # pg_distrib_dir. That is the default when you check out the + # repository; `build` and `pg_install` are created side by side. + build_path = pg_distrib_dir / f"../build/{env.pg_version.v_prefixed}/src/test/regress" src_path = base_dir / f"vendor/postgres-{env.pg_version.v_prefixed}/src/test/regress" bindir = pg_distrib_dir / f"v{env.pg_version}/bin" schedule = src_path / "parallel_schedule" @@ -250,7 +254,11 @@ def test_isolation( (runpath / "testtablespace").mkdir(parents=True) # Compute all the file locations that pg_isolation_regress will need. - build_path = pg_distrib_dir / f"build/{env.pg_version.v_prefixed}/src/test/isolation" + # + # XXX: We assume that the `build` directory is a sibling of the + # pg_distrib_dir. That is the default when you check out the + # repository; `build` and `pg_install` are created side by side. + build_path = pg_distrib_dir / f"../build/{env.pg_version.v_prefixed}/src/test/isolation" src_path = base_dir / f"vendor/postgres-{env.pg_version.v_prefixed}/src/test/isolation" bindir = pg_distrib_dir / f"v{env.pg_version}/bin" schedule = src_path / "isolation_schedule" @@ -314,8 +322,11 @@ def test_sql_regress( (runpath / "testtablespace").mkdir(parents=True) # Compute all the file locations that pg_regress will need. - # This test runs neon specific tests - build_path = pg_distrib_dir / f"build/v{env.pg_version}/src/test/regress" + # + # XXX: We assume that the `build` directory is a sibling of the + # pg_distrib_dir. That is the default when you check out the + # repository; `build` and `pg_install` are created side by side. 
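The `../build` hop used by the three pg_regress-style tests relies on the default checkout layout described in the comment. A small pathlib illustration (paths invented) of what the new `build_path` points at:

```python
from pathlib import Path

# Default layout after a local build (directory names invented for illustration):
#   <repo>/build/v17/src/test/regress   <- pg_regress binary and test .so modules
#   <repo>/pg_install/v17/bin           <- installed postgres binaries
pg_distrib_dir = Path("/home/me/neon/pg_install")

build_path = pg_distrib_dir / "../build/v17/src/test/regress"
print(build_path.resolve())  # /home/me/neon/build/v17/src/test/regress
```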
+ build_path = pg_distrib_dir / f"../build/{env.pg_version.v_prefixed}/src/test/regress" src_path = base_dir / "test_runner/sql_regress" bindir = pg_distrib_dir / f"v{env.pg_version}/bin" schedule = src_path / "parallel_schedule" diff --git a/test_runner/regress/test_s3_restore.py b/test_runner/regress/test_s3_restore.py index 082808f9ff..2d7be1f9d1 100644 --- a/test_runner/regress/test_s3_restore.py +++ b/test_runner/regress/test_s3_restore.py @@ -74,7 +74,7 @@ def test_tenant_s3_restore( last_flush_lsn = Lsn(endpoint.safe_psql("SELECT pg_current_wal_flush_lsn()")[0][0]) last_flush_lsns.append(last_flush_lsn) ps_http.timeline_checkpoint(tenant_id, timeline_id) - wait_for_upload(ps_http, tenant_id, timeline_id, last_flush_lsn) + wait_for_upload(ps_http, tenant_id, timeline_id, last_flush_lsn, timeout=60) log.info(f"{timeline} timeline {timeline_id} {last_flush_lsn=}") parent = timeline diff --git a/test_runner/regress/test_storage_controller.py b/test_runner/regress/test_storage_controller.py index 8f3aa010e3..70772766d7 100644 --- a/test_runner/regress/test_storage_controller.py +++ b/test_runner/regress/test_storage_controller.py @@ -88,6 +88,12 @@ def test_storage_controller_smoke( neon_env_builder.control_plane_hooks_api = compute_reconfigure_listener.control_plane_hooks_api env = neon_env_builder.init_configs() + # These bubble up from safekeepers + for ps in env.pageservers: + ps.allowed_errors.extend( + [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"] + ) + # Start services by hand so that we can skip a pageserver (this will start + register later) env.broker.start() env.storage_controller.start() @@ -3455,7 +3461,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): assert target.get_safekeeper(fake_id) is None - assert len(target.get_safekeepers()) == 0 + start_sks = target.get_safekeepers() sk_0 = env.safekeepers[0] @@ -3477,7 +3483,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): inserted = target.get_safekeeper(fake_id) assert inserted is not None - assert target.get_safekeepers() == [inserted] + assert target.get_safekeepers() == start_sks + [inserted] assert eq_safekeeper_records(body, inserted) # error out if pk is changed (unexpected) @@ -3489,7 +3495,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): assert exc.value.status_code == 400 inserted_again = target.get_safekeeper(fake_id) - assert target.get_safekeepers() == [inserted_again] + assert target.get_safekeepers() == start_sks + [inserted_again] assert inserted_again is not None assert eq_safekeeper_records(inserted, inserted_again) @@ -3498,7 +3504,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): body["version"] += 1 target.on_safekeeper_deploy(fake_id, body) inserted_now = target.get_safekeeper(fake_id) - assert target.get_safekeepers() == [inserted_now] + assert target.get_safekeepers() == start_sks + [inserted_now] assert inserted_now is not None assert eq_safekeeper_records(body, inserted_now) @@ -3507,7 +3513,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): body["https_port"] = 123 target.on_safekeeper_deploy(fake_id, body) inserted_now = target.get_safekeeper(fake_id) - assert target.get_safekeepers() == [inserted_now] + assert target.get_safekeepers() == start_sks + [inserted_now] assert inserted_now is not None assert eq_safekeeper_records(body, inserted_now) env.storage_controller.consistency_check() @@ -3516,7 
+3522,7 @@ def test_safekeeper_deployment_time_update(neon_env_builder: NeonEnvBuilder): body["https_port"] = None target.on_safekeeper_deploy(fake_id, body) inserted_now = target.get_safekeeper(fake_id) - assert target.get_safekeepers() == [inserted_now] + assert target.get_safekeepers() == start_sks + [inserted_now] assert inserted_now is not None assert eq_safekeeper_records(body, inserted_now) env.storage_controller.consistency_check() @@ -3635,6 +3641,11 @@ def test_timeline_delete_mid_live_migration(neon_env_builder: NeonEnvBuilder, mi env = neon_env_builder.init_configs() env.start() + for ps in env.pageservers: + ps.allowed_errors.extend( + [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"] + ) + tenant_id = TenantId.generate() timeline_id = TimelineId.generate() env.storage_controller.tenant_create(tenant_id, placement_policy={"Attached": 1}) @@ -4425,6 +4436,53 @@ def test_storage_controller_graceful_migration(neon_env_builder: NeonEnvBuilder, assert initial_ps.http_client().tenant_list_locations()["tenant_shards"] == [] +def test_attached_0_graceful_migration(neon_env_builder: NeonEnvBuilder): + neon_env_builder.num_pageservers = 4 + neon_env_builder.num_azs = 2 + + neon_env_builder.storcon_kick_secondary_downloads = False + + env = neon_env_builder.init_start() + + # It is default, but we want to ensure that there are no secondary locations requested + env.storage_controller.tenant_policy_update(env.initial_tenant, {"placement": {"Attached": 0}}) + env.storage_controller.reconcile_until_idle() + + desc = env.storage_controller.tenant_describe(env.initial_tenant)["shards"][0] + src_ps_id = desc["node_attached"] + src_ps = env.get_pageserver(src_ps_id) + src_az = desc["preferred_az_id"] + + # There must be no secondary locations with Attached(0) placement policy + assert len(desc["node_secondary"]) == 0 + + # Migrate tenant shard to the same AZ node + dst_ps = [ps for ps in env.pageservers if ps.id != src_ps_id and ps.az_id == src_az][0] + + env.storage_controller.tenant_shard_migrate( + TenantShardId(env.initial_tenant, 0, 0), + dst_ps.id, + config=StorageControllerMigrationConfig(prewarm=True), + ) + + def tenant_shard_migrated(): + src_locations = src_ps.http_client().tenant_list_locations()["tenant_shards"] + assert len(src_locations) == 0 + log.info(f"Tenant shard migrated from {src_ps.id}") + dst_locations = dst_ps.http_client().tenant_list_locations()["tenant_shards"] + assert len(dst_locations) == 1 + assert dst_locations[0][1]["mode"] == "AttachedSingle" + log.info(f"Tenant shard migrated to {dst_ps.id}") + + # After all we expect that tenant shard exists only on dst node. + # We wait so long because [`DEFAULT_HEATMAP_PERIOD`] and [`DEFAULT_DOWNLOAD_INTERVAL`] + # are set to 60 seconds by default. + # + # TODO: we should consider making these configurable, so the test can run faster. 
+    wait_until(tenant_shard_migrated, timeout=180, interval=5, status_interval=10)
+    log.info("Tenant shard migrated successfully")
+
+
 @run_only_on_default_postgres("this is like a 'unit test' against storcon db")
 def test_storage_controller_migrate_with_pageserver_restart(
     neon_env_builder: NeonEnvBuilder, make_httpserver
diff --git a/test_runner/regress/test_storage_scrubber.py b/test_runner/regress/test_storage_scrubber.py
index 03cd133ccb..e29cb801d5 100644
--- a/test_runner/regress/test_storage_scrubber.py
+++ b/test_runner/regress/test_storage_scrubber.py
@@ -341,6 +341,11 @@ def test_scrubber_physical_gc_timeline_deletion(neon_env_builder: NeonEnvBuilder
     env = neon_env_builder.init_configs()
     env.start()
 
+    for ps in env.pageservers:
+        ps.allowed_errors.extend(
+            [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"]
+        )
+
     tenant_id = TenantId.generate()
     timeline_id = TimelineId.generate()
     env.create_tenant(
diff --git a/test_runner/regress/test_timeline_detach_ancestor.py b/test_runner/regress/test_timeline_detach_ancestor.py
index f0810270b1..b5cc431afe 100644
--- a/test_runner/regress/test_timeline_detach_ancestor.py
+++ b/test_runner/regress/test_timeline_detach_ancestor.py
@@ -21,7 +21,10 @@ from fixtures.neon_fixtures import (
     last_flush_lsn_upload,
     wait_for_last_flush_lsn,
 )
-from fixtures.pageserver.http import HistoricLayerInfo, PageserverApiException
+from fixtures.pageserver.http import (
+    HistoricLayerInfo,
+    PageserverApiException,
+)
 from fixtures.pageserver.utils import wait_for_last_record_lsn, wait_timeline_detail_404
 from fixtures.remote_storage import LocalFsStorage, RemoteStorageKind
 from fixtures.utils import assert_pageserver_backups_equal, skip_in_debug_build, wait_until
@@ -413,6 +416,7 @@ def test_ancestor_detach_behavior_v2(neon_env_builder: NeonEnvBuilder, snapshots
             "read_only": True,
         },
     )
+
     sk = env.safekeepers[0]
     assert sk
     with pytest.raises(requests.exceptions.HTTPError, match="Not Found"):
@@ -504,8 +508,15 @@ def test_ancestor_detach_behavior_v2(neon_env_builder: NeonEnvBuilder, snapshots
     assert len(lineage.get("original_ancestor", [])) == 0
     assert len(lineage.get("reparenting_history", [])) == 0
 
-    for name, _, _, rows, starts in expected_result:
-        with env.endpoints.create_start(name, tenant_id=env.initial_tenant) as ep:
+    for branch_name, queried_timeline, _, rows, starts in expected_result:
+        details = client.timeline_detail(env.initial_tenant, queried_timeline)
+        log.info(f"reading data from branch {branch_name}")
+        # specifying the lsn makes the endpoint read-only, so it does not connect to safekeepers
+        with env.endpoints.create(
+            branch_name,
+            lsn=Lsn(details["last_record_lsn"]),
+        ) as ep:
+            ep.start(safekeeper_generation=1)
             assert ep.safe_psql("SELECT count(*) FROM foo;")[0][0] == rows
             assert ep.safe_psql(f"SELECT count(*) FROM audit WHERE starts = {starts}")[0][0] == 1
 
@@ -1088,6 +1099,9 @@ def test_timeline_detach_ancestor_interrupted_by_deletion(
 
     for ps in env.pageservers:
         ps.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS)
+        ps.allowed_errors.extend(
+            [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"]
+        )
 
     pageservers = dict((int(p.id), p) for p in env.pageservers)
 
@@ -1209,6 +1223,9 @@ def test_sharded_tad_interleaved_after_partial_success(neon_env_builder: NeonEnv
 
     for ps in env.pageservers:
         ps.allowed_errors.extend(SHUTDOWN_ALLOWED_ERRORS)
+        ps.allowed_errors.extend(
+            [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"]
+        )
 
     pageservers = dict((int(p.id), p) for p in env.pageservers)
 
diff --git a/test_runner/regress/test_timeline_gc_blocking.py b/test_runner/regress/test_timeline_gc_blocking.py
index 9a710f5b80..daba8019b6 100644
--- a/test_runner/regress/test_timeline_gc_blocking.py
+++ b/test_runner/regress/test_timeline_gc_blocking.py
@@ -24,6 +24,10 @@ def test_gc_blocking_by_timeline(neon_env_builder: NeonEnvBuilder, sharded: bool
         initial_tenant_conf={"gc_period": "1s", "lsn_lease_length": "0s"},
         initial_tenant_shard_count=2 if sharded else None,
     )
+    for ps in env.pageservers:
+        ps.allowed_errors.extend(
+            [".*Timeline.* has been deleted.*", ".*Timeline.*was cancelled and cannot be used"]
+        )
 
     if sharded:
         http = env.storage_controller.pageserver_api()
diff --git a/test_runner/regress/test_wal_acceptor.py b/test_runner/regress/test_wal_acceptor.py
index b9183286af..ea120c1814 100644
--- a/test_runner/regress/test_wal_acceptor.py
+++ b/test_runner/regress/test_wal_acceptor.py
@@ -229,7 +229,7 @@ def test_many_timelines(neon_env_builder: NeonEnvBuilder):
 
     # Test timeline_list endpoint.
     http_cli = env.safekeepers[0].http_client()
-    assert len(http_cli.timeline_list()) == 3
+    assert len(http_cli.timeline_list()) == 4
 
     # Check that dead minority doesn't prevent the commits: execute insert n_inserts
@@ -740,8 +740,8 @@ def test_timeline_status(neon_env_builder: NeonEnvBuilder, auth_enabled: bool):
     env = neon_env_builder.init_start()
 
     tenant_id = env.initial_tenant
-    timeline_id = env.create_branch("test_timeline_status")
-    endpoint = env.endpoints.create_start("test_timeline_status")
+    timeline_id = env.initial_timeline
+    endpoint = env.endpoints.create_start("main")
 
     wa = env.safekeepers[0]
 
@@ -1292,6 +1292,12 @@ def test_lagging_sk(neon_env_builder: NeonEnvBuilder):
 # it works without compute at all.
 def test_peer_recovery(neon_env_builder: NeonEnvBuilder):
     neon_env_builder.num_safekeepers = 3
+
+    # timelines should be created the old way
+    neon_env_builder.storage_controller_config = {
+        "timelines_onto_safekeepers": False,
+    }
+
     env = neon_env_builder.init_start()
 
     tenant_id = env.initial_tenant
@@ -1532,6 +1538,11 @@ def test_safekeeper_without_pageserver(
 
 
 def test_replace_safekeeper(neon_env_builder: NeonEnvBuilder):
+    # timelines should be created manually, the old way, until we have migration support
+    neon_env_builder.storage_controller_config = {
+        "timelines_onto_safekeepers": False,
+    }
+
     def execute_payload(endpoint: Endpoint):
         with closing(endpoint.connect()) as conn:
             with conn.cursor() as cur:
@@ -1661,6 +1672,15 @@ def test_pull_timeline(neon_env_builder: NeonEnvBuilder, live_sk_change: bool):
     res = env.safekeepers[3].pull_timeline(
         [env.safekeepers[0], env.safekeepers[2]], tenant_id, timeline_id
     )
+    sk_id_1 = env.safekeepers[0].safekeeper_id()
+    sk_id_3 = env.safekeepers[2].safekeeper_id()
+    sk_id_4 = env.safekeepers[3].safekeeper_id()
+    new_conf = MembershipConfiguration(
+        generation=2, members=[sk_id_1, sk_id_3, sk_id_4], new_members=None
+    )
+    for i in [0, 2, 3]:
+        env.safekeepers[i].http_client().membership_switch(tenant_id, timeline_id, new_conf)
+
     log.info("Finished pulling timeline")
     log.info(res)
 
@@ -1705,13 +1725,15 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder):
     neon_env_builder.num_safekeepers = 3
     neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage())
     env = neon_env_builder.init_start()
-    tenant_id = env.initial_tenant
-    timeline_id = env.initial_timeline
 
     (src_sk, dst_sk) = (env.safekeepers[0], env.safekeepers[2])
 
+    dst_sk.stop()
+
+    [tenant_id, timeline_id] = env.create_tenant()
+
     log.info("use only first 2 safekeepers, 3rd will be seeded")
-    endpoint = env.endpoints.create("main")
+    endpoint = env.endpoints.create("main", tenant_id=tenant_id)
     endpoint.active_safekeepers = [1, 2]
     endpoint.start()
     endpoint.safe_psql("create table t(key int, value text)")
@@ -1723,6 +1745,7 @@ def test_pull_timeline_gc(neon_env_builder: NeonEnvBuilder):
     src_http = src_sk.http_client()
     # run pull_timeline which will halt before downloading files
     src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause"))
+    dst_sk.start()
     pt_handle = PropagatingThread(
         target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id)
     )
@@ -1782,23 +1805,27 @@ def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder):
     neon_env_builder.enable_safekeeper_remote_storage(default_remote_storage())
     env = neon_env_builder.init_start()
     tenant_id = env.initial_tenant
-    timeline_id = env.initial_timeline
 
     (src_sk, dst_sk) = (env.safekeepers[0], env.safekeepers[2])
 
+    dst_sk.stop()
+    src_http = src_sk.http_client()
+    src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause"))
+
+    timeline_id = env.create_branch("pull_timeline_term_changes")
+
+    # run pull_timeline which will halt before downloading files
     log.info("use only first 2 safekeepers, 3rd will be seeded")
-    ep = env.endpoints.create("main")
+    ep = env.endpoints.create("pull_timeline_term_changes")
     ep.active_safekeepers = [1, 2]
     ep.start()
     ep.safe_psql("create table t(key int, value text)")
     ep.safe_psql("insert into t select generate_series(1, 1000), 'pear'")
 
-    src_http = src_sk.http_client()
-    # run pull_timeline which will halt before downloading files
-    src_http.configure_failpoints(("sk-snapshot-after-list-pausable", "pause"))
     pt_handle = PropagatingThread(
         target=dst_sk.pull_timeline, args=([src_sk], tenant_id, timeline_id)
     )
+    dst_sk.start()
    pt_handle.start()
 
     src_sk.wait_until_paused("sk-snapshot-after-list-pausable")
@@ -1807,7 +1834,7 @@ def test_pull_timeline_term_change(neon_env_builder: NeonEnvBuilder):
 
     # restart compute to bump term
     ep.stop()
-    ep = env.endpoints.create("main")
+    ep = env.endpoints.create("pull_timeline_term_changes")
     ep.active_safekeepers = [1, 2]
     ep.start()
     ep.safe_psql("insert into t select generate_series(1, 100), 'pear'")
@@ -1929,6 +1956,11 @@ def test_pull_timeline_while_evicted(neon_env_builder: NeonEnvBuilder):
 @run_only_on_default_postgres("tests only safekeeper API")
 def test_membership_api(neon_env_builder: NeonEnvBuilder):
     neon_env_builder.num_safekeepers = 1
+    # timelines should be created the old way
+    neon_env_builder.storage_controller_config = {
+        "timelines_onto_safekeepers": False,
+    }
+
     env = neon_env_builder.init_start()
 
     # These are expected after timeline deletion on safekeepers.
@@ -2009,6 +2041,12 @@ def test_explicit_timeline_creation(neon_env_builder: NeonEnvBuilder):
     created manually, later storcon will do that.
     """
     neon_env_builder.num_safekeepers = 3
+
+    # timelines should be created the old way, manually
+    neon_env_builder.storage_controller_config = {
+        "timelines_onto_safekeepers": False,
+    }
+
     env = neon_env_builder.init_start()
 
     tenant_id = env.initial_tenant
@@ -2064,7 +2102,7 @@ def test_idle_reconnections(neon_env_builder: NeonEnvBuilder):
     env = neon_env_builder.init_start()
     tenant_id = env.initial_tenant
-    timeline_id = env.create_branch("test_idle_reconnections")
+    timeline_id = env.initial_timeline
 
     def collect_stats() -> dict[str, float]:
         # we need to collect safekeeper_pg_queries_received_total metric from all safekeepers
@@ -2095,7 +2133,7 @@ def test_idle_reconnections(neon_env_builder: NeonEnvBuilder):
 
     collect_stats()
 
-    endpoint = env.endpoints.create_start("test_idle_reconnections")
+    endpoint = env.endpoints.create_start("main")
     # just write something to the timeline
     endpoint.safe_psql("create table t(i int)")
 
     collect_stats()
diff --git a/test_runner/regress/test_wal_acceptor_async.py b/test_runner/regress/test_wal_acceptor_async.py
index d8a7dc2a2b..1bad387a90 100644
--- a/test_runner/regress/test_wal_acceptor_async.py
+++ b/test_runner/regress/test_wal_acceptor_async.py
@@ -590,6 +590,13 @@ async def run_wal_truncation(env: NeonEnv, safekeeper_proto_version: int):
 @pytest.mark.parametrize("safekeeper_proto_version", [2, 3])
 def test_wal_truncation(neon_env_builder: NeonEnvBuilder, safekeeper_proto_version: int):
     neon_env_builder.num_safekeepers = 3
+    if safekeeper_proto_version == 2:
+        # On the legacy protocol, we don't support generations, which are part of
+        # `timelines_onto_safekeepers`
+        neon_env_builder.storage_controller_config = {
+            "timelines_onto_safekeepers": False,
+        }
+
     env = neon_env_builder.init_start()
 
     asyncio.run(run_wal_truncation(env, safekeeper_proto_version))
@@ -713,6 +720,11 @@ async def run_quorum_sanity(env: NeonEnv):
 # we don't.
 def test_quorum_sanity(neon_env_builder: NeonEnvBuilder):
     neon_env_builder.num_safekeepers = 4
+
+    # The test basically always fails in the new mode.
+    neon_env_builder.storage_controller_config = {
+        "timelines_onto_safekeepers": False,
+    }
     env = neon_env_builder.init_start()
 
     asyncio.run(run_quorum_sanity(env))
diff --git a/test_runner/regress/test_wal_receiver.py b/test_runner/regress/test_wal_receiver.py
index 0252b590cc..d281c055b0 100644
--- a/test_runner/regress/test_wal_receiver.py
+++ b/test_runner/regress/test_wal_receiver.py
@@ -16,6 +16,13 @@ if TYPE_CHECKING:
 # Checks that pageserver's walreceiver state is printed in the logs during WAL wait timeout.
 # Ensures that walreceiver does not run without any data inserted and only starts after the insertion.
 def test_pageserver_lsn_wait_error_start(neon_env_builder: NeonEnvBuilder):
+    # we assert below that the walreceiver is not active before data writes.
+    # with manually created timelines, it is active.
+    # FIXME: remove this test once we remove timelines_onto_safekeepers
+    neon_env_builder.storage_controller_config = {
+        "timelines_onto_safekeepers": False,
+    }
+
     # Trigger WAL wait timeout faster
     neon_env_builder.pageserver_config_override = "wait_lsn_timeout = '1s'"
     env = neon_env_builder.init_start()
diff --git a/vendor/postgres-v14 b/vendor/postgres-v14
index 6770bc2513..9085654ee8 160000
--- a/vendor/postgres-v14
+++ b/vendor/postgres-v14
@@ -1 +1 @@
-Subproject commit 6770bc251301ef40c66f7ecb731741dc435b5051
+Subproject commit 9085654ee8022d5cc4ca719380a1dc53e5e3246f
diff --git a/vendor/revisions.json b/vendor/revisions.json
index 12d5499ddb..b260698c86 100644
--- a/vendor/revisions.json
+++ b/vendor/revisions.json
@@ -13,6 +13,6 @@
     ],
     "v14": [
         "14.18",
-        "6770bc251301ef40c66f7ecb731741dc435b5051"
+        "9085654ee8022d5cc4ca719380a1dc53e5e3246f"
     ]
 }
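The test changes above repeat the same two preparations: pointing the storage controller away from the new timelines_onto_safekeepers behaviour, and whitelisting the timeline-deletion log errors on every pageserver. Below is a minimal sketch, not part of the patch, of how that recurring setup could be factored into helpers; the helper names and the TIMELINE_SHUTDOWN_ERRORS constant are invented here and only assume the NeonEnvBuilder / NeonEnv fixture API already used in the diff.

    # Illustrative sketch only -- helper names are hypothetical, not from this patch.
    from fixtures.neon_fixtures import NeonEnv, NeonEnvBuilder

    # Log lines emitted when timelines are deleted or cancelled (same patterns as in the tests above).
    TIMELINE_SHUTDOWN_ERRORS = [
        ".*Timeline.* has been deleted.*",
        ".*Timeline.*was cancelled and cannot be used",
    ]


    def use_legacy_timeline_creation(neon_env_builder: NeonEnvBuilder) -> None:
        # Keep the old, manual timeline creation path instead of having the
        # storage controller create timelines on safekeepers.
        # Assumes nothing else has already set storage_controller_config.
        neon_env_builder.storage_controller_config = {
            "timelines_onto_safekeepers": False,
        }


    def allow_timeline_shutdown_errors(env: NeonEnv) -> None:
        # Tolerate the expected deletion/cancellation errors on all pageservers.
        for ps in env.pageservers:
            ps.allowed_errors.extend(TIMELINE_SHUTDOWN_ERRORS)

A test such as test_peer_recovery would then call use_legacy_timeline_creation(neon_env_builder) before init_start(), and allow_timeline_shutdown_errors(env) once the environment is up.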