diff --git a/.dockerignore b/.dockerignore index 92eb4f24de..2bbff86100 100644 --- a/.dockerignore +++ b/.dockerignore @@ -14,6 +14,7 @@ !pgxn/ !proxy/ !safekeeper/ +!storage_broker/ !vendor/postgres-v14/ !vendor/postgres-v15/ !workspace_hack/ diff --git a/.github/ansible/.gitignore b/.github/ansible/.gitignore index e3454fd43c..9cd8044417 100644 --- a/.github/ansible/.gitignore +++ b/.github/ansible/.gitignore @@ -1,5 +1,3 @@ -zenith_install.tar.gz -.zenith_current_version neon_install.tar.gz .neon_current_version diff --git a/.github/ansible/get_binaries.sh b/.github/ansible/get_binaries.sh index 9d2d0926f5..4bb580428c 100755 --- a/.github/ansible/get_binaries.sh +++ b/.github/ansible/get_binaries.sh @@ -25,6 +25,7 @@ mkdir neon_install/bin/ docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/ docker cp ${ID}:/usr/local/bin/pageserver_binutils neon_install/bin/ docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/ +docker cp ${ID}:/usr/local/bin/storage_broker neon_install/bin/ docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/ docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/ docker cp ${ID}:/usr/local/v15/bin/ neon_install/v15/bin/ diff --git a/.github/ansible/staging.eu-west-1.hosts.yaml b/.github/ansible/staging.eu-west-1.hosts.yaml new file mode 100644 index 0000000000..088ba03e5e --- /dev/null +++ b/.github/ansible/staging.eu-west-1.hosts.yaml @@ -0,0 +1,33 @@ +storage: + vars: + bucket_name: neon-dev-storage-eu-west-1 + bucket_region: eu-west-1 + console_mgmt_base_url: http://console-staging.local + etcd_endpoints: etcd-0.eu-west-1.aws.neon.build:2379 + pageserver_config_stub: + pg_distrib_dir: /usr/local + remote_storage: + bucket_name: "{{ bucket_name }}" + bucket_region: "{{ bucket_region }}" + prefix_in_bucket: "pageserver/v1" + safekeeper_s3_prefix: safekeeper/v1/wal + hostname_suffix: "" + remote_user: ssm-user + ansible_aws_ssm_region: eu-west-1 + ansible_aws_ssm_bucket_name: neon-dev-storage-eu-west-1 + console_region_id: aws-eu-west-1 + + children: + pageservers: + hosts: + pageserver-0.eu-west-1.aws.neon.build: + ansible_host: i-01d496c5041c7f34c + + safekeepers: + hosts: + safekeeper-0.eu-west-1.aws.neon.build: + ansible_host: i-05226ef85722831bf + safekeeper-1.eu-west-1.aws.neon.build: + ansible_host: i-06969ee1bf2958bfc + safekeeper-2.eu-west-1.aws.neon.build: + ansible_host: i-087892e9625984a0b diff --git a/.github/ansible/staging.us-east-2.hosts.yaml b/.github/ansible/staging.us-east-2.hosts.yaml index 3bbf5fe8cb..26a82f8db4 100644 --- a/.github/ansible/staging.us-east-2.hosts.yaml +++ b/.github/ansible/staging.us-east-2.hosts.yaml @@ -22,6 +22,8 @@ storage: hosts: pageserver-0.us-east-2.aws.neon.build: ansible_host: i-0c3e70929edb5d691 + pageserver-1.us-east-2.aws.neon.build: + ansible_host: i-0565a8b4008aa3f40 safekeepers: hosts: diff --git a/.github/helm-values/dev-eu-west-1-zeta.neon-proxy-scram.yaml b/.github/helm-values/dev-eu-west-1-zeta.neon-proxy-scram.yaml new file mode 100644 index 0000000000..0e98636057 --- /dev/null +++ b/.github/helm-values/dev-eu-west-1-zeta.neon-proxy-scram.yaml @@ -0,0 +1,31 @@ +# Helm chart values for neon-proxy-scram. +# This is a YAML-formatted file. 
+ +image: + repository: neondatabase/neon + +settings: + authBackend: "console" + authEndpoint: "http://console-staging.local/management/api/v2" + domain: "*.eu-west-1.aws.neon.build" + +# -- Additional labels for neon-proxy pods +podLabels: + zenith_service: proxy-scram + zenith_env: dev + zenith_region: eu-west-1 + zenith_region_slug: eu-west-1 + +exposedService: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + external-dns.alpha.kubernetes.io/hostname: eu-west-1.aws.neon.build + +#metrics: +# enabled: true +# serviceMonitor: +# enabled: true +# selector: +# release: kube-prometheus-stack diff --git a/.github/helm-values/dev-us-east-2-beta.neon-proxy-link.yaml b/.github/helm-values/dev-us-east-2-beta.neon-proxy-link.yaml new file mode 100644 index 0000000000..685cbd192d --- /dev/null +++ b/.github/helm-values/dev-us-east-2-beta.neon-proxy-link.yaml @@ -0,0 +1,39 @@ +# Helm chart values for neon-proxy-link. +# This is a YAML-formatted file. + +image: + repository: neondatabase/neon + +settings: + authBackend: "link" + authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/" + uri: "https://console.stage.neon.tech/psql_session/" + +# -- Additional labels for neon-proxy-link pods +podLabels: + zenith_service: proxy + zenith_env: dev + zenith_region: us-east-2 + zenith_region_slug: us-east-2 + +service: + type: LoadBalancer + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internal + external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.beta.us-east-2.aws.neon.build + +exposedService: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.beta.us-east-2.aws.neon.build + +#metrics: +# enabled: true +# serviceMonitor: +# enabled: true +# selector: +# release: kube-prometheus-stack diff --git a/.github/workflows/benchmarking.yml b/.github/workflows/benchmarking.yml index 573a215142..b3008a2aed 100644 --- a/.github/workflows/benchmarking.yml +++ b/.github/workflows/benchmarking.yml @@ -144,7 +144,9 @@ jobs: # neon-captest-new: Run pgbench in a freshly created project # neon-captest-reuse: Same, but reusing existing project # neon-captest-prefetch: Same, with prefetching enabled (new project) - platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch ] + # rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs + # rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage + platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch, rds-postgres ] db_size: [ 10gb ] include: - platform: neon-captest-new @@ -207,8 +209,11 @@ jobs: rds-aurora) CONNSTR=${{ secrets.BENCHMARK_RDS_CONNSTR }} ;; + rds-postgres) + CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }} + ;; *) - echo 2>&1 "Unknown PLATFORM=${PLATFORM}. Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch' or 'rds-aurora'" + echo 2>&1 "Unknown PLATFORM=${PLATFORM}. 
Allowed only 'neon-captest-reuse', 'neon-captest-new', 'neon-captest-prefetch', 'rds-aurora', or 'rds-postgres'" exit 1 ;; esac diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index ac30a9ec97..cb7627a6cd 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -761,7 +761,6 @@ jobs: run: | export DOCKER_TAG=${{needs.tag.outputs.build-tag}} cd "$(pwd)/.github/ansible" - if [[ "$GITHUB_REF_NAME" == "main" ]]; then ./get_binaries.sh elif [[ "$GITHUB_REF_NAME" == "release" ]]; then @@ -770,6 +769,38 @@ jobs: echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'" exit 1 fi + ansible-galaxy collection install sivel.toiletwater + ansible-playbook deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{secrets.NEON_STAGING_API_KEY}} + rm -f neon_install.tar.gz .neon_current_version + + deploy-pr-test-new: + runs-on: [ self-hosted, dev, x64 ] + container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned + # We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version. + # If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly + needs: [ push-docker-hub, tag, regress-tests ] + if: | + contains(github.event.pull_request.labels.*.name, 'deploy-test-storage') && + github.event_name != 'workflow_dispatch' + defaults: + run: + shell: bash + strategy: + matrix: + target_region: [ eu-west-1 ] + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + submodules: true + fetch-depth: 0 + + - name: Redeploy + run: | + export DOCKER_TAG=${{needs.tag.outputs.build-tag}} + cd "$(pwd)/.github/ansible" + + ./get_binaries.sh ansible-galaxy collection install sivel.toiletwater ansible-playbook deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{secrets.NEON_STAGING_API_KEY}} @@ -780,7 +811,7 @@ jobs: container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest # We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version. # If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly - needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ] + needs: [ push-docker-hub, tag, regress-tests ] if: | (github.ref_name == 'release') && github.event_name != 'workflow_dispatch' @@ -861,7 +892,7 @@ jobs: runs-on: [ self-hosted, dev, x64 ] container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned # Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently. 
- needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ] + needs: [ push-docker-hub, tag, regress-tests ] if: | (github.ref_name == 'main') && github.event_name != 'workflow_dispatch' @@ -873,6 +904,10 @@ jobs: include: - target_region: us-east-2 target_cluster: dev-us-east-2-beta + deploy_link_proxy: true + - target_region: eu-west-1 + target_cluster: dev-eu-west-1-zeta + deploy_link_proxy: false steps: - name: Checkout uses: actions/checkout@v3 @@ -885,16 +920,22 @@ jobs: helm repo add neondatabase https://neondatabase.github.io/helm-charts aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }} - - name: Re-deploy proxy + - name: Re-deploy scram proxy run: | DOCKER_TAG=${{needs.tag.outputs.build-tag}} helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s + - name: Re-deploy link proxy + if: matrix.deploy_link_proxy + run: | + DOCKER_TAG=${{needs.tag.outputs.build-tag}} + helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --wait --timeout 15m0s + deploy-proxy-prod-new: runs-on: prod container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest # Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently. - needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ] + needs: [ push-docker-hub, tag, regress-tests ] if: | (github.ref_name == 'release') && github.event_name != 'workflow_dispatch' diff --git a/.github/workflows/codestyle.yml b/.github/workflows/codestyle.yml index bb000efbac..01fef71c9a 100644 --- a/.github/workflows/codestyle.yml +++ b/.github/workflows/codestyle.yml @@ -48,11 +48,11 @@ jobs: if: matrix.os == 'ubuntu-latest' run: | sudo apt update - sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev libssl-dev + sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev libssl-dev protobuf-compiler - name: Install macOS postgres dependencies if: matrix.os == 'macos-latest' - run: brew install flex bison openssl + run: brew install flex bison openssl protobuf - name: Set pg 14 revision for caching id: pg_v14_rev diff --git a/Cargo.lock b/Cargo.lock index 87f8a4226f..ac290dc59b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -457,11 +457,26 @@ checksum = "6bf8832993da70a4c6d13c581f4463c2bdda27b9bf1c5498dc4365543abe6d6f" dependencies = [ "atty", "bitflags", + "clap_derive", "clap_lex 0.3.0", + "once_cell", "strsim", "termcolor", ] +[[package]] +name = "clap_derive" +version = "4.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c42f169caba89a7d512b5418b09864543eeb4d497416c917d7137863bd2076ad" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "clap_lex" version = "0.2.4" @@ -586,6 +601,7 @@ dependencies = [ "once_cell", "pageserver_api", "postgres", + "postgres_connection", "regex", "reqwest", "safekeeper_api", @@ -1005,11 +1021,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fb8664f6ea68aba5503d42dd1be786b0f1bd9b7972e7f40208c83ef74db91bf" dependencies = [ "http", - "prost", + "prost 0.10.4", "tokio", "tokio-stream", - 
"tonic", - "tonic-build", + "tonic 0.7.2", + "tonic-build 0.7.2", "tower", "tower-service", ] @@ -2144,6 +2160,7 @@ dependencies = [ "postgres", "postgres-protocol", "postgres-types", + "postgres_connection", "postgres_ffi", "pprof", "pq_proto", @@ -2391,6 +2408,19 @@ dependencies = [ "postgres-protocol", ] +[[package]] +name = "postgres_connection" +version = "0.1.0" +dependencies = [ + "anyhow", + "itertools", + "once_cell", + "postgres", + "tokio-postgres", + "url", + "workspace_hack", +] + [[package]] name = "postgres_ffi" version = "0.1.0" @@ -2465,6 +2495,30 @@ dependencies = [ "syn", ] +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + [[package]] name = "proc-macro-hack" version = "0.5.19" @@ -2516,7 +2570,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.10.1", +] + +[[package]] +name = "prost" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0841812012b2d4a6145fae9a6af1534873c32aa67fff26bd09f8fa42c83f95a" +dependencies = [ + "bytes", + "prost-derive 0.11.2", ] [[package]] @@ -2534,13 +2598,35 @@ dependencies = [ "log", "multimap", "petgraph", - "prost", - "prost-types", + "prost 0.10.4", + "prost-types 0.10.1", "regex", "tempfile", "which", ] +[[package]] +name = "prost-build" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d8b442418ea0822409d9e7d047cbf1e7e9e1760b172bf9982cf29d517c93511" +dependencies = [ + "bytes", + "heck", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prettyplease", + "prost 0.11.2", + "prost-types 0.11.2", + "regex", + "syn", + "tempfile", + "which", +] + [[package]] name = "prost-derive" version = "0.10.1" @@ -2554,6 +2640,19 @@ dependencies = [ "syn", ] +[[package]] +name = "prost-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "164ae68b6587001ca506d3bf7f1000bfa248d0e1217b618108fba4ec1d0cc306" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-types" version = "0.10.1" @@ -2561,7 +2660,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" dependencies = [ "bytes", - "prost", + "prost 0.10.4", +] + +[[package]] +name = "prost-types" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "747761bc3dc48f9a34553bf65605cf6cb6288ba219f3450b4275dbd81539551a" +dependencies = [ + "bytes", + "prost 0.11.2", ] [[package]] @@ -3423,6 +3532,32 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "storage_broker" +version = "0.1.0" +dependencies = [ + "async-stream", + 
"bytes", + "clap 4.0.15", + "futures", + "futures-core", + "futures-util", + "git-version", + "humantime", + "hyper", + "metrics", + "once_cell", + "parking_lot 0.12.1", + "prost 0.11.2", + "tokio", + "tokio-stream", + "tonic 0.8.2", + "tonic-build 0.8.2", + "tracing", + "utils", + "workspace_hack", +] + [[package]] name = "str_stack" version = "0.1.0" @@ -3823,8 +3958,40 @@ dependencies = [ "hyper-timeout", "percent-encoding", "pin-project", - "prost", - "prost-derive", + "prost 0.10.4", + "prost-derive 0.10.1", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", +] + +[[package]] +name = "tonic" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55b9af819e54b8f33d453655bef9b9acc171568fb49523078d0cc4e7484200ec" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost 0.11.2", + "prost-derive 0.11.2", "tokio", "tokio-stream", "tokio-util", @@ -3843,7 +4010,20 @@ checksum = "d9263bf4c9bfaae7317c1c2faf7f18491d2fe476f70c414b73bf5d445b00ffa1" dependencies = [ "prettyplease", "proc-macro2", - "prost-build", + "prost-build 0.10.4", + "quote", + "syn", +] + +[[package]] +name = "tonic-build" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c6fd7c2581e36d63388a9e04c350c21beb7a8b059580b2e93993c526899ddc" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build 0.11.2", "quote", "syn", ] @@ -4402,7 +4582,8 @@ dependencies = [ "num-bigint", "num-integer", "num-traits", - "prost", + "prost 0.10.4", + "prost 0.11.2", "rand", "regex", "regex-syntax", diff --git a/Cargo.toml b/Cargo.toml index 0d73710bbb..2f73215d3f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "pageserver", "proxy", "safekeeper", + "storage_broker", "workspace_hack", "libs/*", ] diff --git a/Dockerfile b/Dockerfile index b0d934d480..f0244fa8d3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -44,7 +44,7 @@ COPY . . # Show build caching stats to check if it was used in the end. # Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats. 
RUN set -e \ -&& mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin proxy --locked --release \ +&& mold -run cargo build --bin pageserver --bin pageserver_binutils --bin draw_timeline_dir --bin safekeeper --bin storage_broker --bin proxy --locked --release \ && cachepot -s # Build final image @@ -67,6 +67,7 @@ COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver_binutils /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/draw_timeline_dir /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin +COPY --from=build --chown=neon:neon /home/nonroot/target/release/storage_broker /usr/local/bin COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/ diff --git a/Dockerfile.compute-node-v14 b/Dockerfile.compute-node-v14 index 27e15593ad..ad036338a0 100644 --- a/Dockerfile.compute-node-v14 +++ b/Dockerfile.compute-node-v14 @@ -200,9 +200,6 @@ COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-deb # libreadline8 for psql # libossp-uuid16 for extension ossp-uuid # libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS -# -# Lastly, link compute_ctl into zenith_ctl while we're at it, -# so that we don't need to put this in another layer. RUN apt update && \ apt install --no-install-recommends -y \ libreadline8 \ @@ -211,8 +208,7 @@ RUN apt update && \ libgdal28 \ libproj19 \ libprotobuf-c1 && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* USER postgres ENTRYPOINT ["/usr/local/bin/compute_ctl"] diff --git a/Dockerfile.compute-node-v15 b/Dockerfile.compute-node-v15 index 567848ffd7..4526644421 100644 --- a/Dockerfile.compute-node-v15 +++ b/Dockerfile.compute-node-v15 @@ -200,9 +200,6 @@ COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-deb # libreadline8 for psql # libossp-uuid16 for extension ossp-uuid # libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS -# -# Lastly, link compute_ctl into zenith_ctl while we're at it, -# so that we don't need to put this in another layer. RUN apt update && \ apt install --no-install-recommends -y \ libreadline8 \ @@ -211,8 +208,7 @@ RUN apt update && \ libgdal28 \ libproj19 \ libprotobuf-c1 && \ - rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - ln /usr/local/bin/compute_ctl /usr/local/bin/zenith_ctl + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* USER postgres ENTRYPOINT ["/usr/local/bin/compute_ctl"] diff --git a/README.md b/README.md index 770c24d11f..cda36008d8 100644 --- a/README.md +++ b/README.md @@ -35,12 +35,12 @@ Pageserver consists of: * On Ubuntu or Debian, this set of packages should be sufficient to build the code: ```bash apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \ -libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client +libssl-dev clang pkg-config libpq-dev etcd cmake postgresql-client protobuf-compiler ``` * On Fedora, these packages are needed: ```bash dnf install flex bison readline-devel zlib-devel openssl-devel \ - libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib + libseccomp-devel perl clang cmake etcd postgresql postgresql-contrib protobuf-compiler ``` 2. 
[Install Rust](https://www.rust-lang.org/tools/install) diff --git a/cli-v2-story.md b/cli-v2-story.md deleted file mode 100644 index 1f213c903b..0000000000 --- a/cli-v2-story.md +++ /dev/null @@ -1,188 +0,0 @@ -Create a new Zenith repository in the current directory: - - ~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli init - The files belonging to this database system will be owned by user "heikki". - This user must also own the server process. - - The database cluster will be initialized with locale "en_GB.UTF-8". - The default database encoding has accordingly been set to "UTF8". - The default text search configuration will be set to "english". - - Data page checksums are disabled. - - creating directory tmp ... ok - creating subdirectories ... ok - selecting dynamic shared memory implementation ... posix - selecting default max_connections ... 100 - selecting default shared_buffers ... 128MB - selecting default time zone ... Europe/Helsinki - creating configuration files ... ok - running bootstrap script ... ok - performing post-bootstrap initialization ... ok - syncing data to disk ... ok - - initdb: warning: enabling "trust" authentication for local connections - You can change this by editing pg_hba.conf or using the option -A, or - --auth-local and --auth-host, the next time you run initdb. - new zenith repository was created in .zenith - -Initially, there is only one branch: - - ~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch - main - -Start a local Postgres instance on the branch: - - ~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start main - Creating data directory from snapshot at 0/15FFB08... - waiting for server to start....2021-04-13 09:27:43.919 EEST [984664] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit - 2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv6 address "::1", port 5432 - 2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv4 address "127.0.0.1", port 5432 - 2021-04-13 09:27:43.927 EEST [984664] LOG: listening on Unix socket "/tmp/.s.PGSQL.5432" - 2021-04-13 09:27:43.939 EEST [984665] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST - 2021-04-13 09:27:43.939 EEST [984665] LOG: creating missing WAL directory "pg_wal/archive_status" - 2021-04-13 09:27:44.189 EEST [984665] LOG: database system was not properly shut down; automatic recovery in progress - 2021-04-13 09:27:44.195 EEST [984665] LOG: invalid record length at 0/15FFB80: wanted 24, got 0 - 2021-04-13 09:27:44.195 EEST [984665] LOG: redo is not required - 2021-04-13 09:27:44.225 EEST [984664] LOG: database system is ready to accept connections - done - server started - -Run some commands against it: - - ~/git-sandbox/zenith (cli-v2)$ psql postgres -c "create table foo (t text);" - CREATE TABLE - ~/git-sandbox/zenith (cli-v2)$ psql postgres -c "insert into foo values ('inserted on the main branch');" - INSERT 0 1 - ~/git-sandbox/zenith (cli-v2)$ psql postgres -c "select * from foo" - t - ----------------------------- - inserted on the main branch - (1 row) - -Create a new branch called 'experimental'. We create it from the -current end of the 'main' branch, but you could specify a different -LSN as the start point instead. 
- - ~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch experimental main - branching at end of WAL: 0/161F478 - - ~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch - experimental - main - -Start another Postgres instance off the 'experimental' branch: - - ~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433 - Creating data directory from snapshot at 0/15FFB08... - waiting for server to start....2021-04-13 09:28:41.874 EEST [984766] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit - 2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv6 address "::1", port 5433 - 2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv4 address "127.0.0.1", port 5433 - 2021-04-13 09:28:41.883 EEST [984766] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433" - 2021-04-13 09:28:41.896 EEST [984767] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST - 2021-04-13 09:28:42.265 EEST [984767] LOG: database system was not properly shut down; automatic recovery in progress - 2021-04-13 09:28:42.269 EEST [984767] LOG: redo starts at 0/15FFB80 - 2021-04-13 09:28:42.272 EEST [984767] LOG: invalid record length at 0/161F4B0: wanted 24, got 0 - 2021-04-13 09:28:42.272 EEST [984767] LOG: redo done at 0/161F478 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s - 2021-04-13 09:28:42.321 EEST [984766] LOG: database system is ready to accept connections - done - server started - -Insert some a row on the 'experimental' branch: - - ~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo" - t - ----------------------------- - inserted on the main branch - (1 row) - - ~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "insert into foo values ('inserted on experimental')" - INSERT 0 1 - ~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo" - t - ----------------------------- - inserted on the main branch - inserted on experimental - (2 rows) - -See that the other Postgres instance is still running on 'main' branch on port 5432: - - - ~/git-sandbox/zenith (cli-v2)$ psql postgres -p5432 -c "select * from foo" - t - ----------------------------- - inserted on the main branch - (1 row) - - - - -Everything is stored in the .zenith directory: - - ~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/ - total 12 - drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 datadirs - drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 refs - drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 timelines - -The 'datadirs' directory contains the datadirs of the running instances: - - ~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/ - total 8 - drwx------ 18 heikki heikki 4096 Apr 13 09:27 3c0c634c1674079b2c6d4edf7c91523e - drwx------ 18 heikki heikki 4096 Apr 13 09:28 697e3c103d4b1763cd6e82e4ff361d76 - ~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/3c0c634c1674079b2c6d4edf7c91523e/ - total 124 - drwxr-xr-x 5 heikki heikki 4096 Apr 13 09:27 base - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 global - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_commit_ts - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_dynshmem - -rw------- 1 heikki heikki 4760 Apr 13 09:27 pg_hba.conf - -rw------- 1 heikki heikki 1636 Apr 13 09:27 pg_ident.conf - drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:32 pg_logical - drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 pg_multixact - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_notify - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_replslot - 
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_serial - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_snapshots - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_stat - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:34 pg_stat_tmp - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_subtrans - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_tblspc - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_twophase - -rw------- 1 heikki heikki 3 Apr 13 09:27 PG_VERSION - lrwxrwxrwx 1 heikki heikki 52 Apr 13 09:27 pg_wal -> ../../timelines/3c0c634c1674079b2c6d4edf7c91523e/wal - drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_xact - -rw------- 1 heikki heikki 88 Apr 13 09:27 postgresql.auto.conf - -rw------- 1 heikki heikki 28688 Apr 13 09:27 postgresql.conf - -rw------- 1 heikki heikki 96 Apr 13 09:27 postmaster.opts - -rw------- 1 heikki heikki 149 Apr 13 09:27 postmaster.pid - -Note how 'pg_wal' is just a symlink to the 'timelines' directory. The -datadir is ephemeral, you can delete it at any time, and it can be reconstructed -from the snapshots and WAL stored in the 'timelines' directory. So if you push/pull -the repository, the 'datadirs' are not included. (They are like git working trees) - - ~/git-sandbox/zenith (cli-v2)$ killall -9 postgres - ~/git-sandbox/zenith (cli-v2)$ rm -rf .zenith/datadirs/* - ~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433 - Creating data directory from snapshot at 0/15FFB08... - waiting for server to start....2021-04-13 09:37:05.476 EEST [985340] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit - 2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv6 address "::1", port 5433 - 2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv4 address "127.0.0.1", port 5433 - 2021-04-13 09:37:05.487 EEST [985340] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433" - 2021-04-13 09:37:05.498 EEST [985341] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST - 2021-04-13 09:37:05.808 EEST [985341] LOG: database system was not properly shut down; automatic recovery in progress - 2021-04-13 09:37:05.813 EEST [985341] LOG: redo starts at 0/15FFB80 - 2021-04-13 09:37:05.815 EEST [985341] LOG: invalid record length at 0/161F770: wanted 24, got 0 - 2021-04-13 09:37:05.815 EEST [985341] LOG: redo done at 0/161F738 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s - 2021-04-13 09:37:05.866 EEST [985340] LOG: database system is ready to accept connections - done - server started - ~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo" - t - ----------------------------- - inserted on the main branch - inserted on experimental - (2 rows) - diff --git a/control_plane/Cargo.toml b/control_plane/Cargo.toml index a9d30b4a86..2ab48fa76c 100644 --- a/control_plane/Cargo.toml +++ b/control_plane/Cargo.toml @@ -23,6 +23,7 @@ url = "2.2.2" # Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api # instead, so that recompile times are better. 
pageserver_api = { path = "../libs/pageserver_api" } +postgres_connection = { path = "../libs/postgres_connection" } safekeeper_api = { path = "../libs/safekeeper_api" } utils = { path = "../libs/utils" } workspace_hack = { version = "0.1", path = "../workspace_hack" } diff --git a/control_plane/src/background_process.rs b/control_plane/src/background_process.rs index 860b5344ba..3840e3cd08 100644 --- a/control_plane/src/background_process.rs +++ b/control_plane/src/background_process.rs @@ -49,11 +49,16 @@ pub enum InitialPidFile<'t> { } /// Start a background child process using the parameters given. -pub fn start_process>( +pub fn start_process< + F, + S: AsRef, + EI: IntoIterator, // Not generic AsRef, otherwise empty `envs` prevents type inference +>( process_name: &str, datadir: &Path, command: &Path, args: &[S], + envs: EI, initial_pid_file: InitialPidFile, process_status_check: F, ) -> anyhow::Result @@ -79,6 +84,7 @@ where .stderr(same_file_for_stderr) .args(args); let filled_cmd = fill_aws_secrets_vars(fill_rust_env_vars(background_command)); + filled_cmd.envs(envs); let mut spawned_process = filled_cmd.spawn().with_context(|| { format!("Could not spawn {process_name}, see console output and log files for details.") diff --git a/control_plane/src/compute.rs b/control_plane/src/compute.rs index 359948a8c9..0eec25c51e 100644 --- a/control_plane/src/compute.rs +++ b/control_plane/src/compute.rs @@ -322,6 +322,9 @@ impl PostgresNode { conf.append("shared_preload_libraries", "neon"); conf.append_line(""); conf.append("neon.pageserver_connstring", &pageserver_connstr); + if let AuthType::NeonJWT = auth_type { + conf.append("neon.safekeeper_token_env", "$ZENITH_AUTH_TOKEN"); + } conf.append("neon.tenant_id", &self.tenant_id.to_string()); conf.append("neon.timeline_id", &self.timeline_id.to_string()); if let Some(lsn) = self.lsn { @@ -343,7 +346,7 @@ impl PostgresNode { // To be able to restore database in case of pageserver node crash, safekeeper should not // remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers // (if they are not able to upload WAL to S3). - conf.append("max_replication_write_lag", "500MB"); + conf.append("max_replication_write_lag", "15MB"); conf.append("max_replication_flush_lag", "10GB"); if !self.env.safekeepers.is_empty() { diff --git a/control_plane/src/connection.rs b/control_plane/src/connection.rs deleted file mode 100644 index cca837de6e..0000000000 --- a/control_plane/src/connection.rs +++ /dev/null @@ -1,57 +0,0 @@ -use url::Url; - -#[derive(Debug)] -pub struct PgConnectionConfig { - url: Url, -} - -impl PgConnectionConfig { - pub fn host(&self) -> &str { - self.url.host_str().expect("BUG: no host") - } - - pub fn port(&self) -> u16 { - self.url.port().expect("BUG: no port") - } - - /// Return a `:` string. - pub fn raw_address(&self) -> String { - format!("{}:{}", self.host(), self.port()) - } - - /// Connect using postgres protocol with TLS disabled. - pub fn connect_no_tls(&self) -> Result { - postgres::Client::connect(self.url.as_str(), postgres::NoTls) - } -} - -impl std::str::FromStr for PgConnectionConfig { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - let mut url: Url = s.parse()?; - - match url.scheme() { - "postgres" | "postgresql" => {} - other => anyhow::bail!("invalid scheme: {other}"), - } - - // It's not a valid connection url if host is unavailable. - if url.host().is_none() { - anyhow::bail!(url::ParseError::EmptyHost); - } - - // E.g. `postgres:bar`. 
- if url.cannot_be_a_base() { - anyhow::bail!("URL cannot be a base"); - } - - // Set the default PG port if it's missing. - if url.port().is_none() { - url.set_port(Some(5432)) - .expect("BUG: couldn't set the default port"); - } - - Ok(Self { url }) - } -} diff --git a/control_plane/src/etcd.rs b/control_plane/src/etcd.rs index 60aa5da780..031ffa539b 100644 --- a/control_plane/src/etcd.rs +++ b/control_plane/src/etcd.rs @@ -39,6 +39,7 @@ pub fn start_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> { &etcd_data_dir, &etcd_broker.etcd_binary_path, &args, + [], background_process::InitialPidFile::Create(&pid_file_path), || { for broker_endpoint in &etcd_broker.broker_endpoints { diff --git a/control_plane/src/lib.rs b/control_plane/src/lib.rs index c3b47fe81b..7c1007b133 100644 --- a/control_plane/src/lib.rs +++ b/control_plane/src/lib.rs @@ -9,7 +9,6 @@ mod background_process; pub mod compute; -pub mod connection; pub mod etcd; pub mod local_env; pub mod pageserver; diff --git a/control_plane/src/pageserver.rs b/control_plane/src/pageserver.rs index aec6f5bc2c..ef128109eb 100644 --- a/control_plane/src/pageserver.rs +++ b/control_plane/src/pageserver.rs @@ -6,14 +6,15 @@ use std::path::{Path, PathBuf}; use std::process::Child; use std::{io, result}; -use crate::connection::PgConnectionConfig; use anyhow::{bail, Context}; use pageserver_api::models::{ TenantConfigRequest, TenantCreateRequest, TenantInfo, TimelineCreateRequest, TimelineInfo, }; +use postgres_connection::{parse_host_port, PgConnectionConfig}; use reqwest::blocking::{Client, RequestBuilder, Response}; use reqwest::{IntoUrl, Method}; use thiserror::Error; +use utils::auth::{Claims, Scope}; use utils::{ http::error::HttpErrorBody, id::{TenantId, TimelineId}, @@ -77,30 +78,24 @@ pub struct PageServerNode { impl PageServerNode { pub fn from_env(env: &LocalEnv) -> PageServerNode { + let (host, port) = parse_host_port(&env.pageserver.listen_pg_addr) + .expect("Unable to parse listen_pg_addr"); + let port = port.unwrap_or(5432); let password = if env.pageserver.auth_type == AuthType::NeonJWT { - &env.pageserver.auth_token + Some(env.pageserver.auth_token.clone()) } else { - "" + None }; Self { - pg_connection_config: Self::pageserver_connection_config( - password, - &env.pageserver.listen_pg_addr, - ), + pg_connection_config: PgConnectionConfig::new_host_port(host, port) + .set_password(password), env: env.clone(), http_client: Client::new(), http_base_url: format!("http://{}/v1", env.pageserver.listen_http_addr), } } - /// Construct libpq connection string for connecting to the pageserver. 
- fn pageserver_connection_config(password: &str, listen_addr: &str) -> PgConnectionConfig { - format!("postgresql://no_user:{password}@{listen_addr}/no_db") - .parse() - .unwrap() - } - pub fn initialize( &self, create_tenant: Option, @@ -259,11 +254,21 @@ impl PageServerNode { args.extend(["-c", config_override]); } + let envs = if self.env.pageserver.auth_type != AuthType::Trust { + // Generate a token to connect from the pageserver to a safekeeper + let token = self + .env + .generate_auth_token(&Claims::new(None, Scope::SafekeeperData))?; + vec![("ZENITH_AUTH_TOKEN".to_owned(), token)] + } else { + vec![] + }; background_process::start_process( "pageserver", datadir, &self.env.pageserver_bin(), &args, + envs, background_process::InitialPidFile::Expect(&self.pid_file()), || match self.check_status() { Ok(()) => Ok(true), diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index 0bc35b3680..583d9709d0 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -5,12 +5,12 @@ use std::sync::Arc; use std::{io, result}; use anyhow::Context; +use postgres_connection::PgConnectionConfig; use reqwest::blocking::{Client, RequestBuilder, Response}; use reqwest::{IntoUrl, Method}; use thiserror::Error; use utils::{http::error::HttpErrorBody, id::NodeId}; -use crate::connection::PgConnectionConfig; use crate::pageserver::PageServerNode; use crate::{ background_process, @@ -86,10 +86,7 @@ impl SafekeeperNode { /// Construct libpq connection string for connecting to this safekeeper. fn safekeeper_connection_config(port: u16) -> PgConnectionConfig { - // TODO safekeeper authentication not implemented yet - format!("postgresql://no_user@127.0.0.1:{port}/no_db") - .parse() - .unwrap() + PgConnectionConfig::new_host_port(url::Host::parse("127.0.0.1").unwrap(), port) } pub fn datadir_path_by_id(env: &LocalEnv, sk_id: NodeId) -> PathBuf { @@ -169,6 +166,7 @@ impl SafekeeperNode { &datadir, &self.env.safekeeper_bin(), &args, + [], background_process::InitialPidFile::Expect(&self.pid_file()), || match self.check_status() { Ok(()) => Ok(true), diff --git a/docs/authentication.md b/docs/authentication.md index 9748a7ab0d..c5c4f02833 100644 --- a/docs/authentication.md +++ b/docs/authentication.md @@ -1,30 +1,154 @@ ## Authentication ### Overview +We use JWT tokens in communication between almost all components (compute, pageserver, safekeeper, CLI) regardless of the protocol used (HTTP/PostgreSQL). +Etcd currently has no authentication. +Authentication is optional and is disabled by default for easier debugging. +It is used in some tests, though. +Note that we do not cover authentication with `pg.neon.tech` here. -Current state of authentication includes usage of JWT tokens in communication between compute and pageserver and between CLI and pageserver. JWT token is signed using RSA keys. CLI generates a key pair during call to `neon_local init`. Using following openssl commands: +For HTTP connections we use the Bearer authentication scheme. +For PostgreSQL connections we expect the token to be passed as a password. +There is a caveat for `psql`: it silently truncates passwords to 100 symbols, so to correctly pass JWT via `psql` you have to either use `PGPASSWORD` environment variable, or store password in `psql`'s config file. + +Current token scopes are described in `utils::auth::Scope`. +There are no expiration or rotation schemes. + +_TODO_: some scopes allow both access to server management API and to the data. 
+These probably should be split into multiple scopes.
+
+Tokens should not occur in logs.
+They may sometimes occur in configuration files, although this is discouraged
+because configs may be parsed and dumped into logs.
+
+#### Tokens generation and validation
+JWT tokens are signed using a private key.
+Compute/pageserver/safekeeper use the private key's public counterpart to validate JWT tokens.
+These components should not have access to the private key and may only get tokens from their configuration or external clients.
+
+The key pair is generated once for an installation of compute/pageserver/safekeeper, e.g. by `neon_local init`.
+There is currently no way to rotate the key without bringing down all components.
+
+### CLI
+CLI generates a key pair during a call to `neon_local init` with the following commands:
 ```bash
-openssl genrsa -out private_key.pem 2048
-openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
+openssl genrsa -out auth_private_key.pem 2048
+openssl rsa -in auth_private_key.pem -pubout -outform PEM -out auth_public_key.pem
 ```
-CLI also generates signed token and saves it in the config for later access to pageserver. Now authentication is optional. Pageserver has two variables in config: `auth_validation_public_key_path` and `auth_type`, so when auth type present and set to `NeonJWT` pageserver will require authentication for connections. Actual JWT is passed in password field of connection string. There is a caveat for psql, it silently truncates passwords to 100 symbols, so to correctly pass JWT via psql you have to either use PGPASSWORD environment variable, or store password in psql config file.
+Configuration files for all components point to `auth_public_key.pem` for JWT validation.
+However, authentication is disabled by default.
+There is no way to automatically enable it everywhere; you have to configure each component individually.
 
-Currently there is no authentication between compute and safekeepers, because this communication layer is under heavy refactoring. After this refactoring support for authentication will be added there too. Now safekeeper supports "hardcoded" token passed via environment variable to be able to use callmemaybe command in pageserver.
+CLI also generates a signed token (full access to Pageserver) and saves it in
+the CLI's `config` file under `pageserver.auth_token`.
+Note that pageserver's config does not have any similar parameter.
+CLI is the only component that accesses that token.
+Technically it could generate it from the private key on each run,
+but it does not do that for some reason (_TODO_).
 
-Compute uses token passed via environment variable to communicate to pageserver and in the future to the safekeeper too.
+### Compute
+#### Overview
+Compute is a per-timeline PostgreSQL instance, so it should not have
+any access to data of other tenants.
+All tokens used by a compute are restricted to a specific tenant.
+There is no auth isolation from other timelines of the same tenant,
+but a non-rogue client never accesses another timeline even by accident:
+timeline IDs are random and hard to guess.
 
-JWT authentication now supports two scopes: tenant and pageserverapi. Tenant scope is intended for use in tenant related api calls, e.g. create_branch. Compute launched for particular tenant also uses this scope. Scope pageserver api is intended to be used by console to manage pageserver. For now we have only one management operation - create tenant.
+#### Incoming connections
+All incoming connections are from PostgreSQL clients.
+Their authentication is just plain PostgreSQL authentication and out of scope for this document.
-Examples for token generation in python:
+There is no administrative API except those provided by PostgreSQL.
+
+#### Outgoing connections
+Compute connects to Pageserver to fetch pages.
+The connection string is configured by the `neon.pageserver_connstring` PostgreSQL GUC, e.g. `postgresql://no_user:$ZENITH_AUTH_TOKEN@localhost:15028`.
+The environment variable inside the connection string is substituted with
+the JWT token.
+
+Compute connects to Safekeepers to write and commit data.
+The token is the same for all safekeepers.
+It's stored in an environment variable whose name is configured
+by the `neon.safekeeper_token_env` PostgreSQL GUC.
+If the GUC is unset, no token is passed.
+
+Note that both tokens can be (and typically are) the same;
+the scope is the tenant and the token is usually passed through the
+`$ZENITH_AUTH_TOKEN` environment variable.
+
+### Pageserver
+#### Overview
+Pageserver keeps track of multiple tenants, each having multiple timelines.
+For each timeline, it connects to the corresponding Safekeeper.
+Information about the "corresponding Safekeeper" is published by Safekeepers
+in etcd, but they do not publish access tokens there, otherwise authentication
+would be pointless.
+
+Pageserver keeps a connection to some set of Safekeepers, which
+may or may not correspond to active Computes.
+Hence, we cannot obtain a per-timeline access token from a Compute.
+For example, if the timeline's Compute terminates before all WAL is
+consumed by the Pageserver, the Pageserver continues consuming WAL.
+
+Pageserver replicas' authentication is the same as the main's.
+
+#### Incoming connections
+Pageserver listens for connections from computes.
+Each compute should present a token valid for the timeline's tenant.
+
+Pageserver also has an HTTP API: some parts are per-tenant,
+some parts are server-wide; these are different scopes.
+
+The `auth_type` configuration variable in Pageserver's config may have
+one of three values:
+
+* `Trust` removes all authentication. The outdated `MD5` value does likewise.
+* `NeonJWT` enables JWT validation.
+  Tokens are validated using the public key which lies in a PEM file
+  specified in the `auth_validation_public_key_path` config.
+
+#### Outgoing connections
+Pageserver makes a connection to a Safekeeper for each active timeline.
+As Pageserver may want to access any timeline it has on disk,
+it is given a blanket JWT token to access any data on any Safekeeper.
+This token is passed through an environment variable called `ZENITH_AUTH_TOKEN`
+(non-configurable as of this writing).
+
+A better way _may be_ to store a JWT token for each timeline next to it,
+but maybe not.
+
+### Safekeeper
+#### Overview
+Safekeeper keeps track of multiple tenants, each having multiple timelines.
+
+#### Incoming connections
+Safekeeper accepts connections from Compute/Pageserver; each
+connection corresponds to a specific timeline and requires
+a matching JWT token.
+
+Safekeeper also has an HTTP API: some parts are per-tenant,
+some parts are server-wide; these are different scopes.
+
+The `auth-validation-public-key-path` command-line option controls
+the authentication mode:
+
+* If the option is missing, there is no authentication or JWT token validation.
+* If the option is present, it should be a path to the public key PEM file used for JWT token validation.
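+
+For illustration, a token check in the style described above could look as
+follows. This is a minimal sketch assuming the `jsonwebtoken` crate's v8 API
+and a made-up claims struct; the repository's actual helpers (`Claims`,
+`Scope`) live in `libs/utils/src/auth.rs`, so all names below are
+illustrative rather than the real API:
+
+```rust
+use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
+use serde::Deserialize;
+
+// Illustrative claims shape; the real struct is `utils::auth::Claims`.
+#[derive(Deserialize)]
+struct Claims {
+    scope: String,
+    tenant_id: Option<String>,
+}
+
+fn validate(token: &str, public_key_pem: &[u8]) -> anyhow::Result<Claims> {
+    let key = DecodingKey::from_rsa_pem(public_key_pem)?;
+    let mut validation = Validation::new(Algorithm::RS256);
+    // There are no expiration or rotation schemes, so do not require `exp`.
+    validation.required_spec_claims.clear();
+    validation.validate_exp = false;
+    Ok(decode::<Claims>(token, &key, &validation)?.claims)
+}
+```
+
+Generation is symmetric: `jsonwebtoken::encode` with the RS256 private key
+produced by the `openssl` commands in the CLI section above.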
+
+#### Outgoing connections
+No connections are initiated by a Safekeeper.
+
+### In the source code
+Tests do not use authentication by default.
+If you need it, you can enable it by configuring the test's environment:
 ```python
-# generate pageserverapi token
-management_token = jwt.encode({"scope": "pageserverapi"}, auth_keys.priv, algorithm="RS256")
-
-# generate tenant token
-tenant_token = jwt.encode({"scope": "tenant", "tenant_id": ps.initial_tenant}, auth_keys.priv, algorithm="RS256")
+neon_env_builder.auth_enabled = True
 ```
-Utility functions to work with jwts in rust are located in libs/utils/src/auth.rs
+You will have to generate tokens if you want to access components inside the test directly;
+use the `AuthKeys.generate_*_token` methods for that.
+If you create a new scope, please create a new method to prevent typos in the scope's name.
diff --git a/docs/sourcetree.md b/docs/sourcetree.md
index 4ea83dd068..309f5a6966 100644
--- a/docs/sourcetree.md
+++ b/docs/sourcetree.md
@@ -2,6 +2,11 @@
 Below you will find a brief overview of each subdir in the source tree in alphabetical order.
 
+`storage_broker`:
+
+Neon storage broker, providing messaging between safekeepers and pageservers.
+[storage_broker.md](./storage_broker.md)
+
 `/control_plane`:
 
 Local control plane.
diff --git a/docs/storage_broker.md b/docs/storage_broker.md
new file mode 100644
index 0000000000..827fe5c5f2
--- /dev/null
+++ b/docs/storage_broker.md
@@ -0,0 +1,27 @@
+# Storage broker
+
+The storage broker targets two issues:
+- Allowing safekeepers and pageservers to learn which nodes also hold their
+  timelines, and the timeline statuses there.
+- Avoiding O(n^2) connections between storage nodes while doing so.
+
+This is used
+- By pageservers to determine the most advanced and alive safekeeper to pull WAL from.
+- By safekeepers to synchronize on the timeline: advance
+  `remote_consistent_lsn` and `backup_lsn`, and choose who offloads WAL to S3.
+
+Technically, it is a simple stateless pub-sub message broker based on tonic
+(gRPC), which makes multiplexing easy. Since it is stateless, fault tolerance can be
+provided by k8s; there is no built-in replication support, though it is not hard
+to add.
+
+Currently, the only message is `SafekeeperTimelineInfo`. Each safekeeper, for
+each active timeline, periodically pushes the timeline status to the broker.
+Other nodes subscribe and receive this info, using it as described above.
+
+The broker serves `/metrics` on the same port as the gRPC service.
+
+`grpcurl` can be used to check which values are currently being pushed:
+```
+grpcurl -proto broker/proto/broker.proto -d '{"all":{}}' -plaintext localhost:50051 storage_broker.BrokerService/SubscribeSafekeeperInfo
+```
diff --git a/libs/pageserver_api/src/models.rs b/libs/pageserver_api/src/models.rs
index 750585b58b..9c94b53318 100644
--- a/libs/pageserver_api/src/models.rs
+++ b/libs/pageserver_api/src/models.rs
@@ -21,11 +21,11 @@ pub enum TenantState {
     Attaching,
     /// Tenant is fully operational
     Active,
-    /// A tenant is recognized by pageserver, but it is being detached or the system is being
-    /// shut down.
+    /// A tenant is recognized by pageserver, but it is being detached or the
+    /// system is being shut down.
     Paused,
-    /// A tenant is recognized by the pageserver, but can no longer used for any operations,
-    /// because it failed to get activated.
+    /// A tenant is recognized by the pageserver, but can no longer be used for
+    /// any operations, because it failed to be activated.
     Broken,
 }
 
@@ -54,7 +54,8 @@ pub enum TimelineState {
     /// A timeline is recognized by pageserver, but not yet ready to operate and not allowed to
     /// automatically become Active after certain events: only a management call can change this status.
     Paused,
-    /// A timeline is recognized by the pageserver, but no longer used for any operations, as failed to get activated.
+    /// A timeline is recognized by the pageserver, but can no longer be used for
+    /// any operations, because it failed to be activated.
     Broken,
 }
 
diff --git a/libs/postgres_connection/Cargo.toml b/libs/postgres_connection/Cargo.toml
new file mode 100644
index 0000000000..314f3c6f1c
--- /dev/null
+++ b/libs/postgres_connection/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "postgres_connection"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+anyhow = "1.0"
+itertools = "0.10.3"
+postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev = "d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
+tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
+url = "2.2.2"
+workspace_hack = { version = "0.1", path = "../../workspace_hack" }
+
+[dev-dependencies]
+once_cell = "1.13.0"
diff --git a/libs/postgres_connection/src/lib.rs b/libs/postgres_connection/src/lib.rs
new file mode 100644
index 0000000000..35344a9168
--- /dev/null
+++ b/libs/postgres_connection/src/lib.rs
@@ -0,0 +1,253 @@
+use anyhow::{bail, Context};
+use itertools::Itertools;
+use std::borrow::Cow;
+use std::fmt;
+use url::Host;
+
+/// Parses a string of format either `host:port` or `host` into a corresponding pair.
+/// The `host` part should be a correct `url::Host`, while `port` (if present) should be
+/// a valid decimal u16 of digits only.
+pub fn parse_host_port<S: AsRef<str>>(host_port: S) -> Result<(Host, Option<u16>), anyhow::Error> {
+    let (host, port) = match host_port.as_ref().rsplit_once(':') {
+        Some((host, port)) => (
+            host,
+            // +80 is a valid u16, but not a valid port
+            if port.chars().all(|c| c.is_ascii_digit()) {
+                Some(port.parse::<u16>().context("Unable to parse port")?)
+            } else {
+                bail!("Port contains a non-ascii-digit")
+            },
+        ),
+        None => (host_port.as_ref(), None), // No colons, no port specified
+    };
+    let host = Host::parse(host).context("Unable to parse host")?;
+    Ok((host, port))
+}
+
+#[cfg(test)]
+mod tests_parse_host_port {
+    use crate::parse_host_port;
+    use url::Host;
+
+    #[test]
+    fn test_normal() {
+        let (host, port) = parse_host_port("hello:123").unwrap();
+        assert_eq!(host, Host::Domain("hello".to_owned()));
+        assert_eq!(port, Some(123));
+    }
+
+    #[test]
+    fn test_no_port() {
+        let (host, port) = parse_host_port("hello").unwrap();
+        assert_eq!(host, Host::Domain("hello".to_owned()));
+        assert_eq!(port, None);
+    }
+
+    #[test]
+    fn test_ipv6() {
+        let (host, port) = parse_host_port("[::1]:123").unwrap();
+        assert_eq!(host, Host::<String>::Ipv6(std::net::Ipv6Addr::LOCALHOST));
+        assert_eq!(port, Some(123));
+    }
+
+    #[test]
+    fn test_invalid_host() {
+        assert!(parse_host_port("hello world").is_err());
+    }
+
+    #[test]
+    fn test_invalid_port() {
+        assert!(parse_host_port("hello:+80").is_err());
+    }
+}
+
+#[derive(Clone)]
+pub struct PgConnectionConfig {
+    host: Host,
+    port: u16,
+    password: Option<String>,
+    options: Vec<String>,
+}
+
+/// A simplified PostgreSQL connection configuration. Supports only a subset of possible
+/// settings for simplicity.
A password getter or `to_connection_string` methods are not +/// added by design to avoid accidentally leaking password through logging, command line +/// arguments to a child process, or likewise. +impl PgConnectionConfig { + pub fn new_host_port(host: Host, port: u16) -> Self { + PgConnectionConfig { + host, + port, + password: None, + options: vec![], + } + } + + pub fn host(&self) -> &Host { + &self.host + } + + pub fn port(&self) -> u16 { + self.port + } + + pub fn set_host(mut self, h: Host) -> Self { + self.host = h; + self + } + + pub fn set_port(mut self, p: u16) -> Self { + self.port = p; + self + } + + pub fn set_password(mut self, s: Option) -> Self { + self.password = s; + self + } + + pub fn extend_options, S: Into>(mut self, i: I) -> Self { + self.options.extend(i.into_iter().map(|s| s.into())); + self + } + + /// Return a `:` string. + pub fn raw_address(&self) -> String { + format!("{}:{}", self.host(), self.port()) + } + + /// Build a client library-specific connection configuration. + /// Used for testing and when we need to add some obscure configuration + /// elements at the last moment. + pub fn to_tokio_postgres_config(&self) -> tokio_postgres::Config { + // Use `tokio_postgres::Config` instead of `postgres::Config` because + // the former supports more options to fiddle with later. + let mut config = tokio_postgres::Config::new(); + config.host(&self.host().to_string()).port(self.port); + if let Some(password) = &self.password { + config.password(password); + } + if !self.options.is_empty() { + // These options are command-line options and should be escaped before being passed + // as an 'options' connection string parameter, see + // https://www.postgresql.org/docs/15/libpq-connect.html#LIBPQ-CONNECT-OPTIONS + // + // They will be space-separated, so each space inside an option should be escaped, + // and all backslashes should be escaped before that. Although we don't expect options + // with spaces at the moment, they're supported by PostgreSQL. Hence we support them + // in this typesafe interface. + // + // We use `Cow` to avoid allocations in the best case (no escaping). A fully imperative + // solution would require 1-2 allocations in the worst case as well, but it's harder to + // implement and this function is hardly a bottleneck. The function is only called around + // establishing a new connection. + #[allow(unstable_name_collisions)] + config.options( + &self + .options + .iter() + .map(|s| { + if s.contains(['\\', ' ']) { + Cow::Owned(s.replace('\\', "\\\\").replace(' ', "\\ ")) + } else { + Cow::Borrowed(s.as_str()) + } + }) + .intersperse(Cow::Borrowed(" ")) // TODO: use impl from std once it's stabilized + .collect::(), + ); + } + config + } + + /// Connect using postgres protocol with TLS disabled. + pub fn connect_no_tls(&self) -> Result { + postgres::Config::from(self.to_tokio_postgres_config()).connect(postgres::NoTls) + } +} + +impl fmt::Debug for PgConnectionConfig { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // We want `password: Some(REDACTED-STRING)`, not `password: Some("REDACTED-STRING")` + // so even if the password is `REDACTED-STRING` (quite unlikely) there is no confusion. + // Hence `format_args!()`, it returns a "safe" string which is not escaped by `Debug`. 
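+        // The `options` field is currently not included in this Debug output.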
+        f.debug_struct("PgConnectionConfig")
+            .field("host", &self.host)
+            .field("port", &self.port)
+            .field(
+                "password",
+                &self
+                    .password
+                    .as_ref()
+                    .map(|_| format_args!("REDACTED-STRING")),
+            )
+            .finish()
+    }
+}
+
+#[cfg(test)]
+mod tests_pg_connection_config {
+    use crate::PgConnectionConfig;
+    use once_cell::sync::Lazy;
+    use url::Host;
+
+    static STUB_HOST: Lazy<Host> = Lazy::new(|| Host::Domain("stub.host.example".to_owned()));
+
+    #[test]
+    fn test_no_password() {
+        let cfg = PgConnectionConfig::new_host_port(STUB_HOST.clone(), 123);
+        assert_eq!(cfg.host(), &*STUB_HOST);
+        assert_eq!(cfg.port(), 123);
+        assert_eq!(cfg.raw_address(), "stub.host.example:123");
+        assert_eq!(
+            format!("{:?}", cfg),
+            "PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: None }"
+        );
+    }
+
+    #[test]
+    fn test_ipv6() {
+        // May be a special case because the hostname contains a colon.
+        let cfg = PgConnectionConfig::new_host_port(Host::parse("[::1]").unwrap(), 123);
+        assert_eq!(
+            cfg.host(),
+            &Host::<String>::Ipv6(std::net::Ipv6Addr::LOCALHOST)
+        );
+        assert_eq!(cfg.port(), 123);
+        assert_eq!(cfg.raw_address(), "[::1]:123");
+        assert_eq!(
+            format!("{:?}", cfg),
+            "PgConnectionConfig { host: Ipv6(::1), port: 123, password: None }"
+        );
+    }
+
+    #[test]
+    fn test_with_password() {
+        let cfg = PgConnectionConfig::new_host_port(STUB_HOST.clone(), 123)
+            .set_password(Some("password".to_owned()));
+        assert_eq!(cfg.host(), &*STUB_HOST);
+        assert_eq!(cfg.port(), 123);
+        assert_eq!(cfg.raw_address(), "stub.host.example:123");
+        assert_eq!(
+            format!("{:?}", cfg),
+            "PgConnectionConfig { host: Domain(\"stub.host.example\"), port: 123, password: Some(REDACTED-STRING) }"
+        );
+    }
+
+    #[test]
+    fn test_with_options() {
+        let cfg = PgConnectionConfig::new_host_port(STUB_HOST.clone(), 123).extend_options([
+            "hello",
+            "world",
+            "with space",
+            "and \\ backslashes",
+        ]);
+        assert_eq!(cfg.host(), &*STUB_HOST);
+        assert_eq!(cfg.port(), 123);
+        assert_eq!(cfg.raw_address(), "stub.host.example:123");
+        assert_eq!(
+            cfg.to_tokio_postgres_config().get_options(),
+            Some("hello world with\\ space and\\ \\\\\\ backslashes")
+        );
+    }
+}
diff --git a/libs/postgres_ffi/src/lib.rs b/libs/postgres_ffi/src/lib.rs
index f3dad159be..492ec9748a 100644
--- a/libs/postgres_ffi/src/lib.rs
+++ b/libs/postgres_ffi/src/lib.rs
@@ -163,6 +163,27 @@ pub fn page_set_lsn(pg: &mut [u8], lsn: Lsn) {
     pg[4..8].copy_from_slice(&(lsn.0 as u32).to_le_bytes());
 }
 
+// This is a port of the function with the same name from freespace.c.
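+// (In PostgreSQL's FSM layout, leaf pages are interleaved on disk with the upper-level
+// pages that address them, which is why this logical-to-physical mapping is non-trivial.)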
+// The only difference is that it does not have "level" parameter because XLogRecordPageWithFreeSpace +// always call it with level=FSM_BOTTOM_LEVEL +pub fn fsm_logical_to_physical(addr: BlockNumber) -> BlockNumber { + let mut leafno = addr; + const FSM_TREE_DEPTH: u32 = if pg_constants::SLOTS_PER_FSM_PAGE >= 1626 { + 3 + } else { + 4 + }; + + /* Count upper level nodes required to address the leaf page */ + let mut pages: BlockNumber = 0; + for _l in 0..FSM_TREE_DEPTH { + pages += leafno + 1; + leafno /= pg_constants::SLOTS_PER_FSM_PAGE; + } + /* Turn the page count into 0-based block number */ + pages - 1 +} + pub mod waldecoder { use crate::{v14, v15}; diff --git a/libs/postgres_ffi/src/pg_constants.rs b/libs/postgres_ffi/src/pg_constants.rs index 6aaa739a69..09678353af 100644 --- a/libs/postgres_ffi/src/pg_constants.rs +++ b/libs/postgres_ffi/src/pg_constants.rs @@ -197,6 +197,16 @@ pub const XLOG_CHECKPOINT_SHUTDOWN: u8 = 0x00; pub const XLOG_CHECKPOINT_ONLINE: u8 = 0x10; pub const XLP_LONG_HEADER: u16 = 0x0002; +/* From fsm_internals.h */ +const FSM_NODES_PER_PAGE: usize = BLCKSZ as usize - SIZEOF_PAGE_HEADER_DATA - 4; +const FSM_NON_LEAF_NODES_PER_PAGE: usize = BLCKSZ as usize / 2 - 1; +const FSM_LEAF_NODES_PER_PAGE: usize = FSM_NODES_PER_PAGE - FSM_NON_LEAF_NODES_PER_PAGE; +pub const SLOTS_PER_FSM_PAGE: u32 = FSM_LEAF_NODES_PER_PAGE as u32; + +/* From visibilitymap.c */ +pub const VM_HEAPBLOCKS_PER_PAGE: u32 = + (BLCKSZ as usize - SIZEOF_PAGE_HEADER_DATA) as u32 * (8 / 2); // MAPSIZE * (BITS_PER_BYTE / BITS_PER_HEAPBLOCK) + // List of subdirectories inside pgdata. // Copied from src/bin/initdb/initdb.c pub const PGDATA_SUBDIRS: [&str; 22] = [ diff --git a/libs/tenant_size_model/src/lib.rs b/libs/tenant_size_model/src/lib.rs index c7ec1e8870..86814b5f25 100644 --- a/libs/tenant_size_model/src/lib.rs +++ b/libs/tenant_size_model/src/lib.rs @@ -33,8 +33,8 @@ pub struct Segment { /// Logical size before this state start_size: u64, - /// Logical size at this state - pub end_size: u64, + /// Logical size at this state. Can be None in the last Segment of a branch. + pub end_size: Option, /// Indices to [`Storage::segments`] /// @@ -115,7 +115,7 @@ impl Storage { start_lsn: 0, end_lsn: 0, start_size: 0, - end_size: 0, + end_size: Some(0), children_after: Vec::new(), }; @@ -125,6 +125,39 @@ impl Storage { } } + /// Advances the branch with a new point, at given LSN. + pub fn insert_point( + &mut self, + branch: &Q, + op: Cow<'static, str>, + lsn: u64, + size: Option, + ) where + K: std::borrow::Borrow, + Q: std::hash::Hash + Eq, + { + let lastseg_id = *self.branches.get(branch).unwrap(); + let newseg_id = self.segments.len(); + let lastseg = &mut self.segments[lastseg_id]; + + assert!(lsn > lastseg.end_lsn); + + let newseg = Segment { + op, + parent: Some(lastseg_id), + start_lsn: lastseg.end_lsn, + end_lsn: lsn, + start_size: lastseg.end_size.unwrap(), + end_size: size, + children_after: Vec::new(), + needed: false, + }; + lastseg.children_after.push(newseg_id); + + self.segments.push(newseg); + *self.branches.get_mut(branch).expect("read already") = newseg_id; + } + /// Advances the branch with the named operation, by the relative LSN and logical size bytes. 
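    /// Unlike [`insert_point`], which records an absolute LSN and size, this applies
    /// relative deltas on top of the branch's last segment.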
pub fn modify_branch( &mut self, @@ -145,8 +178,8 @@ impl Storage { parent: Some(lastseg_id), start_lsn: lastseg.end_lsn, end_lsn: lastseg.end_lsn + lsn_bytes, - start_size: lastseg.end_size, - end_size: (lastseg.end_size as i64 + size_bytes) as u64, + start_size: lastseg.end_size.unwrap(), + end_size: Some((lastseg.end_size.unwrap() as i64 + size_bytes) as u64), children_after: Vec::new(), needed: false, }; @@ -321,7 +354,7 @@ impl Storage { Some(SegmentSize { seg_id, method: SnapshotAfter, - this_size: seg.end_size, + this_size: seg.end_size.unwrap(), children, }) } else { diff --git a/libs/tenant_size_model/src/main.rs b/libs/tenant_size_model/src/main.rs index 47c0e8122f..f5bea399a1 100644 --- a/libs/tenant_size_model/src/main.rs +++ b/libs/tenant_size_model/src/main.rs @@ -174,7 +174,7 @@ fn graphviz_recurse(segments: &[Segment], node: &SegmentSize) { let seg_id = node.seg_id; let seg = segments.get(seg_id).unwrap(); let lsn = seg.end_lsn; - let size = seg.end_size; + let size = seg.end_size.unwrap_or(0); let method = node.method; println!(" {{"); @@ -226,7 +226,7 @@ fn graphviz_recurse(segments: &[Segment], node: &SegmentSize) { print!( " label=\"{} / {}\"", next.end_lsn - seg.end_lsn, - (next.end_size as i128 - seg.end_size as i128) + (next.end_size.unwrap_or(0) as i128 - seg.end_size.unwrap_or(0) as i128) ); } else { print!(" label=\"{}: {}\"", next.op, next.end_lsn - seg.end_lsn); diff --git a/libs/utils/src/auth.rs b/libs/utils/src/auth.rs index b190b0d1c5..4fa85346ad 100644 --- a/libs/utils/src/auth.rs +++ b/libs/utils/src/auth.rs @@ -7,7 +7,7 @@ use serde; use std::fs; use std::path::Path; -use anyhow::{bail, Result}; +use anyhow::Result; use jsonwebtoken::{ decode, encode, Algorithm, DecodingKey, EncodingKey, Header, TokenData, Validation, }; @@ -21,8 +21,16 @@ const JWT_ALGORITHM: Algorithm = Algorithm::RS256; #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "lowercase")] pub enum Scope { + // Provides access to all data for a specific tenant (specified in `struct Claims` below) + // TODO: join these two? Tenant, + // Provides blanket access to all tenants on the pageserver plus pageserver-wide APIs. + // Should only be used e.g. for status check/tenant creation/list. PageServerApi, + // Provides blanket access to all data on the safekeeper plus safekeeper-wide APIs. + // Should only be used e.g. for status check. + // Currently also used for connection from any pageserver to any safekeeper. + SafekeeperData, } #[serde_as] @@ -40,22 +48,6 @@ impl Claims { } } -pub fn check_permission(claims: &Claims, tenant_id: Option) -> Result<()> { - match (&claims.scope, tenant_id) { - (Scope::Tenant, None) => { - bail!("Attempt to access management api with tenant scope. Permission denied") - } - (Scope::Tenant, Some(tenant_id)) => { - if claims.tenant_id.unwrap() != tenant_id { - bail!("Tenant id mismatch. 
Permission denied") - } - Ok(()) - } - (Scope::PageServerApi, None) => Ok(()), // access to management api for PageServerApi scope - (Scope::PageServerApi, Some(_)) => Ok(()), // access to tenant api using PageServerApi scope - } -} - pub struct JwtAuth { decoding_key: DecodingKey, validation: Validation, diff --git a/libs/utils/src/http/endpoint.rs b/libs/utils/src/http/endpoint.rs index 7a519929cf..fecbbb945b 100644 --- a/libs/utils/src/http/endpoint.rs +++ b/libs/utils/src/http/endpoint.rs @@ -1,6 +1,5 @@ -use crate::auth::{self, Claims, JwtAuth}; +use crate::auth::{Claims, JwtAuth}; use crate::http::error; -use crate::id::TenantId; use anyhow::anyhow; use hyper::header::AUTHORIZATION; use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server}; @@ -144,10 +143,14 @@ pub fn auth_middleware( }) } -pub fn check_permission(req: &Request, tenant_id: Option) -> Result<(), ApiError> { +pub fn check_permission_with( + req: &Request, + check_permission: impl Fn(&Claims) -> Result<(), anyhow::Error>, +) -> Result<(), ApiError> { match req.context::() { - Some(claims) => Ok(auth::check_permission(&claims, tenant_id) - .map_err(|err| ApiError::Forbidden(err.to_string()))?), + Some(claims) => { + Ok(check_permission(&claims).map_err(|err| ApiError::Forbidden(err.to_string()))?) + } None => Ok(()), // claims is None because auth is disabled } } diff --git a/libs/utils/src/id.rs b/libs/utils/src/id.rs index 7ce324614d..f84bcb793f 100644 --- a/libs/utils/src/id.rs +++ b/libs/utils/src/id.rs @@ -3,6 +3,13 @@ use std::{fmt, str::FromStr}; use hex::FromHex; use rand::Rng; use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum IdError { + #[error("invalid id length {0}")] + SliceParseError(usize), +} /// Neon ID is a 128-bit random ID. /// Used to represent various identifiers. Provides handy utility methods and impls. @@ -22,6 +29,15 @@ impl Id { Id::from(arr) } + pub fn from_slice(src: &[u8]) -> Result { + if src.len() != 16 { + return Err(IdError::SliceParseError(src.len())); + } + let mut id_array = [0u8; 16]; + id_array.copy_from_slice(src); + Ok(id_array.into()) + } + pub fn as_arr(&self) -> [u8; 16] { self.0 } @@ -100,6 +116,10 @@ macro_rules! id_newtype { $t(Id::get_from_buf(buf)) } + pub fn from_slice(src: &[u8]) -> Result<$t, IdError> { + Ok($t(Id::from_slice(src)?)) + } + pub fn as_arr(&self) -> [u8; 16] { self.0.as_arr() } diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml index e81a631a85..61c7b8ae97 100644 --- a/pageserver/Cargo.toml +++ b/pageserver/Cargo.toml @@ -62,6 +62,7 @@ walkdir = "2.3.2" etcd_broker = { path = "../libs/etcd_broker" } metrics = { path = "../libs/metrics" } pageserver_api = { path = "../libs/pageserver_api" } +postgres_connection = { path = "../libs/postgres_connection" } postgres_ffi = { path = "../libs/postgres_ffi" } pq_proto = { path = "../libs/pq_proto" } remote_storage = { path = "../libs/remote_storage" } diff --git a/pageserver/src/auth.rs b/pageserver/src/auth.rs new file mode 100644 index 0000000000..268117cae2 --- /dev/null +++ b/pageserver/src/auth.rs @@ -0,0 +1,22 @@ +use anyhow::{bail, Result}; +use utils::auth::{Claims, Scope}; +use utils::id::TenantId; + +pub fn check_permission(claims: &Claims, tenant_id: Option) -> Result<()> { + match (&claims.scope, tenant_id) { + (Scope::Tenant, None) => { + bail!("Attempt to access management api with tenant scope. Permission denied") + } + (Scope::Tenant, Some(tenant_id)) => { + if claims.tenant_id.unwrap() != tenant_id { + bail!("Tenant id mismatch. 
Permission denied")
+            }
+            Ok(())
+        }
+        (Scope::PageServerApi, None) => Ok(()), // access to management api for PageServerApi scope
+        (Scope::PageServerApi, Some(_)) => Ok(()), // access to tenant api using PageServerApi scope
+        (Scope::SafekeeperData, _) => {
+            bail!("SafekeeperData scope makes no sense for Pageserver")
+        }
+    }
+}
diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs
index a10d32cb57..32d3fca47c 100644
--- a/pageserver/src/bin/pageserver.rs
+++ b/pageserver/src/bin/pageserver.rs
@@ -1,5 +1,7 @@
 //! Main entry point for the Page Server executable.
 
+use std::env::{var, VarError};
+use std::sync::Arc;
 use std::{env, ops::ControlFlow, path::Path, str::FromStr};
 
 use anyhow::{anyhow, Context};
@@ -270,6 +272,23 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
     };
     info!("Using auth: {:#?}", conf.auth_type);
 
+    match var("ZENITH_AUTH_TOKEN") {
+        Ok(v) => {
+            info!("Loaded JWT token for authentication with Safekeeper");
+            pageserver::config::SAFEKEEPER_AUTH_TOKEN
+                .set(Arc::new(v))
+                .map_err(|_| anyhow!("Could not initialize SAFEKEEPER_AUTH_TOKEN"))?;
+        }
+        Err(VarError::NotPresent) => {
+            info!("No JWT token for authentication with Safekeeper detected");
+        }
+        Err(e) => {
+            return Err(e).with_context(|| {
+                "Failed to read the ZENITH_AUTH_TOKEN environment variable"
+            })
+        }
+    };
+
     let remote_storage = conf
         .remote_storage_config
         .as_ref()
diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs
index e9b89830c4..1ac07f6ebc 100644
--- a/pageserver/src/config.rs
+++ b/pageserver/src/config.rs
@@ -10,9 +10,11 @@ use std::env;
 use utils::crashsafe::path_with_suffix_extension;
 use utils::id::ConnectionId;
 
+use once_cell::sync::OnceCell;
 use std::num::NonZeroUsize;
 use std::path::{Path, PathBuf};
 use std::str::FromStr;
+use std::sync::Arc;
 use std::time::Duration;
 use toml_edit;
 use toml_edit::{Document, Item};
@@ -25,11 +27,7 @@ use utils::{
 
 use crate::tenant::{TENANT_ATTACHING_MARKER_FILENAME, TIMELINES_SEGMENT_NAME};
 use crate::tenant_config::{TenantConf, TenantConfOpt};
-
-/// The name of the metadata file pageserver creates per timeline.
-pub const METADATA_FILE_NAME: &str = "metadata";
-pub const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit";
-const TENANT_CONFIG_NAME: &str = "config";
+use crate::{METADATA_FILE_NAME, TENANT_CONFIG_NAME, TIMELINE_UNINIT_MARK_SUFFIX};
 
 pub mod defaults {
     use crate::tenant_config::defaults::*;
@@ -145,6 +143,15 @@ pub struct PageServerConf {
     pub concurrent_tenant_size_logical_size_queries: ConfigurableSemaphore,
 }
 
+/// We do not want to store this in a PageServerConf because the latter may be logged
+/// and/or serialized at a whim, while the token is secret. Currently this token is the
+/// same for accessing all tenants/timelines, but may become per-tenant/per-timeline in
+/// the future; more tokens and auth may arrive for etcd and/or its rewrite (see
+/// https://github.com/neondatabase/neon/issues/2394), completely changing the logic.
+/// Hence, we resort to a global variable for now instead of passing the token from the
+/// startup code to the connection code through a dozen layers.
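+///
+/// Set once at startup from the `ZENITH_AUTH_TOKEN` environment variable (see
+/// `bin/pageserver.rs` above) and handed to the WAL receiver's connection manager.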
+pub static SAFEKEEPER_AUTH_TOKEN: OnceCell> = OnceCell::new(); + #[derive(Debug, Clone, PartialEq, Eq)] pub enum ProfilingConfig { Disabled, diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index 225eba7569..b8f467cd02 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -661,6 +661,7 @@ components: - disk_consistent_lsn - awaits_download - state + - latest_gc_cutoff_lsn properties: timeline_id: type: string @@ -705,6 +706,9 @@ components: type: boolean state: type: string + latest_gc_cutoff_lsn: + type: string + format: hex # These 'local' and 'remote' fields just duplicate some of the fields # above. They are kept for backwards-compatibility. They can be removed, diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 6b65266f1c..dff2266033 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -19,7 +19,7 @@ use crate::{config::PageServerConf, tenant_mgr}; use utils::{ auth::JwtAuth, http::{ - endpoint::{self, attach_openapi_ui, auth_middleware, check_permission}, + endpoint::{self, attach_openapi_ui, auth_middleware, check_permission_with}, error::{ApiError, HttpErrorBody}, json::{json_request, json_response}, request::parse_request_param, @@ -74,6 +74,12 @@ fn get_config(request: &Request) -> &'static PageServerConf { get_state(request).conf } +fn check_permission(request: &Request, tenant_id: Option) -> Result<(), ApiError> { + check_permission_with(request, |claims| { + crate::auth::check_permission(claims, tenant_id) + }) +} + // Helper function to construct a TimelineInfo struct for a timeline fn build_timeline_info( tenant_state: TenantState, @@ -102,7 +108,7 @@ fn build_timeline_info_common( let guard = timeline.last_received_wal.lock().unwrap(); if let Some(info) = guard.as_ref() { ( - Some(info.wal_source_connstr.clone()), + Some(format!("{:?}", info.wal_source_connconf)), // Password is hidden, but it's for statistics only. Some(info.last_received_msg_lsn), Some(info.last_received_msg_ts), ) @@ -169,6 +175,7 @@ fn build_timeline_info_common( // healthcheck handler async fn status_handler(request: Request) -> Result, ApiError> { + check_permission(&request, None)?; let config = get_config(&request); json_response(StatusCode::OK, StatusResponse { id: config.id }) } diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index 2d5b66f575..00ed3b3dc6 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -1,3 +1,4 @@ +mod auth; pub mod basebackup; pub mod config; pub mod http; @@ -22,6 +23,8 @@ pub mod walreceiver; pub mod walrecord; pub mod walredo; +use std::path::Path; + use tracing::info; use crate::task_mgr::TaskKind; @@ -103,17 +106,40 @@ fn exponential_backoff_duration_seconds(n: u32, base_increment: f64, max_seconds } } -/// A suffix to be used during file sync from the remote storage, -/// to ensure that we do not leave corrupted files that pretend to be layers. -const TEMP_FILE_SUFFIX: &str = "___temp"; +/// The name of the metadata file pageserver creates per timeline. +/// Full path: `tenants//timelines//metadata`. +pub const METADATA_FILE_NAME: &str = "metadata"; -pub fn is_temporary(path: &std::path::Path) -> bool { +/// Per-tenant configuration file. +/// Full path: `tenants//config`. +pub const TENANT_CONFIG_NAME: &str = "config"; + +/// A suffix used for various temporary files. Any temporary files found in the +/// data directory at pageserver startup can be automatically removed. 
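+/// (Checked by `is_temporary` below.)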
+pub const TEMP_FILE_SUFFIX: &str = "___temp"; + +/// A marker file to mark that a timeline directory was not fully initialized. +/// If a timeline directory with this marker is encountered at pageserver startup, +/// the timeline directory and the marker file are both removed. +/// Full path: `tenants//timelines/___uninit`. +pub const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit"; + +pub fn is_temporary(path: &Path) -> bool { match path.file_name() { Some(name) => name.to_string_lossy().ends_with(TEMP_FILE_SUFFIX), None => false, } } +pub fn is_uninit_mark(path: &Path) -> bool { + match path.file_name() { + Some(name) => name + .to_string_lossy() + .ends_with(TIMELINE_UNINIT_MARK_SUFFIX), + None => false, + } +} + #[cfg(test)] mod backoff_defaults_tests { use super::*; diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index b829f05395..12d9aa3a47 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -32,7 +32,7 @@ use tokio_util::io::SyncIoBridge; use tracing::*; use utils::id::ConnectionId; use utils::{ - auth::{self, Claims, JwtAuth, Scope}, + auth::{Claims, JwtAuth, Scope}, id::{TenantId, TimelineId}, lsn::Lsn, postgres_backend::AuthType, @@ -40,6 +40,7 @@ use utils::{ simple_rcu::RcuReadGuard, }; +use crate::auth::check_permission; use crate::basebackup; use crate::config::{PageServerConf, ProfilingConfig}; use crate::import_datadir::import_wal_from_tar; @@ -671,7 +672,7 @@ impl PageServerHandler { .claims .as_ref() .expect("claims presence already checked"); - auth::check_permission(claims, tenant_id) + check_permission(claims, tenant_id) } } diff --git a/pageserver/src/tenant.rs b/pageserver/src/tenant.rs index 4416307ecf..dd121c23a2 100644 --- a/pageserver/src/tenant.rs +++ b/pageserver/src/tenant.rs @@ -46,8 +46,8 @@ use std::time::{Duration, Instant}; use self::metadata::TimelineMetadata; use crate::config::PageServerConf; -use crate::config::TIMELINE_UNINIT_MARK_SUFFIX; use crate::import_datadir; +use crate::is_uninit_mark; use crate::metrics::{remove_tenant_metrics, STORAGE_TIME}; use crate::repository::GcResult; use crate::storage_sync::create_remote_timeline_client; @@ -1072,14 +1072,7 @@ impl Tenant { .context("Cannot branch off the timeline that's not present in pageserver")?; if let Some(lsn) = ancestor_start_lsn.as_mut() { - // Wait for the WAL to arrive and be processed on the parent branch up - // to the requested branch point. The repository code itself doesn't - // require it, but if we start to receive WAL on the new timeline, - // decoding the new WAL might need to look up previous pages, relation - // sizes etc. and that would get confused if the previous page versions - // are not in the repository yet. *lsn = lsn.align(); - ancestor_timeline.wait_lsn(*lsn).await?; let ancestor_ancestor_lsn = ancestor_timeline.get_ancestor_lsn(); if ancestor_ancestor_lsn > *lsn { @@ -1091,6 +1084,14 @@ impl Tenant { ancestor_ancestor_lsn, ); } + + // Wait for the WAL to arrive and be processed on the parent branch up + // to the requested branch point. The repository code itself doesn't + // require it, but if we start to receive WAL on the new timeline, + // decoding the new WAL might need to look up previous pages, relation + // sizes etc. and that would get confused if the previous page versions + // are not in the repository yet. + ancestor_timeline.wait_lsn(*lsn).await?; } self.branch_timeline(ancestor_timeline_id, new_timeline_id, ancestor_start_lsn)? 
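An aside on the suffix conventions introduced in pageserver/src/lib.rs above: both helpers reduce to a plain file-name suffix check. A minimal standalone sketch follows; the paths are hypothetical, merely following the `tenants/<tenant_id>/timelines/<timeline_id>` layout named in the doc comments, and `has_suffix` is an illustrative stand-in for the two real helpers:

use std::path::{Path, PathBuf};

const TEMP_FILE_SUFFIX: &str = "___temp";
const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit";

// Same shape as `is_temporary`/`is_uninit_mark` above: classify a path
// purely by its file-name suffix.
fn has_suffix(path: &Path, suffix: &str) -> bool {
    path.file_name()
        .map(|name| name.to_string_lossy().ends_with(suffix))
        .unwrap_or(false)
}

fn main() {
    // Hypothetical paths following the documented layout.
    let uninit_mark = PathBuf::from("tenants/t1/timelines/tl1___uninit");
    let temp_download = PathBuf::from("tenants/t1/timelines/tl1/layer___temp");
    let layer = PathBuf::from("tenants/t1/timelines/tl1/layer");

    assert!(has_suffix(&uninit_mark, TIMELINE_UNINIT_MARK_SUFFIX));
    assert!(has_suffix(&temp_download, TEMP_FILE_SUFFIX));
    assert!(!has_suffix(&layer, TEMP_FILE_SUFFIX));
    assert!(!has_suffix(&layer, TIMELINE_UNINIT_MARK_SUFFIX));
}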
@@ -2167,15 +2168,6 @@ impl Tenant { } } -fn is_uninit_mark(path: &Path) -> bool { - match path.file_name() { - Some(name) => name - .to_string_lossy() - .ends_with(TIMELINE_UNINIT_MARK_SUFFIX), - None => false, - } -} - fn remove_timeline_and_uninit_mark(timeline_dir: &Path, uninit_mark: &Path) -> anyhow::Result<()> { fs::remove_dir_all(&timeline_dir) .or_else(|e| { @@ -2598,11 +2590,11 @@ pub mod harness { #[cfg(test)] mod tests { use super::*; - use crate::config::METADATA_FILE_NAME; use crate::keyspace::KeySpaceAccum; use crate::repository::{Key, Value}; use crate::tenant::harness::*; use crate::DEFAULT_PG_VERSION; + use crate::METADATA_FILE_NAME; use bytes::BytesMut; use hex_literal::hex; use once_cell::sync::Lazy; diff --git a/pageserver/src/tenant/size.rs b/pageserver/src/tenant/size.rs index 86e685fd4c..24d9b2a10e 100644 --- a/pageserver/src/tenant/size.rs +++ b/pageserver/src/tenant/size.rs @@ -183,6 +183,19 @@ pub(super) async fn gather_inputs( } } + // all timelines also have an end point if they have made any progress + if last_record_lsn > timeline.get_ancestor_lsn() + && !interesting_lsns + .iter() + .any(|(lsn, _)| lsn == &last_record_lsn) + { + updates.push(Update { + lsn: last_record_lsn, + command: Command::EndOfBranch, + timeline_id: timeline.timeline_id, + }); + } + timeline_inputs.insert( timeline.timeline_id, TimelineInputs { @@ -270,48 +283,22 @@ impl ModelInputs { // impossible to always determine the a one main branch. let mut storage = tenant_size_model::Storage::>::new(None); - // tracking these not to require modifying the current implementation of the size model, - // which works in relative LSNs and sizes. - let mut last_state: HashMap = HashMap::new(); - for update in &self.updates { let Update { lsn, command: op, timeline_id, } = update; + let Lsn(now) = *lsn; match op { Command::Update(sz) => { - let latest = last_state.get_mut(timeline_id).ok_or_else(|| { - anyhow::anyhow!( - "ordering-mismatch: there must had been a previous state for {timeline_id}" - ) - })?; - - let lsn_bytes = { - let Lsn(now) = lsn; - let Lsn(prev) = latest.0; - debug_assert!(prev <= *now, "self.updates should had been sorted"); - now - prev - }; - - let size_diff = - i64::try_from(*sz as i128 - latest.1 as i128).with_context(|| { - format!("size difference i64 overflow for {timeline_id}") - })?; - - storage.modify_branch(&Some(*timeline_id), "".into(), lsn_bytes, size_diff); - *latest = (*lsn, *sz); + storage.insert_point(&Some(*timeline_id), "".into(), now, Some(*sz)); + } + Command::EndOfBranch => { + storage.insert_point(&Some(*timeline_id), "".into(), now, None); } Command::BranchFrom(parent) => { storage.branch(parent, Some(*timeline_id)); - - let size = parent - .as_ref() - .and_then(|id| last_state.get(id)) - .map(|x| x.1) - .unwrap_or(0); - last_state.insert(*timeline_id, (*lsn, size)); } } } @@ -320,10 +307,7 @@ impl ModelInputs { } } -/// Single size model update. -/// -/// Sizing model works with relative increments over latest branch state. -/// Updates are absolute, so additional state needs to be tracked when applying. 
+/// A point of interest in the tree of branches #[serde_with::serde_as] #[derive( Debug, PartialEq, PartialOrd, Eq, Ord, Clone, Copy, serde::Serialize, serde::Deserialize, @@ -342,6 +326,7 @@ struct Update { enum Command { Update(u64), BranchFrom(#[serde_as(as = "Option")] Option), + EndOfBranch, } impl std::fmt::Debug for Command { @@ -351,6 +336,7 @@ impl std::fmt::Debug for Command { match self { Self::Update(arg0) => write!(f, "Update({arg0})"), Self::BranchFrom(arg0) => write!(f, "BranchFrom({arg0:?})"), + Self::EndOfBranch => write!(f, "EndOfBranch"), } } } diff --git a/pageserver/src/tenant/timeline.rs b/pageserver/src/tenant/timeline.rs index 75394145bd..189386aed9 100644 --- a/pageserver/src/tenant/timeline.rs +++ b/pageserver/src/tenant/timeline.rs @@ -33,7 +33,7 @@ use crate::tenant::{ storage_layer::{Layer, ValueReconstructResult, ValueReconstructState}, }; -use crate::config::{PageServerConf, METADATA_FILE_NAME}; +use crate::config::PageServerConf; use crate::keyspace::{KeyPartitioning, KeySpace}; use crate::metrics::TimelineMetrics; use crate::pgdatadir_mapping::BlockNumber; @@ -42,6 +42,7 @@ use crate::pgdatadir_mapping::{is_rel_fsm_block_key, is_rel_vm_block_key}; use crate::tenant_config::TenantConfOpt; use pageserver_api::reltag::RelTag; +use postgres_connection::PgConnectionConfig; use postgres_ffi::to_pg_timestamp; use utils::{ id::{TenantId, TimelineId}, @@ -56,6 +57,7 @@ use crate::task_mgr::TaskKind; use crate::walreceiver::{is_etcd_client_initialized, spawn_connection_manager_task}; use crate::walredo::WalRedoManager; use crate::CheckpointConfig; +use crate::METADATA_FILE_NAME; use crate::ZERO_PAGE; use crate::{is_temporary, task_mgr}; use crate::{page_cache, storage_sync::index::LayerFileMetadata}; @@ -298,7 +300,7 @@ impl LogicalSize { } pub struct WalReceiverInfo { - pub wal_source_connstr: String, + pub wal_source_connconf: PgConnectionConfig, pub last_received_msg_lsn: Lsn, pub last_received_msg_ts: u128, } @@ -880,6 +882,7 @@ impl Timeline { walreceiver_connect_timeout, lagging_wal_timeout, max_lsn_wal_lag, + crate::config::SAFEKEEPER_AUTH_TOKEN.get().cloned(), ); } diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index bd7c08a31f..e8a2e99f06 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -24,7 +24,7 @@ use anyhow::Context; use postgres_ffi::v14::nonrelfile_utils::clogpage_precedes; use postgres_ffi::v14::nonrelfile_utils::slru_may_delete_clogsegment; -use postgres_ffi::{page_is_new, page_set_lsn}; +use postgres_ffi::{fsm_logical_to_physical, page_is_new, page_set_lsn}; use anyhow::Result; use bytes::{Buf, Bytes, BytesMut}; @@ -612,20 +612,19 @@ impl<'a> WalIngest<'a> { forknum: FSM_FORKNUM, }; - // FIXME: 'blkno' stored in the WAL record is the new size of the - // heap. The formula for calculating the new size of the FSM is - // pretty complicated (see FreeSpaceMapPrepareTruncateRel() in - // PostgreSQL), and we should also clear bits in the tail FSM block, - // and update the upper level FSM pages. None of that has been - // implemented. What we do instead, is always just truncate the FSM - // to zero blocks. That's bad for performance, but safe. (The FSM - // isn't needed for correctness, so we could also leave garbage in - // it. Seems more tidy to zap it away.) 
- if rec.blkno != 0 { - info!("Partial truncation of FSM is not supported"); + let fsm_logical_page_no = rec.blkno / pg_constants::SLOTS_PER_FSM_PAGE; + let mut fsm_physical_page_no = fsm_logical_to_physical(fsm_logical_page_no); + if rec.blkno % pg_constants::SLOTS_PER_FSM_PAGE != 0 { + // Tail of last remaining FSM page has to be zeroed. + // We are not precise here and instead of digging in FSM bitmap format just clear the whole page. + modification.put_rel_page_image(rel, fsm_physical_page_no, ZERO_PAGE.clone())?; + fsm_physical_page_no += 1; + } + let nblocks = self.get_relsize(rel, modification.lsn)?; + if nblocks > fsm_physical_page_no { + // check if something to do: FSM is larger than truncate position + self.put_rel_truncation(modification, rel, fsm_physical_page_no)?; } - let num_fsm_blocks = 0; - self.put_rel_truncation(modification, rel, num_fsm_blocks)?; } if (rec.flags & pg_constants::SMGR_TRUNCATE_VM) != 0 { let rel = RelTag { @@ -635,16 +634,18 @@ impl<'a> WalIngest<'a> { forknum: VISIBILITYMAP_FORKNUM, }; - // FIXME: Like with the FSM above, the logic to truncate the VM - // correctly has not been implemented. Just zap it away completely, - // always. Unlike the FSM, the VM must never have bits incorrectly - // set. From a correctness point of view, it's always OK to clear - // bits or remove it altogether, though. - if rec.blkno != 0 { - info!("Partial truncation of VM is not supported"); + let mut vm_page_no = rec.blkno / pg_constants::VM_HEAPBLOCKS_PER_PAGE; + if rec.blkno % pg_constants::VM_HEAPBLOCKS_PER_PAGE != 0 { + // Tail of last remaining vm page has to be zeroed. + // We are not precise here and instead of digging in VM bitmap format just clear the whole page. + modification.put_rel_page_image(rel, vm_page_no, ZERO_PAGE.clone())?; + vm_page_no += 1; + } + let nblocks = self.get_relsize(rel, modification.lsn)?; + if nblocks > vm_page_no { + // check if something to do: VM is larger than truncate position + self.put_rel_truncation(modification, rel, vm_page_no)?; } - let num_vm_blocks = 0; - self.put_rel_truncation(modification, rel, num_vm_blocks)?; } Ok(()) } diff --git a/pageserver/src/walreceiver/connection_manager.rs b/pageserver/src/walreceiver/connection_manager.rs index 0654e3e22b..9a1b55ac1c 100644 --- a/pageserver/src/walreceiver/connection_manager.rs +++ b/pageserver/src/walreceiver/connection_manager.rs @@ -35,6 +35,7 @@ use crate::{ exponential_backoff, walreceiver::get_etcd_client, DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, }; +use postgres_connection::{parse_host_port, PgConnectionConfig}; use utils::{ id::{NodeId, TenantTimelineId}, lsn::Lsn, @@ -49,6 +50,7 @@ pub fn spawn_connection_manager_task( wal_connect_timeout: Duration, lagging_wal_timeout: Duration, max_lsn_wal_lag: NonZeroU64, + auth_token: Option>, ) { let mut etcd_client = get_etcd_client().clone(); @@ -69,6 +71,7 @@ pub fn spawn_connection_manager_task( wal_connect_timeout, lagging_wal_timeout, max_lsn_wal_lag, + auth_token, ); loop { select! { @@ -247,7 +250,7 @@ async fn connection_manager_loop_step( walreceiver_state .change_connection( new_candidate.safekeeper_id, - new_candidate.wal_source_connstr, + new_candidate.wal_source_connconf, ) .await } @@ -359,6 +362,7 @@ struct WalreceiverState { wal_connection_retries: HashMap, /// Data about all timelines, available for connection, fetched from etcd, grouped by their corresponding safekeeper node id. wal_stream_candidates: HashMap, + auth_token: Option>, } /// Current connection data. 
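A quick sanity check of the FSM truncation arithmetic in the hunk above. This is a standalone sketch, not repository code: the constants restate the `pg_constants.rs` definitions assuming the stock PostgreSQL values `BLCKSZ = 8192` and `SIZEOF_PAGE_HEADER_DATA = 24`, and `fsm_logical_to_physical` mirrors the port added in `postgres_ffi` earlier in this diff:

// Restated from pg_constants.rs above (values assumed, see lead-in).
const BLCKSZ: usize = 8192;
const SIZEOF_PAGE_HEADER_DATA: usize = 24;
const FSM_NODES_PER_PAGE: usize = BLCKSZ - SIZEOF_PAGE_HEADER_DATA - 4;
const FSM_NON_LEAF_NODES_PER_PAGE: usize = BLCKSZ / 2 - 1;
const SLOTS_PER_FSM_PAGE: u32 = (FSM_NODES_PER_PAGE - FSM_NON_LEAF_NODES_PER_PAGE) as u32;

// Mirrors the freespace.c port added in postgres_ffi above.
fn fsm_logical_to_physical(addr: u32) -> u32 {
    let mut leafno = addr;
    const FSM_TREE_DEPTH: u32 = 3; // SLOTS_PER_FSM_PAGE (4069) >= 1626
    let mut pages = 0;
    for _ in 0..FSM_TREE_DEPTH {
        pages += leafno + 1;
        leafno /= SLOTS_PER_FSM_PAGE;
    }
    pages - 1 // 0-based physical block number
}

fn main() {
    assert_eq!(SLOTS_PER_FSM_PAGE, 4069);
    // Leaf page 0 sits after the two upper-level pages, i.e. at physical block 2.
    assert_eq!(fsm_logical_to_physical(0), 2);
    // Truncating the heap to 10_000 blocks (a hypothetical value): the slot for
    // that block lives on logical leaf page 10_000 / 4069 = 2, physical block 4,
    // so the FSM is truncated at block 4 (or 5, when the tail page must first be
    // zeroed), exactly as the hunk above computes.
    assert_eq!(fsm_logical_to_physical(10_000 / SLOTS_PER_FSM_PAGE), 4);
}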
@@ -407,6 +411,7 @@ impl WalreceiverState { wal_connect_timeout: Duration, lagging_wal_timeout: Duration, max_lsn_wal_lag: NonZeroU64, + auth_token: Option>, ) -> Self { let id = TenantTimelineId { tenant_id: timeline.tenant_id, @@ -421,11 +426,16 @@ impl WalreceiverState { wal_connection: None, wal_stream_candidates: HashMap::new(), wal_connection_retries: HashMap::new(), + auth_token, } } /// Shuts down the current connection (if any) and immediately starts another one with the given connection string. - async fn change_connection(&mut self, new_sk_id: NodeId, new_wal_source_connstr: String) { + async fn change_connection( + &mut self, + new_sk_id: NodeId, + new_wal_source_connconf: PgConnectionConfig, + ) { self.drop_old_connection(true).await; let id = self.id; @@ -435,7 +445,7 @@ impl WalreceiverState { async move { super::walreceiver_connection::handle_walreceiver_connection( timeline, - new_wal_source_connstr, + new_wal_source_connconf, events_sender, cancellation, connect_timeout, @@ -575,7 +585,7 @@ impl WalreceiverState { Some(existing_wal_connection) => { let connected_sk_node = existing_wal_connection.sk_id; - let (new_sk_id, new_safekeeper_etcd_data, new_wal_source_connstr) = + let (new_sk_id, new_safekeeper_etcd_data, new_wal_source_connconf) = self.select_connection_candidate(Some(connected_sk_node))?; let now = Utc::now().naive_utc(); @@ -586,7 +596,7 @@ impl WalreceiverState { if latest_interaciton > self.wal_connect_timeout { return Some(NewWalConnectionCandidate { safekeeper_id: new_sk_id, - wal_source_connstr: new_wal_source_connstr, + wal_source_connconf: new_wal_source_connconf, reason: ReconnectReason::NoKeepAlives { last_keep_alive: Some( existing_wal_connection.status.latest_connection_update, @@ -611,7 +621,7 @@ impl WalreceiverState { if new_sk_lsn_advantage >= self.max_lsn_wal_lag.get() { return Some(NewWalConnectionCandidate { safekeeper_id: new_sk_id, - wal_source_connstr: new_wal_source_connstr, + wal_source_connconf: new_wal_source_connconf, reason: ReconnectReason::LaggingWal { current_commit_lsn, new_commit_lsn, @@ -685,7 +695,7 @@ impl WalreceiverState { { return Some(NewWalConnectionCandidate { safekeeper_id: new_sk_id, - wal_source_connstr: new_wal_source_connstr, + wal_source_connconf: new_wal_source_connconf, reason: ReconnectReason::NoWalTimeout { current_lsn, current_commit_lsn, @@ -704,11 +714,11 @@ impl WalreceiverState { self.wal_connection.as_mut().unwrap().discovered_new_wal = discovered_new_wal; } None => { - let (new_sk_id, _, new_wal_source_connstr) = + let (new_sk_id, _, new_wal_source_connconf) = self.select_connection_candidate(None)?; return Some(NewWalConnectionCandidate { safekeeper_id: new_sk_id, - wal_source_connstr: new_wal_source_connstr, + wal_source_connconf: new_wal_source_connconf, reason: ReconnectReason::NoExistingConnection, }); } @@ -726,7 +736,7 @@ impl WalreceiverState { fn select_connection_candidate( &self, node_to_omit: Option, - ) -> Option<(NodeId, &SkTimelineInfo, String)> { + ) -> Option<(NodeId, &SkTimelineInfo, PgConnectionConfig)> { self.applicable_connection_candidates() .filter(|&(sk_id, _, _)| Some(sk_id) != node_to_omit) .max_by_key(|(_, info, _)| info.commit_lsn) @@ -736,7 +746,7 @@ impl WalreceiverState { /// Some safekeepers are filtered by the retry cooldown. 
fn applicable_connection_candidates( &self, - ) -> impl Iterator { + ) -> impl Iterator { let now = Utc::now().naive_utc(); self.wal_stream_candidates @@ -754,9 +764,13 @@ impl WalreceiverState { }) .filter_map(|(sk_id, etcd_info)| { let info = &etcd_info.timeline; - match wal_stream_connection_string( + match wal_stream_connection_config( self.id, info.safekeeper_connstr.as_deref()?, + match &self.auth_token { + None => None, + Some(x) => Some(x), + }, ) { Ok(connstr) => Some((*sk_id, info, connstr)), Err(e) => { @@ -797,10 +811,12 @@ impl WalreceiverState { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug)] struct NewWalConnectionCandidate { safekeeper_id: NodeId, - wal_source_connstr: String, + wal_source_connconf: PgConnectionConfig, + // This field is used in `derive(Debug)` only. + #[allow(dead_code)] reason: ReconnectReason, } @@ -828,34 +844,31 @@ enum ReconnectReason { }, } -fn wal_stream_connection_string( +fn wal_stream_connection_config( TenantTimelineId { tenant_id, timeline_id, }: TenantTimelineId, listen_pg_addr_str: &str, -) -> anyhow::Result { - let sk_connstr = format!("postgresql://no_user@{listen_pg_addr_str}/no_db"); - sk_connstr - .parse() - .context("bad url") - .and_then(|url: url::Url| { - let host = url.host_str().context("host is missing")?; - let port = url.port().unwrap_or(5432); // default PG port - - Ok(format!( - "host={host} \ - port={port} \ - options='-c timeline_id={timeline_id} tenant_id={tenant_id}'" - )) - }) - .with_context(|| format!("Failed to parse pageserver connection URL '{sk_connstr}'")) + auth_token: Option<&str>, +) -> anyhow::Result { + let (host, port) = + parse_host_port(&listen_pg_addr_str).context("Unable to parse listen_pg_addr_str")?; + let port = port.unwrap_or(5432); + Ok(PgConnectionConfig::new_host_port(host, port) + .extend_options([ + "-c".to_owned(), + format!("timeline_id={}", timeline_id), + format!("tenant_id={}", tenant_id), + ]) + .set_password(auth_token.map(|s| s.to_owned()))) } #[cfg(test)] mod tests { use super::*; use crate::tenant::harness::{TenantHarness, TIMELINE_ID}; + use url::Host; #[tokio::test] async fn no_connection_no_candidate() -> anyhow::Result<()> { @@ -992,7 +1005,7 @@ mod tests { peer_horizon_lsn: None, local_start_lsn: None, - safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()), + safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()), }, etcd_version: 0, latest_update: now, @@ -1064,7 +1077,7 @@ mod tests { peer_horizon_lsn: None, local_start_lsn: None, - safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()), + safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()), }, etcd_version: 0, latest_update: now, @@ -1080,9 +1093,10 @@ mod tests { ReconnectReason::NoExistingConnection, "Should select new safekeeper due to missing connection, even if there's also a lag in the wal over the threshold" ); - assert!(only_candidate - .wal_source_connstr - .contains(DUMMY_SAFEKEEPER_CONNSTR)); + assert_eq!( + only_candidate.wal_source_connconf.host(), + &Host::Domain(DUMMY_SAFEKEEPER_HOST.to_owned()) + ); let selected_lsn = 100_000; state.wal_stream_candidates = HashMap::from([ @@ -1116,7 +1130,7 @@ mod tests { peer_horizon_lsn: None, local_start_lsn: None, - safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()), + safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()), }, etcd_version: 0, latest_update: now, @@ -1151,9 +1165,10 @@ mod tests { ReconnectReason::NoExistingConnection, "Should select new safekeeper due to missing connection, even if there's also a lag in 
the wal over the threshold" ); - assert!(biggest_wal_candidate - .wal_source_connstr - .contains(DUMMY_SAFEKEEPER_CONNSTR)); + assert_eq!( + biggest_wal_candidate.wal_source_connconf.host(), + &Host::Domain(DUMMY_SAFEKEEPER_HOST.to_owned()) + ); Ok(()) } @@ -1181,7 +1196,7 @@ mod tests { peer_horizon_lsn: None, local_start_lsn: None, - safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()), + safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()), }, etcd_version: 0, latest_update: now, @@ -1199,7 +1214,7 @@ mod tests { peer_horizon_lsn: None, local_start_lsn: None, - safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()), + safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()), }, etcd_version: 0, latest_update: now, @@ -1270,7 +1285,7 @@ mod tests { peer_horizon_lsn: None, local_start_lsn: None, - safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()), + safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()), }, etcd_version: 0, latest_update: now, @@ -1310,9 +1325,10 @@ mod tests { }, "Should select bigger WAL safekeeper if it starts to lag enough" ); - assert!(over_threshcurrent_candidate - .wal_source_connstr - .contains("advanced_by_lsn_safekeeper")); + assert_eq!( + over_threshcurrent_candidate.wal_source_connconf.host(), + &Host::Domain("advanced_by_lsn_safekeeper".to_owned()) + ); Ok(()) } @@ -1361,7 +1377,7 @@ mod tests { peer_horizon_lsn: None, local_start_lsn: None, - safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()), + safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()), }, etcd_version: 0, latest_update: now, @@ -1384,9 +1400,10 @@ mod tests { } unexpected => panic!("Unexpected reason: {unexpected:?}"), } - assert!(over_threshcurrent_candidate - .wal_source_connstr - .contains(DUMMY_SAFEKEEPER_CONNSTR)); + assert_eq!( + over_threshcurrent_candidate.wal_source_connconf.host(), + &Host::Domain(DUMMY_SAFEKEEPER_HOST.to_owned()) + ); Ok(()) } @@ -1434,7 +1451,7 @@ mod tests { peer_horizon_lsn: None, local_start_lsn: None, - safekeeper_connstr: Some(DUMMY_SAFEKEEPER_CONNSTR.to_string()), + safekeeper_connstr: Some(DUMMY_SAFEKEEPER_HOST.to_string()), }, etcd_version: 0, latest_update: now, @@ -1463,14 +1480,15 @@ mod tests { } unexpected => panic!("Unexpected reason: {unexpected:?}"), } - assert!(over_threshcurrent_candidate - .wal_source_connstr - .contains(DUMMY_SAFEKEEPER_CONNSTR)); + assert_eq!( + over_threshcurrent_candidate.wal_source_connconf.host(), + &Host::Domain(DUMMY_SAFEKEEPER_HOST.to_owned()) + ); Ok(()) } - const DUMMY_SAFEKEEPER_CONNSTR: &str = "safekeeper_connstr"; + const DUMMY_SAFEKEEPER_HOST: &str = "safekeeper_connstr"; async fn dummy_state(harness: &TenantHarness<'_>) -> WalreceiverState { WalreceiverState { @@ -1491,6 +1509,7 @@ mod tests { wal_connection: None, wal_stream_candidates: HashMap::new(), wal_connection_retries: HashMap::new(), + auth_token: None, } } } diff --git a/pageserver/src/walreceiver/walreceiver_connection.rs b/pageserver/src/walreceiver/walreceiver_connection.rs index f3aabff973..cf2a99f1b5 100644 --- a/pageserver/src/walreceiver/walreceiver_connection.rs +++ b/pageserver/src/walreceiver/walreceiver_connection.rs @@ -29,6 +29,7 @@ use crate::{ walingest::WalIngest, walrecord::DecodedWALRecord, }; +use postgres_connection::PgConnectionConfig; use postgres_ffi::waldecoder::WalStreamDecoder; use pq_proto::ReplicationFeedback; use utils::lsn::Lsn; @@ -55,22 +56,23 @@ pub struct WalConnectionStatus { /// messages as we go. 
pub async fn handle_walreceiver_connection( timeline: Arc, - wal_source_connstr: String, + wal_source_connconf: PgConnectionConfig, events_sender: watch::Sender>, mut cancellation: watch::Receiver<()>, connect_timeout: Duration, ) -> anyhow::Result<()> { // Connect to the database in replication mode. - info!("connecting to {wal_source_connstr}"); - let connect_cfg = format!("{wal_source_connstr} application_name=pageserver replication=true"); + info!("connecting to {wal_source_connconf:?}"); - let (mut replication_client, connection) = time::timeout( - connect_timeout, - tokio_postgres::connect(&connect_cfg, postgres::NoTls), - ) - .await - .context("Timed out while waiting for walreceiver connection to open")? - .context("Failed to open walreceiver connection")?; + let (mut replication_client, connection) = { + let mut config = wal_source_connconf.to_tokio_postgres_config(); + config.application_name("pageserver"); + config.replication_mode(tokio_postgres::config::ReplicationMode::Physical); + time::timeout(connect_timeout, config.connect(postgres::NoTls)) + .await + .context("Timed out while waiting for walreceiver connection to open")? + .context("Failed to open walreceiver connection")? + }; info!("connected!"); let mut connection_status = WalConnectionStatus { @@ -300,7 +302,7 @@ pub async fn handle_walreceiver_connection( // Update the status about what we just received. This is shown in the mgmt API. let last_received_wal = WalReceiverInfo { - wal_source_connstr: wal_source_connstr.to_owned(), + wal_source_connconf: wal_source_connconf.clone(), last_received_msg_lsn: last_lsn, last_received_msg_ts: ts .duration_since(SystemTime::UNIX_EPOCH) diff --git a/pgxn/neon/libpagestore.c b/pgxn/neon/libpagestore.c index df92a1e2f4..ae8275168d 100644 --- a/pgxn/neon/libpagestore.c +++ b/pgxn/neon/libpagestore.c @@ -44,6 +44,7 @@ PGconn *pageserver_conn = NULL; WaitEventSet *pageserver_conn_wes = NULL; char *page_server_connstring_raw; +char *safekeeper_token_env; int n_unflushed_requests = 0; int flush_every_n_requests = 8; @@ -418,6 +419,15 @@ pg_init_libpagestore(void) 0, /* no flags required */ NULL, NULL, NULL); + DefineCustomStringVariable("neon.safekeeper_token_env", + "the environment variable containing JWT token for authentication with Safekeepers, the convention is to either unset or set to $ZENITH_AUTH_TOKEN", + NULL, + &safekeeper_token_env, + NULL, + PGC_POSTMASTER, + 0, /* no flags required */ + NULL, NULL, NULL); + DefineCustomStringVariable("neon.timeline_id", "Neon timeline_id the server is running on", NULL, @@ -481,6 +491,24 @@ pg_init_libpagestore(void) neon_timeline_walproposer = neon_timeline; neon_tenant_walproposer = neon_tenant; + /* retrieve the token for Safekeeper, if present */ + if (safekeeper_token_env != NULL) { + if (safekeeper_token_env[0] != '$') { + ereport(ERROR, + (errcode(ERRCODE_CONNECTION_EXCEPTION), + errmsg("expected safekeeper auth token environment variable's name starting with $ but found: %s", + safekeeper_token_env))); + } + neon_safekeeper_token_walproposer = getenv(&safekeeper_token_env[1]); + if (!neon_safekeeper_token_walproposer) { + ereport(ERROR, + (errcode(ERRCODE_CONNECTION_EXCEPTION), + errmsg("cannot get safekeeper auth token, environment variable %s is not set", + &safekeeper_token_env[1]))); + } + neon_log(LOG, "using safekeeper auth token from environment variable"); + } + if (page_server_connstring && page_server_connstring[0]) { neon_log(PageStoreTrace, "set neon_smgr hook"); diff --git a/pgxn/neon/walproposer.c 
b/pgxn/neon/walproposer.c index 8323811b0d..bf8bb02493 100644 --- a/pgxn/neon/walproposer.c +++ b/pgxn/neon/walproposer.c @@ -80,6 +80,7 @@ bool am_wal_proposer; char *neon_timeline_walproposer = NULL; char *neon_tenant_walproposer = NULL; +char *neon_safekeeper_token_walproposer = NULL; #define WAL_PROPOSER_SLOT_NAME "wal_proposer_slot" @@ -509,11 +510,25 @@ WalProposerInit(XLogRecPtr flushRecPtr, uint64 systemId) safekeeper[n_safekeepers].state = SS_OFFLINE; safekeeper[n_safekeepers].conn = NULL; - /* - * Set conninfo to empty. We'll fill it out once later, in - * `ResetConnection` as needed - */ - safekeeper[n_safekeepers].conninfo[0] = '\0'; + { + Safekeeper *sk = &safekeeper[n_safekeepers]; + int written = 0; + + if (neon_safekeeper_token_walproposer != NULL) { + written = snprintf((char *) &sk->conninfo, MAXCONNINFO, + "host=%s port=%s password=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'", + sk->host, sk->port, neon_safekeeper_token_walproposer, neon_timeline_walproposer, + neon_tenant_walproposer); + } else { + written = snprintf((char *) &sk->conninfo, MAXCONNINFO, + "host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'", + sk->host, sk->port, neon_timeline_walproposer, neon_tenant_walproposer); + } + + if (written > MAXCONNINFO || written < 0) + elog(FATAL, "could not create connection string for safekeeper %s:%s", sk->host, sk->port); + } + initStringInfo(&safekeeper[n_safekeepers].outbuf); safekeeper[n_safekeepers].xlogreader = XLogReaderAllocate(wal_segment_size, NULL, XL_ROUTINE(.segment_open = wal_segment_open,.segment_close = wal_segment_close), NULL); if (safekeeper[n_safekeepers].xlogreader == NULL) @@ -684,31 +699,7 @@ ResetConnection(Safekeeper *sk) /* * Try to establish new connection - * - * If the connection information hasn't been filled out, we need to do - * that here. */ - if (sk->conninfo[0] == '\0') - { - int written = 0; - - written = snprintf((char *) &sk->conninfo, MAXCONNINFO, - "host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'", - sk->host, sk->port, neon_timeline_walproposer, neon_tenant_walproposer); - - /* - * currently connection string is not that long, but once we pass - * something like jwt we might overflow the buffer, - */ - - /* - * so it is better to be defensive and check that everything aligns - * well - */ - if (written > MAXCONNINFO || written < 0) - elog(FATAL, "could not create connection string for safekeeper %s:%s", sk->host, sk->port); - } - sk->conn = walprop_connect_start((char *) &sk->conninfo); /* @@ -729,12 +720,13 @@ ResetConnection(Safekeeper *sk) * According to libpq docs: * "If the result is CONNECTION_BAD, the connection attempt has already failed, * typically because of invalid connection parameters." - * We should report this failure. + * We should report this failure. Do not print the exact `conninfo` as it may + * contain e.g. password. The error message should already provide enough information. 
* * https://www.postgresql.org/docs/devel/libpq-connect.html#LIBPQ-PQCONNECTSTARTPARAMS */ - elog(WARNING, "Immediate failure to connect with node:\n\t%s\n\terror: %s", - sk->conninfo, walprop_error_message(sk->conn)); + elog(WARNING, "Immediate failure to connect with node '%s:%s':\n\terror: %s", + sk->host, sk->port, walprop_error_message(sk->conn)); /* * Even though the connection failed, we still need to clean up the @@ -1403,14 +1395,11 @@ DetermineEpochStartLsn(void) static bool WalProposerRecovery(int donor, TimeLineID timeline, XLogRecPtr startpos, XLogRecPtr endpos) { - char conninfo[MAXCONNINFO]; char *err; WalReceiverConn *wrconn; WalRcvStreamOptions options; - sprintf(conninfo, "host=%s port=%s dbname=replication options='-c timeline_id=%s tenant_id=%s'", - safekeeper[donor].host, safekeeper[donor].port, neon_timeline_walproposer, neon_tenant_walproposer); - wrconn = walrcv_connect(conninfo, false, "wal_proposer_recovery", &err); + wrconn = walrcv_connect(safekeeper[donor].conninfo, false, "wal_proposer_recovery", &err); if (!wrconn) { ereport(WARNING, diff --git a/pgxn/neon/walproposer.h b/pgxn/neon/walproposer.h index 362b194b32..1abaab2cc6 100644 --- a/pgxn/neon/walproposer.h +++ b/pgxn/neon/walproposer.h @@ -41,6 +41,7 @@ typedef struct WalMessage WalMessage; extern char *neon_timeline_walproposer; extern char *neon_tenant_walproposer; +extern char *neon_safekeeper_token_walproposer; /* Possible return values from ReadPGAsync */ typedef enum @@ -337,8 +338,13 @@ typedef struct Safekeeper { char const *host; char const *port; - char conninfo[MAXCONNINFO]; /* connection info for* - * connecting/reconnecting */ + + /* + * connection string for connecting/reconnecting. + * + * May contain private information like password and should not be logged. 
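+     *
+     * Built once in WalProposerInit (see walproposer.c above) and reused by both
+     * ResetConnection and WalProposerRecovery.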
+ */ + char conninfo[MAXCONNINFO]; /* * postgres protocol connection to the WAL acceptor diff --git a/poetry.lock b/poetry.lock index bc1b57bc64..716423d51e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -49,16 +49,16 @@ python-versions = ">=3.6" [[package]] name = "asyncpg" -version = "0.24.0" +version = "0.27.0" description = "An asyncio PostgreSQL driver" category = "main" optional = false -python-versions = ">=3.6.0" +python-versions = ">=3.7.0" [package.extras] -dev = ["Cython (>=0.29.24,<0.30.0)", "Sphinx (>=4.1.2,<4.2.0)", "flake8 (>=3.9.2,<3.10.0)", "pycodestyle (>=2.7.0,<2.8.0)", "pytest (>=6.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)", "uvloop (>=0.15.3)"] +dev = ["Cython (>=0.29.24,<0.30.0)", "Sphinx (>=4.1.2,<4.2.0)", "flake8 (>=5.0.4,<5.1.0)", "pytest (>=6.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)", "uvloop (>=0.15.3)"] docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] -test = ["flake8 (>=3.9.2,<3.10.0)", "pycodestyle (>=2.7.0,<2.8.0)", "uvloop (>=0.15.3)"] +test = ["flake8 (>=5.0.4,<5.1.0)", "uvloop (>=0.15.3)"] [[package]] name = "atomicwrites" @@ -141,14 +141,14 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.24.38" +version = "1.26.16" description = "The AWS SDK for Python" category = "main" optional = false python-versions = ">= 3.7" [package.dependencies] -botocore = ">=1.27.38,<1.28.0" +botocore = ">=1.29.16,<1.30.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.6.0,<0.7.0" @@ -157,339 +157,348 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs" -version = "1.24.58" -description = "Type annotations for boto3 1.24.58 generated with mypy-boto3-builder 7.11.7" +version = "1.26.16" +description = "Type annotations for boto3 1.26.16 generated with mypy-boto3-builder 7.11.11" category = "main" optional = false python-versions = ">=3.7" [package.dependencies] botocore-stubs = "*" -mypy-boto3-s3 = {version = ">=1.24.0,<1.25.0", optional = true, markers = "extra == \"s3\""} +mypy-boto3-s3 = {version = ">=1.26.0,<1.27.0", optional = true, markers = "extra == \"s3\""} types-s3transfer = "*" typing-extensions = ">=4.1.0" [package.extras] -accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.24.0,<1.25.0)"] -account = ["mypy-boto3-account (>=1.24.0,<1.25.0)"] -acm = ["mypy-boto3-acm (>=1.24.0,<1.25.0)"] -acm-pca = ["mypy-boto3-acm-pca (>=1.24.0,<1.25.0)"] -alexaforbusiness = ["mypy-boto3-alexaforbusiness (>=1.24.0,<1.25.0)"] -all = ["mypy-boto3-accessanalyzer (>=1.24.0,<1.25.0)", "mypy-boto3-account (>=1.24.0,<1.25.0)", "mypy-boto3-acm (>=1.24.0,<1.25.0)", "mypy-boto3-acm-pca (>=1.24.0,<1.25.0)", "mypy-boto3-alexaforbusiness (>=1.24.0,<1.25.0)", "mypy-boto3-amp (>=1.24.0,<1.25.0)", "mypy-boto3-amplify (>=1.24.0,<1.25.0)", "mypy-boto3-amplifybackend (>=1.24.0,<1.25.0)", "mypy-boto3-amplifyuibuilder (>=1.24.0,<1.25.0)", "mypy-boto3-apigateway (>=1.24.0,<1.25.0)", "mypy-boto3-apigatewaymanagementapi (>=1.24.0,<1.25.0)", "mypy-boto3-apigatewayv2 (>=1.24.0,<1.25.0)", "mypy-boto3-appconfig (>=1.24.0,<1.25.0)", "mypy-boto3-appconfigdata (>=1.24.0,<1.25.0)", "mypy-boto3-appflow (>=1.24.0,<1.25.0)", "mypy-boto3-appintegrations (>=1.24.0,<1.25.0)", "mypy-boto3-application-autoscaling (>=1.24.0,<1.25.0)", "mypy-boto3-application-insights (>=1.24.0,<1.25.0)", "mypy-boto3-applicationcostprofiler (>=1.24.0,<1.25.0)", "mypy-boto3-appmesh (>=1.24.0,<1.25.0)", "mypy-boto3-apprunner (>=1.24.0,<1.25.0)", 
"mypy-boto3-appstream (>=1.24.0,<1.25.0)", "mypy-boto3-appsync (>=1.24.0,<1.25.0)", "mypy-boto3-athena (>=1.24.0,<1.25.0)", "mypy-boto3-auditmanager (>=1.24.0,<1.25.0)", "mypy-boto3-autoscaling (>=1.24.0,<1.25.0)", "mypy-boto3-autoscaling-plans (>=1.24.0,<1.25.0)", "mypy-boto3-backup (>=1.24.0,<1.25.0)", "mypy-boto3-backup-gateway (>=1.24.0,<1.25.0)", "mypy-boto3-backupstorage (>=1.24.0,<1.25.0)", "mypy-boto3-batch (>=1.24.0,<1.25.0)", "mypy-boto3-billingconductor (>=1.24.0,<1.25.0)", "mypy-boto3-braket (>=1.24.0,<1.25.0)", "mypy-boto3-budgets (>=1.24.0,<1.25.0)", "mypy-boto3-ce (>=1.24.0,<1.25.0)", "mypy-boto3-chime (>=1.24.0,<1.25.0)", "mypy-boto3-chime-sdk-identity (>=1.24.0,<1.25.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.24.0,<1.25.0)", "mypy-boto3-chime-sdk-meetings (>=1.24.0,<1.25.0)", "mypy-boto3-chime-sdk-messaging (>=1.24.0,<1.25.0)", "mypy-boto3-cloud9 (>=1.24.0,<1.25.0)", "mypy-boto3-cloudcontrol (>=1.24.0,<1.25.0)", "mypy-boto3-clouddirectory (>=1.24.0,<1.25.0)", "mypy-boto3-cloudformation (>=1.24.0,<1.25.0)", "mypy-boto3-cloudfront (>=1.24.0,<1.25.0)", "mypy-boto3-cloudhsm (>=1.24.0,<1.25.0)", "mypy-boto3-cloudhsmv2 (>=1.24.0,<1.25.0)", "mypy-boto3-cloudsearch (>=1.24.0,<1.25.0)", "mypy-boto3-cloudsearchdomain (>=1.24.0,<1.25.0)", "mypy-boto3-cloudtrail (>=1.24.0,<1.25.0)", "mypy-boto3-cloudwatch (>=1.24.0,<1.25.0)", "mypy-boto3-codeartifact (>=1.24.0,<1.25.0)", "mypy-boto3-codebuild (>=1.24.0,<1.25.0)", "mypy-boto3-codecommit (>=1.24.0,<1.25.0)", "mypy-boto3-codedeploy (>=1.24.0,<1.25.0)", "mypy-boto3-codeguru-reviewer (>=1.24.0,<1.25.0)", "mypy-boto3-codeguruprofiler (>=1.24.0,<1.25.0)", "mypy-boto3-codepipeline (>=1.24.0,<1.25.0)", "mypy-boto3-codestar (>=1.24.0,<1.25.0)", "mypy-boto3-codestar-connections (>=1.24.0,<1.25.0)", "mypy-boto3-codestar-notifications (>=1.24.0,<1.25.0)", "mypy-boto3-cognito-identity (>=1.24.0,<1.25.0)", "mypy-boto3-cognito-idp (>=1.24.0,<1.25.0)", "mypy-boto3-cognito-sync (>=1.24.0,<1.25.0)", "mypy-boto3-comprehend (>=1.24.0,<1.25.0)", "mypy-boto3-comprehendmedical (>=1.24.0,<1.25.0)", "mypy-boto3-compute-optimizer (>=1.24.0,<1.25.0)", "mypy-boto3-config (>=1.24.0,<1.25.0)", "mypy-boto3-connect (>=1.24.0,<1.25.0)", "mypy-boto3-connect-contact-lens (>=1.24.0,<1.25.0)", "mypy-boto3-connectcampaigns (>=1.24.0,<1.25.0)", "mypy-boto3-connectparticipant (>=1.24.0,<1.25.0)", "mypy-boto3-cur (>=1.24.0,<1.25.0)", "mypy-boto3-customer-profiles (>=1.24.0,<1.25.0)", "mypy-boto3-databrew (>=1.24.0,<1.25.0)", "mypy-boto3-dataexchange (>=1.24.0,<1.25.0)", "mypy-boto3-datapipeline (>=1.24.0,<1.25.0)", "mypy-boto3-datasync (>=1.24.0,<1.25.0)", "mypy-boto3-dax (>=1.24.0,<1.25.0)", "mypy-boto3-detective (>=1.24.0,<1.25.0)", "mypy-boto3-devicefarm (>=1.24.0,<1.25.0)", "mypy-boto3-devops-guru (>=1.24.0,<1.25.0)", "mypy-boto3-directconnect (>=1.24.0,<1.25.0)", "mypy-boto3-discovery (>=1.24.0,<1.25.0)", "mypy-boto3-dlm (>=1.24.0,<1.25.0)", "mypy-boto3-dms (>=1.24.0,<1.25.0)", "mypy-boto3-docdb (>=1.24.0,<1.25.0)", "mypy-boto3-drs (>=1.24.0,<1.25.0)", "mypy-boto3-ds (>=1.24.0,<1.25.0)", "mypy-boto3-dynamodb (>=1.24.0,<1.25.0)", "mypy-boto3-dynamodbstreams (>=1.24.0,<1.25.0)", "mypy-boto3-ebs (>=1.24.0,<1.25.0)", "mypy-boto3-ec2 (>=1.24.0,<1.25.0)", "mypy-boto3-ec2-instance-connect (>=1.24.0,<1.25.0)", "mypy-boto3-ecr (>=1.24.0,<1.25.0)", "mypy-boto3-ecr-public (>=1.24.0,<1.25.0)", "mypy-boto3-ecs (>=1.24.0,<1.25.0)", "mypy-boto3-efs (>=1.24.0,<1.25.0)", "mypy-boto3-eks (>=1.24.0,<1.25.0)", "mypy-boto3-elastic-inference (>=1.24.0,<1.25.0)", "mypy-boto3-elasticache 
(>=1.24.0,<1.25.0)", "mypy-boto3-elasticbeanstalk (>=1.24.0,<1.25.0)", "mypy-boto3-elastictranscoder (>=1.24.0,<1.25.0)", "mypy-boto3-elb (>=1.24.0,<1.25.0)", "mypy-boto3-elbv2 (>=1.24.0,<1.25.0)", "mypy-boto3-emr (>=1.24.0,<1.25.0)", "mypy-boto3-emr-containers (>=1.24.0,<1.25.0)", "mypy-boto3-emr-serverless (>=1.24.0,<1.25.0)", "mypy-boto3-es (>=1.24.0,<1.25.0)", "mypy-boto3-events (>=1.24.0,<1.25.0)", "mypy-boto3-evidently (>=1.24.0,<1.25.0)", "mypy-boto3-finspace (>=1.24.0,<1.25.0)", "mypy-boto3-finspace-data (>=1.24.0,<1.25.0)", "mypy-boto3-firehose (>=1.24.0,<1.25.0)", "mypy-boto3-fis (>=1.24.0,<1.25.0)", "mypy-boto3-fms (>=1.24.0,<1.25.0)", "mypy-boto3-forecast (>=1.24.0,<1.25.0)", "mypy-boto3-forecastquery (>=1.24.0,<1.25.0)", "mypy-boto3-frauddetector (>=1.24.0,<1.25.0)", "mypy-boto3-fsx (>=1.24.0,<1.25.0)", "mypy-boto3-gamelift (>=1.24.0,<1.25.0)", "mypy-boto3-gamesparks (>=1.24.0,<1.25.0)", "mypy-boto3-glacier (>=1.24.0,<1.25.0)", "mypy-boto3-globalaccelerator (>=1.24.0,<1.25.0)", "mypy-boto3-glue (>=1.24.0,<1.25.0)", "mypy-boto3-grafana (>=1.24.0,<1.25.0)", "mypy-boto3-greengrass (>=1.24.0,<1.25.0)", "mypy-boto3-greengrassv2 (>=1.24.0,<1.25.0)", "mypy-boto3-groundstation (>=1.24.0,<1.25.0)", "mypy-boto3-guardduty (>=1.24.0,<1.25.0)", "mypy-boto3-health (>=1.24.0,<1.25.0)", "mypy-boto3-healthlake (>=1.24.0,<1.25.0)", "mypy-boto3-honeycode (>=1.24.0,<1.25.0)", "mypy-boto3-iam (>=1.24.0,<1.25.0)", "mypy-boto3-identitystore (>=1.24.0,<1.25.0)", "mypy-boto3-imagebuilder (>=1.24.0,<1.25.0)", "mypy-boto3-importexport (>=1.24.0,<1.25.0)", "mypy-boto3-inspector (>=1.24.0,<1.25.0)", "mypy-boto3-inspector2 (>=1.24.0,<1.25.0)", "mypy-boto3-iot (>=1.24.0,<1.25.0)", "mypy-boto3-iot-data (>=1.24.0,<1.25.0)", "mypy-boto3-iot-jobs-data (>=1.24.0,<1.25.0)", "mypy-boto3-iot1click-devices (>=1.24.0,<1.25.0)", "mypy-boto3-iot1click-projects (>=1.24.0,<1.25.0)", "mypy-boto3-iotanalytics (>=1.24.0,<1.25.0)", "mypy-boto3-iotdeviceadvisor (>=1.24.0,<1.25.0)", "mypy-boto3-iotevents (>=1.24.0,<1.25.0)", "mypy-boto3-iotevents-data (>=1.24.0,<1.25.0)", "mypy-boto3-iotfleethub (>=1.24.0,<1.25.0)", "mypy-boto3-iotsecuretunneling (>=1.24.0,<1.25.0)", "mypy-boto3-iotsitewise (>=1.24.0,<1.25.0)", "mypy-boto3-iotthingsgraph (>=1.24.0,<1.25.0)", "mypy-boto3-iottwinmaker (>=1.24.0,<1.25.0)", "mypy-boto3-iotwireless (>=1.24.0,<1.25.0)", "mypy-boto3-ivs (>=1.24.0,<1.25.0)", "mypy-boto3-ivschat (>=1.24.0,<1.25.0)", "mypy-boto3-kafka (>=1.24.0,<1.25.0)", "mypy-boto3-kafkaconnect (>=1.24.0,<1.25.0)", "mypy-boto3-kendra (>=1.24.0,<1.25.0)", "mypy-boto3-keyspaces (>=1.24.0,<1.25.0)", "mypy-boto3-kinesis (>=1.24.0,<1.25.0)", "mypy-boto3-kinesis-video-archived-media (>=1.24.0,<1.25.0)", "mypy-boto3-kinesis-video-media (>=1.24.0,<1.25.0)", "mypy-boto3-kinesis-video-signaling (>=1.24.0,<1.25.0)", "mypy-boto3-kinesisanalytics (>=1.24.0,<1.25.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.24.0,<1.25.0)", "mypy-boto3-kinesisvideo (>=1.24.0,<1.25.0)", "mypy-boto3-kms (>=1.24.0,<1.25.0)", "mypy-boto3-lakeformation (>=1.24.0,<1.25.0)", "mypy-boto3-lambda (>=1.24.0,<1.25.0)", "mypy-boto3-lex-models (>=1.24.0,<1.25.0)", "mypy-boto3-lex-runtime (>=1.24.0,<1.25.0)", "mypy-boto3-lexv2-models (>=1.24.0,<1.25.0)", "mypy-boto3-lexv2-runtime (>=1.24.0,<1.25.0)", "mypy-boto3-license-manager (>=1.24.0,<1.25.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.24.0,<1.25.0)", "mypy-boto3-lightsail (>=1.24.0,<1.25.0)", "mypy-boto3-location (>=1.24.0,<1.25.0)", "mypy-boto3-logs (>=1.24.0,<1.25.0)", "mypy-boto3-lookoutequipment (>=1.24.0,<1.25.0)", 
"mypy-boto3-lookoutmetrics (>=1.24.0,<1.25.0)", "mypy-boto3-lookoutvision (>=1.24.0,<1.25.0)", "mypy-boto3-m2 (>=1.24.0,<1.25.0)", "mypy-boto3-machinelearning (>=1.24.0,<1.25.0)", "mypy-boto3-macie (>=1.24.0,<1.25.0)", "mypy-boto3-macie2 (>=1.24.0,<1.25.0)", "mypy-boto3-managedblockchain (>=1.24.0,<1.25.0)", "mypy-boto3-marketplace-catalog (>=1.24.0,<1.25.0)", "mypy-boto3-marketplace-entitlement (>=1.24.0,<1.25.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.24.0,<1.25.0)", "mypy-boto3-mediaconnect (>=1.24.0,<1.25.0)", "mypy-boto3-mediaconvert (>=1.24.0,<1.25.0)", "mypy-boto3-medialive (>=1.24.0,<1.25.0)", "mypy-boto3-mediapackage (>=1.24.0,<1.25.0)", "mypy-boto3-mediapackage-vod (>=1.24.0,<1.25.0)", "mypy-boto3-mediastore (>=1.24.0,<1.25.0)", "mypy-boto3-mediastore-data (>=1.24.0,<1.25.0)", "mypy-boto3-mediatailor (>=1.24.0,<1.25.0)", "mypy-boto3-memorydb (>=1.24.0,<1.25.0)", "mypy-boto3-meteringmarketplace (>=1.24.0,<1.25.0)", "mypy-boto3-mgh (>=1.24.0,<1.25.0)", "mypy-boto3-mgn (>=1.24.0,<1.25.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.24.0,<1.25.0)", "mypy-boto3-migrationhub-config (>=1.24.0,<1.25.0)", "mypy-boto3-migrationhubstrategy (>=1.24.0,<1.25.0)", "mypy-boto3-mobile (>=1.24.0,<1.25.0)", "mypy-boto3-mq (>=1.24.0,<1.25.0)", "mypy-boto3-mturk (>=1.24.0,<1.25.0)", "mypy-boto3-mwaa (>=1.24.0,<1.25.0)", "mypy-boto3-neptune (>=1.24.0,<1.25.0)", "mypy-boto3-network-firewall (>=1.24.0,<1.25.0)", "mypy-boto3-networkmanager (>=1.24.0,<1.25.0)", "mypy-boto3-nimble (>=1.24.0,<1.25.0)", "mypy-boto3-opensearch (>=1.24.0,<1.25.0)", "mypy-boto3-opsworks (>=1.24.0,<1.25.0)", "mypy-boto3-opsworkscm (>=1.24.0,<1.25.0)", "mypy-boto3-organizations (>=1.24.0,<1.25.0)", "mypy-boto3-outposts (>=1.24.0,<1.25.0)", "mypy-boto3-panorama (>=1.24.0,<1.25.0)", "mypy-boto3-personalize (>=1.24.0,<1.25.0)", "mypy-boto3-personalize-events (>=1.24.0,<1.25.0)", "mypy-boto3-personalize-runtime (>=1.24.0,<1.25.0)", "mypy-boto3-pi (>=1.24.0,<1.25.0)", "mypy-boto3-pinpoint (>=1.24.0,<1.25.0)", "mypy-boto3-pinpoint-email (>=1.24.0,<1.25.0)", "mypy-boto3-pinpoint-sms-voice (>=1.24.0,<1.25.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.24.0,<1.25.0)", "mypy-boto3-polly (>=1.24.0,<1.25.0)", "mypy-boto3-pricing (>=1.24.0,<1.25.0)", "mypy-boto3-privatenetworks (>=1.24.0,<1.25.0)", "mypy-boto3-proton (>=1.24.0,<1.25.0)", "mypy-boto3-qldb (>=1.24.0,<1.25.0)", "mypy-boto3-qldb-session (>=1.24.0,<1.25.0)", "mypy-boto3-quicksight (>=1.24.0,<1.25.0)", "mypy-boto3-ram (>=1.24.0,<1.25.0)", "mypy-boto3-rbin (>=1.24.0,<1.25.0)", "mypy-boto3-rds (>=1.24.0,<1.25.0)", "mypy-boto3-rds-data (>=1.24.0,<1.25.0)", "mypy-boto3-redshift (>=1.24.0,<1.25.0)", "mypy-boto3-redshift-data (>=1.24.0,<1.25.0)", "mypy-boto3-redshift-serverless (>=1.24.0,<1.25.0)", "mypy-boto3-rekognition (>=1.24.0,<1.25.0)", "mypy-boto3-resiliencehub (>=1.24.0,<1.25.0)", "mypy-boto3-resource-groups (>=1.24.0,<1.25.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.24.0,<1.25.0)", "mypy-boto3-robomaker (>=1.24.0,<1.25.0)", "mypy-boto3-rolesanywhere (>=1.24.0,<1.25.0)", "mypy-boto3-route53 (>=1.24.0,<1.25.0)", "mypy-boto3-route53-recovery-cluster (>=1.24.0,<1.25.0)", "mypy-boto3-route53-recovery-control-config (>=1.24.0,<1.25.0)", "mypy-boto3-route53-recovery-readiness (>=1.24.0,<1.25.0)", "mypy-boto3-route53domains (>=1.24.0,<1.25.0)", "mypy-boto3-route53resolver (>=1.24.0,<1.25.0)", "mypy-boto3-rum (>=1.24.0,<1.25.0)", "mypy-boto3-s3 (>=1.24.0,<1.25.0)", "mypy-boto3-s3control (>=1.24.0,<1.25.0)", "mypy-boto3-s3outposts (>=1.24.0,<1.25.0)", "mypy-boto3-sagemaker 
(>=1.24.0,<1.25.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.24.0,<1.25.0)", "mypy-boto3-sagemaker-edge (>=1.24.0,<1.25.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.24.0,<1.25.0)", "mypy-boto3-sagemaker-runtime (>=1.24.0,<1.25.0)", "mypy-boto3-savingsplans (>=1.24.0,<1.25.0)", "mypy-boto3-schemas (>=1.24.0,<1.25.0)", "mypy-boto3-sdb (>=1.24.0,<1.25.0)", "mypy-boto3-secretsmanager (>=1.24.0,<1.25.0)", "mypy-boto3-securityhub (>=1.24.0,<1.25.0)", "mypy-boto3-serverlessrepo (>=1.24.0,<1.25.0)", "mypy-boto3-service-quotas (>=1.24.0,<1.25.0)", "mypy-boto3-servicecatalog (>=1.24.0,<1.25.0)", "mypy-boto3-servicecatalog-appregistry (>=1.24.0,<1.25.0)", "mypy-boto3-servicediscovery (>=1.24.0,<1.25.0)", "mypy-boto3-ses (>=1.24.0,<1.25.0)", "mypy-boto3-sesv2 (>=1.24.0,<1.25.0)", "mypy-boto3-shield (>=1.24.0,<1.25.0)", "mypy-boto3-signer (>=1.24.0,<1.25.0)", "mypy-boto3-sms (>=1.24.0,<1.25.0)", "mypy-boto3-sms-voice (>=1.24.0,<1.25.0)", "mypy-boto3-snow-device-management (>=1.24.0,<1.25.0)", "mypy-boto3-snowball (>=1.24.0,<1.25.0)", "mypy-boto3-sns (>=1.24.0,<1.25.0)", "mypy-boto3-sqs (>=1.24.0,<1.25.0)", "mypy-boto3-ssm (>=1.24.0,<1.25.0)", "mypy-boto3-ssm-contacts (>=1.24.0,<1.25.0)", "mypy-boto3-ssm-incidents (>=1.24.0,<1.25.0)", "mypy-boto3-sso (>=1.24.0,<1.25.0)", "mypy-boto3-sso-admin (>=1.24.0,<1.25.0)", "mypy-boto3-sso-oidc (>=1.24.0,<1.25.0)", "mypy-boto3-stepfunctions (>=1.24.0,<1.25.0)", "mypy-boto3-storagegateway (>=1.24.0,<1.25.0)", "mypy-boto3-sts (>=1.24.0,<1.25.0)", "mypy-boto3-support (>=1.24.0,<1.25.0)", "mypy-boto3-support-app (>=1.24.0,<1.25.0)", "mypy-boto3-swf (>=1.24.0,<1.25.0)", "mypy-boto3-synthetics (>=1.24.0,<1.25.0)", "mypy-boto3-textract (>=1.24.0,<1.25.0)", "mypy-boto3-timestream-query (>=1.24.0,<1.25.0)", "mypy-boto3-timestream-write (>=1.24.0,<1.25.0)", "mypy-boto3-transcribe (>=1.24.0,<1.25.0)", "mypy-boto3-transfer (>=1.24.0,<1.25.0)", "mypy-boto3-translate (>=1.24.0,<1.25.0)", "mypy-boto3-voice-id (>=1.24.0,<1.25.0)", "mypy-boto3-waf (>=1.24.0,<1.25.0)", "mypy-boto3-waf-regional (>=1.24.0,<1.25.0)", "mypy-boto3-wafv2 (>=1.24.0,<1.25.0)", "mypy-boto3-wellarchitected (>=1.24.0,<1.25.0)", "mypy-boto3-wisdom (>=1.24.0,<1.25.0)", "mypy-boto3-workdocs (>=1.24.0,<1.25.0)", "mypy-boto3-worklink (>=1.24.0,<1.25.0)", "mypy-boto3-workmail (>=1.24.0,<1.25.0)", "mypy-boto3-workmailmessageflow (>=1.24.0,<1.25.0)", "mypy-boto3-workspaces (>=1.24.0,<1.25.0)", "mypy-boto3-workspaces-web (>=1.24.0,<1.25.0)", "mypy-boto3-xray (>=1.24.0,<1.25.0)"] -amp = ["mypy-boto3-amp (>=1.24.0,<1.25.0)"] -amplify = ["mypy-boto3-amplify (>=1.24.0,<1.25.0)"] -amplifybackend = ["mypy-boto3-amplifybackend (>=1.24.0,<1.25.0)"] -amplifyuibuilder = ["mypy-boto3-amplifyuibuilder (>=1.24.0,<1.25.0)"] -apigateway = ["mypy-boto3-apigateway (>=1.24.0,<1.25.0)"] -apigatewaymanagementapi = ["mypy-boto3-apigatewaymanagementapi (>=1.24.0,<1.25.0)"] -apigatewayv2 = ["mypy-boto3-apigatewayv2 (>=1.24.0,<1.25.0)"] -appconfig = ["mypy-boto3-appconfig (>=1.24.0,<1.25.0)"] -appconfigdata = ["mypy-boto3-appconfigdata (>=1.24.0,<1.25.0)"] -appflow = ["mypy-boto3-appflow (>=1.24.0,<1.25.0)"] -appintegrations = ["mypy-boto3-appintegrations (>=1.24.0,<1.25.0)"] -application-autoscaling = ["mypy-boto3-application-autoscaling (>=1.24.0,<1.25.0)"] -application-insights = ["mypy-boto3-application-insights (>=1.24.0,<1.25.0)"] -applicationcostprofiler = ["mypy-boto3-applicationcostprofiler (>=1.24.0,<1.25.0)"] -appmesh = ["mypy-boto3-appmesh (>=1.24.0,<1.25.0)"] -apprunner = ["mypy-boto3-apprunner (>=1.24.0,<1.25.0)"] 
-appstream = ["mypy-boto3-appstream (>=1.24.0,<1.25.0)"] -appsync = ["mypy-boto3-appsync (>=1.24.0,<1.25.0)"] -athena = ["mypy-boto3-athena (>=1.24.0,<1.25.0)"] -auditmanager = ["mypy-boto3-auditmanager (>=1.24.0,<1.25.0)"] -autoscaling = ["mypy-boto3-autoscaling (>=1.24.0,<1.25.0)"] -autoscaling-plans = ["mypy-boto3-autoscaling-plans (>=1.24.0,<1.25.0)"] -backup = ["mypy-boto3-backup (>=1.24.0,<1.25.0)"] -backup-gateway = ["mypy-boto3-backup-gateway (>=1.24.0,<1.25.0)"] -backupstorage = ["mypy-boto3-backupstorage (>=1.24.0,<1.25.0)"] -batch = ["mypy-boto3-batch (>=1.24.0,<1.25.0)"] -billingconductor = ["mypy-boto3-billingconductor (>=1.24.0,<1.25.0)"] -braket = ["mypy-boto3-braket (>=1.24.0,<1.25.0)"] -budgets = ["mypy-boto3-budgets (>=1.24.0,<1.25.0)"] -ce = ["mypy-boto3-ce (>=1.24.0,<1.25.0)"] -chime = ["mypy-boto3-chime (>=1.24.0,<1.25.0)"] -chime-sdk-identity = ["mypy-boto3-chime-sdk-identity (>=1.24.0,<1.25.0)"] -chime-sdk-media-pipelines = ["mypy-boto3-chime-sdk-media-pipelines (>=1.24.0,<1.25.0)"] -chime-sdk-meetings = ["mypy-boto3-chime-sdk-meetings (>=1.24.0,<1.25.0)"] -chime-sdk-messaging = ["mypy-boto3-chime-sdk-messaging (>=1.24.0,<1.25.0)"] -cloud9 = ["mypy-boto3-cloud9 (>=1.24.0,<1.25.0)"] -cloudcontrol = ["mypy-boto3-cloudcontrol (>=1.24.0,<1.25.0)"] -clouddirectory = ["mypy-boto3-clouddirectory (>=1.24.0,<1.25.0)"] -cloudformation = ["mypy-boto3-cloudformation (>=1.24.0,<1.25.0)"] -cloudfront = ["mypy-boto3-cloudfront (>=1.24.0,<1.25.0)"] -cloudhsm = ["mypy-boto3-cloudhsm (>=1.24.0,<1.25.0)"] -cloudhsmv2 = ["mypy-boto3-cloudhsmv2 (>=1.24.0,<1.25.0)"] -cloudsearch = ["mypy-boto3-cloudsearch (>=1.24.0,<1.25.0)"] -cloudsearchdomain = ["mypy-boto3-cloudsearchdomain (>=1.24.0,<1.25.0)"] -cloudtrail = ["mypy-boto3-cloudtrail (>=1.24.0,<1.25.0)"] -cloudwatch = ["mypy-boto3-cloudwatch (>=1.24.0,<1.25.0)"] -codeartifact = ["mypy-boto3-codeartifact (>=1.24.0,<1.25.0)"] -codebuild = ["mypy-boto3-codebuild (>=1.24.0,<1.25.0)"] -codecommit = ["mypy-boto3-codecommit (>=1.24.0,<1.25.0)"] -codedeploy = ["mypy-boto3-codedeploy (>=1.24.0,<1.25.0)"] -codeguru-reviewer = ["mypy-boto3-codeguru-reviewer (>=1.24.0,<1.25.0)"] -codeguruprofiler = ["mypy-boto3-codeguruprofiler (>=1.24.0,<1.25.0)"] -codepipeline = ["mypy-boto3-codepipeline (>=1.24.0,<1.25.0)"] -codestar = ["mypy-boto3-codestar (>=1.24.0,<1.25.0)"] -codestar-connections = ["mypy-boto3-codestar-connections (>=1.24.0,<1.25.0)"] -codestar-notifications = ["mypy-boto3-codestar-notifications (>=1.24.0,<1.25.0)"] -cognito-identity = ["mypy-boto3-cognito-identity (>=1.24.0,<1.25.0)"] -cognito-idp = ["mypy-boto3-cognito-idp (>=1.24.0,<1.25.0)"] -cognito-sync = ["mypy-boto3-cognito-sync (>=1.24.0,<1.25.0)"] -comprehend = ["mypy-boto3-comprehend (>=1.24.0,<1.25.0)"] -comprehendmedical = ["mypy-boto3-comprehendmedical (>=1.24.0,<1.25.0)"] -compute-optimizer = ["mypy-boto3-compute-optimizer (>=1.24.0,<1.25.0)"] -config = ["mypy-boto3-config (>=1.24.0,<1.25.0)"] -connect = ["mypy-boto3-connect (>=1.24.0,<1.25.0)"] -connect-contact-lens = ["mypy-boto3-connect-contact-lens (>=1.24.0,<1.25.0)"] -connectcampaigns = ["mypy-boto3-connectcampaigns (>=1.24.0,<1.25.0)"] -connectparticipant = ["mypy-boto3-connectparticipant (>=1.24.0,<1.25.0)"] -cur = ["mypy-boto3-cur (>=1.24.0,<1.25.0)"] -customer-profiles = ["mypy-boto3-customer-profiles (>=1.24.0,<1.25.0)"] -databrew = ["mypy-boto3-databrew (>=1.24.0,<1.25.0)"] -dataexchange = ["mypy-boto3-dataexchange (>=1.24.0,<1.25.0)"] -datapipeline = ["mypy-boto3-datapipeline (>=1.24.0,<1.25.0)"] -datasync = 
["mypy-boto3-datasync (>=1.24.0,<1.25.0)"] -dax = ["mypy-boto3-dax (>=1.24.0,<1.25.0)"] -detective = ["mypy-boto3-detective (>=1.24.0,<1.25.0)"] -devicefarm = ["mypy-boto3-devicefarm (>=1.24.0,<1.25.0)"] -devops-guru = ["mypy-boto3-devops-guru (>=1.24.0,<1.25.0)"] -directconnect = ["mypy-boto3-directconnect (>=1.24.0,<1.25.0)"] -discovery = ["mypy-boto3-discovery (>=1.24.0,<1.25.0)"] -dlm = ["mypy-boto3-dlm (>=1.24.0,<1.25.0)"] -dms = ["mypy-boto3-dms (>=1.24.0,<1.25.0)"] -docdb = ["mypy-boto3-docdb (>=1.24.0,<1.25.0)"] -drs = ["mypy-boto3-drs (>=1.24.0,<1.25.0)"] -ds = ["mypy-boto3-ds (>=1.24.0,<1.25.0)"] -dynamodb = ["mypy-boto3-dynamodb (>=1.24.0,<1.25.0)"] -dynamodbstreams = ["mypy-boto3-dynamodbstreams (>=1.24.0,<1.25.0)"] -ebs = ["mypy-boto3-ebs (>=1.24.0,<1.25.0)"] -ec2 = ["mypy-boto3-ec2 (>=1.24.0,<1.25.0)"] -ec2-instance-connect = ["mypy-boto3-ec2-instance-connect (>=1.24.0,<1.25.0)"] -ecr = ["mypy-boto3-ecr (>=1.24.0,<1.25.0)"] -ecr-public = ["mypy-boto3-ecr-public (>=1.24.0,<1.25.0)"] -ecs = ["mypy-boto3-ecs (>=1.24.0,<1.25.0)"] -efs = ["mypy-boto3-efs (>=1.24.0,<1.25.0)"] -eks = ["mypy-boto3-eks (>=1.24.0,<1.25.0)"] -elastic-inference = ["mypy-boto3-elastic-inference (>=1.24.0,<1.25.0)"] -elasticache = ["mypy-boto3-elasticache (>=1.24.0,<1.25.0)"] -elasticbeanstalk = ["mypy-boto3-elasticbeanstalk (>=1.24.0,<1.25.0)"] -elastictranscoder = ["mypy-boto3-elastictranscoder (>=1.24.0,<1.25.0)"] -elb = ["mypy-boto3-elb (>=1.24.0,<1.25.0)"] -elbv2 = ["mypy-boto3-elbv2 (>=1.24.0,<1.25.0)"] -emr = ["mypy-boto3-emr (>=1.24.0,<1.25.0)"] -emr-containers = ["mypy-boto3-emr-containers (>=1.24.0,<1.25.0)"] -emr-serverless = ["mypy-boto3-emr-serverless (>=1.24.0,<1.25.0)"] -es = ["mypy-boto3-es (>=1.24.0,<1.25.0)"] -essential = ["mypy-boto3-cloudformation (>=1.24.0,<1.25.0)", "mypy-boto3-dynamodb (>=1.24.0,<1.25.0)", "mypy-boto3-ec2 (>=1.24.0,<1.25.0)", "mypy-boto3-lambda (>=1.24.0,<1.25.0)", "mypy-boto3-rds (>=1.24.0,<1.25.0)", "mypy-boto3-s3 (>=1.24.0,<1.25.0)", "mypy-boto3-sqs (>=1.24.0,<1.25.0)"] -events = ["mypy-boto3-events (>=1.24.0,<1.25.0)"] -evidently = ["mypy-boto3-evidently (>=1.24.0,<1.25.0)"] -finspace = ["mypy-boto3-finspace (>=1.24.0,<1.25.0)"] -finspace-data = ["mypy-boto3-finspace-data (>=1.24.0,<1.25.0)"] -firehose = ["mypy-boto3-firehose (>=1.24.0,<1.25.0)"] -fis = ["mypy-boto3-fis (>=1.24.0,<1.25.0)"] -fms = ["mypy-boto3-fms (>=1.24.0,<1.25.0)"] -forecast = ["mypy-boto3-forecast (>=1.24.0,<1.25.0)"] -forecastquery = ["mypy-boto3-forecastquery (>=1.24.0,<1.25.0)"] -frauddetector = ["mypy-boto3-frauddetector (>=1.24.0,<1.25.0)"] -fsx = ["mypy-boto3-fsx (>=1.24.0,<1.25.0)"] -gamelift = ["mypy-boto3-gamelift (>=1.24.0,<1.25.0)"] -gamesparks = ["mypy-boto3-gamesparks (>=1.24.0,<1.25.0)"] -glacier = ["mypy-boto3-glacier (>=1.24.0,<1.25.0)"] -globalaccelerator = ["mypy-boto3-globalaccelerator (>=1.24.0,<1.25.0)"] -glue = ["mypy-boto3-glue (>=1.24.0,<1.25.0)"] -grafana = ["mypy-boto3-grafana (>=1.24.0,<1.25.0)"] -greengrass = ["mypy-boto3-greengrass (>=1.24.0,<1.25.0)"] -greengrassv2 = ["mypy-boto3-greengrassv2 (>=1.24.0,<1.25.0)"] -groundstation = ["mypy-boto3-groundstation (>=1.24.0,<1.25.0)"] -guardduty = ["mypy-boto3-guardduty (>=1.24.0,<1.25.0)"] -health = ["mypy-boto3-health (>=1.24.0,<1.25.0)"] -healthlake = ["mypy-boto3-healthlake (>=1.24.0,<1.25.0)"] -honeycode = ["mypy-boto3-honeycode (>=1.24.0,<1.25.0)"] -iam = ["mypy-boto3-iam (>=1.24.0,<1.25.0)"] -identitystore = ["mypy-boto3-identitystore (>=1.24.0,<1.25.0)"] -imagebuilder = ["mypy-boto3-imagebuilder 
(>=1.24.0,<1.25.0)"] -importexport = ["mypy-boto3-importexport (>=1.24.0,<1.25.0)"] -inspector = ["mypy-boto3-inspector (>=1.24.0,<1.25.0)"] -inspector2 = ["mypy-boto3-inspector2 (>=1.24.0,<1.25.0)"] -iot = ["mypy-boto3-iot (>=1.24.0,<1.25.0)"] -iot-data = ["mypy-boto3-iot-data (>=1.24.0,<1.25.0)"] -iot-jobs-data = ["mypy-boto3-iot-jobs-data (>=1.24.0,<1.25.0)"] -iot1click-devices = ["mypy-boto3-iot1click-devices (>=1.24.0,<1.25.0)"] -iot1click-projects = ["mypy-boto3-iot1click-projects (>=1.24.0,<1.25.0)"] -iotanalytics = ["mypy-boto3-iotanalytics (>=1.24.0,<1.25.0)"] -iotdeviceadvisor = ["mypy-boto3-iotdeviceadvisor (>=1.24.0,<1.25.0)"] -iotevents = ["mypy-boto3-iotevents (>=1.24.0,<1.25.0)"] -iotevents-data = ["mypy-boto3-iotevents-data (>=1.24.0,<1.25.0)"] -iotfleethub = ["mypy-boto3-iotfleethub (>=1.24.0,<1.25.0)"] -iotsecuretunneling = ["mypy-boto3-iotsecuretunneling (>=1.24.0,<1.25.0)"] -iotsitewise = ["mypy-boto3-iotsitewise (>=1.24.0,<1.25.0)"] -iotthingsgraph = ["mypy-boto3-iotthingsgraph (>=1.24.0,<1.25.0)"] -iottwinmaker = ["mypy-boto3-iottwinmaker (>=1.24.0,<1.25.0)"] -iotwireless = ["mypy-boto3-iotwireless (>=1.24.0,<1.25.0)"] -ivs = ["mypy-boto3-ivs (>=1.24.0,<1.25.0)"] -ivschat = ["mypy-boto3-ivschat (>=1.24.0,<1.25.0)"] -kafka = ["mypy-boto3-kafka (>=1.24.0,<1.25.0)"] -kafkaconnect = ["mypy-boto3-kafkaconnect (>=1.24.0,<1.25.0)"] -kendra = ["mypy-boto3-kendra (>=1.24.0,<1.25.0)"] -keyspaces = ["mypy-boto3-keyspaces (>=1.24.0,<1.25.0)"] -kinesis = ["mypy-boto3-kinesis (>=1.24.0,<1.25.0)"] -kinesis-video-archived-media = ["mypy-boto3-kinesis-video-archived-media (>=1.24.0,<1.25.0)"] -kinesis-video-media = ["mypy-boto3-kinesis-video-media (>=1.24.0,<1.25.0)"] -kinesis-video-signaling = ["mypy-boto3-kinesis-video-signaling (>=1.24.0,<1.25.0)"] -kinesisanalytics = ["mypy-boto3-kinesisanalytics (>=1.24.0,<1.25.0)"] -kinesisanalyticsv2 = ["mypy-boto3-kinesisanalyticsv2 (>=1.24.0,<1.25.0)"] -kinesisvideo = ["mypy-boto3-kinesisvideo (>=1.24.0,<1.25.0)"] -kms = ["mypy-boto3-kms (>=1.24.0,<1.25.0)"] -lakeformation = ["mypy-boto3-lakeformation (>=1.24.0,<1.25.0)"] -lambda = ["mypy-boto3-lambda (>=1.24.0,<1.25.0)"] -lex-models = ["mypy-boto3-lex-models (>=1.24.0,<1.25.0)"] -lex-runtime = ["mypy-boto3-lex-runtime (>=1.24.0,<1.25.0)"] -lexv2-models = ["mypy-boto3-lexv2-models (>=1.24.0,<1.25.0)"] -lexv2-runtime = ["mypy-boto3-lexv2-runtime (>=1.24.0,<1.25.0)"] -license-manager = ["mypy-boto3-license-manager (>=1.24.0,<1.25.0)"] -license-manager-user-subscriptions = ["mypy-boto3-license-manager-user-subscriptions (>=1.24.0,<1.25.0)"] -lightsail = ["mypy-boto3-lightsail (>=1.24.0,<1.25.0)"] -location = ["mypy-boto3-location (>=1.24.0,<1.25.0)"] -logs = ["mypy-boto3-logs (>=1.24.0,<1.25.0)"] -lookoutequipment = ["mypy-boto3-lookoutequipment (>=1.24.0,<1.25.0)"] -lookoutmetrics = ["mypy-boto3-lookoutmetrics (>=1.24.0,<1.25.0)"] -lookoutvision = ["mypy-boto3-lookoutvision (>=1.24.0,<1.25.0)"] -m2 = ["mypy-boto3-m2 (>=1.24.0,<1.25.0)"] -machinelearning = ["mypy-boto3-machinelearning (>=1.24.0,<1.25.0)"] -macie = ["mypy-boto3-macie (>=1.24.0,<1.25.0)"] -macie2 = ["mypy-boto3-macie2 (>=1.24.0,<1.25.0)"] -managedblockchain = ["mypy-boto3-managedblockchain (>=1.24.0,<1.25.0)"] -marketplace-catalog = ["mypy-boto3-marketplace-catalog (>=1.24.0,<1.25.0)"] -marketplace-entitlement = ["mypy-boto3-marketplace-entitlement (>=1.24.0,<1.25.0)"] -marketplacecommerceanalytics = ["mypy-boto3-marketplacecommerceanalytics (>=1.24.0,<1.25.0)"] -mediaconnect = ["mypy-boto3-mediaconnect (>=1.24.0,<1.25.0)"] 
-mediaconvert = ["mypy-boto3-mediaconvert (>=1.24.0,<1.25.0)"] -medialive = ["mypy-boto3-medialive (>=1.24.0,<1.25.0)"] -mediapackage = ["mypy-boto3-mediapackage (>=1.24.0,<1.25.0)"] -mediapackage-vod = ["mypy-boto3-mediapackage-vod (>=1.24.0,<1.25.0)"] -mediastore = ["mypy-boto3-mediastore (>=1.24.0,<1.25.0)"] -mediastore-data = ["mypy-boto3-mediastore-data (>=1.24.0,<1.25.0)"] -mediatailor = ["mypy-boto3-mediatailor (>=1.24.0,<1.25.0)"] -memorydb = ["mypy-boto3-memorydb (>=1.24.0,<1.25.0)"] -meteringmarketplace = ["mypy-boto3-meteringmarketplace (>=1.24.0,<1.25.0)"] -mgh = ["mypy-boto3-mgh (>=1.24.0,<1.25.0)"] -mgn = ["mypy-boto3-mgn (>=1.24.0,<1.25.0)"] -migration-hub-refactor-spaces = ["mypy-boto3-migration-hub-refactor-spaces (>=1.24.0,<1.25.0)"] -migrationhub-config = ["mypy-boto3-migrationhub-config (>=1.24.0,<1.25.0)"] -migrationhubstrategy = ["mypy-boto3-migrationhubstrategy (>=1.24.0,<1.25.0)"] -mobile = ["mypy-boto3-mobile (>=1.24.0,<1.25.0)"] -mq = ["mypy-boto3-mq (>=1.24.0,<1.25.0)"] -mturk = ["mypy-boto3-mturk (>=1.24.0,<1.25.0)"] -mwaa = ["mypy-boto3-mwaa (>=1.24.0,<1.25.0)"] -neptune = ["mypy-boto3-neptune (>=1.24.0,<1.25.0)"] -network-firewall = ["mypy-boto3-network-firewall (>=1.24.0,<1.25.0)"] -networkmanager = ["mypy-boto3-networkmanager (>=1.24.0,<1.25.0)"] -nimble = ["mypy-boto3-nimble (>=1.24.0,<1.25.0)"] -opensearch = ["mypy-boto3-opensearch (>=1.24.0,<1.25.0)"] -opsworks = ["mypy-boto3-opsworks (>=1.24.0,<1.25.0)"] -opsworkscm = ["mypy-boto3-opsworkscm (>=1.24.0,<1.25.0)"] -organizations = ["mypy-boto3-organizations (>=1.24.0,<1.25.0)"] -outposts = ["mypy-boto3-outposts (>=1.24.0,<1.25.0)"] -panorama = ["mypy-boto3-panorama (>=1.24.0,<1.25.0)"] -personalize = ["mypy-boto3-personalize (>=1.24.0,<1.25.0)"] -personalize-events = ["mypy-boto3-personalize-events (>=1.24.0,<1.25.0)"] -personalize-runtime = ["mypy-boto3-personalize-runtime (>=1.24.0,<1.25.0)"] -pi = ["mypy-boto3-pi (>=1.24.0,<1.25.0)"] -pinpoint = ["mypy-boto3-pinpoint (>=1.24.0,<1.25.0)"] -pinpoint-email = ["mypy-boto3-pinpoint-email (>=1.24.0,<1.25.0)"] -pinpoint-sms-voice = ["mypy-boto3-pinpoint-sms-voice (>=1.24.0,<1.25.0)"] -pinpoint-sms-voice-v2 = ["mypy-boto3-pinpoint-sms-voice-v2 (>=1.24.0,<1.25.0)"] -polly = ["mypy-boto3-polly (>=1.24.0,<1.25.0)"] -pricing = ["mypy-boto3-pricing (>=1.24.0,<1.25.0)"] -privatenetworks = ["mypy-boto3-privatenetworks (>=1.24.0,<1.25.0)"] -proton = ["mypy-boto3-proton (>=1.24.0,<1.25.0)"] -qldb = ["mypy-boto3-qldb (>=1.24.0,<1.25.0)"] -qldb-session = ["mypy-boto3-qldb-session (>=1.24.0,<1.25.0)"] -quicksight = ["mypy-boto3-quicksight (>=1.24.0,<1.25.0)"] -ram = ["mypy-boto3-ram (>=1.24.0,<1.25.0)"] -rbin = ["mypy-boto3-rbin (>=1.24.0,<1.25.0)"] -rds = ["mypy-boto3-rds (>=1.24.0,<1.25.0)"] -rds-data = ["mypy-boto3-rds-data (>=1.24.0,<1.25.0)"] -redshift = ["mypy-boto3-redshift (>=1.24.0,<1.25.0)"] -redshift-data = ["mypy-boto3-redshift-data (>=1.24.0,<1.25.0)"] -redshift-serverless = ["mypy-boto3-redshift-serverless (>=1.24.0,<1.25.0)"] -rekognition = ["mypy-boto3-rekognition (>=1.24.0,<1.25.0)"] -resiliencehub = ["mypy-boto3-resiliencehub (>=1.24.0,<1.25.0)"] -resource-groups = ["mypy-boto3-resource-groups (>=1.24.0,<1.25.0)"] -resourcegroupstaggingapi = ["mypy-boto3-resourcegroupstaggingapi (>=1.24.0,<1.25.0)"] -robomaker = ["mypy-boto3-robomaker (>=1.24.0,<1.25.0)"] -rolesanywhere = ["mypy-boto3-rolesanywhere (>=1.24.0,<1.25.0)"] -route53 = ["mypy-boto3-route53 (>=1.24.0,<1.25.0)"] -route53-recovery-cluster = ["mypy-boto3-route53-recovery-cluster 
(>=1.24.0,<1.25.0)"] -route53-recovery-control-config = ["mypy-boto3-route53-recovery-control-config (>=1.24.0,<1.25.0)"] -route53-recovery-readiness = ["mypy-boto3-route53-recovery-readiness (>=1.24.0,<1.25.0)"] -route53domains = ["mypy-boto3-route53domains (>=1.24.0,<1.25.0)"] -route53resolver = ["mypy-boto3-route53resolver (>=1.24.0,<1.25.0)"] -rum = ["mypy-boto3-rum (>=1.24.0,<1.25.0)"] -s3 = ["mypy-boto3-s3 (>=1.24.0,<1.25.0)"] -s3control = ["mypy-boto3-s3control (>=1.24.0,<1.25.0)"] -s3outposts = ["mypy-boto3-s3outposts (>=1.24.0,<1.25.0)"] -sagemaker = ["mypy-boto3-sagemaker (>=1.24.0,<1.25.0)"] -sagemaker-a2i-runtime = ["mypy-boto3-sagemaker-a2i-runtime (>=1.24.0,<1.25.0)"] -sagemaker-edge = ["mypy-boto3-sagemaker-edge (>=1.24.0,<1.25.0)"] -sagemaker-featurestore-runtime = ["mypy-boto3-sagemaker-featurestore-runtime (>=1.24.0,<1.25.0)"] -sagemaker-runtime = ["mypy-boto3-sagemaker-runtime (>=1.24.0,<1.25.0)"] -savingsplans = ["mypy-boto3-savingsplans (>=1.24.0,<1.25.0)"] -schemas = ["mypy-boto3-schemas (>=1.24.0,<1.25.0)"] -sdb = ["mypy-boto3-sdb (>=1.24.0,<1.25.0)"] -secretsmanager = ["mypy-boto3-secretsmanager (>=1.24.0,<1.25.0)"] -securityhub = ["mypy-boto3-securityhub (>=1.24.0,<1.25.0)"] -serverlessrepo = ["mypy-boto3-serverlessrepo (>=1.24.0,<1.25.0)"] -service-quotas = ["mypy-boto3-service-quotas (>=1.24.0,<1.25.0)"] -servicecatalog = ["mypy-boto3-servicecatalog (>=1.24.0,<1.25.0)"] -servicecatalog-appregistry = ["mypy-boto3-servicecatalog-appregistry (>=1.24.0,<1.25.0)"] -servicediscovery = ["mypy-boto3-servicediscovery (>=1.24.0,<1.25.0)"] -ses = ["mypy-boto3-ses (>=1.24.0,<1.25.0)"] -sesv2 = ["mypy-boto3-sesv2 (>=1.24.0,<1.25.0)"] -shield = ["mypy-boto3-shield (>=1.24.0,<1.25.0)"] -signer = ["mypy-boto3-signer (>=1.24.0,<1.25.0)"] -sms = ["mypy-boto3-sms (>=1.24.0,<1.25.0)"] -sms-voice = ["mypy-boto3-sms-voice (>=1.24.0,<1.25.0)"] -snow-device-management = ["mypy-boto3-snow-device-management (>=1.24.0,<1.25.0)"] -snowball = ["mypy-boto3-snowball (>=1.24.0,<1.25.0)"] -sns = ["mypy-boto3-sns (>=1.24.0,<1.25.0)"] -sqs = ["mypy-boto3-sqs (>=1.24.0,<1.25.0)"] -ssm = ["mypy-boto3-ssm (>=1.24.0,<1.25.0)"] -ssm-contacts = ["mypy-boto3-ssm-contacts (>=1.24.0,<1.25.0)"] -ssm-incidents = ["mypy-boto3-ssm-incidents (>=1.24.0,<1.25.0)"] -sso = ["mypy-boto3-sso (>=1.24.0,<1.25.0)"] -sso-admin = ["mypy-boto3-sso-admin (>=1.24.0,<1.25.0)"] -sso-oidc = ["mypy-boto3-sso-oidc (>=1.24.0,<1.25.0)"] -stepfunctions = ["mypy-boto3-stepfunctions (>=1.24.0,<1.25.0)"] -storagegateway = ["mypy-boto3-storagegateway (>=1.24.0,<1.25.0)"] -sts = ["mypy-boto3-sts (>=1.24.0,<1.25.0)"] -support = ["mypy-boto3-support (>=1.24.0,<1.25.0)"] -support-app = ["mypy-boto3-support-app (>=1.24.0,<1.25.0)"] -swf = ["mypy-boto3-swf (>=1.24.0,<1.25.0)"] -synthetics = ["mypy-boto3-synthetics (>=1.24.0,<1.25.0)"] -textract = ["mypy-boto3-textract (>=1.24.0,<1.25.0)"] -timestream-query = ["mypy-boto3-timestream-query (>=1.24.0,<1.25.0)"] -timestream-write = ["mypy-boto3-timestream-write (>=1.24.0,<1.25.0)"] -transcribe = ["mypy-boto3-transcribe (>=1.24.0,<1.25.0)"] -transfer = ["mypy-boto3-transfer (>=1.24.0,<1.25.0)"] -translate = ["mypy-boto3-translate (>=1.24.0,<1.25.0)"] -voice-id = ["mypy-boto3-voice-id (>=1.24.0,<1.25.0)"] -waf = ["mypy-boto3-waf (>=1.24.0,<1.25.0)"] -waf-regional = ["mypy-boto3-waf-regional (>=1.24.0,<1.25.0)"] -wafv2 = ["mypy-boto3-wafv2 (>=1.24.0,<1.25.0)"] -wellarchitected = ["mypy-boto3-wellarchitected (>=1.24.0,<1.25.0)"] -wisdom = ["mypy-boto3-wisdom (>=1.24.0,<1.25.0)"] -workdocs = 
["mypy-boto3-workdocs (>=1.24.0,<1.25.0)"] -worklink = ["mypy-boto3-worklink (>=1.24.0,<1.25.0)"] -workmail = ["mypy-boto3-workmail (>=1.24.0,<1.25.0)"] -workmailmessageflow = ["mypy-boto3-workmailmessageflow (>=1.24.0,<1.25.0)"] -workspaces = ["mypy-boto3-workspaces (>=1.24.0,<1.25.0)"] -workspaces-web = ["mypy-boto3-workspaces-web (>=1.24.0,<1.25.0)"] -xray = ["mypy-boto3-xray (>=1.24.0,<1.25.0)"] +accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.26.0,<1.27.0)"] +account = ["mypy-boto3-account (>=1.26.0,<1.27.0)"] +acm = ["mypy-boto3-acm (>=1.26.0,<1.27.0)"] +acm-pca = ["mypy-boto3-acm-pca (>=1.26.0,<1.27.0)"] +alexaforbusiness = ["mypy-boto3-alexaforbusiness (>=1.26.0,<1.27.0)"] +all = ["mypy-boto3-accessanalyzer (>=1.26.0,<1.27.0)", "mypy-boto3-account (>=1.26.0,<1.27.0)", "mypy-boto3-acm (>=1.26.0,<1.27.0)", "mypy-boto3-acm-pca (>=1.26.0,<1.27.0)", "mypy-boto3-alexaforbusiness (>=1.26.0,<1.27.0)", "mypy-boto3-amp (>=1.26.0,<1.27.0)", "mypy-boto3-amplify (>=1.26.0,<1.27.0)", "mypy-boto3-amplifybackend (>=1.26.0,<1.27.0)", "mypy-boto3-amplifyuibuilder (>=1.26.0,<1.27.0)", "mypy-boto3-apigateway (>=1.26.0,<1.27.0)", "mypy-boto3-apigatewaymanagementapi (>=1.26.0,<1.27.0)", "mypy-boto3-apigatewayv2 (>=1.26.0,<1.27.0)", "mypy-boto3-appconfig (>=1.26.0,<1.27.0)", "mypy-boto3-appconfigdata (>=1.26.0,<1.27.0)", "mypy-boto3-appflow (>=1.26.0,<1.27.0)", "mypy-boto3-appintegrations (>=1.26.0,<1.27.0)", "mypy-boto3-application-autoscaling (>=1.26.0,<1.27.0)", "mypy-boto3-application-insights (>=1.26.0,<1.27.0)", "mypy-boto3-applicationcostprofiler (>=1.26.0,<1.27.0)", "mypy-boto3-appmesh (>=1.26.0,<1.27.0)", "mypy-boto3-apprunner (>=1.26.0,<1.27.0)", "mypy-boto3-appstream (>=1.26.0,<1.27.0)", "mypy-boto3-appsync (>=1.26.0,<1.27.0)", "mypy-boto3-athena (>=1.26.0,<1.27.0)", "mypy-boto3-auditmanager (>=1.26.0,<1.27.0)", "mypy-boto3-autoscaling (>=1.26.0,<1.27.0)", "mypy-boto3-autoscaling-plans (>=1.26.0,<1.27.0)", "mypy-boto3-backup (>=1.26.0,<1.27.0)", "mypy-boto3-backup-gateway (>=1.26.0,<1.27.0)", "mypy-boto3-backupstorage (>=1.26.0,<1.27.0)", "mypy-boto3-batch (>=1.26.0,<1.27.0)", "mypy-boto3-billingconductor (>=1.26.0,<1.27.0)", "mypy-boto3-braket (>=1.26.0,<1.27.0)", "mypy-boto3-budgets (>=1.26.0,<1.27.0)", "mypy-boto3-ce (>=1.26.0,<1.27.0)", "mypy-boto3-chime (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-identity (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-media-pipelines (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-meetings (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-messaging (>=1.26.0,<1.27.0)", "mypy-boto3-chime-sdk-voice (>=1.26.0,<1.27.0)", "mypy-boto3-cloud9 (>=1.26.0,<1.27.0)", "mypy-boto3-cloudcontrol (>=1.26.0,<1.27.0)", "mypy-boto3-clouddirectory (>=1.26.0,<1.27.0)", "mypy-boto3-cloudformation (>=1.26.0,<1.27.0)", "mypy-boto3-cloudfront (>=1.26.0,<1.27.0)", "mypy-boto3-cloudhsm (>=1.26.0,<1.27.0)", "mypy-boto3-cloudhsmv2 (>=1.26.0,<1.27.0)", "mypy-boto3-cloudsearch (>=1.26.0,<1.27.0)", "mypy-boto3-cloudsearchdomain (>=1.26.0,<1.27.0)", "mypy-boto3-cloudtrail (>=1.26.0,<1.27.0)", "mypy-boto3-cloudwatch (>=1.26.0,<1.27.0)", "mypy-boto3-codeartifact (>=1.26.0,<1.27.0)", "mypy-boto3-codebuild (>=1.26.0,<1.27.0)", "mypy-boto3-codecommit (>=1.26.0,<1.27.0)", "mypy-boto3-codedeploy (>=1.26.0,<1.27.0)", "mypy-boto3-codeguru-reviewer (>=1.26.0,<1.27.0)", "mypy-boto3-codeguruprofiler (>=1.26.0,<1.27.0)", "mypy-boto3-codepipeline (>=1.26.0,<1.27.0)", "mypy-boto3-codestar (>=1.26.0,<1.27.0)", "mypy-boto3-codestar-connections (>=1.26.0,<1.27.0)", "mypy-boto3-codestar-notifications (>=1.26.0,<1.27.0)", 
"mypy-boto3-cognito-identity (>=1.26.0,<1.27.0)", "mypy-boto3-cognito-idp (>=1.26.0,<1.27.0)", "mypy-boto3-cognito-sync (>=1.26.0,<1.27.0)", "mypy-boto3-comprehend (>=1.26.0,<1.27.0)", "mypy-boto3-comprehendmedical (>=1.26.0,<1.27.0)", "mypy-boto3-compute-optimizer (>=1.26.0,<1.27.0)", "mypy-boto3-config (>=1.26.0,<1.27.0)", "mypy-boto3-connect (>=1.26.0,<1.27.0)", "mypy-boto3-connect-contact-lens (>=1.26.0,<1.27.0)", "mypy-boto3-connectcampaigns (>=1.26.0,<1.27.0)", "mypy-boto3-connectcases (>=1.26.0,<1.27.0)", "mypy-boto3-connectparticipant (>=1.26.0,<1.27.0)", "mypy-boto3-controltower (>=1.26.0,<1.27.0)", "mypy-boto3-cur (>=1.26.0,<1.27.0)", "mypy-boto3-customer-profiles (>=1.26.0,<1.27.0)", "mypy-boto3-databrew (>=1.26.0,<1.27.0)", "mypy-boto3-dataexchange (>=1.26.0,<1.27.0)", "mypy-boto3-datapipeline (>=1.26.0,<1.27.0)", "mypy-boto3-datasync (>=1.26.0,<1.27.0)", "mypy-boto3-dax (>=1.26.0,<1.27.0)", "mypy-boto3-detective (>=1.26.0,<1.27.0)", "mypy-boto3-devicefarm (>=1.26.0,<1.27.0)", "mypy-boto3-devops-guru (>=1.26.0,<1.27.0)", "mypy-boto3-directconnect (>=1.26.0,<1.27.0)", "mypy-boto3-discovery (>=1.26.0,<1.27.0)", "mypy-boto3-dlm (>=1.26.0,<1.27.0)", "mypy-boto3-dms (>=1.26.0,<1.27.0)", "mypy-boto3-docdb (>=1.26.0,<1.27.0)", "mypy-boto3-drs (>=1.26.0,<1.27.0)", "mypy-boto3-ds (>=1.26.0,<1.27.0)", "mypy-boto3-dynamodb (>=1.26.0,<1.27.0)", "mypy-boto3-dynamodbstreams (>=1.26.0,<1.27.0)", "mypy-boto3-ebs (>=1.26.0,<1.27.0)", "mypy-boto3-ec2 (>=1.26.0,<1.27.0)", "mypy-boto3-ec2-instance-connect (>=1.26.0,<1.27.0)", "mypy-boto3-ecr (>=1.26.0,<1.27.0)", "mypy-boto3-ecr-public (>=1.26.0,<1.27.0)", "mypy-boto3-ecs (>=1.26.0,<1.27.0)", "mypy-boto3-efs (>=1.26.0,<1.27.0)", "mypy-boto3-eks (>=1.26.0,<1.27.0)", "mypy-boto3-elastic-inference (>=1.26.0,<1.27.0)", "mypy-boto3-elasticache (>=1.26.0,<1.27.0)", "mypy-boto3-elasticbeanstalk (>=1.26.0,<1.27.0)", "mypy-boto3-elastictranscoder (>=1.26.0,<1.27.0)", "mypy-boto3-elb (>=1.26.0,<1.27.0)", "mypy-boto3-elbv2 (>=1.26.0,<1.27.0)", "mypy-boto3-emr (>=1.26.0,<1.27.0)", "mypy-boto3-emr-containers (>=1.26.0,<1.27.0)", "mypy-boto3-emr-serverless (>=1.26.0,<1.27.0)", "mypy-boto3-es (>=1.26.0,<1.27.0)", "mypy-boto3-events (>=1.26.0,<1.27.0)", "mypy-boto3-evidently (>=1.26.0,<1.27.0)", "mypy-boto3-finspace (>=1.26.0,<1.27.0)", "mypy-boto3-finspace-data (>=1.26.0,<1.27.0)", "mypy-boto3-firehose (>=1.26.0,<1.27.0)", "mypy-boto3-fis (>=1.26.0,<1.27.0)", "mypy-boto3-fms (>=1.26.0,<1.27.0)", "mypy-boto3-forecast (>=1.26.0,<1.27.0)", "mypy-boto3-forecastquery (>=1.26.0,<1.27.0)", "mypy-boto3-frauddetector (>=1.26.0,<1.27.0)", "mypy-boto3-fsx (>=1.26.0,<1.27.0)", "mypy-boto3-gamelift (>=1.26.0,<1.27.0)", "mypy-boto3-gamesparks (>=1.26.0,<1.27.0)", "mypy-boto3-glacier (>=1.26.0,<1.27.0)", "mypy-boto3-globalaccelerator (>=1.26.0,<1.27.0)", "mypy-boto3-glue (>=1.26.0,<1.27.0)", "mypy-boto3-grafana (>=1.26.0,<1.27.0)", "mypy-boto3-greengrass (>=1.26.0,<1.27.0)", "mypy-boto3-greengrassv2 (>=1.26.0,<1.27.0)", "mypy-boto3-groundstation (>=1.26.0,<1.27.0)", "mypy-boto3-guardduty (>=1.26.0,<1.27.0)", "mypy-boto3-health (>=1.26.0,<1.27.0)", "mypy-boto3-healthlake (>=1.26.0,<1.27.0)", "mypy-boto3-honeycode (>=1.26.0,<1.27.0)", "mypy-boto3-iam (>=1.26.0,<1.27.0)", "mypy-boto3-identitystore (>=1.26.0,<1.27.0)", "mypy-boto3-imagebuilder (>=1.26.0,<1.27.0)", "mypy-boto3-importexport (>=1.26.0,<1.27.0)", "mypy-boto3-inspector (>=1.26.0,<1.27.0)", "mypy-boto3-inspector2 (>=1.26.0,<1.27.0)", "mypy-boto3-iot (>=1.26.0,<1.27.0)", "mypy-boto3-iot-data (>=1.26.0,<1.27.0)", 
"mypy-boto3-iot-jobs-data (>=1.26.0,<1.27.0)", "mypy-boto3-iot-roborunner (>=1.26.0,<1.27.0)", "mypy-boto3-iot1click-devices (>=1.26.0,<1.27.0)", "mypy-boto3-iot1click-projects (>=1.26.0,<1.27.0)", "mypy-boto3-iotanalytics (>=1.26.0,<1.27.0)", "mypy-boto3-iotdeviceadvisor (>=1.26.0,<1.27.0)", "mypy-boto3-iotevents (>=1.26.0,<1.27.0)", "mypy-boto3-iotevents-data (>=1.26.0,<1.27.0)", "mypy-boto3-iotfleethub (>=1.26.0,<1.27.0)", "mypy-boto3-iotfleetwise (>=1.26.0,<1.27.0)", "mypy-boto3-iotsecuretunneling (>=1.26.0,<1.27.0)", "mypy-boto3-iotsitewise (>=1.26.0,<1.27.0)", "mypy-boto3-iotthingsgraph (>=1.26.0,<1.27.0)", "mypy-boto3-iottwinmaker (>=1.26.0,<1.27.0)", "mypy-boto3-iotwireless (>=1.26.0,<1.27.0)", "mypy-boto3-ivs (>=1.26.0,<1.27.0)", "mypy-boto3-ivschat (>=1.26.0,<1.27.0)", "mypy-boto3-kafka (>=1.26.0,<1.27.0)", "mypy-boto3-kafkaconnect (>=1.26.0,<1.27.0)", "mypy-boto3-kendra (>=1.26.0,<1.27.0)", "mypy-boto3-keyspaces (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis-video-archived-media (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis-video-media (>=1.26.0,<1.27.0)", "mypy-boto3-kinesis-video-signaling (>=1.26.0,<1.27.0)", "mypy-boto3-kinesisanalytics (>=1.26.0,<1.27.0)", "mypy-boto3-kinesisanalyticsv2 (>=1.26.0,<1.27.0)", "mypy-boto3-kinesisvideo (>=1.26.0,<1.27.0)", "mypy-boto3-kms (>=1.26.0,<1.27.0)", "mypy-boto3-lakeformation (>=1.26.0,<1.27.0)", "mypy-boto3-lambda (>=1.26.0,<1.27.0)", "mypy-boto3-lex-models (>=1.26.0,<1.27.0)", "mypy-boto3-lex-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-lexv2-models (>=1.26.0,<1.27.0)", "mypy-boto3-lexv2-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-license-manager (>=1.26.0,<1.27.0)", "mypy-boto3-license-manager-user-subscriptions (>=1.26.0,<1.27.0)", "mypy-boto3-lightsail (>=1.26.0,<1.27.0)", "mypy-boto3-location (>=1.26.0,<1.27.0)", "mypy-boto3-logs (>=1.26.0,<1.27.0)", "mypy-boto3-lookoutequipment (>=1.26.0,<1.27.0)", "mypy-boto3-lookoutmetrics (>=1.26.0,<1.27.0)", "mypy-boto3-lookoutvision (>=1.26.0,<1.27.0)", "mypy-boto3-m2 (>=1.26.0,<1.27.0)", "mypy-boto3-machinelearning (>=1.26.0,<1.27.0)", "mypy-boto3-macie (>=1.26.0,<1.27.0)", "mypy-boto3-macie2 (>=1.26.0,<1.27.0)", "mypy-boto3-managedblockchain (>=1.26.0,<1.27.0)", "mypy-boto3-marketplace-catalog (>=1.26.0,<1.27.0)", "mypy-boto3-marketplace-entitlement (>=1.26.0,<1.27.0)", "mypy-boto3-marketplacecommerceanalytics (>=1.26.0,<1.27.0)", "mypy-boto3-mediaconnect (>=1.26.0,<1.27.0)", "mypy-boto3-mediaconvert (>=1.26.0,<1.27.0)", "mypy-boto3-medialive (>=1.26.0,<1.27.0)", "mypy-boto3-mediapackage (>=1.26.0,<1.27.0)", "mypy-boto3-mediapackage-vod (>=1.26.0,<1.27.0)", "mypy-boto3-mediastore (>=1.26.0,<1.27.0)", "mypy-boto3-mediastore-data (>=1.26.0,<1.27.0)", "mypy-boto3-mediatailor (>=1.26.0,<1.27.0)", "mypy-boto3-memorydb (>=1.26.0,<1.27.0)", "mypy-boto3-meteringmarketplace (>=1.26.0,<1.27.0)", "mypy-boto3-mgh (>=1.26.0,<1.27.0)", "mypy-boto3-mgn (>=1.26.0,<1.27.0)", "mypy-boto3-migration-hub-refactor-spaces (>=1.26.0,<1.27.0)", "mypy-boto3-migrationhub-config (>=1.26.0,<1.27.0)", "mypy-boto3-migrationhuborchestrator (>=1.26.0,<1.27.0)", "mypy-boto3-migrationhubstrategy (>=1.26.0,<1.27.0)", "mypy-boto3-mobile (>=1.26.0,<1.27.0)", "mypy-boto3-mq (>=1.26.0,<1.27.0)", "mypy-boto3-mturk (>=1.26.0,<1.27.0)", "mypy-boto3-mwaa (>=1.26.0,<1.27.0)", "mypy-boto3-neptune (>=1.26.0,<1.27.0)", "mypy-boto3-network-firewall (>=1.26.0,<1.27.0)", "mypy-boto3-networkmanager (>=1.26.0,<1.27.0)", "mypy-boto3-nimble (>=1.26.0,<1.27.0)", "mypy-boto3-opensearch (>=1.26.0,<1.27.0)", 
"mypy-boto3-opsworks (>=1.26.0,<1.27.0)", "mypy-boto3-opsworkscm (>=1.26.0,<1.27.0)", "mypy-boto3-organizations (>=1.26.0,<1.27.0)", "mypy-boto3-outposts (>=1.26.0,<1.27.0)", "mypy-boto3-panorama (>=1.26.0,<1.27.0)", "mypy-boto3-personalize (>=1.26.0,<1.27.0)", "mypy-boto3-personalize-events (>=1.26.0,<1.27.0)", "mypy-boto3-personalize-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-pi (>=1.26.0,<1.27.0)", "mypy-boto3-pinpoint (>=1.26.0,<1.27.0)", "mypy-boto3-pinpoint-email (>=1.26.0,<1.27.0)", "mypy-boto3-pinpoint-sms-voice (>=1.26.0,<1.27.0)", "mypy-boto3-pinpoint-sms-voice-v2 (>=1.26.0,<1.27.0)", "mypy-boto3-polly (>=1.26.0,<1.27.0)", "mypy-boto3-pricing (>=1.26.0,<1.27.0)", "mypy-boto3-privatenetworks (>=1.26.0,<1.27.0)", "mypy-boto3-proton (>=1.26.0,<1.27.0)", "mypy-boto3-qldb (>=1.26.0,<1.27.0)", "mypy-boto3-qldb-session (>=1.26.0,<1.27.0)", "mypy-boto3-quicksight (>=1.26.0,<1.27.0)", "mypy-boto3-ram (>=1.26.0,<1.27.0)", "mypy-boto3-rbin (>=1.26.0,<1.27.0)", "mypy-boto3-rds (>=1.26.0,<1.27.0)", "mypy-boto3-rds-data (>=1.26.0,<1.27.0)", "mypy-boto3-redshift (>=1.26.0,<1.27.0)", "mypy-boto3-redshift-data (>=1.26.0,<1.27.0)", "mypy-boto3-redshift-serverless (>=1.26.0,<1.27.0)", "mypy-boto3-rekognition (>=1.26.0,<1.27.0)", "mypy-boto3-resiliencehub (>=1.26.0,<1.27.0)", "mypy-boto3-resource-explorer-2 (>=1.26.0,<1.27.0)", "mypy-boto3-resource-groups (>=1.26.0,<1.27.0)", "mypy-boto3-resourcegroupstaggingapi (>=1.26.0,<1.27.0)", "mypy-boto3-robomaker (>=1.26.0,<1.27.0)", "mypy-boto3-rolesanywhere (>=1.26.0,<1.27.0)", "mypy-boto3-route53 (>=1.26.0,<1.27.0)", "mypy-boto3-route53-recovery-cluster (>=1.26.0,<1.27.0)", "mypy-boto3-route53-recovery-control-config (>=1.26.0,<1.27.0)", "mypy-boto3-route53-recovery-readiness (>=1.26.0,<1.27.0)", "mypy-boto3-route53domains (>=1.26.0,<1.27.0)", "mypy-boto3-route53resolver (>=1.26.0,<1.27.0)", "mypy-boto3-rum (>=1.26.0,<1.27.0)", "mypy-boto3-s3 (>=1.26.0,<1.27.0)", "mypy-boto3-s3control (>=1.26.0,<1.27.0)", "mypy-boto3-s3outposts (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-a2i-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-edge (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-featurestore-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-sagemaker-runtime (>=1.26.0,<1.27.0)", "mypy-boto3-savingsplans (>=1.26.0,<1.27.0)", "mypy-boto3-scheduler (>=1.26.0,<1.27.0)", "mypy-boto3-schemas (>=1.26.0,<1.27.0)", "mypy-boto3-sdb (>=1.26.0,<1.27.0)", "mypy-boto3-secretsmanager (>=1.26.0,<1.27.0)", "mypy-boto3-securityhub (>=1.26.0,<1.27.0)", "mypy-boto3-serverlessrepo (>=1.26.0,<1.27.0)", "mypy-boto3-service-quotas (>=1.26.0,<1.27.0)", "mypy-boto3-servicecatalog (>=1.26.0,<1.27.0)", "mypy-boto3-servicecatalog-appregistry (>=1.26.0,<1.27.0)", "mypy-boto3-servicediscovery (>=1.26.0,<1.27.0)", "mypy-boto3-ses (>=1.26.0,<1.27.0)", "mypy-boto3-sesv2 (>=1.26.0,<1.27.0)", "mypy-boto3-shield (>=1.26.0,<1.27.0)", "mypy-boto3-signer (>=1.26.0,<1.27.0)", "mypy-boto3-sms (>=1.26.0,<1.27.0)", "mypy-boto3-sms-voice (>=1.26.0,<1.27.0)", "mypy-boto3-snow-device-management (>=1.26.0,<1.27.0)", "mypy-boto3-snowball (>=1.26.0,<1.27.0)", "mypy-boto3-sns (>=1.26.0,<1.27.0)", "mypy-boto3-sqs (>=1.26.0,<1.27.0)", "mypy-boto3-ssm (>=1.26.0,<1.27.0)", "mypy-boto3-ssm-contacts (>=1.26.0,<1.27.0)", "mypy-boto3-ssm-incidents (>=1.26.0,<1.27.0)", "mypy-boto3-ssm-sap (>=1.26.0,<1.27.0)", "mypy-boto3-sso (>=1.26.0,<1.27.0)", "mypy-boto3-sso-admin (>=1.26.0,<1.27.0)", "mypy-boto3-sso-oidc (>=1.26.0,<1.27.0)", "mypy-boto3-stepfunctions (>=1.26.0,<1.27.0)", 
"mypy-boto3-storagegateway (>=1.26.0,<1.27.0)", "mypy-boto3-sts (>=1.26.0,<1.27.0)", "mypy-boto3-support (>=1.26.0,<1.27.0)", "mypy-boto3-support-app (>=1.26.0,<1.27.0)", "mypy-boto3-swf (>=1.26.0,<1.27.0)", "mypy-boto3-synthetics (>=1.26.0,<1.27.0)", "mypy-boto3-textract (>=1.26.0,<1.27.0)", "mypy-boto3-timestream-query (>=1.26.0,<1.27.0)", "mypy-boto3-timestream-write (>=1.26.0,<1.27.0)", "mypy-boto3-transcribe (>=1.26.0,<1.27.0)", "mypy-boto3-transfer (>=1.26.0,<1.27.0)", "mypy-boto3-translate (>=1.26.0,<1.27.0)", "mypy-boto3-voice-id (>=1.26.0,<1.27.0)", "mypy-boto3-waf (>=1.26.0,<1.27.0)", "mypy-boto3-waf-regional (>=1.26.0,<1.27.0)", "mypy-boto3-wafv2 (>=1.26.0,<1.27.0)", "mypy-boto3-wellarchitected (>=1.26.0,<1.27.0)", "mypy-boto3-wisdom (>=1.26.0,<1.27.0)", "mypy-boto3-workdocs (>=1.26.0,<1.27.0)", "mypy-boto3-worklink (>=1.26.0,<1.27.0)", "mypy-boto3-workmail (>=1.26.0,<1.27.0)", "mypy-boto3-workmailmessageflow (>=1.26.0,<1.27.0)", "mypy-boto3-workspaces (>=1.26.0,<1.27.0)", "mypy-boto3-workspaces-web (>=1.26.0,<1.27.0)", "mypy-boto3-xray (>=1.26.0,<1.27.0)"] +amp = ["mypy-boto3-amp (>=1.26.0,<1.27.0)"] +amplify = ["mypy-boto3-amplify (>=1.26.0,<1.27.0)"] +amplifybackend = ["mypy-boto3-amplifybackend (>=1.26.0,<1.27.0)"] +amplifyuibuilder = ["mypy-boto3-amplifyuibuilder (>=1.26.0,<1.27.0)"] +apigateway = ["mypy-boto3-apigateway (>=1.26.0,<1.27.0)"] +apigatewaymanagementapi = ["mypy-boto3-apigatewaymanagementapi (>=1.26.0,<1.27.0)"] +apigatewayv2 = ["mypy-boto3-apigatewayv2 (>=1.26.0,<1.27.0)"] +appconfig = ["mypy-boto3-appconfig (>=1.26.0,<1.27.0)"] +appconfigdata = ["mypy-boto3-appconfigdata (>=1.26.0,<1.27.0)"] +appflow = ["mypy-boto3-appflow (>=1.26.0,<1.27.0)"] +appintegrations = ["mypy-boto3-appintegrations (>=1.26.0,<1.27.0)"] +application-autoscaling = ["mypy-boto3-application-autoscaling (>=1.26.0,<1.27.0)"] +application-insights = ["mypy-boto3-application-insights (>=1.26.0,<1.27.0)"] +applicationcostprofiler = ["mypy-boto3-applicationcostprofiler (>=1.26.0,<1.27.0)"] +appmesh = ["mypy-boto3-appmesh (>=1.26.0,<1.27.0)"] +apprunner = ["mypy-boto3-apprunner (>=1.26.0,<1.27.0)"] +appstream = ["mypy-boto3-appstream (>=1.26.0,<1.27.0)"] +appsync = ["mypy-boto3-appsync (>=1.26.0,<1.27.0)"] +athena = ["mypy-boto3-athena (>=1.26.0,<1.27.0)"] +auditmanager = ["mypy-boto3-auditmanager (>=1.26.0,<1.27.0)"] +autoscaling = ["mypy-boto3-autoscaling (>=1.26.0,<1.27.0)"] +autoscaling-plans = ["mypy-boto3-autoscaling-plans (>=1.26.0,<1.27.0)"] +backup = ["mypy-boto3-backup (>=1.26.0,<1.27.0)"] +backup-gateway = ["mypy-boto3-backup-gateway (>=1.26.0,<1.27.0)"] +backupstorage = ["mypy-boto3-backupstorage (>=1.26.0,<1.27.0)"] +batch = ["mypy-boto3-batch (>=1.26.0,<1.27.0)"] +billingconductor = ["mypy-boto3-billingconductor (>=1.26.0,<1.27.0)"] +braket = ["mypy-boto3-braket (>=1.26.0,<1.27.0)"] +budgets = ["mypy-boto3-budgets (>=1.26.0,<1.27.0)"] +ce = ["mypy-boto3-ce (>=1.26.0,<1.27.0)"] +chime = ["mypy-boto3-chime (>=1.26.0,<1.27.0)"] +chime-sdk-identity = ["mypy-boto3-chime-sdk-identity (>=1.26.0,<1.27.0)"] +chime-sdk-media-pipelines = ["mypy-boto3-chime-sdk-media-pipelines (>=1.26.0,<1.27.0)"] +chime-sdk-meetings = ["mypy-boto3-chime-sdk-meetings (>=1.26.0,<1.27.0)"] +chime-sdk-messaging = ["mypy-boto3-chime-sdk-messaging (>=1.26.0,<1.27.0)"] +chime-sdk-voice = ["mypy-boto3-chime-sdk-voice (>=1.26.0,<1.27.0)"] +cloud9 = ["mypy-boto3-cloud9 (>=1.26.0,<1.27.0)"] +cloudcontrol = ["mypy-boto3-cloudcontrol (>=1.26.0,<1.27.0)"] +clouddirectory = ["mypy-boto3-clouddirectory (>=1.26.0,<1.27.0)"] 
+cloudformation = ["mypy-boto3-cloudformation (>=1.26.0,<1.27.0)"] +cloudfront = ["mypy-boto3-cloudfront (>=1.26.0,<1.27.0)"] +cloudhsm = ["mypy-boto3-cloudhsm (>=1.26.0,<1.27.0)"] +cloudhsmv2 = ["mypy-boto3-cloudhsmv2 (>=1.26.0,<1.27.0)"] +cloudsearch = ["mypy-boto3-cloudsearch (>=1.26.0,<1.27.0)"] +cloudsearchdomain = ["mypy-boto3-cloudsearchdomain (>=1.26.0,<1.27.0)"] +cloudtrail = ["mypy-boto3-cloudtrail (>=1.26.0,<1.27.0)"] +cloudwatch = ["mypy-boto3-cloudwatch (>=1.26.0,<1.27.0)"] +codeartifact = ["mypy-boto3-codeartifact (>=1.26.0,<1.27.0)"] +codebuild = ["mypy-boto3-codebuild (>=1.26.0,<1.27.0)"] +codecommit = ["mypy-boto3-codecommit (>=1.26.0,<1.27.0)"] +codedeploy = ["mypy-boto3-codedeploy (>=1.26.0,<1.27.0)"] +codeguru-reviewer = ["mypy-boto3-codeguru-reviewer (>=1.26.0,<1.27.0)"] +codeguruprofiler = ["mypy-boto3-codeguruprofiler (>=1.26.0,<1.27.0)"] +codepipeline = ["mypy-boto3-codepipeline (>=1.26.0,<1.27.0)"] +codestar = ["mypy-boto3-codestar (>=1.26.0,<1.27.0)"] +codestar-connections = ["mypy-boto3-codestar-connections (>=1.26.0,<1.27.0)"] +codestar-notifications = ["mypy-boto3-codestar-notifications (>=1.26.0,<1.27.0)"] +cognito-identity = ["mypy-boto3-cognito-identity (>=1.26.0,<1.27.0)"] +cognito-idp = ["mypy-boto3-cognito-idp (>=1.26.0,<1.27.0)"] +cognito-sync = ["mypy-boto3-cognito-sync (>=1.26.0,<1.27.0)"] +comprehend = ["mypy-boto3-comprehend (>=1.26.0,<1.27.0)"] +comprehendmedical = ["mypy-boto3-comprehendmedical (>=1.26.0,<1.27.0)"] +compute-optimizer = ["mypy-boto3-compute-optimizer (>=1.26.0,<1.27.0)"] +config = ["mypy-boto3-config (>=1.26.0,<1.27.0)"] +connect = ["mypy-boto3-connect (>=1.26.0,<1.27.0)"] +connect-contact-lens = ["mypy-boto3-connect-contact-lens (>=1.26.0,<1.27.0)"] +connectcampaigns = ["mypy-boto3-connectcampaigns (>=1.26.0,<1.27.0)"] +connectcases = ["mypy-boto3-connectcases (>=1.26.0,<1.27.0)"] +connectparticipant = ["mypy-boto3-connectparticipant (>=1.26.0,<1.27.0)"] +controltower = ["mypy-boto3-controltower (>=1.26.0,<1.27.0)"] +cur = ["mypy-boto3-cur (>=1.26.0,<1.27.0)"] +customer-profiles = ["mypy-boto3-customer-profiles (>=1.26.0,<1.27.0)"] +databrew = ["mypy-boto3-databrew (>=1.26.0,<1.27.0)"] +dataexchange = ["mypy-boto3-dataexchange (>=1.26.0,<1.27.0)"] +datapipeline = ["mypy-boto3-datapipeline (>=1.26.0,<1.27.0)"] +datasync = ["mypy-boto3-datasync (>=1.26.0,<1.27.0)"] +dax = ["mypy-boto3-dax (>=1.26.0,<1.27.0)"] +detective = ["mypy-boto3-detective (>=1.26.0,<1.27.0)"] +devicefarm = ["mypy-boto3-devicefarm (>=1.26.0,<1.27.0)"] +devops-guru = ["mypy-boto3-devops-guru (>=1.26.0,<1.27.0)"] +directconnect = ["mypy-boto3-directconnect (>=1.26.0,<1.27.0)"] +discovery = ["mypy-boto3-discovery (>=1.26.0,<1.27.0)"] +dlm = ["mypy-boto3-dlm (>=1.26.0,<1.27.0)"] +dms = ["mypy-boto3-dms (>=1.26.0,<1.27.0)"] +docdb = ["mypy-boto3-docdb (>=1.26.0,<1.27.0)"] +drs = ["mypy-boto3-drs (>=1.26.0,<1.27.0)"] +ds = ["mypy-boto3-ds (>=1.26.0,<1.27.0)"] +dynamodb = ["mypy-boto3-dynamodb (>=1.26.0,<1.27.0)"] +dynamodbstreams = ["mypy-boto3-dynamodbstreams (>=1.26.0,<1.27.0)"] +ebs = ["mypy-boto3-ebs (>=1.26.0,<1.27.0)"] +ec2 = ["mypy-boto3-ec2 (>=1.26.0,<1.27.0)"] +ec2-instance-connect = ["mypy-boto3-ec2-instance-connect (>=1.26.0,<1.27.0)"] +ecr = ["mypy-boto3-ecr (>=1.26.0,<1.27.0)"] +ecr-public = ["mypy-boto3-ecr-public (>=1.26.0,<1.27.0)"] +ecs = ["mypy-boto3-ecs (>=1.26.0,<1.27.0)"] +efs = ["mypy-boto3-efs (>=1.26.0,<1.27.0)"] +eks = ["mypy-boto3-eks (>=1.26.0,<1.27.0)"] +elastic-inference = ["mypy-boto3-elastic-inference (>=1.26.0,<1.27.0)"] +elasticache = 
["mypy-boto3-elasticache (>=1.26.0,<1.27.0)"] +elasticbeanstalk = ["mypy-boto3-elasticbeanstalk (>=1.26.0,<1.27.0)"] +elastictranscoder = ["mypy-boto3-elastictranscoder (>=1.26.0,<1.27.0)"] +elb = ["mypy-boto3-elb (>=1.26.0,<1.27.0)"] +elbv2 = ["mypy-boto3-elbv2 (>=1.26.0,<1.27.0)"] +emr = ["mypy-boto3-emr (>=1.26.0,<1.27.0)"] +emr-containers = ["mypy-boto3-emr-containers (>=1.26.0,<1.27.0)"] +emr-serverless = ["mypy-boto3-emr-serverless (>=1.26.0,<1.27.0)"] +es = ["mypy-boto3-es (>=1.26.0,<1.27.0)"] +essential = ["mypy-boto3-cloudformation (>=1.26.0,<1.27.0)", "mypy-boto3-dynamodb (>=1.26.0,<1.27.0)", "mypy-boto3-ec2 (>=1.26.0,<1.27.0)", "mypy-boto3-lambda (>=1.26.0,<1.27.0)", "mypy-boto3-rds (>=1.26.0,<1.27.0)", "mypy-boto3-s3 (>=1.26.0,<1.27.0)", "mypy-boto3-sqs (>=1.26.0,<1.27.0)"] +events = ["mypy-boto3-events (>=1.26.0,<1.27.0)"] +evidently = ["mypy-boto3-evidently (>=1.26.0,<1.27.0)"] +finspace = ["mypy-boto3-finspace (>=1.26.0,<1.27.0)"] +finspace-data = ["mypy-boto3-finspace-data (>=1.26.0,<1.27.0)"] +firehose = ["mypy-boto3-firehose (>=1.26.0,<1.27.0)"] +fis = ["mypy-boto3-fis (>=1.26.0,<1.27.0)"] +fms = ["mypy-boto3-fms (>=1.26.0,<1.27.0)"] +forecast = ["mypy-boto3-forecast (>=1.26.0,<1.27.0)"] +forecastquery = ["mypy-boto3-forecastquery (>=1.26.0,<1.27.0)"] +frauddetector = ["mypy-boto3-frauddetector (>=1.26.0,<1.27.0)"] +fsx = ["mypy-boto3-fsx (>=1.26.0,<1.27.0)"] +gamelift = ["mypy-boto3-gamelift (>=1.26.0,<1.27.0)"] +gamesparks = ["mypy-boto3-gamesparks (>=1.26.0,<1.27.0)"] +glacier = ["mypy-boto3-glacier (>=1.26.0,<1.27.0)"] +globalaccelerator = ["mypy-boto3-globalaccelerator (>=1.26.0,<1.27.0)"] +glue = ["mypy-boto3-glue (>=1.26.0,<1.27.0)"] +grafana = ["mypy-boto3-grafana (>=1.26.0,<1.27.0)"] +greengrass = ["mypy-boto3-greengrass (>=1.26.0,<1.27.0)"] +greengrassv2 = ["mypy-boto3-greengrassv2 (>=1.26.0,<1.27.0)"] +groundstation = ["mypy-boto3-groundstation (>=1.26.0,<1.27.0)"] +guardduty = ["mypy-boto3-guardduty (>=1.26.0,<1.27.0)"] +health = ["mypy-boto3-health (>=1.26.0,<1.27.0)"] +healthlake = ["mypy-boto3-healthlake (>=1.26.0,<1.27.0)"] +honeycode = ["mypy-boto3-honeycode (>=1.26.0,<1.27.0)"] +iam = ["mypy-boto3-iam (>=1.26.0,<1.27.0)"] +identitystore = ["mypy-boto3-identitystore (>=1.26.0,<1.27.0)"] +imagebuilder = ["mypy-boto3-imagebuilder (>=1.26.0,<1.27.0)"] +importexport = ["mypy-boto3-importexport (>=1.26.0,<1.27.0)"] +inspector = ["mypy-boto3-inspector (>=1.26.0,<1.27.0)"] +inspector2 = ["mypy-boto3-inspector2 (>=1.26.0,<1.27.0)"] +iot = ["mypy-boto3-iot (>=1.26.0,<1.27.0)"] +iot-data = ["mypy-boto3-iot-data (>=1.26.0,<1.27.0)"] +iot-jobs-data = ["mypy-boto3-iot-jobs-data (>=1.26.0,<1.27.0)"] +iot-roborunner = ["mypy-boto3-iot-roborunner (>=1.26.0,<1.27.0)"] +iot1click-devices = ["mypy-boto3-iot1click-devices (>=1.26.0,<1.27.0)"] +iot1click-projects = ["mypy-boto3-iot1click-projects (>=1.26.0,<1.27.0)"] +iotanalytics = ["mypy-boto3-iotanalytics (>=1.26.0,<1.27.0)"] +iotdeviceadvisor = ["mypy-boto3-iotdeviceadvisor (>=1.26.0,<1.27.0)"] +iotevents = ["mypy-boto3-iotevents (>=1.26.0,<1.27.0)"] +iotevents-data = ["mypy-boto3-iotevents-data (>=1.26.0,<1.27.0)"] +iotfleethub = ["mypy-boto3-iotfleethub (>=1.26.0,<1.27.0)"] +iotfleetwise = ["mypy-boto3-iotfleetwise (>=1.26.0,<1.27.0)"] +iotsecuretunneling = ["mypy-boto3-iotsecuretunneling (>=1.26.0,<1.27.0)"] +iotsitewise = ["mypy-boto3-iotsitewise (>=1.26.0,<1.27.0)"] +iotthingsgraph = ["mypy-boto3-iotthingsgraph (>=1.26.0,<1.27.0)"] +iottwinmaker = ["mypy-boto3-iottwinmaker (>=1.26.0,<1.27.0)"] +iotwireless = 
["mypy-boto3-iotwireless (>=1.26.0,<1.27.0)"] +ivs = ["mypy-boto3-ivs (>=1.26.0,<1.27.0)"] +ivschat = ["mypy-boto3-ivschat (>=1.26.0,<1.27.0)"] +kafka = ["mypy-boto3-kafka (>=1.26.0,<1.27.0)"] +kafkaconnect = ["mypy-boto3-kafkaconnect (>=1.26.0,<1.27.0)"] +kendra = ["mypy-boto3-kendra (>=1.26.0,<1.27.0)"] +keyspaces = ["mypy-boto3-keyspaces (>=1.26.0,<1.27.0)"] +kinesis = ["mypy-boto3-kinesis (>=1.26.0,<1.27.0)"] +kinesis-video-archived-media = ["mypy-boto3-kinesis-video-archived-media (>=1.26.0,<1.27.0)"] +kinesis-video-media = ["mypy-boto3-kinesis-video-media (>=1.26.0,<1.27.0)"] +kinesis-video-signaling = ["mypy-boto3-kinesis-video-signaling (>=1.26.0,<1.27.0)"] +kinesisanalytics = ["mypy-boto3-kinesisanalytics (>=1.26.0,<1.27.0)"] +kinesisanalyticsv2 = ["mypy-boto3-kinesisanalyticsv2 (>=1.26.0,<1.27.0)"] +kinesisvideo = ["mypy-boto3-kinesisvideo (>=1.26.0,<1.27.0)"] +kms = ["mypy-boto3-kms (>=1.26.0,<1.27.0)"] +lakeformation = ["mypy-boto3-lakeformation (>=1.26.0,<1.27.0)"] +lambda = ["mypy-boto3-lambda (>=1.26.0,<1.27.0)"] +lex-models = ["mypy-boto3-lex-models (>=1.26.0,<1.27.0)"] +lex-runtime = ["mypy-boto3-lex-runtime (>=1.26.0,<1.27.0)"] +lexv2-models = ["mypy-boto3-lexv2-models (>=1.26.0,<1.27.0)"] +lexv2-runtime = ["mypy-boto3-lexv2-runtime (>=1.26.0,<1.27.0)"] +license-manager = ["mypy-boto3-license-manager (>=1.26.0,<1.27.0)"] +license-manager-user-subscriptions = ["mypy-boto3-license-manager-user-subscriptions (>=1.26.0,<1.27.0)"] +lightsail = ["mypy-boto3-lightsail (>=1.26.0,<1.27.0)"] +location = ["mypy-boto3-location (>=1.26.0,<1.27.0)"] +logs = ["mypy-boto3-logs (>=1.26.0,<1.27.0)"] +lookoutequipment = ["mypy-boto3-lookoutequipment (>=1.26.0,<1.27.0)"] +lookoutmetrics = ["mypy-boto3-lookoutmetrics (>=1.26.0,<1.27.0)"] +lookoutvision = ["mypy-boto3-lookoutvision (>=1.26.0,<1.27.0)"] +m2 = ["mypy-boto3-m2 (>=1.26.0,<1.27.0)"] +machinelearning = ["mypy-boto3-machinelearning (>=1.26.0,<1.27.0)"] +macie = ["mypy-boto3-macie (>=1.26.0,<1.27.0)"] +macie2 = ["mypy-boto3-macie2 (>=1.26.0,<1.27.0)"] +managedblockchain = ["mypy-boto3-managedblockchain (>=1.26.0,<1.27.0)"] +marketplace-catalog = ["mypy-boto3-marketplace-catalog (>=1.26.0,<1.27.0)"] +marketplace-entitlement = ["mypy-boto3-marketplace-entitlement (>=1.26.0,<1.27.0)"] +marketplacecommerceanalytics = ["mypy-boto3-marketplacecommerceanalytics (>=1.26.0,<1.27.0)"] +mediaconnect = ["mypy-boto3-mediaconnect (>=1.26.0,<1.27.0)"] +mediaconvert = ["mypy-boto3-mediaconvert (>=1.26.0,<1.27.0)"] +medialive = ["mypy-boto3-medialive (>=1.26.0,<1.27.0)"] +mediapackage = ["mypy-boto3-mediapackage (>=1.26.0,<1.27.0)"] +mediapackage-vod = ["mypy-boto3-mediapackage-vod (>=1.26.0,<1.27.0)"] +mediastore = ["mypy-boto3-mediastore (>=1.26.0,<1.27.0)"] +mediastore-data = ["mypy-boto3-mediastore-data (>=1.26.0,<1.27.0)"] +mediatailor = ["mypy-boto3-mediatailor (>=1.26.0,<1.27.0)"] +memorydb = ["mypy-boto3-memorydb (>=1.26.0,<1.27.0)"] +meteringmarketplace = ["mypy-boto3-meteringmarketplace (>=1.26.0,<1.27.0)"] +mgh = ["mypy-boto3-mgh (>=1.26.0,<1.27.0)"] +mgn = ["mypy-boto3-mgn (>=1.26.0,<1.27.0)"] +migration-hub-refactor-spaces = ["mypy-boto3-migration-hub-refactor-spaces (>=1.26.0,<1.27.0)"] +migrationhub-config = ["mypy-boto3-migrationhub-config (>=1.26.0,<1.27.0)"] +migrationhuborchestrator = ["mypy-boto3-migrationhuborchestrator (>=1.26.0,<1.27.0)"] +migrationhubstrategy = ["mypy-boto3-migrationhubstrategy (>=1.26.0,<1.27.0)"] +mobile = ["mypy-boto3-mobile (>=1.26.0,<1.27.0)"] +mq = ["mypy-boto3-mq (>=1.26.0,<1.27.0)"] +mturk = 
["mypy-boto3-mturk (>=1.26.0,<1.27.0)"] +mwaa = ["mypy-boto3-mwaa (>=1.26.0,<1.27.0)"] +neptune = ["mypy-boto3-neptune (>=1.26.0,<1.27.0)"] +network-firewall = ["mypy-boto3-network-firewall (>=1.26.0,<1.27.0)"] +networkmanager = ["mypy-boto3-networkmanager (>=1.26.0,<1.27.0)"] +nimble = ["mypy-boto3-nimble (>=1.26.0,<1.27.0)"] +opensearch = ["mypy-boto3-opensearch (>=1.26.0,<1.27.0)"] +opsworks = ["mypy-boto3-opsworks (>=1.26.0,<1.27.0)"] +opsworkscm = ["mypy-boto3-opsworkscm (>=1.26.0,<1.27.0)"] +organizations = ["mypy-boto3-organizations (>=1.26.0,<1.27.0)"] +outposts = ["mypy-boto3-outposts (>=1.26.0,<1.27.0)"] +panorama = ["mypy-boto3-panorama (>=1.26.0,<1.27.0)"] +personalize = ["mypy-boto3-personalize (>=1.26.0,<1.27.0)"] +personalize-events = ["mypy-boto3-personalize-events (>=1.26.0,<1.27.0)"] +personalize-runtime = ["mypy-boto3-personalize-runtime (>=1.26.0,<1.27.0)"] +pi = ["mypy-boto3-pi (>=1.26.0,<1.27.0)"] +pinpoint = ["mypy-boto3-pinpoint (>=1.26.0,<1.27.0)"] +pinpoint-email = ["mypy-boto3-pinpoint-email (>=1.26.0,<1.27.0)"] +pinpoint-sms-voice = ["mypy-boto3-pinpoint-sms-voice (>=1.26.0,<1.27.0)"] +pinpoint-sms-voice-v2 = ["mypy-boto3-pinpoint-sms-voice-v2 (>=1.26.0,<1.27.0)"] +polly = ["mypy-boto3-polly (>=1.26.0,<1.27.0)"] +pricing = ["mypy-boto3-pricing (>=1.26.0,<1.27.0)"] +privatenetworks = ["mypy-boto3-privatenetworks (>=1.26.0,<1.27.0)"] +proton = ["mypy-boto3-proton (>=1.26.0,<1.27.0)"] +qldb = ["mypy-boto3-qldb (>=1.26.0,<1.27.0)"] +qldb-session = ["mypy-boto3-qldb-session (>=1.26.0,<1.27.0)"] +quicksight = ["mypy-boto3-quicksight (>=1.26.0,<1.27.0)"] +ram = ["mypy-boto3-ram (>=1.26.0,<1.27.0)"] +rbin = ["mypy-boto3-rbin (>=1.26.0,<1.27.0)"] +rds = ["mypy-boto3-rds (>=1.26.0,<1.27.0)"] +rds-data = ["mypy-boto3-rds-data (>=1.26.0,<1.27.0)"] +redshift = ["mypy-boto3-redshift (>=1.26.0,<1.27.0)"] +redshift-data = ["mypy-boto3-redshift-data (>=1.26.0,<1.27.0)"] +redshift-serverless = ["mypy-boto3-redshift-serverless (>=1.26.0,<1.27.0)"] +rekognition = ["mypy-boto3-rekognition (>=1.26.0,<1.27.0)"] +resiliencehub = ["mypy-boto3-resiliencehub (>=1.26.0,<1.27.0)"] +resource-explorer-2 = ["mypy-boto3-resource-explorer-2 (>=1.26.0,<1.27.0)"] +resource-groups = ["mypy-boto3-resource-groups (>=1.26.0,<1.27.0)"] +resourcegroupstaggingapi = ["mypy-boto3-resourcegroupstaggingapi (>=1.26.0,<1.27.0)"] +robomaker = ["mypy-boto3-robomaker (>=1.26.0,<1.27.0)"] +rolesanywhere = ["mypy-boto3-rolesanywhere (>=1.26.0,<1.27.0)"] +route53 = ["mypy-boto3-route53 (>=1.26.0,<1.27.0)"] +route53-recovery-cluster = ["mypy-boto3-route53-recovery-cluster (>=1.26.0,<1.27.0)"] +route53-recovery-control-config = ["mypy-boto3-route53-recovery-control-config (>=1.26.0,<1.27.0)"] +route53-recovery-readiness = ["mypy-boto3-route53-recovery-readiness (>=1.26.0,<1.27.0)"] +route53domains = ["mypy-boto3-route53domains (>=1.26.0,<1.27.0)"] +route53resolver = ["mypy-boto3-route53resolver (>=1.26.0,<1.27.0)"] +rum = ["mypy-boto3-rum (>=1.26.0,<1.27.0)"] +s3 = ["mypy-boto3-s3 (>=1.26.0,<1.27.0)"] +s3control = ["mypy-boto3-s3control (>=1.26.0,<1.27.0)"] +s3outposts = ["mypy-boto3-s3outposts (>=1.26.0,<1.27.0)"] +sagemaker = ["mypy-boto3-sagemaker (>=1.26.0,<1.27.0)"] +sagemaker-a2i-runtime = ["mypy-boto3-sagemaker-a2i-runtime (>=1.26.0,<1.27.0)"] +sagemaker-edge = ["mypy-boto3-sagemaker-edge (>=1.26.0,<1.27.0)"] +sagemaker-featurestore-runtime = ["mypy-boto3-sagemaker-featurestore-runtime (>=1.26.0,<1.27.0)"] +sagemaker-runtime = ["mypy-boto3-sagemaker-runtime (>=1.26.0,<1.27.0)"] +savingsplans = 
["mypy-boto3-savingsplans (>=1.26.0,<1.27.0)"] +scheduler = ["mypy-boto3-scheduler (>=1.26.0,<1.27.0)"] +schemas = ["mypy-boto3-schemas (>=1.26.0,<1.27.0)"] +sdb = ["mypy-boto3-sdb (>=1.26.0,<1.27.0)"] +secretsmanager = ["mypy-boto3-secretsmanager (>=1.26.0,<1.27.0)"] +securityhub = ["mypy-boto3-securityhub (>=1.26.0,<1.27.0)"] +serverlessrepo = ["mypy-boto3-serverlessrepo (>=1.26.0,<1.27.0)"] +service-quotas = ["mypy-boto3-service-quotas (>=1.26.0,<1.27.0)"] +servicecatalog = ["mypy-boto3-servicecatalog (>=1.26.0,<1.27.0)"] +servicecatalog-appregistry = ["mypy-boto3-servicecatalog-appregistry (>=1.26.0,<1.27.0)"] +servicediscovery = ["mypy-boto3-servicediscovery (>=1.26.0,<1.27.0)"] +ses = ["mypy-boto3-ses (>=1.26.0,<1.27.0)"] +sesv2 = ["mypy-boto3-sesv2 (>=1.26.0,<1.27.0)"] +shield = ["mypy-boto3-shield (>=1.26.0,<1.27.0)"] +signer = ["mypy-boto3-signer (>=1.26.0,<1.27.0)"] +sms = ["mypy-boto3-sms (>=1.26.0,<1.27.0)"] +sms-voice = ["mypy-boto3-sms-voice (>=1.26.0,<1.27.0)"] +snow-device-management = ["mypy-boto3-snow-device-management (>=1.26.0,<1.27.0)"] +snowball = ["mypy-boto3-snowball (>=1.26.0,<1.27.0)"] +sns = ["mypy-boto3-sns (>=1.26.0,<1.27.0)"] +sqs = ["mypy-boto3-sqs (>=1.26.0,<1.27.0)"] +ssm = ["mypy-boto3-ssm (>=1.26.0,<1.27.0)"] +ssm-contacts = ["mypy-boto3-ssm-contacts (>=1.26.0,<1.27.0)"] +ssm-incidents = ["mypy-boto3-ssm-incidents (>=1.26.0,<1.27.0)"] +ssm-sap = ["mypy-boto3-ssm-sap (>=1.26.0,<1.27.0)"] +sso = ["mypy-boto3-sso (>=1.26.0,<1.27.0)"] +sso-admin = ["mypy-boto3-sso-admin (>=1.26.0,<1.27.0)"] +sso-oidc = ["mypy-boto3-sso-oidc (>=1.26.0,<1.27.0)"] +stepfunctions = ["mypy-boto3-stepfunctions (>=1.26.0,<1.27.0)"] +storagegateway = ["mypy-boto3-storagegateway (>=1.26.0,<1.27.0)"] +sts = ["mypy-boto3-sts (>=1.26.0,<1.27.0)"] +support = ["mypy-boto3-support (>=1.26.0,<1.27.0)"] +support-app = ["mypy-boto3-support-app (>=1.26.0,<1.27.0)"] +swf = ["mypy-boto3-swf (>=1.26.0,<1.27.0)"] +synthetics = ["mypy-boto3-synthetics (>=1.26.0,<1.27.0)"] +textract = ["mypy-boto3-textract (>=1.26.0,<1.27.0)"] +timestream-query = ["mypy-boto3-timestream-query (>=1.26.0,<1.27.0)"] +timestream-write = ["mypy-boto3-timestream-write (>=1.26.0,<1.27.0)"] +transcribe = ["mypy-boto3-transcribe (>=1.26.0,<1.27.0)"] +transfer = ["mypy-boto3-transfer (>=1.26.0,<1.27.0)"] +translate = ["mypy-boto3-translate (>=1.26.0,<1.27.0)"] +voice-id = ["mypy-boto3-voice-id (>=1.26.0,<1.27.0)"] +waf = ["mypy-boto3-waf (>=1.26.0,<1.27.0)"] +waf-regional = ["mypy-boto3-waf-regional (>=1.26.0,<1.27.0)"] +wafv2 = ["mypy-boto3-wafv2 (>=1.26.0,<1.27.0)"] +wellarchitected = ["mypy-boto3-wellarchitected (>=1.26.0,<1.27.0)"] +wisdom = ["mypy-boto3-wisdom (>=1.26.0,<1.27.0)"] +workdocs = ["mypy-boto3-workdocs (>=1.26.0,<1.27.0)"] +worklink = ["mypy-boto3-worklink (>=1.26.0,<1.27.0)"] +workmail = ["mypy-boto3-workmail (>=1.26.0,<1.27.0)"] +workmailmessageflow = ["mypy-boto3-workmailmessageflow (>=1.26.0,<1.27.0)"] +workspaces = ["mypy-boto3-workspaces (>=1.26.0,<1.27.0)"] +workspaces-web = ["mypy-boto3-workspaces-web (>=1.26.0,<1.27.0)"] +xray = ["mypy-boto3-xray (>=1.26.0,<1.27.0)"] [[package]] name = "botocore" -version = "1.27.38" +version = "1.29.16" description = "Low-level, data-driven core of boto 3." 
category = "main" optional = false @@ -501,7 +510,7 @@ python-dateutil = ">=2.1,<3.0.0" urllib3 = ">=1.25.4,<1.27" [package.extras] -crt = ["awscrt (==0.13.8)"] +crt = ["awscrt (==0.14.0)"] [[package]] name = "botocore-stubs" @@ -516,7 +525,7 @@ typing-extensions = ">=4.1.0" [[package]] name = "certifi" -version = "2022.6.15" +version = "2022.9.24" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false @@ -950,8 +959,8 @@ reports = ["lxml"] [[package]] name = "mypy-boto3-s3" -version = "1.24.36.post1" -description = "Type annotations for boto3.S3 1.24.36 service generated with mypy-boto3-builder 7.10.0" +version = "1.26.0.post1" +description = "Type annotations for boto3.S3 1.26.0 service generated with mypy-boto3-builder 7.11.10" category = "main" optional = false python-versions = ">=3.7" @@ -1239,8 +1248,8 @@ python-versions = ">=3.6" [package.dependencies] pytest = [ - {version = ">=5.0", markers = "python_version < \"3.10\""}, {version = ">=6.2.4", markers = "python_version >= \"3.10\""}, + {version = ">=5.0", markers = "python_version < \"3.10\""}, ] [[package]] @@ -1486,7 +1495,7 @@ python-versions = ">=3.7,<4.0" name = "types-toml" version = "0.10.8" description = "Typing stubs for toml" -category = "dev" +category = "main" optional = false python-versions = "*" @@ -1574,7 +1583,7 @@ testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>= [metadata] lock-version = "1.1" python-versions = "^3.9" -content-hash = "c95c184fccaf40815405ad616ec1c55869c7f87b72777cc3a9cbaff41de98977" +content-hash = "98d63eaa73253882440e0fc8cdb305bb536944768c5ba313c25d0ee65f546544" [metadata.files] aiopg = [ @@ -1594,19 +1603,42 @@ async-timeout = [ {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, ] asyncpg = [ - {file = "asyncpg-0.24.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c4fc0205fe4ddd5aeb3dfdc0f7bafd43411181e1f5650189608e5971cceacff1"}, - {file = "asyncpg-0.24.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a7095890c96ba36f9f668eb552bb020dddb44f8e73e932f8573efc613ee83843"}, - {file = "asyncpg-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:8ff5073d4b654e34bd5eaadc01dc4d68b8a9609084d835acd364cd934190a08d"}, - {file = "asyncpg-0.24.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e36c6806883786b19551bb70a4882561f31135dc8105a59662e0376cf5b2cbc5"}, - {file = "asyncpg-0.24.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ddffcb85227bf39cd1bedd4603e0082b243cf3b14ced64dce506a15b05232b83"}, - {file = "asyncpg-0.24.0-cp37-cp37m-win_amd64.whl", hash = "sha256:41704c561d354bef01353835a7846e5606faabbeb846214dfcf666cf53319f18"}, - {file = "asyncpg-0.24.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:29ef6ae0a617fc13cc2ac5dc8e9b367bb83cba220614b437af9b67766f4b6b20"}, - {file = "asyncpg-0.24.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eed43abc6ccf1dc02e0d0efc06ce46a411362f3358847c6b0ec9a43426f91ece"}, - {file = "asyncpg-0.24.0-cp38-cp38-win_amd64.whl", hash = "sha256:129d501f3d30616afd51eb8d3142ef51ba05374256bd5834cec3ef4956a9b317"}, - {file = "asyncpg-0.24.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a458fc69051fbb67d995fdda46d75a012b5d6200f91e17d23d4751482640ed4c"}, - {file = 
"asyncpg-0.24.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:556b0e92e2b75dc028b3c4bc9bd5162ddf0053b856437cf1f04c97f9c6837d03"}, - {file = "asyncpg-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:a738f4807c853623d3f93f0fea11f61be6b0e5ca16ea8aeb42c2c7ee742aa853"}, - {file = "asyncpg-0.24.0.tar.gz", hash = "sha256:dd2fa063c3344823487d9ddccb40802f02622ddf8bf8a6cc53885ee7a2c1c0c6"}, + {file = "asyncpg-0.27.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fca608d199ffed4903dce1bcd97ad0fe8260f405c1c225bdf0002709132171c2"}, + {file = "asyncpg-0.27.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:20b596d8d074f6f695c13ffb8646d0b6bb1ab570ba7b0cfd349b921ff03cfc1e"}, + {file = "asyncpg-0.27.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a6206210c869ebd3f4eb9e89bea132aefb56ff3d1b7dd7e26b102b17e27bbb1"}, + {file = "asyncpg-0.27.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a7a94c03386bb95456b12c66026b3a87d1b965f0f1e5733c36e7229f8f137747"}, + {file = "asyncpg-0.27.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bfc3980b4ba6f97138b04f0d32e8af21d6c9fa1f8e6e140c07d15690a0a99279"}, + {file = "asyncpg-0.27.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9654085f2b22f66952124de13a8071b54453ff972c25c59b5ce1173a4283ffd9"}, + {file = "asyncpg-0.27.0-cp310-cp310-win32.whl", hash = "sha256:879c29a75969eb2722f94443752f4720d560d1e748474de54ae8dd230bc4956b"}, + {file = "asyncpg-0.27.0-cp310-cp310-win_amd64.whl", hash = "sha256:ab0f21c4818d46a60ca789ebc92327d6d874d3b7ccff3963f7af0a21dc6cff52"}, + {file = "asyncpg-0.27.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:18f77e8e71e826ba2d0c3ba6764930776719ae2b225ca07e014590545928b576"}, + {file = "asyncpg-0.27.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2232d4625c558f2aa001942cac1d7952aa9f0dbfc212f63bc754277769e1ef2"}, + {file = "asyncpg-0.27.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9a3a4ff43702d39e3c97a8786314123d314e0f0e4dabc8367db5b665c93914de"}, + {file = "asyncpg-0.27.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccddb9419ab4e1c48742457d0c0362dbdaeb9b28e6875115abfe319b29ee225d"}, + {file = "asyncpg-0.27.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:768e0e7c2898d40b16d4ef7a0b44e8150db3dd8995b4652aa1fe2902e92c7df8"}, + {file = "asyncpg-0.27.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609054a1f47292a905582a1cfcca51a6f3f30ab9d822448693e66fdddde27920"}, + {file = "asyncpg-0.27.0-cp311-cp311-win32.whl", hash = "sha256:8113e17cfe236dc2277ec844ba9b3d5312f61bd2fdae6d3ed1c1cdd75f6cf2d8"}, + {file = "asyncpg-0.27.0-cp311-cp311-win_amd64.whl", hash = "sha256:bb71211414dd1eeb8d31ec529fe77cff04bf53efc783a5f6f0a32d84923f45cf"}, + {file = "asyncpg-0.27.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4750f5cf49ed48a6e49c6e5aed390eee367694636c2dcfaf4a273ca832c5c43c"}, + {file = "asyncpg-0.27.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:eca01eb112a39d31cc4abb93a5aef2a81514c23f70956729f42fb83b11b3483f"}, + {file = "asyncpg-0.27.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5710cb0937f696ce303f5eed6d272e3f057339bb4139378ccecafa9ee923a71c"}, + {file = "asyncpg-0.27.0-cp37-cp37m-win_amd64.whl", hash = 
"sha256:71cca80a056ebe19ec74b7117b09e650990c3ca535ac1c35234a96f65604192f"}, + {file = "asyncpg-0.27.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4bb366ae34af5b5cabc3ac6a5347dfb6013af38c68af8452f27968d49085ecc0"}, + {file = "asyncpg-0.27.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:16ba8ec2e85d586b4a12bcd03e8d29e3d99e832764d6a1d0b8c27dbbe4a2569d"}, + {file = "asyncpg-0.27.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d20dea7b83651d93b1eb2f353511fe7fd554752844523f17ad30115d8b9c8cd6"}, + {file = "asyncpg-0.27.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e56ac8a8237ad4adec97c0cd4728596885f908053ab725e22900b5902e7f8e69"}, + {file = "asyncpg-0.27.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bf21ebf023ec67335258e0f3d3ad7b91bb9507985ba2b2206346de488267cad0"}, + {file = "asyncpg-0.27.0-cp38-cp38-win32.whl", hash = "sha256:69aa1b443a182b13a17ff926ed6627af2d98f62f2fe5890583270cc4073f63bf"}, + {file = "asyncpg-0.27.0-cp38-cp38-win_amd64.whl", hash = "sha256:62932f29cf2433988fcd799770ec64b374a3691e7902ecf85da14d5e0854d1ea"}, + {file = "asyncpg-0.27.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fddcacf695581a8d856654bc4c8cfb73d5c9df26d5f55201722d3e6a699e9629"}, + {file = "asyncpg-0.27.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7d8585707ecc6661d07367d444bbaa846b4e095d84451340da8df55a3757e152"}, + {file = "asyncpg-0.27.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:975a320baf7020339a67315284a4d3bf7460e664e484672bd3e71dbd881bc692"}, + {file = "asyncpg-0.27.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2232ebae9796d4600a7819fc383da78ab51b32a092795f4555575fc934c1c89d"}, + {file = "asyncpg-0.27.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:88b62164738239f62f4af92567b846a8ef7cf8abf53eddd83650603de4d52163"}, + {file = "asyncpg-0.27.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:eb4b2fdf88af4fb1cc569781a8f933d2a73ee82cd720e0cb4edabbaecf2a905b"}, + {file = "asyncpg-0.27.0-cp39-cp39-win32.whl", hash = "sha256:8934577e1ed13f7d2d9cea3cc016cc6f95c19faedea2c2b56a6f94f257cea672"}, + {file = "asyncpg-0.27.0-cp39-cp39-win_amd64.whl", hash = "sha256:1b6499de06fe035cf2fa932ec5617ed3f37d4ebbf663b655922e105a484a6af9"}, + {file = "asyncpg-0.27.0.tar.gz", hash = "sha256:720986d9a4705dd8a40fdf172036f5ae787225036a7eb46e704c45aa8f62c054"}, ] atomicwrites = [ {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"}, @@ -1654,24 +1686,24 @@ black = [ {file = "black-22.6.0.tar.gz", hash = "sha256:6c6d39e28aed379aec40da1c65434c77d75e65bb59a1e1c283de545fb4e7c6c9"}, ] boto3 = [ - {file = "boto3-1.24.38-py3-none-any.whl", hash = "sha256:bcf97fd7c494f4e2bbbe2511625500654179c0a6b3bea977d46f97af764e85a4"}, - {file = "boto3-1.24.38.tar.gz", hash = "sha256:f4c6b025f392c934338c7f01badfddbd0d3cf2397ff5df35c31409798dce33f5"}, + {file = "boto3-1.26.16-py3-none-any.whl", hash = "sha256:4f493a2aed71cee93e626de4f67ce58dd82c0473480a0fc45b131715cd8f4f30"}, + {file = "boto3-1.26.16.tar.gz", hash = "sha256:31c0adf71e4bd19a5428580bb229d7ea3b5795eecaa0847a85385df00c026116"}, ] boto3-stubs = [ - {file = "boto3-stubs-1.24.58.tar.gz", hash = "sha256:95ab521a9a931cc21d48c97c5bd7de0e37370d9b6a298e3905ec621db9243897"}, - {file = "boto3_stubs-1.24.58-py3-none-any.whl", hash = 
"sha256:a16940df2a347f7890075af8c0b202b06057bc18ff4c640ef94e09ce4176adb9"}, + {file = "boto3-stubs-1.26.16.tar.gz", hash = "sha256:618253ae19f1480785759bcaee8c8b10ed3fc037027247c26a3461a50f58406d"}, + {file = "boto3_stubs-1.26.16-py3-none-any.whl", hash = "sha256:8cf2925bc3e1349c93eb0f49c1061affc5ca314d69eeb335349037969d0787ed"}, ] botocore = [ - {file = "botocore-1.27.38-py3-none-any.whl", hash = "sha256:46a0264ff3335496bd9cb404f83ec0d8eb7bfdef8f74a830c13e6a6b9612adea"}, - {file = "botocore-1.27.38.tar.gz", hash = "sha256:56a7682564ea57ceecfef5648f77b77e0543b9c904212fc9ef4416517d24fa45"}, + {file = "botocore-1.29.16-py3-none-any.whl", hash = "sha256:271b599e6cfe214405ed50d41cd967add1d5d469383dd81ff583bc818b47f59b"}, + {file = "botocore-1.29.16.tar.gz", hash = "sha256:8cfcc10f2f1751608c3cec694f2d6b5e16ebcd50d0a104f9914d5616227c62e9"}, ] botocore-stubs = [ {file = "botocore-stubs-1.27.38.tar.gz", hash = "sha256:408e8b86b5d171b58f81c74ca9d3b5317a5a8e2d3bc2073aa841ac13b8939e56"}, {file = "botocore_stubs-1.27.38-py3-none-any.whl", hash = "sha256:7add7641e9a479a9c8366893bb522fd9ca3d58714201e43662a200a148a1bc38"}, ] certifi = [ - {file = "certifi-2022.6.15-py3-none-any.whl", hash = "sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412"}, - {file = "certifi-2022.6.15.tar.gz", hash = "sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d"}, + {file = "certifi-2022.9.24-py3-none-any.whl", hash = "sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382"}, + {file = "certifi-2022.9.24.tar.gz", hash = "sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14"}, ] cffi = [ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, @@ -1942,8 +1974,8 @@ mypy = [ {file = "mypy-0.971.tar.gz", hash = "sha256:40b0f21484238269ae6a57200c807d80debc6459d444c0489a102d7c6a75fa56"}, ] mypy-boto3-s3 = [ - {file = "mypy-boto3-s3-1.24.36.post1.tar.gz", hash = "sha256:3bd7e06f9ade5059eae2181d7a9f1a41e7fa807ad3e94c01c9901838e87e0abe"}, - {file = "mypy_boto3_s3-1.24.36.post1-py3-none-any.whl", hash = "sha256:30ae59b33c55f8b7b693170f9519ea5b91a2fbf31a73de79cdef57a27d784e5a"}, + {file = "mypy-boto3-s3-1.26.0.post1.tar.gz", hash = "sha256:6d7079f8c739dc993cbedad0736299c413b297814b73795a3855a79169ecc938"}, + {file = "mypy_boto3_s3-1.26.0.post1-py3-none-any.whl", hash = "sha256:7de2792ff0cc541b84cd46ff3a6aa2b6e5f267217f2203f27f6e4016bddc644d"}, ] mypy-extensions = [ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, @@ -2004,7 +2036,6 @@ psutil = [ psycopg2-binary = [ {file = "psycopg2-binary-2.9.3.tar.gz", hash = "sha256:761df5313dc15da1502b21453642d7599d26be88bff659382f8f9747c7ebea4e"}, {file = "psycopg2_binary-2.9.3-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:539b28661b71da7c0e428692438efbcd048ca21ea81af618d845e06ebfd29478"}, - {file = "psycopg2_binary-2.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f2534ab7dc7e776a263b463a16e189eb30e85ec9bbe1bff9e78dae802608932"}, {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e82d38390a03da28c7985b394ec3f56873174e2c88130e6966cb1c946508e65"}, {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:57804fc02ca3ce0dbfbef35c4b3a4a774da66d66ea20f4bda601294ad2ea6092"}, {file = "psycopg2_binary-2.9.3-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:083a55275f09a62b8ca4902dd11f4b33075b743cf0d360419e2051a8a5d5ff76"}, @@ -2038,7 +2069,6 @@ psycopg2-binary = [ {file = "psycopg2_binary-2.9.3-cp37-cp37m-win32.whl", hash = "sha256:adf20d9a67e0b6393eac162eb81fb10bc9130a80540f4df7e7355c2dd4af9fba"}, {file = "psycopg2_binary-2.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:2f9ffd643bc7349eeb664eba8864d9e01f057880f510e4681ba40a6532f93c71"}, {file = "psycopg2_binary-2.9.3-cp38-cp38-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:def68d7c21984b0f8218e8a15d514f714d96904265164f75f8d3a70f9c295667"}, - {file = "psycopg2_binary-2.9.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e6aa71ae45f952a2205377773e76f4e3f27951df38e69a4c95440c779e013560"}, {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dffc08ca91c9ac09008870c9eb77b00a46b3378719584059c034b8945e26b272"}, {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:280b0bb5cbfe8039205c7981cceb006156a675362a00fe29b16fbc264e242834"}, {file = "psycopg2_binary-2.9.3-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:af9813db73395fb1fc211bac696faea4ca9ef53f32dc0cfa27e4e7cf766dcf24"}, @@ -2050,7 +2080,6 @@ psycopg2-binary = [ {file = "psycopg2_binary-2.9.3-cp38-cp38-win32.whl", hash = "sha256:6472a178e291b59e7f16ab49ec8b4f3bdada0a879c68d3817ff0963e722a82ce"}, {file = "psycopg2_binary-2.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:35168209c9d51b145e459e05c31a9eaeffa9a6b0fd61689b48e07464ffd1a83e"}, {file = "psycopg2_binary-2.9.3-cp39-cp39-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:47133f3f872faf28c1e87d4357220e809dfd3fa7c64295a4a148bcd1e6e34ec9"}, - {file = "psycopg2_binary-2.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b3a24a1982ae56461cc24f6680604fffa2c1b818e9dc55680da038792e004d18"}, {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91920527dea30175cc02a1099f331aa8c1ba39bf8b7762b7b56cbf54bc5cce42"}, {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:887dd9aac71765ac0d0bac1d0d4b4f2c99d5f5c1382d8b770404f0f3d0ce8a39"}, {file = "psycopg2_binary-2.9.3-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:1f14c8b0942714eb3c74e1e71700cbbcb415acbc311c730370e70c578a44a25c"}, @@ -2067,7 +2096,18 @@ py = [ {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, ] pyasn1 = [ + {file = "pyasn1-0.4.8-py2.4.egg", hash = "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3"}, + {file = "pyasn1-0.4.8-py2.5.egg", hash = "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf"}, + {file = "pyasn1-0.4.8-py2.6.egg", hash = "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00"}, + {file = "pyasn1-0.4.8-py2.7.egg", hash = "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8"}, {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, + {file = "pyasn1-0.4.8-py3.1.egg", hash = "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86"}, + {file = "pyasn1-0.4.8-py3.2.egg", hash = 
"sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7"}, + {file = "pyasn1-0.4.8-py3.3.egg", hash = "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576"}, + {file = "pyasn1-0.4.8-py3.4.egg", hash = "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12"}, + {file = "pyasn1-0.4.8-py3.5.egg", hash = "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2"}, + {file = "pyasn1-0.4.8-py3.6.egg", hash = "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359"}, + {file = "pyasn1-0.4.8-py3.7.egg", hash = "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776"}, {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, ] pycodestyle = [ @@ -2173,13 +2213,6 @@ pyyaml = [ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, diff --git a/proxy/src/auth.rs b/proxy/src/auth.rs index 2df4f9d920..f272f9adc1 100644 --- a/proxy/src/auth.rs +++ b/proxy/src/auth.rs @@ -1,7 +1,7 @@ //! Client authentication mechanisms. 
 pub mod backend;
-pub use backend::{BackendType, ConsoleReqExtra, DatabaseInfo};
+pub use backend::{BackendType, ConsoleReqExtra};

 mod credentials;
 pub use credentials::ClientCredentials;
diff --git a/proxy/src/auth/backend.rs b/proxy/src/auth/backend.rs
index bb919770c1..4b937f017a 100644
--- a/proxy/src/auth/backend.rs
+++ b/proxy/src/auth/backend.rs
@@ -12,7 +12,6 @@ use crate::{
     waiters::{self, Waiter, Waiters},
 };
 use once_cell::sync::Lazy;
-use serde::{Deserialize, Serialize};
 use std::borrow::Cow;
 use tokio::io::{AsyncRead, AsyncWrite};
 use tracing::{info, warn};
@@ -36,45 +35,6 @@ pub fn notify(psql_session_id: &str, msg: mgmt::ComputeReady) -> Result<(), waiters::NotifyError> {
     CPLANE_WAITERS.notify(psql_session_id, msg)
 }

-/// Compute node connection params provided by the cloud.
-/// Note how it implements serde traits, since we receive it over the wire.
-#[derive(Serialize, Deserialize, Default)]
-pub struct DatabaseInfo {
-    pub host: String,
-    pub port: u16,
-    pub dbname: String,
-    pub user: String,
-    pub password: Option<String>,
-}
-
-// Manually implement debug to omit personal and sensitive info.
-impl std::fmt::Debug for DatabaseInfo {
-    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
-        fmt.debug_struct("DatabaseInfo")
-            .field("host", &self.host)
-            .field("port", &self.port)
-            .finish_non_exhaustive()
-    }
-}
-
-impl From<DatabaseInfo> for tokio_postgres::Config {
-    fn from(db_info: DatabaseInfo) -> Self {
-        let mut config = tokio_postgres::Config::new();
-
-        config
-            .host(&db_info.host)
-            .port(db_info.port)
-            .dbname(&db_info.dbname)
-            .user(&db_info.user);
-
-        if let Some(password) = db_info.password {
-            config.password(password);
-        }
-
-        config
-    }
-}
-
 /// Extra query params we'd like to pass to the console.
 pub struct ConsoleReqExtra<'a> {
     /// A unique identifier for a connection.
@@ -158,54 +118,107 @@ impl<'a, T, E> BackendType<'a, Result<T, E>> {
     }
 }

+/// A product of successful authentication.
+pub struct AuthSuccess<T> {
+    /// Did we send [`pq_proto::BeMessage::AuthenticationOk`] to the client?
+    pub reported_auth_ok: bool,
+    /// Something to be considered a positive result.
+    pub value: T,
+}
+
+impl<T> AuthSuccess<T> {
+    /// Very similar to [`std::option::Option::map`].
+    /// Maps [`AuthSuccess<T>`] to [`AuthSuccess<R>`] by applying
+    /// a function to a contained value.
+    pub fn map<R>(self, f: impl FnOnce(T) -> R) -> AuthSuccess<R> {
+        AuthSuccess {
+            reported_auth_ok: self.reported_auth_ok,
+            value: f(self.value),
+        }
+    }
+}
+
+/// Info for establishing a connection to a compute node.
+/// This is what we get after auth succeeded, but not before!
+pub struct NodeInfo {
+    /// Project from [`auth::ClientCredentials`].
+    pub project: String,
+    /// Compute node connection params.
+    pub config: compute::ConnCfg,
+}
+
 impl BackendType<'_, ClientCredentials<'_>> {
+    /// Do something special if the user didn't provide the `project` parameter.
+    async fn try_password_hack(
+        &mut self,
+        extra: &ConsoleReqExtra<'_>,
+        client: &mut stream::PqStream<impl AsyncRead + AsyncWrite + Unpin + Send>,
+    ) -> auth::Result<Option<AuthSuccess<NodeInfo>>> {
+        use BackendType::*;
+
+        // If there's no project so far, that means the client doesn't
+        // support SNI or other means of passing the project name.
+        // We now expect to see a very specific payload in the place of password.
+        let fetch_magic_payload = async {
+            warn!("project name not specified, resorting to the password hack auth flow");
+            let payload = AuthFlow::new(client)
+                .begin(auth::PasswordHack)
+                .await?
+                .authenticate()
+                .await?;
+
+            info!(project = &payload.project, "received missing parameter");
+            auth::Result::Ok(payload)
+        };
+
+        // TODO: find a proper way to merge those very similar blocks.
+        let (mut config, payload) = match self {
+            Console(endpoint, creds) if creds.project.is_none() => {
+                let payload = fetch_magic_payload.await?;
+
+                let mut creds = creds.as_ref();
+                creds.project = Some(payload.project.as_str().into());
+                let config = console::Api::new(endpoint, extra, &creds)
+                    .wake_compute()
+                    .await?;
+
+                (config, payload)
+            }
+            Postgres(endpoint, creds) if creds.project.is_none() => {
+                let payload = fetch_magic_payload.await?;
+
+                let mut creds = creds.as_ref();
+                creds.project = Some(payload.project.as_str().into());
+                let config = postgres::Api::new(endpoint, &creds).wake_compute().await?;
+
+                (config, payload)
+            }
+            _ => return Ok(None),
+        };
+
+        config.password(payload.password);
+        Ok(Some(AuthSuccess {
+            reported_auth_ok: false,
+            value: NodeInfo {
+                project: payload.project,
+                config,
+            },
+        }))
+    }
+
     /// Authenticate the client via the requested backend, possibly using credentials.
     pub async fn authenticate(
         mut self,
         extra: &ConsoleReqExtra<'_>,
         client: &mut stream::PqStream<impl AsyncRead + AsyncWrite + Unpin + Send>,
-    ) -> super::Result<compute::NodeInfo> {
+    ) -> auth::Result<AuthSuccess<NodeInfo>> {
         use BackendType::*;

-        if let Console(_, creds) | Postgres(_, creds) = &mut self {
-            // If there's no project so far, that entails that client doesn't
-            // support SNI or other means of passing the project name.
-            // We now expect to see a very specific payload in the place of password.
-            if creds.project().is_none() {
-                warn!("project name not specified, resorting to the password hack auth flow");
-
-                let payload = AuthFlow::new(client)
-                    .begin(auth::PasswordHack)
-                    .await?
-                    .authenticate()
-                    .await?;
-
-                // Finally we may finish the initialization of `creds`.
-                // TODO: add missing type safety to ClientCredentials.
-                info!(project = &payload.project, "received missing parameter");
-                creds.project = Some(payload.project.into());
-
-                let mut config = match &self {
-                    Console(endpoint, creds) => {
-                        console::Api::new(endpoint, extra, creds)
-                            .wake_compute()
-                            .await?
-                    }
-                    Postgres(endpoint, creds) => {
-                        postgres::Api::new(endpoint, creds).wake_compute().await?
-                    }
-                    _ => unreachable!("see the patterns above"),
-                };
-
-                // We should use a password from payload as well.
-                config.password(payload.password);
-
-                info!("user successfully authenticated (using the password hack)");
-                return Ok(compute::NodeInfo {
-                    reported_auth_ok: false,
-                    config,
-                });
-            }
+        // Handle cases when `project` is missing in `creds`.
+        // TODO: type safety: return `creds` with irrefutable `project`.
+        if let Some(res) = self.try_password_hack(extra, client).await? {
+            info!("user successfully authenticated (using the password hack)");
+            return Ok(res);
         }

         let res = match self {
@@ -215,22 +228,34 @@ impl BackendType<'_, ClientCredentials<'_>> {
                     project = creds.project(),
                     "performing authentication using the console"
                 );
+
+                assert!(creds.project.is_some());
                 console::Api::new(&endpoint, extra, &creds)
                     .handle_user(client)
-                    .await
+                    .await?
+                    .map(|config| NodeInfo {
+                        project: creds.project.unwrap().into_owned(),
+                        config,
+                    })
             }
             Postgres(endpoint, creds) => {
                 info!("performing mock authentication using a local postgres instance");
+
+                assert!(creds.project.is_some());
                 postgres::Api::new(&endpoint, &creds)
                     .handle_user(client)
-                    .await
+                    .await?
+                    .map(|config| NodeInfo {
+                        project: creds.project.unwrap().into_owned(),
+                        config,
+                    })
             }
             // NOTE: this auth backend doesn't use client credentials.
             Link(url) => {
                 info!("performing link authentication");
-                link::handle_user(&url, client).await
+                link::handle_user(&url, client).await?
             }
-        }?;
+        };

         info!("user successfully authenticated");
         Ok(res)
diff --git a/proxy/src/auth/backend/console.rs b/proxy/src/auth/backend/console.rs
index cf99aa08ef..929dfb33f7 100644
--- a/proxy/src/auth/backend/console.rs
+++ b/proxy/src/auth/backend/console.rs
@@ -1,9 +1,9 @@
 //! Cloud API V2.

-use super::ConsoleReqExtra;
+use super::{AuthSuccess, ConsoleReqExtra};
 use crate::{
     auth::{self, AuthFlow, ClientCredentials},
-    compute::{self, ComputeConnCfg},
+    compute,
     error::{io_error, UserFacingError},
     http, scram,
     stream::PqStream,
@@ -128,7 +128,7 @@ impl<'a> Api<'a> {
     pub(super) async fn handle_user(
         self,
         client: &mut PqStream<impl AsyncRead + AsyncWrite + Unpin + Send>,
-    ) -> auth::Result<compute::NodeInfo> {
+    ) -> auth::Result<AuthSuccess<compute::ConnCfg>> {
         handle_user(client, &self, Self::get_auth_info, Self::wake_compute).await
     }
@@ -164,7 +164,7 @@ impl<'a> Api<'a> {
     }

     /// Wake up the compute node and return the corresponding connection info.
-    pub(super) async fn wake_compute(&self) -> Result<ComputeConnCfg, WakeComputeError> {
+    pub(super) async fn wake_compute(&self) -> Result<compute::ConnCfg, WakeComputeError> {
         let request_id = uuid::Uuid::new_v4().to_string();
         let req = self
             .endpoint
@@ -195,7 +195,7 @@ impl<'a> Api<'a> {
             Some(x) => x,
         };

-        let mut config = ComputeConnCfg::new();
+        let mut config = compute::ConnCfg::new();
         config
             .host(host)
             .port(port)
@@ -213,10 +213,10 @@ pub(super) async fn handle_user<'a, Endpoint, GetAuthInfo, WakeCompute>(
     endpoint: &'a Endpoint,
     get_auth_info: impl FnOnce(&'a Endpoint) -> GetAuthInfo,
     wake_compute: impl FnOnce(&'a Endpoint) -> WakeCompute,
-) -> auth::Result<compute::NodeInfo>
+) -> auth::Result<AuthSuccess<compute::ConnCfg>>
 where
     GetAuthInfo: Future<Output = Result<AuthInfo, GetAuthInfoError>>,
-    WakeCompute: Future<Output = Result<ComputeConnCfg, WakeComputeError>>,
+    WakeCompute: Future<Output = Result<compute::ConnCfg, WakeComputeError>>,
 {
     info!("fetching user's authentication info");
     let auth_info = get_auth_info(endpoint).await?;
@@ -243,9 +243,9 @@ where
         config.auth_keys(tokio_postgres::config::AuthKeys::ScramSha256(keys));
     }

-    Ok(compute::NodeInfo {
+    Ok(AuthSuccess {
         reported_auth_ok: false,
-        config,
+        value: config,
     })
 }
diff --git a/proxy/src/auth/backend/link.rs b/proxy/src/auth/backend/link.rs
index 96c6f0ba18..440a55f194 100644
--- a/proxy/src/auth/backend/link.rs
+++ b/proxy/src/auth/backend/link.rs
@@ -1,3 +1,4 @@
+use super::{AuthSuccess, NodeInfo};
 use crate::{auth, compute, error::UserFacingError, stream::PqStream, waiters};
 use pq_proto::{BeMessage as Be, BeParameterStatusMessage};
 use thiserror::Error;
@@ -49,7 +50,7 @@ pub fn new_psql_session_id() -> String {
 pub async fn handle_user(
     link_uri: &reqwest::Url,
     client: &mut PqStream<impl AsyncRead + AsyncWrite + Unpin + Send>,
-) -> auth::Result<compute::NodeInfo> {
+) -> auth::Result<AuthSuccess<NodeInfo>> {
     let psql_session_id = new_psql_session_id();
     let span = info_span!("link", psql_session_id = &psql_session_id);
     let greeting = hello_message(link_uri, &psql_session_id);
@@ -71,8 +72,22 @@ pub async fn handle_user(
     client.write_message_noflush(&Be::NoticeResponse("Connecting to database."))?;

-    Ok(compute::NodeInfo {
+    let mut config = compute::ConnCfg::new();
+    config
+        .host(&db_info.host)
+        .port(db_info.port)
+        .dbname(&db_info.dbname)
+        .user(&db_info.user);
+
+    if let Some(password) = db_info.password {
+        config.password(password);
+    }
+
+    Ok(AuthSuccess {
         reported_auth_ok: true,
-        config: db_info.into(),
+        value: NodeInfo {
+            project: db_info.project,
+            config,
+        },
     })
 }
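The `AuthSuccess<T>` wrapper introduced above is what lets a backend return a bare connection config while the caller upgrades it to a `NodeInfo` via `map`, as the `Console`/`Postgres` arms of `authenticate` do. A minimal, self-contained sketch of that flow (the types here are stubs, not the real proxy types):

```rust
// Stub standing in for the real compute::ConnCfg.
struct ConnCfg;

struct AuthSuccess<T> {
    reported_auth_ok: bool,
    value: T,
}

impl<T> AuthSuccess<T> {
    // Same shape as Option::map: transform the payload, keep the flag.
    fn map<R>(self, f: impl FnOnce(T) -> R) -> AuthSuccess<R> {
        AuthSuccess {
            reported_auth_ok: self.reported_auth_ok,
            value: f(self.value),
        }
    }
}

struct NodeInfo {
    project: String,
    config: ConnCfg,
}

fn main() {
    // A backend hands back AuthSuccess<ConnCfg>...
    let from_backend = AuthSuccess { reported_auth_ok: false, value: ConnCfg };

    // ...and the caller attaches the project name without touching the flag.
    let node = from_backend.map(|config| NodeInfo {
        project: "my-project".to_owned(),
        config,
    });

    assert!(!node.reported_auth_ok);
    assert_eq!(node.value.project, "my-project");
}
```

Keeping `reported_auth_ok` inside the wrapper is what lets the proxy loop later decide whether it still owes the client an `AuthenticationOk` message.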
diff --git a/proxy/src/auth/backend/postgres.rs b/proxy/src/auth/backend/postgres.rs
index 2055ee14c8..e56b62622a 100644
--- a/proxy/src/auth/backend/postgres.rs
+++ b/proxy/src/auth/backend/postgres.rs
@@ -1,12 +1,12 @@
 //! Local mock of Cloud API V2.

+use super::{
+    console::{self, AuthInfo, GetAuthInfoError, TransportError, WakeComputeError},
+    AuthSuccess,
+};
 use crate::{
-    auth::{
-        self,
-        backend::console::{self, AuthInfo, GetAuthInfoError, TransportError, WakeComputeError},
-        ClientCredentials,
-    },
-    compute::{self, ComputeConnCfg},
+    auth::{self, ClientCredentials},
+    compute,
     error::io_error,
     scram,
     stream::PqStream,
@@ -37,7 +37,7 @@ impl<'a> Api<'a> {
     pub(super) async fn handle_user(
         self,
         client: &mut PqStream<impl AsyncRead + AsyncWrite + Unpin + Send>,
-    ) -> auth::Result<compute::NodeInfo> {
+    ) -> auth::Result<AuthSuccess<compute::ConnCfg>> {
         // We reuse user handling logic from a production module.
         console::handle_user(client, &self, Self::get_auth_info, Self::wake_compute).await
     }
@@ -82,8 +82,8 @@ impl<'a> Api<'a> {
     }

     /// We don't need to wake anything locally, so we just return the connection info.
-    pub(super) async fn wake_compute(&self) -> Result<ComputeConnCfg, WakeComputeError> {
-        let mut config = ComputeConnCfg::new();
+    pub(super) async fn wake_compute(&self) -> Result<compute::ConnCfg, WakeComputeError> {
+        let mut config = compute::ConnCfg::new();
         config
             .host(self.endpoint.host_str().unwrap_or("localhost"))
             .port(self.endpoint.port().unwrap_or(5432))
diff --git a/proxy/src/auth/credentials.rs b/proxy/src/auth/credentials.rs
index 907f99b8e0..0a3b84bb52 100644
--- a/proxy/src/auth/credentials.rs
+++ b/proxy/src/auth/credentials.rs
@@ -11,14 +11,15 @@ pub enum ClientCredsParseError {
     #[error("Parameter '{0}' is missing in startup packet.")]
     MissingKey(&'static str),

-    #[error("Inconsistent project name inferred from SNI ('{0}') and project option ('{1}').")]
-    InconsistentProjectNames(String, String),
+    #[error("Inconsistent project name inferred from SNI ('{}') and project option ('{}').", .domain, .option)]
+    InconsistentProjectNames { domain: String, option: String },

     #[error(
-        "SNI ('{1}') inconsistently formatted with respect to common name ('{0}'). \
-        SNI should be formatted as '<project-name>.{0}'."
+        "SNI ('{}') inconsistently formatted with respect to common name ('{}'). \
+        SNI should be formatted as '<project-name>.{}'.",
+        .sni, .cn, .cn,
     )]
-    InconsistentSni(String, String),
+    InconsistentSni { sni: String, cn: String },

     #[error("Project name ('{0}') must contain only alphanumeric characters and hyphen.")]
     MalformedProjectName(String),
@@ -36,11 +37,23 @@ pub struct ClientCredentials<'a> {
 }

 impl ClientCredentials<'_> {
+    #[inline]
     pub fn project(&self) -> Option<&str> {
         self.project.as_deref()
     }
 }

+impl<'a> ClientCredentials<'a> {
+    #[inline]
+    pub fn as_ref(&'a self) -> ClientCredentials<'a> {
+        Self {
+            user: self.user,
+            dbname: self.dbname,
+            project: self.project().map(Cow::Borrowed),
+        }
+    }
+}
+
 impl<'a> ClientCredentials<'a> {
     pub fn parse(
         params: &'a StartupMessageParams,
@@ -55,7 +68,7 @@ impl<'a> ClientCredentials<'a> {
         let dbname = get_param("database")?;

         // Project name might be passed via PG's command-line options.
-        let project_a = params.options_raw().and_then(|mut options| {
+        let project_option = params.options_raw().and_then(|mut options| {
             options
                 .find_map(|opt| opt.strip_prefix("project="))
                 .map(Cow::Borrowed)
@@ -63,18 +76,26 @@ impl<'a> ClientCredentials<'a> {

         // Alternative project name is in fact a subdomain from SNI.
         // NOTE: we do not consider SNI if `common_name` is missing.
-        let project_b = sni
+        let project_domain = sni
             .zip(common_name)
             .map(|(sni, cn)| {
                 subdomain_from_sni(sni, cn)
-                    .ok_or_else(|| InconsistentSni(sni.into(), cn.into()))
+                    .ok_or_else(|| InconsistentSni {
+                        sni: sni.into(),
+                        cn: cn.into(),
+                    })
                     .map(Cow::<'static, str>::Owned)
             })
             .transpose()?;

-        let project = match (project_a, project_b) {
+        let project = match (project_option, project_domain) {
             // Invariant: if we have both project name variants, they should match.
-            (Some(a), Some(b)) if a != b => Some(Err(InconsistentProjectNames(a.into(), b.into()))),
+            (Some(option), Some(domain)) if option != domain => {
+                Some(Err(InconsistentProjectNames {
+                    domain: domain.into(),
+                    option: option.into(),
+                }))
+            }
             // Invariant: project name may not contain certain characters.
             (a, b) => a.or(b).map(|name| match project_name_valid(&name) {
                 false => Err(MalformedProjectName(name.into())),
@@ -111,6 +132,7 @@ fn subdomain_from_sni(sni: &str, common_name: &str) -> Option<String> {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use ClientCredsParseError::*;

     #[test]
     #[ignore = "TODO: fix how database is handled"]
@@ -198,9 +220,30 @@ mod tests {
         let sni = Some("second.localhost");
         let common_name = Some("localhost");

-        assert!(matches!(
-            ClientCredentials::parse(&options, sni, common_name).expect_err("should fail"),
-            ClientCredsParseError::InconsistentProjectNames(_, _)
-        ));
+        let err = ClientCredentials::parse(&options, sni, common_name).expect_err("should fail");
+        match err {
+            InconsistentProjectNames { domain, option } => {
+                assert_eq!(option, "first");
+                assert_eq!(domain, "second");
+            }
+            _ => panic!("bad error: {err:?}"),
+        }
+    }
+
+    #[test]
+    fn parse_inconsistent_sni() {
+        let options = StartupMessageParams::new([("user", "john_doe"), ("database", "world")]);

+        let sni = Some("project.localhost");
+        let common_name = Some("example.com");
+
+        let err = ClientCredentials::parse(&options, sni, common_name).expect_err("should fail");
+        match err {
+            InconsistentSni { sni, cn } => {
+                assert_eq!(sni, "project.localhost");
+                assert_eq!(cn, "example.com");
+            }
+            _ => panic!("bad error: {err:?}"),
+        }
+    }
 }
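For reference, the subdomain rule that `InconsistentSni` describes (SNI must look like `<project-name>.<common-name>`) can be sketched as below. This mirrors the intent of `subdomain_from_sni`, whose body the diff does not show, so treat it as an illustration rather than the actual implementation:

```rust
// Extract the project name from an SNI hostname, given the proxy's common name.
// Returns None when the SNI does not end with ".<common_name>".
fn subdomain_from_sni(sni: &str, common_name: &str) -> Option<String> {
    sni.strip_suffix(common_name)?
        .strip_suffix('.')
        .map(str::to_owned)
}

fn main() {
    assert_eq!(
        subdomain_from_sni("my-project.localhost", "localhost").as_deref(),
        Some("my-project")
    );
    // Mismatched common name -> None, which parse() surfaces as InconsistentSni.
    assert_eq!(subdomain_from_sni("project.localhost", "example.com"), None);
}
```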
diff --git a/proxy/src/compute.rs b/proxy/src/compute.rs
index 4771c774a1..4c5edb9673 100644
--- a/proxy/src/compute.rs
+++ b/proxy/src/compute.rs
@@ -40,17 +40,36 @@ impl UserFacingError for ConnectionError {
 /// A pair of `ClientKey` & `ServerKey` for `SCRAM-SHA-256`.
 pub type ScramKeys = tokio_postgres::config::ScramKeys<32>;

-pub type ComputeConnCfg = tokio_postgres::Config;
+/// A config for establishing a connection to a compute node.
+/// Eventually, `tokio_postgres` will be replaced with something better.
+/// The newtype allows us to implement methods on top of it.
+#[repr(transparent)]
+pub struct ConnCfg(pub tokio_postgres::Config);

-/// Various compute node info for establishing connection etc.
-pub struct NodeInfo {
-    /// Did we send [`pq_proto::BeMessage::AuthenticationOk`]?
-    pub reported_auth_ok: bool,
-    /// Compute node connection params.
-    pub config: tokio_postgres::Config,
+impl ConnCfg {
+    /// Construct a new connection config.
+    pub fn new() -> Self {
+        Self(tokio_postgres::Config::new())
+    }
 }

-impl NodeInfo {
+impl std::ops::Deref for ConnCfg {
+    type Target = tokio_postgres::Config;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+/// For now, let's make it easier to set up the config.
+impl std::ops::DerefMut for ConnCfg {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.0
+    }
+}
+
+impl ConnCfg {
+    /// Establish a raw TCP connection to the compute node.
     async fn connect_raw(&self) -> io::Result<(SocketAddr, TcpStream)> {
         use tokio_postgres::config::Host;
@@ -68,8 +87,8 @@
         // because it has no means for extracting the underlying socket which we
         // require for our business.
         let mut connection_error = None;
-        let ports = self.config.get_ports();
-        let hosts = self.config.get_hosts();
+        let ports = self.0.get_ports();
+        let hosts = self.0.get_hosts();
         // the ports array is supposed to have 0 entries, 1 entry, or as many entries as in the hosts array
         if ports.len() > 1 && ports.len() != hosts.len() {
             return Err(io::Error::new(
@@ -77,7 +96,7 @@
                 format!(
                     "couldn't connect: bad compute config, \
                     ports and hosts entries' count does not match: {:?}",
-                    self.config
+                    self.0
                 ),
             ));
         }
@@ -103,7 +122,7 @@
         Err(connection_error.unwrap_or_else(|| {
             io::Error::new(
                 io::ErrorKind::Other,
-                format!("couldn't connect: bad compute config: {:?}", self.config),
+                format!("couldn't connect: bad compute config: {:?}", self.0),
             )
         }))
     }
@@ -116,7 +135,7 @@ pub struct PostgresConnection {
     pub version: String,
 }

-impl NodeInfo {
+impl ConnCfg {
     /// Connect to a corresponding compute node.
     pub async fn connect(
         mut self,
@@ -130,21 +149,21 @@
             .intersperse(" ") // TODO: use impl from std once it's stabilized
             .collect();

-            self.config.options(&options);
+            self.0.options(&options);
         }

         if let Some(app_name) = params.get("application_name") {
-            self.config.application_name(app_name);
+            self.0.application_name(app_name);
         }

         if let Some(replication) = params.get("replication") {
             use tokio_postgres::config::ReplicationMode;
             match replication {
                 "true" | "on" | "yes" | "1" => {
-                    self.config.replication_mode(ReplicationMode::Physical);
+                    self.0.replication_mode(ReplicationMode::Physical);
                 }
                 "database" => {
-                    self.config.replication_mode(ReplicationMode::Logical);
+                    self.0.replication_mode(ReplicationMode::Logical);
                 }
                 _other => {}
             }
@@ -160,7 +179,7 @@
             .map_err(|_| ConnectionError::FailedToConnectToCompute)?;

         // TODO: establish a secure connection to the DB
-        let (client, conn) = self.config.connect_raw(&mut stream, NoTls).await?;
+        let (client, conn) = self.0.connect_raw(&mut stream, NoTls).await?;
         let version = conn
             .parameter("server_version")
             .ok_or(ConnectionError::FailedToFetchPgVersion)?
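The `ConnCfg` change above is the classic newtype-plus-`Deref` pattern: builder calls keep flowing to the inner `tokio_postgres::Config`, while new behavior (`connect_raw`, `connect`) hangs off the wrapper. A generic sketch with a stand-in inner type (none of these names are from the patch):

```rust
// Stand-in for tokio_postgres::Config: a builder-style inner type.
struct Inner {
    host: String,
}

impl Inner {
    fn host(&mut self, host: &str) -> &mut Self {
        self.host = host.to_owned();
        self
    }
}

#[repr(transparent)]
struct ConnCfg(Inner);

impl std::ops::Deref for ConnCfg {
    type Target = Inner;
    fn deref(&self) -> &Inner {
        &self.0
    }
}

impl std::ops::DerefMut for ConnCfg {
    fn deref_mut(&mut self) -> &mut Inner {
        &mut self.0
    }
}

impl ConnCfg {
    // New behavior lives on the newtype, not on the inner config.
    fn describe(&self) -> String {
        format!("connecting to {}", self.host) // field access auto-derefs
    }
}

fn main() {
    let mut cfg = ConnCfg(Inner { host: String::new() });
    cfg.host("compute-node.local"); // DerefMut forwards the builder call
    assert_eq!(cfg.describe(), "connecting to compute-node.local");
}
```

Implementing `Deref`/`DerefMut` for a non-pointer type is usually discouraged; the patch's own "For now, let's make it easier to set up the config" comment marks it as a deliberate stopgap until `tokio_postgres` is replaced.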
diff --git a/proxy/src/mgmt.rs b/proxy/src/mgmt.rs
index 06d1a4f106..23e10b5a9b 100644
--- a/proxy/src/mgmt.rs
+++ b/proxy/src/mgmt.rs
@@ -6,16 +6,11 @@ use std::{
     net::{TcpListener, TcpStream},
     thread,
 };
-use tracing::{error, info};
+use tracing::{error, info, info_span};
 use utils::postgres_backend::{self, AuthType, PostgresBackend};

-/// TODO: move all of that to auth-backend/link.rs when we ditch legacy-console backend
-
-///
-/// Main proxy listener loop.
-///
-/// Listens for connections, and launches a new handler thread for each.
-///
+/// Console management API listener thread.
+/// It spawns console response handlers needed for link auth.
 pub fn thread_main(listener: TcpListener) -> anyhow::Result<()> {
     scopeguard::defer! {
         info!("mgmt has shut down");
@@ -24,6 +19,7 @@
     listener
         .set_nonblocking(false)
         .context("failed to set listener to blocking")?;
+
     loop {
         let (socket, peer_addr) = listener.accept().context("failed to accept a new client")?;
         info!("accepted connection from {peer_addr}");
@@ -31,9 +27,19 @@
             .set_nodelay(true)
             .context("failed to set client socket option")?;

+        // TODO: replace with async tasks.
         thread::spawn(move || {
-            if let Err(err) = handle_connection(socket) {
-                error!("{err}");
+            let tid = std::thread::current().id();
+            let span = info_span!("mgmt", thread = format_args!("{tid:?}"));
+            let _enter = span.enter();
+
+            info!("started a new console management API thread");
+            scopeguard::defer! {
+                info!("console management API thread is about to finish");
+            }
+
+            if let Err(e) = handle_connection(socket) {
+                error!("thread failed with an error: {e}");
             }
         });
     }
@@ -44,44 +50,21 @@ fn handle_connection(socket: TcpStream) -> anyhow::Result<()> {
     pgbackend.run(&mut MgmtHandler)
 }

-struct MgmtHandler;
-
-/// Serialized examples:
-// {
-//   "session_id": "71d6d03e6d93d99a",
-//   "result": {
-//     "Success": {
-//       "host": "127.0.0.1",
-//       "port": 5432,
-//       "dbname": "stas",
-//       "user": "stas",
-//       "password": "mypass"
-//     }
-//   }
-// }
-// {
-//   "session_id": "71d6d03e6d93d99a",
-//   "result": {
-//     "Failure": "oops"
-//   }
-// }
-//
-// // to test manually by sending a query to mgmt interface:
-// psql -h 127.0.0.1 -p 9999 -c '{"session_id":"4f10dde522e14739","result":{"Success":{"host":"127.0.0.1","port":5432,"dbname":"stas","user":"stas","password":"stas"}}}'
-#[derive(Deserialize)]
+/// Known as `kickResponse` in the console.
+#[derive(Debug, Deserialize)]
 struct PsqlSessionResponse {
     session_id: String,
     result: PsqlSessionResult,
 }

-#[derive(Deserialize)]
+#[derive(Debug, Deserialize)]
 enum PsqlSessionResult {
-    Success(auth::DatabaseInfo),
+    Success(DatabaseInfo),
     Failure(String),
 }

 /// A message received by `mgmt` when a compute node is ready.
-pub type ComputeReady = Result<auth::DatabaseInfo, String>;
+pub type ComputeReady = Result<DatabaseInfo, String>;

 impl PsqlSessionResult {
     fn into_compute_ready(self) -> ComputeReady {
@@ -92,25 +75,51 @@ impl PsqlSessionResult {
     }
 }

-impl postgres_backend::Handler for MgmtHandler {
-    fn process_query(
-        &mut self,
-        pgb: &mut PostgresBackend,
-        query_string: &str,
-    ) -> anyhow::Result<()> {
-        let res = try_process_query(pgb, query_string);
-        // intercept and log error message
-        if res.is_err() {
-            error!("mgmt query failed: {res:?}");
-        }
-        res
+/// Compute node connection params provided by the console.
+/// This struct and its parents are an mgmt API implementation
+/// detail and thus should remain in this module.
+// TODO: restore deserialization tests from git history.
+#[derive(Deserialize)]
+pub struct DatabaseInfo {
+    pub host: String,
+    pub port: u16,
+    pub dbname: String,
+    pub user: String,
+    /// Console always provides a password, but it might
+    /// be inconvenient for debugging with a local PG instance.
+    pub password: Option<String>,
+    pub project: String,
+}
+
+// Manually implement debug to omit sensitive info.
+impl std::fmt::Debug for DatabaseInfo {
+    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
+        fmt.debug_struct("DatabaseInfo")
+            .field("host", &self.host)
+            .field("port", &self.port)
+            .field("dbname", &self.dbname)
+            .field("user", &self.user)
+            .finish_non_exhaustive()
     }
 }

-fn try_process_query(pgb: &mut PostgresBackend, query_string: &str) -> anyhow::Result<()> {
-    info!("got mgmt query [redacted]"); // Content contains password, don't print it
+// TODO: replace with an http-based protocol.
+struct MgmtHandler;
+impl postgres_backend::Handler for MgmtHandler {
+    fn process_query(&mut self, pgb: &mut PostgresBackend, query: &str) -> anyhow::Result<()> {
+        try_process_query(pgb, query).map_err(|e| {
+            error!("failed to process response: {e:?}");
+            e
+        })
+    }
+}

-    let resp: PsqlSessionResponse = serde_json::from_str(query_string)?;
+fn try_process_query(pgb: &mut PostgresBackend, query: &str) -> anyhow::Result<()> {
+    let resp: PsqlSessionResponse = serde_json::from_str(query)?;
+
+    let span = info_span!("event", session_id = resp.session_id);
+    let _enter = span.enter();
+    info!("got response: {:?}", resp.result);

     match auth::backend::notify(&resp.session_id, resp.result.into_compute_ready()) {
         Ok(()) => {
@@ -119,9 +128,50 @@ fn try_process_query(pgb: &mut PostgresBackend, query_string: &str) -> anyhow::Result<()> {
                 .write_message(&BeMessage::CommandComplete(b"SELECT 1"))?;
         }
         Err(e) => {
+            error!("failed to deliver response to per-client task");
             pgb.write_message(&BeMessage::ErrorResponse(&e.to_string()))?;
         }
     }

     Ok(())
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use serde_json::json;
+
+    #[test]
+    fn parse_db_info() -> anyhow::Result<()> {
+        // with password
+        let _: DatabaseInfo = serde_json::from_value(json!({
+            "host": "localhost",
+            "port": 5432,
+            "dbname": "postgres",
+            "user": "john_doe",
+            "password": "password",
+            "project": "hello_world",
+        }))?;
+
+        // without password
+        let _: DatabaseInfo = serde_json::from_value(json!({
+            "host": "localhost",
+            "port": 5432,
+            "dbname": "postgres",
+            "user": "john_doe",
+            "project": "hello_world",
+        }))?;
+
+        // new field (forward compatibility)
+        let _: DatabaseInfo = serde_json::from_value(json!({
+            "host": "localhost",
+            "port": 5432,
+            "dbname": "postgres",
+            "user": "john_doe",
+            "project": "hello_world",
+            "N.E.W": "forward compatibility check",
+        }))?;
+
+        Ok(())
+    }
+}
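The new `parse_db_info` test leans on serde's default of silently ignoring unknown JSON fields, which is what gives the console room to add fields without breaking older proxies. A compressed illustration of that property (field names here are hypothetical):

```rust
use serde::Deserialize;
use serde_json::json;

#[derive(Deserialize, Debug)]
struct DatabaseInfo {
    host: String,
    port: u16,
    password: Option<String>, // missing key deserializes to None
}

fn main() -> serde_json::Result<()> {
    // The unknown "brand_new_field" is ignored; deserialization still succeeds.
    // Adding #[serde(deny_unknown_fields)] would turn this into an error instead.
    let info: DatabaseInfo = serde_json::from_value(json!({
        "host": "localhost",
        "port": 5432,
        "brand_new_field": "ignored by older consumers",
    }))?;

    assert_eq!(info.port, 5432);
    assert!(info.password.is_none());
    Ok(())
}
```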
diff --git a/proxy/src/proxy.rs b/proxy/src/proxy.rs
index 9257fcd650..411893fee5 100644
--- a/proxy/src/proxy.rs
+++ b/proxy/src/proxy.rs
@@ -1,10 +1,15 @@
-use crate::auth;
-use crate::cancellation::{self, CancelMap};
-use crate::config::{ProxyConfig, TlsConfig};
-use crate::stream::{MeasuredStream, PqStream, Stream};
+#[cfg(test)]
+mod tests;
+
+use crate::{
+    auth,
+    cancellation::{self, CancelMap},
+    config::{ProxyConfig, TlsConfig},
+    stream::{MeasuredStream, PqStream, Stream},
+};
 use anyhow::{bail, Context};
 use futures::TryFutureExt;
-use metrics::{register_int_counter, IntCounter};
+use metrics::{register_int_counter, register_int_counter_vec, IntCounter, IntCounterVec};
 use once_cell::sync::Lazy;
 use pq_proto::{BeMessage as Be, *};
 use std::sync::Arc;
@@ -30,10 +35,16 @@ static NUM_CONNECTIONS_CLOSED_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
         .unwrap()
 });

-static NUM_BYTES_PROXIED_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
-    register_int_counter!(
-        "proxy_io_bytes_total",
-        "Number of bytes sent/received between any client and backend."
+static NUM_BYTES_PROXIED_COUNTER: Lazy<IntCounterVec> = Lazy::new(|| {
+    register_int_counter_vec!(
+        "proxy_io_bytes_per_client",
+        "Number of bytes sent/received between client and backend.",
+        &[
+            // Received (rx) / sent (tx).
+            "direction",
+            // Proxy can keep calling it `project` internally.
+            "endpoint_id"
+        ]
     )
     .unwrap()
 });
@@ -230,16 +241,17 @@ impl<S: AsyncRead + AsyncWrite + Unpin + Send> Client<'_, S> {
             application_name: params.get("application_name"),
         };

-        // Authenticate and connect to a compute node.
-        let auth = creds
-            .authenticate(&extra, &mut stream)
-            .instrument(info_span!("auth"))
-            .await;
-
-        let node = async { auth }.or_else(|e| stream.throw_error(e)).await?;
-        let reported_auth_ok = node.reported_auth_ok;
+        let auth_result = async {
+            // `&mut stream` doesn't let us merge those 2 lines.
+            let res = creds.authenticate(&extra, &mut stream).await;
+            async { res }.or_else(|e| stream.throw_error(e)).await
+        }
+        .instrument(info_span!("auth"))
+        .await?;
+        let node = auth_result.value;

         let (db, cancel_closure) = node
+            .config
             .connect(params)
             .or_else(|e| stream.throw_error(e))
             .await?;
@@ -247,7 +259,9 @@
         let cancel_key_data = session.enable_query_cancellation(cancel_closure);

         // Report authentication success if we haven't done this already.
-        if !reported_auth_ok {
+        // Note that we do this only (for the most part) after we've connected
+        // to a compute (see above) which performs its own authentication.
+        if !auth_result.reported_auth_ok {
             stream
                 .write_message_noflush(&Be::AuthenticationOk)?
                 .write_message_noflush(&BeParameterStatusMessage::encoding())?;
@@ -261,314 +275,25 @@
             .write_message(&BeMessage::ReadyForQuery)
             .await?;

-        /// This function will be called for writes to either direction.
-        fn inc_proxied(cnt: usize) {
-            // Consider inventing something more sophisticated
-            // if this ever becomes a bottleneck (cacheline bouncing).
-            NUM_BYTES_PROXIED_COUNTER.inc_by(cnt as u64);
-        }
+        // TODO: add more identifiers.
+        let metric_id = node.project;
+
+        let m_sent = NUM_BYTES_PROXIED_COUNTER.with_label_values(&["tx", &metric_id]);
+        let mut client = MeasuredStream::new(stream.into_inner(), |cnt| {
+            // Number of bytes we sent to the client (outbound).
+            m_sent.inc_by(cnt as u64);
+        });
+
+        let m_recv = NUM_BYTES_PROXIED_COUNTER.with_label_values(&["rx", &metric_id]);
+        let mut db = MeasuredStream::new(db.stream, |cnt| {
+            // Number of bytes the client sent to the compute node (inbound).
+            m_recv.inc_by(cnt as u64);
+        });

         // Starting from here we only proxy the client's traffic.
         info!("performing the proxy pass...");
-        let mut db = MeasuredStream::new(db.stream, inc_proxied);
-        let mut client = MeasuredStream::new(stream.into_inner(), inc_proxied);
         let _ = tokio::io::copy_bidirectional(&mut client, &mut db).await?;

         Ok(())
     }
 }
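The metrics change above swaps a single counter for a labelled counter family keyed by direction and endpoint. Written against the `prometheus` crate directly (the proxy's `metrics` macros are assumed to wrap the same API), the pattern looks like this; note that the children are resolved once via `with_label_values`, outside the per-chunk hot path, exactly as `m_sent`/`m_recv` are above:

```rust
use once_cell::sync::Lazy;
use prometheus::{register_int_counter_vec, IntCounterVec};

// One counter family; each (direction, endpoint_id) pair is a separate series.
static IO_BYTES: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "proxy_io_bytes_per_client",
        "Number of bytes sent/received between client and backend.",
        &["direction", "endpoint_id"]
    )
    .unwrap()
});

fn main() {
    // Resolve the child counters once, before entering the copy loop.
    let tx = IO_BYTES.with_label_values(&["tx", "my-project"]);
    let rx = IO_BYTES.with_label_values(&["rx", "my-project"]);

    tx.inc_by(1024); // bytes proxied to the client
    rx.inc_by(256); // bytes proxied to the compute node

    assert_eq!(tx.get(), 1024);
    assert_eq!(rx.get(), 256);
}
```

The trade-off versus the old single `proxy_io_bytes_total` counter is cardinality: one series per endpoint and direction, in exchange for per-endpoint billing/observability.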
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::{auth, scram};
-    use async_trait::async_trait;
-    use rstest::rstest;
-    use tokio_postgres::config::SslMode;
-    use tokio_postgres::tls::{MakeTlsConnect, NoTls};
-    use tokio_postgres_rustls::MakeRustlsConnect;
-
-    /// Generate a set of TLS certificates: CA + server.
-    fn generate_certs(
-        hostname: &str,
-    ) -> anyhow::Result<(rustls::Certificate, rustls::Certificate, rustls::PrivateKey)> {
-        let ca = rcgen::Certificate::from_params({
-            let mut params = rcgen::CertificateParams::default();
-            params.is_ca = rcgen::IsCa::Ca(rcgen::BasicConstraints::Unconstrained);
-            params
-        })?;
-
-        let cert = rcgen::generate_simple_self_signed(vec![hostname.into()])?;
-        Ok((
-            rustls::Certificate(ca.serialize_der()?),
-            rustls::Certificate(cert.serialize_der_with_signer(&ca)?),
-            rustls::PrivateKey(cert.serialize_private_key_der()),
-        ))
-    }
-
-    struct ClientConfig<'a> {
-        config: rustls::ClientConfig,
-        hostname: &'a str,
-    }
-
-    impl ClientConfig<'_> {
-        fn make_tls_connect<S: AsyncRead + AsyncWrite + Unpin + Send + 'static>(
-            self,
-        ) -> anyhow::Result<impl tokio_postgres::tls::TlsConnect<S>> {
-            let mut mk = MakeRustlsConnect::new(self.config);
-            let tls = MakeTlsConnect::<S>::make_tls_connect(&mut mk, self.hostname)?;
-            Ok(tls)
-        }
-    }
-
-    /// Generate TLS certificates and build rustls configs for client and server.
-    fn generate_tls_config<'a>(
-        hostname: &'a str,
-        common_name: &'a str,
-    ) -> anyhow::Result<(ClientConfig<'a>, TlsConfig)> {
-        let (ca, cert, key) = generate_certs(hostname)?;
-
-        let tls_config = {
-            let config = rustls::ServerConfig::builder()
-                .with_safe_defaults()
-                .with_no_client_auth()
-                .with_single_cert(vec![cert], key)?
-                .into();
-
-            TlsConfig {
-                config,
-                common_name: Some(common_name.to_string()),
-            }
-        };
-
-        let client_config = {
-            let config = rustls::ClientConfig::builder()
-                .with_safe_defaults()
-                .with_root_certificates({
-                    let mut store = rustls::RootCertStore::empty();
-                    store.add(&ca)?;
-                    store
-                })
-                .with_no_client_auth();
-
-            ClientConfig { config, hostname }
-        };
-
-        Ok((client_config, tls_config))
-    }
-
-    #[async_trait]
-    trait TestAuth: Sized {
-        async fn authenticate<S: AsyncRead + AsyncWrite + Unpin + Send>(
-            self,
-            _stream: &mut PqStream<Stream<S>>,
-        ) -> anyhow::Result<()> {
-            Ok(())
-        }
-    }
-
-    struct NoAuth;
-    impl TestAuth for NoAuth {}
-
-    struct Scram(scram::ServerSecret);
-
-    impl Scram {
-        fn new(password: &str) -> anyhow::Result<Self> {
-            let salt = rand::random::<[u8; 16]>();
-            let secret = scram::ServerSecret::build(password, &salt, 256)
-                .context("failed to generate scram secret")?;
-            Ok(Scram(secret))
-        }
-
-        fn mock(user: &str) -> Self {
-            let salt = rand::random::<[u8; 32]>();
-            Scram(scram::ServerSecret::mock(user, &salt))
-        }
-    }
-
-    #[async_trait]
-    impl TestAuth for Scram {
-        async fn authenticate<S: AsyncRead + AsyncWrite + Unpin + Send>(
-            self,
-            stream: &mut PqStream<Stream<S>>,
-        ) -> anyhow::Result<()> {
-            auth::AuthFlow::new(stream)
-                .begin(auth::Scram(&self.0))
-                .await?
-                .authenticate()
-                .await?;
-
-            Ok(())
-        }
-    }
-
-    /// A dummy proxy impl which performs a handshake and reports auth success.
-    async fn dummy_proxy(
-        client: impl AsyncRead + AsyncWrite + Unpin + Send,
-        tls: Option<TlsConfig>,
-        auth: impl TestAuth + Send,
-    ) -> anyhow::Result<()> {
-        let cancel_map = CancelMap::default();
-        let (mut stream, _params) = handshake(client, tls.as_ref(), &cancel_map)
-            .await?
-            .context("handshake failed")?;
-
-        auth.authenticate(&mut stream).await?;
-
-        stream
-            .write_message_noflush(&Be::AuthenticationOk)?
-            .write_message_noflush(&BeParameterStatusMessage::encoding())?
- .write_message(&BeMessage::ReadyForQuery) - .await?; - - Ok(()) - } - - #[tokio::test] - async fn handshake_tls_is_enforced_by_proxy() -> anyhow::Result<()> { - let (client, server) = tokio::io::duplex(1024); - - let (_, server_config) = - generate_tls_config("generic-project-name.localhost", "localhost")?; - let proxy = tokio::spawn(dummy_proxy(client, Some(server_config), NoAuth)); - - let client_err = tokio_postgres::Config::new() - .user("john_doe") - .dbname("earth") - .ssl_mode(SslMode::Disable) - .connect_raw(server, NoTls) - .await - .err() // -> Option - .context("client shouldn't be able to connect")?; - - assert!(client_err.to_string().contains(ERR_INSECURE_CONNECTION)); - - let server_err = proxy - .await? - .err() // -> Option - .context("server shouldn't accept client")?; - - assert!(client_err.to_string().contains(&server_err.to_string())); - - Ok(()) - } - - #[tokio::test] - async fn handshake_tls() -> anyhow::Result<()> { - let (client, server) = tokio::io::duplex(1024); - - let (client_config, server_config) = - generate_tls_config("generic-project-name.localhost", "localhost")?; - let proxy = tokio::spawn(dummy_proxy(client, Some(server_config), NoAuth)); - - let (_client, _conn) = tokio_postgres::Config::new() - .user("john_doe") - .dbname("earth") - .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) - .await?; - - proxy.await? - } - - #[tokio::test] - async fn handshake_raw() -> anyhow::Result<()> { - let (client, server) = tokio::io::duplex(1024); - - let proxy = tokio::spawn(dummy_proxy(client, None, NoAuth)); - - let (_client, _conn) = tokio_postgres::Config::new() - .user("john_doe") - .dbname("earth") - .options("project=generic-project-name") - .ssl_mode(SslMode::Prefer) - .connect_raw(server, NoTls) - .await?; - - proxy.await? - } - - #[tokio::test] - async fn keepalive_is_inherited() -> anyhow::Result<()> { - use tokio::net::{TcpListener, TcpStream}; - - let listener = TcpListener::bind("127.0.0.1:0").await?; - let port = listener.local_addr()?.port(); - socket2::SockRef::from(&listener).set_keepalive(true)?; - - let t = tokio::spawn(async move { - let (client, _) = listener.accept().await?; - let keepalive = socket2::SockRef::from(&client).keepalive()?; - anyhow::Ok(keepalive) - }); - - let _ = TcpStream::connect(("127.0.0.1", port)).await?; - assert!(t.await??, "keepalive should be inherited"); - - Ok(()) - } - - #[rstest] - #[case("password_foo")] - #[case("pwd-bar")] - #[case("")] - #[tokio::test] - async fn scram_auth_good(#[case] password: &str) -> anyhow::Result<()> { - let (client, server) = tokio::io::duplex(1024); - - let (client_config, server_config) = - generate_tls_config("generic-project-name.localhost", "localhost")?; - let proxy = tokio::spawn(dummy_proxy( - client, - Some(server_config), - Scram::new(password)?, - )); - - let (_client, _conn) = tokio_postgres::Config::new() - .user("user") - .dbname("db") - .password(password) - .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) - .await?; - - proxy.await? 
- } - - #[tokio::test] - async fn scram_auth_mock() -> anyhow::Result<()> { - let (client, server) = tokio::io::duplex(1024); - - let (client_config, server_config) = - generate_tls_config("generic-project-name.localhost", "localhost")?; - let proxy = tokio::spawn(dummy_proxy( - client, - Some(server_config), - Scram::mock("user"), - )); - - use rand::{distributions::Alphanumeric, Rng}; - let password: String = rand::thread_rng() - .sample_iter(&Alphanumeric) - .take(rand::random::() as usize) - .map(char::from) - .collect(); - - let _client_err = tokio_postgres::Config::new() - .user("user") - .dbname("db") - .password(&password) // no password will match the mocked secret - .ssl_mode(SslMode::Require) - .connect_raw(server, client_config.make_tls_connect()?) - .await - .err() // -> Option - .context("client shouldn't be able to connect")?; - - let _server_err = proxy - .await? - .err() // -> Option - .context("server shouldn't accept client")?; - - Ok(()) - } -} diff --git a/proxy/src/proxy/tests.rs b/proxy/src/proxy/tests.rs new file mode 100644 index 0000000000..3d74dbae5a --- /dev/null +++ b/proxy/src/proxy/tests.rs @@ -0,0 +1,291 @@ +///! A group of high-level tests for connection establishing logic and auth. +use super::*; +use crate::{auth, scram}; +use async_trait::async_trait; +use rstest::rstest; +use tokio_postgres::config::SslMode; +use tokio_postgres::tls::{MakeTlsConnect, NoTls}; +use tokio_postgres_rustls::MakeRustlsConnect; + +/// Generate a set of TLS certificates: CA + server. +fn generate_certs( + hostname: &str, +) -> anyhow::Result<(rustls::Certificate, rustls::Certificate, rustls::PrivateKey)> { + let ca = rcgen::Certificate::from_params({ + let mut params = rcgen::CertificateParams::default(); + params.is_ca = rcgen::IsCa::Ca(rcgen::BasicConstraints::Unconstrained); + params + })?; + + let cert = rcgen::generate_simple_self_signed(vec![hostname.into()])?; + Ok(( + rustls::Certificate(ca.serialize_der()?), + rustls::Certificate(cert.serialize_der_with_signer(&ca)?), + rustls::PrivateKey(cert.serialize_private_key_der()), + )) +} + +struct ClientConfig<'a> { + config: rustls::ClientConfig, + hostname: &'a str, +} + +impl ClientConfig<'_> { + fn make_tls_connect( + self, + ) -> anyhow::Result> { + let mut mk = MakeRustlsConnect::new(self.config); + let tls = MakeTlsConnect::::make_tls_connect(&mut mk, self.hostname)?; + Ok(tls) + } +} + +/// Generate TLS certificates and build rustls configs for client and server. +fn generate_tls_config<'a>( + hostname: &'a str, + common_name: &'a str, +) -> anyhow::Result<(ClientConfig<'a>, TlsConfig)> { + let (ca, cert, key) = generate_certs(hostname)?; + + let tls_config = { + let config = rustls::ServerConfig::builder() + .with_safe_defaults() + .with_no_client_auth() + .with_single_cert(vec![cert], key)? 
+ .into(); + + TlsConfig { + config, + common_name: Some(common_name.to_string()), + } + }; + + let client_config = { + let config = rustls::ClientConfig::builder() + .with_safe_defaults() + .with_root_certificates({ + let mut store = rustls::RootCertStore::empty(); + store.add(&ca)?; + store + }) + .with_no_client_auth(); + + ClientConfig { config, hostname } + }; + + Ok((client_config, tls_config)) +} + +#[async_trait] +trait TestAuth: Sized { + async fn authenticate( + self, + _stream: &mut PqStream>, + ) -> anyhow::Result<()> { + Ok(()) + } +} + +struct NoAuth; +impl TestAuth for NoAuth {} + +struct Scram(scram::ServerSecret); + +impl Scram { + fn new(password: &str) -> anyhow::Result { + let salt = rand::random::<[u8; 16]>(); + let secret = scram::ServerSecret::build(password, &salt, 256) + .context("failed to generate scram secret")?; + Ok(Scram(secret)) + } + + fn mock(user: &str) -> Self { + let salt = rand::random::<[u8; 32]>(); + Scram(scram::ServerSecret::mock(user, &salt)) + } +} + +#[async_trait] +impl TestAuth for Scram { + async fn authenticate( + self, + stream: &mut PqStream>, + ) -> anyhow::Result<()> { + auth::AuthFlow::new(stream) + .begin(auth::Scram(&self.0)) + .await? + .authenticate() + .await?; + + Ok(()) + } +} + +/// A dummy proxy impl which performs a handshake and reports auth success. +async fn dummy_proxy( + client: impl AsyncRead + AsyncWrite + Unpin + Send, + tls: Option, + auth: impl TestAuth + Send, +) -> anyhow::Result<()> { + let cancel_map = CancelMap::default(); + let (mut stream, _params) = handshake(client, tls.as_ref(), &cancel_map) + .await? + .context("handshake failed")?; + + auth.authenticate(&mut stream).await?; + + stream + .write_message_noflush(&Be::AuthenticationOk)? + .write_message_noflush(&BeParameterStatusMessage::encoding())? + .write_message(&BeMessage::ReadyForQuery) + .await?; + + Ok(()) +} + +#[tokio::test] +async fn handshake_tls_is_enforced_by_proxy() -> anyhow::Result<()> { + let (client, server) = tokio::io::duplex(1024); + + let (_, server_config) = generate_tls_config("generic-project-name.localhost", "localhost")?; + let proxy = tokio::spawn(dummy_proxy(client, Some(server_config), NoAuth)); + + let client_err = tokio_postgres::Config::new() + .user("john_doe") + .dbname("earth") + .ssl_mode(SslMode::Disable) + .connect_raw(server, NoTls) + .await + .err() // -> Option + .context("client shouldn't be able to connect")?; + + assert!(client_err.to_string().contains(ERR_INSECURE_CONNECTION)); + + let server_err = proxy + .await? + .err() // -> Option + .context("server shouldn't accept client")?; + + assert!(client_err.to_string().contains(&server_err.to_string())); + + Ok(()) +} + +#[tokio::test] +async fn handshake_tls() -> anyhow::Result<()> { + let (client, server) = tokio::io::duplex(1024); + + let (client_config, server_config) = + generate_tls_config("generic-project-name.localhost", "localhost")?; + let proxy = tokio::spawn(dummy_proxy(client, Some(server_config), NoAuth)); + + let (_client, _conn) = tokio_postgres::Config::new() + .user("john_doe") + .dbname("earth") + .ssl_mode(SslMode::Require) + .connect_raw(server, client_config.make_tls_connect()?) + .await?; + + proxy.await? 
+} + +#[tokio::test] +async fn handshake_raw() -> anyhow::Result<()> { + let (client, server) = tokio::io::duplex(1024); + + let proxy = tokio::spawn(dummy_proxy(client, None, NoAuth)); + + let (_client, _conn) = tokio_postgres::Config::new() + .user("john_doe") + .dbname("earth") + .options("project=generic-project-name") + .ssl_mode(SslMode::Prefer) + .connect_raw(server, NoTls) + .await?; + + proxy.await? +} + +#[tokio::test] +async fn keepalive_is_inherited() -> anyhow::Result<()> { + use tokio::net::{TcpListener, TcpStream}; + + let listener = TcpListener::bind("127.0.0.1:0").await?; + let port = listener.local_addr()?.port(); + socket2::SockRef::from(&listener).set_keepalive(true)?; + + let t = tokio::spawn(async move { + let (client, _) = listener.accept().await?; + let keepalive = socket2::SockRef::from(&client).keepalive()?; + anyhow::Ok(keepalive) + }); + + let _ = TcpStream::connect(("127.0.0.1", port)).await?; + assert!(t.await??, "keepalive should be inherited"); + + Ok(()) +} + +#[rstest] +#[case("password_foo")] +#[case("pwd-bar")] +#[case("")] +#[tokio::test] +async fn scram_auth_good(#[case] password: &str) -> anyhow::Result<()> { + let (client, server) = tokio::io::duplex(1024); + + let (client_config, server_config) = + generate_tls_config("generic-project-name.localhost", "localhost")?; + let proxy = tokio::spawn(dummy_proxy( + client, + Some(server_config), + Scram::new(password)?, + )); + + let (_client, _conn) = tokio_postgres::Config::new() + .user("user") + .dbname("db") + .password(password) + .ssl_mode(SslMode::Require) + .connect_raw(server, client_config.make_tls_connect()?) + .await?; + + proxy.await? +} + +#[tokio::test] +async fn scram_auth_mock() -> anyhow::Result<()> { + let (client, server) = tokio::io::duplex(1024); + + let (client_config, server_config) = + generate_tls_config("generic-project-name.localhost", "localhost")?; + let proxy = tokio::spawn(dummy_proxy( + client, + Some(server_config), + Scram::mock("user"), + )); + + use rand::{distributions::Alphanumeric, Rng}; + let password: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(rand::random::() as usize) + .map(char::from) + .collect(); + + let _client_err = tokio_postgres::Config::new() + .user("user") + .dbname("db") + .password(&password) // no password will match the mocked secret + .ssl_mode(SslMode::Require) + .connect_raw(server, client_config.make_tls_connect()?) + .await + .err() // -> Option + .context("client shouldn't be able to connect")?; + + let _server_err = proxy + .await? 
+        .err() // -> Option<E>
+        .context("server shouldn't accept client")?;
+
+    Ok(())
+}
diff --git a/pyproject.toml b/pyproject.toml
index b13acece18..b297f7f70b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,13 +12,13 @@ typing-extensions = "^4.1.0"
 PyJWT = {version = "^2.1.0", extras = ["crypto"]}
 requests = "^2.26.0"
 pytest-xdist = "^3.0.2"
-asyncpg = "^0.24.0"
+asyncpg = "^0.27.0"
 aiopg = "^1.3.1"
 Jinja2 = "^3.0.2"
 types-requests = "^2.28.5"
 types-psycopg2 = "^2.9.18"
-boto3 = "^1.20.40"
-boto3-stubs = {version = "^1.23.38", extras = ["s3"]}
+boto3 = "^1.26.16"
+boto3-stubs = {extras = ["s3"], version = "^1.26.16"}
 moto = {version = "^3.0.0", extras = ["server"]}
 backoff = "^1.11.1"
 pytest-lazy-fixture = "^0.6.3"
@@ -31,13 +31,13 @@ pytest-asyncio = "^0.19.0"
 toml = "^0.10.2"
 psutil = "^5.9.4"
 types-psutil = "^5.9.5.4"
+types-toml = "^0.10.8"
 
 [tool.poetry.dev-dependencies]
 flake8 = "^5.0.4"
 mypy = "==0.971"
 black = "^22.6.0"
 isort = "^5.10.1"
-types-toml = "^0.10.8"
 
 [build-system]
 requires = ["poetry-core>=1.0.0"]
diff --git a/safekeeper/src/auth.rs b/safekeeper/src/auth.rs
new file mode 100644
index 0000000000..2684d82365
--- /dev/null
+++ b/safekeeper/src/auth.rs
@@ -0,0 +1,19 @@
+use anyhow::{bail, Result};
+use utils::auth::{Claims, Scope};
+use utils::id::TenantId;
+
+pub fn check_permission(claims: &Claims, tenant_id: Option<TenantId>) -> Result<()> {
+    match (&claims.scope, tenant_id) {
+        (Scope::Tenant, None) => {
+            bail!("Attempt to access management api with tenant scope. Permission denied")
+        }
+        (Scope::Tenant, Some(tenant_id)) => {
+            if claims.tenant_id.unwrap() != tenant_id {
+                bail!("Tenant id mismatch. Permission denied")
+            }
+            Ok(())
+        }
+        (Scope::PageServerApi, _) => bail!("PageServerApi scope makes no sense for Safekeeper"),
+        (Scope::SafekeeperData, _) => Ok(()),
+    }
+}
diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs
index 42f8188d6a..49e9e30cdc 100644
--- a/safekeeper/src/bin/safekeeper.rs
+++ b/safekeeper/src/bin/safekeeper.rs
@@ -208,11 +208,12 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<NodeId>, init: bo
     GlobalTimelines::init(conf.clone(), wal_backup_launcher_tx)?;
 
     let conf_ = conf.clone();
+    let auth_ = auth.clone();
     threads.push(
         thread::Builder::new()
             .name("http_endpoint_thread".into())
             .spawn(|| {
-                let router = http::make_router(conf_, auth);
+                let router = http::make_router(conf_, auth_);
                 endpoint::serve_thread_main(
                     router,
                     http_listener,
@@ -226,8 +227,7 @@
     let safekeeper_thread = thread::Builder::new()
         .name("Safekeeper thread".into())
         .spawn(|| {
-            // TODO: add auth
-            if let Err(e) = wal_service::thread_main(conf_cloned, pg_listener) {
+            if let Err(e) = wal_service::thread_main(conf_cloned, pg_listener, auth) {
                 info!("safekeeper thread terminated: {e}");
             }
         })
@@ -254,7 +254,6 @@
         thread::Builder::new()
             .name("WAL removal thread".into())
            .spawn(|| {
-                // TODO: add auth?
                 remove_wal::thread_main(conf_);
             })?,
     );
@@ -264,7 +263,6 @@
         thread::Builder::new()
            .name("wal backup launcher thread".into())
            .spawn(move || {
-                // TODO: add auth?
                 wal_backup::wal_backup_launcher_thread_main(conf_, wal_backup_launcher_rx);
            })?,
     );
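The scope matrix that `safekeeper/src/auth.rs` introduces above is small enough to restate as a standalone, runnable sketch. The stub `Claims` and `Scope` types below are stand-ins for the real ones in `utils::auth` (whose exact shape this diff only hints at); the match arms mirror the new function.

```rust
// A minimal sketch of the safekeeper permission matrix, with stub types.
#[derive(Debug)]
enum Scope {
    Tenant,
    PageServerApi,
    SafekeeperData,
}

type TenantId = u128; // stand-in for utils::id::TenantId

struct Claims {
    scope: Scope,
    tenant_id: Option<TenantId>,
}

fn check_permission(claims: &Claims, tenant_id: Option<TenantId>) -> Result<(), String> {
    match (&claims.scope, tenant_id) {
        // A tenant-scoped token must not touch the management API...
        (Scope::Tenant, None) => Err("management api with tenant scope".into()),
        // ...and may only touch its own tenant.
        (Scope::Tenant, Some(id)) if claims.tenant_id != Some(id) => {
            Err("tenant id mismatch".into())
        }
        (Scope::Tenant, Some(_)) => Ok(()),
        // Pageserver tokens are never valid on a safekeeper.
        (Scope::PageServerApi, _) => Err("wrong service".into()),
        // Safekeeper-wide tokens can do anything here.
        (Scope::SafekeeperData, _) => Ok(()),
    }
}

fn main() {
    let tenant = Claims { scope: Scope::Tenant, tenant_id: Some(1) };
    assert!(check_permission(&tenant, Some(1)).is_ok());
    assert!(check_permission(&tenant, Some(2)).is_err());
    assert!(check_permission(&tenant, None).is_err());

    let sk = Claims { scope: Scope::SafekeeperData, tenant_id: None };
    assert!(check_permission(&sk, None).is_ok());
}
```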
diff --git a/safekeeper/src/handler.rs b/safekeeper/src/handler.rs
index a1e0bcbec0..05527303ca 100644
--- a/safekeeper/src/handler.rs
+++ b/safekeeper/src/handler.rs
@@ -1,19 +1,23 @@
 //! Part of Safekeeper pretending to be Postgres, i.e. handling Postgres
 //! protocol commands.
 
+use crate::auth::check_permission;
 use crate::json_ctrl::{handle_json_ctrl, AppendLogicalMessage};
 use crate::receive_wal::ReceiveWalConn;
 use crate::send_wal::ReplicationConn;
 use crate::{GlobalTimelines, SafeKeeperConf};
 
-use anyhow::{bail, Context, Result};
+use anyhow::{bail, ensure, Context, Result};
 use postgres_ffi::PG_TLI;
 use regex::Regex;
 
 use pq_proto::{BeMessage, FeStartupPacket, RowDescriptor, INT4_OID, TEXT_OID};
+use std::str;
+use std::sync::Arc;
 use tracing::info;
+use utils::auth::{Claims, JwtAuth, Scope};
 use utils::{
     id::{TenantId, TenantTimelineId, TimelineId},
     lsn::Lsn,
@@ -28,6 +32,8 @@ pub struct SafekeeperPostgresHandler {
     pub tenant_id: Option<TenantId>,
     pub timeline_id: Option<TimelineId>,
     pub ttid: TenantTimelineId,
+    auth: Option<Arc<JwtAuth>>,
+    claims: Option<Claims>,
 }
 
 /// Parsed Postgres command.
@@ -93,7 +99,44 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler {
         }
     }
 
+    fn check_auth_jwt(
+        &mut self,
+        _pgb: &mut PostgresBackend,
+        jwt_response: &[u8],
+    ) -> anyhow::Result<()> {
+        // This unwrap never triggers, because check_auth_jwt is only called
+        // when auth_type is NeonJWT, which requires auth to be present.
+        let data = self
+            .auth
+            .as_ref()
+            .unwrap()
+            .decode(str::from_utf8(jwt_response)?)?;
+
+        if matches!(data.claims.scope, Scope::Tenant) {
+            ensure!(
+                data.claims.tenant_id.is_some(),
+                "jwt token scope is Tenant, but tenant id is missing"
+            )
+        }
+
+        info!(
+            "jwt auth succeeded for scope: {:#?} by tenant id: {:?}",
+            data.claims.scope, data.claims.tenant_id,
+        );
+
+        self.claims = Some(data.claims);
+        Ok(())
+    }
+
     fn process_query(&mut self, pgb: &mut PostgresBackend, query_string: &str) -> Result<()> {
+        if query_string
+            .to_ascii_lowercase()
+            .starts_with("set datestyle to ")
+        {
+            // Important for debugging, because psycopg2 executes
+            // "SET datestyle TO 'ISO'" on connect.
+            pgb.write_message(&BeMessage::CommandComplete(b"SELECT 1"))?;
+            return Ok(());
+        }
         let cmd = parse_cmd(query_string)?;
 
         info!(
@@ -103,6 +146,7 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler {
 
         let tenant_id = self.tenant_id.context("tenantid is required")?;
         let timeline_id = self.timeline_id.context("timelineid is required")?;
+        self.check_permission(Some(tenant_id))?;
         self.ttid = TenantTimelineId::new(tenant_id, timeline_id);
 
         match cmd {
@@ -122,16 +166,35 @@
 }
 
 impl SafekeeperPostgresHandler {
-    pub fn new(conf: SafeKeeperConf) -> Self {
+    pub fn new(conf: SafeKeeperConf, auth: Option<Arc<JwtAuth>>) -> Self {
         SafekeeperPostgresHandler {
             conf,
             appname: None,
             tenant_id: None,
             timeline_id: None,
             ttid: TenantTimelineId::empty(),
+            auth,
+            claims: None,
         }
     }
 
+    // When accessing the management API, supply None as the tenant id;
+    // when authorizing access to a tenant, pass the corresponding tenant id.
+    fn check_permission(&self, tenant_id: Option<TenantId>) -> Result<()> {
+        if self.auth.is_none() {
+            // Auth is set to Trust; nothing to check, so just return Ok.
+            return Ok(());
+        }
+        // Auth is Some (just checked above), and in that case claims are
+        // always present thanks to the checks during connection init,
+        // so this expect won't trigger.
+        let claims = self
+            .claims
+            .as_ref()
+            .expect("claims presence already checked");
+        check_permission(claims, tenant_id)
+    }
+
     ///
     /// Handle IDENTIFY_SYSTEM replication command
     ///
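The `check_auth_jwt` hook above decodes the startup password as a JWT and stashes its claims for the later `check_permission` call. For illustration, here is a hedged sketch of minting a compatible tenant-scoped token in Rust. The claim layout ({"scope": ..., "tenant_id": ...}, signed with RS256) follows the Python fixtures changed later in this patch; the choice of the `jsonwebtoken` crate and the key path are assumptions, not part of the patch.

```rust
// Sketch: mint a tenant-scoped token the way the test fixtures do with PyJWT.
// Assumes the `jsonwebtoken` (v8) and `serde` (with derive) crates.
use jsonwebtoken::{encode, Algorithm, EncodingKey, Header};
use serde::Serialize;

#[derive(Serialize)]
struct Claims {
    scope: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    tenant_id: Option<String>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Path is illustrative; use whatever private key the deployment trusts.
    let priv_pem = std::fs::read("auth_private_key.pem")?;

    let claims = Claims {
        scope: "tenant".to_string(),
        tenant_id: Some("0123456789abcdef0123456789abcdef".to_string()),
    };
    let token = encode(
        &Header::new(Algorithm::RS256),
        &claims,
        &EncodingKey::from_rsa_pem(&priv_pem)?,
    )?;

    // The token is then passed as the libpq password, e.g. `password={token}`
    // in the safekeeper connection string, as the test fixtures do.
    println!("{token}");
    Ok(())
}
```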
diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs
index 6efd09c7e2..9343611959 100644
--- a/safekeeper/src/http/routes.rs
+++ b/safekeeper/src/http/routes.rs
@@ -12,7 +12,6 @@ use tokio::task::JoinError;
 
 use crate::safekeeper::ServerInfo;
 use crate::safekeeper::Term;
-use crate::safekeeper::TermHistory;
 use crate::timelines_global_map::TimelineDeleteForceResult;
 use crate::GlobalTimelines;
 
@@ -21,7 +20,7 @@ use etcd_broker::subscription_value::SkTimelineInfo;
 use utils::{
     auth::JwtAuth,
     http::{
-        endpoint::{self, auth_middleware, check_permission},
+        endpoint::{self, auth_middleware, check_permission_with},
         error::ApiError,
         json::{json_request, json_response},
         request::{ensure_no_body, parse_request_param},
@@ -62,12 +61,21 @@ where
     s.serialize_str(&format!("{}", z))
 }
 
+/// Same as TermSwitchEntry, but serializes LSN using display serializer
+/// in Postgres format, i.e. 0/FFFFFFFF. Used only for the API response.
+#[derive(Debug, Serialize)]
+struct TermSwitchApiEntry {
+    pub term: Term,
+    #[serde(serialize_with = "display_serialize")]
+    pub lsn: Lsn,
+}
+
 /// Augment AcceptorState with epoch for convenience
 #[derive(Debug, Serialize)]
 struct AcceptorStateStatus {
     term: Term,
     epoch: Term,
-    term_history: TermHistory,
+    term_history: Vec<TermSwitchApiEntry>,
 }
 
 /// Info about timeline on safekeeper ready for reporting.
@@ -95,6 +103,12 @@ struct TimelineStatus {
     remote_consistent_lsn: Lsn,
 }
 
+fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
+    check_permission_with(request, |claims| {
+        crate::auth::check_permission(claims, tenant_id)
+    })
+}
+
 /// Report info about timeline.
 async fn timeline_status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
     let ttid = TenantTimelineId::new(
@@ -112,10 +126,21 @@ async fn timeline_status_handler(request: Request<Body>) -> Result<
diff --git a/safekeeper/src/wal_service.rs b/safekeeper/src/wal_service.rs
--- a/safekeeper/src/wal_service.rs
+++ b/safekeeper/src/wal_service.rs
-pub fn thread_main(conf: SafeKeeperConf, listener: TcpListener) -> Result<()> {
+pub fn thread_main(
+    conf: SafeKeeperConf,
+    listener: TcpListener,
+    auth: Option<Arc<JwtAuth>>,
+) -> Result<()> {
     loop {
         match listener.accept() {
             Ok((socket, peer_addr)) => {
                 debug!("accepted connection from {}", peer_addr);
                 let conf = conf.clone();
+                let auth = auth.clone();
                 let _ = thread::Builder::new()
                     .name("WAL service thread".into())
                     .spawn(move || {
-                        if let Err(err) = handle_socket(socket, conf) {
+                        if let Err(err) = handle_socket(socket, conf, auth) {
                             error!("connection handler exited: {}", err);
                         }
                     })
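Note how `thread_main` above clones the optional `Arc<JwtAuth>` once per accepted connection before moving it into the handler thread. A minimal, std-only sketch of that sharing pattern follows; the types are stand-ins, not the real `JwtAuth`.

```rust
// Sketch: sharing an optional, immutable auth handle across per-connection
// threads via Option<Arc<T>>, as thread_main does above.
use std::sync::Arc;
use std::thread;

struct JwtAuth; // stand-in for utils::auth::JwtAuth

fn main() {
    let auth: Option<Arc<JwtAuth>> = Some(Arc::new(JwtAuth));

    let mut handles = Vec::new();
    for conn_id in 0..4 {
        // Cloning an Option<Arc<_>> only bumps a refcount; every connection
        // thread gets its own handle to the same validator.
        let auth = auth.clone();
        handles.push(thread::spawn(move || match &auth {
            Some(_validator) => println!("conn {conn_id}: JWT auth enforced"),
            None => println!("conn {conn_id}: Trust auth"),
        }));
    }
    for h in handles {
        h.join().unwrap();
    }
}
```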
@@ -44,13 +51,25 @@ fn get_tid() -> u64 {
 
 /// This is run by `thread_main` above, inside a background thread.
 ///
-fn handle_socket(socket: TcpStream, conf: SafeKeeperConf) -> Result<()> {
+fn handle_socket(
+    socket: TcpStream,
+    conf: SafeKeeperConf,
+    auth: Option<Arc<JwtAuth>>,
+) -> Result<()> {
     let _enter = info_span!("", tid = ?get_tid()).entered();
 
     socket.set_nodelay(true)?;
 
-    let mut conn_handler = SafekeeperPostgresHandler::new(conf);
-    let pgbackend = PostgresBackend::new(socket, AuthType::Trust, None, false)?;
+    let mut conn_handler = SafekeeperPostgresHandler::new(conf, auth.clone());
+    let pgbackend = PostgresBackend::new(
+        socket,
+        match auth {
+            None => AuthType::Trust,
+            Some(_) => AuthType::NeonJWT,
+        },
+        None,
+        false,
+    )?;
 
     // libpq replication protocol between safekeeper and replicas/pagers
     pgbackend.run(&mut conn_handler)?;
diff --git a/storage_broker/Cargo.toml b/storage_broker/Cargo.toml
new file mode 100644
index 0000000000..843fc53f36
--- /dev/null
+++ b/storage_broker/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "storage_broker"
+version = "0.1.0"
+edition = "2021"
+
+[features]
+bench = []
+
+[dependencies]
+async-stream = "0.3"
+bytes = "1.0"
+clap = { version = "4.0", features = ["derive"] }
+futures = "0.3"
+futures-core = "0.3"
+futures-util = "0.3"
+git-version = "0.3.5"
+humantime = "2.1.0"
+hyper = {version = "0.14.14", features = ["full"]}
+once_cell = "1.13.0"
+parking_lot = "0.12"
+prost = "0.11"
+tonic = "0.8"
+tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] }
+tokio-stream = "0.1"
+tracing = "0.1.27"
+
+metrics = { path = "../libs/metrics" }
+utils = { path = "../libs/utils" }
+workspace_hack = { version = "0.1", path = "../workspace_hack" }
+
+[build-dependencies]
+tonic-build = "0.8"
+
+[[bench]]
+name = "rps"
+harness = false
+
diff --git a/storage_broker/benches/rps.rs b/storage_broker/benches/rps.rs
new file mode 100644
index 0000000000..0a72adc948
--- /dev/null
+++ b/storage_broker/benches/rps.rs
@@ -0,0 +1,174 @@
+use std::sync::atomic::{AtomicU64, Ordering};
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+
+use clap::Parser;
+use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey;
+use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
+use storage_broker::proto::{SafekeeperTimelineInfo, SubscribeSafekeeperInfoRequest};
+use storage_broker::BrokerClientChannel;
+use storage_broker::DEFAULT_LISTEN_ADDR;
+use tokio::time;
+
+use tonic::Request;
+
+const ABOUT: &str = r#"
+A simple benchmarking tool for storage_broker. Creates the specified number of
+per-timeline publishers and subscribers; each publisher continuously sends
+messages, and subscribers read them. Each second the tool outputs the number of
+messages summed across all subscribers and the min number of messages
+received by a single subscriber.
+
+For example,
+cargo build -r -p storage_broker && target/release/storage_broker
+cargo bench --bench rps -- -s 1 -p 1
+"#;
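The bench measures throughput with one shared `AtomicU64` per subscriber, polled by the `progress_reporter` task defined below. Here is a self-contained sketch of that counter/reporter pattern, with illustrative worker counts and the same `Relaxed` ordering the bench uses.

```rust
// Sketch: per-worker relaxed atomic counters polled by a reporter thread.
// Relaxed is enough here: we only need the counts to be eventually visible,
// not to order any other memory accesses.
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let counters: Vec<Arc<AtomicU64>> =
        (0..4).map(|_| Arc::new(AtomicU64::new(0))).collect();

    for c in &counters {
        let c = Arc::clone(c);
        thread::spawn(move || loop {
            c.fetch_add(1, Ordering::Relaxed);
        });
    }

    // Reporter: print the per-second delta of the summed counters, plus the
    // minimum across workers (the slowest one), like progress_reporter below.
    let mut prev_sum = 0;
    for _ in 0..3 {
        thread::sleep(Duration::from_secs(1));
        let sum: u64 = counters.iter().map(|c| c.load(Ordering::Relaxed)).sum();
        let min = counters.iter().map(|c| c.load(Ordering::Relaxed)).min().unwrap_or(0);
        println!("sum rps {}, slowest worker total {}", sum - prev_sum, min);
        prev_sum = sum;
    }
}
```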
+#[derive(Parser, Debug)]
+#[clap(author, version, about = ABOUT)]
+struct Args {
+    /// Number of publishers
+    #[clap(short = 'p', long, value_parser, default_value_t = 1)]
+    num_pubs: u64,
+    /// Number of subscribers
+    #[clap(short = 's', long, value_parser, default_value_t = 1)]
+    num_subs: u64,
+    // Fake option to accept the flag that `cargo bench` passes.
+    #[clap(long)]
+    bench: bool,
+}
+
+async fn progress_reporter(counters: Vec<Arc<AtomicU64>>) {
+    let mut interval = time::interval(Duration::from_millis(1000));
+    let mut c_old = counters.iter().map(|c| c.load(Ordering::Relaxed)).sum();
+    let mut c_min_old = counters
+        .iter()
+        .map(|c| c.load(Ordering::Relaxed))
+        .min()
+        .unwrap_or(0);
+    let mut started_at = None;
+    let mut skipped: u64 = 0;
+    loop {
+        interval.tick().await;
+        let c_new = counters.iter().map(|c| c.load(Ordering::Relaxed)).sum();
+        let c_min_new = counters
+            .iter()
+            .map(|c| c.load(Ordering::Relaxed))
+            .min()
+            .unwrap_or(0);
+        if c_new > 0 && started_at.is_none() {
+            started_at = Some(Instant::now());
+            skipped = c_new;
+        }
+        let avg_rps = started_at.map(|s| {
+            let dur = s.elapsed();
+            let dur_secs = dur.as_secs() as f64 + (dur.subsec_millis() as f64) / 1000.0;
+            let avg_rps = (c_new - skipped) as f64 / dur_secs;
+            (dur, avg_rps)
+        });
+        println!(
+            "sum rps {}, min rps {}, total {}, total min {}, (duration, avg sum rps) {:?}",
+            c_new - c_old,
+            c_min_new - c_min_old,
+            c_new,
+            c_min_new,
+            avg_rps
+        );
+        c_old = c_new;
+        c_min_old = c_min_new;
+    }
+}
+
+fn tli_from_u64(i: u64) -> Vec<u8> {
+    let mut timeline_id = vec![0xFF; 8];
+    timeline_id.extend_from_slice(&i.to_be_bytes());
+    timeline_id
+}
+
+async fn subscribe(client: Option<BrokerClientChannel>, counter: Arc<AtomicU64>, i: u64) {
+    let mut client = match client {
+        Some(c) => c,
+        None => BrokerClientChannel::connect_lazy(format!("http://{}", DEFAULT_LISTEN_ADDR))
+            .await
+            .unwrap(),
+    };
+
+    let key = SubscriptionKey::TenantTimelineId(ProtoTenantTimelineId {
+        tenant_id: vec![0xFF; 16],
+        timeline_id: tli_from_u64(i),
+    });
+    let request = SubscribeSafekeeperInfoRequest {
+        subscription_key: Some(key),
+    };
+    let mut stream = client
+        .subscribe_safekeeper_info(request)
+        .await
+        .unwrap()
+        .into_inner();
+
+    while let Some(_feature) = stream.message().await.unwrap() {
+        counter.fetch_add(1, Ordering::Relaxed);
+    }
+}
+
+async fn publish(client: Option<BrokerClientChannel>, n_keys: u64) {
+    let mut client = match client {
+        Some(c) => c,
+        None => BrokerClientChannel::connect_lazy(format!("http://{}", DEFAULT_LISTEN_ADDR))
+            .await
+            .unwrap(),
+    };
+    let mut counter: u64 = 0;
+
+    // create stream producing new values
+    let outbound = async_stream::stream!
{ + loop { + let info = SafekeeperTimelineInfo { + safekeeper_id: 1, + tenant_timeline_id: Some(ProtoTenantTimelineId { + tenant_id: vec![0xFF; 16], + timeline_id: tli_from_u64(counter % n_keys), + }), + last_log_term: 0, + flush_lsn: counter, + commit_lsn: 2, + backup_lsn: 3, + remote_consistent_lsn: 4, + peer_horizon_lsn: 5, + safekeeper_connstr: "zenith-1-sk-1.local:7676".to_owned(), + local_start_lsn: 0, + }; + counter += 1; + yield info; + } + }; + let response = client.publish_safekeeper_info(Request::new(outbound)).await; + println!("pub response is {:?}", response); +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let args = Args::parse(); + + let mut counters = Vec::with_capacity(args.num_subs as usize); + for _ in 0..args.num_subs { + counters.push(Arc::new(AtomicU64::new(0))); + } + let h = tokio::spawn(progress_reporter(counters.clone())); + + let c = BrokerClientChannel::connect_lazy(format!("http://{}", DEFAULT_LISTEN_ADDR)) + .await + .unwrap(); + + for i in 0..args.num_subs { + let c = Some(c.clone()); + tokio::spawn(subscribe(c, counters[i as usize].clone(), i)); + } + for _i in 0..args.num_pubs { + let c = None; + tokio::spawn(publish(c, args.num_subs as u64)); + } + + h.await?; + Ok(()) +} diff --git a/storage_broker/build.rs b/storage_broker/build.rs new file mode 100644 index 0000000000..244c7217de --- /dev/null +++ b/storage_broker/build.rs @@ -0,0 +1,7 @@ +fn main() -> Result<(), Box> { + // Generate code to deterministic location to make finding it easier. + tonic_build::configure() + .out_dir("proto/") // put generated code to proto/ + .compile(&["proto/broker.proto"], &["proto/"])?; + Ok(()) +} diff --git a/storage_broker/proto/.gitignore b/storage_broker/proto/.gitignore new file mode 100644 index 0000000000..c75b90ab1c --- /dev/null +++ b/storage_broker/proto/.gitignore @@ -0,0 +1,2 @@ +# protobuf generated code +storage_broker.rs diff --git a/storage_broker/proto/broker.proto b/storage_broker/proto/broker.proto new file mode 100644 index 0000000000..1a46896d02 --- /dev/null +++ b/storage_broker/proto/broker.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +import "google/protobuf/empty.proto"; + +package storage_broker; + +service BrokerService { + // Subscribe to safekeeper updates. + rpc SubscribeSafekeeperInfo(SubscribeSafekeeperInfoRequest) returns (stream SafekeeperTimelineInfo) {}; + + // Publish safekeeper updates. + rpc PublishSafekeeperInfo(stream SafekeeperTimelineInfo) returns (google.protobuf.Empty) {}; +} + +message SubscribeSafekeeperInfoRequest { + oneof subscription_key { + google.protobuf.Empty all = 1; // subscribe to everything + TenantTimelineId tenant_timeline_id = 2; // subscribe to specific timeline + } +} + +message SafekeeperTimelineInfo { + uint64 safekeeper_id = 1; + TenantTimelineId tenant_timeline_id = 2; + // Term of the last entry. + uint64 last_log_term = 3; + // LSN of the last record. + uint64 flush_lsn = 4; + // Up to which LSN safekeeper regards its WAL as committed. + uint64 commit_lsn = 5; + // LSN up to which safekeeper has backed WAL. + uint64 backup_lsn = 6; + // LSN of last checkpoint uploaded by pageserver. + uint64 remote_consistent_lsn = 7; + uint64 peer_horizon_lsn = 8; + uint64 local_start_lsn = 9; + // A connection string to use for WAL receiving. 
+ string safekeeper_connstr = 10; +} + +message TenantTimelineId { + bytes tenant_id = 1; + bytes timeline_id = 2; +} \ No newline at end of file diff --git a/storage_broker/src/bin/storage_broker.rs b/storage_broker/src/bin/storage_broker.rs new file mode 100644 index 0000000000..04f93a1ebb --- /dev/null +++ b/storage_broker/src/bin/storage_broker.rs @@ -0,0 +1,557 @@ +//! Simple pub-sub based on grpc (tonic) and Tokio broadcast channel for storage +//! nodes messaging. +//! +//! Subscriptions to 1) single timeline 2) all timelines are possible. We could +//! add subscription to the set of timelines to save grpc streams, but testing +//! shows many individual streams is also ok. +//! +//! Message is dropped if subscriber can't consume it, not affecting other +//! subscribers. +//! +//! Only safekeeper message is supported, but it is not hard to add something +//! else with generics. +use clap::{command, Parser}; +use futures_core::Stream; +use futures_util::StreamExt; +use hyper::header::CONTENT_TYPE; +use hyper::server::conn::AddrStream; +use hyper::service::{make_service_fn, service_fn}; +use hyper::{Body, Method, StatusCode}; +use parking_lot::RwLock; +use std::collections::HashMap; +use std::convert::Infallible; +use std::net::SocketAddr; +use std::pin::Pin; +use std::sync::Arc; +use std::task::Poll; +use std::time::Duration; +use tokio::sync::broadcast; +use tokio::sync::broadcast::error::RecvError; +use tokio::time; +use tonic::codegen::Service; +use tonic::transport::server::Connected; +use tonic::Code; +use tonic::{Request, Response, Status}; +use tracing::*; + +use metrics::{Encoder, TextEncoder}; +use storage_broker::metrics::{NUM_PUBS, NUM_SUBS_ALL, NUM_SUBS_TIMELINE}; +use storage_broker::proto::broker_service_server::{BrokerService, BrokerServiceServer}; +use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey as ProtoSubscriptionKey; +use storage_broker::proto::{SafekeeperTimelineInfo, SubscribeSafekeeperInfoRequest}; +use storage_broker::{parse_proto_ttid, EitherBody, DEFAULT_LISTEN_ADDR}; +use utils::id::TenantTimelineId; +use utils::logging::{self, LogFormat}; +use utils::project_git_version; + +project_git_version!(GIT_VERSION); + +const DEFAULT_CHAN_SIZE: usize = 128; +const DEFAULT_HTTP2_KEEPALIVE_INTERVAL: &str = "5000ms"; + +#[derive(Parser, Debug)] +#[command(version = GIT_VERSION, about = "Broker for neon storage nodes communication", long_about = None)] +struct Args { + /// Endpoint to listen on. + #[arg(short, long, default_value = DEFAULT_LISTEN_ADDR)] + listen_addr: SocketAddr, + /// Size of the queue to the subscriber. + #[arg(long, default_value_t = DEFAULT_CHAN_SIZE)] + chan_size: usize, + /// HTTP/2 keepalive interval. + #[arg(long, value_parser= humantime::parse_duration, default_value = DEFAULT_HTTP2_KEEPALIVE_INTERVAL)] + http2_keepalive_interval: Duration, + /// Format for logging, either 'plain' or 'json'. + #[arg(long, default_value = "plain")] + log_format: String, +} + +type PubId = u64; // id of publisher for registering in maps +type SubId = u64; // id of subscriber for registering in maps + +#[derive(Copy, Clone, Debug)] +enum SubscriptionKey { + All, + Timeline(TenantTimelineId), +} + +impl SubscriptionKey { + // Parse protobuf subkey (protobuf doesn't have fixed size bytes, we get vectors). 
+    pub fn from_proto_subscription_key(key: ProtoSubscriptionKey) -> Result<Self, Status> {
+        match key {
+            ProtoSubscriptionKey::All(_) => Ok(SubscriptionKey::All),
+            ProtoSubscriptionKey::TenantTimelineId(proto_ttid) => {
+                Ok(SubscriptionKey::Timeline(parse_proto_ttid(&proto_ttid)?))
+            }
+        }
+    }
+}
+
+// Channel to timeline subscribers.
+struct ChanToTimelineSub {
+    chan: broadcast::Sender<SafekeeperTimelineInfo>,
+    // Tracked separately to know when to delete the shmem entry.
+    // receiver_count() is inconvenient for that, as unregistering and dropping
+    // the receiver side happen at different moments.
+    num_subscribers: u64,
+}
+
+struct SharedState {
+    next_pub_id: PubId,
+    num_pubs: i64,
+    next_sub_id: SubId,
+    num_subs_to_timelines: i64,
+    chans_to_timeline_subs: HashMap<TenantTimelineId, ChanToTimelineSub>,
+    num_subs_to_all: i64,
+    chan_to_all_subs: broadcast::Sender<SafekeeperTimelineInfo>,
+}
+
+impl SharedState {
+    pub fn new(chan_size: usize) -> Self {
+        SharedState {
+            next_pub_id: 0,
+            num_pubs: 0,
+            next_sub_id: 0,
+            num_subs_to_timelines: 0,
+            chans_to_timeline_subs: HashMap::new(),
+            num_subs_to_all: 0,
+            chan_to_all_subs: broadcast::channel(chan_size).0,
+        }
+    }
+
+    // Register new publisher.
+    pub fn register_publisher(&mut self) -> PubId {
+        let pub_id = self.next_pub_id;
+        self.next_pub_id += 1;
+        self.num_pubs += 1;
+        NUM_PUBS.set(self.num_pubs);
+        pub_id
+    }
+
+    // Unregister publisher.
+    pub fn unregister_publisher(&mut self) {
+        self.num_pubs -= 1;
+        NUM_PUBS.set(self.num_pubs);
+    }
+
+    // Register new subscriber.
+    pub fn register_subscriber(
+        &mut self,
+        sub_key: SubscriptionKey,
+        chan_size: usize,
+    ) -> (SubId, broadcast::Receiver<SafekeeperTimelineInfo>) {
+        let sub_id = self.next_sub_id;
+        self.next_sub_id += 1;
+        let sub_rx = match sub_key {
+            SubscriptionKey::All => {
+                self.num_subs_to_all += 1;
+                NUM_SUBS_ALL.set(self.num_subs_to_all);
+                self.chan_to_all_subs.subscribe()
+            }
+            SubscriptionKey::Timeline(ttid) => {
+                self.num_subs_to_timelines += 1;
+                NUM_SUBS_TIMELINE.set(self.num_subs_to_timelines);
+                // Create a new broadcast channel for this key, or subscribe to
+                // the existing one.
+                let chan_to_timeline_sub =
+                    self.chans_to_timeline_subs
+                        .entry(ttid)
+                        .or_insert(ChanToTimelineSub {
+                            chan: broadcast::channel(chan_size).0,
+                            num_subscribers: 0,
+                        });
+                chan_to_timeline_sub.num_subscribers += 1;
+                chan_to_timeline_sub.chan.subscribe()
+            }
+        };
+        (sub_id, sub_rx)
+    }
+
+    // Unregister the subscriber.
+    pub fn unregister_subscriber(&mut self, sub_key: SubscriptionKey) {
+        match sub_key {
+            SubscriptionKey::All => {
+                self.num_subs_to_all -= 1;
+                NUM_SUBS_ALL.set(self.num_subs_to_all);
+            }
+            SubscriptionKey::Timeline(ttid) => {
+                self.num_subs_to_timelines -= 1;
+                NUM_SUBS_TIMELINE.set(self.num_subs_to_timelines);
+
+                // Remove from the map, destroying the channel, if we are the
+                // last subscriber to this timeline.
+
+                // Missing entry is a bug; we must have registered.
+                let chan_to_timeline_sub = self
+                    .chans_to_timeline_subs
+                    .get_mut(&ttid)
+                    .expect("failed to find sub entry in shmem during unregister");
+                chan_to_timeline_sub.num_subscribers -= 1;
+                if chan_to_timeline_sub.num_subscribers == 0 {
+                    self.chans_to_timeline_subs.remove(&ttid);
+                }
+            }
+        }
+    }
+}
+
+// SharedState wrapper.
+#[derive(Clone)]
+struct Registry {
+    shared_state: Arc<RwLock<SharedState>>,
+    chan_size: usize,
+}
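`ChanToTimelineSub` and `SharedState` above lean on a key property of tokio's broadcast channel: a slow subscriber loses the oldest messages rather than stalling the publisher, and learns how many it missed. A small runnable demonstration (capacity and message counts are illustrative):

```rust
// Sketch: the drop-on-lag broadcast semantics the broker relies on. With
// capacity 2, a subscriber that falls behind skips the oldest messages and
// is told how many were dropped, while the sender never blocks.
use tokio::sync::broadcast::{self, error::RecvError};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = broadcast::channel::<u64>(2);

    // Publisher races ahead: 5 messages into a channel that retains 2.
    for i in 0..5 {
        tx.send(i).unwrap();
    }

    // The lagged subscriber is told it skipped 3 messages...
    assert!(matches!(rx.recv().await, Err(RecvError::Lagged(3))));
    // ...and then resumes from the oldest retained one.
    assert_eq!(rx.recv().await.unwrap(), 3);
    assert_eq!(rx.recv().await.unwrap(), 4);
    println!("slow subscriber skipped 3 messages; publisher never blocked");
}
```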
+impl Registry {
+    // Register a new publisher in shared state.
+    pub fn register_publisher(&self, remote_addr: SocketAddr) -> Publisher {
+        let pub_id = self.shared_state.write().register_publisher();
+        info!("publication started id={} addr={:?}", pub_id, remote_addr);
+        Publisher {
+            id: pub_id,
+            registry: self.clone(),
+            remote_addr,
+        }
+    }
+
+    pub fn unregister_publisher(&self, publisher: &Publisher) {
+        self.shared_state.write().unregister_publisher();
+        info!(
+            "publication ended id={} addr={:?}",
+            publisher.id, publisher.remote_addr
+        );
+    }
+
+    // Register a new subscriber in shared state.
+    pub fn register_subscriber(
+        &self,
+        sub_key: SubscriptionKey,
+        remote_addr: SocketAddr,
+    ) -> Subscriber {
+        let (sub_id, sub_rx) = self
+            .shared_state
+            .write()
+            .register_subscriber(sub_key, self.chan_size);
+        info!(
+            "subscription started id={}, key={:?}, addr={:?}",
+            sub_id, sub_key, remote_addr
+        );
+        Subscriber {
+            id: sub_id,
+            key: sub_key,
+            sub_rx,
+            registry: self.clone(),
+            remote_addr,
+        }
+    }
+
+    // Unregister the subscriber.
+    pub fn unregister_subscriber(&self, subscriber: &Subscriber) {
+        self.shared_state
+            .write()
+            .unregister_subscriber(subscriber.key);
+        info!(
+            "subscription ended id={}, key={:?}, addr={:?}",
+            subscriber.id, subscriber.key, subscriber.remote_addr
+        );
+    }
+}
+
+// Private subscriber state.
+struct Subscriber {
+    id: SubId,
+    key: SubscriptionKey,
+    // Subscriber receives messages from publishers here.
+    sub_rx: broadcast::Receiver<SafekeeperTimelineInfo>,
+    // To unregister itself from shared state in Drop.
+    registry: Registry,
+    // For logging.
+    remote_addr: SocketAddr,
+}
+
+impl Drop for Subscriber {
+    fn drop(&mut self) {
+        self.registry.unregister_subscriber(self);
+    }
+}
+
+// Private publisher state.
+struct Publisher {
+    id: PubId,
+    registry: Registry,
+    // For logging.
+    remote_addr: SocketAddr,
+}
+
+impl Publisher {
+    // Send msg to relevant subscribers.
+    pub fn send_msg(&mut self, msg: &SafekeeperTimelineInfo) -> Result<(), Status> {
+        // Send the message to subscribers for everything.
+        let shared_state = self.registry.shared_state.read();
+        // Err means there are no subscribers; that is fine.
+        shared_state.chan_to_all_subs.send(msg.clone()).ok();
+
+        // Send the message to per-timeline subscribers.
+        let ttid =
+            parse_proto_ttid(msg.tenant_timeline_id.as_ref().ok_or_else(|| {
+                Status::new(Code::InvalidArgument, "missing tenant_timeline_id")
+            })?)?;
+        if let Some(subs) = shared_state.chans_to_timeline_subs.get(&ttid) {
+            // Err can't happen here: tx is destroyed only when the last
+            // subscriber is removed from the map, along with tx itself.
+            subs.chan
+                .send(msg.clone())
+                .expect("rx is still in the map with zero subscribers");
+        }
+        Ok(())
+    }
+}
+
+impl Drop for Publisher {
+    fn drop(&mut self) {
+        self.registry.unregister_publisher(self);
+    }
+}
+
+struct Broker {
+    registry: Registry,
+}
+
+#[tonic::async_trait]
+impl BrokerService for Broker {
+    async fn publish_safekeeper_info(
+        &self,
+        request: Request<tonic::Streaming<SafekeeperTimelineInfo>>,
+    ) -> Result<Response<()>, Status> {
+        let remote_addr = request
+            .remote_addr()
+            .expect("TCPConnectInfo inserted by handler");
+        let mut publisher = self.registry.register_publisher(remote_addr);
+
+        let mut stream = request.into_inner();
+
+        loop {
+            match stream.next().await {
+                Some(Ok(msg)) => publisher.send_msg(&msg)?,
+                Some(Err(e)) => return Err(e), // grpc error from the stream
+                None => break,                 // closed stream
+            }
+        }
+
+        Ok(Response::new(()))
+    }
+
+    type SubscribeSafekeeperInfoStream =
+        Pin<Box<dyn Stream<Item = Result<SafekeeperTimelineInfo, Status>> + Send + 'static>>;
+
+    async fn subscribe_safekeeper_info(
+        &self,
+        request: Request<SubscribeSafekeeperInfoRequest>,
+    ) -> Result<Response<Self::SubscribeSafekeeperInfoStream>, Status> {
+        let remote_addr = request
+            .remote_addr()
+            .expect("TCPConnectInfo inserted by handler");
+        let proto_key = request
+            .into_inner()
+            .subscription_key
+            .ok_or_else(|| Status::new(Code::InvalidArgument, "missing subscription key"))?;
+        let sub_key = SubscriptionKey::from_proto_subscription_key(proto_key)?;
+        let mut subscriber = self.registry.register_subscriber(sub_key, remote_addr);
+
+        // Transform rx into a stream with item = Result, as the method result demands.
+        let output = async_stream::try_stream! {
+            let mut warn_interval = time::interval(Duration::from_millis(1000));
+            let mut missed_msgs: u64 = 0;
+            loop {
+                match subscriber.sub_rx.recv().await {
+                    Ok(info) => yield info,
+                    Err(RecvError::Lagged(skipped_msg)) => {
+                        missed_msgs += skipped_msg;
+                        if let Poll::Ready(_) = futures::poll!(Box::pin(warn_interval.tick())) {
+                            warn!("dropped {} messages, channel is full", missed_msgs);
+                            missed_msgs = 0;
+                        }
+                    }
+                    Err(RecvError::Closed) => {
+                        // Can't happen: we never drop the channel while there is a subscriber.
+                        Err(Status::new(Code::Internal, "channel unexpectedly closed"))?;
+                    }
+                }
+            }
+        };
+
+        Ok(Response::new(
+            Box::pin(output) as Self::SubscribeSafekeeperInfoStream
+        ))
+    }
+}
+
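To make the server half above concrete, here is a hedged sketch of a subscriber client, modeled directly on `storage_broker/benches/rps.rs` earlier in this patch; it assumes the `storage_broker` crate re-exports introduced in `lib.rs` below.

```rust
// Sketch: a minimal per-timeline subscriber against BrokerService.
use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey;
use storage_broker::proto::{
    SubscribeSafekeeperInfoRequest, TenantTimelineId as ProtoTenantTimelineId,
};
use storage_broker::{BrokerClientChannel, DEFAULT_LISTEN_ADDR};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Lazy connect: the channel dials on first use.
    let mut client =
        BrokerClientChannel::connect_lazy(format!("http://{}", DEFAULT_LISTEN_ADDR)).await?;

    // Subscribe to a single timeline; ids are 16-byte values on the wire,
    // as the proto file defines. The bytes here are arbitrary.
    let request = SubscribeSafekeeperInfoRequest {
        subscription_key: Some(SubscriptionKey::TenantTimelineId(ProtoTenantTimelineId {
            tenant_id: vec![0xFF; 16],
            timeline_id: vec![0xFF; 16],
        })),
    };

    let mut stream = client.subscribe_safekeeper_info(request).await?.into_inner();
    while let Some(info) = stream.message().await? {
        println!(
            "safekeeper {} flushed up to {}",
            info.safekeeper_id, info.flush_lsn
        );
    }
    Ok(())
}
```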
+// We serve only metrics and healthcheck through http1.
+async fn http1_handler(
+    req: hyper::Request<Body>,
+) -> Result<hyper::Response<Body>, Infallible> {
+    let resp = match (req.method(), req.uri().path()) {
+        (&Method::GET, "/metrics") => {
+            let mut buffer = vec![];
+            let metrics = metrics::gather();
+            let encoder = TextEncoder::new();
+            encoder.encode(&metrics, &mut buffer).unwrap();
+
+            hyper::Response::builder()
+                .status(StatusCode::OK)
+                .header(CONTENT_TYPE, encoder.format_type())
+                .body(Body::from(buffer))
+                .unwrap()
+        }
+        (&Method::GET, "/status") => hyper::Response::builder()
+            .status(StatusCode::OK)
+            .body(Body::empty())
+            .unwrap(),
+        _ => hyper::Response::builder()
+            .status(StatusCode::NOT_FOUND)
+            .body(Body::empty())
+            .unwrap(),
+    };
+    Ok(resp)
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let args = Args::parse();
+
+    logging::init(LogFormat::from_config(&args.log_format)?)?;
+    info!("version: {GIT_VERSION}");
+
+    let registry = Registry {
+        shared_state: Arc::new(RwLock::new(SharedState::new(args.chan_size))),
+        chan_size: args.chan_size,
+    };
+    let storage_broker_impl = Broker {
+        registry: registry.clone(),
+    };
+    let storage_broker_server = BrokerServiceServer::new(storage_broker_impl);
+
+    info!("listening on {}", &args.listen_addr);
+
+    // grpc is served along with http1 for metrics on a single port, hence we
+    // don't use tonic's Server.
+    hyper::Server::bind(&args.listen_addr)
+        .http2_keep_alive_interval(Some(args.http2_keepalive_interval))
+        .serve(make_service_fn(move |conn: &AddrStream| {
+            let storage_broker_server_cloned = storage_broker_server.clone();
+            let connect_info = conn.connect_info();
+            async move {
+                Ok::<_, Infallible>(service_fn(move |mut req| {
+                    // That's what tonic's MakeSvc.call does to pass conninfo to
+                    // the request handler (and where its request.remote_addr()
+                    // expects to find it).
+                    req.extensions_mut().insert(connect_info.clone());
+
+                    // Technically this second clone is not needed, but consumption
+                    // by the async block is apparently unavoidable. BTW, the error
+                    // message is enigmatic, see
+                    // https://github.com/rust-lang/rust/issues/68119
+                    //
+                    // We could get away without the async block at all, but then
+                    // we would need to resort to futures::Either to merge the
+                    // results, which doesn't read as nicely.
+                    let mut storage_broker_server_svc = storage_broker_server_cloned.clone();
+                    async move {
+                        if req.headers().get("content-type").map(|x| x.as_bytes())
+                            == Some(b"application/grpc")
+                        {
+                            let res_resp = storage_broker_server_svc.call(req).await;
+                            // Grpc and http1 handlers have slightly different
+                            // Response types: it is UnsyncBoxBody for the
+                            // former one (not sure why) and plain hyper::Body
+                            // for the latter. Both implement HttpBody though,
+                            // and EitherBody is used to merge them.
+ res_resp.map(|resp| resp.map(EitherBody::Left)) + } else { + let res_resp = http1_handler(req).await; + res_resp.map(|resp| resp.map(EitherBody::Right)) + } + } + })) + } + })) + .await?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId; + use tokio::sync::broadcast::error::TryRecvError; + use utils::id::{TenantId, TimelineId}; + + fn msg(timeline_id: Vec) -> SafekeeperTimelineInfo { + SafekeeperTimelineInfo { + safekeeper_id: 1, + tenant_timeline_id: Some(ProtoTenantTimelineId { + tenant_id: vec![0x00; 16], + timeline_id, + }), + last_log_term: 0, + flush_lsn: 1, + commit_lsn: 2, + backup_lsn: 3, + remote_consistent_lsn: 4, + peer_horizon_lsn: 5, + safekeeper_connstr: "neon-1-sk-1.local:7676".to_owned(), + local_start_lsn: 0, + } + } + + fn tli_from_u64(i: u64) -> Vec { + let mut timeline_id = vec![0xFF; 8]; + timeline_id.extend_from_slice(&i.to_be_bytes()); + timeline_id + } + + fn mock_addr() -> SocketAddr { + "127.0.0.1:8080".parse().unwrap() + } + + #[tokio::test] + async fn test_registry() { + let registry = Registry { + shared_state: Arc::new(RwLock::new(SharedState::new(16))), + chan_size: 16, + }; + + // subscribe to timeline 2 + let ttid_2 = TenantTimelineId { + tenant_id: TenantId::from_slice(&[0x00; 16]).unwrap(), + timeline_id: TimelineId::from_slice(&tli_from_u64(2)).unwrap(), + }; + let sub_key_2 = SubscriptionKey::Timeline(ttid_2); + let mut subscriber_2 = registry.register_subscriber(sub_key_2, mock_addr()); + let mut subscriber_all = registry.register_subscriber(SubscriptionKey::All, mock_addr()); + + // send two messages with different keys + let msg_1 = msg(tli_from_u64(1)); + let msg_2 = msg(tli_from_u64(2)); + let mut publisher = registry.register_publisher(mock_addr()); + publisher.send_msg(&msg_1).expect("failed to send msg"); + publisher.send_msg(&msg_2).expect("failed to send msg"); + + // msg with key 2 should arrive to subscriber_2 + assert_eq!(subscriber_2.sub_rx.try_recv().unwrap(), msg_2); + + // but nothing more + assert_eq!( + subscriber_2.sub_rx.try_recv().unwrap_err(), + TryRecvError::Empty + ); + + // subscriber_all should receive both messages + assert_eq!(subscriber_all.sub_rx.try_recv().unwrap(), msg_1); + assert_eq!(subscriber_all.sub_rx.try_recv().unwrap(), msg_2); + assert_eq!( + subscriber_all.sub_rx.try_recv().unwrap_err(), + TryRecvError::Empty + ); + } +} diff --git a/storage_broker/src/lib.rs b/storage_broker/src/lib.rs new file mode 100644 index 0000000000..39e72ca721 --- /dev/null +++ b/storage_broker/src/lib.rs @@ -0,0 +1,108 @@ +use hyper::body::HttpBody; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tonic::codegen::StdError; +use tonic::{transport::Channel, Code, Status}; +use utils::id::{TenantId, TenantTimelineId, TimelineId}; + +use proto::{ + broker_service_client::BrokerServiceClient, TenantTimelineId as ProtoTenantTimelineId, +}; + +// Code generated by protobuf. +pub mod proto { + include!("../proto/storage_broker.rs"); +} + +pub mod metrics; + +// Re-exports to avoid direct tonic dependency in user crates. +pub use tonic::Request; +pub use tonic::Streaming; + +pub const DEFAULT_LISTEN_ADDR: &str = "127.0.0.1:50051"; + +// NeonBrokerClient charged with tonic provided Channel transport; helps to +// avoid depending on tonic directly in user crates. +pub type BrokerClientChannel = BrokerServiceClient; + +impl BrokerClientChannel { + /// Create a new client to the given endpoint, but don't actually connect until the first request. 
+    pub async fn connect_lazy<D>(dst: D) -> Result<Self, tonic::transport::Error>
+    where
+        D: std::convert::TryInto<tonic::transport::Endpoint>,
+        D::Error: Into<StdError>,
+    {
+        let conn = tonic::transport::Endpoint::new(dst)?.connect_lazy();
+        Ok(Self::new(conn))
+    }
+}
+
+// parse variable length bytes from protobuf
+pub fn parse_proto_ttid(proto_ttid: &ProtoTenantTimelineId) -> Result<TenantTimelineId, Status> {
+    let tenant_id = TenantId::from_slice(&proto_ttid.tenant_id)
+        .map_err(|e| Status::new(Code::InvalidArgument, format!("malformed tenant_id: {}", e)))?;
+    let timeline_id = TimelineId::from_slice(&proto_ttid.timeline_id).map_err(|e| {
+        Status::new(
+            Code::InvalidArgument,
+            format!("malformed timeline_id: {}", e),
+        )
+    })?;
+    Ok(TenantTimelineId {
+        tenant_id,
+        timeline_id,
+    })
+}
+
+// These several usages don't justify an anyhow dependency, though it would
+// work as well.
+type AnyError = Box<dyn std::error::Error + Send + Sync + 'static>;
+
+// Provides impl HttpBody for two different types implementing it. Inspired by
+// https://github.com/hyperium/tonic/blob/master/examples/src/hyper_warp/server.rs
+pub enum EitherBody<A, B> {
+    Left(A),
+    Right(B),
+}
+
+impl<A, B> HttpBody for EitherBody<A, B>
+where
+    A: HttpBody + Send + Unpin,
+    B: HttpBody<Data = A::Data> + Send + Unpin,
+    A::Error: Into<AnyError>,
+    B::Error: Into<AnyError>,
+{
+    type Data = A::Data;
+    type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
+
+    fn is_end_stream(&self) -> bool {
+        match self {
+            EitherBody::Left(b) => b.is_end_stream(),
+            EitherBody::Right(b) => b.is_end_stream(),
+        }
+    }
+
+    fn poll_data(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<Option<Result<Self::Data, Self::Error>>> {
+        match self.get_mut() {
+            EitherBody::Left(b) => Pin::new(b).poll_data(cx).map(map_option_err),
+            EitherBody::Right(b) => Pin::new(b).poll_data(cx).map(map_option_err),
+        }
+    }
+
+    fn poll_trailers(
+        self: Pin<&mut Self>,
+        cx: &mut Context<'_>,
+    ) -> Poll<Result<Option<hyper::HeaderMap>, Self::Error>> {
+        match self.get_mut() {
+            EitherBody::Left(b) => Pin::new(b).poll_trailers(cx).map_err(Into::into),
+            EitherBody::Right(b) => Pin::new(b).poll_trailers(cx).map_err(Into::into),
+        }
+    }
+}
+
+fn map_option_err<T, U: Into<AnyError>>(err: Option<Result<T, U>>) -> Option<Result<T, AnyError>> {
+    err.map(|e| e.map_err(Into::into))
+}
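Since `parse_proto_ttid` is the validation boundary between the wire format (variable-length `bytes`) and the fixed-size ids, a quick round-trip sketch may help; it uses only APIs visible in this patch (`from_slice` and `parse_proto_ttid`), and the byte values are arbitrary.

```rust
// Sketch: round-tripping ids through the wire type.
use storage_broker::parse_proto_ttid;
use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
use utils::id::{TenantId, TimelineId};

fn main() {
    // The proto side carries ids as variable-length `bytes`...
    let proto = ProtoTenantTimelineId {
        tenant_id: vec![0xAA; 16],
        timeline_id: vec![0xBB; 16],
    };

    // ...which parse_proto_ttid validates into fixed-size ids, rejecting
    // anything that is not exactly 16 bytes with InvalidArgument.
    let ttid = parse_proto_ttid(&proto).expect("16-byte ids are valid");
    assert_eq!(ttid.tenant_id, TenantId::from_slice(&[0xAA; 16]).unwrap());
    assert_eq!(ttid.timeline_id, TimelineId::from_slice(&[0xBB; 16]).unwrap());

    // A truncated id fails cleanly instead of panicking.
    let bad = ProtoTenantTimelineId {
        tenant_id: vec![0xAA; 3],
        timeline_id: vec![0xBB; 16],
    };
    assert!(parse_proto_ttid(&bad).is_err());
}
```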
diff --git a/storage_broker/src/metrics.rs b/storage_broker/src/metrics.rs
new file mode 100644
index 0000000000..f0649d0f68
--- /dev/null
+++ b/storage_broker/src/metrics.rs
@@ -0,0 +1,25 @@
+//! Broker metrics.
+
+use metrics::{register_int_gauge, IntGauge};
+use once_cell::sync::Lazy;
+
+pub static NUM_PUBS: Lazy<IntGauge> = Lazy::new(|| {
+    register_int_gauge!("storage_broker_active_publishers", "Number of publications")
+        .expect("Failed to register metric")
+});
+
+pub static NUM_SUBS_TIMELINE: Lazy<IntGauge> = Lazy::new(|| {
+    register_int_gauge!(
+        "storage_broker_per_timeline_active_subscribers",
+        "Number of subscriptions to a particular tenant timeline id"
+    )
+    .expect("Failed to register metric")
+});
+
+pub static NUM_SUBS_ALL: Lazy<IntGauge> = Lazy::new(|| {
+    register_int_gauge!(
+        "storage_broker_all_keys_active_subscribers",
+        "Number of subscriptions to all keys"
+    )
+    .expect("Failed to register metric")
+});
diff --git a/test_runner/fixtures/neon_fixtures.py b/test_runner/fixtures/neon_fixtures.py
index 98115ff7ce..4e66ed677c 100644
--- a/test_runner/fixtures/neon_fixtures.py
+++ b/test_runner/fixtures/neon_fixtures.py
@@ -420,8 +420,9 @@ class AuthKeys:
     pub: str
     priv: str
 
-    def generate_management_token(self) -> str:
-        token = jwt.encode({"scope": "pageserverapi"}, self.priv, algorithm="RS256")
+    def generate_token(self, *, scope: str, **token_data: str) -> str:
+        token = jwt.encode({"scope": scope, **token_data}, self.priv, algorithm="RS256")
+
         # cast(Any, self.priv)
 
         # jwt.encode can return 'bytes' or 'str', depending on Python version or type
         # hinting or something (not sure what). If it returned 'bytes', convert it to 'str'
@@ -431,17 +432,14 @@ class AuthKeys:
 
         return token
 
+    def generate_pageserver_token(self) -> str:
+        return self.generate_token(scope="pageserverapi")
+
+    def generate_safekeeper_token(self) -> str:
+        return self.generate_token(scope="safekeeperdata")
+
     def generate_tenant_token(self, tenant_id: TenantId) -> str:
-        token = jwt.encode(
-            {"scope": "tenant", "tenant_id": str(tenant_id)},
-            self.priv,
-            algorithm="RS256",
-        )
-
-        if isinstance(token, bytes):
-            token = token.decode()
-
-        return token
+        return self.generate_token(scope="tenant", tenant_id=str(tenant_id))
 
 
 class MockS3Server:
@@ -1761,6 +1759,8 @@ class NeonPageserver(PgProtocol):
         ".*manual_gc.*is_shutdown_requested\\(\\) called in an unexpected task or thread.*",
         ".*tenant_list: timeline is not found in remote index while it is present in the tenants registry.*",
         ".*Removing intermediate uninit mark file.*",
+        # FIXME: known race condition in TaskHandle: https://github.com/neondatabase/neon/issues/2885
+        ".*sender is dropped while join handle is still alive.*",
     ]
 
     def start(
@@ -2094,7 +2094,8 @@ class NeonProxy(PgProtocol):
 
     def start(self):
         """
-        Starts a proxy with option '--auth-backend postgres' and a postgres instance already provided though '--auth-endpoint '."
+        Starts a proxy with option '--auth-backend postgres' and a postgres instance
+        already provided through '--auth-endpoint '."
""" assert self._popen is None assert self.auth_endpoint is not None @@ -2499,7 +2500,8 @@ class Safekeeper: # "replication=0" hacks psycopg not to send additional queries # on startup, see https://github.com/psycopg/psycopg2/pull/482 - connstr = f"host=localhost port={self.port.pg} replication=0 options='-c timeline_id={timeline_id} tenant_id={tenant_id}'" + token = self.env.auth_keys.generate_tenant_token(tenant_id) + connstr = f"host=localhost port={self.port.pg} password={token} replication=0 options='-c timeline_id={timeline_id} tenant_id={tenant_id}'" with closing(psycopg2.connect(connstr)) as conn: # server doesn't support transactions diff --git a/test_runner/performance/test_bulk_update.py b/test_runner/performance/test_bulk_update.py new file mode 100644 index 0000000000..bcd26013e5 --- /dev/null +++ b/test_runner/performance/test_bulk_update.py @@ -0,0 +1,62 @@ +import pytest +from fixtures.neon_fixtures import NeonEnvBuilder, wait_for_last_flush_lsn + + +# +# Benchmark effect of prefetch on bulk update operations +# +# A sequential scan that's part of a bulk update is the same as any other sequential scan, +# but dirtying the pages as you go affects the last-written LSN tracking. We used to have +# an issue with the last-written LSN cache where rapidly evicting dirty pages always +# invalidated the prefetched responses, which showed up in bad performance in this test. +# +@pytest.mark.timeout(10000) +@pytest.mark.parametrize("fillfactor", [10, 50, 100]) +def test_bulk_update(neon_env_builder: NeonEnvBuilder, zenbenchmark, fillfactor): + + env = neon_env_builder.init_start() + n_records = 1000000 + + timeline_id = env.neon_cli.create_branch("test_bulk_update") + tenant_id = env.initial_tenant + pg = env.postgres.create_start("test_bulk_update") + cur = pg.connect().cursor() + cur.execute("set statement_timeout=0") + + cur.execute(f"create table t(x integer) WITH (fillfactor={fillfactor})") + + with zenbenchmark.record_duration("insert-1"): + cur.execute(f"insert into t values (generate_series(1,{n_records}))") + + cur.execute("vacuum t") + wait_for_last_flush_lsn(env, pg, tenant_id, timeline_id) + + with zenbenchmark.record_duration("update-no-prefetch"): + cur.execute("update t set x=x+1") + + cur.execute("vacuum t") + wait_for_last_flush_lsn(env, pg, tenant_id, timeline_id) + + with zenbenchmark.record_duration("delete-no-prefetch"): + cur.execute("delete from t") + + cur.execute("drop table t") + cur.execute("set enable_seqscan_prefetch=on") + cur.execute("set seqscan_prefetch_buffers=100") + + cur.execute(f"create table t2(x integer) WITH (fillfactor={fillfactor})") + + with zenbenchmark.record_duration("insert-2"): + cur.execute(f"insert into t2 values (generate_series(1,{n_records}))") + + cur.execute("vacuum t2") + wait_for_last_flush_lsn(env, pg, tenant_id, timeline_id) + + with zenbenchmark.record_duration("update-with-prefetch"): + cur.execute("update t2 set x=x+1") + + cur.execute("vacuum t2") + wait_for_last_flush_lsn(env, pg, tenant_id, timeline_id) + + with zenbenchmark.record_duration("delete-with-prefetch"): + cur.execute("delete from t2") diff --git a/test_runner/performance/test_perf_pgbench.py b/test_runner/performance/test_perf_pgbench.py index 0ed3e45971..015cc40a72 100644 --- a/test_runner/performance/test_perf_pgbench.py +++ b/test_runner/performance/test_perf_pgbench.py @@ -88,7 +88,7 @@ def run_test_pgbench(env: PgCompare, scale: int, duration: int, workload_type: P env.zenbenchmark.record("scale", scale, "", MetricReport.TEST_PARAM) password = 
env.pg.default_options.get("password", None) - options = "-cstatement_timeout=1h " + env.pg.default_options.get("options", "") + options = "-cstatement_timeout=0 " + env.pg.default_options.get("options", "") # drop password from the connection string by passing password=None and set password separately connstr = env.pg.connstr(password=None, options=options) diff --git a/test_runner/performance/test_seqscans.py b/test_runner/performance/test_seqscans.py index 1755c70324..a0a1dbd01d 100644 --- a/test_runner/performance/test_seqscans.py +++ b/test_runner/performance/test_seqscans.py @@ -22,16 +22,21 @@ from pytest_lazyfixture import lazy_fixture # type: ignore ], ) @pytest.mark.parametrize( - "env", + "env, scale", [ - # Run on all envs - pytest.param(lazy_fixture("neon_compare"), id="neon"), - pytest.param(lazy_fixture("vanilla_compare"), id="vanilla"), - pytest.param(lazy_fixture("remote_compare"), id="remote", marks=pytest.mark.remote_cluster), + # Run on all envs. Use 50x larger table on remote cluster to make sure + # it doesn't fit in shared buffers, which are larger on remote than local. + pytest.param(lazy_fixture("neon_compare"), 1, id="neon"), + pytest.param(lazy_fixture("vanilla_compare"), 1, id="vanilla"), + pytest.param( + lazy_fixture("remote_compare"), 50, id="remote", marks=pytest.mark.remote_cluster + ), ], ) -def test_seqscans(env: PgCompare, rows: int, iters: int, workers: int): - with closing(env.pg.connect()) as conn: +def test_seqscans(env: PgCompare, scale: int, rows: int, iters: int, workers: int): + rows = scale * rows + + with closing(env.pg.connect(options="-cstatement_timeout=0")) as conn: with conn.cursor() as cur: cur.execute("drop table if exists t;") cur.execute("create table t (i integer);") diff --git a/test_runner/performance/test_wal_backpressure.py b/test_runner/performance/test_wal_backpressure.py index dd840acd25..f9a18c84fd 100644 --- a/test_runner/performance/test_wal_backpressure.py +++ b/test_runner/performance/test_wal_backpressure.py @@ -154,7 +154,7 @@ def test_pgbench_simple_update_workload(pg_compare: PgCompare, scale: int, durat def start_pgbench_intensive_initialization(env: PgCompare, scale: int, done_event: threading.Event): with env.record_duration("run_duration"): - # Needs to increase the statement timeout (default: 120s) because the + # Disable statement timeout (default: 120s) because the # initialization step can be slow with a large scale. 
diff --git a/test_runner/performance/test_wal_backpressure.py b/test_runner/performance/test_wal_backpressure.py
index dd840acd25..f9a18c84fd 100644
--- a/test_runner/performance/test_wal_backpressure.py
+++ b/test_runner/performance/test_wal_backpressure.py
@@ -154,7 +154,7 @@ def test_pgbench_simple_update_workload(pg_compare: PgCompare, scale: int, durat
 
 def start_pgbench_intensive_initialization(env: PgCompare, scale: int, done_event: threading.Event):
     with env.record_duration("run_duration"):
-        # Needs to increase the statement timeout (default: 120s) because the
+        # Disable the statement timeout (default: 120s) because the
         # initialization step can be slow with a large scale.
         env.pg_bin.run_capture(
             [
@@ -162,7 +162,7 @@ def start_pgbench_intensive_initialization(env: PgCompare, scale: int, done_even
                 f"-s{scale}",
                 "-i",
                 "-Idtg",
-                env.pg.connstr(options="-cstatement_timeout=600s"),
+                env.pg.connstr(options="-cstatement_timeout=0"),
             ]
         )
 
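For reference, the pg_bin.run_capture call above boils down to a plain pgbench initialization run. A rough standalone equivalent, where the connection URI is a placeholder and the -I letters are standard pgbench init steps:

    import subprocess

    subprocess.run(
        [
            "pgbench",
            "-s100",   # scale factor: 100,000 pgbench_accounts rows per unit
            "-i",      # initialize tables instead of running a benchmark
            "-Idtg",   # init steps: (d)rop tables, create (t)ables, (g)enerate data client-side
            "postgres://user:password@host:5432/db?options=-cstatement_timeout%3D0",
        ],
        check=True,
    )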
diff --git a/test_runner/regress/test_auth.py b/test_runner/regress/test_auth.py
index 8443aa029f..f3d153d934 100644
--- a/test_runner/regress/test_auth.py
+++ b/test_runner/regress/test_auth.py
@@ -1,7 +1,7 @@
 from contextlib import closing
 
 import pytest
-from fixtures.neon_fixtures import NeonEnvBuilder, PageserverApiException
+from fixtures.neon_fixtures import NeonEnvBuilder, PageserverApiException, PgProtocol
 from fixtures.types import TenantId
 
 
@@ -16,13 +16,13 @@ def test_pageserver_auth(neon_env_builder: NeonEnvBuilder):
     invalid_tenant_token = env.auth_keys.generate_tenant_token(TenantId.generate())
     invalid_tenant_http_client = env.pageserver.http_client(invalid_tenant_token)
 
-    management_token = env.auth_keys.generate_management_token()
-    management_http_client = env.pageserver.http_client(management_token)
+    pageserver_token = env.auth_keys.generate_pageserver_token()
+    pageserver_http_client = env.pageserver.http_client(pageserver_token)
 
     # this does not invoke auth check and only decodes jwt and checks it for validity
     # check both tokens
     ps.safe_psql("set FOO", password=tenant_token)
-    ps.safe_psql("set FOO", password=management_token)
+    ps.safe_psql("set FOO", password=pageserver_token)
 
     new_timeline_id = env.neon_cli.create_branch(
         "test_pageserver_auth", tenant_id=env.initial_tenant
@@ -33,7 +33,7 @@ def test_pageserver_auth(neon_env_builder: NeonEnvBuilder):
         tenant_id=env.initial_tenant, ancestor_timeline_id=new_timeline_id
     )
     # console can create branches for tenant
-    management_http_client.timeline_create(
+    pageserver_http_client.timeline_create(
         tenant_id=env.initial_tenant, ancestor_timeline_id=new_timeline_id
     )
@@ -46,7 +46,7 @@ def test_pageserver_auth(neon_env_builder: NeonEnvBuilder):
     )
 
     # create tenant using management token
-    management_http_client.tenant_create()
+    pageserver_http_client.tenant_create()
 
     # fail to create tenant using tenant token
     with pytest.raises(
@@ -73,3 +73,73 @@ def test_compute_auth_to_pageserver(neon_env_builder: NeonEnvBuilder):
             cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'")
             cur.execute("SELECT sum(key) FROM t")
             assert cur.fetchone() == (5000050000,)
+
+
+@pytest.mark.parametrize("auth_enabled", [False, True])
+def test_auth_failures(neon_env_builder: NeonEnvBuilder, auth_enabled: bool):
+    neon_env_builder.auth_enabled = auth_enabled
+    env = neon_env_builder.init_start()
+
+    branch = f"test_auth_failures_auth_enabled_{auth_enabled}"
+    timeline_id = env.neon_cli.create_branch(branch)
+    env.postgres.create_start(branch)
+
+    tenant_token = env.auth_keys.generate_tenant_token(env.initial_tenant)
+    invalid_tenant_token = env.auth_keys.generate_tenant_token(TenantId.generate())
+    pageserver_token = env.auth_keys.generate_pageserver_token()
+    safekeeper_token = env.auth_keys.generate_safekeeper_token()
+
+    def check_connection(
+        pg_protocol: PgProtocol, command: str, expect_success: bool, **conn_kwargs
+    ):
+        def op():
+            with closing(pg_protocol.connect(**conn_kwargs)) as conn:
+                with conn.cursor() as cur:
+                    cur.execute(command)
+
+        if expect_success:
+            op()
+        else:
+            with pytest.raises(Exception):
+                op()
+
+    def check_pageserver(expect_success: bool, **conn_kwargs):
+        check_connection(
+            env.pageserver,
+            f"get_last_record_rlsn {env.initial_tenant} {timeline_id}",
+            expect_success,
+            **conn_kwargs,
+        )
+
+    check_pageserver(not auth_enabled)
+    if auth_enabled:
+        check_pageserver(True, password=tenant_token)
+
+        env.pageserver.allowed_errors.append(".*Tenant id mismatch. Permission denied.*")
+        check_pageserver(False, password=invalid_tenant_token)
+
+        check_pageserver(True, password=pageserver_token)
+
+        env.pageserver.allowed_errors.append(
+            ".*SafekeeperData scope makes no sense for Pageserver.*"
+        )
+        check_pageserver(False, password=safekeeper_token)
+
+    def check_safekeeper(expect_success: bool, **conn_kwargs):
+        check_connection(
+            PgProtocol(
+                host="localhost",
+                port=env.safekeepers[0].port.pg,
+                options=f"ztenantid={env.initial_tenant} ztimelineid={timeline_id}",
+            ),
+            "IDENTIFY_SYSTEM",
+            expect_success,
+            **conn_kwargs,
+        )
+
+    check_safekeeper(not auth_enabled)
+    if auth_enabled:
+        check_safekeeper(True, password=tenant_token)
+        check_safekeeper(False, password=invalid_tenant_token)
+        check_safekeeper(False, password=pageserver_token)
+        check_safekeeper(True, password=safekeeper_token)
diff --git a/test_runner/regress/test_pageserver_api.py b/test_runner/regress/test_pageserver_api.py
index ab321eeb02..eb22ac5f99 100644
--- a/test_runner/regress/test_pageserver_api.py
+++ b/test_runner/regress/test_pageserver_api.py
@@ -181,7 +181,7 @@ def test_pageserver_http_api_client_auth_enabled(neon_env_builde
     neon_env_builder.auth_enabled = True
     env = neon_env_builder.init_start()
 
-    management_token = env.auth_keys.generate_management_token()
+    pageserver_token = env.auth_keys.generate_pageserver_token()
 
-    with env.pageserver.http_client(auth_token=management_token) as client:
+    with env.pageserver.http_client(auth_token=pageserver_token) as client:
         check_client(client, env.initial_tenant)
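Both tests above hand a scoped JWT to the pageserver as an ordinary bearer token; http_client(auth_token=...) is a thin wrapper around that. An illustrative sketch with requests, where the host, port, and exact endpoint path are assumptions rather than verified API details:

    import requests

    def list_tenants(pageserver_http: str, token: str) -> list:
        # e.g. pageserver_http = "http://localhost:9898" (placeholder);
        # a pageserver-scoped token should succeed here, while a token
        # scoped to a single tenant should be rejected with 401/403.
        resp = requests.get(
            f"{pageserver_http}/v1/tenant",
            headers={"Authorization": f"Bearer {token}"},
        )
        resp.raise_for_status()
        return resp.json()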
diff --git a/test_runner/regress/test_proxy.py b/test_runner/regress/test_proxy.py
index b8cfb21a5b..e868d6b616 100644
--- a/test_runner/regress/test_proxy.py
+++ b/test_runner/regress/test_proxy.py
@@ -1,5 +1,4 @@
 import json
-import subprocess
 from urllib.parse import urlparse
 
 import psycopg2
@@ -29,108 +28,65 @@ def test_password_hack(static_proxy: NeonProxy):
     static_proxy.safe_psql("select 1", sslsni=0, user=user, password=magic)
 
 
-def get_session_id_from_uri_line(uri_prefix, uri_line):
+def get_session_id(uri_prefix, uri_line):
     assert uri_prefix in uri_line
     url_parts = urlparse(uri_line)
     psql_session_id = url_parts.path[1:]
-    assert psql_session_id.isalnum(), "session_id should only contain alphanumeric chars."
-    link_auth_uri_prefix = uri_line[: -len(url_parts.path)]
-    # invariant: the prefix must match the uri_prefix.
-    assert (
-        link_auth_uri_prefix == uri_prefix
-    ), f"Line='{uri_line}' should contain a http auth link of form '{uri_prefix}/'."
-    # invariant: the entire link_auth_uri should be on its own line, module spaces.
-    assert " ".join(uri_line.split(" ")) == f"{uri_prefix}/{psql_session_id}"
+    assert psql_session_id.isalnum(), "session_id should only contain alphanumeric chars"
     return psql_session_id
 
 
-def create_and_send_db_info(local_vanilla_pg, psql_session_id, mgmt_port):
-    pg_user = "proxy"
-    pg_password = "password"
-
-    local_vanilla_pg.start()
-    query = f"create user {pg_user} with login superuser password '{pg_password}'"
-    local_vanilla_pg.safe_psql(query)
-
-    port = local_vanilla_pg.default_options["port"]
-    host = local_vanilla_pg.default_options["host"]
-    dbname = local_vanilla_pg.default_options["dbname"]
-
-    db_info_dict = {
-        "session_id": psql_session_id,
-        "result": {
-            "Success": {
-                "host": host,
-                "port": port,
-                "dbname": dbname,
-                "user": pg_user,
-                "password": pg_password,
-            }
-        },
-    }
-    db_info_str = json.dumps(db_info_dict)
-    cmd_args = [
-        "psql",
-        "-h",
-        "127.0.0.1",  # localhost
-        "-p",
-        f"{mgmt_port}",
-        "-c",
-        db_info_str,
-    ]
-
-    log.info(f"Sending to proxy the user and db info: {' '.join(cmd_args)}")
-    p = subprocess.Popen(cmd_args, stdout=subprocess.PIPE)
-    out, err = p.communicate()
-    assert "ok" in str(out)
-
-
-async def get_uri_line_from_process_welcome_notice(link_auth_uri_prefix, proc):
-    """
-    Returns the line from the welcome notice from proc containing link_auth_uri_prefix.
-    :param link_auth_uri_prefix: the uri prefix used to indicate the line of interest
-    :param proc: the process to read the welcome message from.
-    :return: a line containing the full link authentication uri.
-    """
-    max_num_lines_of_welcome_message = 15
-    for attempt in range(max_num_lines_of_welcome_message):
-        raw_line = await proc.stderr.readline()
-        line = raw_line.decode("utf-8").strip()
+async def find_auth_link(link_auth_uri_prefix, proc):
+    for _ in range(100):
+        line = (await proc.stderr.readline()).decode("utf-8").strip()
+        log.info(f"psql line: {line}")
         if link_auth_uri_prefix in line:
+            log.info(f"SUCCESS, found auth url: {line}")
             return line
-    assert False, f"did not find line containing '{link_auth_uri_prefix}'"
+
+
+async def activate_link_auth(local_vanilla_pg, link_proxy, psql_session_id):
+    pg_user = "proxy"
+
+    log.info("creating a new user for link auth test")
+    local_vanilla_pg.start()
+    local_vanilla_pg.safe_psql(f"create user {pg_user} with login superuser")
+
+    db_info = json.dumps(
+        {
+            "session_id": psql_session_id,
+            "result": {
+                "Success": {
+                    "host": local_vanilla_pg.default_options["host"],
+                    "port": local_vanilla_pg.default_options["port"],
+                    "dbname": local_vanilla_pg.default_options["dbname"],
+                    "user": pg_user,
+                    "project": "irrelevant",
+                }
+            },
+        }
+    )
+
+    log.info("sending session activation message")
+    psql = await PSQL(host=link_proxy.host, port=link_proxy.mgmt_port).run(db_info)
+    out = (await psql.stdout.read()).decode("utf-8").strip()
+    assert out == "ok"
 
 
 @pytest.mark.asyncio
 async def test_psql_session_id(vanilla_pg: VanillaPostgres, link_proxy: NeonProxy):
-    """
-    Test copied and modified from: test_project_psql_link_auth test from cloud/tests_e2e/tests/test_project.py
-    Step 1. establish connection to the proxy
-    Step 2. retrieve session_id:
-    Step 2.1: read welcome message
-    Step 2.2: parse session_id
-    Step 3. create a vanilla_pg and send user and db info via command line (using Popen) a psql query via mgmt port to proxy.
-    Step 4. assert that select 1 has been executed correctly.
-    """
-
-    psql = PSQL(
-        host=link_proxy.host,
-        port=link_proxy.proxy_port,
-    )
-    proc = await psql.run("select 42")
+    psql = await PSQL(host=link_proxy.host, port=link_proxy.proxy_port).run("select 42")
 
     uri_prefix = link_proxy.link_auth_uri_prefix
-    line_str = await get_uri_line_from_process_welcome_notice(uri_prefix, proc)
+    link = await find_auth_link(uri_prefix, psql)
 
-    psql_session_id = get_session_id_from_uri_line(uri_prefix, line_str)
-    log.info(f"Parsed psql_session_id='{psql_session_id}' from Neon welcome message.")
+    psql_session_id = get_session_id(uri_prefix, link)
+    await activate_link_auth(vanilla_pg, link_proxy, psql_session_id)
 
-    create_and_send_db_info(vanilla_pg, psql_session_id, link_proxy.mgmt_port)
-
-    assert proc.stdout is not None
-    out = (await proc.stdout.read()).decode("utf-8").strip()
+    assert psql.stdout is not None
+    out = (await psql.stdout.read()).decode("utf-8").strip()
     assert out == "42"
diff --git a/test_runner/regress/test_tenant_size.py b/test_runner/regress/test_tenant_size.py
index d9aed351a5..f8197696bc 100644
--- a/test_runner/regress/test_tenant_size.py
+++ b/test_runner/regress/test_tenant_size.py
@@ -192,10 +192,8 @@ def test_get_tenant_size_with_multiple_branches(neon_env_builder: NeonEnvBuilder
         "first-branch", main_branch_name, tenant_id
     )
 
-    # unsure why this happens, the size difference is more than a page alignment
     size_after_first_branch = http_client.tenant_size(tenant_id)
-    assert size_after_first_branch > size_at_branch
-    assert size_after_first_branch - size_at_branch == gc_horizon
+    assert size_after_first_branch == size_at_branch
 
     first_branch_pg = env.postgres.create_start("first-branch", tenant_id=tenant_id)
 
@@ -221,7 +219,7 @@ def test_get_tenant_size_with_multiple_branches(neon_env_builder: NeonEnvBuilder
         "second-branch", main_branch_name, tenant_id
    )
     size_after_second_branch = http_client.tenant_size(tenant_id)
-    assert size_after_second_branch > size_after_continuing_on_main
+    assert size_after_second_branch == size_after_continuing_on_main
 
     second_branch_pg = env.postgres.create_start("second-branch", tenant_id=tenant_id)
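The tightened assertions encode the new expectation that creating an empty branch leaves the synthetic tenant size unchanged. The tenant_size fixture helper is essentially a single HTTP GET against the pageserver; a hedged sketch, where the /v1/tenant/{id}/size path and the response shape are assumptions inferred from the fixture rather than verified API details:

    import requests

    def tenant_size(pageserver_http: str, tenant_id: str, token: str) -> int:
        # Endpoint path and "size" field are assumed, not taken from the
        # pageserver API definition.
        resp = requests.get(
            f"{pageserver_http}/v1/tenant/{tenant_id}/size",
            headers={"Authorization": f"Bearer {token}"},
        )
        resp.raise_for_status()
        return resp.json()["size"]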
diff --git a/test_runner/regress/test_truncate.py b/test_runner/regress/test_truncate.py
new file mode 100644
index 0000000000..8a45276f5a
--- /dev/null
+++ b/test_runner/regress/test_truncate.py
@@ -0,0 +1,46 @@
+import time
+
+from fixtures.neon_fixtures import NeonEnvBuilder
+
+
+#
+# Test truncation of FSM and VM forks of a relation
+#
+def test_truncate(neon_env_builder: NeonEnvBuilder, zenbenchmark):
+
+    env = neon_env_builder.init_start()
+    n_records = 10000
+    n_iter = 10
+
+    # Problems with truncation of the FSM and VM forks are most frequently detected during page
+    # reconstruction triggered by image layer generation, so adjust the defaults to create image layers more often.
+    tenant, _ = env.neon_cli.create_tenant(
+        conf={
+            "gc_period": "100 m",
+            "gc_horizon": "1048576",
+            "checkpoint_distance": "1000000",
+            "compaction_period": "1 s",
+            "compaction_threshold": "3",
+            "image_creation_threshold": "1",
+            "compaction_target_size": "1000000",
+        }
+    )
+
+    env.neon_cli.create_timeline("test_truncate", tenant_id=tenant)
+    pg = env.postgres.create_start("test_truncate", tenant_id=tenant)
+    cur = pg.connect().cursor()
+    cur.execute("create table t1(x integer)")
+    cur.execute(f"insert into t1 values (generate_series(1,{n_records}))")
+    cur.execute("vacuum t1")
+    for i in range(n_iter):
+        cur.execute(f"delete from t1 where x>{n_records//2}")
+        cur.execute("vacuum t1")
+        time.sleep(1)  # give the pageserver a chance to create image layers
+        cur.execute(f"insert into t1 values (generate_series({n_records//2+1}, {n_records}))")
+        cur.execute("vacuum t1")
+        time.sleep(1)  # give the pageserver a chance to create image layers
+
+    cur.execute("select count(*) from t1")
+    res = cur.fetchone()
+    assert res is not None
+    assert res[0] == n_records
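As a companion check outside the test harness, the forks this test exercises can be measured directly: pg_relation_size accepts a fork name, so the free-space-map and visibility-map sizes are easy to observe shrinking after a truncating VACUUM (connection string is a placeholder):

    from contextlib import closing

    import psycopg2

    def fork_sizes(connstr: str, table: str = "t1") -> dict:
        # Returns the size in bytes of each fork; after VACUUM truncates the
        # relation, the fsm and vm forks should shrink along with main.
        with closing(psycopg2.connect(connstr)) as conn:
            with conn.cursor() as cur:
                sizes = {}
                for fork in ("main", "fsm", "vm"):
                    cur.execute("select pg_relation_size(%s, %s)", (table, fork))
                    sizes[fork] = cur.fetchone()[0]
                return sizes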
"0.1", default-features = false, features = ["i128", "std"] } num-traits = { version = "0.2", features = ["i128", "libm", "std"] } -prost = { version = "0.10", features = ["prost-derive", "std"] } +prost-93f6ce9d446188ac = { package = "prost", version = "0.10", features = ["prost-derive", "std"] } +prost-a6292c17cd707f01 = { package = "prost", version = "0.11", features = ["prost-derive", "std"] } rand = { version = "0.8", features = ["alloc", "getrandom", "libc", "rand_chacha", "rand_hc", "small_rng", "std", "std_rng"] } regex = { version = "1", features = ["aho-corasick", "memchr", "perf", "perf-cache", "perf-dfa", "perf-inline", "perf-literal", "std", "unicode", "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment"] } regex-syntax = { version = "0.6", features = ["unicode", "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment"] } @@ -58,7 +59,8 @@ libc = { version = "0.2", features = ["extra_traits", "std"] } log = { version = "0.4", default-features = false, features = ["serde", "std"] } memchr = { version = "2", features = ["std"] } nom = { version = "7", features = ["alloc", "std"] } -prost = { version = "0.10", features = ["prost-derive", "std"] } +prost-93f6ce9d446188ac = { package = "prost", version = "0.10", features = ["prost-derive", "std"] } +prost-a6292c17cd707f01 = { package = "prost", version = "0.11", features = ["prost-derive", "std"] } regex = { version = "1", features = ["aho-corasick", "memchr", "perf", "perf-cache", "perf-dfa", "perf-inline", "perf-literal", "std", "unicode", "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment"] } regex-syntax = { version = "0.6", features = ["unicode", "unicode-age", "unicode-bool", "unicode-case", "unicode-gencat", "unicode-perl", "unicode-script", "unicode-segment"] } serde = { version = "1", features = ["alloc", "derive", "serde_derive", "std"] }