diff --git a/.circleci/ansible/neon-stress.hosts b/.circleci/ansible/neon-stress.hosts new file mode 100644 index 0000000000..283ec0e8b3 --- /dev/null +++ b/.circleci/ansible/neon-stress.hosts @@ -0,0 +1,19 @@ +[pageservers] +neon-stress-ps-1 console_region_id=1 +neon-stress-ps-2 console_region_id=1 + +[safekeepers] +neon-stress-sk-1 console_region_id=1 +neon-stress-sk-2 console_region_id=1 +neon-stress-sk-3 console_region_id=1 + +[storage:children] +pageservers +safekeepers + +[storage:vars] +console_mgmt_base_url = http://neon-stress-console.local +bucket_name = neon-storage-ireland +bucket_region = eu-west-1 +etcd_endpoints = etcd-stress.local:2379 +safekeeper_enable_s3_offload = false diff --git a/.circleci/ansible/production.hosts b/.circleci/ansible/production.hosts index f32b57154c..2ed8f517f7 100644 --- a/.circleci/ansible/production.hosts +++ b/.circleci/ansible/production.hosts @@ -15,3 +15,4 @@ console_mgmt_base_url = http://console-release.local bucket_name = zenith-storage-oregon bucket_region = us-west-2 etcd_endpoints = etcd-release.local:2379 +safekeeper_enable_s3_offload = true diff --git a/.circleci/ansible/staging.hosts b/.circleci/ansible/staging.hosts index 71166c531e..8e89e843d9 100644 --- a/.circleci/ansible/staging.hosts +++ b/.circleci/ansible/staging.hosts @@ -4,8 +4,8 @@ zenith-us-stage-ps-2 console_region_id=27 [safekeepers] zenith-us-stage-sk-1 console_region_id=27 -zenith-us-stage-sk-2 console_region_id=27 zenith-us-stage-sk-4 console_region_id=27 +zenith-us-stage-sk-5 console_region_id=27 [storage:children] pageservers @@ -16,3 +16,4 @@ console_mgmt_base_url = http://console-staging.local bucket_name = zenith-staging-storage-us-east-1 bucket_region = us-east-1 etcd_endpoints = etcd-staging.local:2379 +safekeeper_enable_s3_offload = false diff --git a/.circleci/ansible/systemd/pageserver.service b/.circleci/ansible/systemd/pageserver.service index d346643e58..54a7b1ba0a 100644 --- a/.circleci/ansible/systemd/pageserver.service +++ b/.circleci/ansible/systemd/pageserver.service @@ -6,7 +6,7 @@ After=network.target auditd.service Type=simple User=pageserver Environment=RUST_BACKTRACE=1 ZENITH_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/lib -ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -D /storage/pageserver/data +ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -c "broker_endpoints=['{{ etcd_endpoints }}']" -D /storage/pageserver/data ExecReload=/bin/kill -HUP $MAINPID KillMode=mixed KillSignal=SIGINT diff --git a/.circleci/ansible/systemd/safekeeper.service b/.circleci/ansible/systemd/safekeeper.service index cac38d8756..55088db859 100644 --- a/.circleci/ansible/systemd/safekeeper.service +++ b/.circleci/ansible/systemd/safekeeper.service @@ -6,7 +6,7 @@ After=network.target auditd.service Type=simple User=safekeeper Environment=RUST_BACKTRACE=1 ZENITH_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/lib -ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -p {{ first_pageserver }}:6400 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} +ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -p {{ first_pageserver }}:6400 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} 
--enable-s3-offload={{ safekeeper_enable_s3_offload }} ExecReload=/bin/kill -HUP $MAINPID KillMode=mixed KillSignal=SIGINT diff --git a/.circleci/config.yml b/.circleci/config.yml index 85654b5d45..41f7693726 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -222,6 +222,12 @@ jobs: key: v2-python-deps-{{ checksum "poetry.lock" }} paths: - /home/circleci/.cache/pypoetry/virtualenvs + - run: + name: Print versions + when: always + command: | + poetry run python --version + poetry show - run: name: Run yapf to ensure code format when: always @@ -355,7 +361,7 @@ jobs: when: always command: | du -sh /tmp/test_output/* - find /tmp/test_output -type f ! -name "pg.log" ! -name "pageserver.log" ! -name "safekeeper.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" ! -name "flamegraph.svg" -delete + find /tmp/test_output -type f ! -name "*.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" ! -name "flamegraph.svg" ! -name "*.metrics" -delete du -sh /tmp/test_output/* - store_artifacts: path: /tmp/test_output @@ -587,6 +593,56 @@ jobs: helm upgrade neon-proxy neondatabase/neon-proxy --install -f .circleci/helm-values/staging.proxy.yaml --set image.tag=${DOCKER_TAG} --wait helm upgrade neon-proxy-scram neondatabase/neon-proxy --install -f .circleci/helm-values/staging.proxy-scram.yaml --set image.tag=${DOCKER_TAG} --wait + deploy-neon-stress: + docker: + - image: cimg/python:3.10 + steps: + - checkout + - setup_remote_docker + - run: + name: Setup ansible + command: | + pip install --progress-bar off --user ansible boto3 + - run: + name: Redeploy + command: | + cd "$(pwd)/.circleci/ansible" + + ./get_binaries.sh + + echo "${TELEPORT_SSH_KEY}" | tr -d '\n'| base64 --decode >ssh-key + echo "${TELEPORT_SSH_CERT}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub + chmod 0600 ssh-key + ssh-add ssh-key + rm -f ssh-key ssh-key-cert.pub + + ansible-playbook deploy.yaml -i neon-stress.hosts + rm -f neon_install.tar.gz .neon_current_version + + deploy-neon-stress-proxy: + docker: + - image: cimg/base:2021.04 + environment: + KUBECONFIG: .kubeconfig + steps: + - checkout + - run: + name: Store kubeconfig file + command: | + echo "${NEON_STRESS_KUBECONFIG_DATA}" | base64 --decode > ${KUBECONFIG} + chmod 0600 ${KUBECONFIG} + - run: + name: Setup helm v3 + command: | + curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + helm repo add neondatabase https://neondatabase.github.io/helm-charts + - run: + name: Re-deploy proxy + command: | + DOCKER_TAG=$(git log --oneline|wc -l) + helm upgrade neon-stress-proxy neondatabase/neon-proxy --install -f .circleci/helm-values/neon-stress.proxy.yaml --set image.tag=${DOCKER_TAG} --wait + helm upgrade neon-stress-proxy-scram neondatabase/neon-proxy --install -f .circleci/helm-values/neon-stress.proxy-scram.yaml --set image.tag=${DOCKER_TAG} --wait + deploy-release: docker: - image: cimg/python:3.10 @@ -634,7 +690,8 @@ jobs: name: Re-deploy proxy command: | DOCKER_TAG="release-$(git log --oneline|wc -l)" - helm upgrade zenith-proxy zenithdb/zenith-proxy --install -f .circleci/helm-values/production.proxy.yaml --set image.tag=${DOCKER_TAG} --wait + helm upgrade neon-proxy neondatabase/neon-proxy --install -f .circleci/helm-values/production.proxy.yaml --set image.tag=${DOCKER_TAG} --wait + helm upgrade neon-proxy-scram neondatabase/neon-proxy --install -f .circleci/helm-values/production.proxy-scram.yaml --set 
image.tag=${DOCKER_TAG} --wait # Trigger a new remote CI job remote-ci-trigger: @@ -771,6 +828,25 @@ workflows: requires: - docker-image + - deploy-neon-stress: + # Context gives an ability to login + context: Docker Hub + # deploy only for commits to main + filters: + branches: + only: + - main + requires: + - docker-image + - deploy-neon-stress-proxy: + # deploy only for commits to main + filters: + branches: + only: + - main + requires: + - docker-image + - docker-image-release: # Context gives an ability to login context: Docker Hub diff --git a/.circleci/helm-values/neon-stress.proxy-scram.yaml b/.circleci/helm-values/neon-stress.proxy-scram.yaml new file mode 100644 index 0000000000..8f55d31c87 --- /dev/null +++ b/.circleci/helm-values/neon-stress.proxy-scram.yaml @@ -0,0 +1,26 @@ +fullnameOverride: "neon-stress-proxy-scram" + +settings: + authBackend: "console" + authEndpoint: "http://neon-stress-console.local/management/api/v2" + domain: "*.stress.neon.tech" + +podLabels: + zenith_service: proxy-scram + zenith_env: staging + zenith_region: eu-west-1 + zenith_region_slug: ireland + +exposedService: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + external-dns.alpha.kubernetes.io/hostname: '*.stress.neon.tech' + +metrics: + enabled: true + serviceMonitor: + enabled: true + selector: + release: kube-prometheus-stack diff --git a/.circleci/helm-values/neon-stress.proxy.yaml b/.circleci/helm-values/neon-stress.proxy.yaml new file mode 100644 index 0000000000..8236f9873a --- /dev/null +++ b/.circleci/helm-values/neon-stress.proxy.yaml @@ -0,0 +1,34 @@ +fullnameOverride: "neon-stress-proxy" + +settings: + authEndpoint: "https://console.dev.neon.tech/authenticate_proxy_request/" + uri: "https://console.dev.neon.tech/psql_session/" + +# -- Additional labels for zenith-proxy pods +podLabels: + zenith_service: proxy + zenith_env: staging + zenith_region: eu-west-1 + zenith_region_slug: ireland + +service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internal + external-dns.alpha.kubernetes.io/hostname: neon-stress-proxy.local + type: LoadBalancer + +exposedService: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + external-dns.alpha.kubernetes.io/hostname: connect.dev.neon.tech + +metrics: + enabled: true + serviceMonitor: + enabled: true + selector: + release: kube-prometheus-stack diff --git a/.circleci/helm-values/production.proxy-scram.yaml b/.circleci/helm-values/production.proxy-scram.yaml new file mode 100644 index 0000000000..54b0fbcd98 --- /dev/null +++ b/.circleci/helm-values/production.proxy-scram.yaml @@ -0,0 +1,24 @@ +settings: + authBackend: "console" + authEndpoint: "http://console-release.local/management/api/v2" + domain: "*.cloud.neon.tech" + +podLabels: + zenith_service: proxy-scram + zenith_env: production + zenith_region: us-west-2 + zenith_region_slug: oregon + +exposedService: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + 
external-dns.alpha.kubernetes.io/hostname: '*.cloud.neon.tech' + +metrics: + enabled: true + serviceMonitor: + enabled: true + selector: + release: kube-prometheus-stack diff --git a/.circleci/helm-values/production.proxy.yaml b/.circleci/helm-values/production.proxy.yaml index e13968a6a8..87c61c90cf 100644 --- a/.circleci/helm-values/production.proxy.yaml +++ b/.circleci/helm-values/production.proxy.yaml @@ -1,9 +1,3 @@ -# Helm chart values for zenith-proxy. -# This is a YAML-formatted file. - -image: - repository: neondatabase/neon - settings: authEndpoint: "https://console.neon.tech/authenticate_proxy_request/" uri: "https://console.neon.tech/psql_session/" @@ -28,7 +22,7 @@ exposedService: service.beta.kubernetes.io/aws-load-balancer-type: external service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - external-dns.alpha.kubernetes.io/hostname: start.zenith.tech,connect.neon.tech,pg.neon.tech + external-dns.alpha.kubernetes.io/hostname: connect.neon.tech,pg.neon.tech metrics: enabled: true diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 6d109b9bb5..79b2ba05d0 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -1,6 +1,8 @@ name: Build and Test -on: push +on: + pull_request: + push: jobs: regression-check: diff --git a/COPYRIGHT b/COPYRIGHT deleted file mode 100644 index 448363b12f..0000000000 --- a/COPYRIGHT +++ /dev/null @@ -1,20 +0,0 @@ -This software is licensed under the Apache 2.0 License: - ----------------------------------------------------------------------------- -Copyright 2021 Zenith Labs, Inc - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ----------------------------------------------------------------------------- - -The PostgreSQL submodule in vendor/postgres is licensed under the -PostgreSQL license. See vendor/postgres/COPYRIGHT. 
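For illustration, here is roughly what the templated safekeeper `ExecStart` line above would expand to once Ansible substitutes the neon-stress inventory values (`etcd_endpoints`, `safekeeper_enable_s3_offload`); `first_pageserver` is not defined in the inventory shown and is assumed here to resolve to the first host in the `[pageservers]` group:

```sh
# Hypothetical rendering for host neon-stress-sk-1; first_pageserver shown as neon-stress-ps-1 for illustration.
/usr/local/bin/safekeeper -l neon-stress-sk-1.local:6500 \
    --listen-http neon-stress-sk-1.local:7676 \
    -p neon-stress-ps-1:6400 \
    -D /storage/safekeeper/data \
    --broker-endpoints=etcd-stress.local:2379 \
    --enable-s3-offload=false
```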
diff --git a/Cargo.lock b/Cargo.lock index 148517a777..6acad6dac8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -166,7 +166,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.4.4", "object", "rustc-demangle", ] @@ -868,6 +868,18 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279fb028e20b3c4c320317955b77c5e0c9701f05a1d309905d6fc702cdc5053e" +[[package]] +name = "flate2" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39522e96686d38f4bc984b9198e3a0613264abaebaff2c5c918bfa6b6da09af" +dependencies = [ + "cfg-if", + "crc32fast", + "libc", + "miniz_oxide 0.5.1", +] + [[package]] name = "fnv" version = "1.0.7" @@ -1527,6 +1539,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "miniz_oxide" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" +dependencies = [ + "adler", +] + [[package]] name = "mio" version = "0.8.2" @@ -1582,6 +1603,7 @@ dependencies = [ "clap 3.0.14", "comfy-table", "control_plane", + "git-version", "pageserver", "postgres", "postgres_ffi", @@ -1771,8 +1793,10 @@ dependencies = [ "crc32c", "crossbeam-utils", "daemonize", + "etcd_broker", "fail", "futures", + "git-version", "hex", "hex-literal", "humantime", @@ -2023,15 +2047,18 @@ dependencies = [ "bytes", "chrono", "crc32c", + "env_logger", "hex", "lazy_static", "log", "memoffset", + "postgres", "rand", "regex", "serde", "thiserror", "utils", + "wal_generate", "workspace_hack", ] @@ -2085,6 +2112,20 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "procfs" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95e344cafeaeefe487300c361654bcfc85db3ac53619eeccced29f5ea18c4c70" +dependencies = [ + "bitflags", + "byteorder", + "flate2", + "hex", + "lazy_static", + "libc", +] + [[package]] name = "prometheus" version = "0.13.0" @@ -2094,8 +2135,10 @@ dependencies = [ "cfg-if", "fnv", "lazy_static", + "libc", "memchr", "parking_lot 0.11.2", + "procfs", "thiserror", ] @@ -2164,6 +2207,7 @@ dependencies = [ "bytes", "clap 3.0.14", "futures", + "git-version", "hashbrown", "hex", "hmac 0.12.1", @@ -2616,6 +2660,7 @@ dependencies = [ "daemonize", "etcd_broker", "fs2", + "git-version", "hex", "humantime", "hyper", @@ -3585,6 +3630,18 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wal_generate" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap 3.0.14", + "env_logger", + "log", + "postgres", + "tempfile", +] + [[package]] name = "walkdir" version = "2.3.2" diff --git a/Dockerfile.compute-tools b/Dockerfile.compute-tools index bbe0f517ce..f0c9b9d56a 100644 --- a/Dockerfile.compute-tools +++ b/Dockerfile.compute-tools @@ -15,4 +15,4 @@ RUN set -e \ # Final image that only has one binary FROM debian:buster-slim -COPY --from=rust-build /home/circleci/project/target/release/zenith_ctl /usr/local/bin/zenith_ctl +COPY --from=rust-build /home/circleci/project/target/release/compute_ctl /usr/local/bin/compute_ctl diff --git a/Makefile b/Makefile index d2a79661f2..fdfc64f6fa 100644 --- a/Makefile +++ b/Makefile @@ -12,15 +12,21 @@ endif # BUILD_TYPE ?= debug ifeq ($(BUILD_TYPE),release) - PG_CONFIGURE_OPTS = --enable-debug + PG_CONFIGURE_OPTS = --enable-debug --with-openssl PG_CFLAGS = -O2 -g3 $(CFLAGS) # 
Unfortunately, `--profile=...` is a nightly feature CARGO_BUILD_FLAGS += --release else ifeq ($(BUILD_TYPE),debug) - PG_CONFIGURE_OPTS = --enable-debug --enable-cassert --enable-depend + PG_CONFIGURE_OPTS = --enable-debug --with-openssl --enable-cassert --enable-depend PG_CFLAGS = -O0 -g3 $(CFLAGS) else -$(error Bad build type `$(BUILD_TYPE)', see Makefile for options) + $(error Bad build type '$(BUILD_TYPE)', see Makefile for options) +endif + +# macOS with brew-installed openssl requires explicit paths +UNAME_S := $(shell uname -s) +ifeq ($(UNAME_S),Darwin) + PG_CONFIGURE_OPTS += --with-includes=/usr/local/opt/openssl/include --with-libraries=/usr/local/opt/openssl/lib endif # Choose whether we should be silent or verbose diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000000..47cc4e798f --- /dev/null +++ b/NOTICE @@ -0,0 +1,5 @@ +Neon +Copyright 2022 Neon Inc. + +The PostgreSQL submodule in vendor/postgres is licensed under the +PostgreSQL license. See vendor/postgres/COPYRIGHT. diff --git a/README.md b/README.md index af384d2672..8e8bf1a9b2 100644 --- a/README.md +++ b/README.md @@ -23,29 +23,70 @@ Pageserver consists of: ## Running local installation + +#### building on Ubuntu/ Debian (Linux) 1. Install build dependencies and other useful packages On Ubuntu or Debian this set of packages should be sufficient to build the code: ```text apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \ -libssl-dev clang pkg-config libpq-dev +libssl-dev clang pkg-config libpq-dev libprotobuf-dev etcd ``` -[Rust] 1.58 or later is also required. +2. [Install Rust](https://www.rust-lang.org/tools/install) +``` +# recommended approach from https://www.rust-lang.org/tools/install +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` -To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `tmp_install/bin` and `tmp_install/lib`, respectively. +3. Install PostgreSQL Client +``` +apt install postgresql-client +``` -To run the integration tests or Python scripts (not required to use the code), install -Python (3.7 or higher), and install python3 packages using `./scripts/pysync` (requires poetry) in the project directory. - -2. Build neon and patched postgres +4. Build neon and patched postgres ```sh git clone --recursive https://github.com/neondatabase/neon.git cd neon make -j5 ``` -3. Start pageserver and postgres on top of it (should be called from repo root): +#### building on OSX (12.3.1) +1. Install XCode and dependencies +``` +xcode-select --install +brew install protobuf etcd +``` + +2. [Install Rust](https://www.rust-lang.org/tools/install) +``` +# recommended approach from https://www.rust-lang.org/tools/install +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +3. Install PostgreSQL Client +``` +# from https://stackoverflow.com/questions/44654216/correct-way-to-install-psql-without-full-postgres-on-macos +brew install libpq +brew link --force libpq +``` + +4. Build neon and patched postgres +```sh +git clone --recursive https://github.com/neondatabase/neon.git +cd neon +make -j5 +``` + +#### dependency installation notes +To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `tmp_install/bin` and `tmp_install/lib`, respectively. 
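As a minimal sketch (assuming the in-tree `tmp_install` prefix produced by the build, run from the repo root), pointing an existing shell at the locally built client could look like:

```sh
# Make the freshly built psql and client libraries visible to the current shell.
export PATH="$(pwd)/tmp_install/bin:$PATH"
export LD_LIBRARY_PATH="$(pwd)/tmp_install/lib:$LD_LIBRARY_PATH"
psql --version
```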
+ +To run the integration tests or Python scripts (not required to use the code), install +Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires poetry) in the project directory. + + +#### running neon database +1. Start pageserver and postgres on top of it (should be called from repo root): ```sh # Create repository in .zenith with proper paths to binaries and data # Later that would be responsibility of a package install script @@ -75,7 +116,7 @@ Starting postgres node at 'host=127.0.0.1 port=55432 user=zenith_admin dbname=po main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16B5BA8 running ``` -4. Now it is possible to connect to postgres and run some queries: +2. Now it is possible to connect to postgres and run some queries: ```text > psql -p55432 -h 127.0.0.1 -U zenith_admin postgres postgres=# CREATE TABLE t(key int primary key, value text); @@ -89,7 +130,7 @@ postgres=# select * from t; (1 row) ``` -5. And create branches and run postgres on them: +3. And create branches and run postgres on them: ```sh # create branch named migration_check > ./target/debug/neon_local timeline branch --branch-name migration_check @@ -133,7 +174,7 @@ postgres=# select * from t; (1 row) ``` -6. If you want to run tests afterwards (see below), you have to stop all the running the pageserver, safekeeper and postgres instances +4. If you want to run tests afterwards (see below), you have to stop all the running the pageserver, safekeeper and postgres instances you have just started. You can stop them all with one command: ```sh > ./target/debug/neon_local stop diff --git a/compute_tools/README.md b/compute_tools/README.md index ccae3d2842..15876ed246 100644 --- a/compute_tools/README.md +++ b/compute_tools/README.md @@ -1,9 +1,9 @@ # Compute node tools -Postgres wrapper (`zenith_ctl`) is intended to be run as a Docker entrypoint or as a `systemd` -`ExecStart` option. It will handle all the `zenith` specifics during compute node +Postgres wrapper (`compute_ctl`) is intended to be run as a Docker entrypoint or as a `systemd` +`ExecStart` option. It will handle all the `Neon` specifics during compute node initialization: -- `zenith_ctl` accepts cluster (compute node) specification as a JSON file. +- `compute_ctl` accepts cluster (compute node) specification as a JSON file. - Every start is a fresh start, so the data directory is removed and initialized again on each run. - Next it will put configuration files into the `PGDATA` directory. @@ -13,18 +13,18 @@ initialization: - Check and alter/drop/create roles and databases. - Hang waiting on the `postmaster` process to exit. -Also `zenith_ctl` spawns two separate service threads: +Also `compute_ctl` spawns two separate service threads: - `compute-monitor` checks the last Postgres activity timestamp and saves it - into the shared `ComputeState`; + into the shared `ComputeNode`; - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the last activity requests. 
Usage example: ```sh -zenith_ctl -D /var/db/postgres/compute \ - -C 'postgresql://zenith_admin@localhost/postgres' \ - -S /var/db/postgres/specs/current.json \ - -b /usr/local/bin/postgres +compute_ctl -D /var/db/postgres/compute \ + -C 'postgresql://zenith_admin@localhost/postgres' \ + -S /var/db/postgres/specs/current.json \ + -b /usr/local/bin/postgres ``` ## Tests diff --git a/compute_tools/src/bin/compute_ctl.rs b/compute_tools/src/bin/compute_ctl.rs new file mode 100644 index 0000000000..5c951b7779 --- /dev/null +++ b/compute_tools/src/bin/compute_ctl.rs @@ -0,0 +1,174 @@ +//! +//! Postgres wrapper (`compute_ctl`) is intended to be run as a Docker entrypoint or as a `systemd` +//! `ExecStart` option. It will handle all the `Neon` specifics during compute node +//! initialization: +//! - `compute_ctl` accepts cluster (compute node) specification as a JSON file. +//! - Every start is a fresh start, so the data directory is removed and +//! initialized again on each run. +//! - Next it will put configuration files into the `PGDATA` directory. +//! - Sync safekeepers and get commit LSN. +//! - Get `basebackup` from pageserver using the LSN returned in the previous step. +//! - Try to start `postgres` and wait until it is ready to accept connections. +//! - Check and alter/drop/create roles and databases. +//! - Hang waiting on the `postmaster` process to exit. +//! +//! Also `compute_ctl` spawns two separate service threads: +//! - `compute-monitor` checks the last Postgres activity timestamp and saves it +//! into the shared `ComputeNode`; +//! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the +//! last activity requests. +//! +//! Usage example: +//! ```sh +//! compute_ctl -D /var/db/postgres/compute \ +//! -C 'postgresql://zenith_admin@localhost/postgres' \ +//! -S /var/db/postgres/specs/current.json \ +//! -b /usr/local/bin/postgres +//! ``` +//!
+use std::fs::File; +use std::panic; +use std::path::Path; +use std::process::exit; +use std::sync::{Arc, RwLock}; +use std::{thread, time::Duration}; + +use anyhow::Result; +use chrono::Utc; +use clap::Arg; +use log::{error, info}; + +use compute_tools::compute::{ComputeMetrics, ComputeNode, ComputeState, ComputeStatus}; +use compute_tools::http::api::launch_http_server; +use compute_tools::logger::*; +use compute_tools::monitor::launch_monitor; +use compute_tools::params::*; +use compute_tools::pg_helpers::*; +use compute_tools::spec::*; + +fn main() -> Result<()> { + // TODO: re-use `utils::logging` later + init_logger(DEFAULT_LOG_LEVEL)?; + + // Env variable is set by `cargo` + let version: Option<&str> = option_env!("CARGO_PKG_VERSION"); + let matches = clap::App::new("compute_ctl") + .version(version.unwrap_or("unknown")) + .arg( + Arg::new("connstr") + .short('C') + .long("connstr") + .value_name("DATABASE_URL") + .required(true), + ) + .arg( + Arg::new("pgdata") + .short('D') + .long("pgdata") + .value_name("DATADIR") + .required(true), + ) + .arg( + Arg::new("pgbin") + .short('b') + .long("pgbin") + .value_name("POSTGRES_PATH"), + ) + .arg( + Arg::new("spec") + .short('s') + .long("spec") + .value_name("SPEC_JSON"), + ) + .arg( + Arg::new("spec-path") + .short('S') + .long("spec-path") + .value_name("SPEC_PATH"), + ) + .get_matches(); + + let pgdata = matches.value_of("pgdata").expect("PGDATA path is required"); + let connstr = matches + .value_of("connstr") + .expect("Postgres connection string is required"); + let spec = matches.value_of("spec"); + let spec_path = matches.value_of("spec-path"); + + // Try to use just 'postgres' if no path is provided + let pgbin = matches.value_of("pgbin").unwrap_or("postgres"); + + let spec: ComputeSpec = match spec { + // First, try to get cluster spec from the cli argument + Some(json) => serde_json::from_str(json)?, + None => { + // Second, try to read it from the file if path is provided + if let Some(sp) = spec_path { + let path = Path::new(sp); + let file = File::open(path)?; + serde_json::from_reader(file)? + } else { + panic!("cluster spec should be provided via --spec or --spec-path argument"); + } + } + }; + + let pageserver_connstr = spec + .cluster + .settings + .find("zenith.page_server_connstring") + .expect("pageserver connstr should be provided"); + let tenant = spec + .cluster + .settings + .find("zenith.zenith_tenant") + .expect("tenant id should be provided"); + let timeline = spec + .cluster + .settings + .find("zenith.zenith_timeline") + .expect("tenant id should be provided"); + + let compute_state = ComputeNode { + start_time: Utc::now(), + connstr: connstr.to_string(), + pgdata: pgdata.to_string(), + pgbin: pgbin.to_string(), + spec, + tenant, + timeline, + pageserver_connstr, + metrics: ComputeMetrics::new(), + state: RwLock::new(ComputeState::new()), + }; + let compute = Arc::new(compute_state); + + // Launch service threads first, so we were able to serve availability + // requests, while configuration is still in progress. + let _http_handle = launch_http_server(&compute).expect("cannot launch http endpoint thread"); + let _monitor_handle = launch_monitor(&compute).expect("cannot launch compute monitor thread"); + + // Run compute (Postgres) and hang waiting on it. 
+ match compute.prepare_and_run() { + Ok(ec) => { + let code = ec.code().unwrap_or(1); + info!("Postgres exited with code {}, shutting down", code); + exit(code) + } + Err(error) => { + error!("could not start the compute node: {}", error); + + let mut state = compute.state.write().unwrap(); + state.error = Some(format!("{:?}", error)); + state.status = ComputeStatus::Failed; + drop(state); + + // Keep serving HTTP requests, so the cloud control plane was able to + // get the actual error. + info!("giving control plane 30s to collect the error before shutdown"); + thread::sleep(Duration::from_secs(30)); + info!("shutting down"); + Err(error) + } + } +} diff --git a/compute_tools/src/bin/zenith_ctl.rs b/compute_tools/src/bin/zenith_ctl.rs deleted file mode 100644 index 3685f8e8b4..0000000000 --- a/compute_tools/src/bin/zenith_ctl.rs +++ /dev/null @@ -1,252 +0,0 @@ -//! -//! Postgres wrapper (`zenith_ctl`) is intended to be run as a Docker entrypoint or as a `systemd` -//! `ExecStart` option. It will handle all the `zenith` specifics during compute node -//! initialization: -//! - `zenith_ctl` accepts cluster (compute node) specification as a JSON file. -//! - Every start is a fresh start, so the data directory is removed and -//! initialized again on each run. -//! - Next it will put configuration files into the `PGDATA` directory. -//! - Sync safekeepers and get commit LSN. -//! - Get `basebackup` from pageserver using the returned on the previous step LSN. -//! - Try to start `postgres` and wait until it is ready to accept connections. -//! - Check and alter/drop/create roles and databases. -//! - Hang waiting on the `postmaster` process to exit. -//! -//! Also `zenith_ctl` spawns two separate service threads: -//! - `compute-monitor` checks the last Postgres activity timestamp and saves it -//! into the shared `ComputeState`; -//! - `http-endpoint` runs a Hyper HTTP API server, which serves readiness and the -//! last activity requests. -//! -//! Usage example: -//! ```sh -//! zenith_ctl -D /var/db/postgres/compute \ -//! -C 'postgresql://zenith_admin@localhost/postgres' \ -//! -S /var/db/postgres/specs/current.json \ -//! -b /usr/local/bin/postgres -//! ``` -//! -use std::fs::File; -use std::panic; -use std::path::Path; -use std::process::{exit, Command, ExitStatus}; -use std::sync::{Arc, RwLock}; - -use anyhow::{Context, Result}; -use chrono::Utc; -use clap::Arg; -use log::info; -use postgres::{Client, NoTls}; - -use compute_tools::checker::create_writablity_check_data; -use compute_tools::config; -use compute_tools::http_api::launch_http_server; -use compute_tools::logger::*; -use compute_tools::monitor::launch_monitor; -use compute_tools::params::*; -use compute_tools::pg_helpers::*; -use compute_tools::spec::*; -use compute_tools::zenith::*; - -/// Do all the preparations like PGDATA directory creation, configuration, -/// safekeepers sync, basebackup, etc. 
-fn prepare_pgdata(state: &Arc>) -> Result<()> { - let state = state.read().unwrap(); - let spec = &state.spec; - let pgdata_path = Path::new(&state.pgdata); - let pageserver_connstr = spec - .cluster - .settings - .find("zenith.page_server_connstring") - .expect("pageserver connstr should be provided"); - let tenant = spec - .cluster - .settings - .find("zenith.zenith_tenant") - .expect("tenant id should be provided"); - let timeline = spec - .cluster - .settings - .find("zenith.zenith_timeline") - .expect("tenant id should be provided"); - - info!( - "starting cluster #{}, operation #{}", - spec.cluster.cluster_id, - spec.operation_uuid.as_ref().unwrap() - ); - - // Remove/create an empty pgdata directory and put configuration there. - create_pgdata(&state.pgdata)?; - config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec)?; - - info!("starting safekeepers syncing"); - let lsn = sync_safekeepers(&state.pgdata, &state.pgbin) - .with_context(|| "failed to sync safekeepers")?; - info!("safekeepers synced at LSN {}", lsn); - - info!( - "getting basebackup@{} from pageserver {}", - lsn, pageserver_connstr - ); - get_basebackup(&state.pgdata, &pageserver_connstr, &tenant, &timeline, &lsn).with_context( - || { - format!( - "failed to get basebackup@{} from pageserver {}", - lsn, pageserver_connstr - ) - }, - )?; - - // Update pg_hba.conf received with basebackup. - update_pg_hba(pgdata_path)?; - - Ok(()) -} - -/// Start Postgres as a child process and manage DBs/roles. -/// After that this will hang waiting on the postmaster process to exit. -fn run_compute(state: &Arc>) -> Result { - let read_state = state.read().unwrap(); - let pgdata_path = Path::new(&read_state.pgdata); - - // Run postgres as a child process. - let mut pg = Command::new(&read_state.pgbin) - .args(&["-D", &read_state.pgdata]) - .spawn() - .expect("cannot start postgres process"); - - // Try default Postgres port if it is not provided - let port = read_state - .spec - .cluster - .settings - .find("port") - .unwrap_or_else(|| "5432".to_string()); - wait_for_postgres(&port, pgdata_path)?; - - let mut client = Client::connect(&read_state.connstr, NoTls)?; - - handle_roles(&read_state.spec, &mut client)?; - handle_databases(&read_state.spec, &mut client)?; - handle_grants(&read_state.spec, &mut client)?; - create_writablity_check_data(&mut client)?; - - // 'Close' connection - drop(client); - - info!( - "finished configuration of cluster #{}", - read_state.spec.cluster.cluster_id - ); - - // Release the read lock. - drop(read_state); - - // Get the write lock, update state and release the lock, so HTTP API - // was able to serve requests, while we are blocked waiting on - // Postgres. - let mut state = state.write().unwrap(); - state.ready = true; - drop(state); - - // Wait for child postgres process basically forever. In this state Ctrl+C - // will be propagated to postgres and it will be shut down as well. 
- let ecode = pg.wait().expect("failed to wait on postgres"); - - Ok(ecode) -} - -fn main() -> Result<()> { - // TODO: re-use `utils::logging` later - init_logger(DEFAULT_LOG_LEVEL)?; - - // Env variable is set by `cargo` - let version: Option<&str> = option_env!("CARGO_PKG_VERSION"); - let matches = clap::App::new("zenith_ctl") - .version(version.unwrap_or("unknown")) - .arg( - Arg::new("connstr") - .short('C') - .long("connstr") - .value_name("DATABASE_URL") - .required(true), - ) - .arg( - Arg::new("pgdata") - .short('D') - .long("pgdata") - .value_name("DATADIR") - .required(true), - ) - .arg( - Arg::new("pgbin") - .short('b') - .long("pgbin") - .value_name("POSTGRES_PATH"), - ) - .arg( - Arg::new("spec") - .short('s') - .long("spec") - .value_name("SPEC_JSON"), - ) - .arg( - Arg::new("spec-path") - .short('S') - .long("spec-path") - .value_name("SPEC_PATH"), - ) - .get_matches(); - - let pgdata = matches.value_of("pgdata").expect("PGDATA path is required"); - let connstr = matches - .value_of("connstr") - .expect("Postgres connection string is required"); - let spec = matches.value_of("spec"); - let spec_path = matches.value_of("spec-path"); - - // Try to use just 'postgres' if no path is provided - let pgbin = matches.value_of("pgbin").unwrap_or("postgres"); - - let spec: ClusterSpec = match spec { - // First, try to get cluster spec from the cli argument - Some(json) => serde_json::from_str(json)?, - None => { - // Second, try to read it from the file if path is provided - if let Some(sp) = spec_path { - let path = Path::new(sp); - let file = File::open(path)?; - serde_json::from_reader(file)? - } else { - panic!("cluster spec should be provided via --spec or --spec-path argument"); - } - } - }; - - let compute_state = ComputeState { - connstr: connstr.to_string(), - pgdata: pgdata.to_string(), - pgbin: pgbin.to_string(), - spec, - ready: false, - last_active: Utc::now(), - }; - let compute_state = Arc::new(RwLock::new(compute_state)); - - // Launch service threads first, so we were able to serve availability - // requests, while configuration is still in progress. - let mut _threads = vec![ - launch_http_server(&compute_state).expect("cannot launch compute monitor thread"), - launch_monitor(&compute_state).expect("cannot launch http endpoint thread"), - ]; - - prepare_pgdata(&compute_state)?; - - // Run compute (Postgres) and hang waiting on it. Panic if any error happens, - // it will help us to trigger unwind and kill postmaster as well. 
- match run_compute(&compute_state) { - Ok(ec) => exit(ec.success() as i32), - Err(error) => panic!("cannot start compute node, error: {}", error), - } -} diff --git a/compute_tools/src/checker.rs b/compute_tools/src/checker.rs index 63da6ea23e..dbb70a74cf 100644 --- a/compute_tools/src/checker.rs +++ b/compute_tools/src/checker.rs @@ -1,11 +1,11 @@ -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use anyhow::{anyhow, Result}; use log::error; use postgres::Client; use tokio_postgres::NoTls; -use crate::zenith::ComputeState; +use crate::compute::ComputeNode; pub fn create_writablity_check_data(client: &mut Client) -> Result<()> { let query = " @@ -23,9 +23,9 @@ pub fn create_writablity_check_data(client: &mut Client) -> Result<()> { Ok(()) } -pub async fn check_writability(state: &Arc>) -> Result<()> { - let connstr = state.read().unwrap().connstr.clone(); - let (client, connection) = tokio_postgres::connect(&connstr, NoTls).await?; +pub async fn check_writability(compute: &Arc) -> Result<()> { + let connstr = &compute.connstr; + let (client, connection) = tokio_postgres::connect(connstr, NoTls).await?; if client.is_closed() { return Err(anyhow!("connection to postgres closed")); } diff --git a/compute_tools/src/compute.rs b/compute_tools/src/compute.rs new file mode 100644 index 0000000000..a8422fb2b2 --- /dev/null +++ b/compute_tools/src/compute.rs @@ -0,0 +1,315 @@ +// +// XXX: This starts to be scarry similar to the `PostgresNode` from `control_plane`, +// but there are several things that makes `PostgresNode` usage inconvenient in the +// cloud: +// - it inherits from `LocalEnv`, which contains **all-all** the information about +// a complete service running +// - it uses `PageServerNode` with information about http endpoint, which we do not +// need in the cloud again +// - many tiny pieces like, for example, we do not use `pg_ctl` in the cloud +// +// Thus, to use `PostgresNode` in the cloud, we need to 'mock' a bunch of required +// attributes (not required for the cloud). Yet, it is still tempting to unify these +// `PostgresNode` and `ComputeNode` and use one in both places. +// +// TODO: stabilize `ComputeNode` and think about using it in the `control_plane`. +// +use std::fs; +use std::os::unix::fs::PermissionsExt; +use std::path::Path; +use std::process::{Command, ExitStatus, Stdio}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::RwLock; + +use anyhow::{Context, Result}; +use chrono::{DateTime, Utc}; +use log::info; +use postgres::{Client, NoTls}; +use serde::{Serialize, Serializer}; + +use crate::checker::create_writablity_check_data; +use crate::config; +use crate::pg_helpers::*; +use crate::spec::*; + +/// Compute node info shared across several `compute_ctl` threads. +pub struct ComputeNode { + pub start_time: DateTime, + pub connstr: String, + pub pgdata: String, + pub pgbin: String, + pub spec: ComputeSpec, + pub tenant: String, + pub timeline: String, + pub pageserver_connstr: String, + pub metrics: ComputeMetrics, + /// Volatile part of the `ComputeNode` so should be used under `RwLock` + /// to allow HTTP API server to serve status requests, while configuration + /// is in progress. 
+ pub state: RwLock, +} + +fn rfc3339_serialize(x: &DateTime, s: S) -> Result +where + S: Serializer, +{ + x.to_rfc3339().serialize(s) +} + +#[derive(Serialize)] +#[serde(rename_all = "snake_case")] +pub struct ComputeState { + pub status: ComputeStatus, + /// Timestamp of the last Postgres activity + #[serde(serialize_with = "rfc3339_serialize")] + pub last_active: DateTime, + pub error: Option, +} + +impl ComputeState { + pub fn new() -> Self { + Self { + status: ComputeStatus::Init, + last_active: Utc::now(), + error: None, + } + } +} + +impl Default for ComputeState { + fn default() -> Self { + Self::new() + } +} + +#[derive(Serialize, Clone, Copy, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum ComputeStatus { + Init, + Running, + Failed, +} + +#[derive(Serialize)] +pub struct ComputeMetrics { + pub sync_safekeepers_ms: AtomicU64, + pub basebackup_ms: AtomicU64, + pub config_ms: AtomicU64, + pub total_startup_ms: AtomicU64, +} + +impl ComputeMetrics { + pub fn new() -> Self { + Self { + sync_safekeepers_ms: AtomicU64::new(0), + basebackup_ms: AtomicU64::new(0), + config_ms: AtomicU64::new(0), + total_startup_ms: AtomicU64::new(0), + } + } +} + +impl Default for ComputeMetrics { + fn default() -> Self { + Self::new() + } +} + +impl ComputeNode { + pub fn set_status(&self, status: ComputeStatus) { + self.state.write().unwrap().status = status; + } + + pub fn get_status(&self) -> ComputeStatus { + self.state.read().unwrap().status + } + + // Remove `pgdata` directory and create it again with right permissions. + fn create_pgdata(&self) -> Result<()> { + // Ignore removal error, likely it is a 'No such file or directory (os error 2)'. + // If it is something different then create_dir() will error out anyway. + let _ok = fs::remove_dir_all(&self.pgdata); + fs::create_dir(&self.pgdata)?; + fs::set_permissions(&self.pgdata, fs::Permissions::from_mode(0o700))?; + + Ok(()) + } + + // Get basebackup from the libpq connection to pageserver using `connstr` and + // unarchive it to `pgdata` directory overriding all its previous content. + fn get_basebackup(&self, lsn: &str) -> Result<()> { + let start_time = Utc::now(); + + let mut client = Client::connect(&self.pageserver_connstr, NoTls)?; + let basebackup_cmd = match lsn { + "0/0" => format!("basebackup {} {}", &self.tenant, &self.timeline), // First start of the compute + _ => format!("basebackup {} {} {}", &self.tenant, &self.timeline, lsn), + }; + let copyreader = client.copy_out(basebackup_cmd.as_str())?; + let mut ar = tar::Archive::new(copyreader); + + ar.unpack(&self.pgdata)?; + + self.metrics.basebackup_ms.store( + Utc::now() + .signed_duration_since(start_time) + .to_std() + .unwrap() + .as_millis() as u64, + Ordering::Relaxed, + ); + + Ok(()) + } + + // Run `postgres` in a special mode with `--sync-safekeepers` argument + // and return the reported LSN back to the caller. + fn sync_safekeepers(&self) -> Result { + let start_time = Utc::now(); + + let sync_handle = Command::new(&self.pgbin) + .args(&["--sync-safekeepers"]) + .env("PGDATA", &self.pgdata) // we cannot use -D in this mode + .stdout(Stdio::piped()) + .spawn() + .expect("postgres --sync-safekeepers failed to start"); + + // `postgres --sync-safekeepers` will print all log output to stderr and + // final LSN to stdout. So we pipe only stdout, while stderr will be automatically + // redirected to the caller output. 
+ let sync_output = sync_handle + .wait_with_output() + .expect("postgres --sync-safekeepers failed"); + if !sync_output.status.success() { + anyhow::bail!( + "postgres --sync-safekeepers exited with non-zero status: {}", + sync_output.status, + ); + } + + self.metrics.sync_safekeepers_ms.store( + Utc::now() + .signed_duration_since(start_time) + .to_std() + .unwrap() + .as_millis() as u64, + Ordering::Relaxed, + ); + + let lsn = String::from(String::from_utf8(sync_output.stdout)?.trim()); + + Ok(lsn) + } + + /// Do all the preparations like PGDATA directory creation, configuration, + /// safekeepers sync, basebackup, etc. + pub fn prepare_pgdata(&self) -> Result<()> { + let spec = &self.spec; + let pgdata_path = Path::new(&self.pgdata); + + // Remove/create an empty pgdata directory and put configuration there. + self.create_pgdata()?; + config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec)?; + + info!("starting safekeepers syncing"); + let lsn = self + .sync_safekeepers() + .with_context(|| "failed to sync safekeepers")?; + info!("safekeepers synced at LSN {}", lsn); + + info!( + "getting basebackup@{} from pageserver {}", + lsn, &self.pageserver_connstr + ); + self.get_basebackup(&lsn).with_context(|| { + format!( + "failed to get basebackup@{} from pageserver {}", + lsn, &self.pageserver_connstr + ) + })?; + + // Update pg_hba.conf received with basebackup. + update_pg_hba(pgdata_path)?; + + Ok(()) + } + + /// Start Postgres as a child process and manage DBs/roles. + /// After that this will hang waiting on the postmaster process to exit. + pub fn run(&self) -> Result { + let start_time = Utc::now(); + + let pgdata_path = Path::new(&self.pgdata); + + // Run postgres as a child process. + let mut pg = Command::new(&self.pgbin) + .args(&["-D", &self.pgdata]) + .spawn() + .expect("cannot start postgres process"); + + // Try default Postgres port if it is not provided + let port = self + .spec + .cluster + .settings + .find("port") + .unwrap_or_else(|| "5432".to_string()); + wait_for_postgres(&mut pg, &port, pgdata_path)?; + + let mut client = Client::connect(&self.connstr, NoTls)?; + + handle_roles(&self.spec, &mut client)?; + handle_databases(&self.spec, &mut client)?; + handle_grants(&self.spec, &mut client)?; + create_writablity_check_data(&mut client)?; + + // 'Close' connection + drop(client); + let startup_end_time = Utc::now(); + + self.metrics.config_ms.store( + startup_end_time + .signed_duration_since(start_time) + .to_std() + .unwrap() + .as_millis() as u64, + Ordering::Relaxed, + ); + self.metrics.total_startup_ms.store( + startup_end_time + .signed_duration_since(self.start_time) + .to_std() + .unwrap() + .as_millis() as u64, + Ordering::Relaxed, + ); + + self.set_status(ComputeStatus::Running); + + info!( + "finished configuration of compute for project {}", + self.spec.cluster.cluster_id + ); + + // Wait for child Postgres process basically forever. In this state Ctrl+C + // will propagate to Postgres and it will be shut down as well. 
+ let ecode = pg + .wait() + .expect("failed to start waiting on Postgres process"); + + Ok(ecode) + } + + pub fn prepare_and_run(&self) -> Result { + info!( + "starting compute for project {}, operation {}, tenant {}, timeline {}", + self.spec.cluster.cluster_id, + self.spec.operation_uuid.as_ref().unwrap(), + self.tenant, + self.timeline, + ); + + self.prepare_pgdata()?; + self.run() + } +} diff --git a/compute_tools/src/config.rs b/compute_tools/src/config.rs index 22134db0f8..6cbd0e3d4c 100644 --- a/compute_tools/src/config.rs +++ b/compute_tools/src/config.rs @@ -6,7 +6,7 @@ use std::path::Path; use anyhow::Result; use crate::pg_helpers::PgOptionsSerialize; -use crate::zenith::ClusterSpec; +use crate::spec::ComputeSpec; /// Check that `line` is inside a text file and put it there if it is not. /// Create file if it doesn't exist. @@ -32,20 +32,20 @@ pub fn line_in_file(path: &Path, line: &str) -> Result { } /// Create or completely rewrite configuration file specified by `path` -pub fn write_postgres_conf(path: &Path, spec: &ClusterSpec) -> Result<()> { +pub fn write_postgres_conf(path: &Path, spec: &ComputeSpec) -> Result<()> { // File::create() destroys the file content if it exists. let mut postgres_conf = File::create(path)?; - write_zenith_managed_block(&mut postgres_conf, &spec.cluster.settings.as_pg_settings())?; + write_auto_managed_block(&mut postgres_conf, &spec.cluster.settings.as_pg_settings())?; Ok(()) } // Write Postgres config block wrapped with generated comment section -fn write_zenith_managed_block(file: &mut File, buf: &str) -> Result<()> { - writeln!(file, "# Managed by Zenith: begin")?; +fn write_auto_managed_block(file: &mut File, buf: &str) -> Result<()> { + writeln!(file, "# Managed by compute_ctl: begin")?; writeln!(file, "{}", buf)?; - writeln!(file, "# Managed by Zenith: end")?; + writeln!(file, "# Managed by compute_ctl: end")?; Ok(()) } diff --git a/compute_tools/src/http_api.rs b/compute_tools/src/http/api.rs similarity index 56% rename from compute_tools/src/http_api.rs rename to compute_tools/src/http/api.rs index 7e1a876044..4c8bbc608b 100644 --- a/compute_tools/src/http_api.rs +++ b/compute_tools/src/http/api.rs @@ -1,37 +1,64 @@ use std::convert::Infallible; use std::net::SocketAddr; -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::thread; use anyhow::Result; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use log::{error, info}; +use serde_json; -use crate::zenith::*; +use crate::compute::{ComputeNode, ComputeStatus}; // Service function to handle all available routes. -async fn routes(req: Request, state: Arc>) -> Response { +async fn routes(req: Request, compute: Arc) -> Response { match (req.method(), req.uri().path()) { // Timestamp of the last Postgres activity in the plain text. + // DEPRECATED in favour of /status (&Method::GET, "/last_activity") => { info!("serving /last_active GET request"); - let state = state.read().unwrap(); + let state = compute.state.read().unwrap(); // Use RFC3339 format for consistency. Response::new(Body::from(state.last_active.to_rfc3339())) } - // Has compute setup process finished? -> true/false + // Has compute setup process finished? -> true/false. 
+ // DEPRECATED in favour of /status (&Method::GET, "/ready") => { info!("serving /ready GET request"); - let state = state.read().unwrap(); - Response::new(Body::from(format!("{}", state.ready))) + let status = compute.get_status(); + Response::new(Body::from(format!("{}", status == ComputeStatus::Running))) } + // Serialized compute state. + (&Method::GET, "/status") => { + info!("serving /status GET request"); + let state = compute.state.read().unwrap(); + Response::new(Body::from(serde_json::to_string(&*state).unwrap())) + } + + // Startup metrics in JSON format. Keep /metrics reserved for a possible + // future use for Prometheus metrics format. + (&Method::GET, "/metrics.json") => { + info!("serving /metrics.json GET request"); + Response::new(Body::from(serde_json::to_string(&compute.metrics).unwrap())) + } + + // DEPRECATED, use POST instead (&Method::GET, "/check_writability") => { info!("serving /check_writability GET request"); - let res = crate::checker::check_writability(&state).await; + let res = crate::checker::check_writability(&compute).await; + match res { + Ok(_) => Response::new(Body::from("true")), + Err(e) => Response::new(Body::from(e.to_string())), + } + } + + (&Method::POST, "/check_writability") => { + info!("serving /check_writability POST request"); + let res = crate::checker::check_writability(&compute).await; match res { Ok(_) => Response::new(Body::from("true")), Err(e) => Response::new(Body::from(e.to_string())), @@ -49,7 +76,7 @@ async fn routes(req: Request, state: Arc>) -> Respons // Main Hyper HTTP server function that runs it and blocks waiting on it forever. #[tokio::main] -async fn serve(state: Arc>) { +async fn serve(state: Arc) { let addr = SocketAddr::from(([0, 0, 0, 0], 3080)); let make_service = make_service_fn(move |_conn| { @@ -73,7 +100,7 @@ async fn serve(state: Arc>) { } /// Launch a separate Hyper HTTP API server thread and return its `JoinHandle`. 
-pub fn launch_http_server(state: &Arc>) -> Result> { +pub fn launch_http_server(state: &Arc) -> Result> { let state = Arc::clone(state); Ok(thread::Builder::new() diff --git a/compute_tools/src/http/mod.rs b/compute_tools/src/http/mod.rs new file mode 100644 index 0000000000..e5fdf85eed --- /dev/null +++ b/compute_tools/src/http/mod.rs @@ -0,0 +1 @@ +pub mod api; diff --git a/compute_tools/src/http/openapi_spec.yaml b/compute_tools/src/http/openapi_spec.yaml new file mode 100644 index 0000000000..9c0f8e3ccd --- /dev/null +++ b/compute_tools/src/http/openapi_spec.yaml @@ -0,0 +1,158 @@ +openapi: "3.0.2" +info: + title: Compute node control API + version: "1.0" + +servers: + - url: "http://localhost:3080" + +paths: + /status: + get: + tags: + - "info" + summary: Get compute node internal status + description: "" + operationId: getComputeStatus + responses: + "200": + description: ComputeState + content: + application/json: + schema: + $ref: "#/components/schemas/ComputeState" + + /metrics.json: + get: + tags: + - "info" + summary: Get compute node startup metrics in JSON format + description: "" + operationId: getComputeMetricsJSON + responses: + "200": + description: ComputeMetrics + content: + application/json: + schema: + $ref: "#/components/schemas/ComputeMetrics" + + /ready: + get: + deprecated: true + tags: + - "info" + summary: Check whether compute startup process finished successfully + description: "" + operationId: computeIsReady + responses: + "200": + description: Compute is ready ('true') or not ('false') + content: + text/plain: + schema: + type: string + example: "true" + + /last_activity: + get: + deprecated: true + tags: + - "info" + summary: Get timestamp of the last compute activity + description: "" + operationId: getLastComputeActivityTS + responses: + "200": + description: Timestamp of the last compute activity + content: + text/plain: + schema: + type: string + example: "2022-10-12T07:20:50.52Z" + + /check_writability: + get: + deprecated: true + tags: + - "check" + summary: Check that we can write new data on this compute + description: "" + operationId: checkComputeWritabilityDeprecated + responses: + "200": + description: Check result + content: + text/plain: + schema: + type: string + description: Error text or 'true' if check passed + example: "true" + + post: + tags: + - "check" + summary: Check that we can write new data on this compute + description: "" + operationId: checkComputeWritability + responses: + "200": + description: Check result + content: + text/plain: + schema: + type: string + description: Error text or 'true' if check passed + example: "true" + +components: + securitySchemes: + JWT: + type: http + scheme: bearer + bearerFormat: JWT + + schemas: + ComputeMetrics: + type: object + description: Compute startup metrics + required: + - sync_safekeepers_ms + - basebackup_ms + - config_ms + - total_startup_ms + properties: + sync_safekeepers_ms: + type: integer + basebackup_ms: + type: integer + config_ms: + type: integer + total_startup_ms: + type: integer + + ComputeState: + type: object + required: + - status + - last_active + properties: + status: + $ref: '#/components/schemas/ComputeStatus' + last_active: + type: string + description: The last detected compute activity timestamp in UTC and RFC3339 format + example: "2022-10-12T07:20:50.52Z" + error: + type: string + description: Text of the error during compute startup, if any + + ComputeStatus: + type: string + enum: + - init + - failed + - running + +security: + - JWT: [] diff --git 
a/compute_tools/src/lib.rs b/compute_tools/src/lib.rs index ffb9700a49..aee6b53e6a 100644 --- a/compute_tools/src/lib.rs +++ b/compute_tools/src/lib.rs @@ -4,11 +4,11 @@ //! pub mod checker; pub mod config; -pub mod http_api; +pub mod http; #[macro_use] pub mod logger; +pub mod compute; pub mod monitor; pub mod params; pub mod pg_helpers; pub mod spec; -pub mod zenith; diff --git a/compute_tools/src/monitor.rs b/compute_tools/src/monitor.rs index 596981b2d2..496a5aae3b 100644 --- a/compute_tools/src/monitor.rs +++ b/compute_tools/src/monitor.rs @@ -1,4 +1,4 @@ -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use std::{thread, time}; use anyhow::Result; @@ -6,16 +6,16 @@ use chrono::{DateTime, Utc}; use log::{debug, info}; use postgres::{Client, NoTls}; -use crate::zenith::ComputeState; +use crate::compute::ComputeNode; const MONITOR_CHECK_INTERVAL: u64 = 500; // milliseconds // Spin in a loop and figure out the last activity time in the Postgres. // Then update it in the shared state. This function never errors out. // XXX: the only expected panic is at `RwLock` unwrap(). -fn watch_compute_activity(state: &Arc>) { +fn watch_compute_activity(compute: &Arc) { // Suppose that `connstr` doesn't change - let connstr = state.read().unwrap().connstr.clone(); + let connstr = compute.connstr.clone(); // Define `client` outside of the loop to reuse existing connection if it's active. let mut client = Client::connect(&connstr, NoTls); let timeout = time::Duration::from_millis(MONITOR_CHECK_INTERVAL); @@ -46,7 +46,7 @@ fn watch_compute_activity(state: &Arc>) { AND usename != 'zenith_admin';", // XXX: find a better way to filter other monitors? &[], ); - let mut last_active = state.read().unwrap().last_active; + let mut last_active = compute.state.read().unwrap().last_active; if let Ok(backs) = backends { let mut idle_backs: Vec> = vec![]; @@ -83,14 +83,14 @@ fn watch_compute_activity(state: &Arc>) { } // Update the last activity in the shared state if we got a more recent one. - let mut state = state.write().unwrap(); + let mut state = compute.state.write().unwrap(); if last_active > state.last_active { state.last_active = last_active; debug!("set the last compute activity time to: {}", last_active); } } Err(e) => { - info!("cannot connect to postgres: {}, retrying", e); + debug!("cannot connect to postgres: {}, retrying", e); // Establish a new connection and try again. client = Client::connect(&connstr, NoTls); @@ -100,7 +100,7 @@ fn watch_compute_activity(state: &Arc>) { } /// Launch a separate compute monitor thread and return its `JoinHandle`. 
-pub fn launch_monitor(state: &Arc>) -> Result> { +pub fn launch_monitor(state: &Arc) -> Result> { let state = Arc::clone(state); Ok(thread::Builder::new() diff --git a/compute_tools/src/pg_helpers.rs b/compute_tools/src/pg_helpers.rs index 1409a81b6b..74856eac63 100644 --- a/compute_tools/src/pg_helpers.rs +++ b/compute_tools/src/pg_helpers.rs @@ -1,7 +1,9 @@ +use std::fs::File; +use std::io::{BufRead, BufReader}; use std::net::{SocketAddr, TcpStream}; use std::os::unix::fs::PermissionsExt; use std::path::Path; -use std::process::Command; +use std::process::Child; use std::str::FromStr; use std::{fs, thread, time}; @@ -220,12 +222,12 @@ pub fn get_existing_dbs(client: &mut Client) -> Result> { /// Wait for Postgres to become ready to accept connections: /// - state should be `ready` in the `pgdata/postmaster.pid` /// - and we should be able to connect to 127.0.0.1:5432 -pub fn wait_for_postgres(port: &str, pgdata: &Path) -> Result<()> { +pub fn wait_for_postgres(pg: &mut Child, port: &str, pgdata: &Path) -> Result<()> { let pid_path = pgdata.join("postmaster.pid"); let mut slept: u64 = 0; // ms let pause = time::Duration::from_millis(100); - let timeout = time::Duration::from_millis(200); + let timeout = time::Duration::from_millis(10); let addr = SocketAddr::from_str(&format!("127.0.0.1:{}", port)).unwrap(); loop { @@ -236,14 +238,19 @@ pub fn wait_for_postgres(port: &str, pgdata: &Path) -> Result<()> { bail!("timed out while waiting for Postgres to start"); } + if let Ok(Some(status)) = pg.try_wait() { + // Postgres exited, that is not what we expected, bail out earlier. + let code = status.code().unwrap_or(-1); + bail!("Postgres exited unexpectedly with code {}", code); + } + if pid_path.exists() { - // XXX: dumb and the simplest way to get the last line in a text file - // TODO: better use `.lines().last()` later - let stdout = Command::new("tail") - .args(&["-n1", pid_path.to_str().unwrap()]) - .output()? - .stdout; - let status = String::from_utf8(stdout)?; + let file = BufReader::new(File::open(&pid_path)?); + let status = file + .lines() + .last() + .unwrap() + .unwrap_or_else(|_| "unknown".to_string()); let can_connect = TcpStream::connect_timeout(&addr, timeout).is_ok(); // Now Postgres is ready to accept connections diff --git a/compute_tools/src/spec.rs b/compute_tools/src/spec.rs index 27114b8202..e88df56a65 100644 --- a/compute_tools/src/spec.rs +++ b/compute_tools/src/spec.rs @@ -3,16 +3,53 @@ use std::path::Path; use anyhow::Result; use log::{info, log_enabled, warn, Level}; use postgres::Client; +use serde::Deserialize; use crate::config; use crate::params::PG_HBA_ALL_MD5; use crate::pg_helpers::*; -use crate::zenith::ClusterSpec; + +/// Cluster spec or configuration represented as an optional number of +/// delta operations + final cluster state description. +#[derive(Clone, Deserialize)] +pub struct ComputeSpec { + pub format_version: f32, + pub timestamp: String, + pub operation_uuid: Option, + /// Expected cluster state at the end of transition process. + pub cluster: Cluster, + pub delta_operations: Option>, +} + +/// Cluster state seen from the perspective of the external tools +/// like Rails web console. +#[derive(Clone, Deserialize)] +pub struct Cluster { + pub cluster_id: String, + pub name: String, + pub state: Option, + pub roles: Vec, + pub databases: Vec, + pub settings: GenericOptions, +} + +/// Single cluster state changing operation that could not be represented as +/// a static `Cluster` structure. 
For example: +/// - DROP DATABASE +/// - DROP ROLE +/// - ALTER ROLE name RENAME TO new_name +/// - ALTER DATABASE name RENAME TO new_name +#[derive(Clone, Deserialize)] +pub struct DeltaOp { + pub action: String, + pub name: PgIdent, + pub new_name: Option, +} /// It takes cluster specification and does the following: /// - Serialize cluster config and put it into `postgresql.conf` completely rewriting the file. /// - Update `pg_hba.conf` to allow external connections. -pub fn handle_configuration(spec: &ClusterSpec, pgdata_path: &Path) -> Result<()> { +pub fn handle_configuration(spec: &ComputeSpec, pgdata_path: &Path) -> Result<()> { // File `postgresql.conf` is no longer included into `basebackup`, so just // always write all config into it creating new file. config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec)?; @@ -39,7 +76,7 @@ pub fn update_pg_hba(pgdata_path: &Path) -> Result<()> { /// Given a cluster spec json and open transaction it handles roles creation, /// deletion and update. -pub fn handle_roles(spec: &ClusterSpec, client: &mut Client) -> Result<()> { +pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> { let mut xact = client.transaction()?; let existing_roles: Vec = get_existing_roles(&mut xact)?; @@ -136,13 +173,20 @@ pub fn handle_roles(spec: &ClusterSpec, client: &mut Client) -> Result<()> { xact.execute(query.as_str(), &[])?; } } else { - info!("role name {}", &name); + info!("role name: '{}'", &name); let mut query: String = format!("CREATE ROLE {} ", name.quote()); - info!("role create query {}", &query); + info!("role create query: '{}'", &query); info_print!(" -> create"); query.push_str(&role.to_pg_options()); xact.execute(query.as_str(), &[])?; + + let grant_query = format!( + "grant pg_read_all_data, pg_write_all_data to {}", + name.quote() + ); + xact.execute(grant_query.as_str(), &[])?; + info!("role grant query: '{}'", &grant_query); } info_print!("\n"); @@ -158,7 +202,7 @@ pub fn handle_roles(spec: &ClusterSpec, client: &mut Client) -> Result<()> { /// like `CREATE DATABASE` and `DROP DATABASE` do not support it. Statement-level /// atomicity should be enough here due to the order of operations and various checks, /// which together provide us idempotency. -pub fn handle_databases(spec: &ClusterSpec, client: &mut Client) -> Result<()> { +pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> { let existing_dbs: Vec = get_existing_dbs(client)?; // Print a list of existing Postgres databases (only in debug mode) @@ -247,7 +291,7 @@ pub fn handle_databases(spec: &ClusterSpec, client: &mut Client) -> Result<()> { // Grant CREATE ON DATABASE to the database owner // to allow clients create trusted extensions. -pub fn handle_grants(spec: &ClusterSpec, client: &mut Client) -> Result<()> { +pub fn handle_grants(spec: &ComputeSpec, client: &mut Client) -> Result<()> { info!("cluster spec grants:"); for db in &spec.cluster.databases { diff --git a/compute_tools/src/zenith.rs b/compute_tools/src/zenith.rs deleted file mode 100644 index ba7dc20787..0000000000 --- a/compute_tools/src/zenith.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::process::{Command, Stdio}; - -use anyhow::Result; -use chrono::{DateTime, Utc}; -use postgres::{Client, NoTls}; -use serde::Deserialize; - -use crate::pg_helpers::*; - -/// Compute node state shared across several `zenith_ctl` threads. -/// Should be used under `RwLock` to allow HTTP API server to serve -/// status requests, while configuration is in progress. 
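
For reference, a minimal sketch of the JSON that the relocated `ComputeSpec`/`Cluster`/`DeltaOp` structs deserialize. All values below are invented; the authoritative example is `tests/cluster_spec.json`, and `serde_json` is assumed on the caller's side, as in `pg_helpers_tests.rs`.

```rust
use compute_tools::spec::ComputeSpec;

// Invented example payload; empty role/database lists keep the sketch independent
// of the Role and Database field layouts, which are not shown in this diff.
fn parse_spec_example() -> anyhow::Result<ComputeSpec> {
    let json = r#"{
        "format_version": 1.0,
        "timestamp": "2022-10-12T07:20:50.52Z",
        "operation_uuid": null,
        "cluster": {
            "cluster_id": "example-cluster",
            "name": "main",
            "state": null,
            "roles": [],
            "databases": [],
            "settings": []
        },
        "delta_operations": null
    }"#;
    Ok(serde_json::from_str(json)?)
}
```
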
-pub struct ComputeState { - pub connstr: String, - pub pgdata: String, - pub pgbin: String, - pub spec: ClusterSpec, - /// Compute setup process has finished - pub ready: bool, - /// Timestamp of the last Postgres activity - pub last_active: DateTime, -} - -/// Cluster spec or configuration represented as an optional number of -/// delta operations + final cluster state description. -#[derive(Clone, Deserialize)] -pub struct ClusterSpec { - pub format_version: f32, - pub timestamp: String, - pub operation_uuid: Option, - /// Expected cluster state at the end of transition process. - pub cluster: Cluster, - pub delta_operations: Option>, -} - -/// Cluster state seen from the perspective of the external tools -/// like Rails web console. -#[derive(Clone, Deserialize)] -pub struct Cluster { - pub cluster_id: String, - pub name: String, - pub state: Option, - pub roles: Vec, - pub databases: Vec, - pub settings: GenericOptions, -} - -/// Single cluster state changing operation that could not be represented as -/// a static `Cluster` structure. For example: -/// - DROP DATABASE -/// - DROP ROLE -/// - ALTER ROLE name RENAME TO new_name -/// - ALTER DATABASE name RENAME TO new_name -#[derive(Clone, Deserialize)] -pub struct DeltaOp { - pub action: String, - pub name: PgIdent, - pub new_name: Option, -} - -/// Get basebackup from the libpq connection to pageserver using `connstr` and -/// unarchive it to `pgdata` directory overriding all its previous content. -pub fn get_basebackup( - pgdata: &str, - connstr: &str, - tenant: &str, - timeline: &str, - lsn: &str, -) -> Result<()> { - let mut client = Client::connect(connstr, NoTls)?; - let basebackup_cmd = match lsn { - "0/0" => format!("basebackup {} {}", tenant, timeline), // First start of the compute - _ => format!("basebackup {} {} {}", tenant, timeline, lsn), - }; - let copyreader = client.copy_out(basebackup_cmd.as_str())?; - let mut ar = tar::Archive::new(copyreader); - - ar.unpack(&pgdata)?; - - Ok(()) -} - -/// Run `postgres` in a special mode with `--sync-safekeepers` argument -/// and return the reported LSN back to the caller. -pub fn sync_safekeepers(pgdata: &str, pgbin: &str) -> Result { - let sync_handle = Command::new(&pgbin) - .args(&["--sync-safekeepers"]) - .env("PGDATA", &pgdata) // we cannot use -D in this mode - .stdout(Stdio::piped()) - .spawn() - .expect("postgres --sync-safekeepers failed to start"); - - // `postgres --sync-safekeepers` will print all log output to stderr and - // final LSN to stdout. So we pipe only stdout, while stderr will be automatically - // redirected to the caller output. 
- let sync_output = sync_handle - .wait_with_output() - .expect("postgres --sync-safekeepers failed"); - if !sync_output.status.success() { - anyhow::bail!( - "postgres --sync-safekeepers exited with non-zero status: {}", - sync_output.status, - ); - } - - let lsn = String::from(String::from_utf8(sync_output.stdout)?.trim()); - - Ok(lsn) -} diff --git a/compute_tools/tests/pg_helpers_tests.rs b/compute_tools/tests/pg_helpers_tests.rs index 472a49af4b..33f903f0e1 100644 --- a/compute_tools/tests/pg_helpers_tests.rs +++ b/compute_tools/tests/pg_helpers_tests.rs @@ -4,12 +4,12 @@ mod pg_helpers_tests { use std::fs::File; use compute_tools::pg_helpers::*; - use compute_tools::zenith::ClusterSpec; + use compute_tools::spec::ComputeSpec; #[test] fn params_serialize() { let file = File::open("tests/cluster_spec.json").unwrap(); - let spec: ClusterSpec = serde_json::from_reader(file).unwrap(); + let spec: ComputeSpec = serde_json::from_reader(file).unwrap(); assert_eq!( spec.cluster.databases.first().unwrap().to_pg_options(), @@ -24,7 +24,7 @@ mod pg_helpers_tests { #[test] fn settings_serialize() { let file = File::open("tests/cluster_spec.json").unwrap(); - let spec: ClusterSpec = serde_json::from_reader(file).unwrap(); + let spec: ComputeSpec = serde_json::from_reader(file).unwrap(); assert_eq!( spec.cluster.settings.as_pg_settings(), diff --git a/control_plane/simple.conf b/control_plane/simple.conf index 2243a0a5f8..925e2f14ee 100644 --- a/control_plane/simple.conf +++ b/control_plane/simple.conf @@ -9,3 +9,6 @@ auth_type = 'Trust' id = 1 pg_port = 5454 http_port = 7676 + +[etcd_broker] +broker_endpoints = ['http://127.0.0.1:2379'] diff --git a/control_plane/src/etcd.rs b/control_plane/src/etcd.rs new file mode 100644 index 0000000000..df657dd1be --- /dev/null +++ b/control_plane/src/etcd.rs @@ -0,0 +1,93 @@ +use std::{ + fs, + path::PathBuf, + process::{Command, Stdio}, +}; + +use anyhow::Context; +use nix::{ + sys::signal::{kill, Signal}, + unistd::Pid, +}; + +use crate::{local_env, read_pidfile}; + +pub fn start_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> { + let etcd_broker = &env.etcd_broker; + println!( + "Starting etcd broker using {}", + etcd_broker.etcd_binary_path.display() + ); + + let etcd_data_dir = env.base_data_dir.join("etcd"); + fs::create_dir_all(&etcd_data_dir).with_context(|| { + format!( + "Failed to create etcd data dir: {}", + etcd_data_dir.display() + ) + })?; + + let etcd_stdout_file = + fs::File::create(etcd_data_dir.join("etcd.stdout.log")).with_context(|| { + format!( + "Failed to create ectd stout file in directory {}", + etcd_data_dir.display() + ) + })?; + let etcd_stderr_file = + fs::File::create(etcd_data_dir.join("etcd.stderr.log")).with_context(|| { + format!( + "Failed to create ectd stderr file in directory {}", + etcd_data_dir.display() + ) + })?; + let client_urls = etcd_broker.comma_separated_endpoints(); + + let etcd_process = Command::new(&etcd_broker.etcd_binary_path) + .args(&[ + format!("--data-dir={}", etcd_data_dir.display()), + format!("--listen-client-urls={client_urls}"), + format!("--advertise-client-urls={client_urls}"), + ]) + .stdout(Stdio::from(etcd_stdout_file)) + .stderr(Stdio::from(etcd_stderr_file)) + .spawn() + .context("Failed to spawn etcd subprocess")?; + let pid = etcd_process.id(); + + let etcd_pid_file_path = etcd_pid_file_path(env); + fs::write(&etcd_pid_file_path, pid.to_string()).with_context(|| { + format!( + "Failed to create etcd pid file at {}", + etcd_pid_file_path.display() + ) + })?; + + Ok(()) +} + 
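
A hypothetical wiring sketch (not part of this diff) of how `LocalEnv::parse_config` and the new `control_plane::etcd` helpers are meant to fit together; `neon_local` does considerably more between these calls (repository init, starting the pageserver and safekeepers).

```rust
use control_plane::{etcd, local_env::LocalEnv};

// Hypothetical wiring only; error handling and storage-node startup are elided.
fn run_with_local_broker(config_toml: &str) -> anyhow::Result<()> {
    // parse_config() fills in defaults such as binary locations and tenant id.
    let env = LocalEnv::parse_config(config_toml)?;

    // Spawns `etcd` with --data-dir / --listen-client-urls / --advertise-client-urls
    // and records its pid in <base_data_dir>/etcd.pid.
    etcd::start_etcd_process(&env)?;

    // ... start storage nodes, run the workload ...

    // Reads the pid file back and sends SIGTERM (stop_etcd_process, just below).
    etcd::stop_etcd_process(&env)?;
    Ok(())
}
```
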
+pub fn stop_etcd_process(env: &local_env::LocalEnv) -> anyhow::Result<()> { + let etcd_path = &env.etcd_broker.etcd_binary_path; + println!("Stopping etcd broker at {}", etcd_path.display()); + + let etcd_pid_file_path = etcd_pid_file_path(env); + let pid = Pid::from_raw(read_pidfile(&etcd_pid_file_path).with_context(|| { + format!( + "Failed to read etcd pid filea at {}", + etcd_pid_file_path.display() + ) + })?); + + kill(pid, Signal::SIGTERM).with_context(|| { + format!( + "Failed to stop etcd with pid {pid} at {}", + etcd_pid_file_path.display() + ) + })?; + + Ok(()) +} + +fn etcd_pid_file_path(env: &local_env::LocalEnv) -> PathBuf { + env.base_data_dir.join("etcd.pid") +} diff --git a/control_plane/src/lib.rs b/control_plane/src/lib.rs index a2ecdd3d64..c3469c3350 100644 --- a/control_plane/src/lib.rs +++ b/control_plane/src/lib.rs @@ -12,6 +12,7 @@ use std::path::Path; use std::process::Command; pub mod compute; +pub mod etcd; pub mod local_env; pub mod postgresql_conf; pub mod safekeeper; diff --git a/control_plane/src/local_env.rs b/control_plane/src/local_env.rs index 5aeff505b6..c73af7d338 100644 --- a/control_plane/src/local_env.rs +++ b/control_plane/src/local_env.rs @@ -4,6 +4,7 @@ //! script which will use local paths. use anyhow::{bail, ensure, Context}; +use reqwest::Url; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; use std::collections::HashMap; @@ -59,13 +60,7 @@ pub struct LocalEnv { #[serde(default)] pub private_key_path: PathBuf, - // A comma separated broker (etcd) endpoints for storage nodes coordination, e.g. 'http://127.0.0.1:2379'. - #[serde(default)] - pub broker_endpoints: Option, - - /// A prefix to all to any key when pushing/polling etcd from a node. - #[serde(default)] - pub broker_etcd_prefix: Option, + pub etcd_broker: EtcdBroker, pub pageserver: PageServerConf, @@ -81,6 +76,62 @@ pub struct LocalEnv { branch_name_mappings: HashMap>, } +/// Etcd broker config for cluster internal communication. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)] +pub struct EtcdBroker { + /// A prefix to all to any key when pushing/polling etcd from a node. + #[serde(default)] + pub broker_etcd_prefix: Option, + + /// Broker (etcd) endpoints for storage nodes coordination, e.g. 'http://127.0.0.1:2379'. + #[serde(default)] + #[serde_as(as = "Vec")] + pub broker_endpoints: Vec, + + /// Etcd binary path to use. + #[serde(default)] + pub etcd_binary_path: PathBuf, +} + +impl EtcdBroker { + pub fn locate_etcd() -> anyhow::Result { + let which_output = Command::new("which") + .arg("etcd") + .output() + .context("Failed to run 'which etcd' command")?; + let stdout = String::from_utf8_lossy(&which_output.stdout); + ensure!( + which_output.status.success(), + "'which etcd' invocation failed. 
Status: {}, stdout: {stdout}, stderr: {}", + which_output.status, + String::from_utf8_lossy(&which_output.stderr) + ); + + let etcd_path = PathBuf::from(stdout.trim()); + ensure!( + etcd_path.is_file(), + "'which etcd' invocation was successful, but the path it returned is not a file or does not exist: {}", + etcd_path.display() + ); + + Ok(etcd_path) + } + + pub fn comma_separated_endpoints(&self) -> String { + self.broker_endpoints.iter().map(Url::as_str).fold( + String::new(), + |mut comma_separated_urls, url| { + if !comma_separated_urls.is_empty() { + comma_separated_urls.push(','); + } + comma_separated_urls.push_str(url); + comma_separated_urls + }, + ) + } +} + #[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)] #[serde(default)] pub struct PageServerConf { @@ -184,12 +235,7 @@ impl LocalEnv { if old_timeline_id == &timeline_id { Ok(()) } else { - bail!( - "branch '{}' is already mapped to timeline {}, cannot map to another timeline {}", - branch_name, - old_timeline_id, - timeline_id - ); + bail!("branch '{branch_name}' is already mapped to timeline {old_timeline_id}, cannot map to another timeline {timeline_id}"); } } else { existing_values.push((tenant_id, timeline_id)); @@ -225,7 +271,7 @@ impl LocalEnv { /// /// Unlike 'load_config', this function fills in any defaults that are missing /// from the config file. - pub fn create_config(toml: &str) -> anyhow::Result { + pub fn parse_config(toml: &str) -> anyhow::Result { let mut env: LocalEnv = toml::from_str(toml)?; // Find postgres binaries. @@ -238,26 +284,11 @@ impl LocalEnv { env.pg_distrib_dir = cwd.join("tmp_install") } } - if !env.pg_distrib_dir.join("bin/postgres").exists() { - bail!( - "Can't find postgres binary at {}", - env.pg_distrib_dir.display() - ); - } // Find zenith binaries. if env.zenith_distrib_dir == Path::new("") { env.zenith_distrib_dir = env::current_exe()?.parent().unwrap().to_owned(); } - for binary in ["pageserver", "safekeeper"] { - if !env.zenith_distrib_dir.join(binary).exists() { - bail!( - "Can't find binary '{}' in zenith distrib dir '{}'", - binary, - env.zenith_distrib_dir.display() - ); - } - } // If no initial tenant ID was given, generate it. if env.default_tenant_id.is_none() { @@ -351,6 +382,36 @@ impl LocalEnv { "directory '{}' already exists. 
Perhaps already initialized?", base_path.display() ); + if !self.pg_distrib_dir.join("bin/postgres").exists() { + bail!( + "Can't find postgres binary at {}", + self.pg_distrib_dir.display() + ); + } + for binary in ["pageserver", "safekeeper"] { + if !self.zenith_distrib_dir.join(binary).exists() { + bail!( + "Can't find binary '{}' in zenith distrib dir '{}'", + binary, + self.zenith_distrib_dir.display() + ); + } + } + + for binary in ["pageserver", "safekeeper"] { + if !self.zenith_distrib_dir.join(binary).exists() { + bail!( + "Can't find binary '{binary}' in zenith distrib dir '{}'", + self.zenith_distrib_dir.display() + ); + } + } + if !self.pg_distrib_dir.join("bin/postgres").exists() { + bail!( + "Can't find postgres binary at {}", + self.pg_distrib_dir.display() + ); + } fs::create_dir(&base_path)?; @@ -408,7 +469,35 @@ impl LocalEnv { fn base_path() -> PathBuf { match std::env::var_os("ZENITH_REPO_DIR") { - Some(val) => PathBuf::from(val.to_str().unwrap()), - None => ".zenith".into(), + Some(val) => PathBuf::from(val), + None => PathBuf::from(".zenith"), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn simple_conf_parsing() { + let simple_conf_toml = include_str!("../simple.conf"); + let simple_conf_parse_result = LocalEnv::parse_config(simple_conf_toml); + assert!( + simple_conf_parse_result.is_ok(), + "failed to parse simple config {simple_conf_toml}, reason: {simple_conf_parse_result:?}" + ); + + let string_to_replace = "broker_endpoints = ['http://127.0.0.1:2379']"; + let spoiled_url_str = "broker_endpoints = ['!@$XOXO%^&']"; + let spoiled_url_toml = simple_conf_toml.replace(string_to_replace, spoiled_url_str); + assert!( + spoiled_url_toml.contains(spoiled_url_str), + "Failed to replace string {string_to_replace} in the toml file {simple_conf_toml}" + ); + let spoiled_url_parse_result = LocalEnv::parse_config(&spoiled_url_toml); + assert!( + spoiled_url_parse_result.is_err(), + "expected toml with invalid Url {spoiled_url_toml} to fail the parsing, but got {spoiled_url_parse_result:?}" + ); } } diff --git a/control_plane/src/safekeeper.rs b/control_plane/src/safekeeper.rs index 074ee72f69..d5b6251209 100644 --- a/control_plane/src/safekeeper.rs +++ b/control_plane/src/safekeeper.rs @@ -52,7 +52,7 @@ impl ResponseErrorMessageExt for Response { Err(SafekeeperHttpError::Response( match self.json::() { Ok(err_body) => format!("Error: {}", err_body.msg), - Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url), + Err(_) => format!("Http error ({}) at {url}.", status.as_u16()), }, )) } @@ -75,17 +75,12 @@ pub struct SafekeeperNode { pub http_base_url: String, pub pageserver: Arc, - - broker_endpoints: Option, - broker_etcd_prefix: Option, } impl SafekeeperNode { pub fn from_env(env: &LocalEnv, conf: &SafekeeperConf) -> SafekeeperNode { let pageserver = Arc::new(PageServerNode::from_env(env)); - println!("initializing for sk {} for {}", conf.id, conf.http_port); - SafekeeperNode { id: conf.id, conf: conf.clone(), @@ -94,8 +89,6 @@ impl SafekeeperNode { http_client: Client::new(), http_base_url: format!("http://127.0.0.1:{}/v1", conf.http_port), pageserver, - broker_endpoints: env.broker_endpoints.clone(), - broker_etcd_prefix: env.broker_etcd_prefix.clone(), } } @@ -142,10 +135,12 @@ impl SafekeeperNode { if !self.conf.sync { cmd.arg("--no-sync"); } - if let Some(ref ep) = self.broker_endpoints { - cmd.args(&["--broker-endpoints", ep]); + + let comma_separated_endpoints = self.env.etcd_broker.comma_separated_endpoints(); + if 
!comma_separated_endpoints.is_empty() { + cmd.args(&["--broker-endpoints", &comma_separated_endpoints]); } - if let Some(prefix) = self.broker_etcd_prefix.as_deref() { + if let Some(prefix) = self.env.etcd_broker.broker_etcd_prefix.as_deref() { cmd.args(&["--broker-etcd-prefix", prefix]); } @@ -210,12 +205,13 @@ impl SafekeeperNode { let pid = Pid::from_raw(pid); let sig = if immediate { - println!("Stop safekeeper immediately"); + print!("Stopping safekeeper {} immediately..", self.id); Signal::SIGQUIT } else { - println!("Stop safekeeper gracefully"); + print!("Stopping safekeeper {} gracefully..", self.id); Signal::SIGTERM }; + io::stdout().flush().unwrap(); match kill(pid, sig) { Ok(_) => (), Err(Errno::ESRCH) => { @@ -237,25 +233,35 @@ impl SafekeeperNode { // TODO Remove this "timeout" and handle it on caller side instead. // Shutting down may take a long time, // if safekeeper flushes a lot of data + let mut tcp_stopped = false; for _ in 0..100 { - if let Err(_e) = TcpStream::connect(&address) { - println!("Safekeeper stopped receiving connections"); - - //Now check status - match self.check_status() { - Ok(_) => { - println!("Safekeeper status is OK. Wait a bit."); - thread::sleep(Duration::from_secs(1)); - } - Err(err) => { - println!("Safekeeper status is: {}", err); - return Ok(()); + if !tcp_stopped { + if let Err(err) = TcpStream::connect(&address) { + tcp_stopped = true; + if err.kind() != io::ErrorKind::ConnectionRefused { + eprintln!("\nSafekeeper connection failed with error: {err}"); } } - } else { - println!("Safekeeper still receives connections"); - thread::sleep(Duration::from_secs(1)); } + if tcp_stopped { + // Also check status on the HTTP port + match self.check_status() { + Err(SafekeeperHttpError::Transport(err)) if err.is_connect() => { + println!("done!"); + return Ok(()); + } + Err(err) => { + eprintln!("\nSafekeeper status check failed with error: {err}"); + return Ok(()); + } + Ok(()) => { + // keep waiting + } + } + } + print!("."); + io::stdout().flush().unwrap(); + thread::sleep(Duration::from_secs(1)); } bail!("Failed to stop safekeeper with pid {}", pid); diff --git a/control_plane/src/storage.rs b/control_plane/src/storage.rs index d2e63a22de..355c7c250d 100644 --- a/control_plane/src/storage.rs +++ b/control_plane/src/storage.rs @@ -121,6 +121,16 @@ impl PageServerNode { ); let listen_pg_addr_param = format!("listen_pg_addr='{}'", self.env.pageserver.listen_pg_addr); + let broker_endpoints_param = format!( + "broker_endpoints=[{}]", + self.env + .etcd_broker + .broker_endpoints + .iter() + .map(|url| format!("'{url}'")) + .collect::>() + .join(",") + ); let mut args = Vec::with_capacity(20); args.push("--init"); @@ -129,8 +139,19 @@ impl PageServerNode { args.extend(["-c", &authg_type_param]); args.extend(["-c", &listen_http_addr_param]); args.extend(["-c", &listen_pg_addr_param]); + args.extend(["-c", &broker_endpoints_param]); args.extend(["-c", &id]); + let broker_etcd_prefix_param = self + .env + .etcd_broker + .broker_etcd_prefix + .as_ref() + .map(|prefix| format!("broker_etcd_prefix='{prefix}'")); + if let Some(broker_etcd_prefix_param) = broker_etcd_prefix_param.as_deref() { + args.extend(["-c", broker_etcd_prefix_param]); + } + for config_override in config_overrides { args.extend(["-c", config_override]); } @@ -260,12 +281,13 @@ impl PageServerNode { let pid = Pid::from_raw(read_pidfile(&pid_file)?); let sig = if immediate { - println!("Stop pageserver immediately"); + print!("Stopping pageserver immediately.."); Signal::SIGQUIT } else { - 
println!("Stop pageserver gracefully"); + print!("Stopping pageserver gracefully.."); Signal::SIGTERM }; + io::stdout().flush().unwrap(); match kill(pid, sig) { Ok(_) => (), Err(Errno::ESRCH) => { @@ -287,25 +309,36 @@ impl PageServerNode { // TODO Remove this "timeout" and handle it on caller side instead. // Shutting down may take a long time, // if pageserver checkpoints a lot of data + let mut tcp_stopped = false; for _ in 0..100 { - if let Err(_e) = TcpStream::connect(&address) { - println!("Pageserver stopped receiving connections"); - - //Now check status - match self.check_status() { - Ok(_) => { - println!("Pageserver status is OK. Wait a bit."); - thread::sleep(Duration::from_secs(1)); - } - Err(err) => { - println!("Pageserver status is: {}", err); - return Ok(()); + if !tcp_stopped { + if let Err(err) = TcpStream::connect(&address) { + tcp_stopped = true; + if err.kind() != io::ErrorKind::ConnectionRefused { + eprintln!("\nPageserver connection failed with error: {err}"); } } - } else { - println!("Pageserver still receives connections"); - thread::sleep(Duration::from_secs(1)); } + if tcp_stopped { + // Also check status on the HTTP port + + match self.check_status() { + Err(PageserverHttpError::Transport(err)) if err.is_connect() => { + println!("done!"); + return Ok(()); + } + Err(err) => { + eprintln!("\nPageserver status check failed with error: {err}"); + return Ok(()); + } + Ok(()) => { + // keep waiting + } + } + } + print!("."); + io::stdout().flush().unwrap(); + thread::sleep(Duration::from_secs(1)); } bail!("Failed to stop pageserver with pid {}", pid); diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index 93bb5f9cd7..6bcbc76551 100755 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -1,13 +1,20 @@ #!/bin/sh set -eux +broker_endpoints_param="${BROKER_ENDPOINT:-absent}" +if [ "$broker_endpoints_param" != "absent" ]; then + broker_endpoints_param="-c broker_endpoints=['$broker_endpoints_param']" +else + broker_endpoints_param='' +fi + if [ "$1" = 'pageserver' ]; then if [ ! -d "/data/tenants" ]; then echo "Initializing pageserver data directory" - pageserver --init -D /data -c "pg_distrib_dir='/usr/local'" -c "id=10" + pageserver --init -D /data -c "pg_distrib_dir='/usr/local'" -c "id=10" $broker_endpoints_param fi echo "Staring pageserver at 0.0.0.0:6400" - pageserver -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -D /data + pageserver -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" $broker_endpoints_param -D /data else "$@" fi diff --git a/docs/docker.md b/docs/docker.md index cc54d012dd..100cdd248b 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -1,20 +1,20 @@ -# Docker images of Zenith +# Docker images of Neon ## Images Currently we build two main images: -- [zenithdb/zenith](https://hub.docker.com/repository/docker/zenithdb/zenith) — image with pre-built `pageserver`, `safekeeper` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile). -- [zenithdb/compute-node](https://hub.docker.com/repository/docker/zenithdb/compute-node) — compute node image with pre-built Postgres binaries from [zenithdb/postgres](https://github.com/zenithdb/postgres). +- [neondatabase/neon](https://hub.docker.com/repository/docker/zenithdb/zenith) — image with pre-built `pageserver`, `safekeeper` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile). 
+- [neondatabase/compute-node](https://hub.docker.com/repository/docker/zenithdb/compute-node) — compute node image with pre-built Postgres binaries from [neondatabase/postgres](https://github.com/neondatabase/postgres). -And additional intermediate images: +And additional intermediate image: -- [zenithdb/compute-tools](https://hub.docker.com/repository/docker/zenithdb/compute-tools) — compute node configuration management tools. +- [neondatabase/compute-tools](https://hub.docker.com/repository/docker/neondatabase/compute-tools) — compute node configuration management tools. ## Building pipeline -1. Image `zenithdb/compute-tools` is re-built automatically. +We build all images after a successful `release` tests run and push automatically to Docker Hub with two parallel CI jobs -2. Image `zenithdb/compute-node` is built independently in the [zenithdb/postgres](https://github.com/zenithdb/postgres) repo. +1. `neondatabase/compute-tools` and `neondatabase/compute-node` -3. Image `zenithdb/zenith` is built in this repo after a successful `release` tests run and pushed to Docker Hub automatically. +2. `neondatabase/neon` diff --git a/docs/glossary.md b/docs/glossary.md index ecc57b9ed1..a014446010 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -21,7 +21,7 @@ NOTE:It has nothing to do with PostgreSQL pg_basebackup. ### Branch -We can create branch at certain LSN using `zenith timeline branch` command. +We can create branch at certain LSN using `neon_local timeline branch` command. Each Branch lives in a corresponding timeline[] and has an ancestor[]. @@ -91,7 +91,7 @@ The layer map tracks what layers exist in a timeline. ### Layered repository -Zenith repository implementation that keeps data in layers. +Neon repository implementation that keeps data in layers. ### LSN The Log Sequence Number (LSN) is a unique identifier of the WAL record[] in the WAL log. @@ -101,7 +101,7 @@ It is printed as two hexadecimal numbers of up to 8 digits each, separated by a Check also [PostgreSQL doc about pg_lsn type](https://www.postgresql.org/docs/devel/datatype-pg-lsn.html) Values can be compared to calculate the volume of WAL data that separates them, so they are used to measure the progress of replication and recovery. -In postgres and Zenith lsns are used to describe certain points in WAL handling. +In Postgres and Neon LSNs are used to describe certain points in WAL handling. PostgreSQL LSNs and functions to monitor them: * `pg_current_wal_insert_lsn()` - Returns the current write-ahead log insert location. @@ -111,13 +111,13 @@ PostgreSQL LSNs and functions to monitor them: * `pg_last_wal_replay_lsn ()` - Returns the last write-ahead log location that has been replayed during recovery. If recovery is still in progress this will increase monotonically. [source PostgreSQL documentation](https://www.postgresql.org/docs/devel/functions-admin.html): -Zenith safekeeper LSNs. For more check [safekeeper/README_PROTO.md](/safekeeper/README_PROTO.md) +Neon safekeeper LSNs. For more check [safekeeper/README_PROTO.md](/safekeeper/README_PROTO.md) * `CommitLSN`: position in WAL confirmed by quorum safekeepers. * `RestartLSN`: position in WAL confirmed by all safekeepers. * `FlushLSN`: part of WAL persisted to the disk by safekeeper. * `VCL`: the largerst LSN for which we can guarantee availablity of all prior records. -Zenith pageserver LSNs: +Neon pageserver LSNs: * `last_record_lsn` - the end of last processed WAL record. 
* `disk_consistent_lsn` - data is known to be fully flushed and fsync'd to local disk on pageserver up to this LSN. * `remote_consistent_lsn` - The last LSN that is synced to remote storage and is guaranteed to survive pageserver crash. @@ -132,7 +132,7 @@ This is the unit of data exchange between compute node and pageserver. ### Pageserver -Zenith storage engine: repositories + wal receiver + page service + wal redo. +Neon storage engine: repositories + wal receiver + page service + wal redo. ### Page service @@ -184,10 +184,10 @@ relation exceeds that size, it is split into multiple segments. SLRUs include pg_clog, pg_multixact/members, and pg_multixact/offsets. There are other SLRUs in PostgreSQL, but they don't need to be stored permanently (e.g. pg_subtrans), -or we do not support them in zenith yet (pg_commit_ts). +or we do not support them in neon yet (pg_commit_ts). ### Tenant (Multitenancy) -Tenant represents a single customer, interacting with Zenith. +Tenant represents a single customer, interacting with Neon. Wal redo[] activity, timelines[], layers[] are managed for each tenant independently. One pageserver[] can serve multiple tenants at once. One safekeeper diff --git a/docs/rfcs/004-durability.md b/docs/rfcs/004-durability.md index 4543be3dae..d4716156d1 100644 --- a/docs/rfcs/004-durability.md +++ b/docs/rfcs/004-durability.md @@ -22,7 +22,7 @@ In addition to the WAL safekeeper nodes, the WAL is archived in S3. WAL that has been archived to S3 can be removed from the safekeepers, so the safekeepers don't need a lot of disk space. - +``` +----------------+ +-----> | WAL safekeeper | | +----------------+ @@ -42,23 +42,23 @@ safekeepers, so the safekeepers don't need a lot of disk space. \ \ \ - \ +--------+ - \ | | - +--> | S3 | - | | - +--------+ - + \ +--------+ + \ | | + +------> | S3 | + | | + +--------+ +``` Every WAL safekeeper holds a section of WAL, and a VCL value. The WAL can be divided into three portions: - +``` VCL LSN | | V V .................ccccccccccccccccccccXXXXXXXXXXXXXXXXXXXXXXX Archived WAL Completed WAL In-flight WAL - +``` Note that all this WAL kept in a safekeeper is a contiguous section. This is different from Aurora: In Aurora, there can be holes in the diff --git a/docs/settings.md b/docs/settings.md index 017d349bb6..9564ef626f 100644 --- a/docs/settings.md +++ b/docs/settings.md @@ -25,10 +25,14 @@ max_file_descriptors = '100' # initial superuser role name to use when creating a new tenant initial_superuser_name = 'zenith_admin' +broker_etcd_prefix = 'neon' +broker_endpoints = ['some://etcd'] + # [remote_storage] ``` -The config above shows default values for all basic pageserver settings. +The config above shows default values for all basic pageserver settings, besides `broker_endpoints`: that one has to be set by the user, +see the corresponding section below. Pageserver uses default values for all files that are missing in the config, so it's not a hard error to leave the config blank. Yet, it validates the config values it can (e.g. postgres install dir) and errors if the validation fails, refusing to start. @@ -46,6 +50,17 @@ Example: `${PAGESERVER_BIN} -c "checkpoint_period = '100 s'" -c "remote_storage= Note that TOML distinguishes between strings and integers, the former require single or double quotes around them. +#### broker_endpoints + +A list of endpoints (etcd currently) to connect and pull the information from. 
+Mandatory, does not have a default, since requires etcd to be started as a separate process, +and its connection url should be specified separately. + +#### broker_etcd_prefix + +A prefix to add for every etcd key used, to separate one group of related instances from another, in the same cluster. +Default is `neon`. + #### checkpoint_distance `checkpoint_distance` is the amount of incoming WAL that is held in diff --git a/docs/sourcetree.md b/docs/sourcetree.md index 5ddc6208d2..c8d4baff62 100644 --- a/docs/sourcetree.md +++ b/docs/sourcetree.md @@ -91,18 +91,22 @@ so manual installation of dependencies is not recommended. A single virtual environment with all dependencies is described in the single `Pipfile`. ### Prerequisites -- Install Python 3.7 (the minimal supported version) or greater. +- Install Python 3.9 (the minimal supported version) or greater. - Our setup with poetry should work with newer python versions too. So feel free to open an issue with a `c/test-runner` label if something doesnt work as expected. - - If you have some trouble with other version you can resolve it by installing Python 3.7 separately, via pyenv or via system package manager e.g.: + - If you have some trouble with other version you can resolve it by installing Python 3.9 separately, via [pyenv](https://github.com/pyenv/pyenv) or via system package manager e.g.: ```bash # In Ubuntu sudo add-apt-repository ppa:deadsnakes/ppa sudo apt update - sudo apt install python3.7 + sudo apt install python3.9 ``` - Install `poetry` - Exact version of `poetry` is not important, see installation instructions available at poetry's [website](https://python-poetry.org/docs/#installation)`. -- Install dependencies via `./scripts/pysync`. Note that CI uses Python 3.7 so if you have different version some linting tools can yield different result locally vs in the CI. +- Install dependencies via `./scripts/pysync`. + - Note that CI uses specific Python version (look for `PYTHON_VERSION` [here](https://github.com/neondatabase/docker-images/blob/main/rust/Dockerfile)) + so if you have different version some linting tools can yield different result locally vs in the CI. + - You can explicitly specify which Python to use by running `poetry env use /path/to/python`, e.g. `poetry env use python3.9`. + This may also disable the `The currently activated Python version X.Y.Z is not supported by the project` warning. Run `poetry shell` to activate the virtual environment. Alternatively, use `poetry run` to run a single command in the venv, e.g. `poetry run pytest`. diff --git a/libs/etcd_broker/src/lib.rs b/libs/etcd_broker/src/lib.rs index 01cc0cf162..76181f9ba1 100644 --- a/libs/etcd_broker/src/lib.rs +++ b/libs/etcd_broker/src/lib.rs @@ -19,6 +19,10 @@ use utils::{ zid::{ZNodeId, ZTenantId, ZTenantTimelineId}, }; +/// Default value to use for prefixing to all etcd keys with. +/// This way allows isolating safekeeper/pageserver groups in the same etcd cluster. +pub const DEFAULT_NEON_BROKER_ETCD_PREFIX: &str = "neon"; + #[derive(Debug, Deserialize, Serialize)] struct SafekeeperTimeline { safekeeper_id: ZNodeId, @@ -51,7 +55,7 @@ pub struct SkTimelineInfo { #[serde(default)] pub peer_horizon_lsn: Option, #[serde(default)] - pub wal_stream_connection_string: Option, + pub safekeeper_connection_string: Option, } #[derive(Debug, thiserror::Error)] @@ -104,28 +108,28 @@ impl SkTimelineSubscription { /// The subscription kind to the timeline updates from safekeeper. 
#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct SkTimelineSubscriptionKind { - broker_prefix: String, + broker_etcd_prefix: String, kind: SubscriptionKind, } impl SkTimelineSubscriptionKind { - pub fn all(broker_prefix: String) -> Self { + pub fn all(broker_etcd_prefix: String) -> Self { Self { - broker_prefix, + broker_etcd_prefix, kind: SubscriptionKind::All, } } - pub fn tenant(broker_prefix: String, tenant: ZTenantId) -> Self { + pub fn tenant(broker_etcd_prefix: String, tenant: ZTenantId) -> Self { Self { - broker_prefix, + broker_etcd_prefix, kind: SubscriptionKind::Tenant(tenant), } } - pub fn timeline(broker_prefix: String, timeline: ZTenantTimelineId) -> Self { + pub fn timeline(broker_etcd_prefix: String, timeline: ZTenantTimelineId) -> Self { Self { - broker_prefix, + broker_etcd_prefix, kind: SubscriptionKind::Timeline(timeline), } } @@ -134,12 +138,12 @@ impl SkTimelineSubscriptionKind { match self.kind { SubscriptionKind::All => Regex::new(&format!( r"^{}/([[:xdigit:]]+)/([[:xdigit:]]+)/safekeeper/([[:digit:]])$", - self.broker_prefix + self.broker_etcd_prefix )) .expect("wrong regex for 'everything' subscription"), SubscriptionKind::Tenant(tenant_id) => Regex::new(&format!( r"^{}/{tenant_id}/([[:xdigit:]]+)/safekeeper/([[:digit:]])$", - self.broker_prefix + self.broker_etcd_prefix )) .expect("wrong regex for 'tenant' subscription"), SubscriptionKind::Timeline(ZTenantTimelineId { @@ -147,7 +151,7 @@ impl SkTimelineSubscriptionKind { timeline_id, }) => Regex::new(&format!( r"^{}/{tenant_id}/{timeline_id}/safekeeper/([[:digit:]])$", - self.broker_prefix + self.broker_etcd_prefix )) .expect("wrong regex for 'timeline' subscription"), } @@ -156,16 +160,16 @@ impl SkTimelineSubscriptionKind { /// Etcd key to use for watching a certain timeline updates from safekeepers. pub fn watch_key(&self) -> String { match self.kind { - SubscriptionKind::All => self.broker_prefix.to_string(), + SubscriptionKind::All => self.broker_etcd_prefix.to_string(), SubscriptionKind::Tenant(tenant_id) => { - format!("{}/{tenant_id}/safekeeper", self.broker_prefix) + format!("{}/{tenant_id}/safekeeper", self.broker_etcd_prefix) } SubscriptionKind::Timeline(ZTenantTimelineId { tenant_id, timeline_id, }) => format!( "{}/{tenant_id}/{timeline_id}/safekeeper", - self.broker_prefix + self.broker_etcd_prefix ), } } @@ -217,16 +221,22 @@ pub async fn subscribe_to_safekeeper_timeline_updates( break; } - let mut timeline_updates: HashMap> = - HashMap::new(); + let mut timeline_updates: HashMap> = HashMap::new(); + // Keep track that the timeline data updates from etcd arrive in the right order. + // https://etcd.io/docs/v3.5/learning/api_guarantees/#isolation-level-and-consistency-of-replicas + // > etcd does not ensure linearizability for watch operations. Users are expected to verify the revision of watch responses to ensure correct ordering. 
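
That caveat is the reason for the per-key version bookkeeping that follows. A detached sketch of the rule, with invented names and no etcd types involved: an update is applied only when its version is strictly newer than the last one seen for that key, mirroring the `timeline_etcd_versions` map below.

```rust
use std::collections::HashMap;
use std::hash::Hash;

// Illustration only: keep the latest value per key, ordered by an etcd-style version.
fn apply_if_newer<K: Eq + Hash + Clone, V>(
    seen_versions: &mut HashMap<K, i64>,
    latest: &mut HashMap<K, V>,
    key: K,
    version: i64,
    value: V,
) {
    let last_seen = seen_versions.get(&key).copied().unwrap_or(i64::MIN);
    if version > last_seen {
        seen_versions.insert(key.clone(), version);
        latest.insert(key, value);
    }
}
```
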
+ let mut timeline_etcd_versions: HashMap = HashMap::new(); + let events = resp.events(); debug!("Processing {} events", events.len()); for event in events { if EventType::Put == event.event_type() { - if let Some(kv) = event.kv() { - match parse_etcd_key_value(subscription_kind, ®ex, kv) { + if let Some(new_etcd_kv) = event.kv() { + let new_kv_version = new_etcd_kv.version(); + + match parse_etcd_key_value(subscription_kind, ®ex, new_etcd_kv) { Ok(Some((zttid, timeline))) => { match timeline_updates .entry(zttid) @@ -234,12 +244,15 @@ pub async fn subscribe_to_safekeeper_timeline_updates( .entry(timeline.safekeeper_id) { hash_map::Entry::Occupied(mut o) => { - if o.get().flush_lsn < timeline.info.flush_lsn { + let old_etcd_kv_version = timeline_etcd_versions.get(&zttid).copied().unwrap_or(i64::MIN); + if old_etcd_kv_version < new_kv_version { o.insert(timeline.info); + timeline_etcd_versions.insert(zttid,new_kv_version); } } hash_map::Entry::Vacant(v) => { v.insert(timeline.info); + timeline_etcd_versions.insert(zttid,new_kv_version); } } } diff --git a/libs/metrics/Cargo.toml b/libs/metrics/Cargo.toml index 3b6ff4691d..8ff5d1d421 100644 --- a/libs/metrics/Cargo.toml +++ b/libs/metrics/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -prometheus = {version = "0.13", default_features=false} # removes protobuf dependency +prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency libc = "0.2" lazy_static = "1.4" once_cell = "1.8.0" diff --git a/libs/metrics/src/lib.rs b/libs/metrics/src/lib.rs index 8756a078c3..9929fc6d45 100644 --- a/libs/metrics/src/lib.rs +++ b/libs/metrics/src/lib.rs @@ -3,7 +3,6 @@ //! Otherwise, we might not see all metrics registered via //! a default registry. use lazy_static::lazy_static; -use once_cell::race::OnceBox; pub use prometheus::{exponential_buckets, linear_buckets}; pub use prometheus::{register_gauge, Gauge}; pub use prometheus::{register_gauge_vec, GaugeVec}; @@ -27,48 +26,15 @@ pub fn gather() -> Vec { prometheus::gather() } -static COMMON_METRICS_PREFIX: OnceBox<&str> = OnceBox::new(); - -/// Sets a prefix which will be used for all common metrics, typically a service -/// name like 'pageserver'. Should be executed exactly once in the beginning of -/// any executable which uses common metrics. -pub fn set_common_metrics_prefix(prefix: &'static str) { - // Not unwrap() because metrics may be initialized after multiple threads have been started. - COMMON_METRICS_PREFIX - .set(prefix.into()) - .unwrap_or_else(|_| { - eprintln!( - "set_common_metrics_prefix() was called second time with '{}', exiting", - prefix - ); - std::process::exit(1); - }); -} - -/// Prepends a prefix to a common metric name so they are distinguished between -/// different services, see -/// A call to set_common_metrics_prefix() is necessary prior to calling this. -pub fn new_common_metric_name(unprefixed_metric_name: &str) -> String { - // Not unwrap() because metrics may be initialized after multiple threads have been started. - format!( - "{}_{}", - COMMON_METRICS_PREFIX.get().unwrap_or_else(|| { - eprintln!("set_common_metrics_prefix() was not called, but metrics are used, exiting"); - std::process::exit(1); - }), - unprefixed_metric_name - ) -} - lazy_static! 
{ static ref DISK_IO_BYTES: IntGaugeVec = register_int_gauge_vec!( - new_common_metric_name("disk_io_bytes"), + "libmetrics_disk_io_bytes_total", "Bytes written and read from disk, grouped by the operation (read|write)", &["io_operation"] ) .expect("Failed to register disk i/o bytes int gauge vec"); static ref MAXRSS_KB: IntGauge = register_int_gauge!( - new_common_metric_name("maxrss_kb"), + "libmetrics_maxrss_kb", "Memory usage (Maximum Resident Set Size)" ) .expect("Failed to register maxrss_kb int gauge"); diff --git a/libs/postgres_ffi/Cargo.toml b/libs/postgres_ffi/Cargo.toml index 7be5ca1b93..129c93cf6d 100644 --- a/libs/postgres_ffi/Cargo.toml +++ b/libs/postgres_ffi/Cargo.toml @@ -20,5 +20,10 @@ serde = { version = "1.0", features = ["derive"] } utils = { path = "../utils" } workspace_hack = { version = "0.1", path = "../../workspace_hack" } +[dev-dependencies] +env_logger = "0.9" +postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +wal_generate = { path = "wal_generate" } + [build-dependencies] bindgen = "0.59.1" diff --git a/libs/postgres_ffi/src/lib.rs b/libs/postgres_ffi/src/lib.rs index 923fbe4d5a..28d9a13dbf 100644 --- a/libs/postgres_ffi/src/lib.rs +++ b/libs/postgres_ffi/src/lib.rs @@ -8,6 +8,7 @@ #![allow(deref_nullptr)] use serde::{Deserialize, Serialize}; +use utils::lsn::Lsn; include!(concat!(env!("OUT_DIR"), "/bindings.rs")); @@ -37,3 +38,21 @@ pub const fn transaction_id_precedes(id1: TransactionId, id2: TransactionId) -> let diff = id1.wrapping_sub(id2) as i32; diff < 0 } + +// Check if page is not yet initialized (port of Postgres PageIsInit() macro) +pub fn page_is_new(pg: &[u8]) -> bool { + pg[14] == 0 && pg[15] == 0 // pg_upper == 0 +} + +// ExtractLSN from page header +pub fn page_get_lsn(pg: &[u8]) -> Lsn { + Lsn( + ((u32::from_le_bytes(pg[0..4].try_into().unwrap()) as u64) << 32) + | u32::from_le_bytes(pg[4..8].try_into().unwrap()) as u64, + ) +} + +pub fn page_set_lsn(pg: &mut [u8], lsn: Lsn) { + pg[0..4].copy_from_slice(&((lsn.0 >> 32) as u32).to_le_bytes()); + pg[4..8].copy_from_slice(&(lsn.0 as u32).to_le_bytes()); +} diff --git a/libs/postgres_ffi/src/xlog_utils.rs b/libs/postgres_ffi/src/xlog_utils.rs index 7882058868..32a3022c5a 100644 --- a/libs/postgres_ffi/src/xlog_utils.rs +++ b/libs/postgres_ffi/src/xlog_utils.rs @@ -15,7 +15,7 @@ use crate::XLogPageHeaderData; use crate::XLogRecord; use crate::XLOG_PAGE_MAGIC; -use anyhow::bail; +use anyhow::{bail, ensure}; use byteorder::{ByteOrder, LittleEndian}; use bytes::BytesMut; use bytes::{Buf, Bytes}; @@ -30,6 +30,7 @@ use std::path::{Path, PathBuf}; use std::time::SystemTime; use utils::bin_ser::DeserializeError; use utils::bin_ser::SerializeError; +use utils::const_assert; use utils::lsn::Lsn; pub const XLOG_FNAME_LEN: usize = 24; @@ -149,8 +150,9 @@ fn find_end_of_wal_segment( ) -> anyhow::Result { // step back to the beginning of the page to read it in... 
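
A quick test-style sketch (not from the repo) exercising the new page helpers added to `libs/postgres_ffi/src/lib.rs` above; the 8 KiB page size and the LSN value are arbitrary for the example.

```rust
use postgres_ffi::{page_get_lsn, page_is_new, page_set_lsn};
use utils::lsn::Lsn;

// An all-zero page has pd_upper == 0, so page_is_new() reports it as uninitialized;
// the LSN helpers round-trip the value through the first 8 bytes of the page header.
fn page_helpers_example() {
    let mut page = vec![0u8; 8192];
    assert!(page_is_new(&page));

    let lsn = Lsn(0x1_2345_6789);
    page_set_lsn(&mut page, lsn);
    assert_eq!(page_get_lsn(&page), lsn);
}
```
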
let mut offs: usize = start_offset - start_offset % XLOG_BLCKSZ; + let mut skipping_first_contrecord: bool = false; let mut contlen: usize = 0; - let mut wal_crc: u32 = 0; + let mut xl_crc: u32 = 0; let mut crc: u32 = 0; let mut rec_offs: usize = 0; let mut buf = [0u8; XLOG_BLCKSZ]; @@ -158,11 +160,15 @@ fn find_end_of_wal_segment( let mut last_valid_rec_pos: usize = start_offset; // assume at given start_offset begins new record let mut file = File::open(data_dir.join(file_name.clone() + ".partial")).unwrap(); file.seek(SeekFrom::Start(offs as u64))?; + // xl_crc is the last field in XLogRecord, will not be read into rec_hdr + const_assert!(XLOG_RECORD_CRC_OFFS + 4 == XLOG_SIZE_OF_XLOG_RECORD); let mut rec_hdr = [0u8; XLOG_RECORD_CRC_OFFS]; + trace!("find_end_of_wal_segment(data_dir={}, segno={}, tli={}, wal_seg_size={}, start_offset=0x{:x})", data_dir.display(), segno, tli, wal_seg_size, start_offset); while offs < wal_seg_size { // we are at the beginning of the page; read it in if offs % XLOG_BLCKSZ == 0 { + trace!("offs=0x{:x}: new page", offs); let bytes_read = file.read(&mut buf)?; if bytes_read != buf.len() { bail!( @@ -176,30 +182,49 @@ fn find_end_of_wal_segment( let xlp_magic = LittleEndian::read_u16(&buf[0..2]); let xlp_info = LittleEndian::read_u16(&buf[2..4]); let xlp_rem_len = LittleEndian::read_u32(&buf[XLP_REM_LEN_OFFS..XLP_REM_LEN_OFFS + 4]); + trace!( + " xlp_magic=0x{:x}, xlp_info=0x{:x}, xlp_rem_len={}", + xlp_magic, + xlp_info, + xlp_rem_len + ); // this is expected in current usage when valid WAL starts after page header if xlp_magic != XLOG_PAGE_MAGIC as u16 { trace!( - "invalid WAL file {}.partial magic {} at {:?}", + " invalid WAL file {}.partial magic {} at {:?}", file_name, xlp_magic, Lsn(XLogSegNoOffsetToRecPtr(segno, offs as u32, wal_seg_size)), ); } if offs == 0 { - offs = XLOG_SIZE_OF_XLOG_LONG_PHD; + offs += XLOG_SIZE_OF_XLOG_LONG_PHD; if (xlp_info & XLP_FIRST_IS_CONTRECORD) != 0 { - offs += ((xlp_rem_len + 7) & !7) as usize; + trace!(" first record is contrecord"); + skipping_first_contrecord = true; + contlen = xlp_rem_len as usize; + if offs < start_offset { + // Pre-condition failed: the beginning of the segment is unexpectedly corrupted. + ensure!(start_offset - offs >= contlen, + "start_offset is in the middle of the first record (which happens to be a contrecord), \ + expected to be on a record boundary. Is beginning of the segment corrupted?"); + contlen = 0; + // keep skipping_first_contrecord to avoid counting the contrecord as valid, we did not check it. + } + } else { + trace!(" first record is not contrecord"); } } else { offs += XLOG_SIZE_OF_XLOG_SHORT_PHD; } // ... 
and step forward again if asked + trace!(" skipped header to 0x{:x}", offs); offs = max(offs, start_offset); - // beginning of the next record } else if contlen == 0 { let page_offs = offs % XLOG_BLCKSZ; let xl_tot_len = LittleEndian::read_u32(&buf[page_offs..page_offs + 4]) as usize; + trace!("offs=0x{:x}: new record, xl_tot_len={}", offs, xl_tot_len); if xl_tot_len == 0 { info!( "find_end_of_wal_segment reached zeros at {:?}, last records ends at {:?}", @@ -212,10 +237,25 @@ fn find_end_of_wal_segment( ); break; // zeros, reached the end } - last_valid_rec_pos = offs; + if skipping_first_contrecord { + skipping_first_contrecord = false; + trace!(" first contrecord has been just completed"); + } else { + trace!( + " updating last_valid_rec_pos: 0x{:x} --> 0x{:x}", + last_valid_rec_pos, + offs + ); + last_valid_rec_pos = offs; + } offs += 4; rec_offs = 4; contlen = xl_tot_len - 4; + trace!( + " reading rec_hdr[0..4] <-- [0x{:x}; 0x{:x})", + page_offs, + page_offs + 4 + ); rec_hdr[0..4].copy_from_slice(&buf[page_offs..page_offs + 4]); } else { // we're continuing a record, possibly from previous page. @@ -224,42 +264,118 @@ fn find_end_of_wal_segment( // read the rest of the record, or as much as fits on this page. let n = min(contlen, pageleft); - // fill rec_hdr (header up to (but not including) xl_crc field) + trace!( + "offs=0x{:x}, record continuation, pageleft={}, contlen={}", + offs, + pageleft, + contlen + ); + // fill rec_hdr header up to (but not including) xl_crc field + trace!( + " rec_offs={}, XLOG_RECORD_CRC_OFFS={}, XLOG_SIZE_OF_XLOG_RECORD={}", + rec_offs, + XLOG_RECORD_CRC_OFFS, + XLOG_SIZE_OF_XLOG_RECORD + ); if rec_offs < XLOG_RECORD_CRC_OFFS { let len = min(XLOG_RECORD_CRC_OFFS - rec_offs, n); + trace!( + " reading rec_hdr[{}..{}] <-- [0x{:x}; 0x{:x})", + rec_offs, + rec_offs + len, + page_offs, + page_offs + len + ); rec_hdr[rec_offs..rec_offs + len].copy_from_slice(&buf[page_offs..page_offs + len]); } if rec_offs <= XLOG_RECORD_CRC_OFFS && rec_offs + n >= XLOG_SIZE_OF_XLOG_RECORD { let crc_offs = page_offs - rec_offs + XLOG_RECORD_CRC_OFFS; - wal_crc = LittleEndian::read_u32(&buf[crc_offs..crc_offs + 4]); + // All records are aligned on 8-byte boundary, so their 8-byte frames + // cannot be split between pages. As xl_crc is the last field, + // its content is always on the same page. + const_assert!(XLOG_RECORD_CRC_OFFS % 8 == 4); + // We should always start reading aligned records even in incorrect WALs so if + // the condition is false it is likely a bug. However, it is localized somewhere + // in this function, hence we do not crash and just report failure instead. + ensure!(crc_offs % 8 == 4, "Record is not aligned properly (bug?)"); + xl_crc = LittleEndian::read_u32(&buf[crc_offs..crc_offs + 4]); + trace!( + " reading xl_crc: [0x{:x}; 0x{:x}) = 0x{:x}", + crc_offs, + crc_offs + 4, + xl_crc + ); crc = crc32c_append(0, &buf[crc_offs + 4..page_offs + n]); - } else { - crc ^= 0xFFFFFFFFu32; + trace!( + " initializing crc: [0x{:x}; 0x{:x}); crc = 0x{:x}", + crc_offs + 4, + page_offs + n, + crc + ); + } else if rec_offs > XLOG_RECORD_CRC_OFFS { + // As all records are 8-byte aligned, the header is already fully read and `crc` is initialized in the branch above. 
+ ensure!(rec_offs >= XLOG_SIZE_OF_XLOG_RECORD); + let old_crc = crc; crc = crc32c_append(crc, &buf[page_offs..page_offs + n]); + trace!( + " appending to crc: [0x{:x}; 0x{:x}); 0x{:x} --> 0x{:x}", + page_offs, + page_offs + n, + old_crc, + crc + ); + } else { + // Correct because of the way conditions are written above. + assert!(rec_offs + n < XLOG_SIZE_OF_XLOG_RECORD); + // If `skipping_first_contrecord == true`, we may be reading from a middle of a record + // which started in the previous segment. Hence there is no point in validating the header. + if !skipping_first_contrecord && rec_offs + n > XLOG_RECORD_CRC_OFFS { + info!( + "Curiously corrupted WAL: a record stops inside the header; \ + offs=0x{:x}, record continuation, pageleft={}, contlen={}", + offs, pageleft, contlen + ); + break; + } + // Do nothing: we are still reading the header. It's accounted in CRC in the end of the record. } - crc = !crc; rec_offs += n; offs += n; contlen -= n; if contlen == 0 { - crc = !crc; + trace!(" record completed at 0x{:x}", offs); crc = crc32c_append(crc, &rec_hdr); offs = (offs + 7) & !7; // pad on 8 bytes boundary */ - if crc == wal_crc { + trace!( + " padded offs to 0x{:x}, crc is {:x}, expected crc is {:x}", + offs, + crc, + xl_crc + ); + if skipping_first_contrecord { + // do nothing, the flag will go down on next iteration when we're reading new record + trace!(" first conrecord has been just completed"); + } else if crc == xl_crc { // record is valid, advance the result to its end (with // alignment to the next record taken into account) + trace!( + " updating last_valid_rec_pos: 0x{:x} --> 0x{:x}", + last_valid_rec_pos, + offs + ); last_valid_rec_pos = offs; } else { info!( "CRC mismatch {} vs {} at {}", - crc, wal_crc, last_valid_rec_pos + crc, xl_crc, last_valid_rec_pos ); break; } } } } + trace!("last_valid_rec_pos=0x{:x}", last_valid_rec_pos); Ok(last_valid_rec_pos as u32) } @@ -476,78 +592,126 @@ pub fn generate_wal_segment(segno: u64, system_id: u64) -> Result anyhow::Result, + expected_end_of_wal_non_partial: Lsn, + last_segment: &str, + ) { + use wal_generate::*; + // 1. Generate some WAL let top_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("..") .join(".."); - let data_dir = top_path.join("test_output/test_find_end_of_wal"); - let initdb_path = top_path.join("tmp_install/bin/initdb"); - let lib_path = top_path.join("tmp_install/lib"); - if data_dir.exists() { - fs::remove_dir_all(&data_dir).unwrap(); + let cfg = Conf { + pg_distrib_dir: top_path.join("tmp_install"), + datadir: top_path.join(format!("test_output/{}", test_name)), + }; + if cfg.datadir.exists() { + fs::remove_dir_all(&cfg.datadir).unwrap(); } - println!("Using initdb from '{}'", initdb_path.display()); - println!("Data directory '{}'", data_dir.display()); - let initdb_output = Command::new(initdb_path) - .args(&["-D", data_dir.to_str().unwrap()]) - .arg("--no-instructions") - .arg("--no-sync") - .env_clear() - .env("LD_LIBRARY_PATH", &lib_path) - .env("DYLD_LIBRARY_PATH", &lib_path) - .output() - .unwrap(); - assert!( - initdb_output.status.success(), - "initdb failed. Status: '{}', stdout: '{}', stderr: '{}'", - initdb_output.status, - String::from_utf8_lossy(&initdb_output.stdout), - String::from_utf8_lossy(&initdb_output.stderr), - ); + cfg.initdb().unwrap(); + let mut srv = cfg.start_server().unwrap(); + let expected_wal_end: Lsn = + u64::from(generate_wal(&mut srv.connect_with_timeout().unwrap()).unwrap()).into(); + srv.kill(); // 2. 
Pick WAL generated by initdb - let wal_dir = data_dir.join("pg_wal"); + let wal_dir = cfg.datadir.join("pg_wal"); let wal_seg_size = 16 * 1024 * 1024; // 3. Check end_of_wal on non-partial WAL segment (we treat it as fully populated) let (wal_end, tli) = find_end_of_wal(&wal_dir, wal_seg_size, true, Lsn(0)).unwrap(); let wal_end = Lsn(wal_end); - println!("wal_end={}, tli={}", wal_end, tli); - assert_eq!(wal_end, "0/2000000".parse::().unwrap()); + info!( + "find_end_of_wal returned (wal_end={}, tli={})", + wal_end, tli + ); + assert_eq!(wal_end, expected_end_of_wal_non_partial); // 4. Get the actual end of WAL by pg_waldump - let waldump_path = top_path.join("tmp_install/bin/pg_waldump"); - let waldump_output = Command::new(waldump_path) - .arg(wal_dir.join("000000010000000000000001")) - .env_clear() - .env("LD_LIBRARY_PATH", &lib_path) - .env("DYLD_LIBRARY_PATH", &lib_path) - .output() - .unwrap(); - let waldump_output = std::str::from_utf8(&waldump_output.stderr).unwrap(); - println!("waldump_output = '{}'", &waldump_output); - let re = Regex::new(r"invalid record length at (.+):").unwrap(); - let caps = re.captures(waldump_output).unwrap(); + let waldump_output = cfg + .pg_waldump("000000010000000000000001", last_segment) + .unwrap() + .stderr; + let waldump_output = std::str::from_utf8(&waldump_output).unwrap(); + let caps = match Regex::new(r"invalid record length at (.+):") + .unwrap() + .captures(waldump_output) + { + Some(caps) => caps, + None => { + error!("Unable to parse pg_waldump's stderr:\n{}", waldump_output); + panic!(); + } + }; let waldump_wal_end = Lsn::from_str(caps.get(1).unwrap().as_str()).unwrap(); + info!( + "waldump erred on {}, expected wal end at {}", + waldump_wal_end, expected_wal_end + ); + assert_eq!(waldump_wal_end, expected_wal_end); // 5. 
Rename file to partial to actually find last valid lsn fs::rename( - wal_dir.join("000000010000000000000001"), - wal_dir.join("000000010000000000000001.partial"), + wal_dir.join(last_segment), + wal_dir.join(format!("{}.partial", last_segment)), ) .unwrap(); let (wal_end, tli) = find_end_of_wal(&wal_dir, wal_seg_size, true, Lsn(0)).unwrap(); let wal_end = Lsn(wal_end); - println!("wal_end={}, tli={}", wal_end, tli); + info!( + "find_end_of_wal returned (wal_end={}, tli={})", + wal_end, tli + ); assert_eq!(wal_end, waldump_wal_end); } + #[test] + pub fn test_find_end_of_wal_simple() { + init_logging(); + test_end_of_wal( + "test_find_end_of_wal_simple", + wal_generate::generate_simple, + "0/2000000".parse::().unwrap(), + "000000010000000000000001", + ); + } + + #[test] + pub fn test_find_end_of_wal_crossing_segment_followed_by_small_one() { + init_logging(); + test_end_of_wal( + "test_find_end_of_wal_crossing_segment_followed_by_small_one", + wal_generate::generate_wal_record_crossing_segment_followed_by_small_one, + "0/3000000".parse::().unwrap(), + "000000010000000000000002", + ); + } + + #[test] + #[ignore = "not yet fixed, needs correct parsing of pre-last segments"] // TODO + pub fn test_find_end_of_wal_last_crossing_segment() { + init_logging(); + test_end_of_wal( + "test_find_end_of_wal_last_crossing_segment", + wal_generate::generate_last_wal_record_crossing_segment, + "0/3000000".parse::().unwrap(), + "000000010000000000000002", + ); + } + /// Check the math in update_next_xid /// /// NOTE: These checks are sensitive to the value of XID_CHECKPOINT_INTERVAL, diff --git a/libs/postgres_ffi/wal_generate/Cargo.toml b/libs/postgres_ffi/wal_generate/Cargo.toml new file mode 100644 index 0000000000..a10671dddd --- /dev/null +++ b/libs/postgres_ffi/wal_generate/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "wal_generate" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = "1.0" +clap = "3.0" +env_logger = "0.9" +log = "0.4" +postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +tempfile = "3.2" diff --git a/libs/postgres_ffi/wal_generate/src/bin/wal_generate.rs b/libs/postgres_ffi/wal_generate/src/bin/wal_generate.rs new file mode 100644 index 0000000000..07ceb31c7f --- /dev/null +++ b/libs/postgres_ffi/wal_generate/src/bin/wal_generate.rs @@ -0,0 +1,58 @@ +use anyhow::*; +use clap::{App, Arg}; +use wal_generate::*; + +fn main() -> Result<()> { + env_logger::Builder::from_env( + env_logger::Env::default().default_filter_or("wal_generate=info"), + ) + .init(); + let arg_matches = App::new("Postgres WAL generator") + .about("Generates Postgres databases with specific WAL properties") + .arg( + Arg::new("datadir") + .short('D') + .long("datadir") + .takes_value(true) + .help("Data directory for the Postgres server") + .required(true) + ) + .arg( + Arg::new("pg-distrib-dir") + .long("pg-distrib-dir") + .takes_value(true) + .help("Directory with Postgres distribution (bin and lib directories, e.g. 
tmp_install)") + .default_value("/usr/local") + ) + .arg( + Arg::new("type") + .long("type") + .takes_value(true) + .help("Type of WAL to generate") + .possible_values(["simple", "last_wal_record_crossing_segment", "wal_record_crossing_segment_followed_by_small_one"]) + .required(true) + ) + .get_matches(); + + let cfg = Conf { + pg_distrib_dir: arg_matches.value_of("pg-distrib-dir").unwrap().into(), + datadir: arg_matches.value_of("datadir").unwrap().into(), + }; + cfg.initdb()?; + let mut srv = cfg.start_server()?; + let lsn = match arg_matches.value_of("type").unwrap() { + "simple" => generate_simple(&mut srv.connect_with_timeout()?)?, + "last_wal_record_crossing_segment" => { + generate_last_wal_record_crossing_segment(&mut srv.connect_with_timeout()?)? + } + "wal_record_crossing_segment_followed_by_small_one" => { + generate_wal_record_crossing_segment_followed_by_small_one( + &mut srv.connect_with_timeout()?, + )? + } + a => panic!("Unknown --type argument: {}", a), + }; + println!("end_of_wal = {}", lsn); + srv.kill(); + Ok(()) +} diff --git a/libs/postgres_ffi/wal_generate/src/lib.rs b/libs/postgres_ffi/wal_generate/src/lib.rs new file mode 100644 index 0000000000..a5cd81d68a --- /dev/null +++ b/libs/postgres_ffi/wal_generate/src/lib.rs @@ -0,0 +1,278 @@ +use anyhow::*; +use core::time::Duration; +use log::*; +use postgres::types::PgLsn; +use postgres::Client; +use std::cmp::Ordering; +use std::path::{Path, PathBuf}; +use std::process::{Command, Stdio}; +use std::time::Instant; +use tempfile::{tempdir, TempDir}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Conf { + pub pg_distrib_dir: PathBuf, + pub datadir: PathBuf, +} + +pub struct PostgresServer { + process: std::process::Child, + _unix_socket_dir: TempDir, + client_config: postgres::Config, +} + +impl Conf { + fn pg_bin_dir(&self) -> PathBuf { + self.pg_distrib_dir.join("bin") + } + + fn pg_lib_dir(&self) -> PathBuf { + self.pg_distrib_dir.join("lib") + } + + fn new_pg_command(&self, command: impl AsRef) -> Result { + let path = self.pg_bin_dir().join(command); + ensure!(path.exists(), "Command {:?} does not exist", path); + let mut cmd = Command::new(path); + cmd.env_clear() + .env("LD_LIBRARY_PATH", self.pg_lib_dir()) + .env("DYLD_LIBRARY_PATH", self.pg_lib_dir()); + Ok(cmd) + } + + pub fn initdb(&self) -> Result<()> { + if let Some(parent) = self.datadir.parent() { + info!("Pre-creating parent directory {:?}", parent); + // Tests may be run concurrently and there may be a race to create `test_output/`. + // std::fs::create_dir_all is guaranteed to have no races with another thread creating directories. + std::fs::create_dir_all(parent)?; + } + info!( + "Running initdb in {:?} with user \"postgres\"", + self.datadir + ); + let output = self + .new_pg_command("initdb")? + .arg("-D") + .arg(self.datadir.as_os_str()) + .args(&["-U", "postgres", "--no-instructions", "--no-sync"]) + .output()?; + debug!("initdb output: {:?}", output); + ensure!( + output.status.success(), + "initdb failed, stdout and stderr follow:\n{}{}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); + Ok(()) + } + + pub fn start_server(&self) -> Result { + info!("Starting Postgres server in {:?}", self.datadir); + let unix_socket_dir = tempdir()?; // We need a directory with a short name for Unix socket (up to 108 symbols) + let unix_socket_dir_path = unix_socket_dir.path().to_owned(); + let server_process = self + .new_pg_command("postgres")? 
+ .args(&["-c", "listen_addresses="]) + .arg("-k") + .arg(unix_socket_dir_path.as_os_str()) + .arg("-D") + .arg(self.datadir.as_os_str()) + .args(&["-c", "wal_keep_size=50MB"]) // Ensure old WAL is not removed + .args(&["-c", "logging_collector=on"]) // stderr will mess up with tests output + .args(&["-c", "shared_preload_libraries=zenith"]) // can only be loaded at startup + // Disable background processes as much as possible + .args(&["-c", "wal_writer_delay=10s"]) + .args(&["-c", "autovacuum=off"]) + .stderr(Stdio::null()) + .spawn()?; + let server = PostgresServer { + process: server_process, + _unix_socket_dir: unix_socket_dir, + client_config: { + let mut c = postgres::Config::new(); + c.host_path(&unix_socket_dir_path); + c.user("postgres"); + c.connect_timeout(Duration::from_millis(1000)); + c + }, + }; + Ok(server) + } + + pub fn pg_waldump( + &self, + first_segment_name: &str, + last_segment_name: &str, + ) -> Result { + let first_segment_file = self.datadir.join(first_segment_name); + let last_segment_file = self.datadir.join(last_segment_name); + info!( + "Running pg_waldump for {} .. {}", + first_segment_file.display(), + last_segment_file.display() + ); + let output = self + .new_pg_command("pg_waldump")? + .args(&[ + &first_segment_file.as_os_str(), + &last_segment_file.as_os_str(), + ]) + .output()?; + debug!("waldump output: {:?}", output); + Ok(output) + } +} + +impl PostgresServer { + pub fn connect_with_timeout(&self) -> Result { + let retry_until = Instant::now() + *self.client_config.get_connect_timeout().unwrap(); + while Instant::now() < retry_until { + use std::result::Result::Ok; + if let Ok(client) = self.client_config.connect(postgres::NoTls) { + return Ok(client); + } + std::thread::sleep(Duration::from_millis(100)); + } + bail!("Connection timed out"); + } + + pub fn kill(&mut self) { + self.process.kill().unwrap(); + self.process.wait().unwrap(); + } +} + +impl Drop for PostgresServer { + fn drop(&mut self) { + use std::result::Result::Ok; + match self.process.try_wait() { + Ok(Some(_)) => return, + Ok(None) => { + warn!("Server was not terminated, will be killed"); + } + Err(e) => { + error!("Unable to get status of the server: {}, will be killed", e); + } + } + let _ = self.process.kill(); + } +} + +pub trait PostgresClientExt: postgres::GenericClient { + fn pg_current_wal_insert_lsn(&mut self) -> Result { + Ok(self + .query_one("SELECT pg_current_wal_insert_lsn()", &[])? + .get(0)) + } + fn pg_current_wal_flush_lsn(&mut self) -> Result { + Ok(self + .query_one("SELECT pg_current_wal_flush_lsn()", &[])? + .get(0)) + } +} + +impl PostgresClientExt for C {} + +fn generate_internal( + client: &mut C, + f: impl Fn(&mut C, PgLsn) -> Result>, +) -> Result { + client.execute("create extension if not exists zenith_test_utils", &[])?; + + let wal_segment_size = client.query_one( + "select cast(setting as bigint) as setting, unit \ + from pg_settings where name = 'wal_segment_size'", + &[], + )?; + ensure!( + wal_segment_size.get::<_, String>("unit") == "B", + "Unexpected wal_segment_size unit" + ); + ensure!( + wal_segment_size.get::<_, i64>("setting") == 16 * 1024 * 1024, + "Unexpected wal_segment_size in bytes" + ); + + let initial_lsn = client.pg_current_wal_insert_lsn()?; + info!("LSN initial = {}", initial_lsn); + + let last_lsn = match f(client, initial_lsn)? { + None => client.pg_current_wal_insert_lsn()?, + Some(last_lsn) => match last_lsn.cmp(&client.pg_current_wal_insert_lsn()?) 
{ + Ordering::Less => bail!("Some records were inserted after the generated WAL"), + Ordering::Equal => last_lsn, + Ordering::Greater => bail!("Reported LSN is greater than insert_lsn"), + }, + }; + + // Some records may be not flushed, e.g. non-transactional logical messages. + client.execute("select neon_xlogflush(pg_current_wal_insert_lsn())", &[])?; + match last_lsn.cmp(&client.pg_current_wal_flush_lsn()?) { + Ordering::Less => bail!("Some records were flushed after the generated WAL"), + Ordering::Equal => {} + Ordering::Greater => bail!("Reported LSN is greater than flush_lsn"), + } + Ok(last_lsn) +} + +pub fn generate_simple(client: &mut impl postgres::GenericClient) -> Result { + generate_internal(client, |client, _| { + client.execute("CREATE table t(x int)", &[])?; + Ok(None) + }) +} + +fn generate_single_logical_message( + client: &mut impl postgres::GenericClient, + transactional: bool, +) -> Result { + generate_internal(client, |client, initial_lsn| { + ensure!( + initial_lsn < PgLsn::from(0x0200_0000 - 1024 * 1024), + "Initial LSN is too far in the future" + ); + + let message_lsn: PgLsn = client + .query_one( + "select pg_logical_emit_message($1, 'big-16mb-msg', \ + concat(repeat('abcd', 16 * 256 * 1024), 'end')) as message_lsn", + &[&transactional], + )? + .get("message_lsn"); + ensure!( + message_lsn > PgLsn::from(0x0200_0000 + 4 * 8192), + "Logical message did not cross the segment boundary" + ); + ensure!( + message_lsn < PgLsn::from(0x0400_0000), + "Logical message crossed two segments" + ); + + if transactional { + // Transactional logical messages are part of a transaction, so the one above is + // followed by a small COMMIT record. + + let after_message_lsn = client.pg_current_wal_insert_lsn()?; + ensure!( + message_lsn < after_message_lsn, + "No record found after the emitted message" + ); + Ok(Some(after_message_lsn)) + } else { + Ok(Some(message_lsn)) + } + }) +} + +pub fn generate_wal_record_crossing_segment_followed_by_small_one( + client: &mut impl postgres::GenericClient, +) -> Result { + generate_single_logical_message(client, true) +} + +pub fn generate_last_wal_record_crossing_segment( + client: &mut C, +) -> Result { + generate_single_logical_message(client, false) +} diff --git a/libs/remote_storage/src/lib.rs b/libs/remote_storage/src/lib.rs index 9bbb855dd5..8092e4fc49 100644 --- a/libs/remote_storage/src/lib.rs +++ b/libs/remote_storage/src/lib.rs @@ -87,7 +87,8 @@ pub trait RemoteStorage: Send + Sync { async fn delete(&self, path: &Self::RemoteObjectId) -> anyhow::Result<()>; } -/// TODO kb +/// Every storage, currently supported. +/// Serves as a simple way to pass around the [`RemoteStorage`] without dealing with generics. 
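// A minimal illustration of why this non-generic wrapper is convenient (a sketch, not an API added by this patch; `describe_storage` is a hypothetical helper): callers can hold one concrete value and branch on the backend only where it matters.
//
// fn describe_storage(storage: &GenericRemoteStorage) -> &'static str {
//     match storage {
//         GenericRemoteStorage::Local(_) => "local filesystem storage",
//         GenericRemoteStorage::S3(_) => "S3 bucket storage",
//     }
// }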
pub enum GenericRemoteStorage { Local(LocalFs), S3(S3Bucket), diff --git a/libs/utils/build.rs b/libs/utils/build.rs deleted file mode 100644 index ee3346ae66..0000000000 --- a/libs/utils/build.rs +++ /dev/null @@ -1,3 +0,0 @@ -fn main() { - println!("cargo:rerun-if-env-changed=GIT_VERSION"); -} diff --git a/libs/utils/src/http/endpoint.rs b/libs/utils/src/http/endpoint.rs index 77acab496f..51bff5f6eb 100644 --- a/libs/utils/src/http/endpoint.rs +++ b/libs/utils/src/http/endpoint.rs @@ -5,7 +5,7 @@ use anyhow::anyhow; use hyper::header::AUTHORIZATION; use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server}; use lazy_static::lazy_static; -use metrics::{new_common_metric_name, register_int_counter, Encoder, IntCounter, TextEncoder}; +use metrics::{register_int_counter, Encoder, IntCounter, TextEncoder}; use routerify::ext::RequestExt; use routerify::RequestInfo; use routerify::{Middleware, Router, RouterBuilder, RouterService}; @@ -18,7 +18,7 @@ use super::error::ApiError; lazy_static! { static ref SERVE_METRICS_COUNT: IntCounter = register_int_counter!( - new_common_metric_name("serve_metrics_count"), + "libmetrics_metric_handler_requests_total", "Number of metric requests made" ) .expect("failed to define a metric"); diff --git a/libs/utils/src/http/request.rs b/libs/utils/src/http/request.rs index 3bc8993c26..8e3d357397 100644 --- a/libs/utils/src/http/request.rs +++ b/libs/utils/src/http/request.rs @@ -1,7 +1,7 @@ use std::str::FromStr; use super::error::ApiError; -use hyper::{Body, Request}; +use hyper::{body::HttpBody, Body, Request}; use routerify::ext::RequestExt; pub fn get_request_param<'a>( @@ -31,3 +31,10 @@ pub fn parse_request_param( ))), } } + +pub async fn ensure_no_body(request: &mut Request) -> Result<(), ApiError> { + match request.body_mut().data().await { + Some(_) => Err(ApiError::BadRequest("Unexpected request body".into())), + None => Ok(()), + } +} diff --git a/libs/utils/src/lib.rs b/libs/utils/src/lib.rs index de266efe64..15d4c7a81e 100644 --- a/libs/utils/src/lib.rs +++ b/libs/utils/src/lib.rs @@ -54,31 +54,52 @@ pub mod nonblock; // Default signal handling pub mod signals; -// This is a shortcut to embed git sha into binaries and avoid copying the same build script to all packages -// -// we have several cases: -// * building locally from git repo -// * building in CI from git repo -// * building in docker (either in CI or locally) -// -// One thing to note is that .git is not available in docker (and it is bad to include it there). -// So everything becides docker build is covered by git_version crate. -// For docker use environment variable to pass git version, which is then retrieved by buildscript (build.rs). -// It takes variable from build process env and puts it to the rustc env. And then we can retrieve it here by using env! macro. -// Git version received from environment variable used as a fallback in git_version invokation. -// And to avoid running buildscript every recompilation, we use rerun-if-env-changed option. -// So the build script will be run only when GIT_VERSION envvar has changed. -// -// Why not to use buildscript to get git commit sha directly without procmacro from different crate? -// Caching and workspaces complicates that. In case `utils` is not -// recompiled due to caching then version may become outdated. -// git_version crate handles that case by introducing a dependency on .git internals via include_bytes! macro, -// so if we changed the index state git_version will pick that up and rerun the macro. 
-// -// Note that with git_version prefix is `git:` and in case of git version from env its `git-env:`. -use git_version::git_version; -pub const GIT_VERSION: &str = git_version!( - prefix = "git:", - fallback = concat!("git-env:", env!("GIT_VERSION")), - args = ["--abbrev=40", "--always", "--dirty=-modified"] // always use full sha -); +/// This is a shortcut to embed the git sha into binaries and avoid copying the same build script to all packages. +/// +/// We have several cases: +/// * building locally from a git repo +/// * building in CI from a git repo +/// * building in docker (either in CI or locally) +/// +/// One thing to note is that .git is not available in docker (and it is bad to include it there). +/// So everything besides the docker build is covered by the git_version crate, and docker uses a `GIT_VERSION` argument to get the required value. +/// It takes the variable from the build process env and puts it into the rustc env, and then we can retrieve it here with the env! macro. +/// The git version received from the environment variable is used as a fallback in the git_version invocation. +/// And to avoid rerunning the build script on every recompilation, we use the rerun-if-env-changed option. +/// So the build script will be rerun only when the GIT_VERSION envvar has changed. +/// +/// Why not use a build script to get the git commit sha directly, without the proc macro from a different crate? +/// Caching and workspaces complicate that. In case `utils` is not +/// recompiled due to caching, the version may become outdated. +/// The git_version crate handles that case by introducing a dependency on .git internals via the include_bytes! macro, +/// so if we change the index state, git_version will pick that up and rerun the macro. +/// +/// Note that with git_version the prefix is `git:`, and in case of the git version from env it is `git-env:`. +/// +/// ############################################################################################# +/// TODO this macro is not the way the library is intended to be used, see https://github.com/neondatabase/neon/issues/1565 for details. +/// We use `cachepot` to reduce our current CI build times: https://github.com/neondatabase/cloud/pull/1033#issuecomment-1100935036 +/// Yet, it seems to ignore the GIT_VERSION env variable passed to the Docker build, even with a build.rs that contains +/// `println!("cargo:rerun-if-env-changed=GIT_VERSION");` code for cachepot cache invalidation. +/// The problem needs further investigation, and a regular `const` declaration should eventually replace this macro. +#[macro_export] +macro_rules! project_git_version { + ($const_identifier:ident) => { + const $const_identifier: &str = git_version::git_version!( + prefix = "git:", + fallback = concat!( + "git-env:", + env!("GIT_VERSION", "Missing GIT_VERSION envvar") + ), + args = ["--abbrev=40", "--always", "--dirty=-modified"] // always use full sha + ); + }; +} + +/// Same as `assert!`, but evaluated at compile time and optimized out at runtime. +#[macro_export] +macro_rules! const_assert { + ($($args:tt)*) => { + const _: () = assert!($($args)*); + }; +} diff --git a/libs/utils/src/vec_map.rs b/libs/utils/src/vec_map.rs index 558721c724..9953b447c8 100644 --- a/libs/utils/src/vec_map.rs +++ b/libs/utils/src/vec_map.rs @@ -1,11 +1,9 @@ use std::{alloc::Layout, cmp::Ordering, ops::RangeBounds}; -use serde::{Deserialize, Serialize}; - /// Ordered map datastructure implemented in a Vec. /// Append only - can only add keys that are larger than the /// current max key.
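// Illustrative only (not VecMap's actual API): the append-only rule described above boils down to a comparison against the last, and therefore greatest, key before pushing onto the inner Vec:
//
// fn can_append<K: Ord, V>(entries: &[(K, V)], new_key: &K) -> bool {
//     entries.last().map_or(true, |(max_key, _)| new_key > max_key)
// }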
-#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug)] pub struct VecMap(Vec<(K, V)>); impl Default for VecMap { diff --git a/monitoring/docker-compose.yml b/monitoring/docker-compose.yml deleted file mode 100644 index a3fda0b246..0000000000 --- a/monitoring/docker-compose.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: "3" -services: - - prometheus: - container_name: prometheus - image: prom/prometheus:latest - volumes: - - ./prometheus.yaml:/etc/prometheus/prometheus.yml - # ports: - # - "9090:9090" - # TODO: find a proper portable solution - network_mode: "host" - - grafana: - image: grafana/grafana:latest - volumes: - - ./grafana.yaml:/etc/grafana/provisioning/datasources/datasources.yaml - environment: - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - - GF_AUTH_DISABLE_LOGIN_FORM=true - # ports: - # - "3000:3000" - # TODO: find a proper portable solution - network_mode: "host" diff --git a/monitoring/grafana.yaml b/monitoring/grafana.yaml deleted file mode 100644 index eac8879e6c..0000000000 --- a/monitoring/grafana.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: 1 - -datasources: -- name: Prometheus - type: prometheus - access: proxy - orgId: 1 - url: http://localhost:9090 - basicAuth: false - isDefault: false - version: 1 - editable: false diff --git a/monitoring/prometheus.yaml b/monitoring/prometheus.yaml deleted file mode 100644 index ba55d53737..0000000000 --- a/monitoring/prometheus.yaml +++ /dev/null @@ -1,5 +0,0 @@ -scrape_configs: - - job_name: 'default' - scrape_interval: 10s - static_configs: - - targets: ['localhost:9898'] diff --git a/neon_local/Cargo.toml b/neon_local/Cargo.toml index 78d339789f..8ebd7d5c17 100644 --- a/neon_local/Cargo.toml +++ b/neon_local/Cargo.toml @@ -9,6 +9,7 @@ anyhow = "1.0" serde_json = "1" comfy-table = "5.0.1" postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } +git-version = "0.3.5" # FIXME: 'pageserver' is needed for BranchInfo. 
Refactor pageserver = { path = "../pageserver" } diff --git a/neon_local/src/main.rs b/neon_local/src/main.rs index 8b54054080..f04af9cfdd 100644 --- a/neon_local/src/main.rs +++ b/neon_local/src/main.rs @@ -1,10 +1,10 @@ use anyhow::{anyhow, bail, Context, Result}; use clap::{App, AppSettings, Arg, ArgMatches}; use control_plane::compute::ComputeControlPlane; -use control_plane::local_env; -use control_plane::local_env::LocalEnv; +use control_plane::local_env::{EtcdBroker, LocalEnv}; use control_plane::safekeeper::SafekeeperNode; use control_plane::storage::PageServerNode; +use control_plane::{etcd, local_env}; use pageserver::config::defaults::{ DEFAULT_HTTP_LISTEN_ADDR as DEFAULT_PAGESERVER_HTTP_ADDR, DEFAULT_PG_LISTEN_ADDR as DEFAULT_PAGESERVER_PG_ADDR, @@ -14,14 +14,15 @@ use safekeeper::defaults::{ DEFAULT_PG_LISTEN_PORT as DEFAULT_SAFEKEEPER_PG_PORT, }; use std::collections::{BTreeSet, HashMap}; +use std::path::Path; use std::process::exit; use std::str::FromStr; use utils::{ auth::{Claims, Scope}, lsn::Lsn, postgres_backend::AuthType, + project_git_version, zid::{ZNodeId, ZTenantId, ZTenantTimelineId, ZTimelineId}, - GIT_VERSION, }; use pageserver::timelines::TimelineInfo; @@ -30,29 +31,29 @@ use pageserver::timelines::TimelineInfo; const DEFAULT_SAFEKEEPER_ID: ZNodeId = ZNodeId(1); const DEFAULT_PAGESERVER_ID: ZNodeId = ZNodeId(1); const DEFAULT_BRANCH_NAME: &str = "main"; +project_git_version!(GIT_VERSION); -fn default_conf() -> String { +fn default_conf(etcd_binary_path: &Path) -> String { format!( r#" # Default built-in configuration, defined in main.rs +[etcd_broker] +broker_endpoints = ['http://localhost:2379'] +etcd_binary_path = '{etcd_binary_path}' + [pageserver] -id = {pageserver_id} -listen_pg_addr = '{pageserver_pg_addr}' -listen_http_addr = '{pageserver_http_addr}' +id = {DEFAULT_PAGESERVER_ID} +listen_pg_addr = '{DEFAULT_PAGESERVER_PG_ADDR}' +listen_http_addr = '{DEFAULT_PAGESERVER_HTTP_ADDR}' auth_type = '{pageserver_auth_type}' [[safekeepers]] -id = {safekeeper_id} -pg_port = {safekeeper_pg_port} -http_port = {safekeeper_http_port} +id = {DEFAULT_SAFEKEEPER_ID} +pg_port = {DEFAULT_SAFEKEEPER_PG_PORT} +http_port = {DEFAULT_SAFEKEEPER_HTTP_PORT} "#, - pageserver_id = DEFAULT_PAGESERVER_ID, - pageserver_pg_addr = DEFAULT_PAGESERVER_PG_ADDR, - pageserver_http_addr = DEFAULT_PAGESERVER_HTTP_ADDR, + etcd_binary_path = etcd_binary_path.display(), pageserver_auth_type = AuthType::Trust, - safekeeper_id = DEFAULT_SAFEKEEPER_ID, - safekeeper_pg_port = DEFAULT_SAFEKEEPER_PG_PORT, - safekeeper_http_port = DEFAULT_SAFEKEEPER_HTTP_PORT, ) } @@ -166,12 +167,12 @@ fn main() -> Result<()> { .subcommand(App::new("create") .arg(tenant_id_arg.clone()) .arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline")) - .arg(Arg::new("config").short('c').takes_value(true).multiple_occurrences(true).required(false)) - ) + .arg(Arg::new("config").short('c').takes_value(true).multiple_occurrences(true).required(false)) + ) .subcommand(App::new("config") .arg(tenant_id_arg.clone()) - .arg(Arg::new("config").short('c').takes_value(true).multiple_occurrences(true).required(false)) - ) + .arg(Arg::new("config").short('c').takes_value(true).multiple_occurrences(true).required(false)) + ) ) .subcommand( App::new("pageserver") @@ -274,7 +275,7 @@ fn main() -> Result<()> { "pageserver" => handle_pageserver(sub_args, &env), "pg" => handle_pg(sub_args, &env), "safekeeper" => handle_safekeeper(sub_args, &env), - _ => bail!("unexpected subcommand {}", 
sub_name), + _ => bail!("unexpected subcommand {sub_name}"), }; if original_env != env { @@ -288,7 +289,7 @@ fn main() -> Result<()> { Ok(Some(updated_env)) => updated_env.persist_config(&updated_env.base_data_dir)?, Ok(None) => (), Err(e) => { - eprintln!("command failed: {:?}", e); + eprintln!("command failed: {e:?}"); exit(1); } } @@ -467,21 +468,21 @@ fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result Result { +fn handle_init(init_match: &ArgMatches) -> anyhow::Result { let initial_timeline_id_arg = parse_timeline_id(init_match)?; // Create config file let toml_file: String = if let Some(config_path) = init_match.value_of("config") { // load and parse the file std::fs::read_to_string(std::path::Path::new(config_path)) - .with_context(|| format!("Could not read configuration file \"{}\"", config_path))? + .with_context(|| format!("Could not read configuration file '{config_path}'"))? } else { // Built-in default config - default_conf() + default_conf(&EtcdBroker::locate_etcd()?) }; let mut env = - LocalEnv::create_config(&toml_file).context("Failed to create neon configuration")?; + LocalEnv::parse_config(&toml_file).context("Failed to create neon configuration")?; env.init().context("Failed to initialize neon repository")?; // default_tenantid was generated by the `env.init()` call above @@ -496,7 +497,7 @@ fn handle_init(init_match: &ArgMatches) -> Result { &pageserver_config_overrides(init_match), ) .unwrap_or_else(|e| { - eprintln!("pageserver init failed: {}", e); + eprintln!("pageserver init failed: {e}"); exit(1); }); @@ -540,6 +541,29 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an "tenant {} successfully created on the pageserver", new_tenant_id ); + + // Create an initial timeline for the new tenant + let new_timeline_id = parse_timeline_id(create_match)?; + let timeline = pageserver + .timeline_create(new_tenant_id, new_timeline_id, None, None)? + .context(format!( + "Failed to create initial timeline for tenant {new_tenant_id}" + ))?; + let new_timeline_id = timeline.timeline_id; + let last_record_lsn = timeline + .local + .context(format!("Failed to get last record LSN: no local timeline info for timeline {new_timeline_id}"))? 
+ .last_record_lsn; + + env.register_branch_mapping( + DEFAULT_BRANCH_NAME.to_string(), + new_tenant_id, + new_timeline_id, + )?; + + println!( + "Created an initial timeline '{new_timeline_id}' at Lsn {last_record_lsn} for tenant: {new_tenant_id}", + ); } Some(("config", create_match)) => { let tenant_id = get_tenant_id(create_match, env)?; @@ -896,20 +920,23 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul Ok(()) } -fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { +fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::Result<()> { + etcd::start_etcd_process(env)?; let pageserver = PageServerNode::from_env(env); // Postgres nodes are not started automatically if let Err(e) = pageserver.start(&pageserver_config_overrides(sub_match)) { - eprintln!("pageserver start failed: {}", e); + eprintln!("pageserver start failed: {e}"); + try_stop_etcd_process(env); exit(1); } for node in env.safekeepers.iter() { let safekeeper = SafekeeperNode::from_env(env, node); if let Err(e) = safekeeper.start() { - eprintln!("safekeeper '{}' start failed: {}", safekeeper.id, e); + eprintln!("safekeeper '{}' start failed: {e}", safekeeper.id); + try_stop_etcd_process(env); exit(1); } } @@ -939,5 +966,14 @@ fn handle_stop_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result< eprintln!("safekeeper '{}' stop failed: {}", safekeeper.id, e); } } + + try_stop_etcd_process(env); + Ok(()) } + +fn try_stop_etcd_process(env: &local_env::LocalEnv) { + if let Err(e) = etcd::stop_etcd_process(env) { + eprintln!("etcd stop failed: {e}"); + } +} diff --git a/pageserver/Cargo.toml b/pageserver/Cargo.toml index d4cceafc61..290f52e0b2 100644 --- a/pageserver/Cargo.toml +++ b/pageserver/Cargo.toml @@ -52,8 +52,10 @@ nix = "0.23" once_cell = "1.8.0" crossbeam-utils = "0.8.5" fail = "0.5.0" +git-version = "0.3.5" postgres_ffi = { path = "../libs/postgres_ffi" } +etcd_broker = { path = "../libs/etcd_broker" } metrics = { path = "../libs/metrics" } utils = { path = "../libs/utils" } remote_storage = { path = "../libs/remote_storage" } diff --git a/pageserver/src/bin/dump_layerfile.rs b/pageserver/src/bin/dump_layerfile.rs index af73ef6bdb..87390a1b06 100644 --- a/pageserver/src/bin/dump_layerfile.rs +++ b/pageserver/src/bin/dump_layerfile.rs @@ -7,7 +7,9 @@ use pageserver::layered_repository::dump_layerfile_from_path; use pageserver::page_cache; use pageserver::virtual_file; use std::path::PathBuf; -use utils::GIT_VERSION; +use utils::project_git_version; + +project_git_version!(GIT_VERSION); fn main() -> Result<()> { let arg_matches = App::new("Zenith dump_layerfile utility") diff --git a/pageserver/src/bin/pageserver.rs b/pageserver/src/bin/pageserver.rs index 62ae739a18..091ace25ab 100644 --- a/pageserver/src/bin/pageserver.rs +++ b/pageserver/src/bin/pageserver.rs @@ -26,24 +26,24 @@ use utils::{ http::endpoint, logging, postgres_backend::AuthType, + project_git_version, shutdown::exit_now, signals::{self, Signal}, tcp_listener, zid::{ZTenantId, ZTimelineId}, - GIT_VERSION, }; +project_git_version!(GIT_VERSION); + fn version() -> String { format!( - "{} profiling:{} failpoints:{}", - GIT_VERSION, + "{GIT_VERSION} profiling:{} failpoints:{}", cfg!(feature = "profiling"), fail::has_failpoints() ) } fn main() -> anyhow::Result<()> { - metrics::set_common_metrics_prefix("pageserver"); let arg_matches = App::new("Zenith page server") .about("Materializes WAL stream to pages and serves them to the postgres") 
.version(&*version()) @@ -103,6 +103,8 @@ fn main() -> anyhow::Result<()> { let features: &[&str] = &[ #[cfg(feature = "failpoints")] "failpoints", + #[cfg(feature = "profiling")] + "profiling", ]; println!("{{\"features\": {features:?} }}"); return Ok(()); @@ -188,13 +190,8 @@ fn main() -> anyhow::Result<()> { // as a ref. let conf: &'static PageServerConf = Box::leak(Box::new(conf)); - // If failpoints are used, terminate the whole pageserver process if they are hit. + // Initialize up failpoints support let scenario = FailScenario::setup(); - if fail::has_failpoints() { - std::panic::set_hook(Box::new(|_| { - std::process::exit(1); - })); - } // Basic initialization of things that don't change after startup virtual_file::init(conf.max_file_descriptors); @@ -223,7 +220,7 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<() // Initialize logger let log_file = logging::init(LOG_FILE_NAME, daemonize)?; - info!("version: {}", GIT_VERSION); + info!("version: {GIT_VERSION}"); // TODO: Check that it looks like a valid repository before going further @@ -263,7 +260,7 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<() // Otherwise, the coverage data will be damaged. match daemonize.exit_action(|| exit_now(0)).start() { Ok(_) => info!("Success, daemonized"), - Err(err) => error!(%err, "could not daemonize"), + Err(err) => bail!("{err}. could not daemonize. bailing."), } } diff --git a/pageserver/src/bin/update_metadata.rs b/pageserver/src/bin/update_metadata.rs index fae5e5c2e3..983fdb8647 100644 --- a/pageserver/src/bin/update_metadata.rs +++ b/pageserver/src/bin/update_metadata.rs @@ -6,7 +6,9 @@ use clap::{App, Arg}; use pageserver::layered_repository::metadata::TimelineMetadata; use std::path::PathBuf; use std::str::FromStr; -use utils::{lsn::Lsn, GIT_VERSION}; +use utils::{lsn::Lsn, project_git_version}; + +project_git_version!(GIT_VERSION); fn main() -> Result<()> { let arg_matches = App::new("Zenith update metadata utility") diff --git a/pageserver/src/config.rs b/pageserver/src/config.rs index 5257732c5c..a9215c0701 100644 --- a/pageserver/src/config.rs +++ b/pageserver/src/config.rs @@ -13,6 +13,7 @@ use std::str::FromStr; use std::time::Duration; use toml_edit; use toml_edit::{Document, Item}; +use url::Url; use utils::{ postgres_backend::AuthType, zid::{ZNodeId, ZTenantId, ZTimelineId}, @@ -111,6 +112,13 @@ pub struct PageServerConf { pub profiling: ProfilingConfig, pub default_tenant_conf: TenantConf, + + /// A prefix to add in etcd brokers before every key. + /// Can be used for isolating different pageserver groups withing the same etcd cluster. + pub broker_etcd_prefix: String, + + /// Etcd broker endpoints to connect to. 
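/// For example (an illustrative snippet mirroring the test fixtures further down in this diff), these are plain top-level keys in pageserver.toml, and each endpoint must be a full URL because the values are parsed into `url::Url`: `broker_endpoints = ['http://127.0.0.1:2379']`, plus an optional `broker_etcd_prefix` that defaults to `DEFAULT_NEON_BROKER_ETCD_PREFIX`.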
+ pub broker_endpoints: Vec, } #[derive(Debug, Clone, PartialEq, Eq)] @@ -175,6 +183,8 @@ struct PageServerConfigBuilder { id: BuilderValue, profiling: BuilderValue, + broker_etcd_prefix: BuilderValue, + broker_endpoints: BuilderValue>, } impl Default for PageServerConfigBuilder { @@ -200,6 +210,8 @@ impl Default for PageServerConfigBuilder { remote_storage_config: Set(None), id: NotSet, profiling: Set(ProfilingConfig::Disabled), + broker_etcd_prefix: Set(etcd_broker::DEFAULT_NEON_BROKER_ETCD_PREFIX.to_string()), + broker_endpoints: Set(Vec::new()), } } } @@ -256,6 +268,14 @@ impl PageServerConfigBuilder { self.remote_storage_config = BuilderValue::Set(remote_storage_config) } + pub fn broker_endpoints(&mut self, broker_endpoints: Vec) { + self.broker_endpoints = BuilderValue::Set(broker_endpoints) + } + + pub fn broker_etcd_prefix(&mut self, broker_etcd_prefix: String) { + self.broker_etcd_prefix = BuilderValue::Set(broker_etcd_prefix) + } + pub fn id(&mut self, node_id: ZNodeId) { self.id = BuilderValue::Set(node_id) } @@ -264,7 +284,11 @@ impl PageServerConfigBuilder { self.profiling = BuilderValue::Set(profiling) } - pub fn build(self) -> Result { + pub fn build(self) -> anyhow::Result { + let broker_endpoints = self + .broker_endpoints + .ok_or(anyhow!("No broker endpoints provided"))?; + Ok(PageServerConf { listen_pg_addr: self .listen_pg_addr @@ -300,6 +324,10 @@ impl PageServerConfigBuilder { profiling: self.profiling.ok_or(anyhow!("missing profiling"))?, // TenantConf is handled separately default_tenant_conf: TenantConf::default(), + broker_endpoints, + broker_etcd_prefix: self + .broker_etcd_prefix + .ok_or(anyhow!("missing broker_etcd_prefix"))?, }) } } @@ -341,7 +369,7 @@ impl PageServerConf { /// validating the input and failing on errors. /// /// This leaves any options not present in the file in the built-in defaults. - pub fn parse_and_validate(toml: &Document, workdir: &Path) -> Result { + pub fn parse_and_validate(toml: &Document, workdir: &Path) -> anyhow::Result { let mut builder = PageServerConfigBuilder::default(); builder.workdir(workdir.to_owned()); @@ -373,6 +401,17 @@ impl PageServerConf { } "id" => builder.id(ZNodeId(parse_toml_u64(key, item)?)), "profiling" => builder.profiling(parse_toml_from_str(key, item)?), + "broker_etcd_prefix" => builder.broker_etcd_prefix(parse_toml_string(key, item)?), + "broker_endpoints" => builder.broker_endpoints( + parse_toml_array(key, item)? + .into_iter() + .map(|endpoint_str| { + endpoint_str.parse::().with_context(|| { + format!("Array item {endpoint_str} for key {key} is not a valid url endpoint") + }) + }) + .collect::>()?, + ), _ => bail!("unrecognized pageserver option '{key}'"), } } @@ -526,6 +565,8 @@ impl PageServerConf { remote_storage_config: None, profiling: ProfilingConfig::Disabled, default_tenant_conf: TenantConf::dummy_conf(), + broker_endpoints: Vec::new(), + broker_etcd_prefix: etcd_broker::DEFAULT_NEON_BROKER_ETCD_PREFIX.to_string(), } } } @@ -576,14 +617,36 @@ fn parse_toml_duration(name: &str, item: &Item) -> Result { Ok(humantime::parse_duration(s)?) 
} -fn parse_toml_from_str(name: &str, item: &Item) -> Result +fn parse_toml_from_str(name: &str, item: &Item) -> anyhow::Result where - T: FromStr, + T: FromStr, + ::Err: std::fmt::Display, { let v = item .as_str() .with_context(|| format!("configure option {name} is not a string"))?; - T::from_str(v) + T::from_str(v).map_err(|e| { + anyhow!( + "Failed to parse string as {parse_type} for configure option {name}: {e}", + parse_type = stringify!(T) + ) + }) +} + +fn parse_toml_array(name: &str, item: &Item) -> anyhow::Result> { + let array = item + .as_array() + .with_context(|| format!("configure option {name} is not an array"))?; + + array + .iter() + .map(|value| { + value + .as_str() + .map(str::to_string) + .with_context(|| format!("Array item {value:?} for key {name} is not a string")) + }) + .collect() } #[cfg(test)] @@ -616,12 +679,16 @@ id = 10 fn parse_defaults() -> anyhow::Result<()> { let tempdir = tempdir()?; let (workdir, pg_distrib_dir) = prepare_fs(&tempdir)?; - // we have to create dummy pathes to overcome the validation errors - let config_string = format!("pg_distrib_dir='{}'\nid=10", pg_distrib_dir.display()); + let broker_endpoint = "http://127.0.0.1:7777"; + // we have to create dummy values to overcome the validation errors + let config_string = format!( + "pg_distrib_dir='{}'\nid=10\nbroker_endpoints = ['{broker_endpoint}']", + pg_distrib_dir.display() + ); let toml = config_string.parse()?; let parsed_config = PageServerConf::parse_and_validate(&toml, &workdir) - .unwrap_or_else(|e| panic!("Failed to parse config '{config_string}', reason: {e}")); + .unwrap_or_else(|e| panic!("Failed to parse config '{config_string}', reason: {e:?}")); assert_eq!( parsed_config, @@ -641,6 +708,10 @@ id = 10 remote_storage_config: None, profiling: ProfilingConfig::Disabled, default_tenant_conf: TenantConf::default(), + broker_endpoints: vec![broker_endpoint + .parse() + .expect("Failed to parse a valid broker endpoint URL")], + broker_etcd_prefix: etcd_broker::DEFAULT_NEON_BROKER_ETCD_PREFIX.to_string(), }, "Correct defaults should be used when no config values are provided" ); @@ -652,15 +723,16 @@ id = 10 fn parse_basic_config() -> anyhow::Result<()> { let tempdir = tempdir()?; let (workdir, pg_distrib_dir) = prepare_fs(&tempdir)?; + let broker_endpoint = "http://127.0.0.1:7777"; let config_string = format!( - "{ALL_BASE_VALUES_TOML}pg_distrib_dir='{}'", + "{ALL_BASE_VALUES_TOML}pg_distrib_dir='{}'\nbroker_endpoints = ['{broker_endpoint}']", pg_distrib_dir.display() ); let toml = config_string.parse()?; let parsed_config = PageServerConf::parse_and_validate(&toml, &workdir) - .unwrap_or_else(|e| panic!("Failed to parse config '{config_string}', reason: {e}")); + .unwrap_or_else(|e| panic!("Failed to parse config '{config_string}', reason: {e:?}")); assert_eq!( parsed_config, @@ -680,6 +752,10 @@ id = 10 remote_storage_config: None, profiling: ProfilingConfig::Disabled, default_tenant_conf: TenantConf::default(), + broker_endpoints: vec![broker_endpoint + .parse() + .expect("Failed to parse a valid broker endpoint URL")], + broker_etcd_prefix: etcd_broker::DEFAULT_NEON_BROKER_ETCD_PREFIX.to_string(), }, "Should be able to parse all basic config values correctly" ); @@ -691,6 +767,7 @@ id = 10 fn parse_remote_fs_storage_config() -> anyhow::Result<()> { let tempdir = tempdir()?; let (workdir, pg_distrib_dir) = prepare_fs(&tempdir)?; + let broker_endpoint = "http://127.0.0.1:7777"; let local_storage_path = tempdir.path().join("local_remote_storage"); @@ -710,6 +787,7 @@ local_path = 
'{}'"#, let config_string = format!( r#"{ALL_BASE_VALUES_TOML} pg_distrib_dir='{}' +broker_endpoints = ['{broker_endpoint}'] {remote_storage_config_str}"#, pg_distrib_dir.display(), @@ -718,7 +796,9 @@ pg_distrib_dir='{}' let toml = config_string.parse()?; let parsed_remote_storage_config = PageServerConf::parse_and_validate(&toml, &workdir) - .unwrap_or_else(|e| panic!("Failed to parse config '{config_string}', reason: {e}")) + .unwrap_or_else(|e| { + panic!("Failed to parse config '{config_string}', reason: {e:?}") + }) .remote_storage_config .expect("Should have remote storage config for the local FS"); @@ -728,7 +808,7 @@ pg_distrib_dir='{}' max_concurrent_syncs: NonZeroUsize::new( remote_storage::DEFAULT_REMOTE_STORAGE_MAX_CONCURRENT_SYNCS ) - .unwrap(), + .unwrap(), max_sync_errors: NonZeroU32::new(remote_storage::DEFAULT_REMOTE_STORAGE_MAX_SYNC_ERRORS) .unwrap(), storage: RemoteStorageKind::LocalFs(local_storage_path.clone()), @@ -751,6 +831,7 @@ pg_distrib_dir='{}' let max_concurrent_syncs = NonZeroUsize::new(111).unwrap(); let max_sync_errors = NonZeroU32::new(222).unwrap(); let s3_concurrency_limit = NonZeroUsize::new(333).unwrap(); + let broker_endpoint = "http://127.0.0.1:7777"; let identical_toml_declarations = &[ format!( @@ -773,6 +854,7 @@ concurrency_limit = {s3_concurrency_limit}"# let config_string = format!( r#"{ALL_BASE_VALUES_TOML} pg_distrib_dir='{}' +broker_endpoints = ['{broker_endpoint}'] {remote_storage_config_str}"#, pg_distrib_dir.display(), @@ -781,7 +863,9 @@ pg_distrib_dir='{}' let toml = config_string.parse()?; let parsed_remote_storage_config = PageServerConf::parse_and_validate(&toml, &workdir) - .unwrap_or_else(|e| panic!("Failed to parse config '{config_string}', reason: {e}")) + .unwrap_or_else(|e| { + panic!("Failed to parse config '{config_string}', reason: {e:?}") + }) .remote_storage_config .expect("Should have remote storage config for S3"); diff --git a/pageserver/src/http/openapi_spec.yml b/pageserver/src/http/openapi_spec.yml index 9932a2d08d..55f7b3c5a7 100644 --- a/pageserver/src/http/openapi_spec.yml +++ b/pageserver/src/http/openapi_spec.yml @@ -123,6 +123,53 @@ paths: schema: $ref: "#/components/schemas/Error" + /v1/tenant/{tenant_id}/timeline/{timeline_id}/wal_receiver: + parameters: + - name: tenant_id + in: path + required: true + schema: + type: string + format: hex + - name: timeline_id + in: path + required: true + schema: + type: string + format: hex + get: + description: Get wal receiver's data attached to the timeline + responses: + "200": + description: WalReceiverEntry + content: + application/json: + schema: + $ref: "#/components/schemas/WalReceiverEntry" + "401": + description: Unauthorized Error + content: + application/json: + schema: + $ref: "#/components/schemas/UnauthorizedError" + "403": + description: Forbidden Error + content: + application/json: + schema: + $ref: "#/components/schemas/ForbiddenError" + "404": + description: Error when no wal receiver is running or found + content: + application/json: + schema: + $ref: "#/components/schemas/NotFoundError" + "500": + description: Generic operation error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" /v1/tenant/{tenant_id}/timeline/{timeline_id}/attach: parameters: @@ -520,6 +567,21 @@ components: type: integer current_logical_size_non_incremental: type: integer + WalReceiverEntry: + type: object + required: + - thread_id + - wal_producer_connstr + properties: + thread_id: + type: integer + wal_producer_connstr: + type: string + 
last_received_msg_lsn: + type: string + format: hex + last_received_msg_ts: + type: integer Error: type: object diff --git a/pageserver/src/http/routes.rs b/pageserver/src/http/routes.rs index 0104df826e..bb650a34ed 100644 --- a/pageserver/src/http/routes.rs +++ b/pageserver/src/http/routes.rs @@ -224,6 +224,30 @@ async fn timeline_detail_handler(request: Request) -> Result) -> Result, ApiError> { + let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; + check_permission(&request, Some(tenant_id))?; + + let timeline_id: ZTimelineId = parse_request_param(&request, "timeline_id")?; + + let wal_receiver = tokio::task::spawn_blocking(move || { + let _enter = + info_span!("wal_receiver_get", tenant = %tenant_id, timeline = %timeline_id).entered(); + + crate::walreceiver::get_wal_receiver_entry(tenant_id, timeline_id) + }) + .await + .map_err(ApiError::from_err)? + .ok_or_else(|| { + ApiError::NotFound(format!( + "WAL receiver not found for tenant {} and timeline {}", + tenant_id, timeline_id + )) + })?; + + json_response(StatusCode::OK, wal_receiver) +} + async fn timeline_attach_handler(request: Request) -> Result, ApiError> { let tenant_id: ZTenantId = parse_request_param(&request, "tenant_id")?; check_permission(&request, Some(tenant_id))?; @@ -485,6 +509,10 @@ pub fn make_router( "/v1/tenant/:tenant_id/timeline/:timeline_id", timeline_detail_handler, ) + .get( + "/v1/tenant/:tenant_id/timeline/:timeline_id/wal_receiver", + wal_receiver_get_handler, + ) .post( "/v1/tenant/:tenant_id/timeline/:timeline_id/attach", timeline_attach_handler, diff --git a/pageserver/src/layered_repository.rs b/pageserver/src/layered_repository.rs index 01c2b961eb..a83907430e 100644 --- a/pageserver/src/layered_repository.rs +++ b/pageserver/src/layered_repository.rs @@ -74,6 +74,7 @@ pub mod metadata; mod par_fsync; mod storage_layer; +use crate::pgdatadir_mapping::LsnForTimestamp; use delta_layer::{DeltaLayer, DeltaLayerWriter}; use ephemeral_file::is_ephemeral_file; use filename::{DeltaFileName, ImageFileName}; @@ -81,6 +82,7 @@ use image_layer::{ImageLayer, ImageLayerWriter}; use inmemory_layer::InMemoryLayer; use layer_map::LayerMap; use layer_map::SearchResult; +use postgres_ffi::xlog_utils::to_pg_timestamp; use storage_layer::{Layer, ValueReconstructResult, ValueReconstructState}; // re-export this function so that page_cache.rs can use it. @@ -89,7 +91,7 @@ pub use crate::layered_repository::ephemeral_file::writeback as writeback_epheme // Metrics collected on operations on the storage repository. lazy_static! { static ref STORAGE_TIME: HistogramVec = register_histogram_vec!( - "pageserver_storage_time", + "pageserver_storage_operations_seconds", "Time spent on storage operations", &["operation", "tenant_id", "timeline_id"] ) @@ -99,8 +101,8 @@ lazy_static! { // Metrics collected on operations on the storage repository. lazy_static! { static ref RECONSTRUCT_TIME: HistogramVec = register_histogram_vec!( - "pageserver_getpage_reconstruct_time", - "Time spent on storage operations", + "pageserver_getpage_reconstruct_seconds", + "Time spent in reconstruct_value", &["tenant_id", "timeline_id"] ) .expect("failed to define a metric"); @@ -108,13 +110,13 @@ lazy_static! { lazy_static! 
{ static ref MATERIALIZED_PAGE_CACHE_HIT: IntCounterVec = register_int_counter_vec!( - "materialize_page_cache_hits", + "pageserver_materialized_cache_hits_total", "Number of cache hits from materialized page cache", &["tenant_id", "timeline_id"] ) .expect("failed to define a metric"); static ref WAIT_LSN_TIME: HistogramVec = register_histogram_vec!( - "wait_lsn_time", + "pageserver_wait_lsn_seconds", "Time spent waiting for WAL to arrive", &["tenant_id", "timeline_id"] ) @@ -134,12 +136,12 @@ lazy_static! { // or in testing they estimate how much we would upload if we did. lazy_static! { static ref NUM_PERSISTENT_FILES_CREATED: IntCounter = register_int_counter!( - "pageserver_num_persistent_files_created", + "pageserver_created_persistent_files_total", "Number of files created that are meant to be uploaded to cloud storage", ) .expect("failed to define a metric"); static ref PERSISTENT_BYTES_WRITTEN: IntCounter = register_int_counter!( - "pageserver_persistent_bytes_written", + "pageserver_written_persistent_bytes_total", "Total bytes written that are meant to be uploaded to cloud storage", ) .expect("failed to define a metric"); @@ -1355,7 +1357,9 @@ impl LayeredTimeline { let mut timeline_owned; let mut timeline = self; - let mut path: Vec<(ValueReconstructResult, Lsn, Arc)> = Vec::new(); + // For debugging purposes, collect the path of layers that we traversed + // through. It's included in the error message if we fail to find the key. + let mut traversal_path: Vec<(ValueReconstructResult, Lsn, Arc)> = Vec::new(); let cached_lsn = if let Some((cached_lsn, _)) = &reconstruct_state.img { *cached_lsn @@ -1385,32 +1389,24 @@ impl LayeredTimeline { if prev_lsn <= cont_lsn { // Didn't make any progress in last iteration. Error out to avoid // getting stuck in the loop. - - // For debugging purposes, print the path of layers that we traversed - // through. - for (r, c, l) in path { - error!( - "PATH: result {:?}, cont_lsn {}, layer: {}", - r, - c, - l.filename().display() - ); - } - bail!("could not find layer with more data for key {} at LSN {}, request LSN {}, ancestor {}", - key, - Lsn(cont_lsn.0 - 1), - request_lsn, - timeline.ancestor_lsn) + return layer_traversal_error(format!( + "could not find layer with more data for key {} at LSN {}, request LSN {}, ancestor {}", + key, + Lsn(cont_lsn.0 - 1), + request_lsn, + timeline.ancestor_lsn + ), traversal_path); } prev_lsn = cont_lsn; } ValueReconstructResult::Missing => { - bail!( - "could not find data for key {} at LSN {}, for request at LSN {}", - key, - cont_lsn, - request_lsn - ) + return layer_traversal_error( + format!( + "could not find data for key {} at LSN {}, for request at LSN {}", + key, cont_lsn, request_lsn + ), + traversal_path, + ); } } @@ -1445,7 +1441,7 @@ impl LayeredTimeline { reconstruct_state, )?; cont_lsn = lsn_floor; - path.push((result, cont_lsn, open_layer.clone())); + traversal_path.push((result, cont_lsn, open_layer.clone())); continue; } } @@ -1460,7 +1456,7 @@ impl LayeredTimeline { reconstruct_state, )?; cont_lsn = lsn_floor; - path.push((result, cont_lsn, frozen_layer.clone())); + traversal_path.push((result, cont_lsn, frozen_layer.clone())); continue 'outer; } } @@ -1475,7 +1471,7 @@ impl LayeredTimeline { reconstruct_state, )?; cont_lsn = lsn_floor; - path.push((result, cont_lsn, layer)); + traversal_path.push((result, cont_lsn, layer)); } else if timeline.ancestor_timeline.is_some() { // Nothing on this timeline. 
Traverse to parent result = ValueReconstructResult::Continue; @@ -1512,7 +1508,7 @@ impl LayeredTimeline { .ensure_loaded() .with_context(|| { format!( - "Ancestor timeline is not is not loaded. Timeline id: {} Ancestor id {:?}", + "Ancestor timeline is not loaded. Timeline id: {} Ancestor id {:?}", self.timeline_id, self.get_ancestor_timeline_id(), ) @@ -1619,22 +1615,30 @@ impl LayeredTimeline { pub fn check_checkpoint_distance(self: &Arc) -> Result<()> { let last_lsn = self.get_last_record_lsn(); + // Has more than 'checkpoint_distance' of WAL been accumulated? let distance = last_lsn.widening_sub(self.last_freeze_at.load()); if distance >= self.get_checkpoint_distance().into() { + // Yes. Freeze the current in-memory layer. self.freeze_inmem_layer(true); self.last_freeze_at.store(last_lsn); - } - if let Ok(guard) = self.layer_flush_lock.try_lock() { - drop(guard); - let self_clone = Arc::clone(self); - thread_mgr::spawn( - thread_mgr::ThreadKind::LayerFlushThread, - Some(self.tenant_id), - Some(self.timeline_id), - "layer flush thread", - false, - move || self_clone.flush_frozen_layers(false), - )?; + + // Launch a thread to flush the frozen layer to disk, unless + // a thread was already running. (If the thread was running + // at the time that we froze the layer, it must've seen the + // the layer we just froze before it exited; see comments + // in flush_frozen_layers()) + if let Ok(guard) = self.layer_flush_lock.try_lock() { + drop(guard); + let self_clone = Arc::clone(self); + thread_mgr::spawn( + thread_mgr::ThreadKind::LayerFlushThread, + Some(self.tenant_id), + Some(self.timeline_id), + "layer flush thread", + false, + move || self_clone.flush_frozen_layers(false), + )?; + } } Ok(()) } @@ -1942,41 +1946,87 @@ impl LayeredTimeline { Ok(new_path) } + /// + /// Collect a bunch of Level 0 layer files, and compact and reshuffle them as + /// as Level 1 files. + /// fn compact_level0(&self, target_file_size: u64) -> Result<()> { let layers = self.layers.read().unwrap(); - - let level0_deltas = layers.get_level0_deltas()?; - - // We compact or "shuffle" the level-0 delta layers when they've - // accumulated over the compaction threshold. - if level0_deltas.len() < self.get_compaction_threshold() { - return Ok(()); - } + let mut level0_deltas = layers.get_level0_deltas()?; drop(layers); - // FIXME: this function probably won't work correctly if there's overlap - // in the deltas. - let lsn_range = level0_deltas - .iter() - .map(|l| l.get_lsn_range()) - .reduce(|a, b| min(a.start, b.start)..max(a.end, b.end)) - .unwrap(); + // Only compact if enough layers have accumulated. + if level0_deltas.is_empty() || level0_deltas.len() < self.get_compaction_threshold() { + return Ok(()); + } - let all_values_iter = level0_deltas.iter().map(|l| l.iter()).kmerge_by(|a, b| { - if let Ok((a_key, a_lsn, _)) = a { - if let Ok((b_key, b_lsn, _)) = b { - match a_key.cmp(b_key) { - Ordering::Less => true, - Ordering::Equal => a_lsn <= b_lsn, - Ordering::Greater => false, + // Gather the files to compact in this iteration. + // + // Start with the oldest Level 0 delta file, and collect any other + // level 0 files that form a contiguous sequence, such that the end + // LSN of previous file matches the start LSN of the next file. + // + // Note that if the files don't form such a sequence, we might + // "compact" just a single file. That's a bit pointless, but it allows + // us to get rid of the level 0 file, and compact the other files on + // the next iteration. 
This could probably made smarter, but such + // "gaps" in the sequence of level 0 files should only happen in case + // of a crash, partial download from cloud storage, or something like + // that, so it's not a big deal in practice. + level0_deltas.sort_by_key(|l| l.get_lsn_range().start); + let mut level0_deltas_iter = level0_deltas.iter(); + + let first_level0_delta = level0_deltas_iter.next().unwrap(); + let mut prev_lsn_end = first_level0_delta.get_lsn_range().end; + let mut deltas_to_compact = vec![Arc::clone(first_level0_delta)]; + for l in level0_deltas_iter { + let lsn_range = l.get_lsn_range(); + + if lsn_range.start != prev_lsn_end { + break; + } + deltas_to_compact.push(Arc::clone(l)); + prev_lsn_end = lsn_range.end; + } + let lsn_range = Range { + start: deltas_to_compact.first().unwrap().get_lsn_range().start, + end: deltas_to_compact.last().unwrap().get_lsn_range().end, + }; + + info!( + "Starting Level0 compaction in LSN range {}-{} for {} layers ({} deltas in total)", + lsn_range.start, + lsn_range.end, + deltas_to_compact.len(), + level0_deltas.len() + ); + for l in deltas_to_compact.iter() { + info!("compact includes {}", l.filename().display()); + } + // We don't need the original list of layers anymore. Drop it so that + // we don't accidentally use it later in the function. + drop(level0_deltas); + + // This iterator walks through all key-value pairs from all the layers + // we're compacting, in key, LSN order. + let all_values_iter = deltas_to_compact + .iter() + .map(|l| l.iter()) + .kmerge_by(|a, b| { + if let Ok((a_key, a_lsn, _)) = a { + if let Ok((b_key, b_lsn, _)) = b { + match a_key.cmp(b_key) { + Ordering::Less => true, + Ordering::Equal => a_lsn <= b_lsn, + Ordering::Greater => false, + } + } else { + false } } else { - false + true } - } else { - true - } - }); + }); // Merge the contents of all the input delta layers into a new set // of delta layers, based on the current partitioning. @@ -2042,8 +2092,8 @@ impl LayeredTimeline { // Now that we have reshuffled the data to set of new delta layers, we can // delete the old ones - let mut layer_paths_do_delete = HashSet::with_capacity(level0_deltas.len()); - for l in level0_deltas { + let mut layer_paths_do_delete = HashSet::with_capacity(deltas_to_compact.len()); + for l in deltas_to_compact { l.delete()?; if let Some(path) = l.local_path() { layer_paths_do_delete.insert(path); @@ -2115,14 +2165,57 @@ impl LayeredTimeline { let gc_info = self.gc_info.read().unwrap(); let retain_lsns = &gc_info.retain_lsns; - let cutoff = gc_info.cutoff; + let cutoff = min(gc_info.cutoff, disk_consistent_lsn); let pitr = gc_info.pitr; + // Calculate pitr cutoff point. + // If we cannot determine a cutoff LSN, be conservative and don't GC anything. + let mut pitr_cutoff_lsn: Lsn = *self.get_latest_gc_cutoff_lsn(); + + if let Ok(timeline) = + tenant_mgr::get_local_timeline_with_load(self.tenant_id, self.timeline_id) + { + // First, calculate pitr_cutoff_timestamp and then convert it to LSN. + // If we don't have enough data to convert to LSN, + // play safe and don't remove any layers. + if let Some(pitr_cutoff_timestamp) = now.checked_sub(pitr) { + let pitr_timestamp = to_pg_timestamp(pitr_cutoff_timestamp); + + match timeline.find_lsn_for_timestamp(pitr_timestamp)? 
{ + LsnForTimestamp::Present(lsn) => pitr_cutoff_lsn = lsn, + LsnForTimestamp::Future(lsn) => { + debug!("future({})", lsn); + pitr_cutoff_lsn = cutoff; + } + LsnForTimestamp::Past(lsn) => { + debug!("past({})", lsn); + } + } + debug!("pitr_cutoff_lsn = {:?}", pitr_cutoff_lsn) + } + } else if cfg!(test) { + // We don't have local timeline in mocked cargo tests. + // So, just ignore pitr_interval setting in this case. + pitr_cutoff_lsn = cutoff; + } + + let new_gc_cutoff = Lsn::min(cutoff, pitr_cutoff_lsn); + + // Nothing to GC. Return early. + if *self.get_latest_gc_cutoff_lsn() >= new_gc_cutoff { + info!( + "Nothing to GC for timeline {}. cutoff_lsn {}", + self.timeline_id, new_gc_cutoff + ); + result.elapsed = now.elapsed()?; + return Ok(result); + } + let _enter = info_span!("garbage collection", timeline = %self.timeline_id, tenant = %self.tenant_id, cutoff = %cutoff).entered(); // We need to ensure that no one branches at a point before latest_gc_cutoff_lsn. // See branch_timeline() for details. - *self.latest_gc_cutoff_lsn.write().unwrap() = cutoff; + *self.latest_gc_cutoff_lsn.write().unwrap() = new_gc_cutoff; info!("GC starting"); @@ -2162,30 +2255,18 @@ impl LayeredTimeline { result.layers_needed_by_cutoff += 1; continue 'outer; } - // 2. It is newer than PiTR interval? - // We use modification time of layer file to estimate update time. - // This estimation is not quite precise but maintaining LSN->timestamp map seems to be overkill. - // It is not expected that users will need high precision here. And this estimation - // is conservative: modification time of file is always newer than actual time of version - // creation. So it is safe for users. - // TODO A possible "bloat" issue still persists here. - // If modification time changes because of layer upload/download, we will keep these files - // longer than necessary. - // https://github.com/neondatabase/neon/issues/1554 - // - if let Ok(metadata) = fs::metadata(&l.filename()) { - let last_modified = metadata.modified()?; - if now.duration_since(last_modified)? < pitr { - debug!( - "keeping {} because it's modification time {:?} is newer than PITR {:?}", - l.filename().display(), - last_modified, - pitr - ); - result.layers_needed_by_pitr += 1; - continue 'outer; - } + + // 2. It is newer than PiTR cutoff point? + if l.get_lsn_range().end > pitr_cutoff_lsn { + debug!( + "keeping {} because it's newer than pitr_cutoff_lsn {}", + l.filename().display(), + pitr_cutoff_lsn + ); + result.layers_needed_by_pitr += 1; + continue 'outer; } + // 3. Is it needed by a child branch? // NOTE With that wee would keep data that // might be referenced by child branches forever. @@ -2213,12 +2294,20 @@ impl LayeredTimeline { // is 102, then it might not have been fully flushed to disk // before crash. // - // FIXME: This logic is wrong. See https://github.com/zenithdb/zenith/issues/707 - if !layers.newer_image_layer_exists( - &l.get_key_range(), - l.get_lsn_range().end, - disk_consistent_lsn + 1, - )? { + // For example, imagine that the following layers exist: + // + // 1000 - image (A) + // 1000-2000 - delta (B) + // 2000 - image (C) + // 2000-3000 - delta (D) + // 3000 - image (E) + // + // If GC horizon is at 2500, we can remove layers A and B, but + // we cannot remove C, even though it's older than 2500, because + // the delta layer 2000-3000 depends on it. + if !layers + .image_layer_exists(&l.get_key_range(), &(l.get_lsn_range().end..new_gc_cutoff))? 
+ { debug!( "keeping {} because it is the latest layer", l.filename().display() @@ -2334,6 +2423,32 @@ impl LayeredTimeline { } } +/// Helper function for get_reconstruct_data() to add the path of layers traversed +/// to an error, as anyhow context information. +fn layer_traversal_error( + msg: String, + path: Vec<(ValueReconstructResult, Lsn, Arc)>, +) -> anyhow::Result<()> { + // We want the original 'msg' to be the outermost context. The outermost context + // is the most high-level information, which also gets propagated to the client. + let mut msg_iter = path + .iter() + .map(|(r, c, l)| { + format!( + "layer traversal: result {:?}, cont_lsn {}, layer: {}", + r, + c, + l.filename().display() + ) + }) + .chain(std::iter::once(msg)); + // Construct initial message from the first traversed layer + let err = anyhow!(msg_iter.next().unwrap()); + + // Append all subsequent traversals, and the error message 'msg', as contexts. + Err(msg_iter.fold(err, |err, msg| err.context(msg))) +} + struct LayeredTimelineWriter<'a> { tl: &'a LayeredTimeline, _write_guard: MutexGuard<'a, ()>, diff --git a/pageserver/src/layered_repository/README.md b/pageserver/src/layered_repository/README.md index 519478e417..70c571a507 100644 --- a/pageserver/src/layered_repository/README.md +++ b/pageserver/src/layered_repository/README.md @@ -23,6 +23,7 @@ distribution depends on the workload: the updates could be totally random, or there could be a long stream of updates to a single relation when data is bulk loaded, for example, or something in between. +``` Cloud Storage Page Server Safekeeper L1 L0 Memory WAL @@ -37,6 +38,7 @@ Cloud Storage Page Server Safekeeper +----+----+ +----+----+ | | | |EEEE| |EEEE|EEEE| +---+-----+ +----+ +----+----+ +``` In this illustration, WAL is received as a stream from the Safekeeper, from the right. It is immediately captured by the page server and stored quickly in @@ -47,7 +49,7 @@ the same page and relation close to each other. From the page server memory, whenever enough WAL has been accumulated, it is flushed to disk into a new L0 layer file, and the memory is released. -When enough L0 files have been accumulated, they are merged together rand sliced +When enough L0 files have been accumulated, they are merged together and sliced per key-space, producing a new set of files where each file contains a more narrow key range, but larger LSN range. @@ -121,7 +123,7 @@ The files are called "layer files". Each layer file covers a range of keys, and a range of LSNs (or a single LSN, in case of image layers). You can think of it as a rectangle in the two-dimensional key-LSN space. The layer files for each timeline are stored in the timeline's subdirectory under -.zenith/tenants//timelines. +`.zenith/tenants//timelines`. There are two kind of layer files: images, and delta layers. An image file contains a snapshot of all keys at a particular LSN, whereas a delta file @@ -130,8 +132,11 @@ range of LSN. image file: +``` 000000067F000032BE0000400000000070B6-000000067F000032BE0000400000000080B6__00000000346BC568 start key end key LSN +``` + The first parts define the key range that the layer covers. See pgdatadir_mapping.rs for how the key space is used. The last part is the LSN. 
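To make the naming scheme concrete, here is a hypothetical, simplified parser for the image layer file name shown above. The pageserver has its own `ImageFileName` parsing, so this is illustration only:

```rust
/// Parsed form of an image layer file name:
///   <start key>-<end key>__<LSN in hex>
#[derive(Debug)]
struct ImageName {
    key_start: String,
    key_end: String,
    lsn: u64,
}

fn parse_image_name(name: &str) -> Option<ImageName> {
    // Split "<keys>__<lsn>" first, then the key range on '-'.
    let (keys, lsn_hex) = name.split_once("__")?;
    let (key_start, key_end) = keys.split_once('-')?;
    Some(ImageName {
        key_start: key_start.to_string(),
        key_end: key_end.to_string(),
        lsn: u64::from_str_radix(lsn_hex, 16).ok()?,
    })
}

fn main() {
    let n = parse_image_name(
        "000000067F000032BE0000400000000070B6-000000067F000032BE0000400000000080B6__00000000346BC568",
    )
    .unwrap();
    // The LSN part is plain hex: 0x346BC568.
    assert_eq!(n.lsn, 0x346B_C568);
    println!("{} .. {} @ {:X}", n.key_start, n.key_end, n.lsn);
}
```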
@@ -140,8 +145,10 @@ delta file: Delta files are named similarly, but they cover a range of LSNs: +``` 000000067F000032BE0000400000000020B6-000000067F000032BE0000400000000030B6__000000578C6B29-0000000057A50051 start key end key start LSN end LSN +``` A delta file contains all the key-values in the key-range that were updated in the LSN range. If a key has not been modified, there is no trace of it in the @@ -151,7 +158,9 @@ delta layer. A delta layer file can cover a part of the overall key space, as in the previous example, or the whole key range like this: +``` 000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__000000578C6B29-0000000057A50051 +``` A file that covers the whole key range is called a L0 file (Level 0), while a file that covers only part of the key range is called a L1 file. The "level" of @@ -168,7 +177,9 @@ version, and how branching and GC works is still valid. The full path of a delta file looks like this: +``` .zenith/tenants/941ddc8604413b88b3d208bddf90396c/timelines/4af489b06af8eed9e27a841775616962/rel_1663_13990_2609_0_10_000000000169C348_0000000001702000 +``` For simplicity, the examples below use a simplified notation for the paths. The tenant ID is left out, the timeline ID is replaced with @@ -177,8 +188,10 @@ with a human-readable table name. The LSNs are also shorter. For example, a base image file at LSN 100 and a delta file between 100-200 for 'orders' table on 'main' branch is represented like this: +``` main/orders_100 main/orders_100_200 +``` # Creating layer files @@ -188,12 +201,14 @@ branch called 'main' and two tables, 'orders' and 'customers'. The end of WAL is currently at LSN 250. In this starting situation, you would have these files on disk: +``` main/orders_100 main/orders_100_200 main/orders_200 main/customers_100 main/customers_100_200 main/customers_200 +``` In addition to those files, the recent changes between LSN 200 and the end of WAL at 250 are kept in memory. If the page server crashes, the @@ -224,6 +239,7 @@ If the customers table is modified later, a new file is created for it at the next checkpoint. The new file will cover the "gap" from the last layer file, so the LSN ranges are always contiguous: +``` main/orders_100 main/orders_100_200 main/orders_200 @@ -236,6 +252,7 @@ last layer file, so the LSN ranges are always contiguous: main/customers_200 main/customers_200_500 main/customers_500 +``` ## Reading page versions @@ -259,15 +276,18 @@ involves replaying any WAL records applicable to the page between LSNs Imagine that a child branch is created at LSN 250: +``` @250 ----main--+--------------------------> \ +---child--------------> +``` Then, the 'orders' table is updated differently on the 'main' and 'child' branches. You now have this situation on disk: +``` main/orders_100 main/orders_100_200 main/orders_200 @@ -282,6 +302,7 @@ Then, the 'orders' table is updated differently on the 'main' and child/orders_300 child/orders_300_400 child/orders_400 +``` Because the 'customers' table hasn't been modified on the child branch, there is no file for it there. If you request a page for it on @@ -294,6 +315,7 @@ is linear, and the request's LSN identifies unambiguously which file you need to look at. 
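In code form, the lookup rule just described might look like the toy sketch below, using plain integers for LSNs and ignoring key ranges and branches; the real layer map is considerably more involved:

```rust
#[derive(Debug)]
enum Layer {
    Image { lsn: u64 },
    Delta { start: u64, end: u64 },
}

/// Return the newest image at or below `request_lsn`, followed by the delta
/// layers whose LSN range lies between that image and the request LSN.
fn layers_for_read(layers: &[Layer], request_lsn: u64) -> Vec<Layer> {
    let base = layers
        .iter()
        .filter_map(|l| match l {
            Layer::Image { lsn } if *lsn <= request_lsn => Some(*lsn),
            _ => None,
        })
        .max()
        .unwrap_or(0);
    let mut result = vec![Layer::Image { lsn: base }];
    for l in layers {
        if let Layer::Delta { start, end } = l {
            if *start >= base && *start < request_lsn {
                result.push(Layer::Delta { start: *start, end: *end });
            }
        }
    }
    result
}

fn main() {
    // orders_100, orders_100_200, orders_200, orders_200_300
    let layers = [
        Layer::Image { lsn: 100 },
        Layer::Delta { start: 100, end: 200 },
        Layer::Image { lsn: 200 },
        Layer::Delta { start: 200, end: 300 },
    ];
    // A read at LSN 275 needs the image at 200 plus the 200-300 delta on top.
    let needed = layers_for_read(&layers, 275);
    assert_eq!(needed.len(), 2);
    println!("{needed:?}");
}
```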
For example, the history for the 'orders' table on the 'main' branch consists of these files: +``` main/orders_100 main/orders_100_200 main/orders_200 @@ -301,10 +323,12 @@ on the 'main' branch consists of these files: main/orders_300 main/orders_300_400 main/orders_400 +``` And from the 'child' branch's point of view, it consists of these files: +``` main/orders_100 main/orders_100_200 main/orders_200 @@ -313,6 +337,7 @@ files: child/orders_300 child/orders_300_400 child/orders_400 +``` The branch metadata includes the point where the child branch was created, LSN 250. If a page request comes with LSN 275, we read the @@ -345,6 +370,7 @@ Let's look at the single branch scenario again. Imagine that the end of the branch is LSN 525, so that the GC horizon is currently at 525-150 = 375 +``` main/orders_100 main/orders_100_200 main/orders_200 @@ -357,11 +383,13 @@ of the branch is LSN 525, so that the GC horizon is currently at main/customers_100 main/customers_100_200 main/customers_200 +``` We can remove the following files because the end LSNs of those files are older than GC horizon 375, and there are more recent layer files for the table: +``` main/orders_100 DELETE main/orders_100_200 DELETE main/orders_200 DELETE @@ -374,8 +402,9 @@ table: main/customers_100 DELETE main/customers_100_200 DELETE main/customers_200 KEEP, NO NEWER VERSION +``` -'main/customers_100_200' is old enough, but it cannot be +'main/customers_200' is old enough, but it cannot be removed because there is no newer layer file for the table. Things get slightly more complicated with multiple branches. All of @@ -384,6 +413,7 @@ retain older shapshot files that are still needed by child branches. For example, if child branch is created at LSN 150, and the 'customers' table is updated on the branch, you would have these files: +``` main/orders_100 KEEP, NEEDED BY child BRANCH main/orders_100_200 KEEP, NEEDED BY child BRANCH main/orders_200 DELETE @@ -398,6 +428,7 @@ table is updated on the branch, you would have these files: main/customers_200 KEEP, NO NEWER VERSION child/customers_150_300 DELETE child/customers_300 KEEP, NO NEWER VERSION +``` In this situation, 'main/orders_100' and 'main/orders_100_200' cannot be removed, even though they are older than the GC horizon, because @@ -407,6 +438,7 @@ and 'main/orders_200_300' can still be removed. If 'orders' is modified later on the 'child' branch, we will create a new base image and delta file for it on the child: +``` main/orders_100 main/orders_100_200 @@ -419,6 +451,7 @@ new base image and delta file for it on the child: child/customers_300 child/orders_150_400 child/orders_400 +``` After this, the 'main/orders_100' and 'main/orders_100_200' file could be removed. It is no longer needed by the child branch, because there @@ -434,6 +467,7 @@ Describe GC and checkpoint interval settings. In principle, each relation can be checkpointed separately, i.e. the LSN ranges of the files don't need to line up. So this would be legal: +``` main/orders_100 main/orders_100_200 main/orders_200 @@ -446,6 +480,7 @@ LSN ranges of the files don't need to line up. So this would be legal: main/customers_250 main/customers_250_500 main/customers_500 +``` However, the code currently always checkpoints all relations together. So that situation doesn't arise in practice. @@ -468,11 +503,13 @@ does that. It could be useful, however, as a transient state when garbage collecting around branch points, or explicit recovery points. 
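To recap the single-branch deletion rule illustrated above as code: a toy model, not the real `gc_timeline`, which also honors the PITR window, child branches, and the disk-consistent LSN. The file names and LSNs mirror the example listing.

```rust
#[derive(Debug, Clone, Copy)]
enum SimpleLayer {
    Image { lsn: u64 },
    Delta { start: u64, end: u64 },
}

/// Newest image LSN present for this piece of the key space.
fn newest_image_lsn(all: &[SimpleLayer]) -> u64 {
    all.iter()
        .filter_map(|l| match l {
            SimpleLayer::Image { lsn } => Some(*lsn),
            SimpleLayer::Delta { .. } => None,
        })
        .max()
        .unwrap_or(0)
}

/// A layer may be dropped if it ends below the GC horizon and a newer image
/// makes it redundant: strictly newer for images, at-or-after-the-end for deltas.
fn can_delete(layer: SimpleLayer, horizon: u64, newest_image: u64) -> bool {
    match layer {
        SimpleLayer::Image { lsn } => lsn < horizon && newest_image > lsn,
        SimpleLayer::Delta { end, .. } => end < horizon && newest_image >= end,
    }
}

fn main() {
    // End of branch at LSN 525, horizon 150 back => cutoff at 375.
    let horizon = 525 - 150;
    let customers = [
        ("main/customers_100", SimpleLayer::Image { lsn: 100 }),
        ("main/customers_100_200", SimpleLayer::Delta { start: 100, end: 200 }),
        ("main/customers_200", SimpleLayer::Image { lsn: 200 }),
    ];
    let layers_only: Vec<SimpleLayer> = customers.iter().map(|(_, l)| *l).collect();
    let newest = newest_image_lsn(&layers_only);
    for (name, layer) in customers {
        let verdict = if can_delete(layer, horizon, newest) { "DELETE" } else { "KEEP" };
        println!("{name}: {verdict}");
    }
    // Prints DELETE, DELETE, KEEP: customers_200 has no newer version.
}
```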
For example, if we start with this: +``` main/orders_100 main/orders_100_200 main/orders_200 main/orders_200_300 main/orders_300 +``` And there is a branch or explicit recovery point at LSN 150, we could replace 'main/orders_100_200' with 'main/orders_150' to keep a diff --git a/pageserver/src/layered_repository/delta_layer.rs b/pageserver/src/layered_repository/delta_layer.rs index e78b05695c..ed342c0cca 100644 --- a/pageserver/src/layered_repository/delta_layer.rs +++ b/pageserver/src/layered_repository/delta_layer.rs @@ -37,11 +37,8 @@ use crate::virtual_file::VirtualFile; use crate::walrecord; use crate::{DELTA_FILE_MAGIC, STORAGE_FORMAT_VERSION}; use anyhow::{bail, ensure, Context, Result}; +use rand::{distributions::Alphanumeric, Rng}; use serde::{Deserialize, Serialize}; -use tracing::*; -// avoid binding to Write (conflicts with std::io::Write) -// while being able to use std::fmt::Write's methods -use std::fmt::Write as _; use std::fs; use std::io::{BufWriter, Write}; use std::io::{Seek, SeekFrom}; @@ -49,6 +46,7 @@ use std::ops::Range; use std::os::unix::fs::FileExt; use std::path::{Path, PathBuf}; use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}; +use tracing::*; use utils::{ bin_ser::BeSer, @@ -254,6 +252,9 @@ impl Layer for DeltaLayer { return false; } let entry_lsn = DeltaKey::extract_lsn_from_buf(key); + if entry_lsn < lsn_range.start { + return false; + } offsets.push((entry_lsn, blob_ref.pos())); !blob_ref.will_init() @@ -362,6 +363,28 @@ impl Layer for DeltaLayer { tree_reader.dump()?; let mut cursor = file.block_cursor(); + + // A subroutine to dump a single blob + let mut dump_blob = |blob_ref: BlobRef| -> anyhow::Result { + let buf = cursor.read_blob(blob_ref.pos())?; + let val = Value::des(&buf)?; + let desc = match val { + Value::Image(img) => { + format!(" img {} bytes", img.len()) + } + Value::WalRecord(rec) => { + let wal_desc = walrecord::describe_wal_record(&rec)?; + format!( + " rec {} bytes will_init: {} {}", + buf.len(), + rec.will_init(), + wal_desc + ) + } + }; + Ok(desc) + }; + tree_reader.visit( &[0u8; DELTA_KEY_SIZE], VisitDirection::Forwards, @@ -370,34 +393,10 @@ impl Layer for DeltaLayer { let key = DeltaKey::extract_key_from_buf(delta_key); let lsn = DeltaKey::extract_lsn_from_buf(delta_key); - let mut desc = String::new(); - match cursor.read_blob(blob_ref.pos()) { - Ok(buf) => { - let val = Value::des(&buf); - match val { - Ok(Value::Image(img)) => { - write!(&mut desc, " img {} bytes", img.len()).unwrap(); - } - Ok(Value::WalRecord(rec)) => { - let wal_desc = walrecord::describe_wal_record(&rec).unwrap(); - write!( - &mut desc, - " rec {} bytes will_init: {} {}", - buf.len(), - rec.will_init(), - wal_desc - ) - .unwrap(); - } - Err(err) => { - write!(&mut desc, " DESERIALIZATION ERROR: {}", err).unwrap(); - } - } - } - Err(err) => { - write!(&mut desc, " READ ERROR: {}", err).unwrap(); - } - } + let desc = match dump_blob(blob_ref) { + Ok(desc) => desc, + Err(err) => format!("ERROR: {}", err), + }; println!(" key {} at {}: {}", key, lsn, desc); true }, @@ -422,6 +421,28 @@ impl DeltaLayer { } } + fn temp_path_for( + conf: &PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + key_start: Key, + lsn_range: &Range, + ) -> PathBuf { + let rand_string: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect(); + + conf.timeline_path(&timelineid, &tenantid).join(format!( + "{}-XXX__{:016X}-{:016X}.{}.temp", + key_start, + u64::from(lsn_range.start), + u64::from(lsn_range.end), + 
rand_string + )) + } + /// /// Open the underlying file and read the metadata into memory, if it's /// not loaded already. @@ -609,12 +630,8 @@ impl DeltaLayerWriter { // // Note: This overwrites any existing file. There shouldn't be any. // FIXME: throw an error instead? - let path = conf.timeline_path(&timelineid, &tenantid).join(format!( - "{}-XXX__{:016X}-{:016X}.temp", - key_start, - u64::from(lsn_range.start), - u64::from(lsn_range.end) - )); + let path = DeltaLayer::temp_path_for(conf, timelineid, tenantid, key_start, &lsn_range); + let mut file = VirtualFile::create(&path)?; // make room for the header block file.seek(SeekFrom::Start(PAGE_SZ as u64))?; @@ -707,6 +724,8 @@ impl DeltaLayerWriter { }), }; + // fsync the file + file.sync_all()?; // Rename the file to its final name // // Note: This overwrites any existing file. There shouldn't be any. diff --git a/pageserver/src/layered_repository/disk_btree.rs b/pageserver/src/layered_repository/disk_btree.rs index e747192d96..0c9ad75048 100644 --- a/pageserver/src/layered_repository/disk_btree.rs +++ b/pageserver/src/layered_repository/disk_btree.rs @@ -444,6 +444,13 @@ where /// /// stack[0] is the current root page, stack.last() is the leaf. /// + /// We maintain the length of the stack to be always greater than zero. + /// Two exceptions are: + /// 1. `Self::flush_node`. The method will push the new node if it extracted the last one. + /// So because other methods cannot see the intermediate state invariant still holds. + /// 2. `Self::finish`. It consumes self and does not return it back, + /// which means that this is where the structure is destroyed. + /// Thus stack of zero length cannot be observed by other methods. stack: Vec>, /// Last key that was appended to the tree. Used to sanity check that append @@ -482,7 +489,10 @@ where fn append_internal(&mut self, key: &[u8; L], value: Value) -> Result<()> { // Try to append to the current leaf buffer - let last = self.stack.last_mut().unwrap(); + let last = self + .stack + .last_mut() + .expect("should always have at least one item"); let level = last.level; if last.push(key, value) { return Ok(()); @@ -512,19 +522,25 @@ where Ok(()) } + /// Flush the bottommost node in the stack to disk. Appends a downlink to its parent, + /// and recursively flushes the parent too, if it becomes full. If the root page becomes full, + /// creates a new root page, increasing the height of the tree. fn flush_node(&mut self) -> Result<()> { - let last = self.stack.pop().unwrap(); + // Get the current bottommost node in the stack and flush it to disk. + let last = self + .stack + .pop() + .expect("should always have at least one item"); let buf = last.pack(); let downlink_key = last.first_key(); let downlink_ptr = self.writer.write_blk(buf)?; - // Append the downlink to the parent + // Append the downlink to the parent. If there is no parent, ie. this was the root page, + // create a new root page, increasing the height of the tree. 
if self.stack.is_empty() { self.stack.push(BuildNode::new(last.level + 1)); } - self.append_internal(&downlink_key, Value::from_blknum(downlink_ptr))?; - - Ok(()) + self.append_internal(&downlink_key, Value::from_blknum(downlink_ptr)) } /// @@ -540,7 +556,10 @@ where self.flush_node()?; } - let root = self.stack.first().unwrap(); + let root = self + .stack + .first() + .expect("by the check above we left one item there"); let buf = root.pack(); let root_blknum = self.writer.write_blk(buf)?; diff --git a/pageserver/src/layered_repository/image_layer.rs b/pageserver/src/layered_repository/image_layer.rs index c0c8e7789a..905023ecf9 100644 --- a/pageserver/src/layered_repository/image_layer.rs +++ b/pageserver/src/layered_repository/image_layer.rs @@ -34,6 +34,7 @@ use crate::{IMAGE_FILE_MAGIC, STORAGE_FORMAT_VERSION}; use anyhow::{bail, ensure, Context, Result}; use bytes::Bytes; use hex; +use rand::{distributions::Alphanumeric, Rng}; use serde::{Deserialize, Serialize}; use std::fs; use std::io::Write; @@ -241,6 +242,22 @@ impl ImageLayer { } } + fn temp_path_for( + conf: &PageServerConf, + timelineid: ZTimelineId, + tenantid: ZTenantId, + fname: &ImageFileName, + ) -> PathBuf { + let rand_string: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect(); + + conf.timeline_path(&timelineid, &tenantid) + .join(format!("{}.{}.temp", fname, rand_string)) + } + /// /// Open the underlying file and read the metadata into memory, if it's /// not loaded already. @@ -398,7 +415,7 @@ impl ImageLayer { /// pub struct ImageLayerWriter { conf: &'static PageServerConf, - _path: PathBuf, + path: PathBuf, timelineid: ZTimelineId, tenantid: ZTenantId, key_range: Range, @@ -416,12 +433,10 @@ impl ImageLayerWriter { key_range: &Range, lsn: Lsn, ) -> anyhow::Result { - // Create the file - // - // Note: This overwrites any existing file. There shouldn't be any. - // FIXME: throw an error instead? - let path = ImageLayer::path_for( - &PathOrConf::Conf(conf), + // Create the file initially with a temporary filename. + // We'll atomically rename it to the final name when we're done. + let path = ImageLayer::temp_path_for( + conf, timelineid, tenantid, &ImageFileName { @@ -441,7 +456,7 @@ impl ImageLayerWriter { let writer = ImageLayerWriter { conf, - _path: path, + path, timelineid, tenantid, key_range: key_range.clone(), @@ -512,6 +527,25 @@ impl ImageLayerWriter { index_root_blk, }), }; + + // fsync the file + file.sync_all()?; + + // Rename the file to its final name + // + // Note: This overwrites any existing file. There shouldn't be any. + // FIXME: throw an error instead? + let final_path = ImageLayer::path_for( + &PathOrConf::Conf(self.conf), + self.timelineid, + self.tenantid, + &ImageFileName { + key_range: self.key_range.clone(), + lsn: self.lsn, + }, + ); + std::fs::rename(self.path, &final_path)?; + trace!("created image layer {}", layer.path().display()); Ok(layer) diff --git a/pageserver/src/layered_repository/layer_map.rs b/pageserver/src/layered_repository/layer_map.rs index 7491294c03..f7f51bf21f 100644 --- a/pageserver/src/layered_repository/layer_map.rs +++ b/pageserver/src/layered_repository/layer_map.rs @@ -201,18 +201,14 @@ impl LayerMap { NUM_ONDISK_LAYERS.dec(); } - /// Is there a newer image layer for given key-range? + /// Is there a newer image layer for given key- and LSN-range? /// /// This is used for garbage collection, to determine if an old layer can /// be deleted. 
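Both `DeltaLayerWriter` and `ImageLayerWriter` now follow the write-to-temp-file, fsync, rename pattern, so a crash can never leave a half-written layer under its final name (leftover `*.temp` files are cleaned up on startup elsewhere in this patch). A minimal standalone sketch of the pattern, with a hypothetical file name; a production version would also fsync the containing directory:

```rust
use std::fs::{self, File};
use std::io::Write;
use std::path::Path;

/// Write `data` to `final_path` atomically: write a temporary file next to it,
/// fsync it, then rename it into place. Readers never observe a partial file.
fn atomic_write(final_path: &Path, data: &[u8]) -> std::io::Result<()> {
    let tmp_path = final_path.with_extension("temp");
    let mut file = File::create(&tmp_path)?;
    file.write_all(data)?;
    // Make sure the contents are durable before the rename makes them visible.
    file.sync_all()?;
    fs::rename(&tmp_path, final_path)?;
    Ok(())
}

fn main() -> std::io::Result<()> {
    let target = std::env::temp_dir().join("layer_example");
    atomic_write(&target, b"layer contents")?;
    println!("wrote {}", target.display());
    fs::remove_file(target)
}
```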
- /// We ignore layers newer than disk_consistent_lsn because they will be removed at restart - /// We also only look at historic layers - //#[allow(dead_code)] - pub fn newer_image_layer_exists( + pub fn image_layer_exists( &self, key_range: &Range, - lsn: Lsn, - disk_consistent_lsn: Lsn, + lsn_range: &Range, ) -> Result { let mut range_remain = key_range.clone(); @@ -225,8 +221,7 @@ impl LayerMap { let img_lsn = l.get_lsn_range().start; if !l.is_incremental() && l.get_key_range().contains(&range_remain.start) - && img_lsn > lsn - && img_lsn < disk_consistent_lsn + && lsn_range.contains(&img_lsn) { made_progress = true; let img_key_end = l.get_key_range().end; diff --git a/pageserver/src/lib.rs b/pageserver/src/lib.rs index b2ef18761b..73e5422227 100644 --- a/pageserver/src/lib.rs +++ b/pageserver/src/lib.rs @@ -45,7 +45,7 @@ pub const DELTA_FILE_MAGIC: u16 = 0x5A61; lazy_static! { static ref LIVE_CONNECTIONS_COUNT: IntGaugeVec = register_int_gauge_vec!( - "pageserver_live_connections_count", + "pageserver_live_connections", "Number of live network connections", &["pageserver_connection_kind"] ) diff --git a/pageserver/src/page_service.rs b/pageserver/src/page_service.rs index da3dedfc84..03264c9782 100644 --- a/pageserver/src/page_service.rs +++ b/pageserver/src/page_service.rs @@ -19,7 +19,6 @@ use std::net::TcpListener; use std::str; use std::str::FromStr; use std::sync::{Arc, RwLockReadGuard}; -use std::time::Duration; use tracing::*; use utils::{ auth::{self, Claims, JwtAuth, Scope}, @@ -326,7 +325,7 @@ const TIME_BUCKETS: &[f64] = &[ lazy_static! { static ref SMGR_QUERY_TIME: HistogramVec = register_histogram_vec!( - "pageserver_smgr_query_time", + "pageserver_smgr_query_seconds", "Time spent on smgr query handling", &["smgr_query_type", "tenant_id", "timeline_id"], TIME_BUCKETS.into() @@ -731,7 +730,18 @@ impl postgres_backend::Handler for PageServerHandler { for failpoint in failpoints.split(';') { if let Some((name, actions)) = failpoint.split_once('=') { info!("cfg failpoint: {} {}", name, actions); - fail::cfg(name, actions).unwrap(); + + // We recognize one extra "action" that's not natively recognized + // by the failpoints crate: exit, to immediately kill the process + if actions == "exit" { + fail::cfg_callback(name, || { + info!("Exit requested by failpoint"); + std::process::exit(1); + }) + .unwrap(); + } else { + fail::cfg(name, actions).unwrap(); + } } else { bail!("Invalid failpoints format"); } @@ -796,7 +806,9 @@ impl postgres_backend::Handler for PageServerHandler { .unwrap_or_else(|| Ok(repo.get_gc_horizon()))?; let repo = tenant_mgr::get_repository_for_tenant(tenantid)?; - let result = repo.gc_iteration(Some(timelineid), gc_horizon, Duration::ZERO, true)?; + // Use tenant's pitr setting + let pitr = repo.get_pitr_interval(); + let result = repo.gc_iteration(Some(timelineid), gc_horizon, pitr, true)?; pgb.write_message_noflush(&BeMessage::RowDescription(&[ RowDescriptor::int8_col(b"layers_total"), RowDescriptor::int8_col(b"layers_needed_by_cutoff"), diff --git a/pageserver/src/storage_sync.rs b/pageserver/src/storage_sync.rs index b8c6f7fdab..bbebcd1f36 100644 --- a/pageserver/src/storage_sync.rs +++ b/pageserver/src/storage_sync.rs @@ -9,7 +9,7 @@ //! //! * public API via to interact with the external world: //! * [`start_local_timeline_sync`] to launch a background async loop to handle the synchronization -//! * [`schedule_timeline_checkpoint_upload`] and [`schedule_timeline_download`] to enqueue a new upload and download tasks, +//! 
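The `failpoints` command handler in page_service.rs above grows a custom `exit` action. The sketch below shows the same `fail::cfg_callback` trick in isolation; the failpoint name and `flush_layer_to_disk` are made up for the example, and the `fail` crate needs its `failpoints` feature enabled for the failpoint to fire:

```rust
// Cargo.toml (assumption): fail = { version = "0.5", features = ["failpoints"] }
use fail::fail_point;

fn flush_layer_to_disk() {
    // In a test we can make this call kill the process, mimicking a crash
    // right before the flush completes.
    fail_point!("flush-before-sync");
    println!("layer flushed");
}

fn main() {
    // Equivalent of the special "exit" action handled in page_service.rs:
    // instead of a built-in failpoint action, register a callback that
    // terminates the process immediately.
    fail::cfg_callback("flush-before-sync", || {
        println!("Exit requested by failpoint");
        std::process::exit(1);
    })
    .unwrap();

    flush_layer_to_disk(); // never returns: the callback exits the process
}
```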
* [`schedule_layer_upload`], [`schedule_layer_download`], and[`schedule_layer_delete`] to enqueue a new task //! to be processed by the async loop //! //! Here's a schematic overview of all interactions backup and the rest of the pageserver perform: @@ -44,8 +44,8 @@ //! query their downloads later if they are accessed. //! //! Some time later, during pageserver checkpoints, in-memory data is flushed onto disk along with its metadata. -//! If the storage sync loop was successfully started before, pageserver schedules the new checkpoint file uploads after every checkpoint. -//! The checkpoint uploads are disabled, if no remote storage configuration is provided (no sync loop is started this way either). +//! If the storage sync loop was successfully started before, pageserver schedules the layer files and the updated metadata file for upload, every time a layer is flushed to disk. +//! The uploads are disabled, if no remote storage configuration is provided (no sync loop is started this way either). //! See [`crate::layered_repository`] for the upload calls and the adjacent logic. //! //! Synchronization logic is able to communicate back with updated timeline sync states, [`crate::repository::TimelineSyncStatusUpdate`], @@ -54,7 +54,7 @@ //! * once after the sync loop startup, to signal pageserver which timelines will be synchronized in the near future //! * after every loop step, in case a timeline needs to be reloaded or evicted from pageserver's memory //! -//! When the pageserver terminates, the sync loop finishes a current sync task (if any) and exits. +//! When the pageserver terminates, the sync loop finishes current sync task (if any) and exits. //! //! The storage logic considers `image` as a set of local files (layers), fully representing a certain timeline at given moment (identified with `disk_consistent_lsn` from the corresponding `metadata` file). //! Timeline can change its state, by adding more files on disk and advancing its `disk_consistent_lsn`: this happens after pageserver checkpointing and is followed @@ -66,13 +66,13 @@ //! when the newer image is downloaded //! //! Pageserver maintains similar to the local file structure remotely: all layer files are uploaded with the same names under the same directory structure. -//! Yet instead of keeping the `metadata` file remotely, we wrap it with more data in [`IndexShard`], containing the list of remote files. +//! Yet instead of keeping the `metadata` file remotely, we wrap it with more data in [`IndexPart`], containing the list of remote files. //! This file gets read to populate the cache, if the remote timeline data is missing from it and gets updated after every successful download. //! This way, we optimize S3 storage access by not running the `S3 list` command that could be expencive and slow: knowing both [`ZTenantId`] and [`ZTimelineId`], //! we can always reconstruct the path to the timeline, use this to get the same path on the remote storage and retrive its shard contents, if needed, same as any layer files. //! //! By default, pageserver reads the remote storage index data only for timelines located locally, to synchronize those, if needed. -//! Bulk index data download happens only initially, on pageserer startup. The rest of the remote storage stays unknown to pageserver and loaded on demand only, +//! Bulk index data download happens only initially, on pageserver startup. The rest of the remote storage stays unknown to pageserver and loaded on demand only, //! when a new timeline is scheduled for the download. 
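Since `IndexPart` is only described in passing here, a much simplified stand-in may help picture what the remote index file holds; the field names are approximations, not the real struct, and the example assumes the `serde` (with `derive`) and `serde_json` crates:

```rust
use serde::{Deserialize, Serialize};

/// Simplified stand-in for the remote index file described above: instead of
/// keeping only the `metadata` blob remotely, the index lists the layer files
/// that make up the timeline, so no S3 LIST call is needed.
#[derive(Debug, Serialize, Deserialize)]
struct IndexPartSketch {
    /// Layer file names that exist in remote storage for this timeline.
    timeline_layers: Vec<String>,
    /// Serialized copy of the timeline `metadata` file.
    metadata_bytes: Vec<u8>,
    disk_consistent_lsn: u64,
}

fn main() -> serde_json::Result<()> {
    let part = IndexPartSketch {
        timeline_layers: vec!["000000...-FFFF...__0000000057A50051".to_string()],
        metadata_bytes: vec![0; 16],
        disk_consistent_lsn: 0x57A5_0051,
    };
    // The real file is stored next to the layer files under the timeline path.
    let json = serde_json::to_string_pretty(&part)?;
    println!("{json}");
    let _back: IndexPartSketch = serde_json::from_str(&json)?;
    Ok(())
}
```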
//! //! NOTES: @@ -89,13 +89,12 @@ //! Synchronization is done with the queue being emptied via separate thread asynchronously, //! attempting to fully store pageserver's local data on the remote storage in a custom format, beneficial for storing. //! -//! A queue is implemented in the [`sync_queue`] module as a pair of sender and receiver channels, to block on zero tasks instead of checking the queue. -//! The pair's shared buffer of a fixed size serves as an implicit queue, holding [`SyncTask`] for local files upload/download operations. +//! A queue is implemented in the [`sync_queue`] module as a VecDeque to hold the tasks, and a condition variable for blocking when the queue is empty. //! //! The queue gets emptied by a single thread with the loop, that polls the tasks in batches of deduplicated tasks. //! A task from the batch corresponds to a single timeline, with its files to sync merged together: given that only one task sync loop step is active at a time, //! timeline uploads and downloads can happen concurrently, in no particular order due to incremental nature of the timeline layers. -//! Deletion happens only after a successful upload only, otherwise the compation output might make the timeline inconsistent until both tasks are fully processed without errors. +//! Deletion happens only after a successful upload only, otherwise the compaction output might make the timeline inconsistent until both tasks are fully processed without errors. //! Upload and download update the remote data (inmemory index and S3 json index part file) only after every layer is successfully synchronized, while the deletion task //! does otherwise: it requires to have the remote data updated first succesfully: blob files will be invisible to pageserver this way. //! @@ -138,8 +137,6 @@ //! NOTE: No real contents or checksum check happens right now and is a subject to improve later. //! //! After the whole timeline is downloaded, [`crate::tenant_mgr::apply_timeline_sync_status_updates`] function is used to update pageserver memory stage for the timeline processed. -//! -//! When pageserver signals shutdown, current sync task gets finished and the loop exists. mod delete; mod download; @@ -153,10 +150,7 @@ use std::{ num::{NonZeroU32, NonZeroUsize}, ops::ControlFlow, path::{Path, PathBuf}, - sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, - }, + sync::{Arc, Condvar, Mutex}, }; use anyhow::{anyhow, bail, Context}; @@ -167,7 +161,6 @@ use remote_storage::{GenericRemoteStorage, RemoteStorage}; use tokio::{ fs, runtime::Runtime, - sync::mpsc::{self, error::TryRecvError, UnboundedReceiver, UnboundedSender}, time::{Duration, Instant}, }; use tracing::*; @@ -208,12 +201,12 @@ lazy_static! { ) .expect("failed to register pageserver remote storage remaining sync items int gauge"); static ref FATAL_TASK_FAILURES: IntCounter = register_int_counter!( - "pageserver_remote_storage_fatal_task_failures", + "pageserver_remote_storage_fatal_task_failures_total", "Number of critically failed tasks" ) .expect("failed to register pageserver remote storage remaining sync items int gauge"); static ref IMAGE_SYNC_TIME: HistogramVec = register_histogram_vec!( - "pageserver_remote_storage_image_sync_time", + "pageserver_remote_storage_image_sync_seconds", "Time took to synchronize (download or upload) a whole pageserver image. 
\ Grouped by `operation_kind` (upload|download) and `status` (success|failure)", &["operation_kind", "status"], @@ -428,6 +421,14 @@ fn collect_timeline_files( entry_path.display() ) })?; + } else if entry_path.extension().and_then(OsStr::to_str) == Some("temp") { + info!("removing temp layer file at {}", entry_path.display()); + std::fs::remove_file(&entry_path).with_context(|| { + format!( + "failed to remove temp layer file at {}", + entry_path.display() + ) + })?; } else { timeline_files.insert(entry_path); } @@ -453,97 +454,77 @@ fn collect_timeline_files( Ok((timeline_id, metadata, timeline_files)) } -/// Wraps mpsc channel bits around into a queue interface. -/// mpsc approach was picked to allow blocking the sync loop if no tasks are present, to avoid meaningless spinning. +/// Global queue of sync tasks. +/// +/// 'queue' is protected by a mutex, and 'condvar' is used to wait for tasks to arrive. struct SyncQueue { - len: AtomicUsize, max_timelines_per_batch: NonZeroUsize, - sender: UnboundedSender<(ZTenantTimelineId, SyncTask)>, + + queue: Mutex>, + condvar: Condvar, } impl SyncQueue { - fn new( - max_timelines_per_batch: NonZeroUsize, - ) -> (Self, UnboundedReceiver<(ZTenantTimelineId, SyncTask)>) { - let (sender, receiver) = mpsc::unbounded_channel(); - ( - Self { - len: AtomicUsize::new(0), - max_timelines_per_batch, - sender, - }, - receiver, - ) + fn new(max_timelines_per_batch: NonZeroUsize) -> Self { + Self { + max_timelines_per_batch, + queue: Mutex::new(VecDeque::new()), + condvar: Condvar::new(), + } } + /// Queue a new task fn push(&self, sync_id: ZTenantTimelineId, new_task: SyncTask) { - match self.sender.send((sync_id, new_task)) { - Ok(()) => { - self.len.fetch_add(1, Ordering::Relaxed); - } - Err(e) => { - error!("failed to push sync task to queue: {e}"); - } + let mut q = self.queue.lock().unwrap(); + + q.push_back((sync_id, new_task)); + if q.len() <= 1 { + self.condvar.notify_one(); } } /// Fetches a task batch, getting every existing entry from the queue, grouping by timelines and merging the tasks for every timeline. - /// A timeline has to care to not to delete cetain layers from the remote storage before the corresponding uploads happen. - /// Otherwise, due to "immutable" nature of the layers, the order of their deletion/uploading/downloading does not matter. + /// A timeline has to care to not to delete certain layers from the remote storage before the corresponding uploads happen. + /// Other than that, due to "immutable" nature of the layers, the order of their deletion/uploading/downloading does not matter. /// Hence, we merge the layers together into single task per timeline and run those concurrently (with the deletion happening only after successful uploading). - async fn next_task_batch( - &self, - // The queue is based on two ends of a channel and has to be accessible statically without blocking for submissions from the sync code. - // Its receiver needs &mut, so we cannot place it in the same container with the other end and get both static and non-blocking access. - // Hence toss this around to use it from the sync loop directly as &mut. 
- sync_queue_receiver: &mut UnboundedReceiver<(ZTenantTimelineId, SyncTask)>, - ) -> HashMap { - // request the first task in blocking fashion to do less meaningless work - let (first_sync_id, first_task) = if let Some(first_task) = sync_queue_receiver.recv().await - { - self.len.fetch_sub(1, Ordering::Relaxed); - first_task - } else { - info!("Queue sender part was dropped, aborting"); - return HashMap::new(); - }; + fn next_task_batch(&self) -> (HashMap, usize) { + // Wait for the first task in blocking fashion + let mut q = self.queue.lock().unwrap(); + while q.is_empty() { + q = self + .condvar + .wait_timeout(q, Duration::from_millis(1000)) + .unwrap() + .0; + + if thread_mgr::is_shutdown_requested() { + return (HashMap::new(), q.len()); + } + } + let (first_sync_id, first_task) = q.pop_front().unwrap(); + let mut timelines_left_to_batch = self.max_timelines_per_batch.get() - 1; - let mut tasks_to_process = self.len(); + let tasks_to_process = q.len(); let mut batches = HashMap::with_capacity(tasks_to_process); batches.insert(first_sync_id, SyncTaskBatch::new(first_task)); let mut tasks_to_reenqueue = Vec::with_capacity(tasks_to_process); - // Pull the queue channel until we get all tasks that were there at the beginning of the batch construction. + // Greedily grab as many other tasks that we can. // Yet do not put all timelines in the batch, but only the first ones that fit the timeline limit. - // Still merge the rest of the pulled tasks and reenqueue those for later. - while tasks_to_process > 0 { - match sync_queue_receiver.try_recv() { - Ok((sync_id, new_task)) => { - self.len.fetch_sub(1, Ordering::Relaxed); - tasks_to_process -= 1; - - match batches.entry(sync_id) { - hash_map::Entry::Occupied(mut v) => v.get_mut().add(new_task), - hash_map::Entry::Vacant(v) => { - timelines_left_to_batch = timelines_left_to_batch.saturating_sub(1); - if timelines_left_to_batch == 0 { - tasks_to_reenqueue.push((sync_id, new_task)); - } else { - v.insert(SyncTaskBatch::new(new_task)); - } - } + // Re-enqueue the tasks that don't fit in this batch. + while let Some((sync_id, new_task)) = q.pop_front() { + match batches.entry(sync_id) { + hash_map::Entry::Occupied(mut v) => v.get_mut().add(new_task), + hash_map::Entry::Vacant(v) => { + timelines_left_to_batch = timelines_left_to_batch.saturating_sub(1); + if timelines_left_to_batch == 0 { + tasks_to_reenqueue.push((sync_id, new_task)); + } else { + v.insert(SyncTaskBatch::new(new_task)); } } - Err(TryRecvError::Disconnected) => { - debug!("Sender disconnected, batch collection aborted"); - break; - } - Err(TryRecvError::Empty) => { - debug!("No more data in the sync queue, task batch is not full"); - break; - } } } @@ -553,14 +534,15 @@ impl SyncQueue { tasks_to_reenqueue.len() ); for (id, task) in tasks_to_reenqueue { - self.push(id, task); + q.push_back((id, task)); } - batches + (batches, q.len()) } + #[cfg(test)] fn len(&self) -> usize { - self.len.load(Ordering::Relaxed) + self.queue.lock().unwrap().len() } } @@ -823,7 +805,7 @@ pub fn schedule_layer_download(tenant_id: ZTenantId, timeline_id: ZTimelineId) { debug!("Download task for tenant {tenant_id}, timeline {timeline_id} sent") } -/// Uses a remote storage given to start the storage sync loop. +/// Launch a thread to perform remote storage sync tasks. /// See module docs for loop step description. 
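Condensed to its essentials, the queue introduced above is a mutex-protected `VecDeque` plus a `Condvar`. The standalone sketch below (not the real `SyncQueue`, which also merges and batches tasks per timeline) shows the blocking-pop pattern with the same one-second wakeup for shutdown checks:

```rust
use std::collections::VecDeque;
use std::sync::{Condvar, Mutex};
use std::time::Duration;

/// A VecDeque protected by a mutex, plus a condition variable so consumers
/// can block while it is empty, waking up periodically to check for shutdown.
struct SimpleQueue<T> {
    queue: Mutex<VecDeque<T>>,
    condvar: Condvar,
}

impl<T> SimpleQueue<T> {
    fn new() -> Self {
        Self { queue: Mutex::new(VecDeque::new()), condvar: Condvar::new() }
    }

    fn push(&self, item: T) {
        let mut q = self.queue.lock().unwrap();
        q.push_back(item);
        // Wake one waiting consumer; harmless if nobody is waiting.
        self.condvar.notify_one();
    }

    /// Block until an item arrives, or return None if `shutdown` says to stop.
    fn pop(&self, shutdown: impl Fn() -> bool) -> Option<T> {
        let mut q = self.queue.lock().unwrap();
        while q.is_empty() {
            if shutdown() {
                return None;
            }
            q = self.condvar.wait_timeout(q, Duration::from_millis(1000)).unwrap().0;
        }
        q.pop_front()
    }
}

fn main() {
    let queue = SimpleQueue::new();
    queue.push("upload layer A");
    assert_eq!(queue.pop(|| false), Some("upload layer A"));
}
```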
pub(super) fn spawn_storage_sync_thread( conf: &'static PageServerConf, @@ -836,7 +818,7 @@ where P: Debug + Send + Sync + 'static, S: RemoteStorage + Send + Sync + 'static, { - let (sync_queue, sync_queue_receiver) = SyncQueue::new(max_concurrent_timelines_sync); + let sync_queue = SyncQueue::new(max_concurrent_timelines_sync); SYNC_QUEUE .set(sync_queue) .map_err(|_queue| anyhow!("Could not initialize sync queue"))?; @@ -864,7 +846,7 @@ where local_timeline_files, ); - let loop_index = remote_index.clone(); + let remote_index_clone = remote_index.clone(); thread_mgr::spawn( ThreadKind::StorageSync, None, @@ -875,12 +857,7 @@ where storage_sync_loop( runtime, conf, - ( - Arc::new(storage), - loop_index, - sync_queue, - sync_queue_receiver, - ), + (Arc::new(storage), remote_index_clone, sync_queue), max_sync_errors, ); Ok(()) @@ -896,12 +873,7 @@ where fn storage_sync_loop( runtime: Runtime, conf: &'static PageServerConf, - (storage, index, sync_queue, mut sync_queue_receiver): ( - Arc, - RemoteIndex, - &SyncQueue, - UnboundedReceiver<(ZTenantTimelineId, SyncTask)>, - ), + (storage, index, sync_queue): (Arc, RemoteIndex, &SyncQueue), max_sync_errors: NonZeroU32, ) where P: Debug + Send + Sync + 'static, @@ -909,16 +881,35 @@ fn storage_sync_loop( { info!("Starting remote storage sync loop"); loop { - let loop_index = index.clone(); let loop_storage = Arc::clone(&storage); + + let (batched_tasks, remaining_queue_length) = sync_queue.next_task_batch(); + + if thread_mgr::is_shutdown_requested() { + info!("Shutdown requested, stopping"); + break; + } + + REMAINING_SYNC_ITEMS.set(remaining_queue_length as i64); + if remaining_queue_length > 0 || !batched_tasks.is_empty() { + info!("Processing tasks for {} timelines in batch, more tasks left to process: {remaining_queue_length}", batched_tasks.len()); + } else { + debug!("No tasks to process"); + continue; + } + + // Concurrently perform all the tasks in the batch let loop_step = runtime.block_on(async { tokio::select! 
{ - step = loop_step( + step = process_batches( conf, - (loop_storage, loop_index, sync_queue, &mut sync_queue_receiver), max_sync_errors, + loop_storage, + &index, + batched_tasks, + sync_queue, ) - .instrument(info_span!("storage_sync_loop_step")) => step, + .instrument(info_span!("storage_sync_loop_step")) => ControlFlow::Continue(step), _ = thread_mgr::shutdown_watcher() => ControlFlow::Break(()), } }); @@ -944,31 +935,18 @@ fn storage_sync_loop( } } -async fn loop_step( +async fn process_batches( conf: &'static PageServerConf, - (storage, index, sync_queue, sync_queue_receiver): ( - Arc, - RemoteIndex, - &SyncQueue, - &mut UnboundedReceiver<(ZTenantTimelineId, SyncTask)>, - ), max_sync_errors: NonZeroU32, -) -> ControlFlow<(), HashMap>> + storage: Arc, + index: &RemoteIndex, + batched_tasks: HashMap, + sync_queue: &SyncQueue, +) -> HashMap> where P: Debug + Send + Sync + 'static, S: RemoteStorage + Send + Sync + 'static, { - let batched_tasks = sync_queue.next_task_batch(sync_queue_receiver).await; - - let remaining_queue_length = sync_queue.len(); - REMAINING_SYNC_ITEMS.set(remaining_queue_length as i64); - if remaining_queue_length > 0 || !batched_tasks.is_empty() { - info!("Processing tasks for {} timelines in batch, more tasks left to process: {remaining_queue_length}", batched_tasks.len()); - } else { - debug!("No tasks to process"); - return ControlFlow::Continue(HashMap::new()); - } - let mut sync_results = batched_tasks .into_iter() .map(|(sync_id, batch)| { @@ -993,6 +971,7 @@ where ZTenantId, HashMap, > = HashMap::new(); + while let Some((sync_id, state_update)) = sync_results.next().await { debug!("Finished storage sync task for sync id {sync_id}"); if let Some(state_update) = state_update { @@ -1003,7 +982,7 @@ where } } - ControlFlow::Continue(new_timeline_states) + new_timeline_states } async fn process_sync_task_batch( @@ -1376,7 +1355,6 @@ where P: Debug + Send + Sync + 'static, S: RemoteStorage + Send + Sync + 'static, { - info!("Updating remote index for the timeline"); let updated_remote_timeline = { let mut index_accessor = index.write().await; @@ -1443,7 +1421,7 @@ where IndexPart::from_remote_timeline(&timeline_path, updated_remote_timeline) .context("Failed to create an index part from the updated remote timeline")?; - info!("Uploading remote data for the timeline"); + info!("Uploading remote index for the timeline"); upload_index_part(conf, storage, sync_id, new_index_part) .await .context("Failed to upload new index part") @@ -1685,7 +1663,7 @@ mod tests { #[tokio::test] async fn separate_task_ids_batch() { - let (sync_queue, mut sync_queue_receiver) = SyncQueue::new(NonZeroUsize::new(100).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); assert_eq!(sync_queue.len(), 0); let sync_id_2 = ZTenantTimelineId { @@ -1720,7 +1698,7 @@ mod tests { let submitted_tasks_count = sync_queue.len(); assert_eq!(submitted_tasks_count, 3); - let mut batch = sync_queue.next_task_batch(&mut sync_queue_receiver).await; + let (mut batch, _) = sync_queue.next_task_batch(); assert_eq!( batch.len(), submitted_tasks_count, @@ -1746,7 +1724,7 @@ mod tests { #[tokio::test] async fn same_task_id_separate_tasks_batch() { - let (sync_queue, mut sync_queue_receiver) = SyncQueue::new(NonZeroUsize::new(100).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); assert_eq!(sync_queue.len(), 0); let download = LayersDownload { @@ -1769,7 +1747,7 @@ mod tests { let submitted_tasks_count = sync_queue.len(); assert_eq!(submitted_tasks_count, 3); 
- let mut batch = sync_queue.next_task_batch(&mut sync_queue_receiver).await; + let (mut batch, _) = sync_queue.next_task_batch(); assert_eq!( batch.len(), 1, @@ -1801,7 +1779,7 @@ mod tests { #[tokio::test] async fn same_task_id_same_tasks_batch() { - let (sync_queue, mut sync_queue_receiver) = SyncQueue::new(NonZeroUsize::new(1).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(1).unwrap()); let download_1 = LayersDownload { layers_to_skip: HashSet::from([PathBuf::from("sk1")]), }; @@ -1823,11 +1801,11 @@ mod tests { sync_queue.push(TEST_SYNC_ID, SyncTask::download(download_1.clone())); sync_queue.push(TEST_SYNC_ID, SyncTask::download(download_2.clone())); - sync_queue.push(sync_id_2, SyncTask::download(download_3.clone())); + sync_queue.push(sync_id_2, SyncTask::download(download_3)); sync_queue.push(TEST_SYNC_ID, SyncTask::download(download_4.clone())); assert_eq!(sync_queue.len(), 4); - let mut smallest_batch = sync_queue.next_task_batch(&mut sync_queue_receiver).await; + let (mut smallest_batch, _) = sync_queue.next_task_batch(); assert_eq!( smallest_batch.len(), 1, diff --git a/pageserver/src/storage_sync/delete.rs b/pageserver/src/storage_sync/delete.rs index 047ad6c2be..91c618d201 100644 --- a/pageserver/src/storage_sync/delete.rs +++ b/pageserver/src/storage_sync/delete.rs @@ -119,7 +119,7 @@ mod tests { #[tokio::test] async fn delete_timeline_negative() -> anyhow::Result<()> { let harness = RepoHarness::create("delete_timeline_negative")?; - let (sync_queue, _) = SyncQueue::new(NonZeroUsize::new(100).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let storage = LocalFs::new( tempdir()?.path().to_path_buf(), @@ -152,7 +152,7 @@ mod tests { #[tokio::test] async fn delete_timeline() -> anyhow::Result<()> { let harness = RepoHarness::create("delete_timeline")?; - let (sync_queue, _) = SyncQueue::new(NonZeroUsize::new(100).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let layer_files = ["a", "b", "c", "d"]; diff --git a/pageserver/src/storage_sync/download.rs b/pageserver/src/storage_sync/download.rs index 98a0a0e2fc..a28867f27e 100644 --- a/pageserver/src/storage_sync/download.rs +++ b/pageserver/src/storage_sync/download.rs @@ -286,7 +286,7 @@ mod tests { #[tokio::test] async fn download_timeline() -> anyhow::Result<()> { let harness = RepoHarness::create("download_timeline")?; - let (sync_queue, _) = SyncQueue::new(NonZeroUsize::new(100).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let layer_files = ["a", "b", "layer_to_skip", "layer_to_keep_locally"]; @@ -385,7 +385,7 @@ mod tests { #[tokio::test] async fn download_timeline_negatives() -> anyhow::Result<()> { let harness = RepoHarness::create("download_timeline_negatives")?; - let (sync_queue, _) = SyncQueue::new(NonZeroUsize::new(100).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let storage = LocalFs::new(tempdir()?.path().to_owned(), harness.conf.workdir.clone())?; diff --git a/pageserver/src/storage_sync/upload.rs b/pageserver/src/storage_sync/upload.rs index f9d606f2b8..625ec7aed6 100644 --- a/pageserver/src/storage_sync/upload.rs +++ b/pageserver/src/storage_sync/upload.rs @@ -240,7 +240,7 @@ mod tests { 
#[tokio::test] async fn regular_layer_upload() -> anyhow::Result<()> { let harness = RepoHarness::create("regular_layer_upload")?; - let (sync_queue, _) = SyncQueue::new(NonZeroUsize::new(100).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let layer_files = ["a", "b"]; @@ -327,7 +327,7 @@ mod tests { #[tokio::test] async fn layer_upload_after_local_fs_update() -> anyhow::Result<()> { let harness = RepoHarness::create("layer_upload_after_local_fs_update")?; - let (sync_queue, _) = SyncQueue::new(NonZeroUsize::new(100).unwrap()); + let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); let sync_id = ZTenantTimelineId::new(harness.tenant_id, TIMELINE_ID); let layer_files = ["a1", "b1"]; diff --git a/pageserver/src/tenant_mgr.rs b/pageserver/src/tenant_mgr.rs index 9cac82e3a7..eb34575c02 100644 --- a/pageserver/src/tenant_mgr.rs +++ b/pageserver/src/tenant_mgr.rs @@ -80,6 +80,9 @@ pub enum TenantState { // The local disk might have some newer files that don't exist in cloud storage yet. // The tenant cannot be accessed anymore for any reason, but graceful shutdown. Stopping, + + // Something went wrong loading the tenant state + Broken, } impl fmt::Display for TenantState { @@ -88,6 +91,7 @@ impl fmt::Display for TenantState { TenantState::Active => f.write_str("Active"), TenantState::Idle => f.write_str("Idle"), TenantState::Stopping => f.write_str("Stopping"), + TenantState::Broken => f.write_str("Broken"), } } } @@ -101,7 +105,22 @@ pub fn init_tenant_mgr(conf: &'static PageServerConf) -> anyhow::Result { + tenant.state = TenantState::Stopping; + tenantids.push(*tenantid) + } + TenantState::Broken => {} + } } drop(m); @@ -259,6 +283,10 @@ pub fn activate_tenant(tenant_id: ZTenantId) -> anyhow::Result<()> { TenantState::Stopping => { // don't re-activate it if it's being stopped } + + TenantState::Broken => { + // cannot activate + } } Ok(()) } @@ -359,38 +387,37 @@ pub fn list_tenants() -> Vec { .collect() } -fn init_local_repositories( +fn init_local_repository( conf: &'static PageServerConf, - local_timeline_init_statuses: HashMap>, + tenant_id: ZTenantId, + local_timeline_init_statuses: HashMap, remote_index: &RemoteIndex, ) -> anyhow::Result<(), anyhow::Error> { - for (tenant_id, local_timeline_init_statuses) in local_timeline_init_statuses { - // initialize local tenant - let repo = load_local_repo(conf, tenant_id, remote_index) - .with_context(|| format!("Failed to load repo for tenant {tenant_id}"))?; + // initialize local tenant + let repo = load_local_repo(conf, tenant_id, remote_index) + .with_context(|| format!("Failed to load repo for tenant {tenant_id}"))?; - let mut status_updates = HashMap::with_capacity(local_timeline_init_statuses.len()); - for (timeline_id, init_status) in local_timeline_init_statuses { - match init_status { - LocalTimelineInitStatus::LocallyComplete => { - debug!("timeline {timeline_id} for tenant {tenant_id} is locally complete, registering it in repository"); - status_updates.insert(timeline_id, TimelineSyncStatusUpdate::Downloaded); - } - LocalTimelineInitStatus::NeedsSync => { - debug!( - "timeline {tenant_id} for tenant {timeline_id} needs sync, \ - so skipped for adding into repository until sync is finished" - ); - } + let mut status_updates = HashMap::with_capacity(local_timeline_init_statuses.len()); + for (timeline_id, init_status) in local_timeline_init_statuses { + match init_status { + LocalTimelineInitStatus::LocallyComplete => { 
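A hedged sketch of the new tenant state handling added in tenant_mgr.rs: `Broken` marks a tenant whose load failed, and the state machine simply refuses to activate or stop it. The enum mirrors the diff; everything else here is illustrative.

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum TenantState {
    Active,
    Idle,
    Stopping,
    Broken,
}

fn activate(state: TenantState) -> TenantState {
    match state {
        // Only an idle tenant is woken up.
        TenantState::Idle => TenantState::Active,
        // Already active, shutting down, or broken: leave it alone.
        TenantState::Active | TenantState::Stopping | TenantState::Broken => state,
    }
}

fn main() {
    assert_eq!(activate(TenantState::Idle), TenantState::Active);
    assert_eq!(activate(TenantState::Broken), TenantState::Broken);
}
```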
+ debug!("timeline {timeline_id} for tenant {tenant_id} is locally complete, registering it in repository"); + status_updates.insert(timeline_id, TimelineSyncStatusUpdate::Downloaded); + } + LocalTimelineInitStatus::NeedsSync => { + debug!( + "timeline {tenant_id} for tenant {timeline_id} needs sync, \ + so skipped for adding into repository until sync is finished" + ); } } - - // Lets fail here loudly to be on the safe side. - // XXX: It may be a better api to actually distinguish between repository startup - // and processing of newly downloaded timelines. - apply_timeline_remote_sync_status_updates(&repo, status_updates) - .with_context(|| format!("Failed to bootstrap timelines for tenant {tenant_id}"))? } + + // Lets fail here loudly to be on the safe side. + // XXX: It may be a better api to actually distinguish between repository startup + // and processing of newly downloaded timelines. + apply_timeline_remote_sync_status_updates(&repo, status_updates) + .with_context(|| format!("Failed to bootstrap timelines for tenant {tenant_id}"))?; Ok(()) } diff --git a/pageserver/src/thread_mgr.rs b/pageserver/src/thread_mgr.rs index 15735a8043..94147fc028 100644 --- a/pageserver/src/thread_mgr.rs +++ b/pageserver/src/thread_mgr.rs @@ -139,7 +139,7 @@ pub fn spawn( name: &str, shutdown_process_on_error: bool, f: F, -) -> std::io::Result<()> +) -> std::io::Result where F: FnOnce() -> anyhow::Result<()> + Send + 'static, { @@ -193,7 +193,7 @@ where drop(jh_guard); // The thread is now running. Nothing more to do here - Ok(()) + Ok(thread_id) } /// This wrapper function runs in a newly-spawned thread. It initializes the diff --git a/pageserver/src/timelines.rs b/pageserver/src/timelines.rs index 7cfd33c40b..eadf5bf4e0 100644 --- a/pageserver/src/timelines.rs +++ b/pageserver/src/timelines.rs @@ -45,6 +45,8 @@ pub struct LocalTimelineInfo { #[serde_as(as = "Option")] pub prev_record_lsn: Option, #[serde_as(as = "DisplayFromStr")] + pub latest_gc_cutoff_lsn: Lsn, + #[serde_as(as = "DisplayFromStr")] pub disk_consistent_lsn: Lsn, pub current_logical_size: Option, // is None when timeline is Unloaded pub current_logical_size_non_incremental: Option, @@ -68,6 +70,7 @@ impl LocalTimelineInfo { disk_consistent_lsn: datadir_tline.tline.get_disk_consistent_lsn(), last_record_lsn, prev_record_lsn: Some(datadir_tline.tline.get_prev_record_lsn()), + latest_gc_cutoff_lsn: *datadir_tline.tline.get_latest_gc_cutoff_lsn(), timeline_state: LocalTimelineState::Loaded, current_logical_size: Some(datadir_tline.get_current_logical_size()), current_logical_size_non_incremental: if include_non_incremental_logical_size { @@ -91,6 +94,7 @@ impl LocalTimelineInfo { disk_consistent_lsn: metadata.disk_consistent_lsn(), last_record_lsn: metadata.disk_consistent_lsn(), prev_record_lsn: metadata.prev_record_lsn(), + latest_gc_cutoff_lsn: metadata.latest_gc_cutoff_lsn(), timeline_state: LocalTimelineState::Unloaded, current_logical_size: None, current_logical_size_non_incremental: None, diff --git a/pageserver/src/virtual_file.rs b/pageserver/src/virtual_file.rs index 4ce245a74f..37d70372b5 100644 --- a/pageserver/src/virtual_file.rs +++ b/pageserver/src/virtual_file.rs @@ -34,7 +34,7 @@ const STORAGE_IO_TIME_BUCKETS: &[f64] = &[ lazy_static! { static ref STORAGE_IO_TIME: HistogramVec = register_histogram_vec!( - "pageserver_io_time", + "pageserver_io_operations_seconds", "Time spent in IO operations", &["operation", "tenant_id", "timeline_id"], STORAGE_IO_TIME_BUCKETS.into() @@ -43,8 +43,8 @@ lazy_static! { } lazy_static! 
{ static ref STORAGE_IO_SIZE: IntGaugeVec = register_int_gauge_vec!( - "pageserver_io_size", - "Amount of bytes", + "pageserver_io_operations_bytes_total", + "Total amount of bytes read/written in IO operations", &["operation", "tenant_id", "timeline_id"] ) .expect("failed to define a metric"); diff --git a/pageserver/src/walingest.rs b/pageserver/src/walingest.rs index fbdb328d2c..5223125ce6 100644 --- a/pageserver/src/walingest.rs +++ b/pageserver/src/walingest.rs @@ -24,6 +24,7 @@ use anyhow::Context; use postgres_ffi::nonrelfile_utils::clogpage_precedes; use postgres_ffi::nonrelfile_utils::slru_may_delete_clogsegment; +use postgres_ffi::{page_is_new, page_set_lsn}; use anyhow::Result; use bytes::{Buf, Bytes, BytesMut}; @@ -304,8 +305,14 @@ impl<'a, R: Repository> WalIngest<'a, R> { image.resize(image.len() + blk.hole_length as usize, 0u8); image.unsplit(tail); } - image[0..4].copy_from_slice(&((lsn.0 >> 32) as u32).to_le_bytes()); - image[4..8].copy_from_slice(&(lsn.0 as u32).to_le_bytes()); + // + // Match the logic of XLogReadBufferForRedoExtended: + // The page may be uninitialized. If so, we can't set the LSN because + // that would corrupt the page. + // + if !page_is_new(&image) { + page_set_lsn(&mut image, lsn) + } assert_eq!(image.len(), pg_constants::BLCKSZ as usize); self.put_rel_page_image(modification, rel, blk.blkno, image.freeze())?; } else { diff --git a/pageserver/src/walreceiver.rs b/pageserver/src/walreceiver.rs index b7a33364c9..b8f349af8f 100644 --- a/pageserver/src/walreceiver.rs +++ b/pageserver/src/walreceiver.rs @@ -18,6 +18,8 @@ use lazy_static::lazy_static; use postgres_ffi::waldecoder::*; use postgres_protocol::message::backend::ReplicationMessage; use postgres_types::PgLsn; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DisplayFromStr}; use std::cell::Cell; use std::collections::HashMap; use std::str::FromStr; @@ -35,11 +37,19 @@ use utils::{ zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}, }; -// -// We keep one WAL Receiver active per timeline. -// -struct WalReceiverEntry { +/// +/// A WAL receiver's data stored inside the global `WAL_RECEIVERS`. +/// We keep one WAL receiver active per timeline. +/// +#[serde_as] +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct WalReceiverEntry { + thread_id: u64, wal_producer_connstr: String, + #[serde_as(as = "Option")] + last_received_msg_lsn: Option, + /// the timestamp (in microseconds) of the last received message + last_received_msg_ts: Option, } lazy_static! 
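The walingest.rs hunk above stops stamping the LSN onto uninitialized full-page images, matching `XLogReadBufferForRedoExtended`. A hedged sketch of that guard, with `page_is_new`/`page_set_lsn` modeled as plain helpers over an 8 KiB buffer rather than the real postgres_ffi functions:

```rust
const BLCKSZ: usize = 8192;

fn page_is_new(page: &[u8]) -> bool {
    // Modeling assumption: treat an all-zero page as uninitialized.
    page.iter().all(|&b| b == 0)
}

fn page_set_lsn(page: &mut [u8], lsn: u64) {
    // Same byte layout as the code this replaces: high half, then low half.
    page[0..4].copy_from_slice(&((lsn >> 32) as u32).to_le_bytes());
    page[4..8].copy_from_slice(&(lsn as u32).to_le_bytes());
}

fn apply_full_page_image(mut image: Vec<u8>, lsn: u64) -> Vec<u8> {
    assert_eq!(image.len(), BLCKSZ);
    // Setting the LSN on an uninitialized page would corrupt it, so skip it.
    if !page_is_new(&image) {
        page_set_lsn(&mut image, lsn);
    }
    image
}

fn main() {
    let zero_page = vec![0u8; BLCKSZ];
    assert!(page_is_new(&apply_full_page_image(zero_page, 0x1234_5678)));

    let mut used_page = vec![0u8; BLCKSZ];
    used_page[100] = 1; // pretend the page has contents
    let stamped = apply_full_page_image(used_page, 0x1234_5678);
    assert!(!page_is_new(&stamped));
}
```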
{ @@ -74,7 +84,7 @@ pub fn launch_wal_receiver( receiver.wal_producer_connstr = wal_producer_connstr.into(); } None => { - thread_mgr::spawn( + let thread_id = thread_mgr::spawn( ThreadKind::WalReceiver, Some(tenantid), Some(timelineid), @@ -88,7 +98,10 @@ pub fn launch_wal_receiver( )?; let receiver = WalReceiverEntry { + thread_id, wal_producer_connstr: wal_producer_connstr.into(), + last_received_msg_lsn: None, + last_received_msg_ts: None, }; receivers.insert((tenantid, timelineid), receiver); @@ -99,15 +112,13 @@ pub fn launch_wal_receiver( Ok(()) } -// Look up current WAL producer connection string in the hash table -fn get_wal_producer_connstr(tenantid: ZTenantId, timelineid: ZTimelineId) -> String { +/// Look up a WAL receiver's data in the global `WAL_RECEIVERS` +pub fn get_wal_receiver_entry( + tenant_id: ZTenantId, + timeline_id: ZTimelineId, +) -> Option { let receivers = WAL_RECEIVERS.lock().unwrap(); - - receivers - .get(&(tenantid, timelineid)) - .unwrap() - .wal_producer_connstr - .clone() + receivers.get(&(tenant_id, timeline_id)).cloned() } // @@ -118,7 +129,18 @@ fn thread_main(conf: &'static PageServerConf, tenant_id: ZTenantId, timeline_id: info!("WAL receiver thread started"); // Look up the current WAL producer address - let wal_producer_connstr = get_wal_producer_connstr(tenant_id, timeline_id); + let wal_producer_connstr = { + match get_wal_receiver_entry(tenant_id, timeline_id) { + Some(e) => e.wal_producer_connstr, + None => { + info!( + "Unable to create the WAL receiver thread: no WAL receiver entry found for tenant {} and timeline {}", + tenant_id, timeline_id + ); + return; + } + } + }; // Make a connection to the WAL safekeeper, or directly to the primary PostgreSQL server, // and start streaming WAL from it. @@ -318,6 +340,28 @@ fn walreceiver_main( let apply_lsn = u64::from(timeline_remote_consistent_lsn); let ts = SystemTime::now(); + // Update the current WAL receiver's data stored inside the global hash table `WAL_RECEIVERS` + { + let mut receivers = WAL_RECEIVERS.lock().unwrap(); + let entry = match receivers.get_mut(&(tenant_id, timeline_id)) { + Some(e) => e, + None => { + anyhow::bail!( + "no WAL receiver entry found for tenant {} and timeline {}", + tenant_id, + timeline_id + ); + } + }; + + entry.last_received_msg_lsn = Some(last_lsn); + entry.last_received_msg_ts = Some( + ts.duration_since(SystemTime::UNIX_EPOCH) + .expect("Received message time should be before UNIX EPOCH!") + .as_micros(), + ); + } + // Send zenith feedback message. // Regular standby_status_update fields are put into this message. let zenith_status_update = ZenithFeedback { diff --git a/pageserver/src/walredo.rs b/pageserver/src/walredo.rs index 777718b311..e556c24548 100644 --- a/pageserver/src/walredo.rs +++ b/pageserver/src/walredo.rs @@ -106,16 +106,16 @@ impl crate::walredo::WalRedoManager for DummyRedoManager { // each tenant. lazy_static! 
{ static ref WAL_REDO_TIME: Histogram = - register_histogram!("pageserver_wal_redo_time", "Time spent on WAL redo") + register_histogram!("pageserver_wal_redo_seconds", "Time spent on WAL redo") .expect("failed to define a metric"); static ref WAL_REDO_WAIT_TIME: Histogram = register_histogram!( - "pageserver_wal_redo_wait_time", + "pageserver_wal_redo_wait_seconds", "Time spent waiting for access to the WAL redo process" ) .expect("failed to define a metric"); static ref WAL_REDO_RECORD_COUNTER: IntCounter = register_int_counter!( - "pageserver_wal_records_replayed", - "Number of WAL records replayed" + "pageserver_replayed_wal_records_total", + "Number of WAL records replayed in WAL redo process" ) .unwrap(); } diff --git a/poetry.lock b/poetry.lock index a7cbe0aa3c..6e552d2cd3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -21,9 +21,6 @@ category = "main" optional = false python-versions = ">=3.6" -[package.dependencies] -typing-extensions = {version = ">=3.6.5", markers = "python_version < \"3.8\""} - [[package]] name = "asyncpg" version = "0.24.0" @@ -32,9 +29,6 @@ category = "main" optional = false python-versions = ">=3.6.0" -[package.dependencies] -typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} - [package.extras] dev = ["Cython (>=0.29.24,<0.30.0)", "pytest (>=6.0)", "Sphinx (>=4.1.2,<4.2.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "pycodestyle (>=2.7.0,<2.8.0)", "flake8 (>=3.9.2,<3.10.0)", "uvloop (>=0.15.3)"] docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)"] @@ -125,7 +119,6 @@ python-versions = ">=3.6" [package.dependencies] botocore-stubs = "*" -typing-extensions = {version = "*", markers = "python_version < \"3.9\""} [package.extras] accessanalyzer = ["mypy-boto3-accessanalyzer (>=1.20.0)"] @@ -454,9 +447,6 @@ category = "main" optional = false python-versions = ">=3.6" -[package.dependencies] -typing-extensions = {version = "*", markers = "python_version < \"3.9\""} - [[package]] name = "cached-property" version = "1.5.2" @@ -524,7 +514,6 @@ python-versions = ">=3.6" [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "colorama" @@ -605,7 +594,6 @@ optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" [package.dependencies] -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} mccabe = ">=0.6.0,<0.7.0" pycodestyle = ">=2.7.0,<2.8.0" pyflakes = ">=2.3.0,<2.4.0" @@ -664,23 +652,6 @@ category = "main" optional = false python-versions = ">=3.5" -[[package]] -name = "importlib-metadata" -version = "4.10.1" -description = "Read metadata from Python packages" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} -zipp = ">=0.5" - -[package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] - [[package]] name = "iniconfig" version = "1.1.1" @@ -759,9 +730,6 @@ category = "main" optional = false python-versions = ">=2.7" -[package.dependencies] 
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} - [package.extras] docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] testing = ["pytest (>=3.5,!=3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-black-multipy", "pytest-cov", "ecdsa", "feedparser", "numpy", "pandas", "pymongo", "scikit-learn", "sqlalchemy", "enum34", "jsonlib"] @@ -785,7 +753,6 @@ python-versions = "*" [package.dependencies] attrs = ">=17.4.0" -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} pyrsistent = ">=0.14.0" six = ">=1.11.0" @@ -822,7 +789,7 @@ python-versions = "*" [[package]] name = "moto" -version = "3.1.7" +version = "3.1.9" description = "A library that allows your python tests to easily mock out the boto library" category = "main" optional = false @@ -840,7 +807,6 @@ flask = {version = "*", optional = true, markers = "extra == \"server\""} flask-cors = {version = "*", optional = true, markers = "extra == \"server\""} graphql-core = {version = "*", optional = true, markers = "extra == \"server\""} idna = {version = ">=2.5,<4", optional = true, markers = "extra == \"server\""} -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} Jinja2 = ">=2.10.1" jsondiff = {version = ">=1.1.2", optional = true, markers = "extra == \"server\""} MarkupSafe = "!=2.0.0a1" @@ -868,6 +834,7 @@ ds = ["sshpubkeys (>=3.1.0)"] dynamodb = ["docker (>=2.5.1)"] dynamodb2 = ["docker (>=2.5.1)"] dynamodbstreams = ["docker (>=2.5.1)"] +ebs = ["sshpubkeys (>=3.1.0)"] ec2 = ["sshpubkeys (>=3.1.0)"] efs = ["sshpubkeys (>=3.1.0)"] glue = ["pyparsing (>=3.0.0)"] @@ -889,7 +856,6 @@ python-versions = ">=3.5" [package.dependencies] mypy-extensions = ">=0.4.3,<0.5.0" toml = "*" -typed-ast = {version = ">=1.4.0,<1.5.0", markers = "python_version < \"3.8\""} typing-extensions = ">=3.7.4" [package.extras] @@ -946,13 +912,21 @@ category = "main" optional = false python-versions = ">=3.6" -[package.dependencies] -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} - [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "prometheus-client" +version = "0.14.1" +description = "Python client for the Prometheus monitoring system." 
+category = "main" +optional = false +python-versions = ">=3.6" + +[package.extras] +twisted = ["twisted"] + [[package]] name = "psycopg2-binary" version = "2.9.3" @@ -1003,7 +977,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "pyjwt" -version = "2.3.0" +version = "2.4.0" description = "JSON Web Token implementation in Python" category = "main" optional = false @@ -1049,7 +1023,6 @@ python-versions = ">=3.6" atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" @@ -1082,6 +1055,17 @@ python-versions = "*" [package.dependencies] pytest = ">=3.2.5" +[[package]] +name = "pytest-timeout" +version = "2.1.0" +description = "pytest plugin to abort hanging tests" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +pytest = ">=5.0.0" + [[package]] name = "pytest-xdist" version = "2.5.0" @@ -1256,14 +1240,6 @@ category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -[[package]] -name = "typed-ast" -version = "1.4.3" -description = "a fork of Python 2 and 3 ast modules with type comment support" -category = "dev" -optional = false -python-versions = "*" - [[package]] name = "types-psycopg2" version = "2.9.6" @@ -1360,22 +1336,10 @@ category = "dev" optional = false python-versions = "*" -[[package]] -name = "zipp" -version = "3.7.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] - [metadata] lock-version = "1.1" -python-versions = "^3.7" -content-hash = "dc63b6e02d0ceccdc4b5616e9362c149a27fdcc6c54fda63a3b115a5b980c42e" +python-versions = "^3.9" +content-hash = "be9c00bb5081535805824242fea2a03b2f82fa9466856d618e24b3140c7da6a0" [metadata.files] aiopg = [ @@ -1571,10 +1535,6 @@ idna = [ {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, ] -importlib-metadata = [ - {file = "importlib_metadata-4.10.1-py3-none-any.whl", hash = "sha256:899e2a40a8c4a1aec681feef45733de8a6c58f3f6a0dbed2eb6574b4387a77b6"}, - {file = "importlib_metadata-4.10.1.tar.gz", hash = "sha256:951f0d8a5b7260e9db5e41d429285b5f451e928479f19d80818878527d36e95e"}, -] iniconfig = [ {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, @@ -1693,8 +1653,8 @@ mccabe = [ {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, ] moto = [ - {file = "moto-3.1.7-py3-none-any.whl", hash = "sha256:4ab6fb8dd150343e115d75e3dbdb5a8f850fc7236790819d7cef438c11ee6e89"}, - {file = "moto-3.1.7.tar.gz", hash = "sha256:20607a0fd0cf6530e05ffb623ca84d3f45d50bddbcec2a33705a0cf471e71289"}, + {file = "moto-3.1.9-py3-none-any.whl", 
hash = "sha256:8928ec168e5fd88b1127413b2fa570a80d45f25182cdad793edd208d07825269"}, + {file = "moto-3.1.9.tar.gz", hash = "sha256:ba683e70950b6579189bc12d74c1477aa036c090c6ad8b151a22f5896c005113"}, ] mypy = [ {file = "mypy-0.910-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:a155d80ea6cee511a3694b108c4494a39f42de11ee4e61e72bc424c490e46457"}, @@ -1741,6 +1701,10 @@ pluggy = [ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] +prometheus-client = [ + {file = "prometheus_client-0.14.1-py3-none-any.whl", hash = "sha256:522fded625282822a89e2773452f42df14b5a8e84a86433e3f8a189c1d54dc01"}, + {file = "prometheus_client-0.14.1.tar.gz", hash = "sha256:5459c427624961076277fdc6dc50540e2bacb98eebde99886e59ec55ed92093a"}, +] psycopg2-binary = [ {file = "psycopg2-binary-2.9.3.tar.gz", hash = "sha256:761df5313dc15da1502b21453642d7599d26be88bff659382f8f9747c7ebea4e"}, {file = "psycopg2_binary-2.9.3-cp310-cp310-macosx_10_14_x86_64.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:539b28661b71da7c0e428692438efbcd048ca21ea81af618d845e06ebfd29478"}, @@ -1831,8 +1795,8 @@ pyflakes = [ {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"}, ] pyjwt = [ - {file = "PyJWT-2.3.0-py3-none-any.whl", hash = "sha256:e0c4bb8d9f0af0c7f5b1ec4c5036309617d03d56932877f2f7a0beeb5318322f"}, - {file = "PyJWT-2.3.0.tar.gz", hash = "sha256:b888b4d56f06f6dcd777210c334e69c737be74755d3e5e9ee3fe67dc18a0ee41"}, + {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, + {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, ] pyparsing = [ {file = "pyparsing-3.0.6-py3-none-any.whl", hash = "sha256:04ff808a5b90911829c55c4e26f75fa5ca8a2f5f36aa3a51f68e27033341d3e4"}, @@ -1873,6 +1837,10 @@ pytest-lazy-fixture = [ {file = "pytest-lazy-fixture-0.6.3.tar.gz", hash = "sha256:0e7d0c7f74ba33e6e80905e9bfd81f9d15ef9a790de97993e34213deb5ad10ac"}, {file = "pytest_lazy_fixture-0.6.3-py3-none-any.whl", hash = "sha256:e0b379f38299ff27a653f03eaa69b08a6fd4484e46fd1c9907d984b9f9daeda6"}, ] +pytest-timeout = [ + {file = "pytest-timeout-2.1.0.tar.gz", hash = "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9"}, + {file = "pytest_timeout-2.1.0-py3-none-any.whl", hash = "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6"}, +] pytest-xdist = [ {file = "pytest-xdist-2.5.0.tar.gz", hash = "sha256:4580deca3ff04ddb2ac53eba39d76cb5dd5edeac050cb6fbc768b0dd712b4edf"}, {file = "pytest_xdist-2.5.0-py3-none-any.whl", hash = "sha256:6fe5c74fec98906deb8f2d2b616b5c782022744978e7bd4695d39c8f42d0ce65"}, @@ -1970,38 +1938,6 @@ toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] -typed-ast = [ - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6"}, - {file = "typed_ast-1.4.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075"}, - {file = 
"typed_ast-1.4.3-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528"}, - {file = "typed_ast-1.4.3-cp35-cp35m-win32.whl", hash = "sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428"}, - {file = "typed_ast-1.4.3-cp35-cp35m-win_amd64.whl", hash = "sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3"}, - {file = "typed_ast-1.4.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace"}, - {file = "typed_ast-1.4.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f"}, - {file = "typed_ast-1.4.3-cp36-cp36m-win32.whl", hash = "sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363"}, - {file = "typed_ast-1.4.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7"}, - {file = "typed_ast-1.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04"}, - {file = "typed_ast-1.4.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899"}, - {file = "typed_ast-1.4.3-cp37-cp37m-win32.whl", hash = "sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c"}, - {file = "typed_ast-1.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805"}, - {file = "typed_ast-1.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41"}, - {file = "typed_ast-1.4.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39"}, - {file = "typed_ast-1.4.3-cp38-cp38-win32.whl", hash = "sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927"}, - {file = "typed_ast-1.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40"}, - {file = "typed_ast-1.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0"}, - {file = "typed_ast-1.4.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3"}, - {file = 
"typed_ast-1.4.3-cp39-cp39-win32.whl", hash = "sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808"}, - {file = "typed_ast-1.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c"}, - {file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"}, -] types-psycopg2 = [ {file = "types-psycopg2-2.9.6.tar.gz", hash = "sha256:753b50b38da0e61bc8f89d149f2c4420c7e18535a87963d17b72343eb98f7c32"}, {file = "types_psycopg2-2.9.6-py3-none-any.whl", hash = "sha256:2cfd855e1562ebb5da595ee9401da93a308d69121ccd359cb8341f94ba4b6d1c"}, @@ -2092,7 +2028,3 @@ yapf = [ {file = "yapf-0.31.0-py2.py3-none-any.whl", hash = "sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e"}, {file = "yapf-0.31.0.tar.gz", hash = "sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d"}, ] -zipp = [ - {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, - {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, -] diff --git a/proxy/Cargo.toml b/proxy/Cargo.toml index 43880d645a..4e45698e3e 100644 --- a/proxy/Cargo.toml +++ b/proxy/Cargo.toml @@ -33,6 +33,7 @@ tokio = { version = "1.17", features = ["macros"] } tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } tokio-rustls = "0.23.0" url = "2.2.2" +git-version = "0.3.5" utils = { path = "../libs/utils" } metrics = { path = "../libs/metrics" } diff --git a/proxy/src/auth_backend/console.rs b/proxy/src/auth_backend/console.rs index 55a0889af4..41a822701f 100644 --- a/proxy/src/auth_backend/console.rs +++ b/proxy/src/auth_backend/console.rs @@ -117,7 +117,7 @@ async fn get_auth_info( let mut url = reqwest::Url::parse(&format!("{auth_endpoint}/proxy_get_role_secret"))?; url.query_pairs_mut() - .append_pair("cluster", cluster) + .append_pair("project", cluster) .append_pair("role", user); // TODO: use a proper logger @@ -141,7 +141,7 @@ async fn wake_compute( cluster: &str, ) -> Result<(String, u16), ConsoleAuthError> { let mut url = reqwest::Url::parse(&format!("{auth_endpoint}/proxy_wake_compute"))?; - url.query_pairs_mut().append_pair("cluster", cluster); + url.query_pairs_mut().append_pair("project", cluster); // TODO: use a proper logger println!("cplane request: {}", url); diff --git a/proxy/src/main.rs b/proxy/src/main.rs index fc2a368b85..b457d46824 100644 --- a/proxy/src/main.rs +++ b/proxy/src/main.rs @@ -25,7 +25,9 @@ use config::ProxyConfig; use futures::FutureExt; use std::{future::Future, net::SocketAddr}; use tokio::{net::TcpListener, task::JoinError}; -use utils::GIT_VERSION; +use utils::project_git_version; + +project_git_version!(GIT_VERSION); /// Flattens `Result>` into `Result`. 
async fn flatten_err( @@ -36,7 +38,6 @@ async fn flatten_err( #[tokio::main] async fn main() -> anyhow::Result<()> { - metrics::set_common_metrics_prefix("zenith_proxy"); let arg_matches = App::new("Neon proxy/router") .version(GIT_VERSION) .arg( @@ -124,7 +125,7 @@ async fn main() -> anyhow::Result<()> { auth_link_uri: arg_matches.value_of("uri").unwrap().parse()?, })); - println!("Version: {}", GIT_VERSION); + println!("Version: {GIT_VERSION}"); // Check that we can bind to address before further initialization println!("Starting http on {}", http_address); diff --git a/proxy/src/proxy.rs b/proxy/src/proxy.rs index 821ce377f5..642e50c2c1 100644 --- a/proxy/src/proxy.rs +++ b/proxy/src/proxy.rs @@ -5,7 +5,7 @@ use crate::stream::{MetricsStream, PqStream, Stream}; use anyhow::{bail, Context}; use futures::TryFutureExt; use lazy_static::lazy_static; -use metrics::{new_common_metric_name, register_int_counter, IntCounter}; +use metrics::{register_int_counter, IntCounter}; use std::sync::Arc; use tokio::io::{AsyncRead, AsyncWrite}; use utils::pq_proto::{BeMessage as Be, *}; @@ -15,17 +15,17 @@ const ERR_PROTO_VIOLATION: &str = "protocol violation"; lazy_static! { static ref NUM_CONNECTIONS_ACCEPTED_COUNTER: IntCounter = register_int_counter!( - new_common_metric_name("num_connections_accepted"), + "proxy_accepted_connections_total", "Number of TCP client connections accepted." ) .unwrap(); static ref NUM_CONNECTIONS_CLOSED_COUNTER: IntCounter = register_int_counter!( - new_common_metric_name("num_connections_closed"), + "proxy_closed_connections_total", "Number of TCP client connections closed." ) .unwrap(); static ref NUM_BYTES_PROXIED_COUNTER: IntCounter = register_int_counter!( - new_common_metric_name("num_bytes_proxied"), + "proxy_io_bytes_total", "Number of bytes sent/received between any client and backend." ) .unwrap(); diff --git a/pyproject.toml b/pyproject.toml index 335c6d61d8..c965535049 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ description = "" authors = [] [tool.poetry.dependencies] -python = "^3.7" +python = "^3.9" pytest = "^6.2.5" psycopg2-binary = "^2.9.1" typing-extensions = "^3.10.0" @@ -23,6 +23,8 @@ boto3-stubs = "^1.20.40" moto = {version = "^3.0.0", extras = ["server"]} backoff = "^1.11.1" pytest-lazy-fixture = "^0.6.3" +prometheus-client = "^0.14.1" +pytest-timeout = "^2.1.0" [tool.poetry.dev-dependencies] yapf = "==0.31.0" diff --git a/pytest.ini b/pytest.ini index abc69b765b..da9ab8c12f 100644 --- a/pytest.ini +++ b/pytest.ini @@ -9,3 +9,4 @@ minversion = 6.0 log_format = %(asctime)s.%(msecs)-3d %(levelname)s [%(filename)s:%(lineno)d] %(message)s log_date_format = %Y-%m-%d %H:%M:%S log_cli = true +timeout = 300 diff --git a/safekeeper/Cargo.toml b/safekeeper/Cargo.toml index 5e1ceee02e..417cf58cd5 100644 --- a/safekeeper/Cargo.toml +++ b/safekeeper/Cargo.toml @@ -29,6 +29,7 @@ hex = "0.4.3" const_format = "0.2.21" tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } tokio-util = { version = "0.7", features = ["io"] } +git-version = "0.3.5" postgres_ffi = { path = "../libs/postgres_ffi" } metrics = { path = "../libs/metrics" } diff --git a/safekeeper/README.md b/safekeeper/README.md index 3f097d0c24..a4bb260932 100644 --- a/safekeeper/README.md +++ b/safekeeper/README.md @@ -1,6 +1,6 @@ # WAL service -The zenith WAL service acts as a holding area and redistribution +The neon WAL service acts as a holding area and redistribution center for recently generated WAL. 
The primary Postgres server streams the WAL to the WAL safekeeper, and treats it like a (synchronous) replica. A replication slot is used in the primary to prevent the @@ -94,7 +94,7 @@ Q: What if the compute node evicts a page, needs it back, but the page is yet A: If the compute node has evicted a page, changes to it have been WAL-logged (that's why it is called Write Ahead logging; there are some exceptions like index builds, but these are exceptions). These WAL records will eventually - reach the Page Server. The Page Server notes that the compute note requests + reach the Page Server. The Page Server notes that the compute node requests pages with a very recent LSN and will not respond to the compute node until a corresponding WAL is received from WAL safekeepers. diff --git a/safekeeper/README_PROTO.md b/safekeeper/README_PROTO.md index 5d79f8c2d3..6b2ae50254 100644 --- a/safekeeper/README_PROTO.md +++ b/safekeeper/README_PROTO.md @@ -151,7 +151,7 @@ It is assumed that in case of loosing local data by some safekeepers, it should * `RestartLSN`: position in WAL confirmed by all safekeepers. * `FlushLSN`: part of WAL persisted to the disk by safekeeper. * `NodeID`: pair (term,UUID) -* `Pager`: Zenith component restoring pages from WAL stream +* `Pager`: Neon component restoring pages from WAL stream * `Replica`: read-only computatio node * `VCL`: the largerst LSN for which we can guarantee availablity of all prior records. diff --git a/safekeeper/src/bin/safekeeper.rs b/safekeeper/src/bin/safekeeper.rs index 7e979840c2..a5ffc013e2 100644 --- a/safekeeper/src/bin/safekeeper.rs +++ b/safekeeper/src/bin/safekeeper.rs @@ -17,19 +17,21 @@ use url::{ParseError, Url}; use safekeeper::control_file::{self}; use safekeeper::defaults::{DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_PG_LISTEN_ADDR}; use safekeeper::remove_wal; +use safekeeper::timeline::GlobalTimelines; use safekeeper::wal_service; use safekeeper::SafeKeeperConf; use safekeeper::{broker, callmemaybe}; use safekeeper::{http, s3_offload}; use utils::{ - http::endpoint, logging, shutdown::exit_now, signals, tcp_listener, zid::ZNodeId, GIT_VERSION, + http::endpoint, logging, project_git_version, shutdown::exit_now, signals, tcp_listener, + zid::ZNodeId, }; const LOCK_FILE_NAME: &str = "safekeeper.lock"; const ID_FILE_NAME: &str = "safekeeper.id"; +project_git_version!(GIT_VERSION); -fn main() -> Result<()> { - metrics::set_common_metrics_prefix("safekeeper"); +fn main() -> anyhow::Result<()> { let arg_matches = App::new("Zenith safekeeper") .about("Store WAL stream to local file system and push it to WAL receivers") .version(GIT_VERSION) @@ -115,6 +117,14 @@ fn main() -> Result<()> { .takes_value(true) .help("a prefix to always use when polling/pusing data in etcd from this safekeeper"), ) + .arg( + Arg::new("enable-s3-offload") + .long("enable-s3-offload") + .takes_value(true) + .default_value("true") + .default_missing_value("true") + .help("Enable/disable s3 offloading. 
When disabled, safekeeper removes WAL ignoring s3 WAL horizon."), + ) .get_matches(); if let Some(addr) = arg_matches.value_of("dump-control-file") { @@ -166,19 +176,26 @@ fn main() -> Result<()> { if let Some(addr) = arg_matches.value_of("broker-endpoints") { let collected_ep: Result, ParseError> = addr.split(',').map(Url::parse).collect(); - conf.broker_endpoints = Some(collected_ep?); + conf.broker_endpoints = collected_ep.context("Failed to parse broker endpoint urls")?; } if let Some(prefix) = arg_matches.value_of("broker-etcd-prefix") { conf.broker_etcd_prefix = prefix.to_string(); } + // Seems like there is no better way to accept bool values explicitly in clap. + conf.s3_offload_enabled = arg_matches + .value_of("enable-s3-offload") + .unwrap() + .parse() + .context("failed to parse bool enable-s3-offload bool")?; + start_safekeeper(conf, given_id, arg_matches.is_present("init")) } fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option, init: bool) -> Result<()> { let log_file = logging::init("safekeeper.log", conf.daemonize)?; - info!("version: {}", GIT_VERSION); + info!("version: {GIT_VERSION}"); // Prevent running multiple safekeepers on the same directory let lock_file_path = conf.workdir.join(LOCK_FILE_NAME); @@ -228,12 +245,14 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option, init: b // Otherwise, the coverage data will be damaged. match daemonize.exit_action(|| exit_now(0)).start() { Ok(_) => info!("Success, daemonized"), - Err(e) => error!("Error, {}", e), + Err(err) => bail!("Error: {err}. could not daemonize. bailing."), } } let signals = signals::install_shutdown_handlers()?; let mut threads = vec![]; + let (callmemaybe_tx, callmemaybe_rx) = mpsc::unbounded_channel(); + GlobalTimelines::set_callmemaybe_tx(callmemaybe_tx); let conf_ = conf.clone(); threads.push( @@ -262,13 +281,12 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option, init: b ); } - let (tx, rx) = mpsc::unbounded_channel(); let conf_cloned = conf.clone(); let safekeeper_thread = thread::Builder::new() .name("Safekeeper thread".into()) .spawn(|| { // thread code - let thread_result = wal_service::thread_main(conf_cloned, pg_listener, tx); + let thread_result = wal_service::thread_main(conf_cloned, pg_listener); if let Err(e) = thread_result { info!("safekeeper thread terminated: {}", e); } @@ -282,7 +300,7 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option, init: b .name("callmemaybe thread".into()) .spawn(|| { // thread code - let thread_result = callmemaybe::thread_main(conf_cloned, rx); + let thread_result = callmemaybe::thread_main(conf_cloned, callmemaybe_rx); if let Err(e) = thread_result { error!("callmemaybe thread terminated: {}", e); } @@ -290,7 +308,7 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option, init: b .unwrap(); threads.push(callmemaybe_thread); - if conf.broker_endpoints.is_some() { + if !conf.broker_endpoints.is_empty() { let conf_ = conf.clone(); threads.push( thread::Builder::new() @@ -299,6 +317,8 @@ fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option, init: b broker::thread_main(conf_); })?, ); + } else { + warn!("No broker endpoints providing, starting without node sync") } let conf_ = conf.clone(); diff --git a/safekeeper/src/broker.rs b/safekeeper/src/broker.rs index c9ae1a8d98..d7217be20a 100644 --- a/safekeeper/src/broker.rs +++ b/safekeeper/src/broker.rs @@ -34,19 +34,19 @@ pub fn thread_main(conf: SafeKeeperConf) { /// Key to per timeline per safekeeper data. 
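// Editorial sketch, not part of the patch: how the --enable-s3-offload value
// becomes a bool. clap hands back a &str here, so the patch runs it through
// str::parse::<bool>(), which accepts exactly "true" and "false". The helper
// below is illustrative only; the error message wording is an assumption.
fn parse_enable_s3_offload(value: &str) -> anyhow::Result<bool> {
    use anyhow::Context as _;
    value
        .parse::<bool>()
        .with_context(|| format!("failed to parse --enable-s3-offload value {value:?} as a bool"))
}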
fn timeline_safekeeper_path( - broker_prefix: String, + broker_etcd_prefix: String, zttid: ZTenantTimelineId, sk_id: ZNodeId, ) -> String { format!( "{}/{sk_id}", - SkTimelineSubscriptionKind::timeline(broker_prefix, zttid).watch_key() + SkTimelineSubscriptionKind::timeline(broker_etcd_prefix, zttid).watch_key() ) } /// Push once in a while data about all active timelines to the broker. async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> { - let mut client = Client::connect(&conf.broker_endpoints.as_ref().unwrap(), None).await?; + let mut client = Client::connect(&conf.broker_endpoints, None).await?; // Get and maintain lease to automatically delete obsolete data let lease = client.lease_grant(LEASE_TTL_SEC, None).await?; @@ -60,7 +60,7 @@ async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> { // lock is held. for zttid in GlobalTimelines::get_active_timelines() { if let Ok(tli) = GlobalTimelines::get(&conf, zttid, false) { - let sk_info = tli.get_public_info()?; + let sk_info = tli.get_public_info(&conf)?; let put_opts = PutOptions::new().with_lease(lease.id()); client .put( @@ -91,7 +91,7 @@ async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> { /// Subscribe and fetch all the interesting data from the broker. async fn pull_loop(conf: SafeKeeperConf) -> Result<()> { - let mut client = Client::connect(&conf.broker_endpoints.as_ref().unwrap(), None).await?; + let mut client = Client::connect(&conf.broker_endpoints, None).await?; let mut subscription = etcd_broker::subscribe_to_safekeeper_timeline_updates( &mut client, @@ -99,7 +99,6 @@ async fn pull_loop(conf: SafeKeeperConf) -> Result<()> { ) .await .context("failed to subscribe for safekeeper info")?; - loop { match subscription.fetch_data().await { Some(new_info) => { diff --git a/safekeeper/src/handler.rs b/safekeeper/src/handler.rs index 7d86523b0e..9af78661f9 100644 --- a/safekeeper/src/handler.rs +++ b/safekeeper/src/handler.rs @@ -21,9 +21,6 @@ use utils::{ zid::{ZTenantId, ZTenantTimelineId, ZTimelineId}, }; -use crate::callmemaybe::CallmeEvent; -use tokio::sync::mpsc::UnboundedSender; - /// Safekeeper handler of postgres commands pub struct SafekeeperPostgresHandler { pub conf: SafeKeeperConf, @@ -33,8 +30,6 @@ pub struct SafekeeperPostgresHandler { pub ztimelineid: Option, pub timeline: Option>, pageserver_connstr: Option, - //sender to communicate with callmemaybe thread - pub tx: UnboundedSender, } /// Parsed Postgres command. 
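// Editorial sketch, not part of the patch: with the sender removed from the
// per-connection handler, the callmemaybe channel is wired up once at startup
// and every Timeline created afterwards clones it from the global state. This
// mirrors the order used in start_safekeeper() earlier in this diff; the
// helper function itself is illustrative and assumes the safekeeper crate's
// items (SafeKeeperConf, GlobalTimelines, callmemaybe) are in scope.
fn wire_up_callmemaybe(conf: SafeKeeperConf) -> anyhow::Result<()> {
    let (callmemaybe_tx, callmemaybe_rx) = tokio::sync::mpsc::unbounded_channel();
    // Must run before any GlobalTimelines::create()/get() call, because
    // Timeline::new() takes its sender from the global state.
    GlobalTimelines::set_callmemaybe_tx(callmemaybe_tx);
    let _callmemaybe_thread = std::thread::Builder::new()
        .name("callmemaybe thread".into())
        .spawn(move || callmemaybe::thread_main(conf, callmemaybe_rx))?;
    // The real binary keeps the JoinHandle and joins it on shutdown.
    Ok(())
}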
@@ -140,7 +135,7 @@ impl postgres_backend::Handler for SafekeeperPostgresHandler { } impl SafekeeperPostgresHandler { - pub fn new(conf: SafeKeeperConf, tx: UnboundedSender) -> Self { + pub fn new(conf: SafeKeeperConf) -> Self { SafekeeperPostgresHandler { conf, appname: None, @@ -148,7 +143,6 @@ impl SafekeeperPostgresHandler { ztimelineid: None, timeline: None, pageserver_connstr: None, - tx, } } diff --git a/safekeeper/src/http/routes.rs b/safekeeper/src/http/routes.rs index e731db5617..62fbd2ff2f 100644 --- a/safekeeper/src/http/routes.rs +++ b/safekeeper/src/http/routes.rs @@ -3,19 +3,20 @@ use hyper::{Body, Request, Response, StatusCode}; use serde::Serialize; use serde::Serializer; +use std::collections::HashMap; use std::fmt::Display; use std::sync::Arc; use crate::safekeeper::Term; use crate::safekeeper::TermHistory; -use crate::timeline::GlobalTimelines; +use crate::timeline::{GlobalTimelines, TimelineDeleteForceResult}; use crate::SafeKeeperConf; use utils::{ http::{ endpoint, error::ApiError, json::{json_request, json_response}, - request::parse_request_param, + request::{ensure_no_body, parse_request_param}, RequestExt, RouterBuilder, }, lsn::Lsn, @@ -130,6 +131,44 @@ async fn timeline_create_handler(mut request: Request) -> Result, +) -> Result, ApiError> { + let zttid = ZTenantTimelineId::new( + parse_request_param(&request, "tenant_id")?, + parse_request_param(&request, "timeline_id")?, + ); + ensure_no_body(&mut request).await?; + json_response( + StatusCode::OK, + GlobalTimelines::delete_force(get_conf(&request), &zttid).map_err(ApiError::from_err)?, + ) +} + +/// Deactivates all timelines for the tenant and removes its data directory. +/// See `timeline_delete_force_handler`. +async fn tenant_delete_force_handler( + mut request: Request, +) -> Result, ApiError> { + let tenant_id = parse_request_param(&request, "tenant_id")?; + ensure_no_body(&mut request).await?; + json_response( + StatusCode::OK, + GlobalTimelines::delete_force_all_for_tenant(get_conf(&request), &tenant_id) + .map_err(ApiError::from_err)? + .iter() + .map(|(zttid, resp)| (format!("{}", zttid.timeline_id), *resp)) + .collect::>(), + ) +} + /// Used only in tests to hand craft required data. 
async fn record_safekeeper_info(mut request: Request) -> Result, ApiError> { let zttid = ZTenantTimelineId::new( @@ -155,6 +194,11 @@ pub fn make_router(conf: SafeKeeperConf) -> RouterBuilder timeline_status_handler, ) .post("/v1/timeline", timeline_create_handler) + .delete( + "/v1/tenant/:tenant_id/timeline/:timeline_id", + timeline_delete_force_handler, + ) + .delete("/v1/tenant/:tenant_id", tenant_delete_force_handler) // for tests .post( "/v1/record_safekeeper_info/:tenant_id/:timeline_id", diff --git a/safekeeper/src/lib.rs b/safekeeper/src/lib.rs index f74e5be992..a87e5da686 100644 --- a/safekeeper/src/lib.rs +++ b/safekeeper/src/lib.rs @@ -3,7 +3,7 @@ use std::path::PathBuf; use std::time::Duration; use url::Url; -use utils::zid::{ZNodeId, ZTenantTimelineId}; +use utils::zid::{ZNodeId, ZTenantId, ZTenantTimelineId}; pub mod broker; pub mod callmemaybe; @@ -27,11 +27,10 @@ pub mod defaults { pub const DEFAULT_PG_LISTEN_PORT: u16 = 5454; pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}"); - pub const DEFAULT_NEON_BROKER_PREFIX: &str = "neon"; pub const DEFAULT_HTTP_LISTEN_PORT: u16 = 7676; pub const DEFAULT_HTTP_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_HTTP_LISTEN_PORT}"); - pub const DEFAULT_RECALL_PERIOD: Duration = Duration::from_secs(1); + pub const DEFAULT_RECALL_PERIOD: Duration = Duration::from_secs(10); } #[derive(Debug, Clone)] @@ -51,14 +50,18 @@ pub struct SafeKeeperConf { pub ttl: Option, pub recall_period: Duration, pub my_id: ZNodeId, - pub broker_endpoints: Option>, + pub broker_endpoints: Vec, pub broker_etcd_prefix: String, + pub s3_offload_enabled: bool, } impl SafeKeeperConf { + pub fn tenant_dir(&self, tenant_id: &ZTenantId) -> PathBuf { + self.workdir.join(tenant_id.to_string()) + } + pub fn timeline_dir(&self, zttid: &ZTenantTimelineId) -> PathBuf { - self.workdir - .join(zttid.tenant_id.to_string()) + self.tenant_dir(&zttid.tenant_id) .join(zttid.timeline_id.to_string()) } } @@ -77,8 +80,9 @@ impl Default for SafeKeeperConf { ttl: None, recall_period: defaults::DEFAULT_RECALL_PERIOD, my_id: ZNodeId(0), - broker_endpoints: None, - broker_etcd_prefix: defaults::DEFAULT_NEON_BROKER_PREFIX.to_string(), + broker_endpoints: Vec::new(), + broker_etcd_prefix: etcd_broker::DEFAULT_NEON_BROKER_ETCD_PREFIX.to_string(), + s3_offload_enabled: true, } } } diff --git a/safekeeper/src/receive_wal.rs b/safekeeper/src/receive_wal.rs index 3ad99ab0df..0ef335c9ed 100644 --- a/safekeeper/src/receive_wal.rs +++ b/safekeeper/src/receive_wal.rs @@ -5,7 +5,6 @@ use anyhow::{anyhow, bail, Result}; use bytes::BytesMut; -use tokio::sync::mpsc::UnboundedSender; use tracing::*; use crate::timeline::Timeline; @@ -28,8 +27,6 @@ use utils::{ sock_split::ReadStream, }; -use crate::callmemaybe::CallmeEvent; - pub struct ReceiveWalConn<'pg> { /// Postgres connection pg_backend: &'pg mut PostgresBackend, @@ -91,10 +88,9 @@ impl<'pg> ReceiveWalConn<'pg> { // Register the connection and defer unregister. 
spg.timeline .get() - .on_compute_connect(self.pageserver_connstr.as_ref(), &spg.tx)?; + .on_compute_connect(self.pageserver_connstr.as_ref())?; let _guard = ComputeConnectionGuard { timeline: Arc::clone(spg.timeline.get()), - callmemaybe_tx: spg.tx.clone(), }; let mut next_msg = Some(next_msg); @@ -194,13 +190,10 @@ impl ProposerPollStream { struct ComputeConnectionGuard { timeline: Arc, - callmemaybe_tx: UnboundedSender, } impl Drop for ComputeConnectionGuard { fn drop(&mut self) { - self.timeline - .on_compute_disconnect(&self.callmemaybe_tx) - .unwrap(); + self.timeline.on_compute_disconnect().unwrap(); } } diff --git a/safekeeper/src/remove_wal.rs b/safekeeper/src/remove_wal.rs index 9474f65e5f..3278d51bd3 100644 --- a/safekeeper/src/remove_wal.rs +++ b/safekeeper/src/remove_wal.rs @@ -12,7 +12,7 @@ pub fn thread_main(conf: SafeKeeperConf) { let active_tlis = GlobalTimelines::get_active_timelines(); for zttid in &active_tlis { if let Ok(tli) = GlobalTimelines::get(&conf, *zttid, false) { - if let Err(e) = tli.remove_old_wal() { + if let Err(e) = tli.remove_old_wal(conf.s3_offload_enabled) { warn!( "failed to remove WAL for tenant {} timeline {}: {}", tli.zttid.tenant_id, tli.zttid.timeline_id, e diff --git a/safekeeper/src/safekeeper.rs b/safekeeper/src/safekeeper.rs index b9264565dc..fff1c269b6 100644 --- a/safekeeper/src/safekeeper.rs +++ b/safekeeper/src/safekeeper.rs @@ -930,13 +930,18 @@ where /// offloading. /// While it is safe to use inmem values for determining horizon, /// we use persistent to make possible normal states less surprising. - pub fn get_horizon_segno(&self) -> XLogSegNo { + pub fn get_horizon_segno(&self, s3_offload_enabled: bool) -> XLogSegNo { + let s3_offload_horizon = if s3_offload_enabled { + self.state.s3_wal_lsn + } else { + Lsn(u64::MAX) + }; let horizon_lsn = min( min( self.state.remote_consistent_lsn, self.state.peer_horizon_lsn, ), - self.state.s3_wal_lsn, + s3_offload_horizon, ); horizon_lsn.segment_number(self.state.server.wal_seg_size as usize) } diff --git a/safekeeper/src/send_wal.rs b/safekeeper/src/send_wal.rs index 960f70d154..d52dd6ea57 100644 --- a/safekeeper/src/send_wal.rs +++ b/safekeeper/src/send_wal.rs @@ -264,13 +264,13 @@ impl ReplicationConn { } else { let pageserver_connstr = pageserver_connstr.expect("there should be a pageserver connection string since this is not a wal_proposer_recovery"); let zttid = spg.timeline.get().zttid; - let tx_clone = spg.tx.clone(); + let tx_clone = spg.timeline.get().callmemaybe_tx.clone(); let subscription_key = SubscriptionStateKey::new( zttid.tenant_id, zttid.timeline_id, pageserver_connstr.clone(), ); - spg.tx + tx_clone .send(CallmeEvent::Pause(subscription_key)) .unwrap_or_else(|e| { error!("failed to send Pause request to callmemaybe thread {}", e); @@ -315,7 +315,7 @@ impl ReplicationConn { } else { // TODO: also check once in a while whether we are walsender // to right pageserver. - if spg.timeline.get().check_deactivate(replica_id, &spg.tx)? { + if spg.timeline.get().check_deactivate(replica_id)? { // Shut down, timeline is suspended. 
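// Editorial sketch, not part of the patch: the effect of the new
// s3_offload_enabled flag on the WAL-removal horizon computed by
// get_horizon_segno() above. With offload disabled the S3 term degenerates to
// Lsn::MAX, so only the pageserver and peer positions can hold WAL back.
// The free function is illustrative; the field names match the patch.
fn horizon_lsn(
    remote_consistent_lsn: Lsn,
    peer_horizon_lsn: Lsn,
    s3_wal_lsn: Lsn,
    s3_offload_enabled: bool,
) -> Lsn {
    use std::cmp::min;
    let s3_offload_horizon = if s3_offload_enabled { s3_wal_lsn } else { Lsn(u64::MAX) };
    min(min(remote_consistent_lsn, peer_horizon_lsn), s3_offload_horizon)
}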
// TODO create proper error type for this bail!("end streaming to {:?}", spg.appname); diff --git a/safekeeper/src/timeline.rs b/safekeeper/src/timeline.rs index 140d6660ac..2bb7771aac 100644 --- a/safekeeper/src/timeline.rs +++ b/safekeeper/src/timeline.rs @@ -7,6 +7,8 @@ use etcd_broker::SkTimelineInfo; use lazy_static::lazy_static; use postgres_ffi::xlog_utils::XLogSegNo; +use serde::Serialize; + use std::cmp::{max, min}; use std::collections::HashMap; use std::fs::{self}; @@ -19,7 +21,7 @@ use tracing::*; use utils::{ lsn::Lsn, pq_proto::ZenithFeedback, - zid::{ZNodeId, ZTenantTimelineId}, + zid::{ZNodeId, ZTenantId, ZTenantTimelineId}, }; use crate::callmemaybe::{CallmeEvent, SubscriptionStateKey}; @@ -89,7 +91,6 @@ struct SharedState { active: bool, num_computes: u32, pageserver_connstr: Option, - listen_pg_addr: String, last_removed_segno: XLogSegNo, } @@ -112,7 +113,6 @@ impl SharedState { active: false, num_computes: 0, pageserver_connstr: None, - listen_pg_addr: conf.listen_pg_addr.clone(), last_removed_segno: 0, }) } @@ -132,7 +132,6 @@ impl SharedState { active: false, num_computes: 0, pageserver_connstr: None, - listen_pg_addr: conf.listen_pg_addr.clone(), last_removed_segno: 0, }) } @@ -278,15 +277,21 @@ impl SharedState { /// Database instance (tenant) pub struct Timeline { pub zttid: ZTenantTimelineId, + pub callmemaybe_tx: UnboundedSender, mutex: Mutex, /// conditional variable used to notify wal senders cond: Condvar, } impl Timeline { - fn new(zttid: ZTenantTimelineId, shared_state: SharedState) -> Timeline { + fn new( + zttid: ZTenantTimelineId, + callmemaybe_tx: UnboundedSender, + shared_state: SharedState, + ) -> Timeline { Timeline { zttid, + callmemaybe_tx, mutex: Mutex::new(shared_state), cond: Condvar::new(), } @@ -295,34 +300,27 @@ impl Timeline { /// Register compute connection, starting timeline-related activity if it is /// not running yet. /// Can fail only if channel to a static thread got closed, which is not normal at all. - pub fn on_compute_connect( - &self, - pageserver_connstr: Option<&String>, - callmemaybe_tx: &UnboundedSender, - ) -> Result<()> { + pub fn on_compute_connect(&self, pageserver_connstr: Option<&String>) -> Result<()> { let mut shared_state = self.mutex.lock().unwrap(); shared_state.num_computes += 1; // FIXME: currently we always adopt latest pageserver connstr, but we // should have kind of generations assigned by compute to distinguish // the latest one or even pass it through consensus to reliably deliver // to all safekeepers. - shared_state.activate(&self.zttid, pageserver_connstr, callmemaybe_tx)?; + shared_state.activate(&self.zttid, pageserver_connstr, &self.callmemaybe_tx)?; Ok(()) } /// De-register compute connection, shutting down timeline activity if /// pageserver doesn't need catchup. /// Can fail only if channel to a static thread got closed, which is not normal at all. - pub fn on_compute_disconnect( - &self, - callmemaybe_tx: &UnboundedSender, - ) -> Result<()> { + pub fn on_compute_disconnect(&self) -> Result<()> { let mut shared_state = self.mutex.lock().unwrap(); shared_state.num_computes -= 1; // If there is no pageserver, can suspend right away; otherwise let // walsender do that. 
if shared_state.num_computes == 0 && shared_state.pageserver_connstr.is_none() { - shared_state.deactivate(&self.zttid, callmemaybe_tx)?; + shared_state.deactivate(&self.zttid, &self.callmemaybe_tx)?; } Ok(()) } @@ -330,11 +328,7 @@ impl Timeline { /// Deactivate tenant if there is no computes and pageserver is caughtup, /// assuming the pageserver status is in replica_id. /// Returns true if deactivated. - pub fn check_deactivate( - &self, - replica_id: usize, - callmemaybe_tx: &UnboundedSender, - ) -> Result { + pub fn check_deactivate(&self, replica_id: usize) -> Result { let mut shared_state = self.mutex.lock().unwrap(); if !shared_state.active { // already suspended @@ -346,13 +340,27 @@ impl Timeline { (replica_state.last_received_lsn != Lsn::MAX && // Lsn::MAX means that we don't know the latest LSN yet. replica_state.last_received_lsn >= shared_state.sk.inmem.commit_lsn); if deactivate { - shared_state.deactivate(&self.zttid, callmemaybe_tx)?; + shared_state.deactivate(&self.zttid, &self.callmemaybe_tx)?; return Ok(true); } } Ok(false) } + /// Deactivates the timeline, assuming it is being deleted. + /// Returns whether the timeline was already active. + /// + /// The callmemaybe thread is stopped by the deactivation message. We assume all other threads + /// will stop by themselves eventually (possibly with errors, but no panics). There should be no + /// compute threads (as we're deleting the timeline), actually. Some WAL may be left unsent, but + /// we're deleting the timeline anyway. + pub fn deactivate_for_delete(&self) -> Result { + let mut shared_state = self.mutex.lock().unwrap(); + let was_active = shared_state.active; + shared_state.deactivate(&self.zttid, &self.callmemaybe_tx)?; + Ok(was_active) + } + fn is_active(&self) -> bool { let shared_state = self.mutex.lock().unwrap(); shared_state.active @@ -421,7 +429,7 @@ impl Timeline { } /// Prepare public safekeeper info for reporting. 
- pub fn get_public_info(&self) -> anyhow::Result { + pub fn get_public_info(&self, conf: &SafeKeeperConf) -> anyhow::Result { let shared_state = self.mutex.lock().unwrap(); Ok(SkTimelineInfo { last_log_term: Some(shared_state.sk.get_epoch()), @@ -435,18 +443,7 @@ impl Timeline { shared_state.sk.inmem.remote_consistent_lsn, )), peer_horizon_lsn: Some(shared_state.sk.inmem.peer_horizon_lsn), - wal_stream_connection_string: shared_state - .pageserver_connstr - .as_deref() - .map(|pageserver_connstr| { - wal_stream_connection_string( - self.zttid, - &shared_state.listen_pg_addr, - pageserver_connstr, - ) - }) - .transpose() - .context("Failed to get the pageserver callmemaybe connstr")?, + safekeeper_connection_string: Some(conf.listen_pg_addr.clone()), }) } @@ -479,7 +476,7 @@ impl Timeline { shared_state.sk.wal_store.flush_lsn() } - pub fn remove_old_wal(&self) -> Result<()> { + pub fn remove_old_wal(&self, s3_offload_enabled: bool) -> Result<()> { let horizon_segno: XLogSegNo; let remover: Box Result<(), anyhow::Error>>; { @@ -488,7 +485,7 @@ impl Timeline { if shared_state.sk.state.server.wal_seg_size == 0 { return Ok(()); } - horizon_segno = shared_state.sk.get_horizon_segno(); + horizon_segno = shared_state.sk.get_horizon_segno(s3_offload_enabled); remover = shared_state.sk.wal_store.remove_up_to(); if horizon_segno <= 1 || horizon_segno <= shared_state.last_removed_segno { return Ok(()); @@ -504,29 +501,6 @@ impl Timeline { } } -// pageserver connstr is needed to be able to distinguish between different pageservers -// it is required to correctly manage callmemaybe subscriptions when more than one pageserver is involved -// TODO it is better to use some sort of a unique id instead of connection string, see https://github.com/zenithdb/zenith/issues/1105 -fn wal_stream_connection_string( - ZTenantTimelineId { - tenant_id, - timeline_id, - }: ZTenantTimelineId, - listen_pg_addr_str: &str, - pageserver_connstr: &str, -) -> anyhow::Result { - let me_connstr = format!("postgresql://no_user@{}/no_db", listen_pg_addr_str); - let me_conf = me_connstr - .parse::() - .with_context(|| { - format!("Failed to parse pageserver connection string '{me_connstr}' as a postgres one") - })?; - let (host, port) = utils::connstring::connection_host_port(&me_conf); - Ok(format!( - "host={host} port={port} options='-c ztimelineid={timeline_id} ztenantid={tenant_id} pageserver_connstr={pageserver_connstr}'", - )) -} - // Utilities needed by various Connection-like objects pub trait TimelineTools { fn set(&mut self, conf: &SafeKeeperConf, zttid: ZTenantTimelineId, create: bool) -> Result<()>; @@ -545,22 +519,41 @@ impl TimelineTools for Option> { } } +struct GlobalTimelinesState { + timelines: HashMap>, + callmemaybe_tx: Option>, +} + lazy_static! { - pub static ref TIMELINES: Mutex>> = - Mutex::new(HashMap::new()); + static ref TIMELINES_STATE: Mutex = Mutex::new(GlobalTimelinesState { + timelines: HashMap::new(), + callmemaybe_tx: None + }); +} + +#[derive(Clone, Copy, Serialize)] +pub struct TimelineDeleteForceResult { + pub dir_existed: bool, + pub was_active: bool, } /// A zero-sized struct used to manage access to the global timelines map. 
pub struct GlobalTimelines; impl GlobalTimelines { + pub fn set_callmemaybe_tx(callmemaybe_tx: UnboundedSender) { + let mut state = TIMELINES_STATE.lock().unwrap(); + assert!(state.callmemaybe_tx.is_none()); + state.callmemaybe_tx = Some(callmemaybe_tx); + } + fn create_internal( - mut timelines: MutexGuard>>, + mut state: MutexGuard, conf: &SafeKeeperConf, zttid: ZTenantTimelineId, peer_ids: Vec, ) -> Result> { - match timelines.get(&zttid) { + match state.timelines.get(&zttid) { Some(_) => bail!("timeline {} already exists", zttid), None => { // TODO: check directory existence @@ -569,8 +562,12 @@ impl GlobalTimelines { let shared_state = SharedState::create(conf, &zttid, peer_ids) .context("failed to create shared state")?; - let new_tli = Arc::new(Timeline::new(zttid, shared_state)); - timelines.insert(zttid, Arc::clone(&new_tli)); + let new_tli = Arc::new(Timeline::new( + zttid, + state.callmemaybe_tx.as_ref().unwrap().clone(), + shared_state, + )); + state.timelines.insert(zttid, Arc::clone(&new_tli)); Ok(new_tli) } } @@ -581,20 +578,20 @@ impl GlobalTimelines { zttid: ZTenantTimelineId, peer_ids: Vec, ) -> Result> { - let timelines = TIMELINES.lock().unwrap(); - GlobalTimelines::create_internal(timelines, conf, zttid, peer_ids) + let state = TIMELINES_STATE.lock().unwrap(); + GlobalTimelines::create_internal(state, conf, zttid, peer_ids) } - /// Get a timeline with control file loaded from the global TIMELINES map. + /// Get a timeline with control file loaded from the global TIMELINES_STATE.timelines map. /// If control file doesn't exist and create=false, bails out. pub fn get( conf: &SafeKeeperConf, zttid: ZTenantTimelineId, create: bool, ) -> Result> { - let mut timelines = TIMELINES.lock().unwrap(); + let mut state = TIMELINES_STATE.lock().unwrap(); - match timelines.get(&zttid) { + match state.timelines.get(&zttid) { Some(result) => Ok(Arc::clone(result)), None => { let shared_state = @@ -610,20 +607,19 @@ impl GlobalTimelines { .contains("No such file or directory") && create { - return GlobalTimelines::create_internal( - timelines, - conf, - zttid, - vec![], - ); + return GlobalTimelines::create_internal(state, conf, zttid, vec![]); } else { return Err(error); } } }; - let new_tli = Arc::new(Timeline::new(zttid, shared_state)); - timelines.insert(zttid, Arc::clone(&new_tli)); + let new_tli = Arc::new(Timeline::new( + zttid, + state.callmemaybe_tx.as_ref().unwrap().clone(), + shared_state, + )); + state.timelines.insert(zttid, Arc::clone(&new_tli)); Ok(new_tli) } } @@ -631,11 +627,89 @@ impl GlobalTimelines { /// Get ZTenantTimelineIDs of all active timelines. pub fn get_active_timelines() -> Vec { - let timelines = TIMELINES.lock().unwrap(); - timelines + let state = TIMELINES_STATE.lock().unwrap(); + state + .timelines .iter() .filter(|&(_, tli)| tli.is_active()) .map(|(zttid, _)| *zttid) .collect() } + + fn delete_force_internal( + conf: &SafeKeeperConf, + zttid: &ZTenantTimelineId, + was_active: bool, + ) -> Result { + match std::fs::remove_dir_all(conf.timeline_dir(zttid)) { + Ok(_) => Ok(TimelineDeleteForceResult { + dir_existed: true, + was_active, + }), + Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(TimelineDeleteForceResult { + dir_existed: false, + was_active, + }), + Err(e) => Err(e.into()), + } + } + + /// Deactivates and deletes the timeline, see `Timeline::deactivate_for_delete()`, the deletes + /// the corresponding data directory. 
+ /// We assume all timeline threads do not care about `GlobalTimelines` not containing the timeline + /// anymore, and they will eventually terminate without panics. + /// + /// There are multiple ways the timeline may be accidentally "re-created" (so we end up with two + /// `Timeline` objects in memory): + /// a) a compute node connects after this method is called, or + /// b) an HTTP GET request about the timeline is made and it's able to restore the current state, or + /// c) an HTTP POST request for timeline creation is made after the timeline is already deleted. + /// TODO: ensure all of the above never happens. + pub fn delete_force( + conf: &SafeKeeperConf, + zttid: &ZTenantTimelineId, + ) -> Result { + info!("deleting timeline {}", zttid); + let was_active = match TIMELINES_STATE.lock().unwrap().timelines.remove(zttid) { + None => false, + Some(tli) => tli.deactivate_for_delete()?, + }; + GlobalTimelines::delete_force_internal(conf, zttid, was_active) + } + + /// Deactivates and deletes all timelines for the tenant, see `delete()`. + /// Returns map of all timelines which the tenant had, `true` if a timeline was active. + /// There may be a race if new timelines are created simultaneously. + pub fn delete_force_all_for_tenant( + conf: &SafeKeeperConf, + tenant_id: &ZTenantId, + ) -> Result> { + info!("deleting all timelines for tenant {}", tenant_id); + let mut to_delete = HashMap::new(); + { + // Keep mutex in this scope. + let timelines = &mut TIMELINES_STATE.lock().unwrap().timelines; + for (&zttid, tli) in timelines.iter() { + if zttid.tenant_id == *tenant_id { + to_delete.insert(zttid, tli.deactivate_for_delete()?); + } + } + // TODO: test that the correct subset of timelines is removed. It's complicated because they are implicitly created currently. + timelines.retain(|zttid, _| !to_delete.contains_key(zttid)); + } + let mut deleted = HashMap::new(); + for (zttid, was_active) in to_delete { + deleted.insert( + zttid, + GlobalTimelines::delete_force_internal(conf, &zttid, was_active)?, + ); + } + // There may be inactive timelines, so delete the whole tenant dir as well. + match std::fs::remove_dir_all(conf.tenant_dir(tenant_id)) { + Ok(_) => (), + Err(e) if e.kind() == std::io::ErrorKind::NotFound => (), + e => e?, + }; + Ok(deleted) + } } diff --git a/safekeeper/src/wal_service.rs b/safekeeper/src/wal_service.rs index 468ac28526..5980160788 100644 --- a/safekeeper/src/wal_service.rs +++ b/safekeeper/src/wal_service.rs @@ -8,29 +8,22 @@ use std::net::{TcpListener, TcpStream}; use std::thread; use tracing::*; -use crate::callmemaybe::CallmeEvent; use crate::handler::SafekeeperPostgresHandler; use crate::SafeKeeperConf; -use tokio::sync::mpsc::UnboundedSender; use utils::postgres_backend::{AuthType, PostgresBackend}; /// Accept incoming TCP connections and spawn them into a background thread. 
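// Editorial sketch, not part of the patch: the response shape a client sees
// from the new force-delete endpoints registered in http/routes.rs above.
// DELETE /v1/tenant/:tenant_id/timeline/:timeline_id returns one
// TimelineDeleteForceResult; DELETE /v1/tenant/:tenant_id returns a map keyed
// by timeline id. The struct mirrors the serialized fields; the JSON literal
// is a made-up example, and serde/serde_json/anyhow are assumed available.
#[derive(serde::Deserialize, Debug)]
struct DeleteForceResponse {
    dir_existed: bool,
    was_active: bool,
}

fn parse_delete_force_response() -> anyhow::Result<DeleteForceResponse> {
    let body = r#"{"dir_existed": true, "was_active": false}"#;
    Ok(serde_json::from_str(body)?)
}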
-pub fn thread_main( - conf: SafeKeeperConf, - listener: TcpListener, - tx: UnboundedSender, -) -> Result<()> { +pub fn thread_main(conf: SafeKeeperConf, listener: TcpListener) -> Result<()> { loop { match listener.accept() { Ok((socket, peer_addr)) => { debug!("accepted connection from {}", peer_addr); let conf = conf.clone(); - let tx_clone = tx.clone(); let _ = thread::Builder::new() .name("WAL service thread".into()) .spawn(move || { - if let Err(err) = handle_socket(socket, conf, tx_clone) { + if let Err(err) = handle_socket(socket, conf) { error!("connection handler exited: {}", err); } }) @@ -51,16 +44,12 @@ fn get_tid() -> u64 { /// This is run by `thread_main` above, inside a background thread. /// -fn handle_socket( - socket: TcpStream, - conf: SafeKeeperConf, - tx: UnboundedSender, -) -> Result<()> { +fn handle_socket(socket: TcpStream, conf: SafeKeeperConf) -> Result<()> { let _enter = info_span!("", tid = ?get_tid()).entered(); socket.set_nodelay(true)?; - let mut conn_handler = SafekeeperPostgresHandler::new(conf, tx); + let mut conn_handler = SafekeeperPostgresHandler::new(conf); let pgbackend = PostgresBackend::new(socket, AuthType::Trust, None, false)?; // libpq replication protocol between safekeeper and replicas/pagers pgbackend.run(&mut conn_handler)?; diff --git a/scripts/git-upload b/scripts/git-upload index 4649f6998d..a53987894a 100755 --- a/scripts/git-upload +++ b/scripts/git-upload @@ -80,12 +80,14 @@ class GitRepo: print('No changes detected, quitting') return - run([ + git_with_user = [ 'git', '-c', 'user.name=vipvap', '-c', 'user.email=vipvap@zenith.tech', + ] + run(git_with_user + [ 'commit', '--author="vipvap "', f'--message={message}', @@ -94,7 +96,7 @@ class GitRepo: for _ in range(5): try: run(['git', 'fetch', 'origin', branch]) - run(['git', 'rebase', f'origin/{branch}']) + run(git_with_user + ['rebase', f'origin/{branch}']) run(['git', 'push', 'origin', branch]) return diff --git a/test_runner/README.md b/test_runner/README.md index ee171ae6a0..059bbb83cc 100644 --- a/test_runner/README.md +++ b/test_runner/README.md @@ -51,7 +51,6 @@ Useful environment variables: should go. `TEST_SHARED_FIXTURES`: Try to re-use a single pageserver for all the tests. `ZENITH_PAGESERVER_OVERRIDES`: add a `;`-separated set of configs that will be passed as -`FORCE_MOCK_S3`: inits every test's pageserver with a mock S3 used as a remote storage. `--pageserver-config-override=${value}` parameter values when zenith cli is invoked `RUST_LOG`: logging configuration to pass into Zenith CLI diff --git a/test_runner/batch_others/test_ancestor_branch.py b/test_runner/batch_others/test_ancestor_branch.py index d6b073492d..5dbd6d2e26 100644 --- a/test_runner/batch_others/test_ancestor_branch.py +++ b/test_runner/batch_others/test_ancestor_branch.py @@ -10,18 +10,11 @@ from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, ZenithPageserv # Create ancestor branches off the main branch. # def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder): - - # Use safekeeper in this test to avoid a subtle race condition. - # Without safekeeper, walreceiver reconnection can stuck - # because of IO deadlock. - # - # See https://github.com/zenithdb/zenith/issues/1068 - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() # Override defaults, 1M gc_horizon and 4M checkpoint_distance. # Extend compaction_period and gc_period to disable background compaction and gc. 
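The `scripts/git-upload` change in this hunk builds the `-c user.name=.../-c user.email=...` prefix once and prepends it to both `commit` and `rebase`, since a rebase also creates commits and therefore needs a committer identity. A standalone sketch of the same pattern (branch name and commit message are placeholders; the identity values are the ones the script uses):

```python
# Reuse one list of `git -c` identity flags for every command that writes
# commits; plain `git` is enough for fetch/push.
import subprocess


def run(cmd):
    print("Running:", " ".join(cmd))
    subprocess.run(cmd, check=True)


git_with_user = [
    "git",
    "-c", "user.name=vipvap",
    "-c", "user.email=vipvap@zenith.tech",
]

run(git_with_user + ["commit", "--message=automated update"])
run(["git", "fetch", "origin", "main"])
run(git_with_user + ["rebase", "origin/main"])  # rebase rewrites commits, needs identity
run(["git", "push", "origin", "main"])
```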
- tenant = env.zenith_cli.create_tenant( + tenant, _ = env.zenith_cli.create_tenant( conf={ 'gc_period': '10 m', 'gc_horizon': '1048576', @@ -35,7 +28,6 @@ def test_ancestor_branch(zenith_env_builder: ZenithEnvBuilder): with psconn.cursor(cursor_factory=psycopg2.extras.DictCursor) as pscur: pscur.execute("failpoints flush-frozen=sleep(10000)") - env.zenith_cli.create_timeline(f'main', tenant_id=tenant) pg_branch0 = env.postgres.create_start('main', tenant_id=tenant) branch0_cur = pg_branch0.connect().cursor() branch0_cur.execute("SHOW zenith.zenith_timeline") diff --git a/test_runner/batch_others/test_backpressure.py b/test_runner/batch_others/test_backpressure.py index 6658b337ec..81f45b749b 100644 --- a/test_runner/batch_others/test_backpressure.py +++ b/test_runner/batch_others/test_backpressure.py @@ -94,7 +94,6 @@ def check_backpressure(pg: Postgres, stop_event: threading.Event, polling_interv @pytest.mark.skip("See https://github.com/neondatabase/neon/issues/1587") def test_backpressure_received_lsn_lag(zenith_env_builder: ZenithEnvBuilder): - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() # Create a branch for us env.zenith_cli.create_branch('test_backpressure') diff --git a/test_runner/batch_others/test_branch_behind.py b/test_runner/batch_others/test_branch_behind.py index 4e2be352f4..fc84af5283 100644 --- a/test_runner/batch_others/test_branch_behind.py +++ b/test_runner/batch_others/test_branch_behind.py @@ -19,6 +19,8 @@ def test_branch_behind(zenith_env_builder: ZenithEnvBuilder): # # See https://github.com/zenithdb/zenith/issues/1068 zenith_env_builder.num_safekeepers = 1 + # Disable pitr, because here we want to test branch creation after GC + zenith_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}" env = zenith_env_builder.init_start() # Branch at the point where only 100 rows were inserted diff --git a/test_runner/batch_others/test_broken_timeline.py b/test_runner/batch_others/test_broken_timeline.py new file mode 100644 index 0000000000..17eadb33b4 --- /dev/null +++ b/test_runner/batch_others/test_broken_timeline.py @@ -0,0 +1,80 @@ +import pytest +from contextlib import closing +from fixtures.zenith_fixtures import ZenithEnvBuilder +from fixtures.log_helper import log +import os + + +# Test pageserver and compute startup when some timelines have corrupted or +# missing local data. +def test_broken_timeline(zenith_env_builder: ZenithEnvBuilder): + # Use three safekeepers in this test.
+ zenith_env_builder.num_safekeepers = 3 + env = zenith_env_builder.init_start() + + tenant_timelines = [] + + for n in range(4): + tenant_id_uuid, timeline_id_uuid = env.zenith_cli.create_tenant() + tenant_id = tenant_id_uuid.hex + timeline_id = timeline_id_uuid.hex + + pg = env.postgres.create_start(f'main', tenant_id=tenant_id_uuid) + with closing(pg.connect()) as conn: + with conn.cursor() as cur: + cur.execute("CREATE TABLE t(key int primary key, value text)") + cur.execute("INSERT INTO t SELECT generate_series(1,100), 'payload'") + + cur.execute("SHOW zenith.zenith_timeline") + timeline_id = cur.fetchone()[0] + pg.stop() + tenant_timelines.append((tenant_id, timeline_id, pg)) + + # Stop the pageserver + env.pageserver.stop() + + # Leave the first timeline alone, but corrupt the others in different ways + (tenant0, timeline0, pg0) = tenant_timelines[0] + + # Corrupt metadata file on timeline 1 + (tenant1, timeline1, pg1) = tenant_timelines[1] + metadata_path = "{}/tenants/{}/timelines/{}/metadata".format(env.repo_dir, tenant1, timeline1) + print(f'overwriting metadata file at {metadata_path}') + f = open(metadata_path, "w") + f.write("overwritten with garbage!") + f.close() + + # Missing layer files on timeline 2. (This would actually work + # if we had Cloud Storage enabled in this test.) + (tenant2, timeline2, pg2) = tenant_timelines[2] + timeline_path = "{}/tenants/{}/timelines/{}/".format(env.repo_dir, tenant2, timeline2) + for filename in os.listdir(timeline_path): + if filename.startswith('00000'): + # Looks like a layer file. Remove it + os.remove(f'{timeline_path}/{filename}') + + # Corrupt layer files on timeline 3 + (tenant3, timeline3, pg3) = tenant_timelines[3] + timeline_path = "{}/tenants/{}/timelines/{}/".format(env.repo_dir, tenant3, timeline3) + for filename in os.listdir(timeline_path): + if filename.startswith('00000'): + # Looks like a layer file.
Corrupt it + f = open(f'{timeline_path}/{filename}', "w") + f.write("overwritten with garbage!") + f.close() + + env.pageserver.start() + + # Tenant 0 should still work + pg0.start() + with closing(pg0.connect()) as conn: + with conn.cursor() as cur: + cur.execute("SELECT COUNT(*) FROM t") + assert cur.fetchone()[0] == 100 + + # But all others are broken + for n in range(1, 4): + (tenant, timeline, pg) = tenant_timelines[n] + with pytest.raises(Exception, match="Cannot load local timeline") as err: + pg.start() + log.info(f'compute startup failed as expected: {err}') diff --git a/test_runner/batch_others/test_gc_aggressive.py b/test_runner/batch_others/test_gc_aggressive.py index e4e4aa9f4a..519a6dda1c 100644 --- a/test_runner/batch_others/test_gc_aggressive.py +++ b/test_runner/batch_others/test_gc_aggressive.py @@ -1,7 +1,7 @@ import asyncio import random -from fixtures.zenith_fixtures import ZenithEnv, Postgres +from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, Postgres from fixtures.log_helper import log # Test configuration @@ -50,9 +50,12 @@ async def update_and_gc(env: ZenithEnv, pg: Postgres, timeline: str): # # (repro for https://github.com/zenithdb/zenith/issues/1047) # -def test_gc_aggressive(zenith_simple_env: ZenithEnv): - env = zenith_simple_env - env.zenith_cli.create_branch("test_gc_aggressive", "empty") +def test_gc_aggressive(zenith_env_builder: ZenithEnvBuilder): + + # Disable pitr, because here we want to test branch creation after GC + zenith_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}" + env = zenith_env_builder.init_start() + env.zenith_cli.create_branch("test_gc_aggressive", "main") pg = env.postgres.create_start('test_gc_aggressive') log.info('postgres is running on test_gc_aggressive branch') diff --git a/test_runner/batch_others/test_next_xid.py b/test_runner/batch_others/test_next_xid.py index 03c27bcd70..1ab1addad3 100644 --- a/test_runner/batch_others/test_next_xid.py +++ b/test_runner/batch_others/test_next_xid.py @@ -6,8 +6,6 @@ from fixtures.zenith_fixtures import ZenithEnvBuilder # Test restarting page server, while safekeeper and compute node keep # running. def test_next_xid(zenith_env_builder: ZenithEnvBuilder): - # One safekeeper is enough for this test. - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() pg = env.postgres.create_start('main') diff --git a/test_runner/batch_others/test_old_request_lsn.py b/test_runner/batch_others/test_old_request_lsn.py index e7400cff96..cf7fe09b1e 100644 --- a/test_runner/batch_others/test_old_request_lsn.py +++ b/test_runner/batch_others/test_old_request_lsn.py @@ -1,5 +1,7 @@ -from fixtures.zenith_fixtures import ZenithEnv +from fixtures.zenith_fixtures import ZenithEnvBuilder from fixtures.log_helper import log +from fixtures.utils import print_gc_result +import psycopg2.extras # @@ -12,9 +14,11 @@ from fixtures.log_helper import log # just a hint that the page hasn't been modified since that LSN, and the page # server should return the latest page version regardless of the LSN. 
# -def test_old_request_lsn(zenith_simple_env: ZenithEnv): - env = zenith_simple_env - env.zenith_cli.create_branch("test_old_request_lsn", "empty") +def test_old_request_lsn(zenith_env_builder: ZenithEnvBuilder): + # Disable pitr, because here we want to test branch creation after GC + zenith_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '0 sec'}" + env = zenith_env_builder.init_start() + env.zenith_cli.create_branch("test_old_request_lsn", "main") pg = env.postgres.create_start('test_old_request_lsn') log.info('postgres is running on test_old_request_lsn branch') @@ -26,7 +30,7 @@ def test_old_request_lsn(zenith_simple_env: ZenithEnv): timeline = cur.fetchone()[0] psconn = env.pageserver.connect() - pscur = psconn.cursor() + pscur = psconn.cursor(cursor_factory=psycopg2.extras.DictCursor) # Create table, and insert some rows. Make it big enough that it doesn't fit in # shared_buffers. @@ -53,6 +57,9 @@ def test_old_request_lsn(zenith_simple_env: ZenithEnv): # garbage collections so that the page server will remove old page versions. for i in range(10): pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0") + row = pscur.fetchone() + print_gc_result(row) + for j in range(100): cur.execute('UPDATE foo SET val = val + 1 WHERE id = 1;') diff --git a/test_runner/batch_others/test_pageserver_api.py b/test_runner/batch_others/test_pageserver_api.py index 13f6ef358e..7fe3b4dff5 100644 --- a/test_runner/batch_others/test_pageserver_api.py +++ b/test_runner/batch_others/test_pageserver_api.py @@ -1,6 +1,12 @@ from uuid import uuid4, UUID import pytest -from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, ZenithPageserverHttpClient +from fixtures.zenith_fixtures import ( + DEFAULT_BRANCH_NAME, + ZenithEnv, + ZenithEnvBuilder, + ZenithPageserverHttpClient, + ZenithPageserverApiException, +) # test that we cannot override node id @@ -48,6 +54,39 @@ def check_client(client: ZenithPageserverHttpClient, initial_tenant: UUID): assert local_timeline_details['timeline_state'] == 'Loaded' +def test_pageserver_http_get_wal_receiver_not_found(zenith_simple_env: ZenithEnv): + env = zenith_simple_env + client = env.pageserver.http_client() + + tenant_id, timeline_id = env.zenith_cli.create_tenant() + + # no PG compute node is running, so no WAL receiver is running + with pytest.raises(ZenithPageserverApiException) as e: + _ = client.wal_receiver_get(tenant_id, timeline_id) + assert "Not Found" in str(e.value) + + +def test_pageserver_http_get_wal_receiver_success(zenith_simple_env: ZenithEnv): + env = zenith_simple_env + client = env.pageserver.http_client() + + tenant_id, timeline_id = env.zenith_cli.create_tenant() + pg = env.postgres.create_start(DEFAULT_BRANCH_NAME, tenant_id=tenant_id) + + res = client.wal_receiver_get(tenant_id, timeline_id) + assert list(res.keys()) == [ + "thread_id", + "wal_producer_connstr", + "last_received_msg_lsn", + "last_received_msg_ts", + ] + + # make a DB modification then expect getting a new WAL receiver's data + pg.safe_psql("CREATE TABLE t(key int primary key, value text)") + res2 = client.wal_receiver_get(tenant_id, timeline_id) + assert res2["last_received_msg_lsn"] > res["last_received_msg_lsn"] + + def test_pageserver_http_api_client(zenith_simple_env: ZenithEnv): env = zenith_simple_env client = env.pageserver.http_client() diff --git a/test_runner/batch_others/test_pageserver_restart.py b/test_runner/batch_others/test_pageserver_restart.py index 20e6f4467e..69f5ea85ce 100644 --- 
a/test_runner/batch_others/test_pageserver_restart.py +++ b/test_runner/batch_others/test_pageserver_restart.py @@ -5,8 +5,6 @@ from fixtures.log_helper import log # Test restarting page server, while safekeeper and compute node keep # running. def test_pageserver_restart(zenith_env_builder: ZenithEnvBuilder): - # One safekeeper is enough for this test. - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() env.zenith_cli.create_branch('test_pageserver_restart') diff --git a/test_runner/batch_others/test_pitr_gc.py b/test_runner/batch_others/test_pitr_gc.py new file mode 100644 index 0000000000..ee19bddfe8 --- /dev/null +++ b/test_runner/batch_others/test_pitr_gc.py @@ -0,0 +1,77 @@ +import subprocess +from contextlib import closing + +import psycopg2.extras +import pytest +from fixtures.log_helper import log +from fixtures.utils import print_gc_result +from fixtures.zenith_fixtures import ZenithEnvBuilder + + +# +# Check pitr_interval GC behavior. +# Insert some data, run GC and create a branch in the past. +# +def test_pitr_gc(zenith_env_builder: ZenithEnvBuilder): + + zenith_env_builder.num_safekeepers = 1 + # Set pitr interval such that we need to keep the data + zenith_env_builder.pageserver_config_override = "tenant_config={pitr_interval = '1 day', gc_horizon = 0}" + + env = zenith_env_builder.init_start() + pgmain = env.postgres.create_start('main') + log.info("postgres is running on 'main' branch") + + main_pg_conn = pgmain.connect() + main_cur = main_pg_conn.cursor() + + main_cur.execute("SHOW zenith.zenith_timeline") + timeline = main_cur.fetchone()[0] + + # Create table + main_cur.execute('CREATE TABLE foo (t text)') + + for i in range(10000): + main_cur.execute(''' + INSERT INTO foo + SELECT 'long string to consume some space'; + ''') + + if i == 99: + # keep some early lsn to test branch creation after GC + main_cur.execute('SELECT pg_current_wal_insert_lsn(), txid_current()') + res = main_cur.fetchone() + lsn_a = res[0] + xid_a = res[1] + log.info(f'LSN after 100 rows: {lsn_a} xid {xid_a}') + + main_cur.execute('SELECT pg_current_wal_insert_lsn(), txid_current()') + res = main_cur.fetchone() + debug_lsn = res[0] + debug_xid = res[1] + log.info(f'LSN after 10000 rows: {debug_lsn} xid {debug_xid}') + + # run GC + with closing(env.pageserver.connect()) as psconn: + with psconn.cursor(cursor_factory=psycopg2.extras.DictCursor) as pscur: + pscur.execute(f"compact {env.initial_tenant.hex} {timeline}") + # perform agressive GC. Data still should be kept because of the PITR setting. 
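As an aside before the `do_gc` call that follows: the pageserver management commands these tests issue (`compact`, `do_gc`) go over a regular libpq connection, and `do_gc` returns a result row whose fields `print_gc_result` in `fixtures/utils.py` logs (including the `layers_needed_by_pitr` counter added in this patch). A minimal sketch of that interaction outside the fixtures, with placeholder connection details and IDs:

```python
# Standalone sketch of the GC interaction the test below performs through the
# fixtures. Connection parameters and IDs are placeholders.
from contextlib import closing

import psycopg2
import psycopg2.extras

PAGESERVER_CONNSTR = "host=localhost port=6400 user=zenith_admin"  # placeholder
TENANT = "replace-with-tenant-hex-id"
TIMELINE = "replace-with-timeline-hex-id"

with closing(psycopg2.connect(PAGESERVER_CONNSTR)) as conn:
    conn.autocommit = True
    with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
        cur.execute(f"compact {TENANT} {TIMELINE}")
        # gc_horizon=0 asks for the most aggressive GC the tenant config allows
        cur.execute(f"do_gc {TENANT} {TIMELINE} 0")
        row = cur.fetchone()
        print("removed: {layers_removed}, kept for pitr: {layers_needed_by_pitr}".format_map(row))
```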
+ pscur.execute(f"do_gc {env.initial_tenant.hex} {timeline} 0") + row = pscur.fetchone() + print_gc_result(row) + + # Branch at the point where only 100 rows were inserted + # It must have been preserved by PITR setting + env.zenith_cli.create_branch('test_pitr_gc_hundred', 'main', ancestor_start_lsn=lsn_a) + + pg_hundred = env.postgres.create_start('test_pitr_gc_hundred') + + # On the 'hundred' branch, we should see only 100 rows + hundred_pg_conn = pg_hundred.connect() + hundred_cur = hundred_pg_conn.cursor() + hundred_cur.execute('SELECT count(*) FROM foo') + assert hundred_cur.fetchone() == (100, ) + + # All the rows are visible on the main branch + main_cur.execute('SELECT count(*) FROM foo') + assert main_cur.fetchone() == (10000, ) diff --git a/test_runner/batch_others/test_recovery.py b/test_runner/batch_others/test_recovery.py index dbfa943a7a..eb1747efa5 100644 --- a/test_runner/batch_others/test_recovery.py +++ b/test_runner/batch_others/test_recovery.py @@ -45,14 +45,14 @@ def test_pageserver_recovery(zenith_env_builder: ZenithEnvBuilder): # Configure failpoints pscur.execute( - "failpoints checkpoint-before-sync=sleep(2000);checkpoint-after-sync=panic") + "failpoints checkpoint-before-sync=sleep(2000);checkpoint-after-sync=exit") # Do some updates until pageserver is crashed try: while True: cur.execute("update foo set x=x+1") except Exception as err: - log.info(f"Excepted server crash {err}") + log.info(f"Expected server crash {err}") log.info("Wait before server restart") env.pageserver.stop() diff --git a/test_runner/batch_others/test_remote_storage.py b/test_runner/batch_others/test_remote_storage.py index e205f79957..afbe3c55c7 100644 --- a/test_runner/batch_others/test_remote_storage.py +++ b/test_runner/batch_others/test_remote_storage.py @@ -6,7 +6,7 @@ from contextlib import closing from pathlib import Path import time from uuid import UUID -from fixtures.zenith_fixtures import ZenithEnvBuilder, assert_local, wait_for, wait_for_last_record_lsn, wait_for_upload +from fixtures.zenith_fixtures import ZenithEnvBuilder, assert_local, wait_until, wait_for_last_record_lsn, wait_for_upload from fixtures.log_helper import log from fixtures.utils import lsn_from_hex, lsn_to_hex import pytest @@ -32,7 +32,6 @@ import pytest @pytest.mark.parametrize('storage_type', ['local_fs', 'mock_s3']) def test_remote_storage_backup_and_restore(zenith_env_builder: ZenithEnvBuilder, storage_type: str): # zenith_env_builder.rust_log_override = 'debug' - zenith_env_builder.num_safekeepers = 1 if storage_type == 'local_fs': zenith_env_builder.enable_local_fs_remote_storage() elif storage_type == 'mock_s3': @@ -110,9 +109,9 @@ def test_remote_storage_backup_and_restore(zenith_env_builder: ZenithEnvBuilder, client.timeline_attach(UUID(tenant_id), UUID(timeline_id)) log.info("waiting for timeline redownload") - wait_for(number_of_iterations=10, - interval=1, - func=lambda: assert_local(client, UUID(tenant_id), UUID(timeline_id))) + wait_until(number_of_iterations=10, + interval=1, + func=lambda: assert_local(client, UUID(tenant_id), UUID(timeline_id))) detail = client.timeline_detail(UUID(tenant_id), UUID(timeline_id)) assert detail['local'] is not None diff --git a/test_runner/batch_others/test_tenant_conf.py b/test_runner/batch_others/test_tenant_conf.py index b85a541f10..d627d8a6ee 100644 --- a/test_runner/batch_others/test_tenant_conf.py +++ b/test_runner/batch_others/test_tenant_conf.py @@ -16,7 +16,7 @@ tenant_config={checkpoint_distance = 10000, compaction_target_size = 1048576}''' env = 
zenith_env_builder.init_start() """Test per tenant configuration""" - tenant = env.zenith_cli.create_tenant(conf={ + tenant, _ = env.zenith_cli.create_tenant(conf={ 'checkpoint_distance': '20000', 'gc_period': '30sec', }) diff --git a/test_runner/batch_others/test_tenant_relocation.py b/test_runner/batch_others/test_tenant_relocation.py index 7e71c0a157..91506e120d 100644 --- a/test_runner/batch_others/test_tenant_relocation.py +++ b/test_runner/batch_others/test_tenant_relocation.py @@ -3,12 +3,14 @@ import os import pathlib import subprocess import threading +import typing from uuid import UUID from fixtures.log_helper import log +from typing import Optional import signal import pytest -from fixtures.zenith_fixtures import PgProtocol, PortDistributor, Postgres, ZenithEnvBuilder, ZenithPageserverHttpClient, assert_local, wait_for, wait_for_last_record_lsn, wait_for_upload, zenith_binpath, pg_distrib_dir +from fixtures.zenith_fixtures import PgProtocol, PortDistributor, Postgres, ZenithEnvBuilder, Etcd, ZenithPageserverHttpClient, assert_local, wait_until, wait_for_last_record_lsn, wait_for_upload, zenith_binpath, pg_distrib_dir from fixtures.utils import lsn_from_hex @@ -21,7 +23,8 @@ def new_pageserver_helper(new_pageserver_dir: pathlib.Path, pageserver_bin: pathlib.Path, remote_storage_mock_path: pathlib.Path, pg_port: int, - http_port: int): + http_port: int, + broker: Optional[Etcd]): """ cannot use ZenithPageserver yet because it depends on zenith cli which currently lacks support for multiple pageservers @@ -38,6 +41,9 @@ def new_pageserver_helper(new_pageserver_dir: pathlib.Path, f"-c remote_storage={{local_path='{remote_storage_mock_path}'}}", ] + if broker is not None: + cmd.append(f"-c broker_endpoints=['{broker.client_url()}']", ) + subprocess.check_output(cmd, text=True) # actually run new pageserver @@ -95,11 +101,14 @@ def load(pg: Postgres, stop_event: threading.Event, load_ok_event: threading.Eve log.info('load thread stopped') +@pytest.mark.skip( + reason= + "needs to replace callmemaybe call with better idea how to migrate timelines between pageservers" +) @pytest.mark.parametrize('with_load', ['with_load', 'without_load']) def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder, port_distributor: PortDistributor, with_load: str): - zenith_env_builder.num_safekeepers = 1 zenith_env_builder.enable_local_fs_remote_storage() env = zenith_env_builder.init_start() @@ -107,7 +116,7 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder, # create folder for remote storage mock remote_storage_mock_path = env.repo_dir / 'local_fs_remote_storage' - tenant = env.zenith_cli.create_tenant(UUID("74ee8b079a0e437eb0afea7d26a07209")) + tenant, _ = env.zenith_cli.create_tenant(UUID("74ee8b079a0e437eb0afea7d26a07209")) log.info("tenant to relocate %s", tenant) # attach does not download ancestor branches (should it?), just use root branch for now @@ -176,12 +185,13 @@ def test_tenant_relocation(zenith_env_builder: ZenithEnvBuilder, pageserver_bin, remote_storage_mock_path, new_pageserver_pg_port, - new_pageserver_http_port): + new_pageserver_http_port, + zenith_env_builder.broker): # call to attach timeline to new pageserver new_pageserver_http.timeline_attach(tenant, timeline) # new pageserver should be in sync (modulo wal tail or vacuum activity) with the old one because there was no new writes since checkpoint - new_timeline_detail = wait_for( + new_timeline_detail = wait_until( number_of_iterations=5, interval=1, func=lambda: assert_local(new_pageserver_http, 
tenant, timeline)) diff --git a/test_runner/batch_others/test_tenants.py b/test_runner/batch_others/test_tenants.py index 682af8de49..9ccb8cf196 100644 --- a/test_runner/batch_others/test_tenants.py +++ b/test_runner/batch_others/test_tenants.py @@ -1,8 +1,12 @@ from contextlib import closing - +from datetime import datetime +import os import pytest from fixtures.zenith_fixtures import ZenithEnvBuilder +from fixtures.log_helper import log +from fixtures.metrics import parse_metrics +from fixtures.utils import lsn_to_hex @pytest.mark.parametrize('with_safekeepers', [False, True]) @@ -12,8 +16,8 @@ def test_tenants_normal_work(zenith_env_builder: ZenithEnvBuilder, with_safekeep env = zenith_env_builder.init_start() """Tests tenants with and without wal acceptors""" - tenant_1 = env.zenith_cli.create_tenant() - tenant_2 = env.zenith_cli.create_tenant() + tenant_1, _ = env.zenith_cli.create_tenant() + tenant_2, _ = env.zenith_cli.create_tenant() env.zenith_cli.create_timeline(f'test_tenants_normal_work_with_safekeepers{with_safekeepers}', tenant_id=tenant_1) @@ -38,3 +42,79 @@ def test_tenants_normal_work(zenith_env_builder: ZenithEnvBuilder, with_safekeep cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'") cur.execute("SELECT sum(key) FROM t") assert cur.fetchone() == (5000050000, ) + + +def test_metrics_normal_work(zenith_env_builder: ZenithEnvBuilder): + zenith_env_builder.num_safekeepers = 3 + + env = zenith_env_builder.init_start() + tenant_1, _ = env.zenith_cli.create_tenant() + tenant_2, _ = env.zenith_cli.create_tenant() + + timeline_1 = env.zenith_cli.create_timeline('test_metrics_normal_work', tenant_id=tenant_1) + timeline_2 = env.zenith_cli.create_timeline('test_metrics_normal_work', tenant_id=tenant_2) + + pg_tenant1 = env.postgres.create_start('test_metrics_normal_work', tenant_id=tenant_1) + pg_tenant2 = env.postgres.create_start('test_metrics_normal_work', tenant_id=tenant_2) + + for pg in [pg_tenant1, pg_tenant2]: + with closing(pg.connect()) as conn: + with conn.cursor() as cur: + cur.execute("CREATE TABLE t(key int primary key, value text)") + cur.execute("INSERT INTO t SELECT generate_series(1,100000), 'payload'") + cur.execute("SELECT sum(key) FROM t") + assert cur.fetchone() == (5000050000, ) + + collected_metrics = { + "pageserver": env.pageserver.http_client().get_metrics(), + } + for sk in env.safekeepers: + collected_metrics[f'safekeeper{sk.id}'] = sk.http_client().get_metrics_str() + + for name in collected_metrics: + basepath = os.path.join(zenith_env_builder.repo_dir, f'{name}.metrics') + + with open(basepath, 'w') as stdout_f: + print(collected_metrics[name], file=stdout_f, flush=True) + + all_metrics = [parse_metrics(m, name) for name, m in collected_metrics.items()] + ps_metrics = all_metrics[0] + sk_metrics = all_metrics[1:] + + ttids = [{ + 'tenant_id': tenant_1.hex, 'timeline_id': timeline_1.hex + }, { + 'tenant_id': tenant_2.hex, 'timeline_id': timeline_2.hex + }] + + # Test metrics per timeline + for tt in ttids: + log.info(f"Checking metrics for {tt}") + + ps_lsn = int(ps_metrics.query_one("pageserver_last_record_lsn", filter=tt).value) + sk_lsns = [int(sk.query_one("safekeeper_commit_lsn", filter=tt).value) for sk in sk_metrics] + + log.info(f"ps_lsn: {lsn_to_hex(ps_lsn)}") + log.info(f"sk_lsns: {list(map(lsn_to_hex, sk_lsns))}") + + assert ps_lsn <= max(sk_lsns) + assert ps_lsn > 0 + + # Test common metrics + for metrics in all_metrics: + log.info(f"Checking common metrics for {metrics.name}") + + log.info( + 
f"process_cpu_seconds_total: {metrics.query_one('process_cpu_seconds_total').value}") + log.info(f"process_threads: {int(metrics.query_one('process_threads').value)}") + log.info( + f"process_resident_memory_bytes (MB): {metrics.query_one('process_resident_memory_bytes').value / 1024 / 1024}" + ) + log.info( + f"process_virtual_memory_bytes (MB): {metrics.query_one('process_virtual_memory_bytes').value / 1024 / 1024}" + ) + log.info(f"process_open_fds: {int(metrics.query_one('process_open_fds').value)}") + log.info(f"process_max_fds: {int(metrics.query_one('process_max_fds').value)}") + log.info( + f"process_start_time_seconds (UTC): {datetime.fromtimestamp(metrics.query_one('process_start_time_seconds').value)}" + ) diff --git a/test_runner/batch_others/test_tenants_with_remote_storage.py b/test_runner/batch_others/test_tenants_with_remote_storage.py new file mode 100644 index 0000000000..c00f077fcd --- /dev/null +++ b/test_runner/batch_others/test_tenants_with_remote_storage.py @@ -0,0 +1,97 @@ +# +# Little stress test for the checkpointing and remote storage code. +# +# The test creates several tenants, and runs a simple workload on +# each tenant, in parallel. The test uses remote storage, and a tiny +# checkpoint_distance setting so that a lot of layer files are created. +# + +import asyncio +from contextlib import closing +from uuid import UUID + +import pytest + +from fixtures.zenith_fixtures import ZenithEnvBuilder, ZenithEnv, Postgres, wait_for_last_record_lsn, wait_for_upload +from fixtures.utils import lsn_from_hex + + +async def tenant_workload(env: ZenithEnv, pg: Postgres): + pageserver_conn = await env.pageserver.connect_async() + + pg_conn = await pg.connect_async() + + tenant_id = await pg_conn.fetchval("show zenith.zenith_tenant") + timeline_id = await pg_conn.fetchval("show zenith.zenith_timeline") + + await pg_conn.execute("CREATE TABLE t(key int primary key, value text)") + for i in range(1, 100): + await pg_conn.execute( + f"INSERT INTO t SELECT {i}*1000 + g, 'payload' from generate_series(1,1000) g") + + # we rely upon autocommit after each statement + # as waiting for acceptors happens there + res = await pg_conn.fetchval("SELECT count(*) FROM t") + assert res == i * 1000 + + +async def all_tenants_workload(env: ZenithEnv, tenants_pgs): + workers = [] + for tenant, pg in tenants_pgs: + worker = tenant_workload(env, pg) + workers.append(asyncio.create_task(worker)) + + # await all workers + await asyncio.gather(*workers) + + +@pytest.mark.parametrize('storage_type', ['local_fs', 'mock_s3']) +def test_tenants_many(zenith_env_builder: ZenithEnvBuilder, storage_type: str): + + if storage_type == 'local_fs': + zenith_env_builder.enable_local_fs_remote_storage() + elif storage_type == 'mock_s3': + zenith_env_builder.enable_s3_mock_remote_storage('test_remote_storage_backup_and_restore') + else: + raise RuntimeError(f'Unknown storage type: {storage_type}') + + zenith_env_builder.enable_local_fs_remote_storage() + + env = zenith_env_builder.init_start() + + tenants_pgs = [] + + for i in range(1, 5): + # Use a tiny checkpoint distance, to create a lot of layers quickly + tenant, _ = env.zenith_cli.create_tenant( + conf={ + 'checkpoint_distance': '5000000', + }) + env.zenith_cli.create_timeline(f'test_tenants_many', tenant_id=tenant) + + pg = env.postgres.create_start( + f'test_tenants_many', + tenant_id=tenant, + ) + tenants_pgs.append((tenant, pg)) + + asyncio.run(all_tenants_workload(env, tenants_pgs)) + + # Wait for the remote storage uploads to finish + pageserver_http = 
env.pageserver.http_client() + for tenant, pg in tenants_pgs: + with closing(pg.connect()) as conn: + with conn.cursor() as cur: + cur.execute("show zenith.zenith_tenant") + tenant_id = cur.fetchone()[0] + cur.execute("show zenith.zenith_timeline") + timeline_id = cur.fetchone()[0] + cur.execute("SELECT pg_current_wal_flush_lsn()") + current_lsn = lsn_from_hex(cur.fetchone()[0]) + + # wait until pageserver receives all the data + wait_for_last_record_lsn(pageserver_http, UUID(tenant_id), UUID(timeline_id), current_lsn) + + # run final checkpoint manually to flush all the data to remote storage + env.pageserver.safe_psql(f"checkpoint {tenant_id} {timeline_id}") + wait_for_upload(pageserver_http, UUID(tenant_id), UUID(timeline_id), current_lsn) diff --git a/test_runner/batch_others/test_timeline_size.py b/test_runner/batch_others/test_timeline_size.py index db33493d61..0b33b56df3 100644 --- a/test_runner/batch_others/test_timeline_size.py +++ b/test_runner/batch_others/test_timeline_size.py @@ -70,7 +70,6 @@ def wait_for_pageserver_catchup(pgmain: Postgres, polling_interval=1, timeout=60 def test_timeline_size_quota(zenith_env_builder: ZenithEnvBuilder): - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() new_timeline_id = env.zenith_cli.create_branch('test_timeline_size_quota') diff --git a/test_runner/batch_others/test_wal_acceptor.py b/test_runner/batch_others/test_wal_acceptor.py index 702c27a79b..e1b7bd91ee 100644 --- a/test_runner/batch_others/test_wal_acceptor.py +++ b/test_runner/batch_others/test_wal_acceptor.py @@ -12,8 +12,8 @@ from contextlib import closing from dataclasses import dataclass, field from multiprocessing import Process, Value from pathlib import Path -from fixtures.zenith_fixtures import PgBin, Postgres, Safekeeper, ZenithEnv, ZenithEnvBuilder, PortDistributor, SafekeeperPort, zenith_binpath, PgProtocol -from fixtures.utils import etcd_path, get_dir_size, lsn_to_hex, mkdir_if_needed, lsn_from_hex +from fixtures.zenith_fixtures import PgBin, Etcd, Postgres, Safekeeper, ZenithEnv, ZenithEnvBuilder, PortDistributor, SafekeeperPort, zenith_binpath, PgProtocol +from fixtures.utils import get_dir_size, lsn_to_hex, mkdir_if_needed, lsn_from_hex from fixtures.log_helper import log from typing import List, Optional, Any @@ -22,7 +22,6 @@ from typing import List, Optional, Any # succeed and data is written def test_normal_work(zenith_env_builder: ZenithEnvBuilder): zenith_env_builder.num_safekeepers = 3 - zenith_env_builder.broker = True env = zenith_env_builder.init_start() env.zenith_cli.create_branch('test_safekeepers_normal_work') @@ -328,10 +327,8 @@ def test_race_conditions(zenith_env_builder: ZenithEnvBuilder, stop_value): # Test that safekeepers push their info to the broker and learn peer status from it -@pytest.mark.skipif(etcd_path() is None, reason="requires etcd which is not present in PATH") def test_broker(zenith_env_builder: ZenithEnvBuilder): zenith_env_builder.num_safekeepers = 3 - zenith_env_builder.broker = True zenith_env_builder.enable_local_fs_remote_storage() env = zenith_env_builder.init_start() @@ -371,10 +368,8 @@ def test_broker(zenith_env_builder: ZenithEnvBuilder): # Test that old WAL consumed by peers and pageserver is removed from safekeepers. 
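The tests above keep switching between the textual LSN form Postgres reports (`X/Y`) and an integer for comparisons, via `lsn_from_hex`/`lsn_to_hex` from `fixtures/utils.py`. Their bodies are not shown in this hunk, so the sketch below is only an assumption about their shape, based on the standard PostgreSQL LSN format:

```python
# Assumed implementations of the LSN helpers used by the tests above; the real
# ones live in test_runner/fixtures/utils.py. A PostgreSQL LSN "X/Y" is two
# 32-bit halves written in hex.
def lsn_from_hex(lsn_hex: str) -> int:
    hi, lo = lsn_hex.split("/")
    return (int(hi, 16) << 32) + int(lo, 16)


def lsn_to_hex(lsn: int) -> str:
    return f"{lsn >> 32:X}/{lsn & 0xFFFFFFFF:X}"


assert lsn_from_hex("0/169C3B0") == 0x169C3B0
assert lsn_to_hex(lsn_from_hex("16/B374D848")) == "16/B374D848"
```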
-@pytest.mark.skipif(etcd_path() is None, reason="requires etcd which is not present in PATH") def test_wal_removal(zenith_env_builder: ZenithEnvBuilder): zenith_env_builder.num_safekeepers = 2 - zenith_env_builder.broker = True # to advance remote_consistent_llsn zenith_env_builder.enable_local_fs_remote_storage() env = zenith_env_builder.init_start() @@ -557,8 +552,6 @@ def test_sync_safekeepers(zenith_env_builder: ZenithEnvBuilder, def test_timeline_status(zenith_env_builder: ZenithEnvBuilder): - - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() env.zenith_cli.create_branch('test_timeline_status') @@ -599,6 +592,9 @@ class SafekeeperEnv: num_safekeepers: int = 1): self.repo_dir = repo_dir self.port_distributor = port_distributor + self.broker = Etcd(datadir=os.path.join(self.repo_dir, "etcd"), + port=self.port_distributor.get_port(), + peer_port=self.port_distributor.get_port()) self.pg_bin = pg_bin self.num_safekeepers = num_safekeepers self.bin_safekeeper = os.path.join(str(zenith_binpath), 'safekeeper') @@ -645,6 +641,8 @@ class SafekeeperEnv: safekeeper_dir, "--id", str(i), + "--broker-endpoints", + self.broker.client_url(), "--daemonize" ] @@ -698,7 +696,6 @@ def test_safekeeper_without_pageserver(test_output_dir: str, repo_dir, port_distributor, pg_bin, - num_safekeepers=1, ) with env: @@ -850,3 +847,116 @@ def test_wal_deleted_after_broadcast(zenith_env_builder: ZenithEnvBuilder): # there shouldn't be more than 2 WAL segments (but dir may have archive_status files) assert wal_size_after_checkpoint < 16 * 2.5 + + +def test_delete_force(zenith_env_builder: ZenithEnvBuilder): + zenith_env_builder.num_safekeepers = 1 + env = zenith_env_builder.init_start() + + # Create two tenants: one will be deleted, the other should be preserved. + tenant_id = env.initial_tenant.hex + timeline_id_1 = env.zenith_cli.create_branch('br1').hex # Active, delete explicitly + timeline_id_2 = env.zenith_cli.create_branch('br2').hex # Inactive, delete explicitly + timeline_id_3 = env.zenith_cli.create_branch('br3').hex # Active, delete with the tenant + timeline_id_4 = env.zenith_cli.create_branch('br4').hex # Inactive, delete with the tenant + + tenant_id_other_uuid, timeline_id_other_uuid = env.zenith_cli.create_tenant() + tenant_id_other = tenant_id_other_uuid.hex + timeline_id_other = timeline_id_other_uuid.hex + + # Populate branches + pg_1 = env.postgres.create_start('br1') + pg_2 = env.postgres.create_start('br2') + pg_3 = env.postgres.create_start('br3') + pg_4 = env.postgres.create_start('br4') + pg_other = env.postgres.create_start('main', tenant_id=uuid.UUID(hex=tenant_id_other)) + for pg in [pg_1, pg_2, pg_3, pg_4, pg_other]: + with closing(pg.connect()) as conn: + with conn.cursor() as cur: + cur.execute('CREATE TABLE t(key int primary key)') + sk = env.safekeepers[0] + sk_data_dir = Path(sk.data_dir()) + sk_http = sk.http_client() + assert (sk_data_dir / tenant_id / timeline_id_1).is_dir() + assert (sk_data_dir / tenant_id / timeline_id_2).is_dir() + assert (sk_data_dir / tenant_id / timeline_id_3).is_dir() + assert (sk_data_dir / tenant_id / timeline_id_4).is_dir() + assert (sk_data_dir / tenant_id_other / timeline_id_other).is_dir() + + # Stop branches which should be inactive and restart Safekeeper to drop its in-memory state.
+ pg_2.stop_and_destroy() + pg_4.stop_and_destroy() + sk.stop() + sk.start() + + # Ensure connections to Safekeeper are established + for pg in [pg_1, pg_3, pg_other]: + with closing(pg.connect()) as conn: + with conn.cursor() as cur: + cur.execute('INSERT INTO t (key) VALUES (1)') + + # Remove initial tenant's br1 (active) + assert sk_http.timeline_delete_force(tenant_id, timeline_id_1) == { + "dir_existed": True, + "was_active": True, + } + assert not (sk_data_dir / tenant_id / timeline_id_1).exists() + assert (sk_data_dir / tenant_id / timeline_id_2).is_dir() + assert (sk_data_dir / tenant_id / timeline_id_3).is_dir() + assert (sk_data_dir / tenant_id / timeline_id_4).is_dir() + assert (sk_data_dir / tenant_id_other / timeline_id_other).is_dir() + + # Ensure repeated deletion succeeds + assert sk_http.timeline_delete_force(tenant_id, timeline_id_1) == { + "dir_existed": False, "was_active": False + } + assert not (sk_data_dir / tenant_id / timeline_id_1).exists() + assert (sk_data_dir / tenant_id / timeline_id_2).is_dir() + assert (sk_data_dir / tenant_id / timeline_id_3).is_dir() + assert (sk_data_dir / tenant_id / timeline_id_4).is_dir() + assert (sk_data_dir / tenant_id_other / timeline_id_other).is_dir() + + # Remove initial tenant's br2 (inactive) + assert sk_http.timeline_delete_force(tenant_id, timeline_id_2) == { + "dir_existed": True, + "was_active": False, + } + assert not (sk_data_dir / tenant_id / timeline_id_1).exists() + assert not (sk_data_dir / tenant_id / timeline_id_2).exists() + assert (sk_data_dir / tenant_id / timeline_id_3).is_dir() + assert (sk_data_dir / tenant_id / timeline_id_4).is_dir() + assert (sk_data_dir / tenant_id_other / timeline_id_other).is_dir() + + # Remove non-existing branch, should succeed + assert sk_http.timeline_delete_force(tenant_id, '00' * 16) == { + "dir_existed": False, + "was_active": False, + } + assert not (sk_data_dir / tenant_id / timeline_id_1).exists() + assert not (sk_data_dir / tenant_id / timeline_id_2).exists() + assert (sk_data_dir / tenant_id / timeline_id_3).exists() + assert (sk_data_dir / tenant_id / timeline_id_4).is_dir() + assert (sk_data_dir / tenant_id_other / timeline_id_other).is_dir() + + # Remove initial tenant fully (two branches are active) + response = sk_http.tenant_delete_force(tenant_id) + assert response == { + timeline_id_3: { + "dir_existed": True, + "was_active": True, + } + } + assert not (sk_data_dir / tenant_id).exists() + assert (sk_data_dir / tenant_id_other / timeline_id_other).is_dir() + + # Remove initial tenant again. 
+ response = sk_http.tenant_delete_force(tenant_id) + assert response == {} + assert not (sk_data_dir / tenant_id).exists() + assert (sk_data_dir / tenant_id_other / timeline_id_other).is_dir() + + # Ensure the other tenant still works + sk_http.timeline_status(tenant_id_other, timeline_id_other) + with closing(pg_other.connect()) as conn: + with conn.cursor() as cur: + cur.execute('INSERT INTO t (key) VALUES (123)') diff --git a/test_runner/batch_others/test_wal_restore.py b/test_runner/batch_others/test_wal_restore.py index b0f34f4aae..f4aceac5e8 100644 --- a/test_runner/batch_others/test_wal_restore.py +++ b/test_runner/batch_others/test_wal_restore.py @@ -15,7 +15,6 @@ def test_wal_restore(zenith_env_builder: ZenithEnvBuilder, pg_bin: PgBin, test_output_dir, port_distributor: PortDistributor): - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() env.zenith_cli.create_branch("test_wal_restore") pg = env.postgres.create_start('test_wal_restore') diff --git a/test_runner/batch_others/test_zenith_cli.py b/test_runner/batch_others/test_zenith_cli.py index 091d9ac8ba..103d51aae5 100644 --- a/test_runner/batch_others/test_zenith_cli.py +++ b/test_runner/batch_others/test_zenith_cli.py @@ -1,7 +1,7 @@ import uuid import requests -from fixtures.zenith_fixtures import ZenithEnv, ZenithEnvBuilder, ZenithPageserverHttpClient +from fixtures.zenith_fixtures import DEFAULT_BRANCH_NAME, ZenithEnv, ZenithEnvBuilder, ZenithPageserverHttpClient from typing import cast @@ -64,13 +64,13 @@ def test_cli_tenant_list(zenith_simple_env: ZenithEnv): helper_compare_tenant_list(pageserver_http_client, env) # Create new tenant - tenant1 = env.zenith_cli.create_tenant() + tenant1, _ = env.zenith_cli.create_tenant() # check tenant1 appeared helper_compare_tenant_list(pageserver_http_client, env) # Create new tenant - tenant2 = env.zenith_cli.create_tenant() + tenant2, _ = env.zenith_cli.create_tenant() # check tenant2 appeared helper_compare_tenant_list(pageserver_http_client, env) @@ -83,9 +83,17 @@ def test_cli_tenant_list(zenith_simple_env: ZenithEnv): assert tenant2.hex in tenants +def test_cli_tenant_create(zenith_simple_env: ZenithEnv): + env = zenith_simple_env + tenant_id, _ = env.zenith_cli.create_tenant() + timelines = env.zenith_cli.list_timelines(tenant_id) + + # an initial timeline should be created upon tenant creation + assert len(timelines) == 1 + assert timelines[0][0] == DEFAULT_BRANCH_NAME + + def test_cli_ipv4_listeners(zenith_env_builder: ZenithEnvBuilder): - # Start with single sk - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() # Connect to sk port on v4 loopback @@ -101,8 +109,6 @@ def test_cli_ipv4_listeners(zenith_env_builder: ZenithEnvBuilder): def test_cli_start_stop(zenith_env_builder: ZenithEnvBuilder): - # Start with single sk - zenith_env_builder.num_safekeepers = 1 env = zenith_env_builder.init_start() # Stop default ps/sk diff --git a/test_runner/batch_pg_regress/test_isolation.py b/test_runner/batch_pg_regress/test_isolation.py index cde56d9b88..7c99c04fe3 100644 --- a/test_runner/batch_pg_regress/test_isolation.py +++ b/test_runner/batch_pg_regress/test_isolation.py @@ -1,9 +1,12 @@ import os - +import pytest from fixtures.utils import mkdir_if_needed from fixtures.zenith_fixtures import ZenithEnv, base_dir, pg_distrib_dir +# The isolation tests run for a long time, especially in debug mode, +# so use a larger-than-default timeout. 
+@pytest.mark.timeout(1800) def test_isolation(zenith_simple_env: ZenithEnv, test_output_dir, pg_bin, capsys): env = zenith_simple_env diff --git a/test_runner/batch_pg_regress/test_pg_regress.py b/test_runner/batch_pg_regress/test_pg_regress.py index 07d2574f4a..be7776113a 100644 --- a/test_runner/batch_pg_regress/test_pg_regress.py +++ b/test_runner/batch_pg_regress/test_pg_regress.py @@ -1,9 +1,12 @@ import os - +import pytest from fixtures.utils import mkdir_if_needed from fixtures.zenith_fixtures import ZenithEnv, check_restored_datadir_content, base_dir, pg_distrib_dir +# The pg_regress tests run for a long time, especially in debug mode, +# so use a larger-than-default timeout. +@pytest.mark.timeout(1800) def test_pg_regress(zenith_simple_env: ZenithEnv, test_output_dir: str, pg_bin, capsys): env = zenith_simple_env diff --git a/test_runner/fixtures/benchmark_fixture.py b/test_runner/fixtures/benchmark_fixture.py index 0735f16d73..5fc6076f51 100644 --- a/test_runner/fixtures/benchmark_fixture.py +++ b/test_runner/fixtures/benchmark_fixture.py @@ -236,14 +236,14 @@ class ZenithBenchmarker: """ Fetch the "cumulative # of bytes written" metric from the pageserver """ - metric_name = r'pageserver_disk_io_bytes{io_operation="write"}' + metric_name = r'libmetrics_disk_io_bytes_total{io_operation="write"}' return self.get_int_counter_value(pageserver, metric_name) def get_peak_mem(self, pageserver) -> int: """ Fetch the "maxrss" metric from the pageserver """ - metric_name = r'pageserver_maxrss_kb' + metric_name = r'libmetrics_maxrss_kb' return self.get_int_counter_value(pageserver, metric_name) def get_int_counter_value(self, pageserver, metric_name) -> int: diff --git a/test_runner/fixtures/compare_fixtures.py b/test_runner/fixtures/compare_fixtures.py index d70f57aa52..d572901ed1 100644 --- a/test_runner/fixtures/compare_fixtures.py +++ b/test_runner/fixtures/compare_fixtures.py @@ -106,9 +106,9 @@ class ZenithCompare(PgCompare): report=MetricReport.LOWER_IS_BETTER) total_files = self.zenbenchmark.get_int_counter_value( - self.env.pageserver, "pageserver_num_persistent_files_created") + self.env.pageserver, "pageserver_created_persistent_files_total") total_bytes = self.zenbenchmark.get_int_counter_value( - self.env.pageserver, "pageserver_persistent_bytes_written") + self.env.pageserver, "pageserver_written_persistent_bytes_total") self.zenbenchmark.record("data_uploaded", total_bytes / (1024 * 1024), "MB", diff --git a/test_runner/fixtures/metrics.py b/test_runner/fixtures/metrics.py new file mode 100644 index 0000000000..6fc62c6ea9 --- /dev/null +++ b/test_runner/fixtures/metrics.py @@ -0,0 +1,38 @@ +from dataclasses import dataclass +from prometheus_client.parser import text_string_to_metric_families +from prometheus_client.samples import Sample +from typing import Dict, List +from collections import defaultdict + +from fixtures.log_helper import log + + +class Metrics: + metrics: Dict[str, List[Sample]] + name: str + + def __init__(self, name: str = ""): + self.metrics = defaultdict(list) + self.name = name + + def query_all(self, name: str, filter: Dict[str, str]) -> List[Sample]: + res = [] + for sample in self.metrics[name]: + if all(sample.labels[k] == v for k, v in filter.items()): + res.append(sample) + return res + + def query_one(self, name: str, filter: Dict[str, str] = {}) -> Sample: + res = self.query_all(name, filter) + assert len(res) == 1, f"expected single sample for {name} {filter}, found {res}" + return res[0] + + +def parse_metrics(text: str, name: str = ""): + 
metrics = Metrics(name) + gen = text_string_to_metric_families(text) + for family in gen: + for sample in family.samples: + metrics.metrics[sample.name].append(sample) + + return metrics diff --git a/test_runner/fixtures/utils.py b/test_runner/fixtures/utils.py index 98af511036..ba9bc6e113 100644 --- a/test_runner/fixtures/utils.py +++ b/test_runner/fixtures/utils.py @@ -1,8 +1,9 @@ import os import shutil import subprocess +from pathlib import Path -from typing import Any, List +from typing import Any, List, Optional from fixtures.log_helper import log @@ -75,13 +76,17 @@ def lsn_from_hex(lsn_hex: str) -> int: def print_gc_result(row): log.info("GC duration {elapsed} ms".format_map(row)) log.info( - " total: {layers_total}, needed_by_cutoff {layers_needed_by_cutoff}, needed_by_branches: {layers_needed_by_branches}, not_updated: {layers_not_updated}, removed: {layers_removed}" + " total: {layers_total}, needed_by_cutoff {layers_needed_by_cutoff}, needed_by_pitr {layers_needed_by_pitr}" + " needed_by_branches: {layers_needed_by_branches}, not_updated: {layers_not_updated}, removed: {layers_removed}" .format_map(row)) -# path to etcd binary or None if not present. -def etcd_path(): - return shutil.which("etcd") +def etcd_path() -> Path: + path_output = shutil.which("etcd") + if path_output is None: + raise RuntimeError('etcd not found in PATH') + else: + return Path(path_output) # Traverse directory to get total size. diff --git a/test_runner/fixtures/zenith_fixtures.py b/test_runner/fixtures/zenith_fixtures.py index 3bb7c606d3..8f9bf1c11b 100644 --- a/test_runner/fixtures/zenith_fixtures.py +++ b/test_runner/fixtures/zenith_fixtures.py @@ -34,7 +34,12 @@ from typing_extensions import Literal import requests import backoff # type: ignore -from .utils import (etcd_path, get_self_dir, mkdir_if_needed, subprocess_capture, lsn_from_hex) +from .utils import (etcd_path, + get_self_dir, + mkdir_if_needed, + subprocess_capture, + lsn_from_hex, + lsn_to_hex) from fixtures.log_helper import log """ This file contains pytest fixtures. 
A fixture is a test resource that can be @@ -61,7 +66,7 @@ DEFAULT_POSTGRES_DIR = 'tmp_install' DEFAULT_BRANCH_NAME = 'main' BASE_PORT = 15000 -WORKER_PORT_NUM = 100 +WORKER_PORT_NUM = 1000 def pytest_addoption(parser): @@ -178,7 +183,7 @@ def shareable_scope(fixture_name, config) -> Literal["session", "function"]: return 'function' if os.environ.get('TEST_SHARED_FIXTURES') is None else 'session' -@pytest.fixture(scope=shareable_scope) +@pytest.fixture(scope='session') def worker_seq_no(worker_id: str): # worker_id is a pytest-xdist fixture # it can be master or gw @@ -189,7 +194,7 @@ def worker_seq_no(worker_id: str): return int(worker_id[2:]) -@pytest.fixture(scope=shareable_scope) +@pytest.fixture(scope='session') def worker_base_port(worker_seq_no: int): # so we divide ports in ranges of 100 ports # so workers have disjoint set of ports for services @@ -242,11 +247,30 @@ class PortDistributor: 'port range configured for test is exhausted, consider enlarging the range') -@pytest.fixture(scope=shareable_scope) +@pytest.fixture(scope='session') def port_distributor(worker_base_port): return PortDistributor(base_port=worker_base_port, port_number=WORKER_PORT_NUM) +@pytest.fixture(scope='session') +def default_broker(request: Any, port_distributor: PortDistributor): + client_port = port_distributor.get_port() + # multiple pytest sessions could get launched in parallel, get them different datadirs + etcd_datadir = os.path.join(get_test_output_dir(request), f"etcd_datadir_{client_port}") + pathlib.Path(etcd_datadir).mkdir(exist_ok=True, parents=True) + + broker = Etcd(datadir=etcd_datadir, port=client_port, peer_port=port_distributor.get_port()) + yield broker + broker.stop() + + +@pytest.fixture(scope='session') +def mock_s3_server(port_distributor: PortDistributor): + mock_s3_server = MockS3Server(port_distributor.get_port()) + yield mock_s3_server + mock_s3_server.kill() + + class PgProtocol: """ Reusable connection logic """ def __init__(self, **kwargs): @@ -369,7 +393,10 @@ class MockS3Server: ): self.port = port - self.subprocess = subprocess.Popen([f'poetry run moto_server s3 -p{port}'], shell=True) + # XXX: do not use `shell=True` or add `exec ` to the command here otherwise. + # We use `self.subprocess.kill()` to shut down the server, which would not "just" work in Linux + # if a process is started from the shell process. + self.subprocess = subprocess.Popen(['poetry', 'run', 'moto_server', 's3', f'-p{port}']) error = None try: return_code = self.subprocess.poll() @@ -379,7 +406,7 @@ class MockS3Server: error = f"expected mock s3 server to start but it failed with exception: {e}. 
stdout: '{self.subprocess.stdout}', stderr: '{self.subprocess.stderr}'" if error is not None: log.error(error) - self.subprocess.kill() + self.kill() raise RuntimeError("failed to start s3 mock server") def endpoint(self) -> str: @@ -410,31 +437,26 @@ class ZenithEnvBuilder: def __init__(self, repo_dir: Path, port_distributor: PortDistributor, - pageserver_remote_storage: Optional[RemoteStorage] = None, + broker: Etcd, + mock_s3_server: MockS3Server, + remote_storage: Optional[RemoteStorage] = None, pageserver_config_override: Optional[str] = None, - num_safekeepers: int = 0, + num_safekeepers: int = 1, pageserver_auth_enabled: bool = False, rust_log_override: Optional[str] = None, - default_branch_name=DEFAULT_BRANCH_NAME, - broker: bool = False): + default_branch_name=DEFAULT_BRANCH_NAME): self.repo_dir = repo_dir self.rust_log_override = rust_log_override self.port_distributor = port_distributor - self.pageserver_remote_storage = pageserver_remote_storage + self.remote_storage = remote_storage + self.broker = broker + self.mock_s3_server = mock_s3_server self.pageserver_config_override = pageserver_config_override self.num_safekeepers = num_safekeepers self.pageserver_auth_enabled = pageserver_auth_enabled self.default_branch_name = default_branch_name - self.broker = broker self.env: Optional[ZenithEnv] = None - self.s3_mock_server: Optional[MockS3Server] = None - - if os.getenv('FORCE_MOCK_S3') is not None: - bucket_name = f'{repo_dir.name}_bucket' - log.warning(f'Unconditionally initializing mock S3 server for bucket {bucket_name}') - self.enable_s3_mock_remote_storage(bucket_name) - def init(self) -> ZenithEnv: # Cannot create more than one environment from one builder assert self.env is None, "environment already initialized" @@ -455,9 +477,8 @@ class ZenithEnvBuilder: """ def enable_local_fs_remote_storage(self, force_enable=True): - assert force_enable or self.pageserver_remote_storage is None, "remote storage is enabled already" - self.pageserver_remote_storage = LocalFsStorage( - Path(self.repo_dir / 'local_fs_remote_storage')) + assert force_enable or self.remote_storage is None, "remote storage is enabled already" + self.remote_storage = LocalFsStorage(Path(self.repo_dir / 'local_fs_remote_storage')) """ Sets up the pageserver to use the S3 mock server, creates the bucket, if it's not present already. 
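The `MockS3Server` comment above explains the `Popen` change: the moto server is now started from an argv list so that `self.subprocess.kill()` (via the new `self.kill()` path) signals the server process itself rather than an intermediate shell. A minimal sketch of the resulting start/stop pattern; the port is a placeholder and the readiness handling is illustrative:

```python
# Start moto's S3 mock without a shell wrapper so that .kill() reaches the
# process we actually launched, as the comment above argues. Port is a
# placeholder.
import subprocess
import time

port = 5000
proc = subprocess.Popen(["poetry", "run", "moto_server", "s3", f"-p{port}"])
try:
    time.sleep(1)  # in real code, poll the endpoint instead of sleeping
    assert proc.poll() is None, "mock S3 server exited prematurely"
    # ... point remote_storage at http://127.0.0.1:{port} and run the test ...
finally:
    proc.kill()
    proc.wait()
```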
@@ -466,22 +487,19 @@ class ZenithEnvBuilder: """ def enable_s3_mock_remote_storage(self, bucket_name: str, force_enable=True): - assert force_enable or self.pageserver_remote_storage is None, "remote storage is enabled already" - if not self.s3_mock_server: - self.s3_mock_server = MockS3Server(self.port_distributor.get_port()) - - mock_endpoint = self.s3_mock_server.endpoint() - mock_region = self.s3_mock_server.region() + assert force_enable or self.remote_storage is None, "remote storage is enabled already" + mock_endpoint = self.mock_s3_server.endpoint() + mock_region = self.mock_s3_server.region() boto3.client( 's3', endpoint_url=mock_endpoint, region_name=mock_region, - aws_access_key_id=self.s3_mock_server.access_key(), - aws_secret_access_key=self.s3_mock_server.secret_key(), + aws_access_key_id=self.mock_s3_server.access_key(), + aws_secret_access_key=self.mock_s3_server.secret_key(), ).create_bucket(Bucket=bucket_name) - self.pageserver_remote_storage = S3Storage(bucket=bucket_name, - endpoint=mock_endpoint, - region=mock_region) + self.remote_storage = S3Storage(bucket=bucket_name, + endpoint=mock_endpoint, + region=mock_region) def __enter__(self): return self @@ -495,10 +513,6 @@ class ZenithEnvBuilder: for sk in self.env.safekeepers: sk.stop(immediate=True) self.env.pageserver.stop(immediate=True) - if self.s3_mock_server: - self.s3_mock_server.kill() - if self.env.broker is not None: - self.env.broker.stop() class ZenithEnv: @@ -537,10 +551,12 @@ class ZenithEnv: self.repo_dir = config.repo_dir self.rust_log_override = config.rust_log_override self.port_distributor = config.port_distributor - self.s3_mock_server = config.s3_mock_server + self.s3_mock_server = config.mock_s3_server self.zenith_cli = ZenithCli(env=self) self.postgres = PostgresFactory(self) self.safekeepers: List[Safekeeper] = [] + self.broker = config.broker + self.remote_storage = config.remote_storage # generate initial tenant ID here instead of letting 'zenith init' generate it, # so that we don't need to dig it out of the config file afterwards. 
@@ -551,14 +567,10 @@ class ZenithEnv: default_tenant_id = '{self.initial_tenant.hex}' """) - self.broker = None - if config.broker: - # keep etcd datadir inside 'repo' - self.broker = Etcd(datadir=os.path.join(self.repo_dir, "etcd"), - port=self.port_distributor.get_port(), - peer_port=self.port_distributor.get_port()) - toml += textwrap.dedent(f""" - broker_endpoints = 'http://127.0.0.1:{self.broker.port}' + toml += textwrap.dedent(f""" + [etcd_broker] + broker_endpoints = ['{self.broker.client_url()}'] + etcd_binary_path = '{self.broker.binary_path}' """) # Create config for pageserver @@ -579,7 +591,6 @@ class ZenithEnv: # Create a corresponding ZenithPageserver object self.pageserver = ZenithPageserver(self, port=pageserver_port, - remote_storage=config.pageserver_remote_storage, config_override=config.pageserver_config_override) # Create config and a Safekeeper object for each safekeeper @@ -603,15 +614,13 @@ class ZenithEnv: self.zenith_cli.init(toml) def start(self): - # Start up the page server, all the safekeepers and the broker + # Start up broker, pageserver and all safekeepers + self.broker.try_start() self.pageserver.start() for safekeeper in self.safekeepers: safekeeper.start() - if self.broker is not None: - self.broker.start() - def get_safekeeper_connstrs(self) -> str: """ Get list of safekeeper endpoints suitable for safekeepers GUC """ return ','.join([f'localhost:{wa.port.pg}' for wa in self.safekeepers]) @@ -624,7 +633,10 @@ class ZenithEnv: @pytest.fixture(scope=shareable_scope) -def _shared_simple_env(request: Any, port_distributor) -> Iterator[ZenithEnv]: +def _shared_simple_env(request: Any, + port_distributor: PortDistributor, + mock_s3_server: MockS3Server, + default_broker: Etcd) -> Iterator[ZenithEnv]: """ Internal fixture backing the `zenith_simple_env` fixture. If TEST_SHARED_FIXTURES is set, this is shared by all tests using `zenith_simple_env`. @@ -638,7 +650,8 @@ def _shared_simple_env(request: Any, port_distributor) -> Iterator[ZenithEnv]: repo_dir = os.path.join(str(top_output_dir), "shared_repo") shutil.rmtree(repo_dir, ignore_errors=True) - with ZenithEnvBuilder(Path(repo_dir), port_distributor) as builder: + with ZenithEnvBuilder(Path(repo_dir), port_distributor, default_broker, + mock_s3_server) as builder: env = builder.init_start() # For convenience in tests, create a branch from the freshly-initialized cluster. @@ -660,12 +673,13 @@ def zenith_simple_env(_shared_simple_env: ZenithEnv) -> Iterator[ZenithEnv]: yield _shared_simple_env _shared_simple_env.postgres.stop_all() - if _shared_simple_env.s3_mock_server: - _shared_simple_env.s3_mock_server.kill() @pytest.fixture(scope='function') -def zenith_env_builder(test_output_dir, port_distributor) -> Iterator[ZenithEnvBuilder]: +def zenith_env_builder(test_output_dir, + port_distributor: PortDistributor, + mock_s3_server: MockS3Server, + default_broker: Etcd) -> Iterator[ZenithEnvBuilder]: """ Fixture to create a Zenith environment for test. 
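With this change the builder always writes an `[etcd_broker]` table into the pageserver config, replacing the old optional flat `broker_endpoints` key. A small sketch of the fragment this produces for a hypothetical local etcd (the port and binary path are invented):

```python
# Render the [etcd_broker] fragment the fixture appends to the pageserver
# config, for a made-up broker. Values are placeholders.
import textwrap


class FakeBroker:
    port = 2379
    binary_path = "/usr/local/bin/etcd"

    def client_url(self) -> str:
        return f"http://127.0.0.1:{self.port}"


broker = FakeBroker()
print(textwrap.dedent(f"""
    [etcd_broker]
    broker_endpoints = ['{broker.client_url()}']
    etcd_binary_path = '{broker.binary_path}'
"""))
```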
@@ -683,7 +697,8 @@ def zenith_env_builder(test_output_dir, port_distributor) -> Iterator[ZenithEnvB
     repo_dir = os.path.join(test_output_dir, "repo")

     # Return the builder to the caller
-    with ZenithEnvBuilder(Path(repo_dir), port_distributor) as builder:
+    with ZenithEnvBuilder(Path(repo_dir), port_distributor, default_broker,
+                          mock_s3_server) as builder:
         yield builder
@@ -786,6 +801,15 @@ class ZenithPageserverHttpClient(requests.Session):
         assert isinstance(res_json, dict)
         return res_json

+    def wal_receiver_get(self, tenant_id: uuid.UUID, timeline_id: uuid.UUID) -> Dict[Any, Any]:
+        res = self.get(
+            f"http://localhost:{self.port}/v1/tenant/{tenant_id.hex}/timeline/{timeline_id.hex}/wal_receiver"
+        )
+        self.verbose_error(res)
+        res_json = res.json()
+        assert isinstance(res_json, dict)
+        return res_json
+
     def get_metrics(self) -> str:
         res = self.get(f"http://localhost:{self.port}/metrics")
         self.verbose_error(res)
@@ -831,20 +855,25 @@ class ZenithCli:

     def create_tenant(self,
                       tenant_id: Optional[uuid.UUID] = None,
-                      conf: Optional[Dict[str, str]] = None) -> uuid.UUID:
+                      timeline_id: Optional[uuid.UUID] = None,
+                      conf: Optional[Dict[str, str]] = None) -> Tuple[uuid.UUID, uuid.UUID]:
         """
         Creates a new tenant, returns its id and its initial timeline's id.
         """
         if tenant_id is None:
             tenant_id = uuid.uuid4()
+        if timeline_id is None:
+            timeline_id = uuid.uuid4()
         if conf is None:
-            res = self.raw_cli(['tenant', 'create', '--tenant-id', tenant_id.hex])
+            res = self.raw_cli([
+                'tenant', 'create', '--tenant-id', tenant_id.hex, '--timeline-id', timeline_id.hex
+            ])
         else:
-            res = self.raw_cli(
-                ['tenant', 'create', '--tenant-id', tenant_id.hex] +
-                sum(list(map(lambda kv: (['-c', kv[0] + ':' + kv[1]]), conf.items())), []))
+            res = self.raw_cli([
+                'tenant', 'create', '--tenant-id', tenant_id.hex, '--timeline-id', timeline_id.hex
+            ] + sum(list(map(lambda kv: (['-c', kv[0] + ':' + kv[1]]), conf.items())), []))
         res.check_returncode()
-        return tenant_id
+        return tenant_id, timeline_id

     def config_tenant(self, tenant_id: uuid.UUID, conf: Dict[str, str]):
         """
@@ -966,9 +995,10 @@ class ZenithCli:
         cmd = ['init', f'--config={tmp.name}']
         if initial_timeline_id:
             cmd.extend(['--timeline-id', initial_timeline_id.hex])
-        append_pageserver_param_overrides(cmd,
-                                          self.env.pageserver.remote_storage,
-                                          self.env.pageserver.config_override)
+        append_pageserver_param_overrides(
+            params_to_update=cmd,
+            remote_storage=self.env.remote_storage,
+            pageserver_config_override=self.env.pageserver.config_override)

         res = self.raw_cli(cmd)
         res.check_returncode()
@@ -989,9 +1019,10 @@ class ZenithCli:

     def pageserver_start(self, overrides=()) -> 'subprocess.CompletedProcess[str]':
         start_args = ['pageserver', 'start', *overrides]
-        append_pageserver_param_overrides(start_args,
-                                          self.env.pageserver.remote_storage,
-                                          self.env.pageserver.config_override)
+        append_pageserver_param_overrides(
+            params_to_update=start_args,
+            remote_storage=self.env.remote_storage,
+            pageserver_config_override=self.env.pageserver.config_override)

         s3_env_vars = None
         if self.env.s3_mock_server:
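Callers of `create_tenant()` now receive a `(tenant_id, timeline_id)` tuple, which pairs naturally with the new `wal_receiver_get()` wrapper added above. A hedged sketch of a test body (the `http_client()` accessor on the pageserver object is assumed here, it is not part of these hunks):

    tenant_id, timeline_id = env.zenith_cli.create_tenant()
    ps_http = env.pageserver.http_client()  # assumed accessor, not shown in this diff
    state = ps_http.wal_receiver_get(tenant_id, timeline_id)
    log.info(f'wal receiver state for {timeline_id.hex}: {state}')

Existing callers that do not care about the timeline id simply unpack and discard it, as `test_bulk_tenant_create.py` does further down.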
""" - def __init__(self, - env: ZenithEnv, - port: PageserverPort, - remote_storage: Optional[RemoteStorage] = None, - config_override: Optional[str] = None): + def __init__(self, env: ZenithEnv, port: PageserverPort, config_override: Optional[str] = None): super().__init__(host='localhost', port=port.pg, user='zenith_admin') self.env = env self.running = False self.service_port = port - self.remote_storage = remote_storage self.config_override = config_override def start(self, overrides=()) -> 'ZenithPageserver': @@ -1210,21 +1236,21 @@ class ZenithPageserver(PgProtocol): def append_pageserver_param_overrides( params_to_update: List[str], - pageserver_remote_storage: Optional[RemoteStorage], + remote_storage: Optional[RemoteStorage], pageserver_config_override: Optional[str] = None, ): - if pageserver_remote_storage is not None: - if isinstance(pageserver_remote_storage, LocalFsStorage): - pageserver_storage_override = f"local_path='{pageserver_remote_storage.root}'" - elif isinstance(pageserver_remote_storage, S3Storage): - pageserver_storage_override = f"bucket_name='{pageserver_remote_storage.bucket}',\ - bucket_region='{pageserver_remote_storage.region}'" + if remote_storage is not None: + if isinstance(remote_storage, LocalFsStorage): + pageserver_storage_override = f"local_path='{remote_storage.root}'" + elif isinstance(remote_storage, S3Storage): + pageserver_storage_override = f"bucket_name='{remote_storage.bucket}',\ + bucket_region='{remote_storage.region}'" - if pageserver_remote_storage.endpoint is not None: - pageserver_storage_override += f",endpoint='{pageserver_remote_storage.endpoint}'" + if remote_storage.endpoint is not None: + pageserver_storage_override += f",endpoint='{remote_storage.endpoint}'" else: - raise Exception(f'Unknown storage configuration {pageserver_remote_storage}') + raise Exception(f'Unknown storage configuration {remote_storage}') params_to_update.append( f'--pageserver-config-override=remote_storage={{{pageserver_storage_override}}}') @@ -1795,10 +1821,28 @@ class SafekeeperHttpClient(requests.Session): json=body) res.raise_for_status() - def get_metrics(self) -> SafekeeperMetrics: + def timeline_delete_force(self, tenant_id: str, timeline_id: str) -> Dict[Any, Any]: + res = self.delete( + f"http://localhost:{self.port}/v1/tenant/{tenant_id}/timeline/{timeline_id}") + res.raise_for_status() + res_json = res.json() + assert isinstance(res_json, dict) + return res_json + + def tenant_delete_force(self, tenant_id: str) -> Dict[Any, Any]: + res = self.delete(f"http://localhost:{self.port}/v1/tenant/{tenant_id}") + res.raise_for_status() + res_json = res.json() + assert isinstance(res_json, dict) + return res_json + + def get_metrics_str(self) -> str: request_result = self.get(f"http://localhost:{self.port}/metrics") request_result.raise_for_status() - all_metrics_text = request_result.text + return request_result.text + + def get_metrics(self) -> SafekeeperMetrics: + all_metrics_text = self.get_metrics_str() metrics = SafekeeperMetrics() for match in re.finditer( @@ -1820,26 +1864,36 @@ class Etcd: datadir: str port: int peer_port: int + binary_path: Path = etcd_path() handle: Optional[subprocess.Popen[Any]] = None # handle of running daemon + def client_url(self): + return f'http://127.0.0.1:{self.port}' + def check_status(self): s = requests.Session() s.mount('http://', requests.adapters.HTTPAdapter(max_retries=1)) # do not retry - s.get(f"http://localhost:{self.port}/health").raise_for_status() + 
s.get(f"{self.client_url()}/health").raise_for_status() + + def try_start(self): + if self.handle is not None: + log.debug(f'etcd is already running on port {self.port}') + return - def start(self): pathlib.Path(self.datadir).mkdir(exist_ok=True) - etcd_full_path = etcd_path() - if etcd_full_path is None: - raise Exception('etcd not found') + if not self.binary_path.is_file(): + raise RuntimeError(f"etcd broker binary '{self.binary_path}' is not a file") + + client_url = self.client_url() + log.info(f'Starting etcd to listen incoming connections at "{client_url}"') with open(os.path.join(self.datadir, "etcd.log"), "wb") as log_file: args = [ - etcd_full_path, + self.binary_path, f"--data-dir={self.datadir}", - f"--listen-client-urls=http://localhost:{self.port}", - f"--advertise-client-urls=http://localhost:{self.port}", - f"--listen-peer-urls=http://localhost:{self.peer_port}" + f"--listen-client-urls={client_url}", + f"--advertise-client-urls={client_url}", + f"--listen-peer-urls=http://127.0.0.1:{self.peer_port}" ] self.handle = subprocess.Popen(args, stdout=log_file, stderr=log_file) @@ -1891,7 +1945,12 @@ def test_output_dir(request: Any) -> str: return test_dir -SKIP_DIRS = frozenset(('pg_wal', 'pg_stat', 'pg_stat_tmp', 'pg_subtrans', 'pg_logical')) +SKIP_DIRS = frozenset(('pg_wal', + 'pg_stat', + 'pg_stat_tmp', + 'pg_subtrans', + 'pg_logical', + 'pg_replslot/wal_proposer_slot')) SKIP_FILES = frozenset(('pg_internal.init', 'pg.log', @@ -2017,7 +2076,11 @@ def check_restored_datadir_content(test_output_dir: str, env: ZenithEnv, pg: Pos assert (mismatch, error) == ([], []) -def wait_for(number_of_iterations: int, interval: int, func): +def wait_until(number_of_iterations: int, interval: int, func): + """ + Wait until 'func' returns successfully, without exception. Returns the last return value + from the the function. + """ last_exception = None for i in range(number_of_iterations): try: @@ -2044,9 +2107,15 @@ def remote_consistent_lsn(pageserver_http_client: ZenithPageserverHttpClient, timeline: uuid.UUID) -> int: detail = pageserver_http_client.timeline_detail(tenant, timeline) - lsn_str = detail['remote']['remote_consistent_lsn'] - assert isinstance(lsn_str, str) - return lsn_from_hex(lsn_str) + if detail['remote'] is None: + # No remote information at all. This happens right after creating + # a timeline, before any part of it it has been uploaded to remote + # storage yet. 
@@ -2044,9 +2107,15 @@ def remote_consistent_lsn(pageserver_http_client: ZenithPageserverHttpClient,
                           timeline: uuid.UUID) -> int:
     detail = pageserver_http_client.timeline_detail(tenant, timeline)

-    lsn_str = detail['remote']['remote_consistent_lsn']
-    assert isinstance(lsn_str, str)
-    return lsn_from_hex(lsn_str)
+    if detail['remote'] is None:
+        # No remote information at all. This happens right after creating
+        # a timeline, before any part of it has been uploaded to remote
+        # storage yet.
+        return 0
+    else:
+        lsn_str = detail['remote']['remote_consistent_lsn']
+        assert isinstance(lsn_str, str)
+        return lsn_from_hex(lsn_str)


 def wait_for_upload(pageserver_http_client: ZenithPageserverHttpClient,
@@ -2054,8 +2123,15 @@ def wait_for_upload(pageserver_http_client: ZenithPageserverHttpClient,
                     timeline: uuid.UUID,
                     lsn: int):
     """waits for local timeline upload up to specified lsn"""
-
-    wait_for(10, 1, lambda: remote_consistent_lsn(pageserver_http_client, tenant, timeline) >= lsn)
+    for i in range(10):
+        current_lsn = remote_consistent_lsn(pageserver_http_client, tenant, timeline)
+        if current_lsn >= lsn:
+            return
+        log.info("waiting for remote_consistent_lsn to reach {}, now {}, iteration {}".format(
+            lsn_to_hex(lsn), lsn_to_hex(current_lsn), i + 1))
+        time.sleep(1)
+    raise Exception("timed out while waiting for remote_consistent_lsn to reach {}, was {}".format(
+        lsn_to_hex(lsn), lsn_to_hex(current_lsn)))


 def last_record_lsn(pageserver_http_client: ZenithPageserverHttpClient,
@@ -2073,5 +2149,12 @@ def wait_for_last_record_lsn(pageserver_http_client: ZenithPageserverHttpClient,
                              timeline: uuid.UUID,
                              lsn: int):
     """waits for pageserver to catch up to a certain lsn"""
-
-    wait_for(10, 1, lambda: last_record_lsn(pageserver_http_client, tenant, timeline) >= lsn)
+    for i in range(10):
+        current_lsn = last_record_lsn(pageserver_http_client, tenant, timeline)
+        if current_lsn >= lsn:
+            return
+        log.info("waiting for last_record_lsn to reach {}, now {}, iteration {}".format(
+            lsn_to_hex(lsn), lsn_to_hex(current_lsn), i + 1))
+        time.sleep(1)
+    raise Exception("timed out while waiting for last_record_lsn to reach {}, was {}".format(
+        lsn_to_hex(lsn), lsn_to_hex(current_lsn)))
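The two unrolled wait loops above are typically used back to back in remote-storage tests: first wait for the pageserver to ingest the WAL up to a given LSN, then wait for the upload to catch up to the same LSN. Sketch only; `client`, `tenant`, `timeline` and `lsn` are placeholders supplied by the test:

    wait_for_last_record_lsn(client, tenant, timeline, lsn)  # WAL ingested by the pageserver
    wait_for_upload(client, tenant, timeline, lsn)           # ...and uploaded to remote storage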
diff --git a/test_runner/performance/test_bulk_insert.py b/test_runner/performance/test_bulk_insert.py
index 4e73bedcc0..3b57ac73cc 100644
--- a/test_runner/performance/test_bulk_insert.py
+++ b/test_runner/performance/test_bulk_insert.py
@@ -18,7 +18,6 @@ from fixtures.compare_fixtures import PgCompare, VanillaCompare, ZenithCompare
 def test_bulk_insert(zenith_with_baseline: PgCompare):
     env = zenith_with_baseline

-    # Get the timeline ID of our branch. We need it for the 'do_gc' command
     with closing(env.pg.connect()) as conn:
         with conn.cursor() as cur:
             cur.execute("create table huge (i int, j int);")
diff --git a/test_runner/performance/test_bulk_tenant_create.py b/test_runner/performance/test_bulk_tenant_create.py
index f0729d3a07..0e16d3e749 100644
--- a/test_runner/performance/test_bulk_tenant_create.py
+++ b/test_runner/performance/test_bulk_tenant_create.py
@@ -30,7 +30,7 @@ def test_bulk_tenant_create(
     for i in range(tenants_count):
         start = timeit.default_timer()

-        tenant = env.zenith_cli.create_tenant()
+        tenant, _ = env.zenith_cli.create_tenant()
         env.zenith_cli.create_timeline(
             f'test_bulk_tenant_create_{tenants_count}_{i}_{use_safekeepers}', tenant_id=tenant)
diff --git a/test_runner/performance/test_random_writes.py b/test_runner/performance/test_random_writes.py
index ba9eabcd97..205388bd90 100644
--- a/test_runner/performance/test_random_writes.py
+++ b/test_runner/performance/test_random_writes.py
@@ -8,7 +8,6 @@ from fixtures.log_helper import log
 import psycopg2.extras
 import random
 import time
-from fixtures.utils import print_gc_result


 # This is a clear-box test that demonstrates the worst case scenario for the
diff --git a/test_runner/performance/test_startup.py b/test_runner/performance/test_startup.py
index e30912ce32..53b6a3a4fc 100644
--- a/test_runner/performance/test_startup.py
+++ b/test_runner/performance/test_startup.py
@@ -1,9 +1,11 @@
+import pytest
 from contextlib import closing
-
 from fixtures.zenith_fixtures import ZenithEnvBuilder
 from fixtures.benchmark_fixture import ZenithBenchmarker


+# This test sometimes runs for longer than the global 5 minute timeout.
+@pytest.mark.timeout(600)
 def test_startup(zenith_env_builder: ZenithEnvBuilder, zenbenchmark: ZenithBenchmarker):
     zenith_env_builder.num_safekeepers = 3
     env = zenith_env_builder.init_start()
diff --git a/vendor/postgres b/vendor/postgres
index 9a9459a7f9..79af2faf08 160000
--- a/vendor/postgres
+++ b/vendor/postgres
@@ -1 +1 @@
-Subproject commit 9a9459a7f9cbcaa0e35ff1f2f34c419238fdec7e
+Subproject commit 79af2faf08d9bec1b1664a72936727dcca36d253
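On the `test_startup.py` change above: the suite-wide limit mentioned in the added comment normally comes from pytest-timeout's ini-level setting, and the per-test marker overrides it. The same pattern applies to any other slow test; the test name below is made up:

    import pytest

    @pytest.mark.timeout(600)  # this test alone gets a 10-minute budget
    def test_something_slow(zenith_env_builder):
        ...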