Compare commits

..

3 Commits

Author SHA1 Message Date
anastasia
a267dfa41f code cleanup for compute_node_rebase branch 2021-04-09 17:25:41 +03:00
anastasia
1b9eb9430c 1. Handle SLRU and nonrel files as pageserver pages: upload them via restore_s3, handle in protocol.
2. Parse pg_control to retrieve systemid, lsn and so on. Store it in pagecache.
3. Setup compute node without files: only request a few essential files from pageserver to bootstrap. And after that route ALL I/O requests to pageserver.
Use the initdb --compute-node flag to create such a minimal node without files. And GUC 'computenode_mode=true' to request all pages from pageserver
2021-04-08 16:01:24 +03:00
anastasia
9a4fbf365c add test test_pageserver_recovery.
zenith_push postgres to minio and start pageserver using this base image
2021-04-08 15:14:44 +03:00
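Commit 1b9eb9430c above describes bootstrapping a compute node with no local relation files. The sketch below illustrates that flow; only the `initdb --compute-node` flag and the `computenode_mode` GUC are taken from the commit message, while the data directory, log path, and surrounding commands are illustrative assumptions.

```sh
# Hypothetical walk-through of the compute-node bootstrap described in commit 1b9eb9430c.
# Only 'initdb --compute-node' and 'computenode_mode' come from the commit message;
# the paths and the pg_ctl invocation are assumptions.
initdb --compute-node -D compute_data                            # minimal datadir without relation files
echo "computenode_mode = true" >> compute_data/postgresql.conf   # route all page requests to the pageserver
pg_ctl -D compute_data -l compute.log start                      # node fetches everything else from the pageserver
```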
235 changed files with 20498 additions and 32423 deletions


@@ -1,397 +0,0 @@
version: 2.1
orbs:
python: circleci/python@1.4.0
executors:
zenith-build-executor:
resource_class: xlarge
docker:
- image: cimg/rust:1.52.1
jobs:
check-codestyle:
executor: zenith-build-executor
steps:
- checkout
- run:
name: rustfmt
when: always
command: |
cargo fmt --all -- --check
# A job to build postgres
build-postgres:
executor: zenith-build-executor
parameters:
build_type:
type: enum
enum: ["debug", "release"]
environment:
BUILD_TYPE: << parameters.build_type >>
steps:
# Checkout the git repo (circleci doesn't have a flag to enable submodules here)
- checkout
# Grab the postgres git revision to build a cache key.
# Note this works even though the submodule hasn't been checked out yet.
- run:
name: Get postgres cache key
command: |
git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres
- restore_cache:
name: Restore postgres cache
keys:
# Restore ONLY if the rev key matches exactly
- v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}
# FIXME We could cache our own docker container, instead of installing packages every time.
- run:
name: apt install dependencies
command: |
if [ ! -e tmp_install/bin/postgres ]; then
sudo apt update
sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev
fi
# Build postgres if the restore_cache didn't find a build.
# `make` can't figure out whether the cache is valid, since
# it only compares file timestamps.
- run:
name: build postgres
command: |
if [ ! -e tmp_install/bin/postgres ]; then
# "depth 1" saves some time by not cloning the whole repo
git submodule update --init --depth 1
make postgres -j8
fi
- save_cache:
name: Save postgres cache
key: v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}
paths:
- tmp_install
# A job to build zenith rust code
build-zenith:
executor: zenith-build-executor
parameters:
build_type:
type: enum
enum: ["debug", "release"]
steps:
- run:
name: apt install dependencies
command: |
sudo apt update
sudo apt install libssl-dev clang
# Checkout the git repo (without submodules)
- checkout
# Grab the postgres git revision to build a cache key.
# Note this works even though the submodule hasn't been checked out yet.
- run:
name: Get postgres cache key
command: |
git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres
- restore_cache:
name: Restore postgres cache
keys:
# Restore ONLY if the rev key matches exactly
- v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}
- restore_cache:
name: Restore rust cache
keys:
# Require an exact match. While an out of date cache might speed up the build,
# there's no way to clean out old packages, so the cache grows every time something
# changes.
- v03-rust-cache-deps-<< parameters.build_type >>-{{ checksum "Cargo.lock" }}
# Build the rust code, including test binaries
- run:
name: Rust build << parameters.build_type >>
command: |
export CARGO_INCREMENTAL=0
BUILD_TYPE="<< parameters.build_type >>"
if [[ $BUILD_TYPE == "debug" ]]; then
echo "Build in debug mode"
cargo build --bins --tests
elif [[ $BUILD_TYPE == "release" ]]; then
echo "Build in release mode"
cargo build --release --bins --tests
fi
- save_cache:
name: Save rust cache
key: v03-rust-cache-deps-<< parameters.build_type >>-{{ checksum "Cargo.lock" }}
paths:
- ~/.cargo/registry
- ~/.cargo/git
- target
# Run style checks
# has to run separately from the cargo fmt section
# since it needs to run with dependencies
- run:
name: clippy
command: |
./run_clippy.sh
# Run rust unit tests
- run: cargo test
# Install the rust binaries, for use by test jobs
# `--locked` is required; otherwise, `cargo install` will ignore Cargo.lock.
# FIXME: this is a really silly way to install; maybe we should just output
# a tarball as an artifact? Or a .deb package?
- run:
name: cargo install
command: |
export CARGO_INCREMENTAL=0
BUILD_TYPE="<< parameters.build_type >>"
if [[ $BUILD_TYPE == "debug" ]]; then
echo "Install debug mode"
CARGO_FLAGS="--debug"
elif [[ $BUILD_TYPE == "release" ]]; then
echo "Install release mode"
# The default is release mode; there is no --release flag.
CARGO_FLAGS=""
fi
cargo install $CARGO_FLAGS --locked --root /tmp/zenith --path pageserver
cargo install $CARGO_FLAGS --locked --root /tmp/zenith --path walkeeper
cargo install $CARGO_FLAGS --locked --root /tmp/zenith --path zenith
# Install the postgres binaries, for use by test jobs
# FIXME: this is a silly way to do "install"; maybe just output a standard
# postgres package, whatever the favored form is (tarball? .deb package?)
# Note that pg_regress needs some build artifacts that probably aren't
# in the usual package...?
- run:
name: postgres install
command: |
cp -a tmp_install /tmp/zenith/pg_install
# Save the rust output binaries for other jobs in this workflow.
- persist_to_workspace:
root: /tmp/zenith
paths:
- "*"
run-pytest:
#description: "Run pytest"
executor: python/default
parameters:
# pytest args to specify the tests to run.
#
# This can be a test file name, e.g. 'test_pgbench.py', or a subdirectory,
# or '-k foobar' to run tests containing string 'foobar'. See pytest man page
# section SPECIFYING TESTS / SELECTING TESTS for details.
#
# Select the type of Rust build. Must be "release" or "debug".
build_type:
type: string
default: "debug"
# This parameter is required, to prevent the mistake of running all tests in one job.
test_selection:
type: string
default: ""
# Arbitrary parameters to pytest. For example "-s" to prevent capturing stdout/stderr
extra_params:
type: string
default: ""
needs_postgres_source:
type: boolean
default: false
run_in_parallel:
type: boolean
default: true
steps:
- attach_workspace:
at: /tmp/zenith
- checkout
- when:
condition: << parameters.needs_postgres_source >>
steps:
- run: git submodule update --init --depth 1
- run:
name: Install pipenv & deps
working_directory: test_runner
command: |
pip install pipenv
pipenv install
- run:
name: Run pytest
working_directory: test_runner
environment:
- ZENITH_BIN: /tmp/zenith/bin
- POSTGRES_DISTRIB_DIR: /tmp/zenith/pg_install
- TEST_OUTPUT: /tmp/test_output
command: |
TEST_SELECTION="<< parameters.test_selection >>"
EXTRA_PARAMS="<< parameters.extra_params >>"
if [ -z "$TEST_SELECTION" ]; then
echo "test_selection must be set"
exit 1
fi
if << parameters.run_in_parallel >>; then
EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
fi;
./netstat-script.sh &
NS_PID=$!
# Run the tests.
#
# The junit.xml file allows CircleCI to display more fine-grained test information
# in its "Tests" tab in the results page.
# --verbose prints name of each test (helpful when there are
# multiple tests in one file)
# -rA prints summary in the end
# -n4 uses four processes to run tests via pytest-xdist
# -s (which would stop pytest from capturing output) is not used, because tests run
# in parallel and their logs would be interleaved
pipenv run pytest --junitxml=$TEST_OUTPUT/junit.xml --tb=short --verbose -rA $TEST_SELECTION $EXTRA_PARAMS
kill $NS_PID
awk '/===/ {if (count) print count; print; count=0; next} {count++} END {print count}' $TEST_OUTPUT/netstat.stdout > $TEST_OUTPUT/netstat_stats.stdout
- run:
# CircleCI artifacts are preserved one file at a time, so skipping
# this step isn't a good idea. If you want to extract the
# pageserver state, perhaps a tarball would be a better idea.
name: Delete all data but logs
when: always
command: |
du -sh /tmp/test_output/*
find /tmp/test_output -type f ! -name "pg.log" ! -name "pageserver.log" ! -name "safekeeper.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" -delete
du -sh /tmp/test_output/*
- store_artifacts:
path: /tmp/test_output
# The store_test_results step tells CircleCI where to find the junit.xml file.
- store_test_results:
path: /tmp/test_output
# Build zenithdb/zenith:latest image and push it to Docker hub
docker-image:
docker:
- image: cimg/base:2021.04
steps:
- checkout
- setup_remote_docker:
docker_layer_caching: true
- run:
name: Init postgres submodule
command: git submodule update --init --depth 1
- run:
name: Build and push Docker image
command: |
echo $DOCKER_PWD | docker login -u $DOCKER_LOGIN --password-stdin
docker build -t zenithdb/zenith:latest . && docker push zenithdb/zenith:latest
# Trigger a new remote CI job
remote-ci-trigger:
docker:
- image: cimg/base:2021.04
parameters:
remote_repo:
type: string
environment:
REMOTE_REPO: << parameters.remote_repo >>
steps:
- run:
name: Set PR's status to pending
command: |
LOCAL_REPO=$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
curl -f -X POST \
https://api.github.com/repos/$LOCAL_REPO/statuses/$CIRCLE_SHA1 \
-H "Accept: application/vnd.github.v3+json" \
--user "$CI_ACCESS_TOKEN" \
--data \
"{
\"state\": \"pending\",
\"context\": \"zenith-remote-ci\",
\"description\": \"[$REMOTE_REPO] Remote CI job is about to start\"
}"
- run:
name: Request a remote CI test
command: |
LOCAL_REPO=$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
curl -f -X POST \
https://api.github.com/repos/$REMOTE_REPO/actions/workflows/testing.yml/dispatches \
-H "Accept: application/vnd.github.v3+json" \
--user "$CI_ACCESS_TOKEN" \
--data \
"{
\"ref\": \"main\",
\"inputs\": {
\"ci_job_name\": \"zenith-remote-ci\",
\"commit_hash\": \"$CIRCLE_SHA1\",
\"remote_repo\": \"$LOCAL_REPO\"
}
}"
workflows:
build_and_test:
jobs:
- check-codestyle
- build-postgres:
name: build-postgres-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
- build-zenith:
name: build-zenith-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
requires:
- build-postgres-<< matrix.build_type >>
- run-pytest:
name: pg_regress-tests-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
test_selection: batch_pg_regress
needs_postgres_source: true
requires:
- build-zenith-<< matrix.build_type >>
- run-pytest:
name: other-tests-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
test_selection: batch_others
requires:
- build-zenith-<< matrix.build_type >>
- run-pytest:
name: benchmarks
build_type: release
test_selection: performance
run_in_parallel: false
requires:
- build-zenith-release
- docker-image:
# Context gives the ability to log in
context: Docker Hub
# Build image only for commits to main
filters:
branches:
only:
- main
requires:
- pg_regress-tests-release
- other-tests-release
- remote-ci-trigger:
# Context passes credentials for gh api
context: CI_ACCESS_TOKEN
remote_repo: "zenithdb/console"
requires:
# XXX: Successful build doesn't mean everything is OK, but
# the job to be triggered takes so much time to complete (~22 min)
# that it's better not to wait for the commented-out steps
- build-zenith-debug
# - pg_regress-tests-release
# - other-tests-release
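For reference, the run-pytest job above can be approximated locally. This is only a sketch: the environment variable names, the pytest flags, and the `batch_others` selection come from the config itself, while the local binary and install paths are assumptions (the CI job installs everything under /tmp/zenith instead).

```sh
# Rough local equivalent of the run-pytest job (paths are assumptions).
cd test_runner
pip install pipenv && pipenv install
ZENITH_BIN=../target/debug \
POSTGRES_DISTRIB_DIR=../tmp_install \
TEST_OUTPUT=/tmp/test_output \
  pipenv run pytest --tb=short --verbose -rA batch_others
```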


@@ -1,18 +0,0 @@
**/.git/
**/__pycache__
**/.pytest_cache
.git
target
tmp_check
tmp_install
tmp_check_cli
test_output
.vscode
.zenith
integration_tests/.zenith
.mypy_cache
Dockerfile
.dockerignore


@@ -1,45 +0,0 @@
name: Send Notifications
on:
push:
branches: [ main ]
jobs:
send-notifications:
timeout-minutes: 30
name: send commit notifications
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
fetch-depth: 2
- name: Form variables for notification message
id: git_info_grab
run: |
git_stat=$(git show --stat=50)
git_stat="${git_stat//'%'/'%25'}"
git_stat="${git_stat//$'\n'/'%0A'}"
git_stat="${git_stat//$'\r'/'%0D'}"
git_stat="${git_stat// /}" # space -> 'Space En', as github tends to eat ordinary spaces
echo "::set-output name=git_stat::$git_stat"
echo "::set-output name=sha_short::$(git rev-parse --short HEAD)"
echo "##[set-output name=git_branch;]$(echo ${GITHUB_REF#refs/heads/})"
- name: Send notification
uses: appleboy/telegram-action@master
with:
to: ${{ secrets.TELEGRAM_TO }}
token: ${{ secrets.TELEGRAM_TOKEN }}
format: markdown
args: |
*@${{ github.actor }} pushed to* [${{ github.repository }}:${{steps.git_info_grab.outputs.git_branch}}](github.com/${{ github.repository }}/commit/${{steps.git_info_grab.outputs.sha_short }})
```
${{ steps.git_info_grab.outputs.git_stat }}
```


@@ -1,41 +1,49 @@
name: Build and Test
name: regression check
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]
on: [push]
jobs:
regression-check:
strategy:
matrix:
# If we want to duplicate this job for different
# Rust toolchains (e.g. nightly or 1.37.0), add them here.
rust_toolchain: [stable]
os: [ubuntu-latest]
timeout-minutes: 30
name: run regression test suite
runs-on: ${{ matrix.os }}
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
fetch-depth: 2
- name: install rust toolchain ${{ matrix.rust_toolchain }}
uses: actions-rs/toolchain@v1
- name: Form variables for notification message
id: git_info_grab
run: |
git_stat=$(git show --stat=50)
git_stat="${git_stat//'%'/'%25'}"
git_stat="${git_stat//$'\n'/'%0A'}"
git_stat="${git_stat//$'\r'/'%0D'}"
git_stat="${git_stat// /}" # space -> 'Space En', as github tends to eat ordinary spaces
echo "::set-output name=git_stat::$git_stat"
echo "::set-output name=sha_short::$(git rev-parse --short HEAD)"
echo "##[set-output name=git_branch;]$(echo ${GITHUB_REF#refs/heads/})"
- name: Send notification
uses: appleboy/telegram-action@master
with:
profile: minimal
toolchain: ${{ matrix.rust_toolchain }}
override: true
to: ${{ secrets.TELEGRAM_TO }}
token: ${{ secrets.TELEGRAM_TOKEN }}
format: markdown
args: |
*@${{ github.actor }} pushed to* [${{ github.repository }}:${{steps.git_info_grab.outputs.git_branch}}](github.com/${{ github.repository }}/commit/${{steps.git_info_grab.outputs.sha_short }})
```
${{ steps.git_info_grab.outputs.git_stat }}
```
- name: Install postgres dependencies
run: |
sudo apt update
sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev
sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libcurl4-openssl-dev
- name: Set pg revision for caching
id: pg_ver
@@ -52,7 +60,11 @@ jobs:
- name: Build postgres
if: steps.cache_pg.outputs.cache-hit != 'true'
run: |
make postgres
./pgbuild.sh
- name: Install rust
run: |
sudo apt install -y cargo
- name: Cache cargo deps
id: cache_cargo
@@ -64,10 +76,13 @@ jobs:
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Run cargo build
# That build is only to build dependencies and can be skipped if Cargo.lock
# wasn't changed. Next steps need their own build
- name: Install cargo deps
if: steps.cache_cargo.outputs.cache-hit != 'true'
run: |
cargo build --workspace --bins --examples --tests
cargo build
- name: Run cargo test
- name: Run test
run: |
cargo test -- --nocapture --test-threads=1
cargo test --test test_pageserver -- --nocapture --test-threads=1

6
.gitignore vendored

@@ -1,9 +1,3 @@
/target
/tmp_check
/tmp_install
/tmp_check_cli
__pycache__/
test_output/
.vscode
/.zenith
/integration_tests/.zenith

2
.gitmodules vendored

@@ -1,4 +1,4 @@
[submodule "vendor/postgres"]
path = vendor/postgres
url = https://github.com/zenithdb/postgres
url = https://github.com/libzenith/postgres
branch = main


@@ -1,31 +0,0 @@
# How to contribute
Howdy! Usual good software engineering practices apply. Write
tests. Write comments. Follow standard Rust coding practices where
possible. Use 'cargo fmt' and 'clippy' to tidy up formatting.
There are soft spots in the code, which could use cleanup,
refactoring, additional comments, and so forth. Let's try to raise the
bar, and clean things up as we go. Try to leave code in a better shape
than it was before.
## Submitting changes
1. Make a PR for every change.
Even seemingly trivial patches can break things in surprising ways.
Use of common sense is OK. If you're only fixing a typo in a comment,
it's probably fine to just push it. But if in doubt, open a PR.
2. Get at least one +1 on your PR before you push.
For simple patches, it will only take a minute for someone to review
it.
3. Always keep the CI green.
Do not push if the CI failed on your PR, even if you think it's not
your patch's fault. If something else has broken the CI, help fix the
root cause before pushing.
*Happy Hacking!*


@@ -1,20 +0,0 @@
This software is licensed under the Apache 2.0 License:
----------------------------------------------------------------------------
Copyright 2021 Zenith Labs, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------------------------------------------------------------------------
The PostgreSQL submodule in vendor/postgres is licensed under the
PostgreSQL license. See vendor/postgres/COPYRIGHT.

1717
Cargo.lock generated

File diff suppressed because it is too large.


@@ -1,17 +1,6 @@
[workspace]
members = [
"control_plane",
"integration_tests",
"pageserver",
"postgres_ffi",
"proxy",
"walkeeper",
"workspace_hack",
"zenith",
"zenith_metrics",
"zenith_utils",
]
[profile.release]
# This is useful for profiling and, to some extent, debug.
# Besides, debug info should not affect the performance.
debug = true


@@ -1,62 +0,0 @@
#
# Docker image for console integration testing.
#
#
# Build Postgres separately --- this layer will be rebuilt only if one of
# the mentioned paths changes.
#
FROM zenithdb/build:buster AS pg-build
WORKDIR /zenith
COPY ./vendor/postgres vendor/postgres
COPY ./Makefile Makefile
ENV BUILD_TYPE release
RUN make -j $(getconf _NPROCESSORS_ONLN) -s postgres
RUN rm -rf postgres_install/build
#
# Build zenith binaries
#
# TODO: build cargo deps as a separate layer. We used cargo-chef before, but that was
# a net waste of time in a lot of cases. Copying Cargo.lock with an empty lib.rs should do the job.
#
FROM zenithdb/build:buster AS build
WORKDIR /zenith
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY . .
RUN cargo build --release
#
# Copy binaries to resulting image.
#
FROM debian:buster-slim
WORKDIR /data
RUN apt-get update && apt-get -yq install libreadline-dev libseccomp-dev openssl ca-certificates && \
mkdir zenith_install
COPY --from=build /zenith/target/release/pageserver /usr/local/bin
COPY --from=build /zenith/target/release/safekeeper /usr/local/bin
# TODO: temporary alias for compatibility, see https://github.com/zenithdb/zenith/pull/740
RUN ln -s /usr/local/bin/safekeeper /usr/local/bin/wal_acceptor
COPY --from=build /zenith/target/release/proxy /usr/local/bin
COPY --from=pg-build /zenith/tmp_install postgres_install
COPY docker-entrypoint.sh /docker-entrypoint.sh
# Remove build artifacts (~ 500 MB)
RUN rm -rf postgres_install/build && \
# 'Install' Postgres binaries locally
cp -r postgres_install/* /usr/local/ && \
# Prepare an archive of Postgres binaries (should be around 11 MB)
# and keep it inside container for an ease of deploy pipeline.
cd postgres_install && tar -czf /data/postgres_install.tar.gz . && cd .. && \
rm -rf postgres_install
RUN useradd -d /data zenith && chown -R zenith:zenith /data
VOLUME ["/data"]
USER zenith
EXPOSE 6400
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["pageserver"]


@@ -1,97 +0,0 @@
#
# Docker image for console integration testing.
#
# We may also reuse it in CI to unify installation process and as a general binaries building
# tool for production servers.
#
# Dynamic linking is used for librocksdb and libstdc++ because librocksdb-sys calls
# bindgen with "dynamic" feature flag. This also prevents usage of dockerhub alpine-rust
# images which are statically linked and have guards against any dlopen. I would
# prefer all static binaries, so we may change the way librocksdb-sys builds or wait
# until we have our own storage and drop the rocksdb dependency.
#
# Cargo-chef is used to separate building the dependencies from building the main binaries. This
# way `docker build` will download and install dependencies only if there are changes to
# our Cargo.toml files.
#
#
# build postgres separately -- this layer will be rebuilt only if one of
# the mentioned paths changes
#
FROM alpine:3.13 as pg-build
RUN apk add --update clang llvm compiler-rt compiler-rt-static lld musl-dev binutils \
make bison flex readline-dev zlib-dev perl linux-headers libseccomp-dev
WORKDIR zenith
COPY ./vendor/postgres vendor/postgres
COPY ./Makefile Makefile
# Build using clang and lld
RUN CC='clang' LD='lld' CFLAGS='-fuse-ld=lld --rtlib=compiler-rt' make postgres -j4
#
# Calculate cargo dependencies.
# This will always run, but only generate recipe.json with list of dependencies without
# installing them.
#
FROM alpine:20210212 as cargo-deps-inspect
RUN apk add --update rust cargo
RUN cargo install cargo-chef
WORKDIR zenith
COPY . .
RUN cargo chef prepare --recipe-path recipe.json
#
# Build cargo dependencies.
# This temp container will be rebuilt only if recipe.json has changed.
#
FROM alpine:20210212 as deps-build
RUN apk add --update rust cargo openssl-dev clang build-base
# rust-rocksdb can be built against system-wide rocksdb -- that saves about
# 10 minutes during build. Rocksdb apk package is in testing now, but use it
# anyway. In case of any troubles we can download and build rocksdb here manually
# (to cache it as a docker layer).
RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb-dev
WORKDIR zenith
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY --from=cargo-deps-inspect /root/.cargo/bin/cargo-chef /root/.cargo/bin/
COPY --from=cargo-deps-inspect /zenith/recipe.json recipe.json
RUN ROCKSDB_LIB_DIR=/usr/lib/ cargo chef cook --release --recipe-path recipe.json
#
# Build zenith binaries
#
FROM alpine:20210212 as build
RUN apk add --update rust cargo openssl-dev clang build-base
RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb-dev
WORKDIR zenith
COPY . .
# Copy cached dependencies
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY --from=deps-build /zenith/target target
COPY --from=deps-build /root/.cargo /root/.cargo
RUN cargo build --release
#
# Copy binaries to resulting image.
# build-base is here to provide libstdc++ (it will also bring gcc, but leave it this way until we figure
# out how to statically link rocksdb or avoid it at all).
#
FROM alpine:3.13
RUN apk add --update openssl build-base libseccomp-dev
RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb
COPY --from=build /zenith/target/release/pageserver /usr/local/bin
COPY --from=build /zenith/target/release/safekeeper /usr/local/bin
# TODO: temporary alias for compatibility, see https://github.com/zenithdb/zenith/pull/740
RUN ln -s /usr/local/bin/safekeeper /usr/local/bin/wal_acceptor
COPY --from=build /zenith/target/release/proxy /usr/local/bin
COPY --from=pg-build /zenith/tmp_install /usr/local
COPY docker-entrypoint.sh /docker-entrypoint.sh
RUN addgroup zenith && adduser -h /data -D -G zenith zenith
VOLUME ["/data"]
WORKDIR /data
USER zenith
EXPOSE 6400
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["pageserver"]


@@ -1,15 +0,0 @@
#
# Image with all the required dependencies to build https://github.com/zenithdb/zenith
# and Postgres from https://github.com/zenithdb/postgres
# Also includes some rust development and build tools.
#
FROM rust:slim-buster
WORKDIR /zenith
# Install postgres and zenith build dependencies
# clang is for rocksdb
RUN apt-get update && apt-get -yq install automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
libseccomp-dev pkg-config libssl-dev clang
# Install rust tools
RUN rustup component add clippy && cargo install cargo-audit

202
LICENSE

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

104
Makefile

@@ -1,104 +0,0 @@
# Seccomp BPF is only available for Linux
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
SECCOMP = --with-libseccomp
else
SECCOMP =
endif
#
# We differentiate between release / debug build types using the BUILD_TYPE
# environment variable.
#
BUILD_TYPE ?= debug
ifeq ($(BUILD_TYPE),release)
PG_CONFIGURE_OPTS = --enable-debug
PG_CFLAGS = -O2 -g3 $(CFLAGS)
# Unfortunately, `--profile=...` is a nightly feature
CARGO_BUILD_FLAGS += --release
else ifeq ($(BUILD_TYPE),debug)
PG_CONFIGURE_OPTS = --enable-debug --enable-cassert --enable-depend
PG_CFLAGS = -O0 -g3 $(CFLAGS)
else
$(error Bad build type `$(BUILD_TYPE)', see Makefile for options)
endif
# Choose whether we should be silent or verbose
CARGO_BUILD_FLAGS += --$(if $(filter s,$(MAKEFLAGS)),quiet,verbose)
# Fix for a corner case when make doesn't pass a jobserver
CARGO_BUILD_FLAGS += $(filter -j1,$(MAKEFLAGS))
# This option has a side effect of passing make jobserver to cargo.
# However, we shouldn't do this if `make -n` (--dry-run) has been asked.
CARGO_CMD_PREFIX += $(if $(filter n,$(MAKEFLAGS)),,+)
# Force cargo not to print progress bar
CARGO_CMD_PREFIX += CARGO_TERM_PROGRESS_WHEN=never CI=1
#
# Top level Makefile to build Zenith and PostgreSQL
#
.PHONY: all
all: zenith postgres
### Zenith Rust bits
#
# The 'postgres_ffi' depends on the Postgres headers.
.PHONY: zenith
zenith: postgres-headers
+@echo "Compiling Zenith"
$(CARGO_CMD_PREFIX) cargo build $(CARGO_BUILD_FLAGS)
### PostgreSQL parts
tmp_install/build/config.status:
+@echo "Configuring postgres build"
mkdir -p tmp_install/build
(cd tmp_install/build && \
../../vendor/postgres/configure CFLAGS='$(PG_CFLAGS)' \
$(PG_CONFIGURE_OPTS) \
$(SECCOMP) \
--prefix=$(abspath tmp_install) > configure.log)
# nicer alias for running 'configure'
.PHONY: postgres-configure
postgres-configure: tmp_install/build/config.status
# Install the PostgreSQL header files into tmp_install/include
.PHONY: postgres-headers
postgres-headers: postgres-configure
+@echo "Installing PostgreSQL headers"
$(MAKE) -C tmp_install/build/src/include MAKELEVEL=0 install
# Compile and install PostgreSQL and contrib/zenith
.PHONY: postgres
postgres: postgres-configure \
postgres-headers # to prevent `make install` conflicts with zenith's `postgres-headers`
+@echo "Compiling PostgreSQL"
$(MAKE) -C tmp_install/build MAKELEVEL=0 install
+@echo "Compiling contrib/zenith"
$(MAKE) -C tmp_install/build/contrib/zenith install
+@echo "Compiling contrib/zenith_test_utils"
$(MAKE) -C tmp_install/build/contrib/zenith_test_utils install
.PHONY: postgres-clean
postgres-clean:
$(MAKE) -C tmp_install/build MAKELEVEL=0 clean
# This doesn't remove the effects of 'configure'.
.PHONY: clean
clean:
cd tmp_install/build && $(MAKE) clean
$(CARGO_CMD_PREFIX) cargo clean
# This removes everything
.PHONY: distclean
distclean:
rm -rf tmp_install
$(CARGO_CMD_PREFIX) cargo clean
.PHONY: fmt
fmt:
./pre-commit.py --fix-inplace
.PHONY: setup-pre-commit-hook
setup-pre-commit-hook:
ln -s -f ../../pre-commit.py .git/hooks/pre-commit
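As a usage note for the Makefile above, the BUILD_TYPE switch is driven entirely by the environment. The target names and flag effects below come from the Makefile itself; only the -j value is an arbitrary choice.

```sh
# Example invocations of the Makefile above (a sketch).
make -j8 all                           # debug build of postgres + zenith (BUILD_TYPE defaults to debug)
BUILD_TYPE=release make -j8 postgres   # optimized postgres build (-O2, --enable-debug)
BUILD_TYPE=release make zenith         # builds the Rust bits with 'cargo build --release'
```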


@@ -1 +0,0 @@
./test_runner/Pipfile

1
Pipfile.lock generated

@@ -1 +0,0 @@
./test_runner/Pipfile.lock

158
README.md

@@ -2,149 +2,43 @@
Zenith substitutes the PostgreSQL storage layer and redistributes data across a cluster of nodes
## Architecture overview
A Zenith installation consists of Compute nodes and a Storage engine.
Compute nodes are stateless PostgreSQL nodes, backed by Zenith storage.
The Zenith storage engine consists of two major components:
- Pageserver. Scalable storage backend for compute nodes.
- WAL service. The service that receives WAL from the compute node and ensures that it is stored durably.
The Pageserver consists of:
- Repository - the Zenith storage implementation.
- WAL receiver - service that receives WAL from the WAL service and stores it in the repository.
- Page service - service that communicates with compute nodes and responds with pages from the repository.
- WAL redo - service that builds pages from base images and WAL records at the Page service's request.
## Running local installation
1. Install build dependencies and other useful packages
On Ubuntu or Debian this set of packages should be sufficient to build the code:
```text
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
libssl-dev clang pkg-config libpq-dev
```
[Rust] 1.52 or later is also required.
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `tmp_install/bin` and `tmp_install/lib`, respectively.
To run the integration tests (not required to use the code), install
Python (3.6 or higher) and install the Python packages by running `pipenv install` in the project directory.
2. Build zenith and patched postgres
```sh
git clone --recursive https://github.com/zenithdb/zenith.git
cd zenith
make -j5
```
3. Start pageserver and postgres on top of it (run these commands from the repo root):
```sh
# Create repository in .zenith with proper paths to binaries and data
# Later that would be responsibility of a package install script
> ./target/debug/zenith init
pageserver init succeeded
# start pageserver
> ./target/debug/zenith start
Starting pageserver at '127.0.0.1:64000' in .zenith
Pageserver started
# start postgres on top of the pageserver
> ./target/debug/zenith pg start main
Starting postgres node at 'host=127.0.0.1 port=55432 user=stas'
waiting for server to start.... done
# check list of running postgres instances
> ./target/debug/zenith pg list
BRANCH ADDRESS LSN STATUS
main 127.0.0.1:55432 0/1609610 running
```
4. Now it is possible to connect to postgres and run some queries:
```text
> psql -p55432 -h 127.0.0.1 -U zenith_admin postgres
postgres=# CREATE TABLE t(key int primary key, value text);
CREATE TABLE
postgres=# insert into t values(1,1);
INSERT 0 1
postgres=# select * from t;
key | value
-----+-------
1 | 1
(1 row)
```
5. And create branches and run postgres on them:
```sh
# create branch named migration_check
> ./target/debug/zenith branch migration_check main
Created branch 'migration_check' at 0/1609610
# check branches tree
> ./target/debug/zenith branch
main
┗━ @0/1609610: migration_check
# start postgres on that branch
> ./target/debug/zenith pg start migration_check
Starting postgres node at 'host=127.0.0.1 port=55433 user=stas'
waiting for server to start.... done
# this new postgres instance will have all the data from 'main' postgres,
# but modifications to it will not affect the data in the original postgres
> psql -p55433 -h 127.0.0.1 -U zenith_admin postgres
postgres=# select * from t;
key | value
-----+-------
1 | 1
(1 row)
postgres=# insert into t values(2,2);
INSERT 0 1
```
6. If you want to run tests afterwards (see below), you have to stop pageserver and all postgres instances you have just started:
```sh
> ./target/debug/zenith pg stop migration_check
> ./target/debug/zenith pg stop main
> ./target/debug/zenith stop
```
## Running tests
```sh
git clone --recursive https://github.com/zenithdb/zenith.git
make # builds also postgres and installs it to ./tmp_install
cd test_runner
pytest
git clone --recursive https://github.com/libzenith/zenith.git
./pgbuild.sh # builds postgres and installs it to ./tmp_install
cargo test -- --test-threads=1
```
## Documentation
## Source tree layout
Now we use README files to cover design ideas and the overall architecture of each module, plus `rustdoc`-style documentation comments. See also [/docs/](/docs/) for a top-level overview of all available markdown documentation.
/walkeeper:
- [/docs/sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.
WAL safekeeper. Written in Rust.
To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`
/pageserver:
### Postgres-specific terms
Page Server. Written in Rust.
Because Zenith is closely tied to PostgreSQL internals, numerous PostgreSQL-specific terms are used.
The same applies to certain spellings: e.g. we use MB to denote 1024 * 1024 bytes; MiB would be technically more correct, but it is inconsistent with what the PostgreSQL code and its documentation use.
Depends on the modified 'postgres' binary for WAL redo.
To get more familiar with this aspect, refer to:
/integration_tests:
Tests with different combinations of a Postgres compute node, WAL safekeeper and Page Server.
/mgmt-console:
Web UI to launch (modified) Postgres servers, using S3 as the backing store. Written in Python.
This is somewhat outdated, as it doesn't use the WAL safekeeper or Page Servers.
/vendor/postgres:
PostgreSQL source tree, with the modifications needed for Zenith.
/vendor/postgres/src/bin/safekeeper:
Extension (safekeeper_proxy) that runs in the compute node, and connects to the WAL safekeepers
and streams the WAL
- [Zenith glossary](/docs/glossary.md)
- [PostgreSQL glossary](https://www.postgresql.org/docs/13/glossary.html)
- Other PostgreSQL documentation and sources (Zenith fork sources can be found [here](https://github.com/zenithdb/postgres))
## Join the development
- Read `CONTRIBUTING.md` to learn about project code style and practices.
- To get familiar with a source tree layout, use [/docs/sourcetree.md](/docs/sourcetree.md).
- To learn more about PostgreSQL internals, check http://www.interdb.jp/pg/index.html


@@ -1,188 +0,0 @@
Create a new Zenith repository in the current directory:
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli init
The files belonging to this database system will be owned by user "heikki".
This user must also own the server process.
The database cluster will be initialized with locale "en_GB.UTF-8".
The default database encoding has accordingly been set to "UTF8".
The default text search configuration will be set to "english".
Data page checksums are disabled.
creating directory tmp ... ok
creating subdirectories ... ok
selecting dynamic shared memory implementation ... posix
selecting default max_connections ... 100
selecting default shared_buffers ... 128MB
selecting default time zone ... Europe/Helsinki
creating configuration files ... ok
running bootstrap script ... ok
performing post-bootstrap initialization ... ok
syncing data to disk ... ok
initdb: warning: enabling "trust" authentication for local connections
You can change this by editing pg_hba.conf or using the option -A, or
--auth-local and --auth-host, the next time you run initdb.
new zenith repository was created in .zenith
Initially, there is only one branch:
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch
main
Start a local Postgres instance on the branch:
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start main
Creating data directory from snapshot at 0/15FFB08...
waiting for server to start....2021-04-13 09:27:43.919 EEST [984664] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv6 address "::1", port 5432
2021-04-13 09:27:43.920 EEST [984664] LOG: listening on IPv4 address "127.0.0.1", port 5432
2021-04-13 09:27:43.927 EEST [984664] LOG: listening on Unix socket "/tmp/.s.PGSQL.5432"
2021-04-13 09:27:43.939 EEST [984665] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
2021-04-13 09:27:43.939 EEST [984665] LOG: creating missing WAL directory "pg_wal/archive_status"
2021-04-13 09:27:44.189 EEST [984665] LOG: database system was not properly shut down; automatic recovery in progress
2021-04-13 09:27:44.195 EEST [984665] LOG: invalid record length at 0/15FFB80: wanted 24, got 0
2021-04-13 09:27:44.195 EEST [984665] LOG: redo is not required
2021-04-13 09:27:44.225 EEST [984664] LOG: database system is ready to accept connections
done
server started
Run some commands against it:
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "create table foo (t text);"
CREATE TABLE
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "insert into foo values ('inserted on the main branch');"
INSERT 0 1
~/git-sandbox/zenith (cli-v2)$ psql postgres -c "select * from foo"
t
-----------------------------
inserted on the main branch
(1 row)
Create a new branch called 'experimental'. We create it from the
current end of the 'main' branch, but you could specify a different
LSN as the start point instead.
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch experimental main
branching at end of WAL: 0/161F478
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli branch
experimental
main
Start another Postgres instance off the 'experimental' branch:
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433
Creating data directory from snapshot at 0/15FFB08...
waiting for server to start....2021-04-13 09:28:41.874 EEST [984766] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv6 address "::1", port 5433
2021-04-13 09:28:41.875 EEST [984766] LOG: listening on IPv4 address "127.0.0.1", port 5433
2021-04-13 09:28:41.883 EEST [984766] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433"
2021-04-13 09:28:41.896 EEST [984767] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
2021-04-13 09:28:42.265 EEST [984767] LOG: database system was not properly shut down; automatic recovery in progress
2021-04-13 09:28:42.269 EEST [984767] LOG: redo starts at 0/15FFB80
2021-04-13 09:28:42.272 EEST [984767] LOG: invalid record length at 0/161F4B0: wanted 24, got 0
2021-04-13 09:28:42.272 EEST [984767] LOG: redo done at 0/161F478 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
2021-04-13 09:28:42.321 EEST [984766] LOG: database system is ready to accept connections
done
server started
Insert a row on the 'experimental' branch:
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
t
-----------------------------
inserted on the main branch
(1 row)
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "insert into foo values ('inserted on experimental')"
INSERT 0 1
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
t
-----------------------------
inserted on the main branch
inserted on experimental
(2 rows)
See that the other Postgres instance is still running on 'main' branch on port 5432:
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5432 -c "select * from foo"
t
-----------------------------
inserted on the main branch
(1 row)
Everything is stored in the .zenith directory:
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/
total 12
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 datadirs
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 refs
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:28 timelines
The 'datadirs' directory contains the datadirs of the running instances:
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/
total 8
drwx------ 18 heikki heikki 4096 Apr 13 09:27 3c0c634c1674079b2c6d4edf7c91523e
drwx------ 18 heikki heikki 4096 Apr 13 09:28 697e3c103d4b1763cd6e82e4ff361d76
~/git-sandbox/zenith (cli-v2)$ ls -l .zenith/datadirs/3c0c634c1674079b2c6d4edf7c91523e/
total 124
drwxr-xr-x 5 heikki heikki 4096 Apr 13 09:27 base
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 global
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_commit_ts
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_dynshmem
-rw------- 1 heikki heikki 4760 Apr 13 09:27 pg_hba.conf
-rw------- 1 heikki heikki 1636 Apr 13 09:27 pg_ident.conf
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:32 pg_logical
drwxr-xr-x 4 heikki heikki 4096 Apr 13 09:27 pg_multixact
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_notify
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_replslot
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_serial
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_snapshots
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_stat
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:34 pg_stat_tmp
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_subtrans
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_tblspc
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_twophase
-rw------- 1 heikki heikki 3 Apr 13 09:27 PG_VERSION
lrwxrwxrwx 1 heikki heikki 52 Apr 13 09:27 pg_wal -> ../../timelines/3c0c634c1674079b2c6d4edf7c91523e/wal
drwxr-xr-x 2 heikki heikki 4096 Apr 13 09:27 pg_xact
-rw------- 1 heikki heikki 88 Apr 13 09:27 postgresql.auto.conf
-rw------- 1 heikki heikki 28688 Apr 13 09:27 postgresql.conf
-rw------- 1 heikki heikki 96 Apr 13 09:27 postmaster.opts
-rw------- 1 heikki heikki 149 Apr 13 09:27 postmaster.pid
Note how 'pg_wal' is just a symlink to the 'timelines' directory. The
datadir is ephemeral: you can delete it at any time, and it can be reconstructed
from the snapshots and WAL stored in the 'timelines' directory. So if you push/pull
the repository, the 'datadirs' are not included. (They are like git working trees)
~/git-sandbox/zenith (cli-v2)$ killall -9 postgres
~/git-sandbox/zenith (cli-v2)$ rm -rf .zenith/datadirs/*
~/git-sandbox/zenith (cli-v2)$ ./target/debug/cli start experimental -- -o -p5433
Creating data directory from snapshot at 0/15FFB08...
waiting for server to start....2021-04-13 09:37:05.476 EEST [985340] LOG: starting PostgreSQL 14devel on x86_64-pc-linux-gnu, compiled by gcc (Debian 10.2.1-6) 10.2.1 20210110, 64-bit
2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv6 address "::1", port 5433
2021-04-13 09:37:05.477 EEST [985340] LOG: listening on IPv4 address "127.0.0.1", port 5433
2021-04-13 09:37:05.487 EEST [985340] LOG: listening on Unix socket "/tmp/.s.PGSQL.5433"
2021-04-13 09:37:05.498 EEST [985341] LOG: database system was interrupted; last known up at 2021-04-13 09:27:33 EEST
2021-04-13 09:37:05.808 EEST [985341] LOG: database system was not properly shut down; automatic recovery in progress
2021-04-13 09:37:05.813 EEST [985341] LOG: redo starts at 0/15FFB80
2021-04-13 09:37:05.815 EEST [985341] LOG: invalid record length at 0/161F770: wanted 24, got 0
2021-04-13 09:37:05.815 EEST [985341] LOG: redo done at 0/161F738 system usage: CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s
2021-04-13 09:37:05.866 EEST [985340] LOG: database system is ready to accept connections
done
server started
~/git-sandbox/zenith (cli-v2)$ psql postgres -p5433 -c "select * from foo"
t
-----------------------------
inserted on the main branch
inserted on experimental
(2 rows)


@@ -1 +0,0 @@
tmp_check/


@@ -1,30 +0,0 @@
[package]
name = "control_plane"
version = "0.1.0"
authors = ["Stas Kelvich <stas@zenith.tech>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
rand = "0.8.3"
tar = "0.4.33"
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
toml = "0.5"
lazy_static = "1.4"
regex = "1"
anyhow = "1.0"
thiserror = "1"
bytes = "1.0.1"
nix = "0.20"
url = "2.2.2"
hex = { version = "0.4.3", features = ["serde"] }
reqwest = { version = "0.11", features = ["blocking", "json"] }
pageserver = { path = "../pageserver" }
walkeeper = { path = "../walkeeper" }
postgres_ffi = { path = "../postgres_ffi" }
zenith_utils = { path = "../zenith_utils" }
workspace_hack = { path = "../workspace_hack" }


@@ -1,470 +0,0 @@
use std::collections::BTreeMap;
use std::fs::{self, File};
use std::io::Write;
use std::net::SocketAddr;
use std::net::TcpStream;
use std::os::unix::fs::PermissionsExt;
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use anyhow::{Context, Result};
use zenith_utils::connstring::connection_host_port;
use zenith_utils::lsn::Lsn;
use zenith_utils::postgres_backend::AuthType;
use zenith_utils::zid::ZTenantId;
use zenith_utils::zid::ZTimelineId;
use crate::local_env::LocalEnv;
use crate::postgresql_conf::PostgresConf;
use crate::storage::PageServerNode;
//
// ComputeControlPlane
//
pub struct ComputeControlPlane {
base_port: u16,
pageserver: Arc<PageServerNode>,
pub nodes: BTreeMap<(ZTenantId, String), Arc<PostgresNode>>,
env: LocalEnv,
}
impl ComputeControlPlane {
// Load current nodes with ports from data directories on disk
// Directory structure has the following layout:
// pgdatadirs
// |- tenants
// | |- <tenant_id>
// | | |- <branch name>
pub fn load(env: LocalEnv) -> Result<ComputeControlPlane> {
// TODO: since the pageserver does not have a config file yet, we assume here that
// it is running on the default port. Change this once the pageserver has a config.
let pageserver = Arc::new(PageServerNode::from_env(&env));
let mut nodes = BTreeMap::default();
let pgdatadirspath = &env.pg_data_dirs_path();
for tenant_dir in fs::read_dir(&pgdatadirspath)
.with_context(|| format!("failed to list {}", pgdatadirspath.display()))?
{
let tenant_dir = tenant_dir?;
for timeline_dir in fs::read_dir(tenant_dir.path())
.with_context(|| format!("failed to list {}", tenant_dir.path().display()))?
{
let node = PostgresNode::from_dir_entry(timeline_dir?, &env, &pageserver)?;
nodes.insert((node.tenantid, node.name.clone()), Arc::new(node));
}
}
Ok(ComputeControlPlane {
base_port: 55431,
pageserver,
nodes,
env,
})
}
fn get_port(&mut self) -> u16 {
1 + self
.nodes
.iter()
.map(|(_name, node)| node.address.port())
.max()
.unwrap_or(self.base_port)
}
pub fn local(local_env: &LocalEnv, pageserver: &Arc<PageServerNode>) -> ComputeControlPlane {
ComputeControlPlane {
base_port: 65431,
pageserver: Arc::clone(pageserver),
nodes: BTreeMap::new(),
env: local_env.clone(),
}
}
pub fn new_node(
&mut self,
tenantid: ZTenantId,
branch_name: &str,
port: Option<u16>,
) -> Result<Arc<PostgresNode>> {
let timeline_id = self
.pageserver
.branch_get_by_name(&tenantid, branch_name)?
.timeline_id;
let port = port.unwrap_or_else(|| self.get_port());
let node = Arc::new(PostgresNode {
name: branch_name.to_owned(),
address: SocketAddr::new("127.0.0.1".parse().unwrap(), port),
env: self.env.clone(),
pageserver: Arc::clone(&self.pageserver),
is_test: false,
timelineid: timeline_id,
tenantid,
uses_wal_proposer: false,
});
node.create_pgdata()?;
node.setup_pg_conf(self.env.auth_type)?;
self.nodes
.insert((tenantid, node.name.clone()), Arc::clone(&node));
Ok(node)
}
}
///////////////////////////////////////////////////////////////////////////////
#[derive(Debug)]
pub struct PostgresNode {
pub address: SocketAddr,
name: String,
pub env: LocalEnv,
pageserver: Arc<PageServerNode>,
is_test: bool,
pub timelineid: ZTimelineId,
pub tenantid: ZTenantId,
uses_wal_proposer: bool,
}
impl PostgresNode {
fn from_dir_entry(
entry: std::fs::DirEntry,
env: &LocalEnv,
pageserver: &Arc<PageServerNode>,
) -> Result<PostgresNode> {
if !entry.file_type()?.is_dir() {
anyhow::bail!(
"PostgresNode::from_dir_entry failed: '{}' is not a directory",
entry.path().display()
);
}
// parse data directory name
let fname = entry.file_name();
let name = fname.to_str().unwrap().to_string();
// Read config file into memory
let cfg_path = entry.path().join("postgresql.conf");
let cfg_path_str = cfg_path.to_string_lossy();
let mut conf_file = File::open(&cfg_path)
.with_context(|| format!("failed to open config file in {}", cfg_path_str))?;
let conf = PostgresConf::read(&mut conf_file)
.with_context(|| format!("failed to read config file in {}", cfg_path_str))?;
// Read a few options from the config file
let context = format!("in config file {}", cfg_path_str);
let port: u16 = conf.parse_field("port", &context)?;
let timelineid: ZTimelineId = conf.parse_field("zenith.zenith_timeline", &context)?;
let tenantid: ZTenantId = conf.parse_field("zenith.zenith_tenant", &context)?;
let uses_wal_proposer = conf.get("wal_acceptors").is_some();
// ok now
Ok(PostgresNode {
address: SocketAddr::new("127.0.0.1".parse().unwrap(), port),
name,
env: env.clone(),
pageserver: Arc::clone(pageserver),
is_test: false,
timelineid,
tenantid,
uses_wal_proposer,
})
}
fn sync_walkeepers(&self) -> Result<Lsn> {
let pg_path = self.env.pg_bin_dir().join("postgres");
let sync_handle = Command::new(pg_path)
.arg("--sync-safekeepers")
.env_clear()
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
.env("PGDATA", self.pgdata().to_str().unwrap())
.stdout(Stdio::piped())
// Comment this to avoid capturing stderr (useful if command hangs)
.stderr(Stdio::piped())
.spawn()
.expect("postgres --sync-safekeepers failed to start");
let sync_output = sync_handle
.wait_with_output()
.expect("postgres --sync-safekeepers failed");
if !sync_output.status.success() {
anyhow::bail!(
"sync-safekeepers failed: '{}'",
String::from_utf8_lossy(&sync_output.stderr)
);
}
let lsn = Lsn::from_str(std::str::from_utf8(&sync_output.stdout)?.trim())?;
println!("Walkeepers synced on {}", lsn);
Ok(lsn)
}
/// Get basebackup from the pageserver as a tar archive and extract it
/// to the `self.pgdata()` directory.
fn do_basebackup(&self, lsn: Option<Lsn>) -> Result<()> {
println!(
"Extracting base backup to create postgres instance: path={} port={}",
self.pgdata().display(),
self.address.port()
);
let sql = if let Some(lsn) = lsn {
format!("basebackup {} {} {}", self.tenantid, self.timelineid, lsn)
} else {
format!("basebackup {} {}", self.tenantid, self.timelineid)
};
let mut client = self
.pageserver
.page_server_psql_client()
.with_context(|| "connecting to page server failed")?;
let copyreader = client
.copy_out(sql.as_str())
.with_context(|| "page server 'basebackup' command failed")?;
// Read the archive directly from the `CopyOutReader`
tar::Archive::new(copyreader)
.unpack(&self.pgdata())
.with_context(|| "extracting page backup failed")?;
Ok(())
}
fn create_pgdata(&self) -> Result<()> {
fs::create_dir_all(&self.pgdata()).with_context(|| {
format!(
"could not create data directory {}",
self.pgdata().display()
)
})?;
fs::set_permissions(self.pgdata().as_path(), fs::Permissions::from_mode(0o700))
.with_context(|| {
format!(
"could not set permissions in data directory {}",
self.pgdata().display()
)
})
}
// Connect to a page server, get base backup, and untar it to initialize a
// new data directory
fn setup_pg_conf(&self, auth_type: AuthType) -> Result<()> {
let mut conf = PostgresConf::new();
conf.append("max_wal_senders", "10");
// wal_log_hints is mandatory when running against pageserver (see gh issue#192)
// TODO: is it possible to check wal_log_hints at pageserver side via XLOG_PARAMETER_CHANGE?
conf.append("wal_log_hints", "on");
conf.append("max_replication_slots", "10");
conf.append("hot_standby", "on");
conf.append("shared_buffers", "1MB");
conf.append("fsync", "off");
conf.append("max_connections", "100");
conf.append("wal_sender_timeout", "0");
conf.append("wal_level", "replica");
conf.append("listen_addresses", &self.address.ip().to_string());
conf.append("port", &self.address.port().to_string());
// Never clean up old WAL. TODO: We should use a replication
// slot or something proper, to prevent the compute node
// from removing WAL that hasn't been streamed to the safekeeper or
// page server yet. (gh issue #349)
conf.append("wal_keep_size", "10TB");
// Configure the node to fetch pages from pageserver
let pageserver_connstr = {
let (host, port) = connection_host_port(&self.pageserver.pg_connection_config);
// Set up authentication
//
// $ZENITH_AUTH_TOKEN will be replaced with value from environment
// variable during compute pg startup. It is done this way because
// otherwise user will be able to retrieve the value using SHOW
// command or pg_settings
let password = if let AuthType::ZenithJWT = auth_type {
"$ZENITH_AUTH_TOKEN"
} else {
""
};
format!("host={} port={} password={}", host, port, password)
};
conf.append("shared_preload_libraries", "zenith");
conf.append_line("");
conf.append("zenith.page_server_connstring", &pageserver_connstr);
conf.append("zenith.zenith_tenant", &self.tenantid.to_string());
conf.append("zenith.zenith_timeline", &self.timelineid.to_string());
conf.append_line("");
// Configure the node to stream WAL directly to the pageserver
conf.append("synchronous_standby_names", "pageserver"); // TODO: add a new function arg?
conf.append("zenith.callmemaybe_connstring", &self.connstr());
let mut file = File::create(self.pgdata().join("postgresql.conf"))?;
file.write_all(conf.to_string().as_bytes())?;
Ok(())
}
fn load_basebackup(&self) -> Result<()> {
let lsn = if self.uses_wal_proposer {
// LSN 0 means that this is a bootstrap and we just need to download the
// latest data from the pageserver. That is a bit clumsy, but the whole bootstrap
// procedure is evolving quite actively right now, so let's think about it again
// when things are more stable (TODO).
let lsn = self.sync_walkeepers()?;
if lsn == Lsn(0) {
None
} else {
Some(lsn)
}
} else {
None
};
self.do_basebackup(lsn)?;
Ok(())
}
pub fn pgdata(&self) -> PathBuf {
self.env.pg_data_dir(&self.tenantid, &self.name)
}
pub fn status(&self) -> &str {
let timeout = Duration::from_millis(300);
let has_pidfile = self.pgdata().join("postmaster.pid").exists();
let can_connect = TcpStream::connect_timeout(&self.address, timeout).is_ok();
match (has_pidfile, can_connect) {
(true, true) => "running",
(false, false) => "stopped",
(true, false) => "crashed",
(false, true) => "running, no pidfile",
}
}
fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
let pg_ctl_path = self.env.pg_bin_dir().join("pg_ctl");
let mut cmd = Command::new(pg_ctl_path);
cmd.args(
[
&[
"-D",
self.pgdata().to_str().unwrap(),
"-l",
self.pgdata().join("pg.log").to_str().unwrap(),
"-w", //wait till pg_ctl actually does what was asked
],
args,
]
.concat(),
)
.env_clear()
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap());
if let Some(token) = auth_token {
cmd.env("ZENITH_AUTH_TOKEN", token);
}
let pg_ctl = cmd.status().with_context(|| "pg_ctl failed")?;
if !pg_ctl.success() {
anyhow::bail!("pg_ctl failed");
}
Ok(())
}
pub fn start(&self, auth_token: &Option<String>) -> Result<()> {
// Bail if the node is already running.
if self.status() == "running" {
anyhow::bail!("The node is already running");
}
// 1. We always start compute node from scratch, so
// if old dir exists, preserve 'postgresql.conf' and drop the directory
let postgresql_conf_path = self.pgdata().join("postgresql.conf");
let postgresql_conf = fs::read(&postgresql_conf_path).with_context(|| {
format!(
"failed to read config file in {}",
postgresql_conf_path.to_str().unwrap()
)
})?;
fs::remove_dir_all(&self.pgdata())?;
self.create_pgdata()?;
// 2. Bring back config files
fs::write(&postgresql_conf_path, postgresql_conf)?;
// 3. Load basebackup
self.load_basebackup()?;
// 4. Finally start the compute node postgres
println!("Starting postgres node at '{}'", self.connstr());
self.pg_ctl(&["start"], auth_token)
}
pub fn restart(&self, auth_token: &Option<String>) -> Result<()> {
self.pg_ctl(&["restart"], auth_token)
}
pub fn stop(&self, destroy: bool) -> Result<()> {
// If we are going to destroy data directory,
// use immediate shutdown mode, otherwise,
// shutdown gracefully to leave the data directory sane.
//
// Compute node always starts from scratch, so stop
// without destroy is only used for testing and debugging.
//
if destroy {
self.pg_ctl(&["-m", "immediate", "stop"], &None)?;
println!(
"Destroying postgres data directory '{}'",
self.pgdata().to_str().unwrap()
);
fs::remove_dir_all(&self.pgdata())?;
} else {
self.pg_ctl(&["stop"], &None)?;
}
Ok(())
}
pub fn connstr(&self) -> String {
format!(
"host={} port={} user={} dbname={}",
self.address.ip(),
self.address.port(),
"zenith_admin",
"postgres"
)
}
// XXX: cache that in control plane
pub fn whoami(&self) -> String {
let output = Command::new("whoami")
.output()
.expect("failed to execute whoami");
assert!(output.status.success(), "whoami failed");
String::from_utf8(output.stdout).unwrap().trim().to_string()
}
}
impl Drop for PostgresNode {
// destructor to clean up state after test is done
// XXX: we may detect failed test by setting some flag in catch_unwind()
// and checking it here. But let's just clean datadirs on start.
fn drop(&mut self) {
if self.is_test {
let _ = self.stop(true);
}
}
}
View File
@@ -1,32 +0,0 @@
//
// Local control plane.
//
// Can start, configure and stop postgres instances running as local processes.
//
// Intended to be used in integration tests and in CLI tools for
// local installations.
//
use anyhow::{anyhow, bail, Context, Result};
use std::fs;
use std::path::Path;
pub mod compute;
pub mod local_env;
pub mod postgresql_conf;
pub mod storage;
/// Read a PID file
///
/// We expect a file that contains a single integer.
/// We return an i32 for compatibility with libc and nix.
pub fn read_pidfile(pidfile: &Path) -> Result<i32> {
let pid_str = fs::read_to_string(pidfile)
.with_context(|| format!("failed to read pidfile {:?}", pidfile))?;
let pid: i32 = pid_str
.parse()
.map_err(|_| anyhow!("failed to parse pidfile {:?}", pidfile))?;
if pid < 1 {
bail!("pidfile {:?} contained bad value '{}'", pidfile, pid);
}
Ok(pid)
}
View File
@@ -1,202 +0,0 @@
//
// This module is responsible for locating and loading paths in a local setup.
//
// It also provides an init method which acts as a stub for a proper installation
// script that will use local paths.
//
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process::{Command, Stdio};
use zenith_utils::auth::{encode_from_key_path, Claims, Scope};
use zenith_utils::postgres_backend::AuthType;
use zenith_utils::zid::ZTenantId;
//
// These data structures represent the deserialized zenith CLI config
//
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct LocalEnv {
// Pageserver connection settings
pub pageserver_pg_port: u16,
pub pageserver_http_port: u16,
// Base directory for both pageserver and compute nodes
pub base_data_dir: PathBuf,
// Path to postgres distribution. It's expected that "bin", "include",
// "lib", "share" from postgres distribution are there. If at some point
// in time we will be able to run against vanilla postgres we may split that
// to four separate paths and match OS-specific installation layout.
pub pg_distrib_dir: PathBuf,
// Path to pageserver binary.
pub zenith_distrib_dir: PathBuf,
// keeping tenant id in config to reduce copy paste when running zenith locally with single tenant
#[serde(with = "hex")]
pub tenantid: ZTenantId,
// jwt auth token used for communication with pageserver
pub auth_token: String,
// used to determine which auth type is used
pub auth_type: AuthType,
// used to issue tokens during e.g pg start
pub private_key_path: PathBuf,
}
impl LocalEnv {
// postgres installation paths
pub fn pg_bin_dir(&self) -> PathBuf {
self.pg_distrib_dir.join("bin")
}
pub fn pg_lib_dir(&self) -> PathBuf {
self.pg_distrib_dir.join("lib")
}
pub fn pageserver_bin(&self) -> Result<PathBuf> {
Ok(self.zenith_distrib_dir.join("pageserver"))
}
pub fn pg_data_dirs_path(&self) -> PathBuf {
self.base_data_dir.join("pgdatadirs").join("tenants")
}
pub fn pg_data_dir(&self, tenantid: &ZTenantId, branch_name: &str) -> PathBuf {
self.pg_data_dirs_path()
.join(tenantid.to_string())
.join(branch_name)
}
// TODO: move pageserver files into ./pageserver
pub fn pageserver_data_dir(&self) -> PathBuf {
self.base_data_dir.clone()
}
}
fn base_path() -> PathBuf {
match std::env::var_os("ZENITH_REPO_DIR") {
Some(val) => PathBuf::from(val.to_str().unwrap()),
None => ".zenith".into(),
}
}
//
// Initialize a new Zenith repository
//
pub fn init(
pageserver_pg_port: u16,
pageserver_http_port: u16,
tenantid: ZTenantId,
auth_type: AuthType,
) -> Result<()> {
// check if config already exists
let base_path = base_path();
if base_path.exists() {
anyhow::bail!(
"{} already exists. Perhaps already initialized?",
base_path.to_str().unwrap()
);
}
fs::create_dir(&base_path)?;
// ok, now check that expected binaries are present
// Find postgres binaries. Follow POSTGRES_DISTRIB_DIR if set, otherwise look in "tmp_install".
let pg_distrib_dir: PathBuf = {
if let Some(postgres_bin) = env::var_os("POSTGRES_DISTRIB_DIR") {
postgres_bin.into()
} else {
let cwd = env::current_dir()?;
cwd.join("tmp_install")
}
};
if !pg_distrib_dir.join("bin/postgres").exists() {
anyhow::bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
}
// generate keys for jwt
// openssl genrsa -out private_key.pem 2048
let private_key_path = base_path.join("auth_private_key.pem");
let keygen_output = Command::new("openssl")
.arg("genrsa")
.args(&["-out", private_key_path.to_str().unwrap()])
.arg("2048")
.stdout(Stdio::null())
.output()
.with_context(|| "failed to generate auth private key")?;
if !keygen_output.status.success() {
anyhow::bail!(
"openssl failed: '{}'",
String::from_utf8_lossy(&keygen_output.stderr)
);
}
let public_key_path = base_path.join("auth_public_key.pem");
// openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
let keygen_output = Command::new("openssl")
.arg("rsa")
.args(&["-in", private_key_path.to_str().unwrap()])
.arg("-pubout")
.args(&["-outform", "PEM"])
.args(&["-out", public_key_path.to_str().unwrap()])
.stdout(Stdio::null())
.output()
.with_context(|| "failed to generate auth private key")?;
if !keygen_output.status.success() {
anyhow::bail!(
"openssl failed: '{}'",
String::from_utf8_lossy(&keygen_output.stderr)
);
}
let auth_token =
encode_from_key_path(&Claims::new(None, Scope::PageServerApi), &private_key_path)?;
// Find zenith binaries.
let zenith_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
if !zenith_distrib_dir.join("pageserver").exists() {
anyhow::bail!("Can't find pageserver binary.",);
}
let conf = LocalEnv {
pageserver_pg_port,
pageserver_http_port,
pg_distrib_dir,
zenith_distrib_dir,
base_data_dir: base_path,
tenantid,
auth_token,
auth_type,
private_key_path,
};
fs::create_dir_all(conf.pg_data_dirs_path())?;
let toml = toml::to_string_pretty(&conf)?;
fs::write(conf.base_data_dir.join("config"), toml)?;
Ok(())
}
// Locate and load config
pub fn load_config() -> Result<LocalEnv> {
let repopath = base_path();
if !repopath.exists() {
anyhow::bail!(
"Zenith config is not found in {}. You need to run 'zenith init' first",
repopath.to_str().unwrap()
);
}
// TODO: check that it looks like a zenith repository
// load and parse file
let config = fs::read_to_string(repopath.join("config"))?;
toml::from_str(config.as_str()).map_err(|e| e.into())
}
View File
@@ -1,212 +0,0 @@
///
/// Module for parsing postgresql.conf file.
///
/// NOTE: This doesn't implement the full, correct postgresql.conf syntax. Just
/// enough to extract a few settings we need in Zenith, assuming you don't do
/// funny stuff like include-directives or funny escaping.
use anyhow::{anyhow, bail, Context, Result};
use lazy_static::lazy_static;
use regex::Regex;
use std::collections::HashMap;
use std::fmt;
use std::io::BufRead;
use std::str::FromStr;
/// In-memory representation of a postgresql.conf file
#[derive(Default)]
pub struct PostgresConf {
lines: Vec<String>,
hash: HashMap<String, String>,
}
lazy_static! {
static ref CONF_LINE_RE: Regex = Regex::new(r"^((?:\w|\.)+)\s*=\s*(\S+)$").unwrap();
}
impl PostgresConf {
pub fn new() -> PostgresConf {
PostgresConf::default()
}
/// Read file into memory
pub fn read(read: impl std::io::Read) -> Result<PostgresConf> {
let mut result = Self::new();
for line in std::io::BufReader::new(read).lines() {
let line = line?;
// Store each line in a vector, in original format
result.lines.push(line.clone());
// Also parse each line and insert key=value lines into a hash map.
//
// FIXME: This doesn't match exactly the flex/bison grammar in PostgreSQL.
// But it's close enough for our usage.
let line = line.trim();
if line.starts_with('#') {
// comment, ignore
continue;
} else if let Some(caps) = CONF_LINE_RE.captures(line) {
let name = caps.get(1).unwrap().as_str();
let raw_val = caps.get(2).unwrap().as_str();
if let Ok(val) = deescape_str(raw_val) {
// Note: if there's already an entry in the hash map for
// this key, this will replace it. That's the behavior
// we want; when PostgreSQL reads the file, each line
// overrides any previous value for the same setting.
result.hash.insert(name.to_string(), val.to_string());
}
}
}
Ok(result)
}
/// Return the current value of 'option'
pub fn get(&self, option: &str) -> Option<&str> {
self.hash.get(option).map(|x| x.as_ref())
}
/// Return the current value of a field, parsed to the right datatype.
///
/// This calls the FromStr::parse() function on the value of the field. If
/// the field does not exist, or parsing fails, returns an error.
///
pub fn parse_field<T>(&self, field_name: &str, context: &str) -> Result<T>
where
T: FromStr,
<T as FromStr>::Err: std::error::Error + Send + Sync + 'static,
{
self.get(field_name)
.ok_or_else(|| anyhow!("could not find '{}' option {}", field_name, context))?
.parse::<T>()
.with_context(|| format!("could not parse '{}' option {}", field_name, context))
}
///
/// Note: if you call this multiple times for the same option, the config
/// file will have a line for each call. It would be nice to have a function
/// to change an existing line, but that's a TODO.
///
pub fn append(&mut self, option: &str, value: &str) {
self.lines
.push(format!("{}={}\n", option, escape_str(value)));
self.hash.insert(option.to_string(), value.to_string());
}
/// Append an arbitrary non-setting line to the config file
pub fn append_line(&mut self, line: &str) {
self.lines.push(line.to_string());
}
}
impl fmt::Display for PostgresConf {
/// Return the whole configuration file as a string
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for line in self.lines.iter() {
f.write_str(line)?;
}
Ok(())
}
}
/// Escape a value for putting in postgresql.conf.
fn escape_str(s: &str) -> String {
// If the string doesn't contain anything that needs quoting or escaping, return it
// as it is.
//
// The first part of the regex, before the '|', matches the INTEGER rule in the
// PostgreSQL flex grammar (guc-file.l). It matches plain integers like "123" and
// "-123", and also accepts units like "10MB". The second part of the regex matches
// the UNQUOTED_STRING rule, and accepts strings that contain a single word, beginning
// with a letter. That covers words like "off" or "posix". Everything else is quoted.
//
// This regex is a bit more conservative than the rules in guc-file.l, so we quote some
// strings that PostgreSQL would accept without quoting, but that's OK.
lazy_static! {
static ref UNQUOTED_RE: Regex =
Regex::new(r"(^[-+]?[0-9]+[a-zA-Z]*$)|(^[a-zA-Z][a-zA-Z0-9]*$)").unwrap();
}
if UNQUOTED_RE.is_match(s) {
s.to_string()
} else {
// Otherwise escape and quote it
let s = s
.replace('\\', "\\\\")
.replace('\n', "\\n")
.replace('\'', "''");
"\'".to_owned() + &s + "\'"
}
}
/// De-escape a possibly-quoted value.
///
/// See `DeescapeQuotedString` function in PostgreSQL sources for how PostgreSQL
/// does this.
fn deescape_str(s: &str) -> Result<String> {
// If the string has a quote at the beginning and end, strip them out.
if s.len() >= 2 && s.starts_with('\'') && s.ends_with('\'') {
let mut result = String::new();
let mut iter = s[1..(s.len() - 1)].chars().peekable();
while let Some(c) = iter.next() {
let newc = if c == '\\' {
match iter.next() {
Some('b') => '\x08',
Some('f') => '\x0c',
Some('n') => '\n',
Some('r') => '\r',
Some('t') => '\t',
Some('0'..='7') => {
// TODO
bail!("octal escapes not supported");
}
Some(n) => n,
None => break,
}
} else if c == '\'' && iter.peek() == Some(&'\'') {
// doubled quote becomes just one quote
iter.next().unwrap()
} else {
c
};
result.push(newc);
}
Ok(result)
} else {
Ok(s.to_string())
}
}
#[test]
fn test_postgresql_conf_escapes() -> Result<()> {
assert_eq!(escape_str("foo bar"), "'foo bar'");
// these don't need to be quoted
assert_eq!(escape_str("foo"), "foo");
assert_eq!(escape_str("123"), "123");
assert_eq!(escape_str("+123"), "+123");
assert_eq!(escape_str("-10"), "-10");
assert_eq!(escape_str("1foo"), "1foo");
assert_eq!(escape_str("foo1"), "foo1");
assert_eq!(escape_str("10MB"), "10MB");
assert_eq!(escape_str("-10kB"), "-10kB");
// these need quoting and/or escaping
assert_eq!(escape_str("foo bar"), "'foo bar'");
assert_eq!(escape_str("fo'o"), "'fo''o'");
assert_eq!(escape_str("fo\no"), "'fo\\no'");
assert_eq!(escape_str("fo\\o"), "'fo\\\\o'");
assert_eq!(escape_str("10 cats"), "'10 cats'");
// Test de-escaping
assert_eq!(deescape_str(&escape_str("foo"))?, "foo");
assert_eq!(deescape_str(&escape_str("fo'o\nba\\r"))?, "fo'o\nba\\r");
assert_eq!(deescape_str("'\\b\\f\\n\\r\\t'")?, "\x08\x0c\n\r\t");
// octal-escapes are currently not supported
assert!(deescape_str("'foo\\7\\07\\007'").is_err());
Ok(())
}
View File
@@ -1,343 +0,0 @@
use std::io::Write;
use std::net::TcpStream;
use std::path::PathBuf;
use std::process::Command;
use std::time::Duration;
use std::{io, result, thread};
use anyhow::{anyhow, bail};
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
use pageserver::http::models::{BranchCreateRequest, TenantCreateRequest};
use postgres::{Config, NoTls};
use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method};
use thiserror::Error;
use zenith_utils::http::error::HttpErrorBody;
use zenith_utils::postgres_backend::AuthType;
use zenith_utils::zid::ZTenantId;
use crate::local_env::LocalEnv;
use crate::read_pidfile;
use pageserver::branches::BranchInfo;
use zenith_utils::connstring::connection_address;
#[derive(Error, Debug)]
pub enum PageserverHttpError {
#[error("Reqwest error: {0}")]
Transport(#[from] reqwest::Error),
#[error("Error: {0}")]
Response(String),
}
type Result<T> = result::Result<T, PageserverHttpError>;
pub trait ResponseErrorMessageExt: Sized {
fn error_from_body(self) -> Result<Self>;
}
impl ResponseErrorMessageExt for Response {
fn error_from_body(self) -> Result<Self> {
let status = self.status();
if !(status.is_client_error() || status.is_server_error()) {
return Ok(self);
}
// reqwest does not export its error construction utility functions, so let's craft the message ourselves
let url = self.url().to_owned();
Err(PageserverHttpError::Response(
match self.json::<HttpErrorBody>() {
Ok(err_body) => format!("Error: {}", err_body.msg),
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
},
))
}
}
//
// Control routines for pageserver.
//
// Used in CLI and tests.
//
#[derive(Debug)]
pub struct PageServerNode {
pub kill_on_exit: bool,
pub pg_connection_config: Config,
pub env: LocalEnv,
pub http_client: Client,
pub http_base_url: String,
}
impl PageServerNode {
pub fn from_env(env: &LocalEnv) -> PageServerNode {
let password = if env.auth_type == AuthType::ZenithJWT {
&env.auth_token
} else {
""
};
PageServerNode {
kill_on_exit: false,
pg_connection_config: Self::pageserver_connection_config(
password,
env.pageserver_pg_port,
),
env: env.clone(),
http_client: Client::new(),
http_base_url: format!("http://127.0.0.1:{}/v1", env.pageserver_http_port),
}
}
fn pageserver_connection_config(password: &str, port: u16) -> Config {
format!("postgresql://no_user:{}@127.0.0.1:{}/no_db", password, port)
.parse()
.unwrap()
}
pub fn init(&self, create_tenant: Option<&str>, enable_auth: bool) -> anyhow::Result<()> {
let mut cmd = Command::new(self.env.pageserver_bin()?);
let listen_pg = format!("127.0.0.1:{}", self.env.pageserver_pg_port);
let listen_http = format!("127.0.0.1:{}", self.env.pageserver_http_port);
let mut args = vec![
"--init",
"-D",
self.env.base_data_dir.to_str().unwrap(),
"--postgres-distrib",
self.env.pg_distrib_dir.to_str().unwrap(),
"--listen-pg",
&listen_pg,
"--listen-http",
&listen_http,
];
if enable_auth {
args.extend(&["--auth-validation-public-key-path", "auth_public_key.pem"]);
args.extend(&["--auth-type", "ZenithJWT"]);
}
if let Some(tenantid) = create_tenant {
args.extend(&["--create-tenant", tenantid])
}
let status = cmd
.args(args)
.env_clear()
.env("RUST_BACKTRACE", "1")
.status()
.expect("pageserver init failed");
if status.success() {
Ok(())
} else {
Err(anyhow!("pageserver init failed"))
}
}
pub fn repo_path(&self) -> PathBuf {
self.env.pageserver_data_dir()
}
pub fn pid_file(&self) -> PathBuf {
self.repo_path().join("pageserver.pid")
}
pub fn start(&self) -> anyhow::Result<()> {
print!(
"Starting pageserver at '{}' in '{}'",
connection_address(&self.pg_connection_config),
self.repo_path().display()
);
io::stdout().flush().unwrap();
let mut cmd = Command::new(self.env.pageserver_bin()?);
cmd.args(&["-D", self.repo_path().to_str().unwrap()])
.arg("-d")
.env_clear()
.env("RUST_BACKTRACE", "1");
if !cmd.status()?.success() {
bail!(
"Pageserver failed to start. See '{}' for details.",
self.repo_path().join("pageserver.log").display()
);
}
// It takes a while for the page server to start up. Wait until it is
// open for business.
const RETRIES: i8 = 15;
for retries in 1..RETRIES {
match self.check_status() {
Ok(_) => {
println!("\nPageserver started");
return Ok(());
}
Err(err) => {
match err {
PageserverHttpError::Transport(err) => {
if err.is_connect() && retries < 5 {
print!(".");
io::stdout().flush().unwrap();
} else {
if retries == 5 {
println!() // put a line break after dots for second message
}
println!(
"Pageserver not responding yet, err {} retrying ({})...",
err, retries
);
}
}
PageserverHttpError::Response(msg) => {
bail!("pageserver failed to start: {} ", msg)
}
}
thread::sleep(Duration::from_secs(1));
}
}
}
bail!("pageserver failed to start in {} seconds", RETRIES);
}
pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
let pid = read_pidfile(&self.pid_file())?;
let pid = Pid::from_raw(pid);
if immediate {
println!("Stop pageserver immediately");
if kill(pid, Signal::SIGQUIT).is_err() {
bail!("Failed to kill pageserver with pid {}", pid);
}
} else {
println!("Stop pageserver gracefully");
if kill(pid, Signal::SIGTERM).is_err() {
bail!("Failed to stop pageserver with pid {}", pid);
}
}
let address = connection_address(&self.pg_connection_config);
// TODO Remove this "timeout" and handle it on caller side instead.
// Shutting down may take a long time,
// if pageserver checkpoints a lot of data
for _ in 0..100 {
if let Err(_e) = TcpStream::connect(&address) {
println!("Pageserver stopped receiving connections");
//Now check status
match self.check_status() {
Ok(_) => {
println!("Pageserver status is OK. Wait a bit.");
thread::sleep(Duration::from_secs(1));
}
Err(err) => {
println!("Pageserver status is: {}", err);
return Ok(());
}
}
} else {
println!("Pageserver still receives connections");
thread::sleep(Duration::from_secs(1));
}
}
bail!("Failed to stop pageserver with pid {}", pid);
}
pub fn page_server_psql(&self, sql: &str) -> Vec<postgres::SimpleQueryMessage> {
let mut client = self.pg_connection_config.connect(NoTls).unwrap();
println!("Pageserver query: '{}'", sql);
client.simple_query(sql).unwrap()
}
pub fn page_server_psql_client(&self) -> result::Result<postgres::Client, postgres::Error> {
self.pg_connection_config.connect(NoTls)
}
fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
let mut builder = self.http_client.request(method, url);
if self.env.auth_type == AuthType::ZenithJWT {
builder = builder.bearer_auth(&self.env.auth_token)
}
builder
}
pub fn check_status(&self) -> Result<()> {
self.http_request(Method::GET, format!("{}/{}", self.http_base_url, "status"))
.send()?
.error_from_body()?;
Ok(())
}
pub fn tenant_list(&self) -> Result<Vec<String>> {
Ok(self
.http_request(Method::GET, format!("{}/{}", self.http_base_url, "tenant"))
.send()?
.error_from_body()?
.json()?)
}
pub fn tenant_create(&self, tenantid: ZTenantId) -> Result<()> {
Ok(self
.http_request(Method::POST, format!("{}/{}", self.http_base_url, "tenant"))
.json(&TenantCreateRequest {
tenant_id: tenantid,
})
.send()?
.error_from_body()?
.json()?)
}
pub fn branch_list(&self, tenantid: &ZTenantId) -> Result<Vec<BranchInfo>> {
Ok(self
.http_request(
Method::GET,
format!("{}/branch/{}", self.http_base_url, tenantid),
)
.send()?
.error_from_body()?
.json()?)
}
pub fn branch_create(
&self,
branch_name: &str,
startpoint: &str,
tenantid: &ZTenantId,
) -> Result<BranchInfo> {
Ok(self
.http_request(Method::POST, format!("{}/branch", self.http_base_url))
.json(&BranchCreateRequest {
tenant_id: tenantid.to_owned(),
name: branch_name.to_owned(),
start_point: startpoint.to_owned(),
})
.send()?
.error_from_body()?
.json()?)
}
pub fn branch_get_by_name(
&self,
tenantid: &ZTenantId,
branch_name: &str,
) -> Result<BranchInfo> {
Ok(self
.http_request(
Method::GET,
format!("{}/branch/{}/{}", self.http_base_url, tenantid, branch_name),
)
.send()?
.error_for_status()?
.json()?)
}
}
impl Drop for PageServerNode {
fn drop(&mut self) {
// TODO Looks like this flag is never set
if self.kill_on_exit {
let _ = self.stop(true);
}
}
}
View File
@@ -1,13 +0,0 @@
#!/bin/sh
set -eux
if [ "$1" = 'pageserver' ]; then
if [ ! -d "/data/tenants" ]; then
echo "Initializing pageserver data directory"
pageserver --init -D /data --postgres-distrib /usr/local
fi
echo "Staring pageserver at 0.0.0.0:6400"
pageserver -l 0.0.0.0:6400 --listen-http 0.0.0.0:9898 -D /data
else
"$@"
fi
View File
@@ -1,14 +0,0 @@
# Zenith documentation
## Table of contents
- [authentication.md](authentication.md) — pageserver JWT authentication.
- [docker.md](docker.md) — Docker images and building pipeline.
- [glossary.md](glossary.md) — Glossary of all the terms used in codebase.
- [multitenancy.md](multitenancy.md) — how multitenancy is organized in the pageserver and Zenith CLI.
- [sourcetree.md](sourcetree.md) — Overview of the source tree layout.
- [pageserver/README](/pageserver/README) — pageserver overview.
- [postgres_ffi/README](/postgres_ffi/README) — Postgres FFI overview.
- [test_runner/README.md](/test_runner/README.md) — tests infrastructure overview.
- [walkeeper/README](/walkeeper/README) — WAL service overview.
- [core_changes.md](core_changes.md) - Description of Zenith changes in Postgres core
View File
@@ -1,30 +0,0 @@
## Authentication
### Overview
Authentication currently relies on JWT tokens for communication between compute and pageserver, and between the CLI and the pageserver. The JWT is signed with an RSA key pair, which the CLI generates during `zenith init` using the following openssl commands:
```bash
openssl genrsa -out private_key.pem 2048
openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
```
The CLI also generates a signed token and saves it in its config for later access to the pageserver. Authentication is currently optional: the pageserver config has two settings, `auth_validation_public_key_path` and `auth_type`, and when `auth_type` is present and set to `ZenithJWT` the pageserver requires authentication for connections. The JWT itself is passed in the password field of the connection string. One caveat for psql: it silently truncates passwords to 100 characters, so to pass a JWT correctly via psql you have to either use the PGPASSWORD environment variable or store the password in the psql password file.
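For example, a minimal sketch of the PGPASSWORD approach (the host, port and database name here are placeholders, not values taken from this repository):
```bash
# Sketch: hand the JWT to psql via PGPASSWORD so its 100-character password
# truncation cannot mangle the token. Host/port/dbname are placeholders.
export PGPASSWORD="<jwt token>"
psql "host=127.0.0.1 port=6400 dbname=postgres"
```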
There is currently no authentication between compute and safekeepers, because that communication layer is under heavy refactoring; support for authentication will be added there after the refactoring. For now the safekeeper supports a "hardcoded" token passed via an environment variable, so that it can use the callmemaybe command on the pageserver.
Compute uses a token passed via an environment variable to communicate with the pageserver, and in the future with the safekeeper too.
JWT authentication currently supports two scopes: tenant and pageserverapi. The tenant scope is intended for tenant-related API calls, e.g. create_branch; a compute node launched for a particular tenant also uses this scope. The pageserverapi scope is intended to be used by the console to manage the pageserver. For now there is only one management operation: create tenant.
Examples for token generation in python:
```python
# generate pageserverapi token
management_token = jwt.encode({"scope": "pageserverapi"}, auth_keys.priv, algorithm="RS256")
# generate tenant token
tenant_token = jwt.encode({"scope": "tenant", "tenant_id": ps.initial_tenant}, auth_keys.priv, algorithm="RS256")
```
Utility functions for working with JWTs in Rust are located in zenith_utils/src/auth.rs.
View File
@@ -1,202 +0,0 @@
1. Add t_cid to XLOG record
- Why?
The cmin/cmax on a heap page is a real bummer. I don't see any other way to fix that than bite the bullet and modify the WAL-logging routine to include the cmin/cmax.
To recap, the problem is that the XLOG_HEAP_INSERT record does not include the command id of the inserted row, and the same goes for deletion/update. So in the primary, a row is inserted with the current xmin + cmin. But in the replica, the cmin is always set to 1. That works, because the command id is only relevant to the inserting transaction itself. After commit/abort, no one cares about it anymore.
- Alternatives?
I don't know
2. Add PD_WAL_LOGGED.
- Why?
Postgres sometimes writes data to a page before it is WAL-logged. If such a page is swapped out, we will lose this change. The problem is currently solved by setting the PD_WAL_LOGGED bit in the page header. When a page without this bit set is written to the SMGR, it is forced into the WAL as an FPI using the log_newpage_copy() function.
There was a wrong assumption that this can happen only during construction of some exotic indexes (like GiST). That is not true: the same situation can happen with COPY, VACUUM, and when hint bits are set on a record.
- Discussion:
https://discord.com/channels/869525774699462656/882681420986851359
- Alternatives:
Do not store this flag in the page header, but associate the bit with the shared buffer instead. Logically that is more correct, but in practice it gives no advantage in either space or CPU overhead.
3. XLogReadBufferForRedo does not always load and pin the requested buffer, so we need to add extra checks that the buffer is really pinned. Also, do not use BufferGetBlockNumber on a buffer returned by XLogReadBufferForRedo.
- Why?
XLogReadBufferForRedo does not pin pages that were not requested by wal-redo. This is specific to wal-redo Postgres.
- Alternatives?
No
4. Eliminate reporting of some warnings related to hint bits, for example
"page is not marked all-visible but visibility map bit is set in relation".
- Why?
Hint bits may not be WAL-logged.
- Alternative?
Always WAL-log any page changes.
5. Maintain last written LSN.
- Why?
When a compute node requests a page from the page server, we need to specify an LSN. Ideally it should be the LSN
of the WAL record that last updated this page. But we do not know it, because we do not have the page.
We could use the current WAL flush position, but then there is a high probability that the page server
will block until that piece of WAL is delivered.
As a better approximation we keep the max LSN of written pages. It would be better to take into account only the LSNs of evicted pages,
but the SMGR API does not provide that knowledge.
- Alternatives?
Maintain map of LSNs of evicted pages.
6. Launching Postgres without WAL.
- Why?
According to the Zenith architecture the compute node is stateless. So when we launch a
compute node, we need to provide some dummy PG_DATADIR. Relation pages
can be requested on demand from the page server, but Postgres still needs some non-relational data:
control and configuration files, SLRUs, ...
This is currently implemented using basebackup (not to be confused with pg_basebackup), which is created
by the pageserver. The tarball includes config/control files, SLRUs and the required directories.
Since the pageserver does not have the original (non-scattered) WAL segments, it includes in
this tarball a dummy WAL segment that contains only a SHUTDOWN_CHECKPOINT record at the beginning of the segment,
whose redo field points to the end of WAL. This allows the checkpoint record to be loaded in a more or less
standard way with minimal changes to Postgres, but some special handling is still needed,
including restoring the previous record position from the zenith.signal file.
We also have to correctly initialize the header of the last WAL page (pointed to by checkpoint.redo)
to pass the checks performed by XLogReader.
- Alternatives?
We could omit the fake WAL segment from the tarball entirely and modify xlog.c to load the checkpoint record
in a special way, but that would only increase the number of changes in xlog.c.
7. Add redo_read_buffer_filter callback to XLogReadBufferForRedoExtended
- Why?
We need a way for wal-redo Postgres to ignore pages that were not requested by the pageserver.
So wal-redo Postgres reconstructs only the requested page, and for all others returns BLK_DONE,
which means that no recovery is needed for them.
- Alternatives?
No
8. Enforce WAL logging of sequence updates.
- Why?
For performance reasons Postgres does not want to log every fetch of a value from a sequence,
so it pre-logs a few fetches in advance. In the event of a crash we can lose
(skip over) as many values as were pre-logged.
But this does not work with Zenith, because the page with the sequence value can be evicted from the buffer cache,
and we would get a gap in sequence values even without a crash.
- Alternatives:
Do not try to preserve the sequential order, avoiding the performance penalty.
9. Treat unlogged tables as normal (permanent) tables.
- Why?
Unlogged tables are not transient, so they have to survive a node restart (unlike temporary tables).
But since the compute node is stateless, we need to persist their data to the storage node,
and that can only be done through the WAL.
- Alternatives?
* Store unlogged tables locally (violates the requirement of stateless compute nodes).
* Prohibit unlogged tables altogether.
10. Support starting Postgres in wal-redo mode
- Why?
To be able to apply WAL records and reconstruct pages at the page server.
- Alternatives?
* Rewrite redo handlers in Rust
* Do not reconstruct pages at the page server at all, and do it at the compute node instead.
11. WAL proposer
- Why?
The WAL proposer communicates with the safekeepers and ensures WAL durability via quorum writes.
It is currently implemented as a patch to the standard WAL sender.
- Alternatives?
Could be moved to an extension if some extra callbacks are added to the walsender code.
12. Secure Computing BPF API wrapper.
- Why?
Pageserver delegates complex WAL decoding duties to Postgres,
which means that the latter might fall victim to carefully designed
malicious WAL records and start doing harmful things to the system.
To prevent this, it has been decided to limit possible interactions
with the outside world using the Secure Computing BPF mode.
- Alternatives:
* Rewrite redo handlers in Rust.
* Add more checks to guarantee correctness of WAL records.
* Move seccomp.c to extension
* Many other discussed approaches to neutralize incorrect WAL records vulnerabilities.
13. Callbacks for replica feedbacks
- Why?
Allowing the walproposer to interact with the walsender code.
- Alternatives
Copy walsender code to walproposer.
14. Support multiple SMGR implementations.
- Why?
Postgres provides an abstract storage manager API, but it has only one implementation
and no way to replace it with a custom storage manager.
- Alternatives?
None.
15. Calculate database size as sum of all database relations.
- Why?
Postgres calculates database size by traversing the data directory,
but since the Zenith compute node is stateless we cannot do that.
- Alternatives?
Send this request directly to the pageserver and calculate the real (physical) size
of the Zenith representation of the database/timeline, rather than summing the logical sizes of all relations.
-----------------------------------------------
Not currently committed but proposed:
1. Disable ring buffer buffer manager strategies
- Why?
Postgres tries to prevent bulk operations (COPY, seqscan, vacuum, ...) from flushing the buffer cache,
so pages may be evicted even if there is free space in the buffer cache.
The negative effect of this can be somewhat compensated by the file system cache, but in Zenith
the cost of requesting a page from the page server is much higher.
- Alternatives?
Instead of simply prohibiting the ring buffer, we could implement a more flexible eviction policy,
for example copying an evicted page from the ring buffer to some other buffer if there is free space
in the buffer cache.
2. Disable marking page as dirty when hint bits are set.
- Why?
Postgres has to modify a page twice: first when some tuple is updated, and a second time when
hint bits are set. WAL-logging hint bit updates requires an FPI, which significantly increases the size of the WAL.
- Alternatives?
Add special WAL record for setting page hints.
3. Prefetching
- Why?
Since pages in Zenith are loaded on demand, we need some mechanism for bulk loading
to reduce node startup time, speed up some massive queries, and cut down
the page-request round-trip overhead.
Currently Postgres supports prefetching only for bitmap scans.
In Zenith we also use prefetching for sequential and index scans. For a sequential scan we prefetch
some number of following pages. For an index scan we prefetch the heap relation pages addressed by the TIDs.
4. Prewarming.
- Why?
Short downtime (in other words, fast compute node restart) is one of the key features of Zenith.
But the overhead of request-response round-trips for loading pages on demand can make warm-up of a freshly started node quite slow.
We can capture the state of the compute node's buffer cache and send a bulk request for those pages at startup.
View File
@@ -1,38 +0,0 @@
# Docker images of Zenith
## Images
Currently we build two main images:
- [zenithdb/zenith](https://hub.docker.com/repository/docker/zenithdb/zenith) — image with pre-built `pageserver`, `safekeeper` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile).
- [zenithdb/compute-node](https://hub.docker.com/repository/docker/zenithdb/compute-node) — compute node image with pre-built Postgres binaries from [zenithdb/postgres](https://github.com/zenithdb/postgres).
And two intermediate images used either to reduce build time or to deliver some additional binary tools from other repos:
- [zenithdb/build](https://hub.docker.com/repository/docker/zenithdb/build) — image with all the dependencies required to build Zenith and compute node images. This image is based on `rust:slim-buster`, so it also has a proper `rust` environment. Built from [/Dockerfile.build](/Dockerfile.build).
- [zenithdb/compute-tools](https://hub.docker.com/repository/docker/zenithdb/compute-tools) — compute node configuration management tools.
## Building pipeline
1. Image `zenithdb/compute-tools` is re-built automatically.
2. Image `zenithdb/build` is built manually. If you want to introduce any new compile-time dependencies to Zenith or the compute node, you have to update this image as well, build it, and push it to Docker Hub.
Build:
```sh
docker build -t zenithdb/build:buster -f Dockerfile.build .
```
Login:
```sh
docker login
```
Push to Docker Hub:
```sh
docker push zenithdb/build:buster
```
3. Image `zenithdb/compute-node` is built independently in the [zenithdb/postgres](https://github.com/zenithdb/postgres) repo.
4. Image `zenithdb/zenith` is built in this repo after a successful `release` tests run and pushed to Docker Hub automatically.
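As an illustration only, the resulting `zenithdb/zenith` image can be run locally along these lines; the tag, volume name and port mappings below are assumptions rather than anything specified in this repository:
```sh
# Sketch: run the pageserver from the zenithdb/zenith image, keeping /data in a named volume.
# The entrypoint script initializes /data on first start and listens on 6400 (pg) and 9898 (http).
docker run --name pageserver \
  -v zenith-data:/data \
  -p 6400:6400 -p 9898:9898 \
  zenithdb/zenith pageserver
```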
View File
@@ -1,218 +0,0 @@
# Glossary
### Authentication
### Base image (page image)
### Basebackup
A tarball with files needed to bootstrap a compute node[] and a corresponding command to create it.
NOTE: It has nothing to do with PostgreSQL pg_basebackup.
### Branch
We can create a branch at a certain LSN using the `zenith branch` command.
Each branch lives in a corresponding timeline[] and has an ancestor[].
### Checkpoint (PostgreSQL)
NOTE: This is an overloaded term.
A checkpoint record in the WAL marks a point in the WAL sequence at which it is guaranteed that all data files have been updated with all information from shared memory modified before that checkpoint.
### Checkpoint (Layered repository)
NOTE: This is an overloaded term.
Whenever enough WAL has been accumulated in memory, the page server []
writes out the changes from in-memory layers into new layer files[]. This process
is called "checkpointing". The page server only creates layer files for
relations that have been modified since the last checkpoint.
Configuration parameter `checkpoint_distance` defines the distance
from current LSN to perform checkpoint of in-memory layers.
Default is `DEFAULT_CHECKPOINT_DISTANCE`.
Set this parameter to `0` to force checkpoint of every layer.
Configuration parameter `checkpoint_period` defines the interval between checkpoint iterations.
Default is `DEFAULT_CHECKPOINT_PERIOD`.
### Compute node
Stateless Postgres node that stores data in pageserver.
### Garbage collection
The process of removing old on-disk layers that are not needed by any timeline anymore.
### Fork
Each of the separate segmented file sets in which a relation is stored. The main fork is where the actual data resides. There also exist two secondary forks for metadata: the free space map and the visibility map.
Each PostgreSQL fork is considered a separate relish.
### Layer
Each layer corresponds to the specific version of a relish Segment in a range of LSNs.
There are two kinds of layers, in-memory and on-disk layers. In-memory
layers are used to ingest incoming WAL, and provide fast access
to the recent page versions. On-disk layers are stored as files on disk, and
are immutable.
### Layer file (on-disk layer)
Layered repository on-disk format is based on immutable files. The
files are called "layer files". Each file corresponds to one RELISH_SEG_SIZE
segment of a PostgreSQL relation fork. There are two kinds of layer
files: image files and delta files. An image file contains a
"snapshot" of the segment at a particular LSN, and a delta file
contains WAL records applicable to the segment, in a range of LSNs.
### Layer map
The layer map tracks what layers exist for all the relishes in a timeline.
### Layered repository
Zenith repository implementation that keeps data in layers.
### LSN
### Page (block)
The basic structure used to store relation data. All pages are of the same size.
This is the unit of data exchange between compute node and pageserver.
### Pageserver
Zenith storage engine: repositories + wal receiver + page service + wal redo.
### Page service
The Page Service listens for GetPage@LSN requests from the Compute Nodes,
and responds with pages from the repository.
### PITR (Point-in-time-recovery)
PostgreSQL's ability to restore up to a specified LSN.
### Primary node
### Proxy
Postgres protocol proxy/router.
This service listens on the psql port, can check auth via an external service
and create new databases and accounts (control plane API in our case).
### Relation
The generic term in PostgreSQL for all objects in a database that have a name and a list of attributes defined in a specific order.
### Relish
We call each relation and other file that is stored in the
repository a "relish". It comes from "rel"-ish, as in "kind of a
rel", because it covers relations as well as other things that are
not relations, but are treated similarly for the purposes of the
storage layer.
### Replication slot
### Replica node
### Repository
The repository stores multiple timelines, forked off from the same initial call to 'initdb',
and has an associated WAL redo service.
One repository corresponds to one Tenant.
### Retention policy
How much history do we need to keep around for PITR and read-only nodes?
### Segment (PostgreSQL)
NOTE: This is an overloaded term.
A physical file that stores data for a given relation. File segments are
limited in size by a compile-time setting (1 gigabyte by default), so if a
relation exceeds that size, it is split into multiple segments.
### Segment (Layered Repository)
NOTE: This is an overloaded term.
Segment is a RELISH_SEG_SIZE slice of relish (identified by a SegmentTag).
### SLRU
SLRUs include pg_clog, pg_multixact/members, and
pg_multixact/offsets. There are other SLRUs in PostgreSQL, but
they don't need to be stored permanently (e.g. pg_subtrans),
or we do not support them in zenith yet (pg_commit_ts).
Each SLRU segment is considered a separate relish[].
### Tenant (Multitenancy)
Tenant represents a single customer, interacting with Zenith.
Wal redo[] activity, timelines[], layers[] are managed for each tenant independently.
One pageserver[] can serve multiple tenants at once.
One safekeeper
See `docs/multitenancy.md` for more.
### Timeline
Timeline accepts page changes and serves get_page_at_lsn() and
get_rel_size() requests. The term "timeline" is used internally
in the system, but to users they are exposed as "branches", with
human-friendly names.
NOTE: this has nothing to do with PostgreSQL WAL timelines.
### XLOG
PostgreSQL alias for WAL[].
### WAL (Write-ahead log)
The journal that keeps track of the changes in the database cluster as user- and system-invoked operations take place. It comprises many individual WAL records[] written sequentially to WAL files[].
### WAL acceptor, WAL proposer
In the context of the consensus algorithm, the Postgres
compute node is also known as the WAL proposer, and the safekeeper is also known
as the acceptor. Those are the standard terms in the Paxos algorithm.
### WAL receiver (WAL decoder)
The WAL receiver connects to the external WAL safekeeping service (or
directly to the primary) using PostgreSQL physical streaming
replication, and continuously receives WAL. It decodes the WAL records,
and stores them to the repository.
We keep one WAL receiver active per timeline.
### WAL record
A low-level description of an individual data change.
### WAL redo
A service that runs PostgreSQL in a special wal_redo mode
to apply given WAL records over an old page image and return new page image.
### WAL safekeeper
One node that participates in the quorum. All the safekeepers
together form the WAL service.
### WAL segment (WAL file)
Also known as WAL segment or WAL segment file. Each of the sequentially-numbered files that provide storage space for WAL. The files are all of the same predefined size and are written in sequential order, interspersing changes as they occur in multiple simultaneous sessions.
### WAL service
The service as whole that ensures that WAL is stored durably.
### Web console
View File
@@ -1,59 +0,0 @@
## Multitenancy
### Overview
Zenith supports multitenancy: one pageserver can serve multiple tenants at once. Tenants can be managed via the zenith CLI. During pageserver setup a tenant can be created with ```zenith init --create-tenant```. Tenants can also be added to the system on the fly, without a pageserver restart, using the following CLI command: ```zenith tenant create```. Tenants use random identifiers which can be represented as 32-character hexadecimal strings, and `zenith tenant create` accepts the desired tenant id as an optional argument. The concept of timelines/branches works independently per tenant.
### Tenants in other commands
By default a new tenant is created on the pageserver during `zenith init`. The newly created tenant's id is saved to the CLI config, so other commands can use it automatically if no explicit `--tenantid=<tenantid>` argument is provided. The tenantid therefore appears more often in the internal pageserver interface, whose commands take a tenantid argument to determine which tenant an operation should be applied to. The CLI supports creation of new tenants.
Examples for cli:
```sh
zenith tenant list
zenith tenant create // generates new id
zenith tenant create ee6016ec31116c1b7c33dfdfca38892f
zenith pg create main // default tenant from zenith init
zenith pg create main --tenantid=ee6016ec31116c1b7c33dfdfca38892f
zenith branch --tenantid=ee6016ec31116c1b7c33dfdfca38892f
```
### Data layout
On the page server, tenants introduce one level of indirection, so the data directory is structured the following way:
```
<pageserver working directory>
├── pageserver.log
├── pageserver.pid
├── pageserver.toml
└── tenants
├── 537cffa58a4fa557e49e19951b5a9d6b
├── de182bc61fb11a5a6b390a8aed3a804a
└── ee6016ec31116c1b7c33dfdfca38891f
```
WAL redo activity and timelines are managed independently for each tenant.
The local environment (used, for example, in tests) also gains a new level of indirection for tenants: the `pgdatadirs` directory now contains a `tenants` subdirectory, so the structure looks the following way:
```
pgdatadirs
└── tenants
├── de182bc61fb11a5a6b390a8aed3a804a
│ └── main
└── ee6016ec31116c1b7c33dfdfca38892f
└── main
```
### Changes to postgres
The tenant id is passed to postgres via a GUC, the same way as the timeline. The tenant id is added to the commands issued to the pageserver, namely pagestream and callmemaybe. The tenant id also exists in the ServerInfo structure; this is needed to pass the value to the WAL receiver so it can forward it to the pageserver.
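As a rough, illustrative sketch (the path, ids and port below are placeholders; the setting names match what the control plane writes into postgresql.conf), the tenant-related GUCs of a local compute node can be inspected like this:
```sh
# Sketch: show the zenith GUCs written into a compute node's postgresql.conf.
# <tenantid> is a placeholder; 'main' is the default branch name.
grep '^zenith\.' .zenith/pgdatadirs/tenants/<tenantid>/main/postgresql.conf
# zenith.zenith_tenant='<tenantid>'
# zenith.zenith_timeline='<timelineid>'
# zenith.page_server_connstring='host=127.0.0.1 port=<pageserver pg port> password=...'
```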
### Safety
For now a particular tenant can only appear on a particular pageserver. The set of safekeepers is also pinned to a particular (tenantid, timeline) pair, so there can only be one writer for a given (tenantid, timeline).
View File
@@ -1,81 +0,0 @@
## Source tree layout
Below you will find a brief overview of each subdir in the source tree in alphabetical order.
`/control_plane`:
Local control plane.
Functions to start, configure and stop pageserver and postgres instances running as local processes.
Intended to be used in integration tests and in CLI tools for local installations.
`/docs`:
Documentation of the Zenith features and concepts.
Now it is mostly dev documentation.
`/monitoring`:
TODO
`/pageserver`:
Zenith storage service.
The pageserver has a few different duties:
- Store and manage the data.
- Generate a tarball with files needed to bootstrap ComputeNode.
- Respond to GetPage@LSN requests from the Compute Nodes.
- Receive WAL from the WAL service and decode it.
- Replay WAL that's applicable to the chunks that the Page Server maintains
For more detailed info, see `/pageserver/README`
`/postgres_ffi`:
Utility functions for interacting with PostgreSQL file formats.
Misc constants, copied from PostgreSQL headers.
`/proxy`:
Postgres protocol proxy/router.
This service listens on the psql port, can check auth via an external service
and create new databases and accounts (control plane API in our case).
`/test_runner`:
Integration tests, written in Python using the `pytest` framework.
`/vendor/postgres`:
PostgreSQL source tree, with the modifications needed for Zenith.
`/vendor/postgres/contrib/zenith`:
PostgreSQL extension that implements storage manager API and network communications with remote page server.
`/vendor/postgres/contrib/zenith_test_utils`:
PostgreSQL extension that contains functions needed for testing and debugging.
`/walkeeper`:
The zenith WAL service that receives WAL from primary compute nodes and streams it to the pageserver.
It acts as a holding area and redistribution center for recently generated WAL.
For more detailed info, see `/walkeeper/README`
`/workspace_hack`:
The workspace_hack crate exists only to pin down some dependencies.
`/zenith`:
Main entry point for the 'zenith' CLI utility.
TODO: Doesn't it belong to control_plane?
`/zenith_metrics`:
Helpers for exposing Prometheus metrics from the server.
`/zenith_utils`:
Helpers that are shared between other crates in this repository.

View File

@@ -0,0 +1,16 @@
[package]
name = "integration_tests"
version = "0.1.0"
authors = ["Stas Kelvich <stas@zenith.tech>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
lazy_static = "1.4.0"
rand = "0.8.3"
postgres = { git = "https://github.com/kelvich/rust-postgres", branch = "replication_rebase" }
tokio-postgres = { git = "https://github.com/kelvich/rust-postgres", branch = "replication_rebase" }
pageserver = { path = "../pageserver" }
walkeeper = { path = "../walkeeper" }

View File

@@ -0,0 +1,844 @@
//
// Local control plane.
//
// Can start, configure and stop postgres instances running as local processes.
//
// Intended to be used in integration tests and in CLI tools for
// local installations.
//
use std::fs::File;
use std::fs::{self, OpenOptions};
use std::path::{Path, PathBuf};
use std::process::Command;
use std::str;
use std::sync::Arc;
use std::{
io::Write,
net::{IpAddr, Ipv4Addr, SocketAddr},
};
use lazy_static::lazy_static;
use postgres::{Client, NoTls};
use postgres;
lazy_static! {
// postgres will be there if it was built by 'make postgres' here in the repo
pub static ref PG_BIN_DIR : PathBuf = Path::new(env!("CARGO_MANIFEST_DIR"))
.join("../tmp_install/bin");
pub static ref PG_LIB_DIR : PathBuf = Path::new(env!("CARGO_MANIFEST_DIR"))
.join("../tmp_install/lib");
pub static ref BIN_DIR : PathBuf = cargo_bin_dir();
pub static ref TEST_WORKDIR : PathBuf = Path::new(env!("CARGO_MANIFEST_DIR"))
.join("tmp_check");
}
// Find the directory where the binaries were put (i.e. target/debug/)
pub fn cargo_bin_dir() -> PathBuf {
let mut pathbuf = std::env::current_exe().ok().unwrap();
pathbuf.pop();
if pathbuf.ends_with("deps") {
pathbuf.pop();
}
return pathbuf;
}
//
// I'm intentionally modelling storage and compute control planes as separate entities
// as it is closer to the actual setup.
//
pub struct StorageControlPlane {
pub wal_acceptors: Vec<WalAcceptorNode>,
pub page_servers: Vec<PageServerNode>,
}
impl StorageControlPlane {
// postgres <-> page_server
pub fn one_page_server(froms3: bool) -> StorageControlPlane {
let mut cplane = StorageControlPlane {
wal_acceptors: Vec::new(),
page_servers: Vec::new(),
};
let pserver = PageServerNode {
page_service_addr: "127.0.0.1:65200".parse().unwrap(),
data_dir: TEST_WORKDIR.join("pageserver"),
};
pserver.init();
if froms3 {
pserver.start_froms3();
} else {
pserver.start();
}
cplane.page_servers.push(pserver);
cplane
}
pub fn fault_tolerant(redundancy: usize) -> StorageControlPlane {
let mut cplane = StorageControlPlane {
wal_acceptors: Vec::new(),
page_servers: Vec::new(),
};
const WAL_ACCEPTOR_PORT: usize = 54321;
for i in 0..redundancy {
let wal_acceptor = WalAcceptorNode {
listen: format!("127.0.0.1:{}", WAL_ACCEPTOR_PORT + i)
.parse()
.unwrap(),
data_dir: TEST_WORKDIR.join(format!("wal_acceptor_{}", i)),
};
wal_acceptor.init();
wal_acceptor.start();
cplane.wal_acceptors.push(wal_acceptor);
}
cplane
}
pub fn stop(&self) {
for wa in self.wal_acceptors.iter() {
wa.stop();
}
}
// // postgres <-> wal_acceptor x3 <-> page_server
// fn local(&mut self) -> StorageControlPlane {
// }
pub fn page_server_addr(&self) -> &SocketAddr {
&self.page_servers[0].page_service_addr
}
pub fn get_wal_acceptor_conn_info(&self) -> String {
self.wal_acceptors
.iter()
.map(|wa| wa.listen.to_string())
.collect::<Vec<String>>()
.join(",")
}
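// Run a management command (e.g. 'callmemaybe' or 'file') against the
// pageserver's libpq endpoint and return the raw query messages.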
pub fn page_server_psql(&self, sql: &str) -> Vec<postgres::SimpleQueryMessage> {
let addr = &self.page_servers[0].page_service_addr;
let connstring = format!(
"host={} port={} dbname={} user={}",
addr.ip(),
addr.port(),
"no_db",
"no_user",
);
let mut client = Client::connect(connstring.as_str(), NoTls).unwrap();
println!("Pageserver query: '{}'", sql);
client.simple_query(sql).unwrap()
}
}
impl Drop for StorageControlPlane {
fn drop(&mut self) {
self.stop();
}
}
pub struct PageServerNode {
page_service_addr: SocketAddr,
data_dir: PathBuf,
}
impl PageServerNode {
// TODO: method to force redo on a specific relation
// TODO: make wal-redo-postgres workable without data directory?
pub fn init(&self) {
fs::create_dir_all(self.data_dir.clone()).unwrap();
let datadir_path = self.data_dir.join("wal_redo_pgdata");
fs::remove_dir_all(datadir_path.to_str().unwrap()).ok();
let initdb = Command::new(PG_BIN_DIR.join("initdb"))
.args(&["-D", datadir_path.to_str().unwrap()])
.arg("-N")
.arg("--no-instructions")
.env_clear()
.env("LD_LIBRARY_PATH", PG_LIB_DIR.to_str().unwrap())
.status()
.expect("failed to execute initdb");
if !initdb.success() {
panic!("initdb failed");
}
}
pub fn start(&self) {
println!("Starting pageserver at '{}'", self.page_service_addr);
let status = Command::new(BIN_DIR.join("pageserver"))
.args(&["-D", self.data_dir.to_str().unwrap()])
.args(&["-l", self.page_service_addr.to_string().as_str()])
.arg("-d")
.arg("--skip-recovery")
.env_clear()
.env("PATH", PG_BIN_DIR.to_str().unwrap()) // path to postres-wal-redo binary
.status()
.expect("failed to start pageserver");
if !status.success() {
panic!("pageserver start failed");
}
}
pub fn start_froms3(&self) {
println!("Starting pageserver at '{}'", self.page_service_addr);
let status = Command::new(BIN_DIR.join("pageserver"))
.args(&["-D", self.data_dir.to_str().unwrap()])
.args(&["-l", self.page_service_addr.to_string().as_str()])
.arg("-d")
.env_clear()
.env("PATH", PG_BIN_DIR.to_str().unwrap()) // path to postres-wal-redo binary
.env("S3_ENDPOINT", "https://127.0.0.1:9000")
.env("S3_REGION", "us-east-1")
.env("S3_ACCESSKEY", "minioadmin")
.env("S3_SECRET", "minioadmin")
.status()
.expect("failed to start pageserver");
if !status.success() {
panic!("pageserver start failed");
}
}
pub fn stop(&self) {
let pidfile = self.data_dir.join("pageserver.pid");
let pid = fs::read_to_string(pidfile).unwrap();
let status = Command::new("kill")
.arg(pid)
.env_clear()
.status()
.expect("failed to execute kill");
if !status.success() {
panic!("kill start failed");
}
}
}
impl Drop for PageServerNode {
fn drop(&mut self) {
self.stop();
// fs::remove_dir_all(self.data_dir.clone()).unwrap();
}
}
pub struct WalAcceptorNode {
listen: SocketAddr,
data_dir: PathBuf,
}
impl WalAcceptorNode {
pub fn init(&self) {
if self.data_dir.exists() {
fs::remove_dir_all(self.data_dir.clone()).unwrap();
}
fs::create_dir_all(self.data_dir.clone()).unwrap();
}
pub fn start(&self) {
println!(
"Starting wal_acceptor in {} listening '{}'",
self.data_dir.to_str().unwrap(),
self.listen
);
let status = Command::new(BIN_DIR.join("wal_acceptor"))
.args(&["-D", self.data_dir.to_str().unwrap()])
.args(&["-l", self.listen.to_string().as_str()])
.arg("-d")
.arg("-n")
.status()
.expect("failed to start wal_acceptor");
if !status.success() {
panic!("wal_acceptor start failed");
}
}
pub fn stop(&self) {
let pidfile = self.data_dir.join("wal_acceptor.pid");
if let Ok(pid) = fs::read_to_string(pidfile) {
let _status = Command::new("kill")
.arg(pid)
.env_clear()
.status()
.expect("failed to execute kill");
}
}
}
impl Drop for WalAcceptorNode {
fn drop(&mut self) {
self.stop();
// fs::remove_dir_all(self.data_dir.clone()).unwrap();
}
}
///////////////////////////////////////////////////////////////////////////////
//
// ComputeControlPlane
//
pub struct ComputeControlPlane<'a> {
pg_bin_dir: PathBuf,
work_dir: PathBuf,
last_assigned_port: u16,
storage_cplane: &'a StorageControlPlane,
nodes: Vec<Arc<PostgresNode>>,
}
impl ComputeControlPlane<'_> {
pub fn local(storage_cplane: &StorageControlPlane) -> ComputeControlPlane {
ComputeControlPlane {
pg_bin_dir: PG_BIN_DIR.to_path_buf(),
work_dir: TEST_WORKDIR.to_path_buf(),
last_assigned_port: 65431,
storage_cplane: storage_cplane,
nodes: Vec::new(),
}
}
// TODO: check port availability and
fn get_port(&mut self) -> u16 {
let port = self.last_assigned_port + 1;
self.last_assigned_port += 1;
port
}
pub fn new_vanilla_node<'a>(&mut self) -> &Arc<PostgresNode> {
// allocate new node entry with generated port
let node_id = self.nodes.len() + 1;
let node = PostgresNode {
_node_id: node_id,
port: self.get_port(),
ip: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
pgdata: self.work_dir.join(format!("compute/pg{}", node_id)),
pg_bin_dir: self.pg_bin_dir.clone(),
};
self.nodes.push(Arc::new(node));
let node = self.nodes.last().unwrap();
// initialize data directory
fs::remove_dir_all(node.pgdata.to_str().unwrap()).ok();
let initdb_path = self.pg_bin_dir.join("initdb");
println!("initdb_path: {}", initdb_path.to_str().unwrap());
let initdb = Command::new(initdb_path)
.args(&["-D", node.pgdata.to_str().unwrap()])
.arg("-N")
.arg("--no-instructions")
.env_clear()
.env("LD_LIBRARY_PATH", PG_LIB_DIR.to_str().unwrap())
.status()
.expect("failed to execute initdb");
if !initdb.success() {
panic!("initdb failed");
}
// // allow local replication connections
// node.append_conf("pg_hba.conf", format!("\
// host replication all {}/32 sspi include_realm=1 map=regress\n\
// ", node.ip).as_str());
// listen for selected port
node.append_conf(
"postgresql.conf",
format!(
"\
max_wal_senders = 10\n\
max_replication_slots = 10\n\
hot_standby = on\n\
shared_buffers = 1MB\n\
max_connections = 100\n\
wal_level = replica\n\
listen_addresses = '{address}'\n\
port = {port}\n\
",
address = node.ip,
port = node.port
)
.as_str(),
);
node
}
// Init compute node without files, only datadir structure
// use initdb --compute-node flag and GUC 'computenode_mode'
// to distinguish the node
pub fn new_minimal_node<'a>(&mut self) -> &Arc<PostgresNode> {
// allocate new node entry with generated port
let node_id = self.nodes.len() + 1;
let node = PostgresNode {
_node_id: node_id,
port: self.get_port(),
ip: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
pgdata: self.work_dir.join(format!("compute/pg{}", node_id)),
pg_bin_dir: self.pg_bin_dir.clone(),
};
self.nodes.push(Arc::new(node));
let node = self.nodes.last().unwrap();
// initialize data directory
fs::remove_dir_all(node.pgdata.to_str().unwrap()).ok();
let initdb_path = self.pg_bin_dir.join("initdb");
println!("initdb_path: {}", initdb_path.to_str().unwrap());
let initdb = Command::new(initdb_path)
.args(&["-D", node.pgdata.to_str().unwrap()])
.arg("-N")
.arg("--no-instructions")
.arg("--compute-node")
.env_clear()
.env("LD_LIBRARY_PATH", PG_LIB_DIR.to_str().unwrap())
.status()
.expect("failed to execute initdb");
if !initdb.success() {
panic!("initdb failed");
}
// // allow local replication connections
// node.append_conf("pg_hba.conf", format!("\
// host replication all {}/32 sspi include_realm=1 map=regress\n\
// ", node.ip).as_str());
// listen for selected port
node.append_conf(
"postgresql.conf",
format!(
"\
max_wal_senders = 10\n\
max_replication_slots = 10\n\
hot_standby = on\n\
shared_buffers = 1MB\n\
max_connections = 100\n\
wal_level = replica\n\
listen_addresses = '{address}'\n\
port = {port}\n\
computenode_mode = true\n\
",
address = node.ip,
port = node.port
)
.as_str(),
);
node
}
pub fn new_node_wo_data(&mut self) -> Arc<PostgresNode> {
let storage_cplane = self.storage_cplane;
let node = self.new_minimal_node();
let pserver = storage_cplane.page_server_addr();
// Configure that node to take pages from pageserver
node.append_conf(
"postgresql.conf",
format!(
"\
page_server_connstring = 'host={} port={}'\n\
",
pserver.ip(),
pserver.port()
)
.as_str(),
);
node.clone()
}
pub fn new_node(&mut self) -> Arc<PostgresNode> {
let storage_cplane = self.storage_cplane;
let node = self.new_vanilla_node();
let pserver = storage_cplane.page_server_addr();
// Configure that node to take pages from pageserver
node.append_conf(
"postgresql.conf",
format!(
"\
page_server_connstring = 'host={} port={}'\n\
",
pserver.ip(),
pserver.port()
)
.as_str(),
);
node.clone()
}
pub fn new_master_node(&mut self) -> Arc<PostgresNode> {
let node = self.new_vanilla_node();
node.append_conf(
"postgresql.conf",
"synchronous_standby_names = 'safekeeper_proxy'\n\
",
);
node.clone()
}
}
///////////////////////////////////////////////////////////////////////////////
pub struct WalProposerNode {
pid: u32,
}
impl WalProposerNode {
pub fn stop(&self) {
let status = Command::new("kill")
.arg(self.pid.to_string())
.env_clear()
.status()
.expect("failed to execute kill");
if !status.success() {
panic!("kill start failed");
}
}
}
impl Drop for WalProposerNode {
fn drop(&mut self) {
self.stop();
}
}
///////////////////////////////////////////////////////////////////////////////
pub struct PostgresNode {
_node_id: usize,
pub port: u16,
pub ip: IpAddr,
pgdata: PathBuf,
pg_bin_dir: PathBuf,
}
impl PostgresNode {
pub fn append_conf(&self, config: &str, opts: &str) {
OpenOptions::new()
.append(true)
.open(self.pgdata.join(config).to_str().unwrap())
.unwrap()
.write_all(opts.as_bytes())
.unwrap();
}
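// Run pg_ctl for this node's data directory, sending server output to <pgdata>/log.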
fn pg_ctl(&self, args: &[&str], check_ok: bool) {
let pg_ctl_path = self.pg_bin_dir.join("pg_ctl");
let pg_ctl = Command::new(pg_ctl_path)
.args(
[
&[
"-D",
self.pgdata.to_str().unwrap(),
"-l",
self.pgdata.join("log").to_str().unwrap(),
],
args,
]
.concat(),
)
.env_clear()
.env("LD_LIBRARY_PATH", PG_LIB_DIR.to_str().unwrap())
.status()
.expect("failed to execute pg_ctl");
if check_ok && !pg_ctl.success() {
panic!("pg_ctl failed");
}
}
pub fn start(&self, storage_cplane: &StorageControlPlane) {
if storage_cplane.page_servers.len() != 0 {
let _res =
storage_cplane.page_server_psql(format!("callmemaybe {}", self.connstr()).as_str());
}
println!("Starting postgres node at '{}'", self.connstr());
self.pg_ctl(&["start"], true);
}
pub fn restart(&self) {
self.pg_ctl(&["restart"], true);
}
pub fn stop(&self) {
self.pg_ctl(&["-m", "immediate", "stop"], true);
}
pub fn connstr(&self) -> String {
format!("host={} port={} user={}", self.ip, self.port, self.whoami())
}
// XXX: cache that in control plane
pub fn whoami(&self) -> String {
let output = Command::new("whoami")
.output()
.expect("failed to execute whoami");
if !output.status.success() {
panic!("whoami failed");
}
String::from_utf8(output.stdout).unwrap().trim().to_string()
}
pub fn safe_psql(&self, db: &str, sql: &str) -> Vec<tokio_postgres::Row> {
let connstring = format!(
"host={} port={} dbname={} user={}",
self.ip,
self.port,
db,
self.whoami()
);
let mut client = Client::connect(connstring.as_str(), NoTls).unwrap();
println!("Running {}", sql);
client.query(sql, &[]).unwrap()
}
pub fn open_psql(&self, db: &str) -> Client {
let connstring = format!(
"host={} port={} dbname={} user={}",
self.ip,
self.port,
db,
self.whoami()
);
Client::connect(connstring.as_str(), NoTls).unwrap()
}
pub fn get_pgdata(&self) -> Option<&str> {
self.pgdata.to_str()
}
// Request from pageserver stub controlfile, respective xlog
// and a bunch of files needed to start computenode
//
// NOTE this "file" request is a crutch.
// It asks pageserver to write requested page to the provided filepath
// and thus only works locally.
// TODO receive pages via some libpq protocol.
// The problem I've met is that nonrelfiles are not valid utf8 and cannot be
// handled by simple_query(), which expects text.
// And regular query() uses prepared queries.
// TODO pass sysid as parameter
pub fn setup_compute_node(&self, sysid: u64, storage_cplane: &StorageControlPlane) {
let mut query;
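// Each request below uses the 'file' command format:
// file <destination path>,<sysid>,<tablespace>,<dboid>,<reloid>,<forknum>,<blkno>,<lsn>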
//Request pg_control from pageserver
query = format!(
"file {}/global/pg_control,{},{},{},{},{},{},{}",
self.pgdata.to_str().unwrap(),
sysid as u64, //sysid
1664, //tablespace
0, //dboid
0, //reloid
42, //forknum pg_control
0, //blkno
0 //lsn
);
storage_cplane.page_server_psql(query.as_str());
//Request pg_xact and pg_multixact from pageserver
//We need them for initial pageserver startup and authentication
//TODO figure out which block number we really need
query = format!(
"file {}/pg_xact/0000,{},{},{},{},{},{},{}",
self.pgdata.to_str().unwrap(),
sysid as u64, //sysid
0, //tablespace
0, //dboid
0, //reloid
44, //forknum
0, //blkno
0 //lsn
);
storage_cplane.page_server_psql(query.as_str());
query = format!(
"file {}/pg_multixact/offsets/0000,{},{},{},{},{},{},{}",
self.pgdata.to_str().unwrap(),
sysid as u64, //sysid
0, //tablespace
0, //dboid
0, //reloid
45, //forknum
0, //blkno
0 //lsn
);
storage_cplane.page_server_psql(query.as_str());
query = format!(
"file {}/pg_multixact/members/0000,{},{},{},{},{},{},{}",
self.pgdata.to_str().unwrap(),
sysid as u64, //sysid
0, //tablespace
0, //dboid
0, //reloid
46, //forknum
0, //blkno
0 //lsn
);
storage_cplane.page_server_psql(query.as_str());
//Request a few shared catalogs needed for authentication
//Without them we cannot setup connection with pageserver to request further pages
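// 1260 = pg_authid, 1261 = pg_auth_members, 1262 = pg_database, 2396 = pg_shdescription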
let reloids = [1260, 1261, 1262, 2396];
for reloid in reloids.iter() {
//FIXME request all blocks from file, not just 10
for blkno in 0..10 {
query = format!(
"file {}/global/{},{},{},{},{},{},{},{}",
self.pgdata.to_str().unwrap(),
reloid, //use it as the filename
sysid as u64, //sysid
1664, //tablespace
0, //dboid
reloid, //reloid
0, //forknum
blkno, //blkno
0 //lsn
);
storage_cplane.page_server_psql(query.as_str());
}
}
fs::create_dir(format!("{}/base/13006", self.pgdata.to_str().unwrap())).unwrap();
fs::create_dir(format!("{}/base/13007", self.pgdata.to_str().unwrap())).unwrap();
//FIXME figure out what wal file we need to successfully start
let walfilepath = format!(
"{}/pg_wal/000000010000000000000001",
self.pgdata.to_str().unwrap()
);
fs::copy(
"/home/anastasia/zenith/zenith/tmp_check/pgdata/pg_wal/000000010000000000000001",
walfilepath,
)
.unwrap();
println!("before resetwal ");
let pg_resetwal_path = self.pg_bin_dir.join("pg_resetwal");
// Now it does nothing, just prints existing content of pg_control.
// TODO update values with most recent lsn, xid, oid requested from pageserver
let pg_resetwal = Command::new(pg_resetwal_path)
.args(&["-D", self.pgdata.to_str().unwrap()])
.arg("-n") //dry run
//.arg("-f")
//.args(&["--next-transaction-id", "100500"])
//.args(&["--next-oid", "17000"])
//.args(&["--next-transaction-id", "100500"])
.status()
.expect("failed to execute pg_resetwal");
if !pg_resetwal.success() {
panic!("pg_resetwal failed");
}
println!("setup done");
}
pub fn start_proxy(&self, wal_acceptors: String) -> WalProposerNode {
let proxy_path = PG_BIN_DIR.join("safekeeper_proxy");
match Command::new(proxy_path.as_path())
.args(&["-s", &wal_acceptors])
.args(&["-h", &self.ip.to_string()])
.args(&["-p", &self.port.to_string()])
.arg("-v")
.stderr(File::create(TEST_WORKDIR.join("safekeeper_proxy.log")).unwrap())
.spawn()
{
Ok(child) => WalProposerNode { pid: child.id() },
Err(e) => panic!("Failed to launch {:?}: {}", proxy_path, e),
}
}
pub fn push_to_s3(&self) {
println!("Push to s3 node at '{}'", self.pgdata.to_str().unwrap());
let zenith_push_path = self.pg_bin_dir.join("zenith_push");
println!("zenith_push_path: {}", zenith_push_path.to_str().unwrap());
let status = Command::new(zenith_push_path)
.args(&["-D", self.pgdata.to_str().unwrap()])
.env_clear()
.env("S3_ENDPOINT", "https://127.0.0.1:9000")
.env("S3_REGION", "us-east-1")
.env("S3_ACCESSKEY", "minioadmin")
.env("S3_SECRET", "minioadmin")
// .env("S3_BUCKET", "zenith-testbucket")
.status()
.expect("failed to push node to s3");
if !status.success() {
panic!("zenith_push failed");
}
}
// TODO
pub fn pg_bench() {}
pub fn pg_regress() {}
}
impl Drop for PostgresNode {
// destructor to clean up state after test is done
// XXX: we may detect failed test by setting some flag in catch_unwind()
// and checking it here. But let's just clean datadirs on start.
fn drop(&mut self) {
self.stop();
// fs::remove_dir_all(self.pgdata.clone()).unwrap();
}
}
pub fn regress_check(pg: &PostgresNode) {
pg.safe_psql("postgres", "CREATE DATABASE regression");
let regress_run_path = Path::new(env!("CARGO_MANIFEST_DIR")).join("tmp_check/regress");
fs::create_dir_all(regress_run_path.clone()).unwrap();
std::env::set_current_dir(regress_run_path).unwrap();
let regress_build_path =
Path::new(env!("CARGO_MANIFEST_DIR")).join("../tmp_install/build/src/test/regress");
let regress_src_path =
Path::new(env!("CARGO_MANIFEST_DIR")).join("../vendor/postgres/src/test/regress");
let _regress_check = Command::new(regress_build_path.join("pg_regress"))
.args(&[
"--bindir=''",
"--use-existing",
format!("--bindir={}", PG_BIN_DIR.to_str().unwrap()).as_str(),
format!("--dlpath={}", regress_build_path.to_str().unwrap()).as_str(),
format!(
"--schedule={}",
regress_src_path.join("parallel_schedule").to_str().unwrap()
)
.as_str(),
format!("--inputdir={}", regress_src_path.to_str().unwrap()).as_str(),
])
.env_clear()
.env("LD_LIBRARY_PATH", PG_LIB_DIR.to_str().unwrap())
.env("PGPORT", pg.port.to_string())
.env("PGUSER", pg.whoami())
.env("PGHOST", pg.ip.to_string())
.status()
.expect("pg_regress failed");
}

View File

@@ -0,0 +1,7 @@
// test node resettlement to an empty datadir
#[test]
fn test_resettlement() {}
// test seq scan of everything after restart
#[test]
fn test_cold_seqscan() {}

View File

@@ -0,0 +1,5 @@
#[test]
fn test_actions() {}
#[test]
fn test_regress() {}

View File

@@ -0,0 +1,210 @@
#[allow(dead_code)]
mod control_plane;
use std::thread::sleep;
use std::time::Duration;
use control_plane::ComputeControlPlane;
use control_plane::StorageControlPlane;
// XXX: force all redo at the end
// -- restart + seqscan won't read deleted stuff
// -- pageserver api endpoint to check all rels
//Handcrafted cases with wal records that are (were) problematic for redo.
#[test]
#[ignore]
fn test_redo_cases() {
// Start pageserver that reads WAL directly from that postgres
let storage_cplane = StorageControlPlane::one_page_server(false);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
// start postgres
let node = compute_cplane.new_node();
node.start(&storage_cplane);
println!("await pageserver connection...");
sleep(Duration::from_secs(3));
// check basic work with table
node.safe_psql(
"postgres",
"CREATE TABLE t(key int primary key, value text)",
);
node.safe_psql(
"postgres",
"INSERT INTO t SELECT generate_series(1,100), 'payload'",
);
let count: i64 = node
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 5050);
//check 'create table as'
node.safe_psql("postgres", "CREATE TABLE t2 AS SELECT * FROM t");
let count: i64 = node
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 5050);
}
// Runs pg_regress on a compute node
#[test]
#[ignore]
fn test_regress() {
// Start pageserver that reads WAL directly from that postgres
let storage_cplane = StorageControlPlane::one_page_server(false);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
// start postgres
let node = compute_cplane.new_node();
node.start(&storage_cplane);
println!("await pageserver connection...");
sleep(Duration::from_secs(3));
control_plane::regress_check(&node);
}
// Run two postgres instances on one pageserver
#[test]
#[ignore]
fn test_pageserver_multitenancy() {
// Start pageserver that reads WAL directly from that postgres
let storage_cplane = StorageControlPlane::one_page_server(false);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
// Allocate postgres instance, but don't start
let node1 = compute_cplane.new_node();
let node2 = compute_cplane.new_node();
node1.start(&storage_cplane);
node2.start(&storage_cplane);
// XXX: add some extension func to postgres to check walsender conn
// XXX: or better just drop that
println!("await pageserver connection...");
sleep(Duration::from_secs(3));
// check node1
node1.safe_psql(
"postgres",
"CREATE TABLE t(key int primary key, value text)",
);
node1.safe_psql(
"postgres",
"INSERT INTO t SELECT generate_series(1,100), 'payload'",
);
let count: i64 = node1
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 5050);
// check node2
node2.safe_psql(
"postgres",
"CREATE TABLE t(key int primary key, value text)",
);
node2.safe_psql(
"postgres",
"INSERT INTO t SELECT generate_series(100,200), 'payload'",
);
let count: i64 = node2
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 15150);
}
#[test]
#[ignore]
// Start pageserver using s3 base image
//
// Requires working minio with hardcoded setup:
// .env("S3_ENDPOINT", "https://127.0.0.1:9000")
// .env("S3_REGION", "us-east-1")
// .env("S3_ACCESSKEY", "minioadmin")
// .env("S3_SECRET", "minioadmin")
// .env("S3_BUCKET", "zenith-testbucket")
// TODO use env variables in test
fn test_pageserver_recovery() {
//This test expects that the image has already been uploaded to s3.
//To upload it, run zenith_push before the test (see node.push_to_s3() for details)
let storage_cplane = StorageControlPlane::one_page_server(true);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
//Wait while daemon uploads pages from s3
sleep(Duration::from_secs(15));
let node_restored = compute_cplane.new_node_wo_data();
//TODO 6947041219207877724 is a hardcoded sysid for my cluster. Get it somewhere
node_restored.setup_compute_node(6947041219207877724, &storage_cplane);
node_restored.start(&storage_cplane);
let rows = node_restored.safe_psql("postgres", "SELECT relname from pg_class;");
assert_eq!(rows.len(), 395);
}
#[test]
#[ignore]
//Scenario for future test. Not implemented yet
fn test_pageserver_node_switch() {
//Create pageserver
let storage_cplane = StorageControlPlane::one_page_server(false);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
//Create a regular node
let node = compute_cplane.new_node();
node.start(&storage_cplane);
node.safe_psql(
"postgres",
"CREATE TABLE t(key int primary key, value text)",
);
node.safe_psql(
"postgres",
"INSERT INTO t SELECT generate_series(1,100), 'payload'",
);
let count: i64 = node
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 5050);
//Push all node files to s3
//TODO upload them directly to pageserver
node.push_to_s3();
//Upload data from s3 to pageserver
//storage_cplane.upload_from_s3() //Not implemented yet
//Shut down the node
node.stop();
//Create new node without files
let node_restored = compute_cplane.new_node_wo_data();
// Setup minimal set of files needed to start node and setup pageserver connection
// TODO 6947041219207877724 is a hardcoded sysid. Get it from node
node_restored.setup_compute_node(6947041219207877724, &storage_cplane);
//Start compute node without files
node_restored.start(&storage_cplane);
//Ensure that it has the table created on the initial node
let rows = node_restored.safe_psql("postgres", "SELECT key from t;");
assert_eq!(rows.len(), 5050);
}

View File

@@ -0,0 +1,222 @@
// Restart acceptors one by one while compute is under load.
#[allow(dead_code)]
mod control_plane;
use control_plane::ComputeControlPlane;
use control_plane::StorageControlPlane;
use rand::Rng;
use std::sync::Arc;
use std::time::SystemTime;
use std::{thread, time};
#[test]
fn test_acceptors_normal_work() {
// Start pageserver that reads WAL directly from that postgres
const REDUNDANCY: usize = 3;
let storage_cplane = StorageControlPlane::fault_tolerant(REDUNDANCY);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
// start postgres
let node = compute_cplane.new_master_node();
node.start(&storage_cplane);
// start proxy
let _proxy = node.start_proxy(wal_acceptors);
// check basic work with table
node.safe_psql(
"postgres",
"CREATE TABLE t(key int primary key, value text)",
);
node.safe_psql(
"postgres",
"INSERT INTO t SELECT generate_series(1,100000), 'payload'",
);
let count: i64 = node
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 5000050000);
// check wal files equality
}
// Majority is always alive
#[test]
fn test_acceptors_restarts() {
// Start pageserver that reads WAL directly from that postgres
const REDUNDANCY: usize = 3;
const FAULT_PROBABILITY: f32 = 0.01;
let storage_cplane = StorageControlPlane::fault_tolerant(REDUNDANCY);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
let mut rng = rand::thread_rng();
// start postgres
let node = compute_cplane.new_master_node();
node.start(&storage_cplane);
// start proxy
let _proxy = node.start_proxy(wal_acceptors);
let mut failed_node: Option<usize> = None;
// check basic work with table
node.safe_psql(
"postgres",
"CREATE TABLE t(key int primary key, value text)",
);
let mut psql = node.open_psql("postgres");
for i in 1..=1000 {
psql.execute("INSERT INTO t values ($1, 'payload')", &[&i])
.unwrap();
let prob: f32 = rng.gen();
if prob <= FAULT_PROBABILITY {
if let Some(node) = failed_node {
storage_cplane.wal_acceptors[node].start();
failed_node = None;
} else {
let node: usize = rng.gen_range(0..REDUNDANCY);
failed_node = Some(node);
storage_cplane.wal_acceptors[node].stop();
}
}
}
let count: i64 = node
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 500500);
}
fn start_acceptor(cplane: &Arc<StorageControlPlane>, no: usize) {
let cp = cplane.clone();
thread::spawn(move || {
thread::sleep(time::Duration::from_secs(1));
cp.wal_acceptors[no].start();
});
}
// Stop a majority of acceptors while compute is under load. Boot
// them again and check that nothing was lost. Repeat.
// N_CRASHES env var
#[test]
fn test_acceptors_unavailability() {
// Start pageserver that reads WAL directly from that postgres
const REDUNDANCY: usize = 2;
let storage_cplane = StorageControlPlane::fault_tolerant(REDUNDANCY);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
// start postgres
let node = compute_cplane.new_master_node();
node.start(&storage_cplane);
// start proxy
let _proxy = node.start_proxy(wal_acceptors);
// check basic work with table
node.safe_psql(
"postgres",
"CREATE TABLE t(key int primary key, value text)",
);
let mut psql = node.open_psql("postgres");
psql.execute("INSERT INTO t values (1, 'payload')", &[])
.unwrap();
storage_cplane.wal_acceptors[0].stop();
let cp = Arc::new(storage_cplane);
start_acceptor(&cp, 0);
let now = SystemTime::now();
psql.execute("INSERT INTO t values (2, 'payload')", &[])
.unwrap();
assert!(now.elapsed().unwrap().as_secs() > 1);
psql.execute("INSERT INTO t values (3, 'payload')", &[])
.unwrap();
cp.wal_acceptors[1].stop();
start_acceptor(&cp, 1);
psql.execute("INSERT INTO t values (4, 'payload')", &[])
.unwrap();
assert!(now.elapsed().unwrap().as_secs() > 2);
psql.execute("INSERT INTO t values (5, 'payload')", &[])
.unwrap();
let count: i64 = node
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 15);
}
fn simulate_failures(cplane: &Arc<StorageControlPlane>) {
let mut rng = rand::thread_rng();
let n_acceptors = cplane.wal_acceptors.len();
let failure_period = time::Duration::from_secs(1);
loop {
thread::sleep(failure_period);
let mask: u32 = rng.gen_range(0..(1 << n_acceptors));
for i in 0..n_acceptors {
if (mask & (1 << i)) != 0 {
cplane.wal_acceptors[i].stop();
}
}
thread::sleep(failure_period);
for i in 0..n_acceptors {
if (mask & (1 << i)) != 0 {
cplane.wal_acceptors[i].start();
}
}
}
}
// Race condition test
#[test]
fn test_race_conditions() {
// Start pageserver that reads WAL directly from that postgres
const REDUNDANCY: usize = 3;
let storage_cplane = StorageControlPlane::fault_tolerant(REDUNDANCY);
let mut compute_cplane = ComputeControlPlane::local(&storage_cplane);
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
// start postgres
let node = compute_cplane.new_master_node();
node.start(&storage_cplane);
// start proxy
let _proxy = node.start_proxy(wal_acceptors);
// check basic work with table
node.safe_psql(
"postgres",
"CREATE TABLE t(key int primary key, value text)",
);
let cplane = Arc::new(storage_cplane);
let cp = cplane.clone();
thread::spawn(move || {
simulate_failures(&cp);
});
let mut psql = node.open_psql("postgres");
for i in 1..=1000 {
psql.execute("INSERT INTO t values ($1, 'payload')", &[&i])
.unwrap();
}
let count: i64 = node
.safe_psql("postgres", "SELECT sum(key) FROM t")
.first()
.unwrap()
.get(0);
println!("sum = {}", count);
assert_eq!(count, 500500);
cplane.stop();
}

23
mgmt-console/.gitignore vendored Normal file
View File

@@ -0,0 +1,23 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
# testing
/coverage
# production
/build
# misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*

55
mgmt-console/README Normal file
View File

@@ -0,0 +1,55 @@
Mock implementation of a management console.
See demo-howto.txt for usage.
Building and Installation
-------------------------
To compile Postgres:
sudo apt build-dep postgresql
sudo apt install bison flex libz-dev libssl-dev
sudo apt install ccache
sudo apt install libcurl4-openssl-dev libxml2-dev
For the webapp:
# NOTE: This requires at least version 1.1.0 of python3-flask. That's not
# available in Debian Buster, need at least Bullseye.
sudo apt install python3 python3-flask python3-pip npm webpack
pip3 install Flask-BasicAuth
pip3 install boto3
Git clone, compile and install the patched version of Postgres:
git clone https://github.com/libzenith/postgres.git
cd postgres
git checkout zenith-experiments
./configure --enable-debug --enable-cassert --with-openssl --prefix=/home/heikki/pgsql-install --with-libxml CC="ccache gcc" CFLAGS="-O0"
make -j4 -s install
Get the webapp:
cd ~
git clone https://github.com/libzenith/zenith-mgmt-console.git
cd zenith-mgmt-console
mkdir pgdatadirs
openssl req -new -x509 -days 365 -nodes -text -out server.crt \
-keyout server.key -subj "/CN=zenith-demo"
For Mock S3 server (unless you want to test against a real cloud service):
sudo apt install python3-tornado
cd ~/zenith-mgmt-console
git clone https://github.com/hlinnaka/ms3.git
Compile & run it:
npm install
webpack # compile React app
BASIC_AUTH_PASSWORD=<password> ./launch-local.sh
You can view the contents of the S3 bucket with a browser:
http://<server>/list_bucket

340
mgmt-console/app.py Normal file
View File

@@ -0,0 +1,340 @@
from flask import request
from flask_basicauth import BasicAuth
from flask import render_template
from subprocess import PIPE, STDOUT, run, Popen
import html
import os
import re
import shutil
import logging
import time
import boto3
from boto3.session import Session
from botocore.client import Config
from botocore.handlers import set_list_objects_encoding_type_url
from flask import Flask
import waldump
app = Flask(__name__)
app.config['BASIC_AUTH_USERNAME'] = 'zenith'
app.config['BASIC_AUTH_PASSWORD'] = os.getenv('BASIC_AUTH_PASSWORD')
app.config['BASIC_AUTH_FORCE'] = True
basic_auth = BasicAuth(app)
# S3 configuration:
ENDPOINT = os.getenv('S3_ENDPOINT', 'https://localhost:9000')
ACCESS_KEY = os.getenv('S3_ACCESSKEY', 'minioadmin')
SECRET = os.getenv('S3_SECRET', '')
BUCKET = os.getenv('S3_BUCKET', 'foobucket')
print("Using bucket at " + ENDPOINT);
#boto3.set_stream_logger('botocore', logging.DEBUG)
session = Session(aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET,
region_name=os.getenv('S3_REGION', 'auto'))
# needed for google cloud?
session.events.unregister('before-parameter-build.s3.ListObjects',
set_list_objects_encoding_type_url)
s3resource = session.resource('s3',
endpoint_url=ENDPOINT,
verify=False,
config=Config(signature_version='s3v4'))
s3bucket = s3resource.Bucket(BUCKET)
s3_client = boto3.client('s3',
endpoint_url=ENDPOINT,
verify=False,
config=Config(signature_version='s3v4'),
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/api/waldump")
def render_waldump():
return render_template("waldump.html")
@app.route('/api/fetch_wal')
def fetch_wal():
return waldump.fetch_wal(request, s3bucket);
@app.route("/api/server_status")
def server_status():
dirs = os.listdir("pgdatadirs")
dirs.sort()
primary = None
standbys = []
for dirname in dirs:
result = run("pg_ctl status -D pgdatadirs/" + dirname, stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
srv = {
'datadir': dirname,
'status': result.stdout,
'port': None
}
if dirname == 'primary':
primary = srv;
primary['port'] = 5432;
else:
standby_match = re.search('standby_([0-9]+)', dirname)
if standby_match:
srv['port'] = int(standby_match.group(1))
standbys.append(srv);
return {'primary': primary, 'standbys': standbys}
@app.route('/api/list_bucket')
def list_bucket():
response = 'cloud bucket contents:<br>\n'
for file in s3bucket.objects.all():
response = response + html.escape(file.key) + '<br>\n'
return response
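# Format a 64-bit WAL position in the usual hi/lo hex notation, e.g. 0/16B3748.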
def walpos_str(walpos):
return '{:X}/{:X}'.format(walpos >> 32, walpos & 0xFFFFFFFF)
@app.route('/api/bucket_summary')
def bucket_summary():
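# Scan the bucket keys to find base images (nonrel_<LSN>.tar), how far the sliced
# WAL extends, and the range of raw WAL segments under walarchive/.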
nonrelimages = []
minwal = int(0)
maxwal = int(0)
minseqwal = int(0)
maxseqwal = int(0)
for file in s3bucket.objects.all():
path = file.key
match = re.search('nonreldata/nonrel_([0-9A-F]+).tar', path)
if match:
walpos = int(match.group(1), 16)
nonrelimages.append(walpos_str(walpos))
match = re.search('nonreldata/nonrel_([0-9A-F]+)-([0-9A-F]+)', path)
if match:
endwal = int(match.group(2), 16)
if endwal > maxwal:
maxwal = endwal
match = re.search('walarchive/([0-9A-F]{8})([0-9A-F]{8})([0-9A-F]{8})', path)
if match:
tli = int(match.group(1), 16)
logno = int(match.group(2), 16)
segno = int(match.group(3), 16)
# FIXME: this assumes default 16 MB wal segment size
logsegno = logno * (0x100000000 / (16*1024*1024)) + segno
seqwal = int((logsegno + 1) * (16*1024*1024))
if seqwal > maxseqwal:
maxseqwal = seqwal;
if minseqwal == 0 or seqwal < minseqwal:
minseqwal = seqwal;
return {
'nonrelimages': nonrelimages,
'minwal': walpos_str(minwal),
'maxwal': walpos_str(maxwal),
'minseqwal': walpos_str(minseqwal),
'maxseqwal': walpos_str(maxseqwal)
}
def print_cmd_result(cmd_result):
return print_cmd_result_ex(cmd_result.args, cmd_result.returncode, cmd_result.stdout)
def print_cmd_result_ex(cmd, returncode, stdout):
res = ''
res += 'ran command:\n' + str(cmd) + '\n'
res += 'It returned code ' + str(returncode) + '\n'
res += '\n'
res += 'stdout/stderr:\n'
res += stdout
return res
@app.route('/api/init_primary', methods=['GET', 'POST'])
def init_primary():
initdb_result = run("initdb -D pgdatadirs/primary --username=zenith --pwfile=pg-password.txt", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
if initdb_result.returncode != 0:
return print_cmd_result(initdb_result)
# Append archive_mode and archive_command and port to postgresql.conf
f=open("pgdatadirs/primary/postgresql.conf", "a+")
f.write("listen_addresses='*'\n")
f.write("archive_mode=on\n")
f.write("archive_command='zenith_push --archive-wal-path=%p --archive-wal-fname=%f'\n")
f.write("ssl=on\n")
f.close()
f=open("pgdatadirs/primary/pg_hba.conf", "a+")
f.write("# allow SSL connections with password from anywhere\n")
f.write("hostssl all all 0.0.0.0/0 md5\n")
f.write("hostssl all all ::0/0 md5\n")
f.close()
shutil.copyfile("server.crt", "pgdatadirs/primary/server.crt")
shutil.copyfile("server.key", "pgdatadirs/primary/server.key")
os.chmod("pgdatadirs/primary/server.key", 0o0600)
start_proc = Popen(args=["pg_ctl", "start", "-D", "pgdatadirs/primary", "-l", "pgdatadirs/primary/log"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
start_rc = start_proc.wait()
start_stdout, start_stderr = start_proc.communicate()
responsestr = print_cmd_result(initdb_result) + '\n'
responsestr += print_cmd_result_ex(start_proc.args, start_rc, start_stdout)
return responsestr
@app.route('/api/zenith_push', methods=['GET', 'POST'])
def zenith_push():
# Stop the primary if it's running
stop_result = run(args=["pg_ctl", "stop", "-D", "pgdatadirs/primary"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
# Call zenith_push
push_result = run("zenith_push -D pgdatadirs/primary", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
# Restart the primary
start_proc = Popen(args=["pg_ctl", "start", "-D", "pgdatadirs/primary", "-l", "pgdatadirs/primary/log"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
start_rc = start_proc.wait()
start_stdout, start_stderr = start_proc.communicate()
responsestr = print_cmd_result(stop_result) + '\n'
responsestr += print_cmd_result(push_result) + '\n'
responsestr += print_cmd_result_ex(start_proc.args, start_rc, start_stdout) + '\n'
return responsestr
@app.route('/api/create_standby', methods=['GET', 'POST'])
def create_standby():
walpos = request.form.get('walpos')
if not walpos:
return 'no walpos'
dirs = os.listdir("pgdatadirs")
last_port = 5432
for dirname in dirs:
standby_match = re.search('standby_([0-9]+)', dirname)
if standby_match:
port = int(standby_match.group(1))
if port > last_port:
last_port = port
standby_port = last_port + 1
standby_dir = "pgdatadirs/standby_" + str(standby_port)
# Call zenith_restore
restore_result = run(["zenith_restore", "--end=" + walpos, "-D", standby_dir], stdout=PIPE, stderr=STDOUT, encoding='latin1')
responsestr = print_cmd_result(restore_result)
if restore_result.returncode == 0:
# Append hot_standby and port to postgresql.conf
f=open(standby_dir + "/postgresql.conf", "a+")
f.write("hot_standby=on\n")
f.write("port=" + str(standby_port) + "\n")
f.close()
start_proc = Popen(args=["pg_ctl", "start", "-D", standby_dir, "-l", standby_dir + "/log"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
start_rc = start_proc.wait()
start_stdout, start_stderr = start_proc.communicate()
responsestr += '\n\n' + print_cmd_result_ex(start_proc.args, start_rc, start_stdout)
return responsestr
@app.route('/api/destroy_server', methods=['GET', 'POST'])
def destroy_primary():
datadir = request.form.get('datadir')
# Check that the datadir parameter doesn't contain anything funny.
if not re.match("^[A-Za-z0-9_-]+$", datadir):
raise Exception('invalid datadir: ' + datadir)
# Stop the server if it's running
stop_result = run(args=["pg_ctl", "stop", "-m", "immediate", "-D", "pgdatadirs/" + datadir], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
shutil.rmtree('pgdatadirs/' + datadir, ignore_errors=True)
responsestr = print_cmd_result(stop_result) + '\n'
responsestr += 'Deleted datadir ' + datadir + '.\n'
return responsestr
@app.route('/api/restore_primary', methods=['GET', 'POST'])
def restore_primary():
# Call zenith_restore
restore_result = run(["zenith_restore", "-D", "pgdatadirs/primary"], stdout=PIPE, stderr=STDOUT, encoding='latin1')
responsestr = print_cmd_result(restore_result)
# Append restore_command to postgresql.conf, so that it can find the last raw WAL segments
f=open("pgdatadirs/primary/postgresql.conf", "a+")
f.write("listen_addresses='*'\n")
f.write("restore_command='zenith_restore --archive-wal-path=%p --archive-wal-fname=%f'\n")
f.write("ssl=on\n")
f.close()
if restore_result.returncode == 0:
start_proc = Popen(args=["pg_ctl", "start", "-D", "pgdatadirs/primary", "-l", "pgdatadirs/primary/log"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
start_rc = start_proc.wait()
start_stdout, start_stderr = start_proc.communicate()
responsestr += print_cmd_result_ex(start_proc.args, start_rc, start_stdout)
return responsestr
@app.route('/api/slicedice', methods=['GET', 'POST'])
def run_slicedice():
result = run("zenith_slicedice", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
responsestr = print_cmd_result(result)
return responsestr
@app.route('/api/reset_demo', methods=['POST'])
def reset_all():
result = run("pkill -9 postgres", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
dirs = os.listdir("pgdatadirs")
for dirname in dirs:
shutil.rmtree('pgdatadirs/' + dirname)
for file in s3bucket.objects.all():
s3_client.delete_object(Bucket = BUCKET, Key = file.key)
responsestr = print_cmd_result(result) + '\n'
responsestr += '''
Deleted all Postgres datadirs.
Deleted all files in object storage bucket.
'''
return responsestr
if __name__ == '__main__':
app.run()

View File

@@ -0,0 +1,3 @@
module.exports = {
presets: ["@babel/preset-env", "@babel/preset-react"],
};

View File

@@ -0,0 +1,67 @@
Mock implementation of a management console.
This isn't very different from a "normal" PostgreSQL installation with
a base backup and WAL archive. The main user-visible difference is
that when you create a standby server, we don't restore the whole data
directory, but only the "non-relation" files. Relation files are
restored on demand, when they're accessed the first time. That makes
the "create standby" operation is very fast, but with some delay when
you connect and start running queries instead. Most visible if you
have a large database. (However, see note below about large databases)
Note: lots of things are broken/unsafe. Things will fail if a table is
larger than 1 GB. Or if there are more than 1000 files in the cloud
bucket.
How to use this demo:
1. If there are any leftovers from previous runs, reset by clicking
the RESET DEMO button. This kills and deletes all Postgres servers,
and empties the cloud storage bucket
2. Create primary server by clicking on the "Init primary" button
3. Push a base image of the primary to cloud storage, by clicking the
"push base image" button. (This takes about 30 seconds, be
patient)
4. Connect to primary with psql, and create a test table with a little data.
psql postgres -p5432 -U zenith -h<host>
create table mytable (i int4);
insert into mytable values (1);
select pg_switch_wal();
The Postgres password is the same as for the management console.
5. Now that there's a new WAL segment in the archive, we can "slice &
dice" it. Click on the "Slice & Dice" button.
6. Perform more updates on the primary, to generate more WAL.
insert into mytable values (2); select pg_switch_wal();
insert into mytable values (3); select pg_switch_wal();
insert into mytable values (4); select pg_switch_wal();
insert into mytable values (5); select pg_switch_wal();
5. Slice & Dice the WAL again
8. Now you can create read-only standby servers at any point in the
WAL. Type a WAL position in the text box (or use the slider), and
click "Create new standby". The first standby is created at port 5433,
the second at port 5434, and so forth.
9. Connect to the standby with "psql -p 5433". Note that it takes a
few seconds until the connection is established. That's because the
standby has to restore the basic system catalogs, like pg_database and
pg_authid from the backup. After connecting, you can do "\d" to list
tables, this will also take a few seconds, as more catalog tables are
restored from backup. Subsequent commands will be faster.
Run queries in the standby:
select * from mytable;
the result depends on the LSN that you picked when you created the server.

463
mgmt-console/js/app.js Normal file
View File

@@ -0,0 +1,463 @@
import React, { useState, useEffect } from 'react';
import ReactDOM from 'react-dom';
import Loader from "react-loader-spinner";
import { Router, Route, Link, IndexRoute, hashHistory, browserHistory } from 'react-router';
function ServerStatus(props) {
const datadir = props.server.datadir;
const status = props.server.status;
const port = props.server.port;
return (
<div>
<h2>{ datadir == 'primary' ? 'Primary' : datadir }</h2>
status: <div className='status'>{status}</div><br/>
to connect: <span className='shellcommand'>psql -h { window.location.hostname } -p { port } -U zenith postgres</span><br/>
</div>
);
}
function StandbyList(props) {
const bucketSummary = props.bucketSummary;
const standbys = props.standbys;
const maxwalpos = bucketSummary.maxwal ? walpos_to_int(bucketSummary.maxwal) : 0;
const [walposInput, setWalposInput] = useState({ src: 'text', value: '0/0'});
// find earliest base image
const minwalpos = bucketSummary.nonrelimages ? bucketSummary.nonrelimages.reduce((minpos, imgpos_str, index, array) => {
const imgpos = walpos_to_int(imgpos_str);
return (minpos == 0 || imgpos < minpos) ? imgpos : minpos;
}, 0) : 0;
const can_create_standby = minwalpos > 0 && maxwalpos > 0 && maxwalpos >= minwalpos;
var walpos_valid = true;
function create_standby() {
const formdata = new FormData();
formdata.append("walpos", walposStr);
props.startOperation('Creating new standby at ' + walposStr + '...',
fetch("/api/create_standby", { method: 'POST', body: formdata }));
}
function destroy_standby(datadir) {
const formdata = new FormData();
formdata.append("datadir", datadir);
props.startOperation('Destroying ' + datadir + '...',
fetch("/api/destroy_server", { method: 'POST', body: formdata }));
}
const handleSliderChange = (event) => {
setWalposInput({ src: 'slider', value: event.target.value });
}
const handleWalposChange = (event) => {
setWalposInput({ src: 'text', value: event.target.value });
}
var sliderValue;
var walposStr;
if (walposInput.src == 'text')
{
const walpos = walpos_to_int(walposInput.value);
if (walpos >= minwalpos && walpos <= maxwalpos)
walpos_valid = true;
else
walpos_valid = false;
sliderValue = Math.round((walpos - minwalpos) / (maxwalpos - minwalpos) * 100);
walposStr = walposInput.value;
}
else
{
const slider = walposInput.value;
const new_walpos = minwalpos + slider / 100 * (maxwalpos - minwalpos);
console.log('minwalpos: '+ minwalpos);
console.log('maxwalpos: '+ maxwalpos);
walposStr = int_to_walpos(Math.round(new_walpos));
walpos_valid = true;
console.log(walposStr);
}
var standbystatus = ''
if (standbys)
{
standbystatus =
<div>
{
standbys.length > 0 ?
standbys.map((server) =>
<>
<ServerStatus key={ 'status_' + server.datadir} server={server}/>
<button key={ 'destroy_' + server.datadir} onClick={e => destroy_standby(server.datadir)}>Destroy standby</button>
</>
) : "no standby servers"
}
</div>
}
return (
<div>
<h2>Standbys</h2>
<button onClick={create_standby} disabled={!can_create_standby || !walpos_valid}>Create new Standby</button> at LSN
<input type="text" id="walpos_input" value={ walposStr } onChange={handleWalposChange} disabled={!can_create_standby}/>
<input type="range" id="walpos_slider" min="0" max="100" steps="1" value={sliderValue} onChange={handleSliderChange} disabled={!can_create_standby}/>
<br/>
{ standbystatus }
</div>
);
}
function ServerList(props) {
const primary = props.serverStatus ? props.serverStatus.primary : null;
const standbys = props.serverStatus ? props.serverStatus.standbys : [];
const bucketSummary = props.bucketSummary;
var primarystatus = '';
function destroy_primary() {
const formdata = new FormData();
formdata.append("datadir", 'primary');
props.startOperation('Destroying primary...',
fetch("/api/destroy_server", { method: 'POST', body: formdata }));
}
function restore_primary() {
props.startOperation('Restoring primary...',
fetch("/api/restore_primary", { method: 'POST' }));
}
if (primary)
{
primarystatus =
<div>
<ServerStatus server={primary}/>
<button onClick={destroy_primary}>Destroy primary</button>
</div>
}
else
{
primarystatus =
<div>
no primary server<br/>
<button onClick={restore_primary}>Restore primary</button>
</div>
}
return (
<>
{ primarystatus }
<StandbyList standbys={standbys} startOperation={props.startOperation} bucketSummary={props.bucketSummary}/>
<p className="todo">
Should we list the WAL safekeeper nodes here? Or are they part of the Storage? Or not visible to users at all?
</p>
</>
);
}
function BucketSummary(props) {
const bucketSummary = props.bucketSummary;
const startOperation = props.startOperation;
function slicedice() {
startOperation('Slicing sequential WAL to per-relation WAL...',
fetch("/api/slicedice", { method: 'POST' }));
}
if (!bucketSummary.nonrelimages)
{
return <>loading...</>
}
return (
<div>
<div>Base images at following WAL positions:
<ul>
{bucketSummary.nonrelimages.map((img) => (
<li key={img}>{img}</li>
))}
</ul>
</div>
Sliced WAL is available up to { bucketSummary.maxwal }<br/>
Raw WAL is available up to { bucketSummary.maxseqwal }<br/>
<br/>
<button onClick={slicedice}>Slice & Dice WAL</button>
<p className="todo">
Currently, the slicing or "sharding" of the WAL needs to be triggered manually, by clicking the above button.
<br/>
TODO: make it a continuous process that runs in the WAL safekeepers, or in the Page Servers, or as a standalone service.
</p>
</div>
);
}
function ProgressIndicator()
{
return (
<div>
<Loader
type="Puff"
color="#00BFFF"
height={100}
width={100}
/>
</div>
)
}
function walpos_to_int(walpos)
{
const [hi, lo] = walpos.split('/');
// hi is the upper 32 bits of the WAL position, so scale it by 2^32 before adding lo
return parseInt(hi, 16) * 0x100000000 + parseInt(lo, 16);
}
function int_to_walpos(x)
{
console.log('converting ' + x);
return (Math.floor((x / 0x100000000)).toString(16) + '/' + (x % 0x100000000).toString(16)).toUpperCase();
}
function OperationStatus(props) {
const lastOperation = props.lastOperation;
const inProgress = props.inProgress;
const operationResult = props.operationResult;
if (lastOperation)
{
return (
<div><h2>Last operation:</h2>
<div>{lastOperation} { (!inProgress && lastOperation) ? 'done!' : '' }</div>
<div className='result'>
{inProgress ? <ProgressIndicator/> : <pre>{operationResult}</pre>}
</div>
</div>
);
}
else
return '';
}
function ActionButtons(props) {
const startOperation = props.startOperation;
const bucketSummary = props.bucketSummary;
function reset_demo() {
startOperation('resetting everything...',
fetch("/api/reset_demo", { method: 'POST' }));
}
function init_primary() {
startOperation('Initializing new primary...',
fetch("/api/init_primary", { method: 'POST' }));
}
function zenith_push() {
startOperation('Pushing new base image...',
fetch("/api/zenith_push", { method: 'POST' }));
}
return (
<div>
<p className="todo">
RESET DEMO deletes everything in the storage bucket, and stops and destroys all servers. This resets the whole demo environment to the initial state.
</p>
<button onClick={reset_demo}>RESET DEMO</button>
<p className="todo">
Init Primary runs initdb to create a new primary server. Click this after Resetting the demo.
</p>
<button onClick={init_primary}>Init primary</button>
<p className="todo">
Push Base Image stops the primary, copies the current state of the primary to the storage bucket as a new base backup, and restarts the primary.
<br/>
TODO: This should be handled by a continuous background process, probably running in the storage nodes. And without having to shut down the cluster, of course.
</p>
<button onClick={zenith_push}>Push base image</button>
</div>
);
}
function Sidenav(props)
{
const toPage = (page) => (event) => {
//event.preventDefault()
props.switchPage(page);
};
return (
<div>
<h3 className="sidenav-item">Menu</h3>
<a href="#servers" onClick={toPage('servers')} className="sidenav-item">Servers</a>
<a href="#storage" onClick={toPage('storage')} className="sidenav-item">Storage</a>
<a href="#snapshots" onClick={toPage('snapshots')} className="sidenav-item">Snapshots</a>
<a href="#demo" onClick={toPage('demo')} className="sidenav-item">Demo</a>
<a href="#import" onClick={toPage('import')} className="sidenav-item">Import / Export</a>
<a href="#jobs" onClick={toPage('jobs')} className="sidenav-item">Jobs</a>
</div>
);
}
function App()
{
const [page, setPage] = useState('servers');
const [serverStatus, setServerStatus] = useState({});
const [bucketSummary, setBucketSummary] = useState({});
const [lastOperation, setLastOperation] = useState('');
const [inProgress, setInProgress] = useState('');
const [operationResult, setOperationResult] = useState('');
useEffect(() => {
reloadStatus();
}, []);
function startOperation(operation, promise)
{
promise.then(result => result.text()).then(resultText => {
operationFinished(resultText);
});
setLastOperation(operation);
setInProgress(true);
setOperationResult('');
}
function operationFinished(result)
{
setInProgress(false);
setOperationResult(result);
reloadStatus();
}
function clearOperation()
{
setLastOperation('')
setInProgress('');
setOperationResult('');
console.log("cleared");
}
function reloadStatus()
{
fetch('/api/server_status').then(res => res.json()).then(data => {
setServerStatus(data);
});
fetch('/api/bucket_summary').then(res => res.json()).then(data => {
setBucketSummary(data);
});
}
const content = () => {
console.log(page);
if (page === 'servers') {
return (
<>
<h1>Server status</h1>
<ServerList startOperation={ startOperation }
serverStatus={ serverStatus }
bucketSummary={ bucketSummary }/>
</>
);
} else if (page === 'storage') {
return (
<>
<h1>Storage bucket status</h1>
<BucketSummary startOperation={ startOperation }
bucketSummary={ bucketSummary }/>
</>
);
} else if (page === 'snapshots') {
return (
<>
<h1>Snapshots</h1>
<p className="todo">
In Zenith, snapshots are just specific points (LSNs) in the WAL history, with a label. A snapshot prevents garbage collecting old data that's still needed to reconstruct the database at that LSN.
</p>
<p className="todo">
TODO:
<ul>
<li>List existing snapshots</li>
<li>Create new snapshot manually, from current state or from a given LSN</li>
<li>Drill into the WAL stream to see what has happened. Provide tools for e.g. finding the point where a table was dropped</li>
<li>Create snapshots automatically based on events in the WAL, for example when pg_create_restore_point() is called in the primary</li>
<li>Launch new reader instance at a snapshot</li>
<li>Export snapshot</li>
<li>Rollback cluster to a snapshot</li>
</ul>
</p>
</>
);
} else if (page === 'demo') {
return (
<>
<h1>Misc actions</h1>
<ActionButtons startOperation={ startOperation }
bucketSummary={ bucketSummary }/>
</>
);
} else if (page === 'import') {
return (
<>
<h1>Import & Export tools</h1>
<p className="TODO">TODO:
<ul>
<li>Initialize database from existing backup (pg_basebackup, WAL-G, pgbackrest)</li>
<li>Initialize from a pg_dump or other SQL script</li>
<li>Launch batch job to import data files from S3</li>
<li>Launch batch job to export database with pg_dump to S3</li>
</ul>
These jobs can be run against reader processing nodes. We can even
spawn a new reader node dedicated to a job, and destroy it when the job is done.
</p>
</>
);
} else if (page === 'jobs') {
return (
<>
<h1>Batch jobs</h1>
<p className="TODO">TODO:
<ul>
<li>List running jobs launched from Import & Export tools</li>
<li>List other batch jobs launched by the user</li>
<li>Launch new batch jobs</li>
</ul>
</p>
</>
);
}
}
function switchPage(page)
{
console.log("topage " + page);
setPage(page)
clearOperation();
};
return (
<div className="row">
<div className="sidenav">
<Sidenav switchPage={switchPage} className="column"/>
</div>
<div className="column">
<div>
{ content() }
</div>
<OperationStatus lastOperation={ lastOperation }
inProgress = { inProgress }
operationResult = { operationResult }/>
</div>
</div>
);
}
ReactDOM.render(<App/>, document.getElementById('reactApp'));

105
mgmt-console/js/waldump.js Normal file
View File

@@ -0,0 +1,105 @@
import React, { useState, useEffect } from 'react';
import ReactDOM from 'react-dom';
import Loader from "react-loader-spinner";
function walpos_to_int(walpos)
{
// A WAL position like '0/167DF40' is 'hi/lo' in hex; the full value is
// hi * 2^32 + lo, so the high half must be shifted, not simply added.
const [hi, lo] = walpos.split('/');
return parseInt(hi, 16) * 0x100000000 + parseInt(lo, 16);
}
const palette = [
"#003f5c",
"#2f4b7c",
"#665191",
"#a05195",
"#d45087",
"#f95d6a",
"#ff7c43",
"#ffa600"];
function WalRecord(props)
{
const firstwalpos = props.firstwalpos;
const endwalpos = props.endwalpos;
const record = props.record;
const index = props.index;
const xidmap = props.xidmap;
const startpos = walpos_to_int(record.start)
const endpos = walpos_to_int(record.end)
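// Map byte offsets within one 16 MB WAL segment onto a 1000 px wide SVG.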
const scale = 1000 / (16*1024*1024)
const startx = (startpos - firstwalpos) * scale;
const endx = (endpos - firstwalpos) * scale;
const xidindex = xidmap[record.xid];
const color = palette[index % palette.length];
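// One horizontal lane per transaction id, with a small per-record nudge so overlapping records stay visible.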
const y = 5 + (xidindex) * 20 + (index % 2) * 2;
return (
<line x1={ startx } y1={y} x2={endx} y2={y} stroke={ color } strokeWidth="5">
<title>
start: { record.start } end: { record.end }
</title>
</line>
)
}
function WalFile(props)
{
const walContent = props.walContent;
const firstwalpos = props.firstwalpos;
const xidmap = props.xidmap;
return <svg width="1000" height="200">
{
walContent.records ?
walContent.records.map((record, index) =>
<WalRecord key={record.start} firstwalpos={firstwalpos} record={record} index={index} xidmap={xidmap}/>
) : "no records"
}
</svg>
}
function WalDumpApp()
{
const [walContent, setWalContent] = useState({});
const filename = '00000001000000000000000C';
useEffect(() => {
fetch('/fetch_wal?filename='+filename).then(res => res.json()).then(data => {
setWalContent(data);
});
}, []);
var firstwalpos = 0;
var endwalpos = 0;
var numxids = 0;
var xidmap = {};
if (walContent.records && walContent.records.length > 0)
{
firstwalpos = walpos_to_int(walContent.records[0].start);
endwalpos = firstwalpos + 16*1024*1024;
walContent.records.forEach(rec => {
if (!xidmap[rec.xid])
{
xidmap[rec.xid] = ++numxids;
}
});
}
return (
<>
<h2>{filename}</h2>
<WalFile walContent={walContent} firstwalpos={firstwalpos} endwalpos={endwalpos} xidmap={xidmap}/>
</>
);
}
console.log('hey there');
ReactDOM.render(<WalDumpApp/>, document.getElementById('waldump'));

View File

@@ -0,0 +1,9 @@
#!/bin/bash
#
# NOTE: You must set the following environment variables before running this:
# BASIC_AUTH_PASSWORD - basic http auth password
# S3_ACCESSKEY
# S3_SECRET
S3_ENDPOINT=https://storage.googleapis.com S3_BUCKET=zenith-testbucket PATH=/home/heikki/pgsql-install/bin:$PATH flask run --host=0.0.0.0

8
mgmt-console/launch-local.sh Executable file
View File

@@ -0,0 +1,8 @@
#!/bin/bash
#
# NOTE: You should set the BASIC_AUTH_PASSWORD environment variable before calling this script.
# Launch S3 server
(cd ms3 && python3 -m ms3.app --listen-address=localhost) &
FLASK_ENV=development S3_REGION=auto S3_ENDPOINT=http://localhost:9009 S3_BUCKET=zenith-testbucket PATH=/home/heikki/pgsql.fsmfork/bin:$PATH flask run --host=0.0.0.0

6144
mgmt-console/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

27
mgmt-console/package.json Normal file
View File

@@ -0,0 +1,27 @@
{
"name": "starter-kit",
"version": "1.1.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"build": "webpack",
"start": "python app.py"
},
"author": "",
"license": "ISC",
"dependencies": {
"react": "^17.0.1",
"react-dom": "^17.0.1",
"react-loader-spinner": "^4.0.0",
"react-router": "^5.2.0"
},
"devDependencies": {
"@babel/core": "^7.13.1",
"@babel/preset-env": "^7.13.5",
"@babel/preset-react": "^7.12.13",
"babel-loader": "^8.2.2",
"webpack": "^5.24.2",
"webpack-cli": "^4.5.0"
}
}

View File

@@ -0,0 +1,58 @@
<head>
<style>
.status {
font-family: monospace;
background-color: lightgrey;
}
.shellcommand {
font-family: monospace;
background-color: lightgrey;
}
.result {
font-family: monospace;
background-color: lightgrey;
padding: 10px;
}
.todo {font-style: italic;}
h1 {color: blue;}
.column {
float: left;
width: 50%;
padding: 10px;
}
/* Clear floats after the columns */
.row:after {
content: "";
display: table;
clear: both;
}
.sidenav {
float: left;
width: 150px;
padding: 10px;
background-color: pink;
}
.sidenav-item {
padding:10px 0px;
border:none;
display:block;
}
</style>
</head>
<body>
<div id="reactApp"></div>
<!-- Attach React components -->
<script type="text/javascript" src="{{ url_for('static', filename='app_bundle.js') }}"></script>
</body>

View File

@@ -0,0 +1,46 @@
<head>
<style>
.status {
font-family: monospace;
background-color: lightgrey;
}
.shellcommand {
font-family: monospace;
background-color: lightgrey;
}
.result {
font-family: monospace;
background-color: lightgrey;
padding: 10px;
}
h1 {color: blue;}
p {color: red;}
* {
box-sizing: border-box;
}
.row {
display: flex;
}
/* Create two columns that sit next to each other */
.column1 {
flex: 30%;
padding: 10px;
}
.column2 {
flex: 70%;
padding: 10px;
}
</style>
</head>
<body>
<div id="waldump"></div>
<!-- Attach React components -->
<script type="text/javascript" src="{{ url_for('static', filename='waldump_bundle.js') }}"></script>
</body>

25
mgmt-console/waldump.py Normal file
View File

@@ -0,0 +1,25 @@
#
# This file contains work-in-progress code to visualize WAL contents.
#
# This is the API endpoint that calls a 'zenith_wal_to_json' executable,
# which is a hacked version of pg_waldump that prints information about the
# records in JSON format. The code in js/waldump.js displays it.
#
import os
import re
from subprocess import PIPE, STDOUT, run, Popen
def fetch_wal(request, s3bucket):
filename = request.args.get('filename')
if not re.match("^[A-Za-z0-9_]+$", filename):
raise Exception('invalid WAL filename: ' + filename)
# FIXME: this downloads the WAL file to current dir. Use a temp dir? Pipe?
s3bucket.download_file('walarchive/' + filename, filename)
result = run("zenith_wal_to_json " + filename, stdout=PIPE, universal_newlines=True, shell=True)
os.unlink(filename)
return result.stdout
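# The JSON produced by zenith_wal_to_json is consumed by js/waldump.js, which reads
# roughly this shape (illustrative example based on the fields the UI uses):
#   {"records": [{"start": "0/167DF40", "end": "0/167DF80", "xid": 1234}, ...]}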

View File

@@ -0,0 +1,27 @@
var webpack = require('webpack');
module.exports = {
entry: {
app: './js/app.js',
waldump: './js/waldump.js'
},
output: {
filename: "[name]_bundle.js",
path: __dirname + '/static'
},
module: {
rules: [
{
test: /\.js?$/,
exclude: /node_modules/,
use: {
loader: 'babel-loader',
options: {
presets: ['@babel/preset-env']
}
}
}
]
},
plugins: [
]
};

179
mgmt-console/zenith.py Normal file
View File

@@ -0,0 +1,179 @@
#zenith.py
import click
import testgres
import os
from testgres import PostgresNode
from tabulate import tabulate
zenith_base_dir = '/home/anastasia/zenith/basedir'
@click.group()
def main():
"""Run the Zenith CLI."""
@click.group()
def pg():
"""Db operations
NOTE: 'database' here means one postgresql node
"""
@click.command(name='create')
@click.option('--name', required=True)
@click.option('-s', '--storage-name', help='Name of the storage',
default='zenith-local',
show_default=True)
@click.option('--snapshot', help='init from the snapshot. Snap is a name or URL')
@click.option('--no-start', is_flag=True, help='Do not start created node',
default=False, show_default=True)
def pg_create(name, storage_name, snapshot, no_start):
"""Initialize the database"""
node = PostgresNode()
base_dir = os.path.join(zenith_base_dir, 'pg', name)
node = testgres.get_new_node(name, base_dir=base_dir)
# TODO: skip init; instead, link the node with the storage or populate it from a snapshot
node.init()
if not no_start:
node.start()
@click.command(name='start')
@click.option('--name', required=True)
@click.option('--snapshot')
@click.option('--read-only', is_flag=True, help='Start read-only node', show_default=True)
def pg_start(name, snapshot, read_only):
"""Start the database"""
node = PostgresNode()
base_dir = os.path.join(zenith_base_dir, 'pg', name)
node = testgres.get_new_node(name, base_dir=base_dir)
# TODO pass snapshot as a parameter
node.start()
@click.command(name='stop')
@click.option('--name', required=True)
def pg_stop(name):
"""Stop the database"""
node = PostgresNode()
base_dir = os.path.join(zenith_base_dir, 'pg', name)
node = testgres.get_new_node(name, base_dir=base_dir)
node.stop()
@click.command(name='destroy')
@click.option('--name', required=True)
def pg_destroy(name):
"""Drop the database"""
node = PostgresNode()
base_dir = os.path.join(zenith_base_dir, 'pg', name)
node = testgres.get_new_node(name, base_dir=base_dir)
node.cleanup()
@click.command(name='list')
def pg_list():
"""List existing databases"""
dirs = os.listdir(os.path.join(zenith_base_dir, 'pg'))
path={}
status={}
data=[]
for dirname in dirs:
path[dirname] = os.path.join(zenith_base_dir, 'pg', dirname)
fname = os.path.join( path[dirname], 'data/postmaster.pid')
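# The last line of postmaster.pid holds the postmaster status string (e.g. "ready"),
# which serves here as a rough "running" indicator; a missing file means inactive.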
try:
f = open(fname,'r')
status[dirname] = f.readlines()[-1]
except OSError as err:
status[dirname]='inactive'
data.append([dirname , status[dirname], path[dirname]])
print(tabulate(data, headers=['Name', 'Status', 'Path']))
pg.add_command(pg_create)
pg.add_command(pg_destroy)
pg.add_command(pg_start)
pg.add_command(pg_stop)
pg.add_command(pg_list)
@click.group()
def storage():
"""Storage operations"""
@click.command(name='attach')
@click.option('--name')
def storage_attach(name):
"""Attach the storage"""
@click.command(name='detach')
@click.option('--name')
@click.option('--force', is_flag=True, show_default=True)
def storage_detach(name):
"""Detach the storage"""
@click.command(name='list')
def storage_list():
"""List existing storages"""
storage.add_command(storage_attach)
storage.add_command(storage_detach)
storage.add_command(storage_list)
@click.group()
def snapshot():
"""Snapshot operations"""
@click.command(name='create')
def snapshot_create():
"""Create new snapshot"""
@click.command(name='destroy')
def snapshot_destroy():
"""Destroy the snapshot"""
@click.command(name='pull')
def snapshot_pull():
"""Pull remote snapshot"""
@click.command(name='push')
def snapshot_push():
"""Push snapshot to remote"""
@click.command(name='import')
def snapshot_import():
"""Convert given format to zenith snapshot"""
@click.command(name='export')
def snapshot_export():
"""Convert zenith snapshot to PostgreSQL compatible format"""
snapshot.add_command(snapshot_create)
snapshot.add_command(snapshot_destroy)
snapshot.add_command(snapshot_pull)
snapshot.add_command(snapshot_push)
snapshot.add_command(snapshot_import)
snapshot.add_command(snapshot_export)
@click.group()
def wal():
"""WAL operations"""
@click.command(name='list')
def wal_list():
"""List WAL files"""
wal.add_command(wal_list)
@click.command()
def console():
"""Open web console"""
main.add_command(pg)
main.add_command(storage)
main.add_command(snapshot)
main.add_command(wal)
main.add_command(console)
if __name__ == '__main__':
main()
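# Example usage (illustrative; the option names are the ones declared above):
#   python zenith.py pg create --name node1 --no-start
#   python zenith.py pg list
#   python zenith.py pg stop --name node1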

View File

@@ -1,25 +0,0 @@
version: "3"
services:
prometheus:
container_name: prometheus
image: prom/prometheus:latest
volumes:
- ./prometheus.yaml:/etc/prometheus/prometheus.yml
# ports:
# - "9090:9090"
# TODO: find a proper portable solution
network_mode: "host"
grafana:
image: grafana/grafana:latest
volumes:
- ./grafana.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
environment:
- GF_AUTH_ANONYMOUS_ENABLED=true
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
- GF_AUTH_DISABLE_LOGIN_FORM=true
# ports:
# - "3000:3000"
# TODO: find a proper portable solution
network_mode: "host"

View File

@@ -1,12 +0,0 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
orgId: 1
url: http://localhost:9090
basicAuth: false
isDefault: false
version: 1
editable: false

View File

@@ -1,5 +0,0 @@
scrape_configs:
- job_name: 'default'
scrape_interval: 10s
static_configs:
- targets: ['localhost:9898']

2373
pageserver/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -4,44 +4,33 @@ version = "0.1.0"
authors = ["Stas Kelvich <stas@zenith.tech>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bookfile = "^0.3"
chrono = "0.4.19"
crossbeam-channel = "0.5.0"
rand = "0.8.3"
regex = "1.4.5"
bytes = { version = "1.0.1", features = ['serde'] }
bytes = "1.0.1"
byteorder = "1.4.3"
fs2 = "0.4.3"
futures = "0.3.13"
hyper = "0.14"
lazy_static = "1.4.0"
slog-stdlog = "4.1.0"
slog-async = "2.6.0"
slog-scope = "4.4.0"
slog-term = "2.8.0"
slog = "2.7.0"
log = "0.4.14"
clap = "2.33.0"
termion = "1.5.6"
tui = "0.14.0"
daemonize = "0.4.1"
tokio = { version = "1.11", features = ["process", "macros", "fs", "rt"] }
postgres-types = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
routerify = "2"
rust-s3 = { git = "https://github.com/hlinnaka/rust-s3", features = ["no-verify-ssl"] }
tokio = { version = "1.3.0", features = ["full"] }
tokio-stream = { version = "0.1.4" }
tokio-postgres = { git = "https://github.com/kelvich/rust-postgres", branch = "replication_rebase" }
postgres-protocol = { git = "https://github.com/kelvich/rust-postgres", branch = "replication_rebase" }
postgres = { git = "https://github.com/kelvich/rust-postgres", branch = "replication_rebase" }
anyhow = "1.0"
crc32c = "0.6.0"
thiserror = "1.0"
hex = { version = "0.4.3", features = ["serde"] }
tar = "0.4.33"
humantime = "2.1.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
toml = "0.5"
scopeguard = "1.1.0"
rust-s3 = { version = "0.27.0-rc4", features = ["no-verify-ssl"] }
async-trait = "0.1"
const_format = "0.2.21"
tracing = "0.1.27"
signal-hook = {version = "0.3.10", features = ["extended-siginfo"] }
postgres_ffi = { path = "../postgres_ffi" }
zenith_metrics = { path = "../zenith_metrics" }
zenith_utils = { path = "../zenith_utils" }
workspace_hack = { path = "../workspace_hack" }
[dev-dependencies]
hex-literal = "0.3"

View File

@@ -1,4 +1,91 @@
## Page server architecture
Page Server
===========
How to test
-----------
1. Compile and install Postgres from this repository (there are
modifications, so vanilla Postgres won't do)
./configure --prefix=/home/heikki/zenith-install
2. Compile the page server
cd pageserver
cargo build
3. Create another "dummy" cluster that will be used by the page server when it applies
the WAL records. (We shouldn't really need this; getting rid of it is a TODO):
/home/heikki/zenith-install/bin/initdb -D /data/zenith-dummy
4. Initialize and start a new postgres cluster
/home/heikki/zenith-install/bin/initdb -D /data/zenith-test-db --username=postgres
/home/heikki/zenith-install/bin/postgres -D /data/zenith-test-db
5. In another terminal, start the page server.
PGDATA=/data/zenith-dummy PATH=/home/heikki/zenith-install/bin:$PATH ./target/debug/pageserver
It should connect to the postgres instance using streaming replication, and print something
like this:
$ PGDATA=/data/zenith-dummy PATH=/home/heikki/zenith-install/bin:$PATH ./target/debug/pageserver
Starting WAL receiver
connecting...
Starting page server on 127.0.0.1:5430
connected!
page cache is empty
6. You can now open another terminal and issue DDL commands. The generated WAL records will
be streamed to the page server and attached to the blocks they apply to in its
page cache:
$ psql postgres -U postgres
psql (14devel)
Type "help" for help.
postgres=# create table mydata (i int4);
CREATE TABLE
postgres=# insert into mydata select g from generate_series(1,100) g;
INSERT 0 100
postgres=#
7. The GetPage@LSN interface to the compute nodes isn't working yet, but to simulate
that, the page server generates a test GetPage@LSN call every 5 seconds on a random
block that's in the page cache. In a few seconds, you should see output from that:
testing GetPage@LSN for block 0
WAL record at LSN 23584576 initializes the page
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167DF40
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167DF80
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167DFC0
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167E018
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167E058
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167E098
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167E0D8
2021-03-19 11:03:13.792 EET [11439] LOG: applied WAL record at 0/167E118
2021-03-19 11:03:13.792 EET [11439] LOG: applied WAL record at 0/167E158
2021-03-19 11:03:13.792 EET [11439] LOG: applied WAL record at 0/167E198
applied 10 WAL records to produce page image at LSN 18446744073709547246
Architecture
============
The Page Server is responsible for all operations on a number of
"chunks" of relation data. A chunk corresponds to a PostgreSQL
relation segment (i.e. one max. 1 GB file in the data directory), but
it holds all the different versions of every page in the segment that
are still needed by the system.
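As a rough mental model (illustrative notation only, not the actual on-disk format), a
chunk can be thought of as a mapping:
(relation, segment number) -> { (block number, LSN) -> page image or WAL record }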
Determining which chunk each Page Server holds is handled elsewhere. (TODO:
currently, there is only one Page Server which holds all chunks)
The Page Server has a few different duties:
@@ -7,12 +94,10 @@ The Page Server has a few different duties:
- Replay WAL that's applicable to the chunks that the Page Server maintains
- Backup to S3
S3 is the main fault-tolerant storage of all data, as there are no Page Server
replicas. We use a separate fault-tolerant WAL service to reduce latency. It
keeps track of WAL records that are not yet synced to S3.
The Page Server consists of multiple threads that operate on a shared
repository of page versions:
cache of page versions:
| WAL
V
@@ -25,14 +110,16 @@ repository of page versions:
+---------+ .......... | |
| | . . | |
GetPage@LSN | | . backup . -------> | S3 |
-------------> | Page | repository . . | |
-------------> | Page | page cache . . | |
| Service | .......... | |
page | | +----+
<------------- | |
+---------+ +--------------------+
| Checkpointing / |
| Garbage collection |
+--------------------+
+---------+
...................................
. .
. Garbage Collection / Compaction .
...................................
Legend:
@@ -52,7 +139,7 @@ Page Service
------------
The Page Service listens for GetPage@LSN requests from the Compute Nodes,
and responds with pages from the repository.
and responds with pages from the page cache.
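As an illustration only (informal notation, not the actual wire protocol), a request and
its reply conceptually look like:
GetPage@LSN { relation, block number, LSN } -> one page image (8 kB by default) valid as of that LSN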
WAL Receiver
@@ -61,59 +148,25 @@ WAL Receiver
The WAL receiver connects to the external WAL safekeeping service (or
directly to the primary) using PostgreSQL physical streaming
replication, and continuously receives WAL. It decodes the WAL records,
and stores them to the repository.
and stores them to the page cache.
Repository
Page Cache
----------
The repository stores all the page versions, or WAL records needed to
reconstruct them. Each tenant has a separate Repository, which is
stored in the .zenith/tenants/<tenantid> directory.
The Page Cache is a data structure that holds all the different page versions.
It is accessed by all the other threads to perform their duties.
Repository is an abstract trait, defined in `repository.rs`. It is
implemented by the LayeredRepository object in
`layered_repository.rs`. There is only that one implementation of the
Repository trait, but it's still a useful abstraction that keeps the
interface for the low-level storage functionality clean. The layered
storage format is described in layered_repository/README.md.
Each repository consists of multiple Timelines. Timeline is a
workhorse that accepts page changes from the WAL, and serves
get_page_at_lsn() and get_rel_size() requests. Note: this has nothing
to do with PostgreSQL WAL timeline. The term "timeline" is mostly
interchangeable with "branch", there is a one-to-one mapping from
branch to timeline. A timeline has a unique ID within the tenant,
represented as 16-byte hex string that never changes, whereas a
branch is a user-given name for a timeline.
Each repository also has a WAL redo manager associated with it, see
`walredo.rs`. The WAL redo manager is used to replay PostgreSQL WAL
records, whenever we need to reconstruct a page version from WAL to
satisfy a GetPage@LSN request, or to avoid accumulating too much WAL
for a page. The WAL redo manager uses a Postgres process running in
special zenith wal-redo mode to do the actual WAL redo, and
communicates with the process using a pipe.
Currently, the page cache is implemented fully in-memory. TODO: Store it
on disk. Define a file format.
Checkpointing / Garbage Collection
----------------------------------
TODO: Garbage Collection / Compaction
-------------------------------------
Periodically, the checkpointer thread wakes up and performs housekeeping
duties on the repository. It has two duties:
### Checkpointing
Flush WAL that has accumulated in memory to disk, so that the old WAL
can be truncated away in the WAL safekeepers. Also, to free up memory
for receiving new WAL. This process is called "checkpointing". It's
similar to checkpointing in PostgreSQL or other DBMSs, but in the page
server, checkpointing happens on a per-segment basis.
### Garbage collection
Remove old on-disk layer files that are no longer needed according to the
PITR retention policy
Periodically, the Garbage Collection / Compaction thread runs
and applies pending WAL records, and removes old page versions that
are no longer needed.
TODO: Backup service
@@ -124,7 +177,3 @@ The backup service is responsible for periodically pushing the chunks to S3.
TODO: How/when do restore from S3? Whenever we get a GetPage@LSN request for
a chunk we don't currently have? Or when an external Control Plane tells us?
TODO: Sharding
--------------------
We should be able to run multiple Page Servers that handle sharded data.

41
pageserver/build.rs Normal file
View File

@@ -0,0 +1,41 @@
//
// Triggers postgres build if there is no postgres binary present at
// 'REPO_ROOT/tmp_install/bin/postgres'.
//
// I can see a lot of disadvantages with such automation, and the main
// advantage here is the ability to build everything and run integration tests
// in a bare repo by running 'cargo test'.
//
// We could detect whether it is a debug or release build and run the
// corresponding pg build, but that seems like overkill for now.
//
// Problem #1 -- the language server in my editor likes calling 'cargo build'
// by itself. So if I delete the tmp_install directory, it magically reappears
// after some time. During that compilation 'cargo build' may whine about
// "waiting for file lock on build directory".
//
// Problem #2 -- cargo build would run this only if something has changed in
// the crate.
//
// And generally speaking, postgres is not a build dependency for the pageserver,
// just for the integration tests. So let's not mix the two. I'll leave this file in
// place for some time, just in case anybody starts doing the same.
//
// use std::path::Path;
// use std::process::{Command};
fn main() {
// // build postgres if it has not been built yet
// if !Path::new("../tmp_install/bin/postgres").exists() {
// let make_res = Command::new("make")
// .arg("postgres")
// .env_clear()
// .status()
// .expect("failed to execute 'make postgres'");
// if !make_res.success() {
// panic!("postgres build failed");
// }
// }
}

62
pageserver/launch.sh Executable file
View File

@@ -0,0 +1,62 @@
#!/bin/sh
#
# Set up a simple Compute Node + Page Server combination locally.
#
# NOTE: This doesn't clean up between invocations. You'll need to manually:
#
# - Kill any previous 'postgres' and 'pageserver' processes
# - Clear the S3 bucket
# - Remove the 'zenith-pgdata' directory
set -e
# Set up some config.
#
# CHANGE THESE ACCORDING TO YOUR S3 INSTALLATION
export S3_REGION=auto
export S3_ENDPOINT=https://localhost:9000
export S3_ACCESSKEY=minioadmin
export S3_SECRET=pikkunen
export S3_BUCKET=zenith-testbucket
COMPUTE_NODE_PGDATA=zenith-pgdata
# 1. Initialize a cluster.
initdb -D $COMPUTE_NODE_PGDATA -U zenith
echo "port=65432" >> $COMPUTE_NODE_PGDATA/postgresql.conf
echo "log_connections=on" >> $COMPUTE_NODE_PGDATA/postgresql.conf
# Use a small shared_buffers, so that we hit the Page Server more
# easily.
echo "shared_buffers = 1MB" >> $COMPUTE_NODE_PGDATA/postgresql.conf
# TODO: page server should use a replication slot, or some other mechanism
# to make sure that the primary doesn't lose data that the page server still
# needs. (The WAL safekeepers should ensure that)
echo "wal_keep_size=10GB" >> $COMPUTE_NODE_PGDATA/postgresql.conf
# Tell the Postgres server how to connect to the Page Server
echo "page_server_connstring='host=localhost port=5430'" >> $COMPUTE_NODE_PGDATA/postgresql.conf
# 2. Run zenith_push to push a base backup of the database to an S3 bucket. The
# Page Server will read it from there
zenith_push -D $COMPUTE_NODE_PGDATA
# 3. Launch page server
rm -rf /tmp/pgdata-dummy
initdb -N -D /tmp/pgdata-dummy
PGDATA=/tmp/pgdata-dummy ./target/debug/pageserver &
# 4. Start up the Postgres server
postgres -D $COMPUTE_NODE_PGDATA &
echo "ALL SET! You can now connect to Postgres with something like:"
echo ""
echo 'psql "dbname=postgres host=localhost user=zenith port=65432"'

View File

@@ -1,335 +0,0 @@
//!
//! Generate a tarball with files needed to bootstrap ComputeNode.
//!
//! TODO: this module has nothing to do with PostgreSQL pg_basebackup.
//! It could use a better name.
//!
//! Stateless Postgres compute node is launched by sending a tarball
//! which contains non-relational data (multixacts, clog, filenodemaps, twophase files),
//! generated pg_control and dummy segment of WAL.
//! This module is responsible for creation of such tarball
//! from data stored in object storage.
//!
use anyhow::Result;
use bytes::{BufMut, BytesMut};
use log::*;
use std::io;
use std::io::Write;
use std::sync::Arc;
use std::time::SystemTime;
use tar::{Builder, EntryType, Header};
use crate::relish::*;
use crate::repository::Timeline;
use postgres_ffi::xlog_utils::*;
use postgres_ffi::*;
use zenith_utils::lsn::Lsn;
/// This is a short-lived object that lives only for the duration of tarball creation,
/// created mostly to avoid passing a lot of parameters between the various functions
/// used for constructing the tarball.
pub struct Basebackup<'a> {
ar: Builder<&'a mut dyn Write>,
timeline: &'a Arc<dyn Timeline>,
pub lsn: Lsn,
prev_record_lsn: Lsn,
}
// Create basebackup with non-rel data in it. Omit relational data.
//
// Currently we use empty lsn in two cases:
// * During the basebackup right after timeline creation
// * When working without safekeepers. In this situation it is important to match the lsn
// we are taking basebackup on with the lsn that is used in pageserver's walreceiver
// to start the replication.
impl<'a> Basebackup<'a> {
pub fn new(
write: &'a mut dyn Write,
timeline: &'a Arc<dyn Timeline>,
req_lsn: Option<Lsn>,
) -> Result<Basebackup<'a>> {
// Compute postgres doesn't have any previous WAL files, but the first
// record that it's going to write needs to include the LSN of the
// previous record (xl_prev). We include prev_record_lsn in the
// "zenith.signal" file, so that postgres can read it during startup.
//
// We don't keep full history of record boundaries in the page server,
// however, only the predecessor of the latest record on each
// timeline. So we can only provide prev_record_lsn when you take a
// base backup at the end of the timeline, i.e. at last_record_lsn.
// Even at the end of the timeline, we sometimes don't have a valid
// prev_lsn value; that happens if the timeline was just branched from
// an old LSN and it doesn't have any WAL of its own yet. We will set
// prev_lsn to Lsn(0) if we cannot provide the correct value.
let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn {
// Backup was requested at a particular LSN. Wait for it to arrive.
timeline.wait_lsn(req_lsn)?;
// If the requested point is the end of the timeline, we can
// provide prev_lsn. (get_last_record_rlsn() might return it as
// zero, though, if no WAL has been generated on this timeline
// yet.)
let end_of_timeline = timeline.get_last_record_rlsn();
if req_lsn == end_of_timeline.last {
(end_of_timeline.prev, req_lsn)
} else {
(Lsn(0), req_lsn)
}
} else {
// Backup was requested at end of the timeline.
let end_of_timeline = timeline.get_last_record_rlsn();
(end_of_timeline.prev, end_of_timeline.last)
};
info!(
"taking basebackup lsn={}, prev_lsn={}",
backup_prev, backup_lsn
);
Ok(Basebackup {
ar: Builder::new(write),
timeline,
lsn: backup_lsn,
prev_record_lsn: backup_prev,
})
}
pub fn send_tarball(&mut self) -> anyhow::Result<()> {
// Create pgdata subdirs structure
for dir in pg_constants::PGDATA_SUBDIRS.iter() {
let header = new_tar_header_dir(*dir)?;
self.ar.append(&header, &mut io::empty())?;
}
// Send empty config files.
for filepath in pg_constants::PGDATA_SPECIAL_FILES.iter() {
if *filepath == "pg_hba.conf" {
let data = pg_constants::PG_HBA.as_bytes();
let header = new_tar_header(filepath, data.len() as u64)?;
self.ar.append(&header, data)?;
} else {
let header = new_tar_header(filepath, 0)?;
self.ar.append(&header, &mut io::empty())?;
}
}
// Gather non-relational files from object storage pages.
for obj in self.timeline.list_nonrels(self.lsn)? {
match obj {
RelishTag::Slru { slru, segno } => {
self.add_slru_segment(slru, segno)?;
}
RelishTag::FileNodeMap { spcnode, dbnode } => {
self.add_relmap_file(spcnode, dbnode)?;
}
RelishTag::TwoPhase { xid } => {
self.add_twophase_file(xid)?;
}
_ => {}
}
}
// Generate pg_control and bootstrap WAL segment.
self.add_pgcontrol_file()?;
self.ar.finish()?;
debug!("all tarred up!");
Ok(())
}
//
// Generate SLRU segment files from repository.
//
fn add_slru_segment(&mut self, slru: SlruKind, segno: u32) -> anyhow::Result<()> {
let seg_size = self
.timeline
.get_relish_size(RelishTag::Slru { slru, segno }, self.lsn)?;
if seg_size == None {
trace!(
"SLRU segment {}/{:>04X} was truncated",
slru.to_str(),
segno
);
return Ok(());
}
let nblocks = seg_size.unwrap();
let mut slru_buf: Vec<u8> =
Vec::with_capacity(nblocks as usize * pg_constants::BLCKSZ as usize);
for blknum in 0..nblocks {
let img =
self.timeline
.get_page_at_lsn(RelishTag::Slru { slru, segno }, blknum, self.lsn)?;
assert!(img.len() == pg_constants::BLCKSZ as usize);
slru_buf.extend_from_slice(&img);
}
let segname = format!("{}/{:>04X}", slru.to_str(), segno);
let header = new_tar_header(&segname, slru_buf.len() as u64)?;
self.ar.append(&header, slru_buf.as_slice())?;
trace!("Added to basebackup slru {} relsize {}", segname, nblocks);
Ok(())
}
//
// Extract pg_filenode.map files from repository
// Along with them also send PG_VERSION for each database.
//
fn add_relmap_file(&mut self, spcnode: u32, dbnode: u32) -> anyhow::Result<()> {
let img = self.timeline.get_page_at_lsn(
RelishTag::FileNodeMap { spcnode, dbnode },
0,
self.lsn,
)?;
let path = if spcnode == pg_constants::GLOBALTABLESPACE_OID {
let version_bytes = pg_constants::PG_MAJORVERSION.as_bytes();
let header = new_tar_header("PG_VERSION", version_bytes.len() as u64)?;
self.ar.append(&header, version_bytes)?;
let header = new_tar_header("global/PG_VERSION", version_bytes.len() as u64)?;
self.ar.append(&header, version_bytes)?;
String::from("global/pg_filenode.map") // filenode map for global tablespace
} else {
// User defined tablespaces are not supported
assert!(spcnode == pg_constants::DEFAULTTABLESPACE_OID);
// Append dir path for each database
let path = format!("base/{}", dbnode);
let header = new_tar_header_dir(&path)?;
self.ar.append(&header, &mut io::empty())?;
let dst_path = format!("base/{}/PG_VERSION", dbnode);
let version_bytes = pg_constants::PG_MAJORVERSION.as_bytes();
let header = new_tar_header(&dst_path, version_bytes.len() as u64)?;
self.ar.append(&header, version_bytes)?;
format!("base/{}/pg_filenode.map", dbnode)
};
assert!(img.len() == 512);
let header = new_tar_header(&path, img.len() as u64)?;
self.ar.append(&header, &img[..])?;
Ok(())
}
//
// Extract twophase state files
//
fn add_twophase_file(&mut self, xid: TransactionId) -> anyhow::Result<()> {
let img = self
.timeline
.get_page_at_lsn(RelishTag::TwoPhase { xid }, 0, self.lsn)?;
let mut buf = BytesMut::new();
buf.extend_from_slice(&img[..]);
let crc = crc32c::crc32c(&img[..]);
buf.put_u32_le(crc);
let path = format!("pg_twophase/{:>08X}", xid);
let header = new_tar_header(&path, buf.len() as u64)?;
self.ar.append(&header, &buf[..])?;
Ok(())
}
//
// Add generated pg_control file and bootstrap WAL segment.
// Also send zenith.signal file with extra bootstrap data.
//
fn add_pgcontrol_file(&mut self) -> anyhow::Result<()> {
let checkpoint_bytes = self
.timeline
.get_page_at_lsn(RelishTag::Checkpoint, 0, self.lsn)?;
let pg_control_bytes =
self.timeline
.get_page_at_lsn(RelishTag::ControlFile, 0, self.lsn)?;
let mut pg_control = ControlFileData::decode(&pg_control_bytes)?;
let mut checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
// Generate new pg_control and WAL needed for bootstrap
let checkpoint_segno = self.lsn.segment_number(pg_constants::WAL_SEGMENT_SIZE);
let checkpoint_lsn = XLogSegNoOffsetToRecPtr(
checkpoint_segno,
XLOG_SIZE_OF_XLOG_LONG_PHD as u32,
pg_constants::WAL_SEGMENT_SIZE,
);
checkpoint.redo = normalize_lsn(self.lsn, pg_constants::WAL_SEGMENT_SIZE).0;
//reset some fields we don't want to preserve
//TODO Check this.
//We may need to determine the value from twophase data.
checkpoint.oldestActiveXid = 0;
//save new values in pg_control
pg_control.checkPoint = checkpoint_lsn;
pg_control.checkPointCopy = checkpoint;
pg_control.state = pg_constants::DB_SHUTDOWNED;
// add zenith.signal file
let xl_prev = if self.prev_record_lsn == Lsn(0) {
0xBAD0 // magic value to indicate that we don't know prev_lsn
} else {
self.prev_record_lsn.0
};
self.ar.append(
&new_tar_header("zenith.signal", 8)?,
&xl_prev.to_le_bytes()[..],
)?;
//send pg_control
let pg_control_bytes = pg_control.encode();
let header = new_tar_header("global/pg_control", pg_control_bytes.len() as u64)?;
self.ar.append(&header, &pg_control_bytes[..])?;
//send wal segment
let wal_file_name = XLogFileName(
1, // FIXME: always use Postgres timeline 1
checkpoint_segno,
pg_constants::WAL_SEGMENT_SIZE,
);
let wal_file_path = format!("pg_wal/{}", wal_file_name);
let header = new_tar_header(&wal_file_path, pg_constants::WAL_SEGMENT_SIZE as u64)?;
let wal_seg = generate_wal_segment(&pg_control);
assert!(wal_seg.len() == pg_constants::WAL_SEGMENT_SIZE);
self.ar.append(&header, &wal_seg[..])?;
Ok(())
}
}
//
// Create new tarball entry header
//
fn new_tar_header(path: &str, size: u64) -> anyhow::Result<Header> {
let mut header = Header::new_gnu();
header.set_size(size);
header.set_path(path)?;
header.set_mode(0b110000000); // -rw-------
header.set_mtime(
// use the current time as the last modified time
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs(),
);
header.set_cksum();
Ok(header)
}
fn new_tar_header_dir(path: &str) -> anyhow::Result<Header> {
let mut header = Header::new_gnu();
header.set_size(0);
header.set_path(path)?;
header.set_mode(0o755); // drwxr-xr-x
header.set_entry_type(EntryType::dir());
header.set_mtime(
// use the current time as the last modified time
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
.as_secs(),
);
header.set_cksum();
Ok(header)
}

View File

@@ -0,0 +1,43 @@
use anyhow::Result;
use clap::{App, AppSettings};
pub mod pg;
pub mod snapshot;
pub mod storage;
mod subcommand;
fn main() -> Result<()> {
let cli_commands = subcommand::ClapCommands {
commands: vec![
Box::new(pg::PgCmd {
clap_cmd: clap::SubCommand::with_name("pg"),
}),
Box::new(storage::StorageCmd {
clap_cmd: clap::SubCommand::with_name("storage"),
}),
Box::new(snapshot::SnapshotCmd {
clap_cmd: clap::SubCommand::with_name("snapshot"),
}),
],
};
let matches = App::new("zenith")
.about("Zenith CLI")
.version("1.0")
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommands(cli_commands.generate())
.get_matches();
if let Some(subcommand) = matches.subcommand_name() {
println!("'git {}' was used", subcommand);
}
match matches.subcommand() {
("pg", Some(sub_args)) => cli_commands.commands[0].run(sub_args.clone())?,
("storage", Some(sub_args)) => cli_commands.commands[1].run(sub_args.clone())?,
("snapshot", Some(sub_args)) => cli_commands.commands[2].run(sub_args.clone())?,
("", None) => println!("No subcommand"),
_ => unreachable!(),
}
Ok(())
}

View File

@@ -0,0 +1,105 @@
use anyhow::Result;
use clap::{App, AppSettings, Arg};
use crate::subcommand;
pub struct PgCmd<'a> {
pub clap_cmd: clap::App<'a, 'a>,
}
impl subcommand::SubCommand for PgCmd<'_> {
fn gen_clap_command(&self) -> clap::App {
let c = self.clap_cmd.clone();
c.about("Operations with zenith compute nodes")
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommand(App::new("list").about("List existing compute nodes"))
.subcommand(
App::new("create")
.about(
"Create (init) new data directory using given storage and start postgres",
)
.arg(
Arg::with_name("name")
.short("n")
.long("name")
.takes_value(true)
.help("Name of the compute node"),
)
.arg(
Arg::with_name("storage")
.short("s")
.long("storage")
.takes_value(true)
.help("Name of the storage node to use"),
)
//TODO should it be just name of uploaded snapshot or some path?
.arg(
Arg::with_name("snapshot")
.long("snapshot")
.takes_value(true)
.help("Name of the snapshot to use"),
)
.arg(
Arg::with_name("nostart")
.long("no-start")
.takes_value(false)
.help("Don't start postgres on the created node"),
),
)
.subcommand(
App::new("destroy")
.about("Stop postgres and destroy node's data directory")
.arg(
Arg::with_name("name")
.short("n")
.long("name")
.takes_value(true)
.help("Name of the compute node"),
),
)
.subcommand(
App::new("start")
.about("Start postgres on the given node")
.arg(
Arg::with_name("name")
.short("n")
.long("name")
.takes_value(true)
.help("Name of the compute node"),
)
.arg(
Arg::with_name("replica")
.long("replica")
.takes_value(false)
.help("Start the compute node as replica"),
),
)
.subcommand(
App::new("stop")
.about("Stop postgres on the given node")
.arg(
Arg::with_name("name")
.short("n")
.long("name")
.takes_value(true)
.help("Name of the compute node"),
),
)
.subcommand(
App::new("show")
.about("Show info about the given node")
.arg(
Arg::with_name("name")
.short("n")
.long("name")
.takes_value(true)
.help("Name of the compute node"),
),
)
}
fn run(&self, args: clap::ArgMatches) -> Result<()> {
println!("Run PgCmd with args {:?}", args);
Ok(())
}
}

View File

@@ -0,0 +1,27 @@
use anyhow::Result;
use clap::{App, AppSettings, Arg};
use crate::subcommand;
pub struct SnapshotCmd<'a> {
pub clap_cmd: clap::App<'a, 'a>,
}
impl subcommand::SubCommand for SnapshotCmd<'_> {
fn gen_clap_command(&self) -> clap::App {
let c = self.clap_cmd.clone();
c.about("Operations with zenith snapshots")
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommand(App::new("list"))
.subcommand(App::new("create").arg(Arg::with_name("pgdata").required(true)))
.subcommand(App::new("destroy"))
.subcommand(App::new("start"))
.subcommand(App::new("stop"))
.subcommand(App::new("show"))
}
fn run(&self, args: clap::ArgMatches) -> Result<()> {
println!("Run SnapshotCmd with args {:?}", args);
Ok(())
}
}

View File

@@ -0,0 +1,25 @@
use anyhow::Result;
use clap::{App, AppSettings};
use crate::subcommand;
pub struct StorageCmd<'a> {
pub clap_cmd: clap::App<'a, 'a>,
}
impl subcommand::SubCommand for StorageCmd<'_> {
fn gen_clap_command(&self) -> clap::App {
let c = self.clap_cmd.clone();
c.about("Operations with zenith storage nodes")
.setting(AppSettings::SubcommandRequiredElseHelp)
.subcommand(App::new("list"))
.subcommand(App::new("attach"))
.subcommand(App::new("detach"))
.subcommand(App::new("show"))
}
fn run(&self, args: clap::ArgMatches) -> Result<()> {
println!("Run StorageCmd with args {:?}", args);
Ok(())
}
}

View File

@@ -0,0 +1,29 @@
use anyhow::Result;
/// All subcommands need to implement this interface.
pub trait SubCommand {
/// Generates the cli-config that Clap requires for the subcommand.
fn gen_clap_command(&self) -> clap::App;
/// Runs the body of the subcommand.
fn run(&self, args: clap::ArgMatches) -> Result<()>;
}
/// A struct which holds a vector of heap-allocated `Box`es of trait objects all of which must
/// implement the `SubCommand` trait, but other than that, can be of any type.
pub struct ClapCommands {
pub commands: Vec<Box<dyn SubCommand>>,
}
impl ClapCommands {
/// Generates a vector of `clap::Apps` that can be passed into clap's `.subcommands()` method in
/// order to generate the full CLI.
pub fn generate(&self) -> Vec<clap::App> {
let mut v: Vec<clap::App> = Vec::new();
for command in self.commands.iter() {
v.push(command.gen_clap_command());
}
v
}
}

View File

@@ -1,25 +0,0 @@
//! Main entry point for the dump_layerfile executable
//!
//! A handy tool for debugging, that's all.
use anyhow::Result;
use clap::{App, Arg};
use pageserver::layered_repository::dump_layerfile_from_path;
use std::path::PathBuf;
fn main() -> Result<()> {
let arg_matches = App::new("Zenith dump_layerfile utility")
.about("Dump contents of one layer file, for debugging")
.arg(
Arg::with_name("path")
.help("Path to file to dump")
.required(true)
.index(1),
)
.get_matches();
let path = PathBuf::from(arg_matches.value_of("path").unwrap());
dump_layerfile_from_path(&path)?;
Ok(())
}

View File

@@ -2,586 +2,256 @@
// Main entry point for the Page Server executable
//
use pageserver::defaults::*;
use serde::{Deserialize, Serialize};
use std::{
env,
net::TcpListener,
path::{Path, PathBuf},
str::FromStr,
thread,
};
use tracing::*;
use zenith_utils::{auth::JwtAuth, logging, postgres_backend::AuthType};
use log::*;
use std::fs;
use std::io;
use std::path::PathBuf;
use std::thread;
use std::{fs::File, fs::OpenOptions, str::FromStr};
use anyhow::{bail, ensure, Context, Result};
use signal_hook::consts::signal::*;
use signal_hook::consts::TERM_SIGNALS;
use signal_hook::flag;
use signal_hook::iterator::exfiltrator::WithOrigin;
use signal_hook::iterator::SignalsInfo;
use std::process::exit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use clap::{App, Arg, ArgMatches};
use clap::{App, Arg};
use daemonize::Daemonize;
use pageserver::{
branches,
defaults::{
DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_PG_LISTEN_ADDR,
DEFAULT_RELISH_STORAGE_MAX_CONCURRENT_SYNC_LIMITS,
},
http, page_service, relish_storage, tenant_mgr, PageServerConf, RelishStorageConfig,
RelishStorageKind, S3Config, LOG_FILE_NAME,
};
use zenith_utils::http::endpoint;
use zenith_utils::postgres_backend;
use slog;
use slog::Drain;
use slog_scope;
use slog_stdlog;
use const_format::formatcp;
use pageserver::page_service;
use pageserver::restore_s3;
use pageserver::tui;
use pageserver::walreceiver;
use pageserver::PageServerConf;
/// String arguments that can be declared via CLI or config file
#[derive(Serialize, Deserialize)]
struct CfgFileParams {
listen_pg_addr: Option<String>,
listen_http_addr: Option<String>,
checkpoint_distance: Option<String>,
checkpoint_period: Option<String>,
gc_horizon: Option<String>,
gc_period: Option<String>,
pg_distrib_dir: Option<String>,
auth_validation_public_key_path: Option<String>,
auth_type: Option<String>,
// see https://github.com/alexcrichton/toml-rs/blob/6c162e6562c3e432bf04c82a3d1d789d80761a86/examples/enum_external.rs for enum deserialisation examples
relish_storage: Option<RelishStorage>,
relish_storage_max_concurrent_sync: Option<String>,
}
#[derive(Serialize, Deserialize, Clone)]
enum RelishStorage {
Local {
local_path: String,
},
AwsS3 {
bucket_name: String,
bucket_region: String,
#[serde(skip_serializing)]
access_key_id: Option<String>,
#[serde(skip_serializing)]
secret_access_key: Option<String>,
},
}
impl CfgFileParams {
/// Extract string arguments from CLI
fn from_args(arg_matches: &ArgMatches) -> Self {
let get_arg = |arg_name: &str| -> Option<String> {
arg_matches.value_of(arg_name).map(str::to_owned)
};
let relish_storage = if let Some(local_path) = get_arg("relish-storage-local-path") {
Some(RelishStorage::Local { local_path })
} else if let Some((bucket_name, bucket_region)) =
get_arg("relish-storage-s3-bucket").zip(get_arg("relish-storage-region"))
{
Some(RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id: get_arg("relish-storage-access-key"),
secret_access_key: get_arg("relish-storage-secret-access-key"),
})
} else {
None
};
Self {
listen_pg_addr: get_arg("listen-pg"),
listen_http_addr: get_arg("listen-http"),
checkpoint_distance: get_arg("checkpoint_distance"),
checkpoint_period: get_arg("checkpoint_period"),
gc_horizon: get_arg("gc_horizon"),
gc_period: get_arg("gc_period"),
pg_distrib_dir: get_arg("postgres-distrib"),
auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
auth_type: get_arg("auth-type"),
relish_storage,
relish_storage_max_concurrent_sync: get_arg("relish-storage-max-concurrent-sync"),
}
}
/// Fill missing values in `self` with `other`
fn or(self, other: CfgFileParams) -> Self {
// TODO cleaner way to do this
Self {
listen_pg_addr: self.listen_pg_addr.or(other.listen_pg_addr),
listen_http_addr: self.listen_http_addr.or(other.listen_http_addr),
checkpoint_distance: self.checkpoint_distance.or(other.checkpoint_distance),
checkpoint_period: self.checkpoint_period.or(other.checkpoint_period),
gc_horizon: self.gc_horizon.or(other.gc_horizon),
gc_period: self.gc_period.or(other.gc_period),
pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
auth_validation_public_key_path: self
.auth_validation_public_key_path
.or(other.auth_validation_public_key_path),
auth_type: self.auth_type.or(other.auth_type),
relish_storage: self.relish_storage.or(other.relish_storage),
relish_storage_max_concurrent_sync: self
.relish_storage_max_concurrent_sync
.or(other.relish_storage_max_concurrent_sync),
}
}
/// Create a PageServerConf from these string parameters
fn try_into_config(&self) -> Result<PageServerConf> {
let workdir = PathBuf::from(".");
let listen_pg_addr = match self.listen_pg_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_PG_LISTEN_ADDR.to_owned(),
};
let listen_http_addr = match self.listen_http_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_HTTP_LISTEN_ADDR.to_owned(),
};
let checkpoint_distance: u64 = match self.checkpoint_distance.as_ref() {
Some(checkpoint_distance_str) => checkpoint_distance_str.parse()?,
None => DEFAULT_CHECKPOINT_DISTANCE,
};
let checkpoint_period = match self.checkpoint_period.as_ref() {
Some(checkpoint_period_str) => humantime::parse_duration(checkpoint_period_str)?,
None => DEFAULT_CHECKPOINT_PERIOD,
};
let gc_horizon: u64 = match self.gc_horizon.as_ref() {
Some(horizon_str) => horizon_str.parse()?,
None => DEFAULT_GC_HORIZON,
};
let gc_period = match self.gc_period.as_ref() {
Some(period_str) => humantime::parse_duration(period_str)?,
None => DEFAULT_GC_PERIOD,
};
let pg_distrib_dir = match self.pg_distrib_dir.as_ref() {
Some(pg_distrib_dir_str) => PathBuf::from(pg_distrib_dir_str),
None => env::current_dir()?.join("tmp_install"),
};
let auth_validation_public_key_path = self
.auth_validation_public_key_path
.as_ref()
.map(PathBuf::from);
let auth_type = self
.auth_type
.as_ref()
.map_or(Ok(AuthType::Trust), |auth_type| {
AuthType::from_str(auth_type)
})?;
if !pg_distrib_dir.join("bin/postgres").exists() {
bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
}
if auth_type == AuthType::ZenithJWT {
ensure!(
auth_validation_public_key_path.is_some(),
"Missing auth_validation_public_key_path when auth_type is ZenithJWT"
);
let path_ref = auth_validation_public_key_path.as_ref().unwrap();
ensure!(
path_ref.exists(),
format!("Can't find auth_validation_public_key at {:?}", path_ref)
);
}
let max_concurrent_sync = match self.relish_storage_max_concurrent_sync.as_deref() {
Some(relish_storage_max_concurrent_sync) => {
relish_storage_max_concurrent_sync.parse()?
}
None => DEFAULT_RELISH_STORAGE_MAX_CONCURRENT_SYNC_LIMITS,
};
let relish_storage_config = self.relish_storage.as_ref().map(|storage_params| {
let storage = match storage_params.clone() {
RelishStorage::Local { local_path } => {
RelishStorageKind::LocalFs(PathBuf::from(local_path))
}
RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
} => RelishStorageKind::AwsS3(S3Config {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
}),
};
RelishStorageConfig {
max_concurrent_sync,
storage,
}
});
Ok(PageServerConf {
daemonize: false,
listen_pg_addr,
listen_http_addr,
checkpoint_distance,
checkpoint_period,
gc_horizon,
gc_period,
superuser: String::from(DEFAULT_SUPERUSER),
workdir,
pg_distrib_dir,
auth_validation_public_key_path,
auth_type,
relish_storage_config,
})
}
}
fn main() -> Result<()> {
zenith_metrics::set_common_metrics_prefix("pageserver");
fn main() -> Result<(), io::Error> {
let arg_matches = App::new("Zenith page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.arg(
Arg::with_name("listen-pg")
.short("l")
.long("listen-pg")
.alias("listen") // keep some compatibility
.takes_value(true)
.help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("listen-http")
.long("listen-http")
.alias("http_endpoint") // keep some compatibility
.takes_value(true)
.help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("daemonize")
.short("d")
.long("daemonize")
.takes_value(false)
.help("Run in the background"),
)
.arg(
Arg::with_name("init")
.long("init")
.takes_value(false)
.help("Initialize pageserver repo"),
)
.arg(
Arg::with_name("checkpoint_distance")
.long("checkpoint_distance")
.takes_value(true)
.help("Distance from current LSN to perform checkpoint of in-memory layers"),
)
.arg(
Arg::with_name("checkpoint_period")
.long("checkpoint_period")
.takes_value(true)
.help("Interval between checkpoint iterations"),
)
.arg(
Arg::with_name("gc_horizon")
.long("gc_horizon")
.takes_value(true)
.help("Distance from current LSN to perform all wal records cleanup"),
)
.arg(
Arg::with_name("gc_period")
.long("gc_period")
.takes_value(true)
.help("Interval between garbage collector iterations"),
)
.arg(
Arg::with_name("workdir")
.short("D")
.long("workdir")
.takes_value(true)
.help("Working directory for the pageserver"),
)
.arg(
Arg::with_name("postgres-distrib")
.long("postgres-distrib")
.takes_value(true)
.help("Postgres distribution directory"),
)
.arg(
Arg::with_name("create-tenant")
.long("create-tenant")
.takes_value(true)
.help("Create tenant during init")
.requires("init"),
)
.arg(
Arg::with_name("auth-validation-public-key-path")
.long("auth-validation-public-key-path")
.takes_value(true)
.help("Path to public key used to validate jwt signature"),
)
.arg(
Arg::with_name("auth-type")
.long("auth-type")
.takes_value(true)
.help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
)
.arg(
Arg::with_name("relish-storage-local-path")
.long("relish-storage-local-path")
.takes_value(true)
.help("Path to the local directory, to be used as an external relish storage")
.conflicts_with_all(&[
"relish-storage-s3-bucket",
"relish-storage-region",
"relish-storage-access-key",
"relish-storage-secret-access-key",
]),
)
.arg(
Arg::with_name("relish-storage-s3-bucket")
.long("relish-storage-s3-bucket")
.takes_value(true)
.help("Name of the AWS S3 bucket to use an external relish storage")
.requires("relish-storage-region"),
)
.arg(
Arg::with_name("relish-storage-region")
.long("relish-storage-region")
.takes_value(true)
.help("Region of the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-access-key")
.long("relish-storage-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-secret-access-key")
.long("relish-storage-secret-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-max-concurrent-sync")
.long("relish-storage-max-concurrent-sync")
.takes_value(true)
.help("Maximum allowed concurrent synchronisations with storage"),
)
.arg(Arg::with_name("datadir")
.short("D")
.long("dir")
.takes_value(true)
.help("Path to the page server data directory"))
.arg(Arg::with_name("wal_producer")
.short("w")
.long("wal-producer")
.takes_value(true)
.help("connect to the WAL sender (postgres or wal_acceptor) on connstr (default: 'host=127.0.0.1 port=65432 user=zenith')"))
.arg(Arg::with_name("listen")
.short("l")
.long("listen")
.takes_value(true)
.help("listen for incoming page requests on ip:port (default: 127.0.0.1:5430)"))
.arg(Arg::with_name("interactive")
.short("i")
.long("interactive")
.takes_value(false)
.help("Interactive mode"))
.arg(Arg::with_name("daemonize")
.short("d")
.long("daemonize")
.takes_value(false)
.help("Run in the background"))
.arg(Arg::with_name("skip_recovery")
.long("skip-recovery")
.takes_value(false)
.help("Skip S3 recovery procedy and start empty"))
.get_matches();
let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith"));
let cfg_file_path = workdir
.canonicalize()
.with_context(|| format!("Error opening workdir '{}'", workdir.display()))?
.join("pageserver.toml");
let args_params = CfgFileParams::from_args(&arg_matches);
let init = arg_matches.is_present("init");
let create_tenant = arg_matches.value_of("create-tenant");
let params = if init {
// We're initializing the repo, so there's no config file yet
args_params
} else {
// Supplement the CLI arguments with the config file
let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)
.with_context(|| format!("No pageserver config at '{}'", cfg_file_path.display()))?;
let file_params: CfgFileParams = toml::from_str(&cfg_file_contents).with_context(|| {
format!(
"Failed to read '{}' as pageserver config",
cfg_file_path.display()
)
})?;
args_params.or(file_params)
};
let mut conf = PageServerConf {
data_dir: PathBuf::from("./"),
daemonize: false,
interactive: false,
wal_producer_connstr: None,
listen_addr: "127.0.0.1:5430".parse().unwrap(),
skip_recovery: false,
};
// Set CWD to workdir for non-daemon modes
env::set_current_dir(&workdir).with_context(|| {
format!(
"Failed to set application's current dir to '{}'",
workdir.display()
)
})?;
// Ensure the config is valid, even if just init-ing
let mut conf = params.try_into_config().with_context(|| {
format!(
"Pageserver config at '{}' is not valid",
cfg_file_path.display()
)
})?;
conf.daemonize = arg_matches.is_present("daemonize");
if init && conf.daemonize {
bail!("--daemonize cannot be used with --init")
if let Some(dir) = arg_matches.value_of("datadir") {
conf.data_dir = PathBuf::from(dir);
}
// The configuration is all set up now. Turn it into a 'static
// that can be freely stored in structs and passed across threads
// as a ref.
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
// Create repo and exit if init was requested
if init {
branches::init_pageserver(conf, create_tenant).context("Failed to init pageserver")?;
// write the config file
let cfg_file_contents = toml::to_string_pretty(&params)
.context("Failed to create pageserver config contents for initialisation")?;
// TODO support enable-auth flag
std::fs::write(&cfg_file_path, cfg_file_contents).with_context(|| {
format!(
"Failed to initialize pageserver config at '{}'",
cfg_file_path.display()
)
})?;
Ok(())
} else {
start_pageserver(conf).context("Failed to start pageserver")
if arg_matches.is_present("daemonize") {
conf.daemonize = true;
}
if arg_matches.is_present("interactive") {
conf.interactive = true;
}
if conf.daemonize && conf.interactive {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"--daemonize is not allowed with --interactive: choose one",
));
}
if arg_matches.is_present("skip_recovery") {
conf.skip_recovery = true;
}
if let Some(addr) = arg_matches.value_of("wal_producer") {
conf.wal_producer_connstr = Some(String::from_str(addr).unwrap());
}
if let Some(addr) = arg_matches.value_of("listen") {
conf.listen_addr = addr.parse().unwrap();
}
start_pageserver(conf)
}
fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
fn start_pageserver(conf: PageServerConf) -> Result<(), io::Error> {
// Initialize logger
let log_file = logging::init(LOG_FILE_NAME, conf.daemonize)?;
let _scope_guard = init_logging(&conf);
let _log_guard = slog_stdlog::init().unwrap();
let term_now = Arc::new(AtomicBool::new(false));
for sig in TERM_SIGNALS {
// When terminated by a second term signal, exit with exit code 1.
// This will do nothing the first time (because term_now is false).
flag::register_conditional_shutdown(*sig, 1, Arc::clone(&term_now))?;
// But this will "arm" the above for the second time, by setting it to true.
// The order of registering these is important, if you put this one first, it will
// first arm and then terminate all in the first round.
flag::register(*sig, Arc::clone(&term_now))?;
}
// Note: this `info!(...)` macro comes from `log` crate
info!("standard logging redirected to slog");
let tui_thread: Option<thread::JoinHandle<()>>;
if conf.interactive {
// Initialize the UI
tui_thread = Some(
thread::Builder::new()
.name("UI thread".into())
.spawn(|| {
let _ = tui::ui_main();
})
.unwrap(),
);
//threads.push(tui_thread);
} else {
tui_thread = None;
}
// TODO: Check that it looks like a valid repository before going further
// bind sockets before daemonizing so we report errors early and do not return until we are listening
info!(
"Starting pageserver http handler on {}",
conf.listen_http_addr
);
let http_listener = TcpListener::bind(conf.listen_http_addr.clone())?;
info!(
"Starting pageserver pg protocol handler on {}",
conf.listen_pg_addr
);
let pageserver_listener = TcpListener::bind(conf.listen_pg_addr.clone())?;
if conf.daemonize {
info!("daemonizing...");
// There shouldn't be any logging to stdin/stdout. Redirect it to the main log so
// that we will see any accidental manual fprintf's or backtraces.
let stdout = log_file.try_clone().unwrap();
let stderr = log_file;
let stdout = OpenOptions::new()
.create(true)
.append(true)
.open(conf.data_dir.join("pageserver.log"))
.unwrap();
let stderr = OpenOptions::new()
.create(true)
.append(true)
.open(conf.data_dir.join("pageserver.log"))
.unwrap();
let daemonize = Daemonize::new()
.pid_file(conf.data_dir.join("pageserver.pid"))
.working_directory(conf.data_dir.clone())
.stdout(stdout)
.stderr(stderr);
match daemonize.start() {
Ok(_) => info!("Success, daemonized"),
Err(err) => error!("could not daemonize: {}", err),
}
}
// keep join handles for spawned threads
// don't spawn threads before daemonizing
let mut join_handles = Vec::new();
let mut threads = Vec::new();
if let Some(handle) = relish_storage::run_storage_sync_thread(conf)? {
join_handles.push(handle);
}
info!("starting...");
// Before opening up for connections, restore the latest base backup from S3.
// (We don't persist anything to local disk at the moment, so we need to do
// this at every startup)
// TODO move it to a separate function
if !conf.skip_recovery {
restore_s3::restore_main(&conf);
}
// Initialize tenant manager.
tenant_mgr::init(conf);
// initialize authentication for incoming connections
let auth = match &conf.auth_type {
AuthType::Trust | AuthType::MD5 => None,
AuthType::ZenithJWT => {
// unwrap is ok because check is performed when creating config, so path is set and file exists
let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
Some(JwtAuth::from_key_path(key_path)?.into())
}
};
info!("Using auth: {:#?}", conf.auth_type);
// Spawn a new thread for the http endpoint
// bind before launching separate thread so the error reported before startup exits
let cloned = auth.clone();
let http_endpoint_thread = thread::Builder::new()
.name("http_endpoint_thread".into())
.spawn(move || {
let router = http::make_router(conf, cloned);
endpoint::serve_thread_main(router, http_listener)
})?;
join_handles.push(http_endpoint_thread);
// Spawn a thread to listen for connections. It will spawn further threads
// for each connection.
let page_service_thread = thread::Builder::new()
.name("Page Service thread".into())
.spawn(move || {
page_service::thread_main(conf, auth, pageserver_listener, conf.auth_type)
})?;
for info in SignalsInfo::<WithOrigin>::new(TERM_SIGNALS)?.into_iter() {
match info.signal {
SIGQUIT => {
info!("Got SIGQUIT. Terminate pageserver in immediate shutdown mode");
exit(111);
}
SIGTERM => {
info!("Got SIGINT/SIGTERM. Terminate gracefully in fast shutdown mode");
// Terminate postgres backends
postgres_backend::set_pgbackend_shutdown_requested();
// Stop all tenants and flush their data
tenant_mgr::shutdown_all_tenants()?;
// Wait for pageservice thread to complete the job
page_service_thread
.join()
.expect("thread panicked")
.expect("thread exited with an error");
// Shut down http router
endpoint::shutdown();
// Wait for all threads
for handle in join_handles.into_iter() {
handle
.join()
.expect("thread panicked")
.expect("thread exited with an error");
}
info!("Pageserver shut down successfully completed");
exit(0);
}
// Create directory for wal-redo datadirs
match fs::create_dir(conf.data_dir.join("wal-redo")) {
Ok(_) => {}
Err(e) => match e.kind() {
io::ErrorKind::AlreadyExists => {}
_ => {
debug!("Unknown signal.");
panic!("Failed to create wal-redo data directory: {}", e);
}
}
},
}
// Launch the WAL receiver thread if pageserver was started with --wal-producer
// option. It will try to connect to the WAL safekeeper, and stream the WAL. If
// the connection is lost, it will reconnect on its own. We just fire and forget
// it here.
//
// All other wal receivers are started on demand by "callmemaybe" command
// sent to pageserver.
let conf_copy = conf.clone();
if let Some(wal_producer) = conf.wal_producer_connstr {
let conf = conf_copy.clone();
let walreceiver_thread = thread::Builder::new()
.name("static WAL receiver thread".into())
.spawn(move || {
walreceiver::thread_main(conf, &wal_producer);
})
.unwrap();
threads.push(walreceiver_thread);
}
// GetPage@LSN requests are served by another thread. (It uses async I/O,
// but the code in page_service sets up its own thread pool for that)
let conf = conf_copy.clone();
let page_server_thread = thread::Builder::new()
.name("Page Service thread".into())
.spawn(|| {
// thread code
page_service::thread_main(conf);
})
.unwrap();
threads.push(page_server_thread);
if tui_thread.is_some() {
// The TUI thread exits when the user asks to Quit.
tui_thread.unwrap().join().unwrap();
} else {
// In non-interactive mode, wait forever.
for t in threads {
t.join().unwrap()
}
}
Ok(())
}
fn init_logging(conf: &PageServerConf) -> slog_scope::GlobalLoggerGuard {
if conf.interactive {
tui::init_logging()
} else if conf.daemonize {
let log = conf.data_dir.join("pageserver.log");
let log_file = File::create(log).unwrap_or_else(|_| panic!("Could not create log file"));
let decorator = slog_term::PlainSyncDecorator::new(log_file);
let drain = slog_term::CompactFormat::new(decorator).build();
let drain = slog::Filter::new(drain, |record: &slog::Record| {
record.level().is_at_least(slog::Level::Info)
});
let drain = std::sync::Mutex::new(drain).fuse();
let logger = slog::Logger::root(drain, slog::o!());
slog_scope::set_global_logger(logger)
} else {
let decorator = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).chan_size(1000).build().fuse();
let drain = slog::Filter::new(drain, |record: &slog::Record| {
record.level().is_at_least(slog::Level::Info)
|| (record.level().is_at_least(slog::Level::Debug)
&& record.module().starts_with("pageserver"))
})
.fuse();
let logger = slog::Logger::root(drain, slog::o!());
slog_scope::set_global_logger(logger)
}
}

View File

@@ -1,431 +0,0 @@
//!
//! Branch management code
//!
// TODO: move all paths construction to conf impl
//
use anyhow::{bail, ensure, Context, Result};
use postgres_ffi::ControlFileData;
use serde::{Deserialize, Serialize};
use std::{
fs,
path::Path,
process::{Command, Stdio},
str::FromStr,
sync::Arc,
};
use tracing::*;
use zenith_utils::crashsafe_dir;
use zenith_utils::logging;
use zenith_utils::lsn::Lsn;
use zenith_utils::zid::{ZTenantId, ZTimelineId};
use crate::tenant_mgr;
use crate::walredo::WalRedoManager;
use crate::{repository::Repository, PageServerConf};
use crate::{restore_local_repo, LOG_FILE_NAME};
#[derive(Serialize, Deserialize, Clone)]
pub struct BranchInfo {
pub name: String,
#[serde(with = "hex")]
pub timeline_id: ZTimelineId,
pub latest_valid_lsn: Lsn,
pub ancestor_id: Option<String>,
pub ancestor_lsn: Option<String>,
pub current_logical_size: usize,
pub current_logical_size_non_incremental: usize,
}
impl BranchInfo {
pub fn from_path<T: AsRef<Path>>(
path: T,
conf: &PageServerConf,
tenantid: &ZTenantId,
repo: &Arc<dyn Repository>,
) -> Result<Self> {
let name = path
.as_ref()
.file_name()
.unwrap()
.to_str()
.unwrap()
.to_string();
let timeline_id = std::fs::read_to_string(path)?.parse::<ZTimelineId>()?;
let timeline = repo.get_timeline(timeline_id)?;
let ancestor_path = conf.ancestor_path(&timeline_id, tenantid);
let mut ancestor_id: Option<String> = None;
let mut ancestor_lsn: Option<String> = None;
if ancestor_path.exists() {
let ancestor = std::fs::read_to_string(ancestor_path)?;
let mut strings = ancestor.split('@');
ancestor_id = Some(
strings
.next()
.with_context(|| "wrong branch ancestor point in time format")?
.to_owned(),
);
ancestor_lsn = Some(
strings
.next()
.with_context(|| "wrong branch ancestor point in time format")?
.to_owned(),
);
}
Ok(BranchInfo {
name,
timeline_id,
latest_valid_lsn: timeline.get_last_record_lsn(),
ancestor_id,
ancestor_lsn,
current_logical_size: timeline.get_current_logical_size(),
current_logical_size_non_incremental: timeline
.get_current_logical_size_non_incremental(timeline.get_last_record_lsn())?,
})
}
}
#[derive(Debug, Clone, Copy)]
pub struct PointInTime {
pub timelineid: ZTimelineId,
pub lsn: Lsn,
}
pub fn init_pageserver(conf: &'static PageServerConf, create_tenant: Option<&str>) -> Result<()> {
// Initialize logger
// use true as the daemonize parameter, because otherwise we pollute the zenith CLI output with several pages of info messages
let _log_file = logging::init(LOG_FILE_NAME, true)?;
// We don't use the real WAL redo manager, because we don't want to spawn the WAL redo
// process during repository initialization.
//
// FIXME: That caused trouble, because the WAL redo manager spawned a thread that launched
// initdb in the background, and it kept running even after the "zenith init" had exited.
// In tests, we started the page server immediately after that, so that initdb was still
// running in the background, and we failed to run initdb again in the same directory. This
// has been solved for the rapid init+start case now, but the general race condition remains
// if you restart the server quickly. The WAL redo manager doesn't use a separate thread
// anymore, but I think that could still happen.
let dummy_redo_mgr = Arc::new(crate::walredo::DummyRedoManager {});
if let Some(tenantid) = create_tenant {
let tenantid = ZTenantId::from_str(tenantid)?;
println!("initializing tenantid {}", tenantid);
create_repo(conf, tenantid, dummy_redo_mgr).with_context(|| "failed to create repo")?;
}
crashsafe_dir::create_dir_all(conf.tenants_path())?;
println!("pageserver init succeeded");
Ok(())
}
pub fn create_repo(
conf: &'static PageServerConf,
tenantid: ZTenantId,
wal_redo_manager: Arc<dyn WalRedoManager + Send + Sync>,
) -> Result<Arc<dyn Repository>> {
let repo_dir = conf.tenant_path(&tenantid);
if repo_dir.exists() {
bail!("repo for {} already exists", tenantid)
}
// top-level dir may exist if we are creating it through CLI
crashsafe_dir::create_dir_all(&repo_dir)
.with_context(|| format!("could not create directory {}", repo_dir.display()))?;
crashsafe_dir::create_dir(conf.timelines_path(&tenantid))?;
crashsafe_dir::create_dir_all(conf.branches_path(&tenantid))?;
crashsafe_dir::create_dir_all(conf.tags_path(&tenantid))?;
info!("created directory structure in {}", repo_dir.display());
let tli = create_timeline(conf, None, &tenantid)?;
let repo = Arc::new(crate::layered_repository::LayeredRepository::new(
conf,
wal_redo_manager,
tenantid,
false,
));
// Load data into pageserver
// TODO To implement zenith import we need to
// move data loading out of create_repo()
bootstrap_timeline(conf, tenantid, tli, repo.as_ref())?;
Ok(repo)
}
// Returns checkpoint LSN from controlfile
fn get_lsn_from_controlfile(path: &Path) -> Result<Lsn> {
// Read control file to extract the LSN
let controlfile_path = path.join("global").join("pg_control");
let controlfile = ControlFileData::decode(&fs::read(controlfile_path)?)?;
let lsn = controlfile.checkPoint;
Ok(Lsn(lsn))
}
// Create the cluster temporarily in 'initdbpath' directory inside the repository
// to get bootstrap data for timeline initialization.
//
fn run_initdb(conf: &'static PageServerConf, initdbpath: &Path) -> Result<()> {
info!("running initdb in {}... ", initdbpath.display());
let initdb_path = conf.pg_bin_dir().join("initdb");
let initdb_output = Command::new(initdb_path)
.args(&["-D", initdbpath.to_str().unwrap()])
.args(&["-U", &conf.superuser])
.arg("--no-instructions")
// This is only used for a temporary installation that is deleted shortly after,
// so no need to fsync it
.arg("--no-sync")
.env_clear()
.env("LD_LIBRARY_PATH", conf.pg_lib_dir().to_str().unwrap())
.env("DYLD_LIBRARY_PATH", conf.pg_lib_dir().to_str().unwrap())
.stdout(Stdio::null())
.output()
.with_context(|| "failed to execute initdb")?;
if !initdb_output.status.success() {
anyhow::bail!(
"initdb failed: '{}'",
String::from_utf8_lossy(&initdb_output.stderr)
);
}
Ok(())
}
//
// - run initdb to init temporary instance and get bootstrap data
// - after initialization complete, remove the temp dir.
//
fn bootstrap_timeline(
conf: &'static PageServerConf,
tenantid: ZTenantId,
tli: ZTimelineId,
repo: &dyn Repository,
) -> Result<()> {
let _enter = info_span!("bootstrapping", timeline = %tli, tenant = %tenantid).entered();
let initdb_path = conf.tenant_path(&tenantid).join("tmp");
// Init a temporary repo to get bootstrap data
run_initdb(conf, &initdb_path)?;
let pgdata_path = initdb_path;
let lsn = get_lsn_from_controlfile(&pgdata_path)?.align();
// Import the contents of the data directory at the initial checkpoint
// LSN, and any WAL after that.
let timeline = repo.create_empty_timeline(tli)?;
restore_local_repo::import_timeline_from_postgres_datadir(
&pgdata_path,
timeline.writer().as_ref(),
lsn,
)?;
timeline.checkpoint()?;
println!(
"created initial timeline {} timeline.lsn {}",
tli,
timeline.get_last_record_lsn()
);
let data = tli.to_string();
fs::write(conf.branch_path("main", &tenantid), data)?;
println!("created main branch");
// Remove temp dir. We don't need it anymore
fs::remove_dir_all(pgdata_path)?;
Ok(())
}
pub(crate) fn get_tenants(conf: &PageServerConf) -> Result<Vec<String>> {
let tenants_dir = conf.tenants_path();
std::fs::read_dir(&tenants_dir)?
.map(|dir_entry_res| {
let dir_entry = dir_entry_res?;
ensure!(dir_entry.file_type()?.is_dir());
Ok(dir_entry.file_name().to_str().unwrap().to_owned())
})
.collect()
}
pub(crate) fn get_branches(conf: &PageServerConf, tenantid: &ZTenantId) -> Result<Vec<BranchInfo>> {
let repo = tenant_mgr::get_repository_for_tenant(*tenantid)?;
// Each branch has a corresponding record (a text file) in refs/branches
// containing its timeline_id.
let branches_dir = conf.branches_path(tenantid);
std::fs::read_dir(&branches_dir)?
.map(|dir_entry_res| {
let dir_entry = dir_entry_res?;
BranchInfo::from_path(dir_entry.path(), conf, tenantid, &repo)
})
.collect()
}
pub(crate) fn create_branch(
conf: &PageServerConf,
branchname: &str,
startpoint_str: &str,
tenantid: &ZTenantId,
) -> Result<BranchInfo> {
let repo = tenant_mgr::get_repository_for_tenant(*tenantid)?;
if conf.branch_path(branchname, tenantid).exists() {
anyhow::bail!("branch {} already exists", branchname);
}
let mut startpoint = parse_point_in_time(conf, startpoint_str, tenantid)?;
let timeline = repo.get_timeline(startpoint.timelineid)?;
if startpoint.lsn == Lsn(0) {
// Find end of WAL on the old timeline
let end_of_wal = timeline.get_last_record_lsn();
info!("branching at end of WAL: {}", end_of_wal);
startpoint.lsn = end_of_wal;
} else {
// Wait for the WAL to arrive and be processed on the parent branch up
// to the requested branch point. The repository code itself doesn't
// require it, but if we start to receive WAL on the new timeline,
// decoding the new WAL might need to look up previous pages, relation
// sizes etc. and that would get confused if the previous page versions
// are not in the repository yet.
timeline.wait_lsn(startpoint.lsn)?;
}
startpoint.lsn = startpoint.lsn.align();
if timeline.get_start_lsn() > startpoint.lsn {
anyhow::bail!(
"invalid startpoint {} for the branch {}: less than timeline start {}",
startpoint.lsn,
branchname,
timeline.get_start_lsn()
);
}
// create a new timeline directory for it
let newtli = create_timeline(conf, Some(startpoint), tenantid)?;
// Let the Repository backend do its initialization
repo.branch_timeline(startpoint.timelineid, newtli, startpoint.lsn)?;
// Remember the human-readable branch name for the new timeline.
// FIXME: there's a race condition, if you create a branch with the same
// name concurrently.
let data = newtli.to_string();
fs::write(conf.branch_path(branchname, tenantid), data)?;
Ok(BranchInfo {
name: branchname.to_string(),
timeline_id: newtli,
latest_valid_lsn: startpoint.lsn,
ancestor_id: None,
ancestor_lsn: None,
current_logical_size: 0,
current_logical_size_non_incremental: 0,
})
}
//
// Parse user-given string that represents a point-in-time.
//
// We support multiple variants:
//
// Raw timeline id in hex, meaning the end of that timeline:
// bc62e7d612d0e6fe8f99a6dd2f281f9d
//
// A specific LSN on a timeline:
// bc62e7d612d0e6fe8f99a6dd2f281f9d@2/15D3DD8
//
// Same, with a human-friendly branch name:
// main
// main@2/15D3DD8
//
// Human-friendly tag name:
// mytag
//
//
fn parse_point_in_time(
conf: &PageServerConf,
s: &str,
tenantid: &ZTenantId,
) -> Result<PointInTime> {
let mut strings = s.split('@');
let name = strings.next().unwrap();
let lsn: Option<Lsn>;
if let Some(lsnstr) = strings.next() {
lsn = Some(
Lsn::from_str(lsnstr).with_context(|| "invalid LSN in point-in-time specification")?,
);
} else {
lsn = None
}
// Check if it's a tag
if lsn.is_none() {
let tagpath = conf.tag_path(name, tenantid);
if tagpath.exists() {
let pointstr = fs::read_to_string(tagpath)?;
return parse_point_in_time(conf, &pointstr, tenantid);
}
}
// Check if it's a branch
// Check if it's branch @ LSN
let branchpath = conf.branch_path(name, tenantid);
if branchpath.exists() {
let pointstr = fs::read_to_string(branchpath)?;
let mut result = parse_point_in_time(conf, &pointstr, tenantid)?;
result.lsn = lsn.unwrap_or(Lsn(0));
return Ok(result);
}
// Check if it's a timelineid
// Check if it's timelineid @ LSN
if let Ok(timelineid) = ZTimelineId::from_str(name) {
let tlipath = conf.timeline_path(&timelineid, tenantid);
if tlipath.exists() {
return Ok(PointInTime {
timelineid,
lsn: lsn.unwrap_or(Lsn(0)),
});
}
}
bail!("could not parse point-in-time {}", s);
}
fn create_timeline(
conf: &PageServerConf,
ancestor: Option<PointInTime>,
tenantid: &ZTenantId,
) -> Result<ZTimelineId> {
// Create initial timeline
let timelineid = ZTimelineId::generate();
let timelinedir = conf.timeline_path(&timelineid, tenantid);
fs::create_dir(&timelinedir)?;
if let Some(ancestor) = ancestor {
let data = format!("{}@{}", ancestor.timelineid, ancestor.lsn);
fs::write(timelinedir.join("ancestor"), data)?;
}
Ok(timelineid)
}

View File

@@ -0,0 +1,218 @@
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use std::fs::File;
use std::io::prelude::*;
use std::io::SeekFrom;
use bytes::{Buf, Bytes};
use log::*;
type XLogRecPtr = u64;
#[repr(C)]
#[derive(Debug, Clone)]
/*
* Body of CheckPoint XLOG records. This is declared here because we keep
* a copy of the latest one in pg_control for possible disaster recovery.
* Changing this struct requires a PG_CONTROL_VERSION bump.
*/
pub struct CheckPoint {
pub redo: XLogRecPtr, /* next RecPtr available when we began to
* create CheckPoint (i.e. REDO start point) */
pub ThisTimeLineID: u32, /* current TLI */
pub PrevTimeLineID: u32, /* previous TLI, if this record begins a new
* timeline (equals ThisTimeLineID otherwise) */
pub fullPageWrites: bool, /* current full_page_writes */
pub nextXid: u64, /* next free transaction ID */
pub nextOid: u32, /* next free OID */
pub nextMulti: u32, /* next free MultiXactId */
pub nextMultiOffset: u32, /* next free MultiXact offset */
pub oldestXid: u32, /* cluster-wide minimum datfrozenxid */
pub oldestXidDB: u32, /* database with minimum datfrozenxid */
pub oldestMulti: u32, /* cluster-wide minimum datminmxid */
pub oldestMultiDB: u32, /* database with minimum datminmxid */
pub time: u64, /* time stamp of checkpoint */
pub oldestCommitTsXid: u32, /* oldest Xid with valid commit
* timestamp */
pub newestCommitTsXid: u32, /* newest Xid with valid commit
* timestamp */
/*
* Oldest XID still running. This is only needed to initialize hot standby
* mode from an online checkpoint, so we only bother calculating this for
* online checkpoints and only when wal_level is replica. Otherwise it's
* set to InvalidTransactionId.
*/
pub oldestActiveXid: u32,
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct ControlFileDataZenith {
pub system_identifier: u64,
pg_control_version: u32, /* PG_CONTROL_VERSION */
catalog_version_no: u32, /* see catversion.h */
state: i32, /* see enum above */
time: i64, /* time stamp of last pg_control update */
pub checkPoint: XLogRecPtr,
checkPointCopy: CheckPoint, /* copy of last check point record */
unloggedLSN: XLogRecPtr, /* current fake LSN value, for unlogged rels */
minRecoveryPoint: XLogRecPtr,
minRecoveryPointTLI: u32,
backupStartPoint: XLogRecPtr,
backupEndPoint: XLogRecPtr,
backupEndRequired: bool,
}
impl ControlFileDataZenith {
pub fn new() -> ControlFileDataZenith {
ControlFileDataZenith {
system_identifier: 0,
pg_control_version: 0,
catalog_version_no: 0,
state: 0,
time: 0,
checkPoint: 0,
checkPointCopy: {
CheckPoint {
redo: 0,
ThisTimeLineID: 0,
PrevTimeLineID: 0,
fullPageWrites: false,
nextXid: 0,
nextOid: 0,
nextMulti: 0,
nextMultiOffset: 0,
oldestXid: 0,
oldestXidDB: 0,
oldestMulti: 0,
oldestMultiDB: 0,
time: 0,
oldestCommitTsXid: 0,
newestCommitTsXid: 0,
oldestActiveXid: 0,
}
},
unloggedLSN: 0,
minRecoveryPoint: 0,
minRecoveryPointTLI: 0,
backupStartPoint: 0,
backupEndPoint: 0,
backupEndRequired: false,
}
}
}
pub fn decode_pg_control(mut buf: Bytes) -> ControlFileDataZenith {
info!("decode pg_control");
let controlfile: ControlFileDataZenith = ControlFileDataZenith {
system_identifier: buf.get_u64_le(),
pg_control_version: buf.get_u32_le(),
catalog_version_no: buf.get_u32_le(),
state: buf.get_i32_le(),
time: {
buf.advance(4);
buf.get_i64_le()
},
checkPoint: buf.get_u64_le(),
checkPointCopy: {
CheckPoint {
redo: buf.get_u64_le(),
ThisTimeLineID: buf.get_u32_le(),
PrevTimeLineID: buf.get_u32_le(),
fullPageWrites: buf.get_u8() != 0,
nextXid: {
buf.advance(7);
buf.get_u64_le()
},
nextOid: buf.get_u32_le(),
nextMulti: buf.get_u32_le(),
nextMultiOffset: buf.get_u32_le(),
oldestXid: buf.get_u32_le(),
oldestXidDB: buf.get_u32_le(),
oldestMulti: buf.get_u32_le(),
oldestMultiDB: buf.get_u32_le(),
time: {
buf.advance(4);
buf.get_u64_le()
},
oldestCommitTsXid: buf.get_u32_le(),
newestCommitTsXid: buf.get_u32_le(),
oldestActiveXid: buf.get_u32_le(),
}
},
unloggedLSN: buf.get_u64_le(),
minRecoveryPoint: buf.get_u64_le(),
minRecoveryPointTLI: buf.get_u32_le(),
backupStartPoint: {
buf.advance(4);
buf.get_u64_le()
},
backupEndPoint: buf.get_u64_le(),
backupEndRequired: buf.get_u8() != 0,
};
return controlfile;
}
pub fn parse_controlfile(b: Bytes) {
let controlfile = decode_pg_control(b);
info!(
"controlfile {:X}/{:X}",
controlfile.checkPoint >> 32,
controlfile.checkPoint
);
info!("controlfile {:?}", controlfile);
}
const MAX_MAPPINGS: usize = 62;
#[derive(Debug)]
struct RelMapping {
mapoid: u32, /* OID of a catalog */
mapfilenode: u32, /* its filenode number */
}
#[derive(Debug)]
pub struct RelMapFile {
magic: i32, /* always RELMAPPER_FILEMAGIC */
num_mappings: i32, /* number of valid RelMapping entries */
mappings: [u8; MAX_MAPPINGS * 8],
crc: u32, /* CRC of all above */
pad: i32, /* to make the struct size be 512 exactly */
}
pub fn decode_filemapping(mut buf: Bytes) -> RelMapFile {
info!("decode filemap");
let file: RelMapFile = RelMapFile {
magic: buf.get_i32_le(), /* always RELMAPPER_FILEMAGIC */
num_mappings: buf.get_i32_le(), /* number of valid RelMapping entries */
mappings: {
let mut arr = [0 as u8; MAX_MAPPINGS * 8];
buf.copy_to_slice(&mut arr);
arr
},
crc: buf.get_u32_le(), /* CRC of all above */
pad: buf.get_i32_le(),
};
info!("decode filemap {:?}", file);
file
}
pub fn write_buf_to_file(filepath: String, buf: Bytes, blkno: u32) {
info!("write_buf_to_file {}", filepath.clone());
let mut buffer = File::create(filepath.clone()).unwrap();
buffer.seek(SeekFrom::Start(8192 * blkno as u64)).unwrap();
buffer.write_all(&buf).unwrap();
info!("DONE write_buf_to_file {}", filepath);
}
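
As a usage note: a minimal sketch (not code from this commit) of driving the decoders above from a pg_control file on disk. The path is a placeholder, and the sketch assumes it lives in the same module so decode_pg_control is in scope.

use bytes::Bytes;

fn read_and_decode_controlfile() -> std::io::Result<()> {
    // Placeholder path to a cluster's control file; adjust to the real data directory.
    let raw = std::fs::read("pgdata/global/pg_control")?;
    // decode_pg_control() consumes the raw bytes laid out as in ControlFileDataZenith.
    let controlfile = decode_pg_control(Bytes::from(raw));
    println!(
        "systemid={} checkpoint={:X}/{:X}",
        controlfile.system_identifier,
        controlfile.checkPoint >> 32,
        controlfile.checkPoint
    );
    Ok(())
}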

View File

@@ -1,3 +0,0 @@
pub mod models;
pub mod routes;
pub use routes::make_router;

View File

@@ -1,17 +0,0 @@
use serde::{Deserialize, Serialize};
use crate::ZTenantId;
#[derive(Serialize, Deserialize)]
pub struct BranchCreateRequest {
#[serde(with = "hex")]
pub tenant_id: ZTenantId,
pub name: String,
pub start_point: String,
}
#[derive(Serialize, Deserialize)]
pub struct TenantCreateRequest {
#[serde(with = "hex")]
pub tenant_id: ZTenantId,
}

View File

@@ -1,291 +0,0 @@
openapi: "3.0.2"
info:
title: Page Server API
version: "1.0"
servers:
- url: ""
paths:
/v1/status:
description: Healthcheck endpoint
get:
description: Healthcheck
security: []
responses:
"200":
description: OK
content:
application/json:
schema:
type: object
/v1/branch/{tenant_id}:
parameters:
- name: tenant_id
in: path
required: true
schema:
type: string
format: hex
get:
description: Get branches for tenant
responses:
"200":
description: BranchInfo
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/BranchInfo"
"400":
description: Error when no tenant id found in path
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
"401":
description: Unauthorized Error
content:
application/json:
schema:
$ref: "#/components/schemas/UnauthorizedError"
"403":
description: Forbidden Error
content:
application/json:
schema:
$ref: "#/components/schemas/ForbiddenError"
"500":
description: Generic operation error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/v1/branch/{tenant_id}/{branch_name}:
parameters:
- name: tenant_id
in: path
required: true
schema:
type: string
format: hex
- name: branch_name
in: path
required: true
schema:
type: string
get:
description: Get a single branch by name for tenant
responses:
"200":
description: BranchInfo
content:
application/json:
schema:
$ref: "#/components/schemas/BranchInfo"
"400":
description: Error when no tenant id found in path or no branch name
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
"401":
description: Unauthorized Error
content:
application/json:
schema:
$ref: "#/components/schemas/UnauthorizedError"
"403":
description: Forbidden Error
content:
application/json:
schema:
$ref: "#/components/schemas/ForbiddenError"
"500":
description: Generic operation error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/v1/branch/:
post:
description: Create branch
requestBody:
content:
application/json:
schema:
type: object
required:
- "tenant_id"
- "name"
- "start_point"
properties:
tenant_id:
type: string
format: hex
name:
type: string
start_point:
type: string
responses:
"201":
description: BranchInfo
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/BranchInfo"
"400":
description: Malformed branch create request
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
"401":
description: Unauthorized Error
content:
application/json:
schema:
$ref: "#/components/schemas/UnauthorizedError"
"403":
description: Forbidden Error
content:
application/json:
schema:
$ref: "#/components/schemas/ForbiddenError"
"500":
description: Generic operation error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/v1/tenant/:
get:
description: Get tenants list
responses:
"200":
description: OK
content:
application/json:
schema:
type: array
items:
type: string
"401":
description: Unauthorized Error
content:
application/json:
schema:
$ref: "#/components/schemas/UnauthorizedError"
"403":
description: Forbidden Error
content:
application/json:
schema:
$ref: "#/components/schemas/ForbiddenError"
"500":
description: Generic operation error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
post:
description: Create tenant
requestBody:
content:
application/json:
schema:
type: object
required:
- "tenant_id"
properties:
tenant_id:
type: string
format: hex
responses:
"201":
description: CREATED
content:
application/json:
schema:
type: array
items:
type: string
"400":
description: Malformed tenant create request
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
"401":
description: Unauthorized Error
content:
application/json:
schema:
$ref: "#/components/schemas/UnauthorizedError"
"403":
description: Forbidden Error
content:
application/json:
schema:
$ref: "#/components/schemas/ForbiddenError"
"500":
description: Generic operation error
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
components:
securitySchemes:
JWT:
type: http
scheme: bearer
bearerFormat: JWT
schemas:
BranchInfo:
type: object
required:
- name
- timeline_id
- latest_valid_lsn
- current_logical_size
- current_logical_size_non_incremental
properties:
name:
type: string
timeline_id:
type: string
format: hex
ancestor_id:
type: string
ancestor_lsn:
type: string
current_logical_size:
type: integer
current_logical_size_non_incremental:
type: integer
Error:
type: object
required:
- msg
properties:
msg:
type: string
UnauthorizedError:
type: object
required:
- msg
properties:
msg:
type: string
ForbiddenError:
type: object
required:
- msg
properties:
msg:
type: string
security:
- JWT: []
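
For reference, a client-side sketch of the branch-creation endpoint described by this spec. This is not code from the repository: it assumes the reqwest crate (with the blocking and json features) and serde_json; the listen address and token are placeholders, and the body fields mirror BranchCreateRequest.

use serde_json::json;

fn create_branch_via_http() -> Result<(), reqwest::Error> {
    let client = reqwest::blocking::Client::new();
    let body = json!({
        "tenant_id": "bc62e7d612d0e6fe8f99a6dd2f281f9d", // hex ZTenantId
        "name": "experiment",
        "start_point": "main"
    });
    let resp = client
        .post("http://127.0.0.1:9898/v1/branch") // placeholder pageserver http address
        .bearer_auth("PLACEHOLDER-JWT")          // only needed when auth-type is ZenithJWT
        .json(&body)
        .send()?;
    println!("branch create: {}", resp.status());
    Ok(())
}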

View File

@@ -1,183 +0,0 @@
use std::sync::Arc;
use anyhow::Result;
use hyper::header;
use hyper::StatusCode;
use hyper::{Body, Request, Response, Uri};
use routerify::{ext::RequestExt, RouterBuilder};
use tracing::*;
use zenith_utils::auth::JwtAuth;
use zenith_utils::http::endpoint::attach_openapi_ui;
use zenith_utils::http::endpoint::auth_middleware;
use zenith_utils::http::endpoint::check_permission;
use zenith_utils::http::error::ApiError;
use zenith_utils::http::{
endpoint,
error::HttpErrorBody,
json::{json_request, json_response},
request::get_request_param,
request::parse_request_param,
};
use super::models::BranchCreateRequest;
use super::models::TenantCreateRequest;
use crate::branches::BranchInfo;
use crate::{branches, tenant_mgr, PageServerConf, ZTenantId};
#[derive(Debug)]
struct State {
conf: &'static PageServerConf,
auth: Option<Arc<JwtAuth>>,
allowlist_routes: Vec<Uri>,
}
impl State {
fn new(conf: &'static PageServerConf, auth: Option<Arc<JwtAuth>>) -> Self {
let allowlist_routes = ["/v1/status", "/v1/doc", "/swagger.yml"]
.iter()
.map(|v| v.parse().unwrap())
.collect::<Vec<_>>();
Self {
conf,
auth,
allowlist_routes,
}
}
}
#[inline(always)]
fn get_state(request: &Request<Body>) -> &State {
request
.data::<Arc<State>>()
.expect("unknown state type")
.as_ref()
}
#[inline(always)]
fn get_config(request: &Request<Body>) -> &'static PageServerConf {
get_state(request).conf
}
// healthcheck handler
async fn status_handler(_: Request<Body>) -> Result<Response<Body>, ApiError> {
Ok(Response::builder()
.status(StatusCode::OK)
.header(header::CONTENT_TYPE, "application/json")
.body(Body::from("{}"))
.map_err(ApiError::from_err)?)
}
async fn branch_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
let request_data: BranchCreateRequest = json_request(&mut request).await?;
check_permission(&request, Some(request_data.tenant_id))?;
let response_data = tokio::task::spawn_blocking(move || {
let _enter = info_span!("/branch_create", name = %request_data.name, tenant = %request_data.tenant_id, startpoint=%request_data.start_point).entered();
branches::create_branch(
get_config(&request),
&request_data.name,
&request_data.start_point,
&request_data.tenant_id,
)
})
.await
.map_err(ApiError::from_err)??;
Ok(json_response(StatusCode::CREATED, response_data)?)
}
async fn branch_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenantid: ZTenantId = parse_request_param(&request, "tenant_id")?;
check_permission(&request, Some(tenantid))?;
let response_data = tokio::task::spawn_blocking(move || {
let _enter = info_span!("branch_list", tenant = %tenantid).entered();
crate::branches::get_branches(get_config(&request), &tenantid)
})
.await
.map_err(ApiError::from_err)??;
Ok(json_response(StatusCode::OK, response_data)?)
}
// TODO add to swagger
async fn branch_detail_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenantid: ZTenantId = parse_request_param(&request, "tenant_id")?;
let branch_name: String = get_request_param(&request, "branch_name")?.to_string();
let conf = get_state(&request).conf;
let path = conf.branch_path(&branch_name, &tenantid);
let response_data = tokio::task::spawn_blocking(move || {
let _enter = info_span!("branch_detail", tenant = %tenantid, branch=%branch_name).entered();
let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
BranchInfo::from_path(path, conf, &tenantid, &repo)
})
.await
.map_err(ApiError::from_err)??;
Ok(json_response(StatusCode::OK, response_data)?)
}
async fn tenant_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
// check for management permission
check_permission(&request, None)?;
let response_data = tokio::task::spawn_blocking(move || {
let _enter = info_span!("tenant_list").entered();
crate::branches::get_tenants(get_config(&request))
})
.await
.map_err(ApiError::from_err)??;
Ok(json_response(StatusCode::OK, response_data)?)
}
async fn tenant_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
// check for management permission
check_permission(&request, None)?;
let request_data: TenantCreateRequest = json_request(&mut request).await?;
let response_data = tokio::task::spawn_blocking(move || {
let _enter = info_span!("tenant_create", tenant = %request_data.tenant_id).entered();
tenant_mgr::create_repository_for_tenant(get_config(&request), request_data.tenant_id)
})
.await
.map_err(ApiError::from_err)??;
Ok(json_response(StatusCode::CREATED, response_data)?)
}
async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
json_response(
StatusCode::NOT_FOUND,
HttpErrorBody::from_msg("page not found".to_owned()),
)
}
pub fn make_router(
conf: &'static PageServerConf,
auth: Option<Arc<JwtAuth>>,
) -> RouterBuilder<hyper::Body, ApiError> {
let spec = include_bytes!("openapi_spec.yml");
let mut router = attach_openapi_ui(endpoint::make_router(), spec, "/swagger.yml", "/v1/doc");
if auth.is_some() {
router = router.middleware(auth_middleware(|request| {
let state = get_state(request);
if state.allowlist_routes.contains(request.uri()) {
None
} else {
state.auth.as_deref()
}
}))
}
router
.data(Arc::new(State::new(conf, auth)))
.get("/v1/status", status_handler)
.get("/v1/branch/:tenant_id", branch_list_handler)
.get("/v1/branch/:tenant_id/:branch_name", branch_detail_handler)
.post("/v1/branch", branch_create_handler)
.get("/v1/tenant", tenant_list_handler)
.post("/v1/tenant", tenant_create_handler)
.any(handler_404)
}

File diff suppressed because it is too large

View File

@@ -1,419 +0,0 @@
# Overview
The on-disk format is based on immutable files. The page server
receives a stream of incoming WAL, parses the WAL records to determine
which pages they apply to, and accumulates the incoming changes in
memory. Every now and then, the accumulated changes are written out to
new immutable files. This process is called checkpointing. Old versions
of on-disk files that are not needed by any timeline are removed by the GC
process.
# Terms used in layered repository
- Relish - one PostgreSQL relation or similarly treated file.
- Segment - one slice of a Relish that is stored in a LayeredTimeline.
- Layer - specific version of a relish Segment in a range of LSNs.
Layers can be InMemory or OnDisk:
- An InMemory layer is not durably stored and needs to be rebuilt from WAL on pageserver start.
- OnDisk layer is durably stored.
OnDisk layers can be Image or Delta:
- ImageLayer represents an image or a snapshot of a segment at one particular LSN.
- DeltaLayer represents a collection of WAL records or page images in a range of LSNs.
Dropped segments are always represented on disk by DeltaLayer.
LSN range defined by start_lsn and end_lsn:
- start_lsn is inclusive.
- end_lsn is exclusive.
For an open in-memory layer, the end_lsn is MAX_LSN. For a frozen
in-memory layer or a delta layer, it is a valid end bound. An image
layer represents a snapshot at one LSN, so its end_lsn is always the
snapshot LSN + 1 (a small sketch of this containment check follows this list).
Layers can be open or historical:
- An open layer is a writeable one. Only an InMemory layer can be open.
FIXME: If an open layer is dropped, it is no longer writeable, so it should be turned into a historical layer,
but this is not implemented yet - see bug #569.
- A historical layer is one that cannot be modified anymore. Currently only OnDisk layers can be historical.
- LayerMap - a map that tracks what layers exist for all the relishes in a timeline.
LayerMap consists of two data structures:
- segs - All the layers keyed by segment tag
- open_layers - a data structure that holds all open layers ordered by oldest_pending_lsn for quick access during checkpointing. oldest_pending_lsn is the LSN of the oldest page version stored in the layer.
All operations that update InMemory Layers should update both structures to keep them up-to-date.
- LayeredTimeline - implements Timeline interface.
All methods of LayeredTimeline are aware of its ancestors and return data taking them into account.
TODO: Are there any exceptions to this?
For example, timeline.list_rels(lsn) will return all segments that are visible in this timeline at the LSN,
including ones that were not modified in this timeline and thus don't have a layer in the timeline's LayerMap.
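To make the inclusive start_lsn / exclusive end_lsn convention above concrete, here is a small illustrative sketch (simplified stand-ins, not the repository's Layer types) of the containment check it implies:

// Illustrative only: a simplified stand-in for the real Layer types.
struct LayerRange {
    start_lsn: u64, // inclusive
    end_lsn: u64,   // exclusive; MAX for an open in-memory layer
}

impl LayerRange {
    fn covers(&self, lsn: u64) -> bool {
        self.start_lsn <= lsn && lsn < self.end_lsn
    }
    // An image layer taken at `lsn` covers exactly that point: [lsn, lsn + 1).
    fn image_at(lsn: u64) -> LayerRange {
        LayerRange { start_lsn: lsn, end_lsn: lsn + 1 }
    }
}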
TODO:
Describe GC and checkpoint interval settings.
# Layer files (On-disk layers)
The files are called "layer files". Each layer file corresponds
to one RELISH_SEG_SIZE slice of a PostgreSQL relation fork or
non-rel file in a range of LSNs. The layer files
for each timeline are stored in the timeline's subdirectory under
.zenith/tenants/<tenantid>/timelines.
There are two kinds of layer files: base images and deltas. A base
image file contains an image of a segment as it was at one LSN,
whereas a delta file contains modifications to a segment - mostly in
the form of WAL records - in a range of LSNs.
base image file:
rel_<spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>
delta file:
rel_<spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>_<end LSN>
For example:
rel_1663_13990_2609_0_10_000000000169C348
rel_1663_13990_2609_0_10_000000000169C348_0000000001702000
In addition to the relations, with "rel_*" prefix, we use the same
format for storing various smaller files from the PostgreSQL data
directory. They use different prefixes, and the naming scheme up
to the LSNs varies. The Zenith source code uses the term "relish" to
mean "a relation, or other file that's treated like a relation in the
storage". For example, a base image of a CLOG segment would be named
like this:
pg_xact_0000_0_00000000198B06B0
There is no difference in how the relation and non-relation files are
managed, except that the first part of file names is different.
Internally, the relations and non-relation files that are managed in
the versioned store are together called "relishes".
If a file has been dropped, the last layer file for it is created
with the _DROPPED suffix, e.g.
rel_1663_13990_2609_0_10_000000000169C348_0000000001702000_DROPPED
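As an illustration of the naming scheme (a hypothetical helper, not the repository's actual filename code), a relation delta-layer name can be assembled like this:

// Hypothetical helper: format a relation layer file name as described above.
fn delta_layer_name(
    spcnode: u32, dbnode: u32, relnode: u32, forknum: u8, segno: u32,
    start_lsn: u64, end_lsn: u64, dropped: bool,
) -> String {
    let mut name = format!(
        "rel_{}_{}_{}_{}_{}_{:016X}_{:016X}",
        spcnode, dbnode, relnode, forknum, segno, start_lsn, end_lsn
    );
    if dropped {
        name.push_str("_DROPPED");
    }
    name
}

// delta_layer_name(1663, 13990, 2609, 0, 10, 0x169C348, 0x1702000, false)
// => "rel_1663_13990_2609_0_10_000000000169C348_0000000001702000"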
## Notation used in this document
The full path of a delta file looks like this:
.zenith/tenants/941ddc8604413b88b3d208bddf90396c/timelines/4af489b06af8eed9e27a841775616962/rel_1663_13990_2609_0_10_000000000169C348_0000000001702000
For simplicity, the examples below use a simplified notation for the
paths. The tenant ID is left out, the timeline ID is replaced with
the human-readable branch name, and spcnode+dbnode+relnode+forkum+segno
with a human-readable table name. The LSNs are also shorter. For
example, a base image file at LSN 100 and a delta file between 100-200
for 'orders' table on 'main' branch is represented like this:
main/orders_100
main/orders_100_200
# Creating layer files
Let's start with a simple example with a system that contains one
branch called 'main' and two tables, 'orders' and 'customers'. The end
of WAL is currently at LSN 250. In this starting situation, you would
have these files on disk:
main/orders_100
main/orders_100_200
main/orders_200
main/customers_100
main/customers_100_200
main/customers_200
In addition to those files, the recent changes between LSN 200 and the
end of WAL at 250 are kept in memory. If the page server crashes, the
latest records between 200-250 need to be re-read from the WAL.
Whenever enough WAL has been accumulated in memory, the page server
writes out the changes in memory into new layer files. This process
is called "checkpointing" (not to be confused with the PostgreSQL
checkpoints, which are a different thing). The page server only creates
layer files for relations that have been modified since the last
checkpoint. For example, if the current end of WAL is at LSN 450, and
the last checkpoint happened at LSN 400 but there hasn't been any
recent changes to 'customers' table, you would have these files on
disk:
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
main/orders_300
main/orders_300_400
main/orders_400
main/customers_100
main/customers_100_200
main/customers_200
If the customers table is modified later, a new file is created for it
at the next checkpoint. The new file will cover the "gap" from the
last layer file, so the LSN ranges are always contiguous:
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
main/orders_300
main/orders_300_400
main/orders_400
main/customers_100
main/customers_100_200
main/customers_200
main/customers_200_500
main/customers_500
## Reading page versions
Whenever a GetPage@LSN request comes in from the compute node, the
page server needs to reconstruct the requested page, as it was at the
requested LSN. To do that, the page server first checks the recent
in-memory layer; if the requested page version is found there, it can
be returned immediately without looking at the files on
disk. Otherwise the page server needs to locate the layer file that
contains the requested page version.
For example, if a request comes in for table 'orders' at LSN 250, the
page server would load the 'main/orders_200_300' file into memory, and
reconstruct and return the requested page from it, as it was at
LSN 250. Because the layer file consists of a full image of the
relation at the start LSN and the WAL, reconstructing the page
involves replaying any WAL records applicable to the page between LSNs
200-250, starting from the base image at LSN 200.
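In rough, self-contained form (a toy model, not the pageserver's real reconstruction code), the procedure just described amounts to:

// Illustrative only: reconstruct a page as of `request_lsn` from a base image
// taken at `base_lsn` plus the WAL records after it, applied in LSN order.
struct WalRecord { lsn: u64, bytes: Vec<u8> }

fn reconstruct_page(
    base_image: &[u8],
    base_lsn: u64,
    records: &[WalRecord], // assumed sorted by lsn
    request_lsn: u64,
    apply: impl Fn(&mut Vec<u8>, &WalRecord),
) -> Vec<u8> {
    let mut page = base_image.to_vec();
    for rec in records {
        if rec.lsn > base_lsn && rec.lsn <= request_lsn {
            apply(&mut page, rec);
        }
    }
    page
}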
# Multiple branches
Imagine that a child branch is created at LSN 250:
@250
----main--+-------------------------->
\
+---child-------------->
Then, the 'orders' table is updated differently on the 'main' and
'child' branches. You now have this situation on disk:
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
main/orders_300
main/orders_300_400
main/orders_400
main/customers_100
main/customers_100_200
main/customers_200
child/orders_250_300
child/orders_300
child/orders_300_400
child/orders_400
Because the 'customers' table hasn't been modified on the child
branch, there is no file for it there. If you request a page for it on
the 'child' branch, the page server will not find any layer file
for it in the 'child' directory, so it will recurse to look into the
parent 'main' branch instead.
From the 'child' branch's point of view, the history for each relation
is linear, and the request's LSN identifies unambiguously which file
you need to look at. For example, the history for the 'orders' table
on the 'main' branch consists of these files:
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
main/orders_300
main/orders_300_400
main/orders_400
And from the 'child' branch's point of view, it consists of these
files:
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
child/orders_250_300
child/orders_300
child/orders_300_400
child/orders_400
The branch metadata includes the point where the child branch was
created, LSN 250. If a page request comes with LSN 275, we read the
page version from the 'child/orders_250_300' file. We might also
need to reconstruct the page version as it was at LSN 250, in order
to replay the WAL up to LSN 275, using 'main/orders_200_300' and
'main/orders_200'. The page versions between 250-300 in the
'main/orders_200_300' file are ignored when operating on the child
branch.
Note: It doesn't make any difference if the child branch is created
when the end of the main branch was at LSN 250, or later when the tip of
the main branch had already moved on. The latter case, creating a
branch at a historic LSN, is how we support PITR in Zenith.
# Garbage collection
In this scheme, we keep creating new layer files over time. We also
need a mechanism to remove old files that are no longer needed,
because disk space isn't infinite.
What files are still needed? Currently, the page server supports PITR
and branching from any branch at any LSN that is "recent enough" from
the tip of the branch. "Recent enough" is defined as an LSN horizon,
which by default is 64 MB. (See DEFAULT_GC_HORIZON). For this
example, let's assume that the LSN horizon is 150 units.
Let's look at the single branch scenario again. Imagine that the end
of the branch is LSN 525, so that the GC horizon is currently at
525-150 = 375
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
main/orders_300
main/orders_300_400
main/orders_400
main/orders_400_500
main/orders_500
main/customers_100
main/customers_100_200
main/customers_200
We can remove the following files because the end LSNs of those files are
older than GC horizon 375, and there are more recent layer files for the
table:
main/orders_100 DELETE
main/orders_100_200 DELETE
main/orders_200 DELETE
main/orders_200_300 DELETE
main/orders_300 STILL NEEDED BY orders_300_400
main/orders_300_400 KEEP, NEWER THAN GC HORIZON
main/orders_400 ..
main/orders_400_500 ..
main/orders_500 ..
main/customers_100 DELETE
main/customers_100_200 DELETE
main/customers_200 KEEP, NO NEWER VERSION
'main/customers_100_200' is old enough, but it cannot be
removed because there is no newer layer file for the table.
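The rule applied in this example can be sketched as a simple per-layer decision (illustrative only; the retention reasons are taken as inputs rather than derived from the layer map, and the multi-branch rules described next add further conditions):

/// Illustrative single-branch GC decision for one layer file.
fn gc_decision(layer_end_lsn: u64, branch_end_lsn: u64, gc_horizon: u64,
               needed_by_newer_layer: bool, has_newer_version: bool) -> &'static str {
    let cutoff = branch_end_lsn.saturating_sub(gc_horizon); // 525 - 150 = 375 in the example
    if layer_end_lsn >= cutoff {
        "KEEP, NEWER THAN GC HORIZON"
    } else if needed_by_newer_layer {
        "KEEP, STILL NEEDED AS A BASE"
    } else if !has_newer_version {
        "KEEP, NO NEWER VERSION"
    } else {
        "DELETE"
    }
}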
Things get slightly more complicated with multiple branches. All of
the above still holds, but in addition to recent files we must also
retain older snapshot files that are still needed by child branches.
For example, if a child branch is created at LSN 150, and the 'customers'
table is updated on the branch, you would have these files:
main/orders_100 KEEP, NEEDED BY child BRANCH
main/orders_100_200 KEEP, NEEDED BY child BRANCH
main/orders_200 DELETE
main/orders_200_300 DELETE
main/orders_300 KEEP, NEWER THAN GC HORIZON
main/orders_300_400 KEEP, NEWER THAN GC HORIZON
main/orders_400 KEEP, NEWER THAN GC HORIZON
main/orders_400_500 KEEP, NEWER THAN GC HORIZON
main/orders_500 KEEP, NEWER THAN GC HORIZON
main/customers_100 DELETE
main/customers_100_200 DELETE
main/customers_200 KEEP, NO NEWER VERSION
child/customers_150_300 DELETE
child/customers_300 KEEP, NO NEWER VERSION
In this situation, 'main/orders_100' and 'main/orders_100_200' cannot
be removed, even though they are older than the GC horizon, because
they are still needed by the child branch. 'main/orders_200'
and 'main/orders_200_300' can still be removed.
If 'orders' is modified later on the 'child' branch, we will create a
new base image and delta file for it on the child:
main/orders_100
main/orders_100_200
main/orders_300
main/orders_300_400
main/orders_400
main/orders_400_500
main/orders_500
main/customers_200
child/customers_300
child/orders_150_400
child/orders_400
After this, the 'main/orders_100' and 'main/orders_100_200' files could
be removed. They are no longer needed by the child branch, because there
is a newer layer file there. TODO: This optimization hasn't been
implemented! The GC algorithm will currently keep the file on the
'main' branch anyway, for as long as the child branch exists.
# TODO: On LSN ranges
In principle, each relation can be checkpointed separately, i.e. the
LSN ranges of the files don't need to line up. So this would be legal:
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
main/orders_300
main/orders_300_400
main/orders_400
main/customers_150
main/customers_150_250
main/customers_250
main/customers_250_500
main/customers_500
However, the code currently always checkpoints all relations together.
So that situation doesn't arise in practice.
It would also be OK to have overlapping LSN ranges for the same relation:
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
main/orders_300
main/orders_250_350
main/orders_350
main/orders_300_400
main/orders_400
The code that reads the layer files should cope with this, but this
situation doesn't arise either, because the checkpointing code never
does that. It could be useful, however, as a transient state when
garbage collecting around branch points, or explicit recovery
points. For example, if we start with this:
main/orders_100
main/orders_100_200
main/orders_200
main/orders_200_300
main/orders_300
And there is a branch or explicit recovery point at LSN 150, we could
replace 'main/orders_100_200' with 'main/orders_150' to keep a
layer only at that exact point that's still needed, removing the
other page versions around it. But such compaction has not been
implemented yet.

View File

@@ -1,45 +0,0 @@
use std::{fs::File, io::Write};
use anyhow::Result;
use bookfile::{BookWriter, BoundedReader, ChapterId, ChapterWriter};
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize)]
pub struct BlobRange {
offset: u64,
size: usize,
}
pub fn read_blob(reader: &BoundedReader<&'_ File>, range: &BlobRange) -> Result<Vec<u8>> {
let mut buf = vec![0u8; range.size];
reader.read_exact_at(&mut buf, range.offset)?;
Ok(buf)
}
pub struct BlobWriter<W> {
writer: ChapterWriter<W>,
offset: u64,
}
impl<W: Write> BlobWriter<W> {
// This function takes a BookWriter and creates a new chapter to ensure offset is 0.
pub fn new(book_writer: BookWriter<W>, chapter_id: impl Into<ChapterId>) -> Self {
let writer = book_writer.new_chapter(chapter_id);
Self { writer, offset: 0 }
}
pub fn write_blob(&mut self, blob: &[u8]) -> Result<BlobRange> {
self.writer.write_all(blob)?;
let range = BlobRange {
offset: self.offset,
size: blob.len(),
};
self.offset += blob.len() as u64;
Ok(range)
}
pub fn close(self) -> bookfile::Result<BookWriter<W>> {
self.writer.close()
}
}

View File

@@ -1,577 +0,0 @@
//!
//! A DeltaLayer represents a collection of WAL records or page images in a range of
//! LSNs, for one segment. It is stored on a file on disk.
//!
//! Usually a delta layer only contains differences - in the form of WAL records against
//! a base LSN. However, if a segment is newly created, by creating a new relation or
//! extending an old one, there might be no base image. In that case, all the entries in
//! the delta layer must be page images or WAL records with the 'will_init' flag set, so
//! that they can be replayed without referring to an older page version. Also in some
//! circumstances, the predecessor layer might actually be another delta layer. That
//! can happen when you create a new branch in the middle of a delta layer, and the WAL
//! records on the new branch are put in a new delta layer.
//!
//! When a delta file needs to be accessed, we slurp the metadata and relsize chapters
//! into memory, into the DeltaLayerInner struct. See load() and unload() functions.
//! To access a page/WAL record, we search `page_version_metas` for the block # and LSN.
//! The byte ranges in the metadata can be used to find the page/WAL record in
//! PAGE_VERSIONS_CHAPTER.
//!
//! On disk, the delta files are stored in timelines/<timelineid> directory.
//! Currently, there are no subdirectories, and each delta file is named like this:
//!
//! <spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>_<end LSN>
//!
//! For example:
//!
//! 1663_13990_2609_0_5_000000000169C348_000000000169C349
//!
//! If a relation is dropped, we add a '_DROPPED' to the end of the filename to indicate that.
//! So the above example would become:
//!
//! 1663_13990_2609_0_5_000000000169C348_000000000169C349_DROPPED
//!
//! The end LSN indicates when it was dropped; in that case, we don't store it in the
//! file contents in any way.
//!
//! A delta file is constructed using the 'bookfile' crate. Each file consists of two
//! parts: the page versions and the relation sizes. They are stored as separate chapters.
//!
use crate::layered_repository::blob::BlobWriter;
use crate::layered_repository::filename::{DeltaFileName, PathOrConf};
use crate::layered_repository::storage_layer::{
Layer, PageReconstructData, PageReconstructResult, PageVersion, SegmentTag,
};
use crate::waldecoder;
use crate::PageServerConf;
use crate::{ZTenantId, ZTimelineId};
use anyhow::{bail, ensure, Result};
use log::*;
use serde::{Deserialize, Serialize};
use zenith_utils::vec_map::VecMap;
// avoid binding to Write (conflicts with std::io::Write)
// while being able to use std::fmt::Write's methods
use std::fmt::Write as _;
use std::fs;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::ops::Bound::Included;
use std::path::{Path, PathBuf};
use std::sync::{Mutex, MutexGuard};
use bookfile::{Book, BookWriter};
use zenith_utils::bin_ser::BeSer;
use zenith_utils::lsn::Lsn;
use super::blob::{read_blob, BlobRange};
// Magic constant to identify a Zenith delta file
pub const DELTA_FILE_MAGIC: u32 = 0x5A616E01;
/// Mapping from (block #, lsn) -> page/WAL record
/// byte ranges in PAGE_VERSIONS_CHAPTER
static PAGE_VERSION_METAS_CHAPTER: u64 = 1;
/// Page/WAL bytes - cannot be interpreted
/// without PAGE_VERSION_METAS_CHAPTER
static PAGE_VERSIONS_CHAPTER: u64 = 2;
static REL_SIZES_CHAPTER: u64 = 3;
/// Contains the [`Summary`] struct
static SUMMARY_CHAPTER: u64 = 4;
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct Summary {
tenantid: ZTenantId,
timelineid: ZTimelineId,
seg: SegmentTag,
start_lsn: Lsn,
end_lsn: Lsn,
dropped: bool,
}
impl From<&DeltaLayer> for Summary {
fn from(layer: &DeltaLayer) -> Self {
Self {
tenantid: layer.tenantid,
timelineid: layer.timelineid,
seg: layer.seg,
start_lsn: layer.start_lsn,
end_lsn: layer.end_lsn,
dropped: layer.dropped,
}
}
}
///
/// DeltaLayer is the in-memory data structure associated with an
/// on-disk delta file. We keep a DeltaLayer in memory for each
/// file, in the LayerMap. If a layer is in "loaded" state, we have a
/// copy of the file in memory, in 'inner'. Otherwise the struct is
/// just a placeholder for a file that exists on disk, and it needs to
/// be loaded before using it in queries.
///
pub struct DeltaLayer {
path_or_conf: PathOrConf,
pub tenantid: ZTenantId,
pub timelineid: ZTimelineId,
pub seg: SegmentTag,
//
// This entry contains all the changes from 'start_lsn' to 'end_lsn'. The
// start is inclusive, and end is exclusive.
//
pub start_lsn: Lsn,
pub end_lsn: Lsn,
dropped: bool,
inner: Mutex<DeltaLayerInner>,
}
pub struct DeltaLayerInner {
/// If false, the 'page_version_metas' and 'relsizes' have not been
/// loaded into memory yet.
loaded: bool,
/// All versions of all pages in the file are kept here.
/// Indexed by block number and LSN.
page_version_metas: VecMap<(u32, Lsn), BlobRange>,
/// `relsizes` tracks the size of the relation at different points in time.
relsizes: VecMap<Lsn, u32>,
}
impl Layer for DeltaLayer {
fn get_timeline_id(&self) -> ZTimelineId {
self.timelineid
}
fn get_seg_tag(&self) -> SegmentTag {
self.seg
}
fn is_dropped(&self) -> bool {
self.dropped
}
fn get_start_lsn(&self) -> Lsn {
self.start_lsn
}
fn get_end_lsn(&self) -> Lsn {
self.end_lsn
}
fn filename(&self) -> PathBuf {
PathBuf::from(self.layer_name().to_string())
}
/// Look up given page in the cache.
fn get_page_reconstruct_data(
&self,
blknum: u32,
lsn: Lsn,
reconstruct_data: &mut PageReconstructData,
) -> Result<PageReconstructResult> {
let mut need_image = true;
assert!(self.seg.blknum_in_seg(blknum));
{
// Open the file and lock the metadata in memory
// TODO: avoid opening the file for each read
let (_path, book) = self.open_book()?;
let page_version_reader = book.chapter_reader(PAGE_VERSIONS_CHAPTER)?;
let inner = self.load()?;
// Scan the metadata BTreeMap backwards, starting from the given entry.
let minkey = (blknum, Lsn(0));
let maxkey = (blknum, lsn);
let iter = inner
.page_version_metas
.slice_range((Included(&minkey), Included(&maxkey)))
.iter()
.rev();
for ((_blknum, pv_lsn), blob_range) in iter {
let pv = PageVersion::des(&read_blob(&page_version_reader, blob_range)?)?;
if let Some(img) = pv.page_image {
// Found a page image, return it
reconstruct_data.page_img = Some(img);
need_image = false;
break;
} else if let Some(rec) = pv.record {
let will_init = rec.will_init;
reconstruct_data.records.push((*pv_lsn, rec));
if will_init {
// This WAL record initializes the page, so no need to go further back
need_image = false;
break;
}
} else {
// No base image, and no WAL record. Huh?
bail!("no page image or WAL record for requested page");
}
}
// release metadata lock and close the file
}
// If an older page image is needed to reconstruct the page, let the
// caller know.
if need_image {
Ok(PageReconstructResult::Continue(self.start_lsn))
} else {
Ok(PageReconstructResult::Complete)
}
}
/// Get size of the relation at given LSN
fn get_seg_size(&self, lsn: Lsn) -> Result<u32> {
assert!(lsn >= self.start_lsn);
ensure!(
self.seg.rel.is_blocky(),
"get_seg_size() called on a non-blocky rel"
);
// Scan the BTreeMap backwards, starting from the given entry.
let inner = self.load()?;
let slice = inner
.relsizes
.slice_range((Included(&Lsn(0)), Included(&lsn)));
if let Some((_entry_lsn, entry)) = slice.last() {
Ok(*entry)
} else {
Err(anyhow::anyhow!("could not find seg size in delta layer"))
}
}
/// Does this segment exist at given LSN?
fn get_seg_exists(&self, lsn: Lsn) -> Result<bool> {
// Is the requested LSN after the rel was dropped?
if self.dropped && lsn >= self.end_lsn {
return Ok(false);
}
// Otherwise, it exists.
Ok(true)
}
///
/// Release most of the memory used by this layer. If it's accessed again later,
/// it will need to be loaded back.
///
fn unload(&self) -> Result<()> {
let mut inner = self.inner.lock().unwrap();
inner.page_version_metas = VecMap::default();
inner.relsizes = VecMap::default();
inner.loaded = false;
Ok(())
}
fn delete(&self) -> Result<()> {
// delete underlying file
fs::remove_file(self.path())?;
Ok(())
}
fn is_incremental(&self) -> bool {
true
}
/// debugging function to print out the contents of the layer
fn dump(&self) -> Result<()> {
println!(
"----- delta layer for ten {} tli {} seg {} {}-{} ----",
self.tenantid, self.timelineid, self.seg, self.start_lsn, self.end_lsn
);
println!("--- relsizes ---");
let inner = self.load()?;
for (k, v) in inner.relsizes.as_slice() {
println!(" {}: {}", k, v);
}
println!("--- page versions ---");
let (_path, book) = self.open_book()?;
let chapter = book.chapter_reader(PAGE_VERSIONS_CHAPTER)?;
for ((blk, lsn), blob_range) in inner.page_version_metas.as_slice() {
let mut desc = String::new();
let buf = read_blob(&chapter, blob_range)?;
let pv = PageVersion::des(&buf)?;
if let Some(img) = pv.page_image.as_ref() {
write!(&mut desc, " img {} bytes", img.len())?;
}
if let Some(rec) = pv.record.as_ref() {
let wal_desc = waldecoder::describe_wal_record(&rec.rec);
write!(
&mut desc,
" rec {} bytes will_init: {} {}",
rec.rec.len(),
rec.will_init,
wal_desc
)?;
}
println!(" blk {} at {}: {}", blk, lsn, desc);
}
Ok(())
}
}
impl DeltaLayer {
fn path_for(
path_or_conf: &PathOrConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
fname: &DeltaFileName,
) -> PathBuf {
match path_or_conf {
PathOrConf::Path(path) => path.clone(),
PathOrConf::Conf(conf) => conf
.timeline_path(&timelineid, &tenantid)
.join(fname.to_string()),
}
}
/// Create a new delta file, using the given page versions and relsizes.
/// The page versions are passed by an iterator; the iterator must return
/// page versions in blknum+lsn order.
///
/// This is used to write the in-memory layer to disk. The in-memory layer tracks page
/// versions and relation sizes in similar structures, so passing an iterator over the
/// page versions plus the relsizes map is currently expedient.
#[allow(clippy::too_many_arguments)]
pub fn create<'a>(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
seg: SegmentTag,
start_lsn: Lsn,
end_lsn: Lsn,
dropped: bool,
page_versions: impl Iterator<Item = (u32, Lsn, &'a PageVersion)>,
relsizes: VecMap<Lsn, u32>,
) -> Result<DeltaLayer> {
if seg.rel.is_blocky() {
assert!(!relsizes.is_empty());
}
let delta_layer = DeltaLayer {
path_or_conf: PathOrConf::Conf(conf),
timelineid,
tenantid,
seg,
start_lsn,
end_lsn,
dropped,
inner: Mutex::new(DeltaLayerInner {
loaded: true,
page_version_metas: VecMap::default(),
relsizes,
}),
};
let mut inner = delta_layer.inner.lock().unwrap();
// Write the in-memory btreemaps into a file
let path = delta_layer.path();
// Note: This overwrites any existing file. There shouldn't be any.
// FIXME: throw an error instead?
let file = File::create(&path)?;
let buf_writer = BufWriter::new(file);
let book = BookWriter::new(buf_writer, DELTA_FILE_MAGIC)?;
let mut page_version_writer = BlobWriter::new(book, PAGE_VERSIONS_CHAPTER);
for (blknum, lsn, page_version) in page_versions {
let buf = PageVersion::ser(page_version)?;
let blob_range = page_version_writer.write_blob(&buf)?;
inner
.page_version_metas
.append((blknum, lsn), blob_range)
.unwrap();
}
let book = page_version_writer.close()?;
// Write out the page version metadata
let mut chapter = book.new_chapter(PAGE_VERSION_METAS_CHAPTER);
let buf = VecMap::ser(&inner.page_version_metas)?;
chapter.write_all(&buf)?;
let book = chapter.close()?;
// and the relsizes to a separate chapter
let mut chapter = book.new_chapter(REL_SIZES_CHAPTER);
let buf = VecMap::ser(&inner.relsizes)?;
chapter.write_all(&buf)?;
let book = chapter.close()?;
let mut chapter = book.new_chapter(SUMMARY_CHAPTER);
let summary = Summary {
tenantid,
timelineid,
seg,
start_lsn,
end_lsn,
dropped,
};
Summary::ser_into(&summary, &mut chapter)?;
let book = chapter.close()?;
// This flushes the underlying 'buf_writer'.
let writer = book.close()?;
writer.get_ref().sync_all()?;
trace!("saved {}", &path.display());
drop(inner);
Ok(delta_layer)
}
fn open_book(&self) -> Result<(PathBuf, Book<File>)> {
let path = Self::path_for(
&self.path_or_conf,
self.timelineid,
self.tenantid,
&self.layer_name(),
);
let file = File::open(&path)?;
let book = Book::new(file)?;
Ok((path, book))
}
///
/// Load the contents of the file into memory
///
fn load(&self) -> Result<MutexGuard<DeltaLayerInner>> {
// quick exit if already loaded
let mut inner = self.inner.lock().unwrap();
if inner.loaded {
return Ok(inner);
}
let (path, book) = self.open_book()?;
match &self.path_or_conf {
PathOrConf::Conf(_) => {
let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
let actual_summary = Summary::des(&chapter)?;
let expected_summary = Summary::from(self);
if actual_summary != expected_summary {
bail!("in-file summary does not match expected summary. actual = {:?} expected = {:?}", actual_summary, expected_summary);
}
}
PathOrConf::Path(path) => {
let actual_filename = Path::new(path.file_name().unwrap());
let expected_filename = self.filename();
if actual_filename != expected_filename {
println!(
"warning: filename does not match what is expected from in-file summary"
);
println!("actual: {:?}", actual_filename);
println!("expected: {:?}", expected_filename);
}
}
}
let chapter = book.read_chapter(PAGE_VERSION_METAS_CHAPTER)?;
let page_version_metas = VecMap::des(&chapter)?;
let chapter = book.read_chapter(REL_SIZES_CHAPTER)?;
let relsizes = VecMap::des(&chapter)?;
debug!("loaded from {}", &path.display());
*inner = DeltaLayerInner {
loaded: true,
page_version_metas,
relsizes,
};
Ok(inner)
}
/// Create a DeltaLayer struct representing an existing file on disk.
pub fn new(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
filename: &DeltaFileName,
) -> DeltaLayer {
DeltaLayer {
path_or_conf: PathOrConf::Conf(conf),
timelineid,
tenantid,
seg: filename.seg,
start_lsn: filename.start_lsn,
end_lsn: filename.end_lsn,
dropped: filename.dropped,
inner: Mutex::new(DeltaLayerInner {
loaded: false,
page_version_metas: VecMap::default(),
relsizes: VecMap::default(),
}),
}
}
/// Create a DeltaLayer struct representing an existing file on disk.
///
/// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
pub fn new_for_path(path: &Path, book: &Book<File>) -> Result<Self> {
let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
let summary = Summary::des(&chapter)?;
Ok(DeltaLayer {
path_or_conf: PathOrConf::Path(path.to_path_buf()),
timelineid: summary.timelineid,
tenantid: summary.tenantid,
seg: summary.seg,
start_lsn: summary.start_lsn,
end_lsn: summary.end_lsn,
dropped: summary.dropped,
inner: Mutex::new(DeltaLayerInner {
loaded: false,
page_version_metas: VecMap::default(),
relsizes: VecMap::default(),
}),
})
}
fn layer_name(&self) -> DeltaFileName {
DeltaFileName {
seg: self.seg,
start_lsn: self.start_lsn,
end_lsn: self.end_lsn,
dropped: self.dropped,
}
}
/// Path to the layer file in pageserver workdir.
pub fn path(&self) -> PathBuf {
Self::path_for(
&self.path_or_conf,
self.timelineid,
self.tenantid,
&self.layer_name(),
)
}
}
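// Editor's note: the function below is an illustrative sketch added for this review,
// not part of the original file. It shows how a caller is expected to drive the
// PageReconstructResult protocol across a stack of layers; the `layers` slice
// (ordered newest to oldest) is a stand-in for the real layer-map lookup, and the
// match only covers the result variants used in this file.
#[allow(dead_code)]
fn collect_reconstruct_data_example(
    layers: &[&dyn Layer],
    blknum: u32,
    mut lsn: Lsn,
    reconstruct_data: &mut PageReconstructData,
) -> Result<()> {
    for layer in layers {
        // Skip layers that only cover LSNs newer than the request.
        if layer.get_start_lsn() > lsn {
            continue;
        }
        match layer.get_page_reconstruct_data(blknum, lsn, reconstruct_data)? {
            // A base image (plus any collected WAL records) is now available.
            PageReconstructResult::Complete => return Ok(()),
            // A base image is still missing: continue in an older layer, at or
            // below 'cont_lsn'.
            PageReconstructResult::Continue(cont_lsn) => lsn = cont_lsn,
            // No layer can provide the page at the requested LSN.
            PageReconstructResult::Missing(missing_lsn) => {
                bail!("page {} is missing at {}", blknum, missing_lsn)
            }
        }
    }
    bail!("ran out of layers while reconstructing page {}", blknum)
}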

View File

@@ -1,315 +0,0 @@
//!
//! Helper functions for dealing with filenames of the image and delta layer files.
//!
use crate::layered_repository::storage_layer::SegmentTag;
use crate::relish::*;
use crate::PageServerConf;
use crate::{ZTenantId, ZTimelineId};
use std::fmt;
use std::fs;
use std::path::PathBuf;
use anyhow::Result;
use log::*;
use zenith_utils::lsn::Lsn;
use super::METADATA_FILE_NAME;
// Note: LayeredTimeline::load_layer_map() relies on this sort order
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct DeltaFileName {
pub seg: SegmentTag,
pub start_lsn: Lsn,
pub end_lsn: Lsn,
pub dropped: bool,
}
/// Represents the filename of a DeltaLayer
///
/// <spcnode>_<dbnode>_<relnode>_<forknum>_<seg>_<start LSN>_<end LSN>
///
/// or if it was dropped:
///
/// <spcnode>_<dbnode>_<relnode>_<forknum>_<seg>_<start LSN>_<end LSN>_DROPPED
///
impl DeltaFileName {
///
/// Parse a string as a delta file name. Returns None if the filename does not
/// match the expected pattern.
///
pub fn parse_str(fname: &str) -> Option<Self> {
let rel;
let mut parts;
if let Some(rest) = fname.strip_prefix("rel_") {
parts = rest.split('_');
rel = RelishTag::Relation(RelTag {
spcnode: parts.next()?.parse::<u32>().ok()?,
dbnode: parts.next()?.parse::<u32>().ok()?,
relnode: parts.next()?.parse::<u32>().ok()?,
forknum: parts.next()?.parse::<u8>().ok()?,
});
} else if let Some(rest) = fname.strip_prefix("pg_xact_") {
parts = rest.split('_');
rel = RelishTag::Slru {
slru: SlruKind::Clog,
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") {
parts = rest.split('_');
rel = RelishTag::Slru {
slru: SlruKind::MultiXactMembers,
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") {
parts = rest.split('_');
rel = RelishTag::Slru {
slru: SlruKind::MultiXactOffsets,
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") {
parts = rest.split('_');
rel = RelishTag::FileNodeMap {
spcnode: parts.next()?.parse::<u32>().ok()?,
dbnode: parts.next()?.parse::<u32>().ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_twophase_") {
parts = rest.split('_');
rel = RelishTag::TwoPhase {
xid: parts.next()?.parse::<u32>().ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") {
parts = rest.split('_');
rel = RelishTag::Checkpoint;
} else if let Some(rest) = fname.strip_prefix("pg_control_") {
parts = rest.split('_');
rel = RelishTag::ControlFile;
} else {
return None;
}
let segno = parts.next()?.parse::<u32>().ok()?;
let seg = SegmentTag { rel, segno };
let start_lsn = Lsn::from_hex(parts.next()?).ok()?;
let end_lsn = Lsn::from_hex(parts.next()?).ok()?;
let mut dropped = false;
if let Some(suffix) = parts.next() {
if suffix == "DROPPED" {
dropped = true;
} else {
return None;
}
}
if parts.next().is_some() {
return None;
}
Some(DeltaFileName {
seg,
start_lsn,
end_lsn,
dropped,
})
}
}
impl fmt::Display for DeltaFileName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let basename = match self.seg.rel {
RelishTag::Relation(reltag) => format!(
"rel_{}_{}_{}_{}",
reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum
),
RelishTag::Slru {
slru: SlruKind::Clog,
segno,
} => format!("pg_xact_{:04X}", segno),
RelishTag::Slru {
slru: SlruKind::MultiXactMembers,
segno,
} => format!("pg_multixact_members_{:04X}", segno),
RelishTag::Slru {
slru: SlruKind::MultiXactOffsets,
segno,
} => format!("pg_multixact_offsets_{:04X}", segno),
RelishTag::FileNodeMap { spcnode, dbnode } => {
format!("pg_filenodemap_{}_{}", spcnode, dbnode)
}
RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid),
RelishTag::Checkpoint => "pg_control_checkpoint".to_string(),
RelishTag::ControlFile => "pg_control".to_string(),
};
write!(
f,
"{}_{}_{:016X}_{:016X}{}",
basename,
self.seg.segno,
u64::from(self.start_lsn),
u64::from(self.end_lsn),
if self.dropped { "_DROPPED" } else { "" }
)
}
}
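// Editor's note: the test below is an illustrative sketch, not part of the original
// file. It shows the parse/Display round trip for a (hypothetical) dropped relation
// delta file name produced by the naming scheme implemented above.
#[cfg(test)]
mod delta_filename_example {
    use super::*;

    #[test]
    fn delta_filename_roundtrip() {
        let name = "rel_1663_13990_2609_0_5_000000000169C348_000000000169C349_DROPPED";
        let parsed = DeltaFileName::parse_str(name).expect("valid delta file name");
        assert!(parsed.dropped);
        assert_eq!(parsed.seg.segno, 5);
        assert_eq!(parsed.start_lsn, Lsn(0x0169_C348));
        assert_eq!(parsed.end_lsn, Lsn(0x0169_C349));
        // Display reproduces the original string exactly.
        assert_eq!(parsed.to_string(), name);
    }
}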
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub struct ImageFileName {
pub seg: SegmentTag,
pub lsn: Lsn,
}
///
/// Represents the filename of an ImageLayer
///
/// <spcnode>_<dbnode>_<relnode>_<forknum>_<seg>_<LSN>
///
impl ImageFileName {
///
/// Parse a string as an image file name. Returns None if the filename does not
/// match the expected pattern.
///
pub fn parse_str(fname: &str) -> Option<Self> {
let rel;
let mut parts;
if let Some(rest) = fname.strip_prefix("rel_") {
parts = rest.split('_');
rel = RelishTag::Relation(RelTag {
spcnode: parts.next()?.parse::<u32>().ok()?,
dbnode: parts.next()?.parse::<u32>().ok()?,
relnode: parts.next()?.parse::<u32>().ok()?,
forknum: parts.next()?.parse::<u8>().ok()?,
});
} else if let Some(rest) = fname.strip_prefix("pg_xact_") {
parts = rest.split('_');
rel = RelishTag::Slru {
slru: SlruKind::Clog,
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") {
parts = rest.split('_');
rel = RelishTag::Slru {
slru: SlruKind::MultiXactMembers,
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") {
parts = rest.split('_');
rel = RelishTag::Slru {
slru: SlruKind::MultiXactOffsets,
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") {
parts = rest.split('_');
rel = RelishTag::FileNodeMap {
spcnode: parts.next()?.parse::<u32>().ok()?,
dbnode: parts.next()?.parse::<u32>().ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_twophase_") {
parts = rest.split('_');
rel = RelishTag::TwoPhase {
xid: parts.next()?.parse::<u32>().ok()?,
};
} else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") {
parts = rest.split('_');
rel = RelishTag::Checkpoint;
} else if let Some(rest) = fname.strip_prefix("pg_control_") {
parts = rest.split('_');
rel = RelishTag::ControlFile;
} else {
return None;
}
let segno = parts.next()?.parse::<u32>().ok()?;
let seg = SegmentTag { rel, segno };
let lsn = Lsn::from_hex(parts.next()?).ok()?;
if parts.next().is_some() {
return None;
}
Some(ImageFileName { seg, lsn })
}
}
impl fmt::Display for ImageFileName {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let basename = match self.seg.rel {
RelishTag::Relation(reltag) => format!(
"rel_{}_{}_{}_{}",
reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum
),
RelishTag::Slru {
slru: SlruKind::Clog,
segno,
} => format!("pg_xact_{:04X}", segno),
RelishTag::Slru {
slru: SlruKind::MultiXactMembers,
segno,
} => format!("pg_multixact_members_{:04X}", segno),
RelishTag::Slru {
slru: SlruKind::MultiXactOffsets,
segno,
} => format!("pg_multixact_offsets_{:04X}", segno),
RelishTag::FileNodeMap { spcnode, dbnode } => {
format!("pg_filenodemap_{}_{}", spcnode, dbnode)
}
RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid),
RelishTag::Checkpoint => "pg_control_checkpoint".to_string(),
RelishTag::ControlFile => "pg_control".to_string(),
};
write!(
f,
"{}_{}_{:016X}",
basename,
self.seg.segno,
u64::from(self.lsn),
)
}
}
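// Editor's note: the test below is an illustrative sketch, not part of the original
// file. It shows the parse/Display round trip for an SLRU (pg_xact) image file name,
// including the 4-digit hex encoding of the SLRU segment number; the concrete values
// are arbitrary.
#[cfg(test)]
mod image_filename_example {
    use super::*;

    #[test]
    fn image_filename_roundtrip() {
        let name = "pg_xact_0040_0_0000000001702050";
        let parsed = ImageFileName::parse_str(name).expect("valid image file name");
        assert_eq!(
            parsed.seg.rel,
            RelishTag::Slru {
                slru: SlruKind::Clog,
                segno: 0x40,
            }
        );
        assert_eq!(parsed.lsn, Lsn(0x0170_2050));
        // Display reproduces the original string exactly.
        assert_eq!(parsed.to_string(), name);
    }
}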
/// Scan the timeline directory and create ImageFileName and DeltaFileName
/// structs representing all files on disk
///
/// TODO: returning an Iterator would be more idiomatic
pub fn list_files(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
) -> Result<(Vec<ImageFileName>, Vec<DeltaFileName>)> {
let path = conf.timeline_path(&timelineid, &tenantid);
let mut deltafiles: Vec<DeltaFileName> = Vec::new();
let mut imgfiles: Vec<ImageFileName> = Vec::new();
for direntry in fs::read_dir(path)? {
let fname = direntry?.file_name();
let fname = fname.to_str().unwrap();
if let Some(deltafilename) = DeltaFileName::parse_str(fname) {
deltafiles.push(deltafilename);
} else if let Some(imgfilename) = ImageFileName::parse_str(fname) {
imgfiles.push(imgfilename);
} else if fname == METADATA_FILE_NAME || fname == "ancestor" || fname.ends_with(".old") {
// ignore these
} else {
warn!("unrecognized filename in timeline dir: {}", fname);
}
}
Ok((imgfiles, deltafiles))
}
/// Helper enum to hold a PageServerConf, or a path
///
/// This is used by DeltaLayer and ImageLayer. Normally, this holds a reference to the
/// global config, and paths to layer files are constructed using the tenant/timeline
/// path from the config. But in the 'dump_layerfile' binary, we need to construct a Layer
/// struct for a file on disk, without having a page server running, so that we have no
/// config. In that case, we use the Path variant to hold the full path to the file on
/// disk.
pub enum PathOrConf {
Path(PathBuf),
Conf(&'static PageServerConf),
}

View File

@@ -1,490 +0,0 @@
//! An ImageLayer represents an image or a snapshot of a segment at one particular LSN.
//! It is stored in a file on disk.
//!
//! On disk, the image files are stored in timelines/<timelineid> directory.
//! Currently, there are no subdirectories, and each image layer file is named like this:
//!
//! <spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<LSN>
//!
//! where <segno> is the segment number within the relish.
//!
//! For example:
//!
//! 1663_13990_2609_0_5_000000000169C348
//!
//! An image file is constructed using the 'bookfile' crate.
//!
//! Only metadata is loaded into memory by the load function.
//! When images are needed, they are read directly from disk.
//!
//! For blocky relishes, the images are stored in BLOCKY_IMAGES_CHAPTER.
//! All the images are required to be exactly BLOCK_SIZE bytes, which allows for random access.
//!
//! For non-blocky relishes, the image can be found in NONBLOCKY_IMAGE_CHAPTER.
//!
use crate::layered_repository::filename::{ImageFileName, PathOrConf};
use crate::layered_repository::storage_layer::{
Layer, PageReconstructData, PageReconstructResult, SegmentTag,
};
use crate::layered_repository::LayeredTimeline;
use crate::layered_repository::RELISH_SEG_SIZE;
use crate::PageServerConf;
use crate::{ZTenantId, ZTimelineId};
use anyhow::{anyhow, bail, ensure, Result};
use bytes::Bytes;
use log::*;
use serde::{Deserialize, Serialize};
use std::convert::TryInto;
use std::fs;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::path::{Path, PathBuf};
use std::sync::{Mutex, MutexGuard};
use bookfile::{Book, BookWriter};
use zenith_utils::bin_ser::BeSer;
use zenith_utils::lsn::Lsn;
// Magic constant to identify a Zenith segment image file
pub const IMAGE_FILE_MAGIC: u32 = 0x5A616E01 + 1;
/// Contains each block in block # order
const BLOCKY_IMAGES_CHAPTER: u64 = 1;
const NONBLOCKY_IMAGE_CHAPTER: u64 = 2;
/// Contains the [`Summary`] struct
const SUMMARY_CHAPTER: u64 = 3;
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct Summary {
tenantid: ZTenantId,
timelineid: ZTimelineId,
seg: SegmentTag,
lsn: Lsn,
}
impl From<&ImageLayer> for Summary {
fn from(layer: &ImageLayer) -> Self {
Self {
tenantid: layer.tenantid,
timelineid: layer.timelineid,
seg: layer.seg,
lsn: layer.lsn,
}
}
}
const BLOCK_SIZE: usize = 8192;
///
/// ImageLayer is the in-memory data structure associated with an on-disk image
/// file. We keep an ImageLayer in memory for each file, in the LayerMap. If a
/// layer is in "loaded" state, we have a copy of the file in memory, in 'inner'.
/// Otherwise the struct is just a placeholder for a file that exists on disk,
/// and it needs to be loaded before using it in queries.
///
pub struct ImageLayer {
path_or_conf: PathOrConf,
pub tenantid: ZTenantId,
pub timelineid: ZTimelineId,
pub seg: SegmentTag,
// This entry contains an image of all pages as of this LSN
pub lsn: Lsn,
inner: Mutex<ImageLayerInner>,
}
#[derive(Clone)]
enum ImageType {
Blocky { num_blocks: u32 },
NonBlocky,
}
pub struct ImageLayerInner {
/// If false, the 'image_type' has not been
/// loaded into memory yet.
loaded: bool,
/// Derived from filename and bookfile chapter metadata
image_type: ImageType,
}
impl Layer for ImageLayer {
fn filename(&self) -> PathBuf {
PathBuf::from(self.layer_name().to_string())
}
fn get_timeline_id(&self) -> ZTimelineId {
self.timelineid
}
fn get_seg_tag(&self) -> SegmentTag {
self.seg
}
fn is_dropped(&self) -> bool {
false
}
fn get_start_lsn(&self) -> Lsn {
self.lsn
}
fn get_end_lsn(&self) -> Lsn {
// End-bound is exclusive
self.lsn + 1
}
/// Look up given page in the file
fn get_page_reconstruct_data(
&self,
blknum: u32,
lsn: Lsn,
reconstruct_data: &mut PageReconstructData,
) -> Result<PageReconstructResult> {
assert!(lsn >= self.lsn);
let inner = self.load()?;
let base_blknum = blknum % RELISH_SEG_SIZE;
let (_path, book) = self.open_book()?;
let buf = match &inner.image_type {
ImageType::Blocky { num_blocks } => {
if base_blknum >= *num_blocks {
return Ok(PageReconstructResult::Missing(lsn));
}
let mut buf = vec![0u8; BLOCK_SIZE];
let offset = BLOCK_SIZE as u64 * base_blknum as u64;
let chapter = book.chapter_reader(BLOCKY_IMAGES_CHAPTER)?;
chapter.read_exact_at(&mut buf, offset)?;
buf
}
ImageType::NonBlocky => {
ensure!(base_blknum == 0);
book.read_chapter(NONBLOCKY_IMAGE_CHAPTER)?.into_vec()
}
};
reconstruct_data.page_img = Some(Bytes::from(buf));
Ok(PageReconstructResult::Complete)
}
/// Get size of the segment
fn get_seg_size(&self, _lsn: Lsn) -> Result<u32> {
let inner = self.load()?;
match inner.image_type {
ImageType::Blocky { num_blocks } => Ok(num_blocks),
ImageType::NonBlocky => Err(anyhow!("get_seg_size called for non-blocky segment")),
}
}
/// Does this segment exist at given LSN?
fn get_seg_exists(&self, _lsn: Lsn) -> Result<bool> {
Ok(true)
}
///
/// Release most of the memory used by this layer. If it's accessed again later,
/// it will need to be loaded back.
///
fn unload(&self) -> Result<()> {
let mut inner = self.inner.lock().unwrap();
inner.image_type = ImageType::Blocky { num_blocks: 0 };
inner.loaded = false;
Ok(())
}
fn delete(&self) -> Result<()> {
// delete underlying file
fs::remove_file(self.path())?;
Ok(())
}
fn is_incremental(&self) -> bool {
false
}
/// debugging function to print out the contents of the layer
fn dump(&self) -> Result<()> {
println!(
"----- image layer for ten {} tli {} seg {} at {} ----",
self.tenantid, self.timelineid, self.seg, self.lsn
);
let inner = self.load()?;
match inner.image_type {
ImageType::Blocky { num_blocks } => println!("({}) blocks ", num_blocks),
ImageType::NonBlocky => {
let (_path, book) = self.open_book()?;
let chapter = book.read_chapter(NONBLOCKY_IMAGE_CHAPTER)?;
println!("non-blocky ({} bytes)", chapter.len());
}
}
Ok(())
}
}
impl ImageLayer {
fn path_for(
path_or_conf: &PathOrConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
fname: &ImageFileName,
) -> PathBuf {
match path_or_conf {
PathOrConf::Path(path) => path.to_path_buf(),
PathOrConf::Conf(conf) => conf
.timeline_path(&timelineid, &tenantid)
.join(fname.to_string()),
}
}
/// Create a new image file, using the given array of pages.
fn create(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
seg: SegmentTag,
lsn: Lsn,
base_images: Vec<Bytes>,
) -> Result<ImageLayer> {
let image_type = if seg.rel.is_blocky() {
let num_blocks: u32 = base_images.len().try_into()?;
ImageType::Blocky { num_blocks }
} else {
assert_eq!(base_images.len(), 1);
ImageType::NonBlocky
};
let layer = ImageLayer {
path_or_conf: PathOrConf::Conf(conf),
timelineid,
tenantid,
seg,
lsn,
inner: Mutex::new(ImageLayerInner {
loaded: true,
image_type: image_type.clone(),
}),
};
let inner = layer.inner.lock().unwrap();
// Write the images into a file
let path = layer.path();
// Note: This overwrites any existing file. There shouldn't be any.
// FIXME: throw an error instead?
let file = File::create(&path)?;
let buf_writer = BufWriter::new(file);
let book = BookWriter::new(buf_writer, IMAGE_FILE_MAGIC)?;
let book = match &image_type {
ImageType::Blocky { .. } => {
let mut chapter = book.new_chapter(BLOCKY_IMAGES_CHAPTER);
for block_bytes in base_images {
assert_eq!(block_bytes.len(), BLOCK_SIZE);
chapter.write_all(&block_bytes)?;
}
chapter.close()?
}
ImageType::NonBlocky => {
let mut chapter = book.new_chapter(NONBLOCKY_IMAGE_CHAPTER);
chapter.write_all(&base_images[0])?;
chapter.close()?
}
};
let mut chapter = book.new_chapter(SUMMARY_CHAPTER);
let summary = Summary {
tenantid,
timelineid,
seg,
lsn,
};
Summary::ser_into(&summary, &mut chapter)?;
let book = chapter.close()?;
// This flushes the underlying 'buf_writer'.
let writer = book.close()?;
writer.get_ref().sync_all()?;
trace!("saved {}", path.display());
drop(inner);
Ok(layer)
}
// Create a new image file by materializing every page in a source layer
// at given LSN.
pub fn create_from_src(
conf: &'static PageServerConf,
timeline: &LayeredTimeline,
src: &dyn Layer,
lsn: Lsn,
) -> Result<ImageLayer> {
let seg = src.get_seg_tag();
let timelineid = timeline.timelineid;
let startblk;
let size;
if seg.rel.is_blocky() {
size = src.get_seg_size(lsn)?;
startblk = seg.segno * RELISH_SEG_SIZE;
} else {
size = 1;
startblk = 0;
}
trace!(
"creating new ImageLayer for {} on timeline {} at {}",
seg,
timelineid,
lsn,
);
let mut base_images: Vec<Bytes> = Vec::new();
for blknum in startblk..(startblk + size) {
let img = timeline.materialize_page(seg, blknum, lsn, &*src)?;
base_images.push(img);
}
Self::create(conf, timelineid, timeline.tenantid, seg, lsn, base_images)
}
///
/// Load the contents of the file into memory
///
fn load(&self) -> Result<MutexGuard<ImageLayerInner>> {
// quick exit if already loaded
let mut inner = self.inner.lock().unwrap();
if inner.loaded {
return Ok(inner);
}
let (path, book) = self.open_book()?;
match &self.path_or_conf {
PathOrConf::Conf(_) => {
let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
let actual_summary = Summary::des(&chapter)?;
let expected_summary = Summary::from(self);
if actual_summary != expected_summary {
bail!("in-file summary does not match expected summary. actual = {:?} expected = {:?}", actual_summary, expected_summary);
}
}
PathOrConf::Path(path) => {
let actual_filename = Path::new(path.file_name().unwrap());
let expected_filename = self.filename();
if actual_filename != expected_filename {
println!(
"warning: filename does not match what is expected from in-file summary"
);
println!("actual: {:?}", actual_filename);
println!("expected: {:?}", expected_filename);
}
}
}
let image_type = if self.seg.rel.is_blocky() {
let chapter = book.chapter_reader(BLOCKY_IMAGES_CHAPTER)?;
let images_len = chapter.len();
ensure!(images_len % BLOCK_SIZE as u64 == 0);
let num_blocks: u32 = (images_len / BLOCK_SIZE as u64).try_into()?;
ImageType::Blocky { num_blocks }
} else {
let _chapter = book.chapter_reader(NONBLOCKY_IMAGE_CHAPTER)?;
ImageType::NonBlocky
};
debug!("loaded from {}", &path.display());
*inner = ImageLayerInner {
loaded: true,
image_type,
};
Ok(inner)
}
fn open_book(&self) -> Result<(PathBuf, Book<File>)> {
let path = self.path();
let file = File::open(&path)?;
let book = Book::new(file)?;
Ok((path, book))
}
/// Create an ImageLayer struct representing an existing file on disk
pub fn new(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
filename: &ImageFileName,
) -> ImageLayer {
ImageLayer {
path_or_conf: PathOrConf::Conf(conf),
timelineid,
tenantid,
seg: filename.seg,
lsn: filename.lsn,
inner: Mutex::new(ImageLayerInner {
loaded: false,
image_type: ImageType::Blocky { num_blocks: 0 },
}),
}
}
/// Create an ImageLayer struct representing an existing file on disk.
///
/// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
pub fn new_for_path(path: &Path, book: &Book<File>) -> Result<ImageLayer> {
let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
let summary = Summary::des(&chapter)?;
Ok(ImageLayer {
path_or_conf: PathOrConf::Path(path.to_path_buf()),
timelineid: summary.timelineid,
tenantid: summary.tenantid,
seg: summary.seg,
lsn: summary.lsn,
inner: Mutex::new(ImageLayerInner {
loaded: false,
image_type: ImageType::Blocky { num_blocks: 0 },
}),
})
}
fn layer_name(&self) -> ImageFileName {
ImageFileName {
seg: self.seg,
lsn: self.lsn,
}
}
/// Path to the layer file in pageserver workdir.
pub fn path(&self) -> PathBuf {
Self::path_for(
&self.path_or_conf,
self.timelineid,
self.tenantid,
&self.layer_name(),
)
}
}
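// Editor's note: the function below is an illustrative sketch, not part of the
// original file. It shows roughly how the 'dump_layerfile' debugging path can use
// new_for_path(): open an image file directly, rebuild the ImageLayer from its
// summary chapter, and dump its contents.
#[allow(dead_code)]
fn dump_image_file_example(path: &Path) -> Result<()> {
    let file = File::open(path)?;
    let book = Book::new(file)?;
    // new_for_path() reads the SUMMARY_CHAPTER to recover tenant/timeline/seg/lsn.
    let layer = ImageLayer::new_for_path(path, &book)?;
    layer.dump()
}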

View File

@@ -1,659 +0,0 @@
//!
//! An in-memory layer stores recently received page versions in memory. The page versions
//! are held in a PageVersions map indexed by block number and LSN, and a VecMap tracks the size of the segment.
//!
use crate::layered_repository::filename::DeltaFileName;
use crate::layered_repository::storage_layer::{
Layer, PageReconstructData, PageReconstructResult, PageVersion, SegmentTag, RELISH_SEG_SIZE,
};
use crate::layered_repository::LayeredTimeline;
use crate::layered_repository::ZERO_PAGE;
use crate::layered_repository::{DeltaLayer, ImageLayer};
use crate::repository::WALRecord;
use crate::PageServerConf;
use crate::{ZTenantId, ZTimelineId};
use anyhow::{bail, ensure, Result};
use bytes::Bytes;
use log::*;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use zenith_utils::vec_map::VecMap;
use zenith_utils::lsn::Lsn;
use super::page_versions::PageVersions;
pub struct InMemoryLayer {
conf: &'static PageServerConf,
tenantid: ZTenantId,
timelineid: ZTimelineId,
seg: SegmentTag,
///
/// This layer contains all the changes from 'start_lsn'. The
/// start is inclusive.
///
start_lsn: Lsn,
/// LSN of the oldest page version stored in this layer
oldest_pending_lsn: Lsn,
/// The above fields never change. The parts that do change are in 'inner',
/// and protected by mutex.
inner: RwLock<InMemoryLayerInner>,
/// True if a predecessor layer might be needed to reconstruct pages in this layer.
incremental: bool,
}
pub struct InMemoryLayerInner {
/// Frozen in-memory layers have an exclusive end LSN.
/// Writes are only allowed when this is None
end_lsn: Option<Lsn>,
/// If this relation was dropped, remember when that happened.
/// The drop LSN is recorded in [`end_lsn`].
dropped: bool,
///
/// All versions of all pages in the layer are kept here.
/// Indexed by block number and LSN.
///
page_versions: PageVersions,
///
/// `segsizes` tracks the size of the segment at different points in time.
///
/// For a blocky rel, there is always one entry, at the layer's start_lsn,
/// so that determining the size never depends on the predecessor layer. For
/// a non-blocky rel, 'segsizes' is not used and is always empty.
///
segsizes: VecMap<Lsn, u32>,
}
impl InMemoryLayerInner {
fn assert_writeable(&self) {
assert!(self.end_lsn.is_none());
}
fn get_seg_size(&self, lsn: Lsn) -> u32 {
// Scan the BTreeMap backwards, starting from the given entry.
let slice = self.segsizes.slice_range(..=lsn);
// We make sure there is always at least one entry
if let Some((_entry_lsn, entry)) = slice.last() {
*entry
} else {
panic!("could not find seg size in in-memory layer");
}
}
}
impl Layer for InMemoryLayer {
// An in-memory layer doesn't really have a filename as it's not stored on disk,
// but we construct a filename as if it was a delta layer
fn filename(&self) -> PathBuf {
let inner = self.inner.read().unwrap();
let end_lsn;
if let Some(drop_lsn) = inner.end_lsn {
end_lsn = drop_lsn;
} else {
end_lsn = Lsn(u64::MAX);
}
let delta_filename = DeltaFileName {
seg: self.seg,
start_lsn: self.start_lsn,
end_lsn,
dropped: inner.dropped,
}
.to_string();
PathBuf::from(format!("inmem-{}", delta_filename))
}
fn get_timeline_id(&self) -> ZTimelineId {
self.timelineid
}
fn get_seg_tag(&self) -> SegmentTag {
self.seg
}
fn get_start_lsn(&self) -> Lsn {
self.start_lsn
}
fn get_end_lsn(&self) -> Lsn {
let inner = self.inner.read().unwrap();
if let Some(end_lsn) = inner.end_lsn {
end_lsn
} else {
Lsn(u64::MAX)
}
}
fn is_dropped(&self) -> bool {
let inner = self.inner.read().unwrap();
inner.dropped
}
/// Look up given page in the cache.
fn get_page_reconstruct_data(
&self,
blknum: u32,
lsn: Lsn,
reconstruct_data: &mut PageReconstructData,
) -> Result<PageReconstructResult> {
let mut need_image = true;
assert!(self.seg.blknum_in_seg(blknum));
{
let inner = self.inner.read().unwrap();
// Scan the page versions backwards, starting from `lsn`.
let iter = inner
.page_versions
.get_block_lsn_range(blknum, ..=lsn)
.iter()
.rev();
for (entry_lsn, entry) in iter {
if let Some(img) = &entry.page_image {
reconstruct_data.page_img = Some(img.clone());
need_image = false;
break;
} else if let Some(rec) = &entry.record {
reconstruct_data.records.push((*entry_lsn, rec.clone()));
if rec.will_init {
// This WAL record initializes the page, so no need to go further back
need_image = false;
break;
}
} else {
// No base image, and no WAL record. Huh?
bail!("no page image or WAL record for requested page");
}
}
// release lock on 'inner'
}
// If an older page image is needed to reconstruct the page, let the
// caller know
if need_image {
if self.incremental {
Ok(PageReconstructResult::Continue(Lsn(self.start_lsn.0 - 1)))
} else {
Ok(PageReconstructResult::Missing(self.start_lsn))
}
} else {
Ok(PageReconstructResult::Complete)
}
}
/// Get size of the relation at given LSN
fn get_seg_size(&self, lsn: Lsn) -> Result<u32> {
assert!(lsn >= self.start_lsn);
ensure!(
self.seg.rel.is_blocky(),
"get_seg_size() called on a non-blocky rel"
);
let inner = self.inner.read().unwrap();
Ok(inner.get_seg_size(lsn))
}
/// Does this segment exist at given LSN?
fn get_seg_exists(&self, lsn: Lsn) -> Result<bool> {
let inner = self.inner.read().unwrap();
// If the segment was created after the requested LSN,
// it doesn't exist in the layer. But then we shouldn't
// have requested it in the first place.
assert!(lsn >= self.start_lsn);
// Is the requested LSN after the segment was dropped?
if let Some(end_lsn) = inner.end_lsn {
if lsn >= end_lsn {
return Ok(false);
}
}
// Otherwise, it exists
Ok(true)
}
/// Cannot unload anything in an in-memory layer, since there's no backing
/// store. To release memory used by an in-memory layer, use 'freeze' to turn
/// it into an on-disk layer.
fn unload(&self) -> Result<()> {
Ok(())
}
/// Nothing to do here. When you drop the last reference to the layer, it will
/// be deallocated.
fn delete(&self) -> Result<()> {
Ok(())
}
fn is_incremental(&self) -> bool {
self.incremental
}
/// debugging function to print out the contents of the layer
fn dump(&self) -> Result<()> {
let inner = self.inner.read().unwrap();
let end_str = inner
.end_lsn
.as_ref()
.map(Lsn::to_string)
.unwrap_or_default();
println!(
"----- in-memory layer for tli {} seg {} {}-{} {} ----",
self.timelineid, self.seg, self.start_lsn, end_str, inner.dropped,
);
for (k, v) in inner.segsizes.as_slice() {
println!("segsizes {}: {}", k, v);
}
for (blknum, lsn, pv) in inner.page_versions.ordered_page_version_iter(None) {
println!(
"blk {} at {}: {}/{}\n",
blknum,
lsn,
pv.page_image.is_some(),
pv.record.is_some()
);
}
Ok(())
}
}
/// The result of writing an in-memory layer's data to disk.
pub struct LayersOnDisk {
pub delta_layers: Vec<DeltaLayer>,
pub image_layers: Vec<ImageLayer>,
}
impl LayersOnDisk {
pub fn is_empty(&self) -> bool {
self.delta_layers.is_empty() && self.image_layers.is_empty()
}
}
impl InMemoryLayer {
/// Return the oldest page version that's stored in this layer
pub fn get_oldest_pending_lsn(&self) -> Lsn {
self.oldest_pending_lsn
}
///
/// Create a new, empty, in-memory layer
///
pub fn create(
conf: &'static PageServerConf,
timelineid: ZTimelineId,
tenantid: ZTenantId,
seg: SegmentTag,
start_lsn: Lsn,
oldest_pending_lsn: Lsn,
) -> Result<InMemoryLayer> {
trace!(
"initializing new empty InMemoryLayer for writing {} on timeline {} at {}",
seg,
timelineid,
start_lsn
);
// The segment is initially empty, so initialize 'segsizes' with 0.
let mut segsizes = VecMap::default();
if seg.rel.is_blocky() {
segsizes.append(start_lsn, 0).unwrap();
}
Ok(InMemoryLayer {
conf,
timelineid,
tenantid,
seg,
start_lsn,
oldest_pending_lsn,
incremental: false,
inner: RwLock::new(InMemoryLayerInner {
end_lsn: None,
dropped: false,
page_versions: PageVersions::default(),
segsizes,
}),
})
}
// Write operations
/// Remember new page version, as a WAL record over previous version
pub fn put_wal_record(&self, lsn: Lsn, blknum: u32, rec: WALRecord) -> u32 {
self.put_page_version(
blknum,
lsn,
PageVersion {
page_image: None,
record: Some(rec),
},
)
}
/// Remember new page version, as a full page image
pub fn put_page_image(&self, blknum: u32, lsn: Lsn, img: Bytes) -> u32 {
self.put_page_version(
blknum,
lsn,
PageVersion {
page_image: Some(img),
record: None,
},
)
}
/// Common subroutine of the public put_wal_record() and put_page_image() functions.
/// Adds the page version to the in-memory tree
pub fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> u32 {
assert!(self.seg.blknum_in_seg(blknum));
trace!(
"put_page_version blk {} of {} at {}/{}",
blknum,
self.seg.rel,
self.timelineid,
lsn
);
let mut inner = self.inner.write().unwrap();
inner.assert_writeable();
let old = inner.page_versions.append_or_update_last(blknum, lsn, pv);
if old.is_some() {
// We already had an entry for this LSN. That's odd..
warn!(
"Page version of rel {} blk {} at {} already exists",
self.seg.rel, blknum, lsn
);
}
// Also update the relation size, if this extended the relation.
if self.seg.rel.is_blocky() {
let newsize = blknum - self.seg.segno * RELISH_SEG_SIZE + 1;
// use inner get_seg_size, since calling self.get_seg_size will try to acquire the lock,
// which we've just acquired above
let oldsize = inner.get_seg_size(lsn);
if newsize > oldsize {
trace!(
"enlarging segment {} from {} to {} blocks at {}",
self.seg,
oldsize,
newsize,
lsn
);
// If we are extending the relation by more than one page, initialize the "gap"
// with zeros
//
// XXX: What if the caller initializes the gap with subsequent call with same LSN?
// I don't think that can happen currently, but that is highly dependent on how
// PostgreSQL writes its WAL records and there's no guarantee of it. If it does
// happen, we would hit the "page version already exists" warning above on the
// subsequent call to initialize the gap page.
let gapstart = self.seg.segno * RELISH_SEG_SIZE + oldsize;
for gapblknum in gapstart..blknum {
let zeropv = PageVersion {
page_image: Some(ZERO_PAGE.clone()),
record: None,
};
trace!(
"filling gap blk {} with zeros for write of {}",
gapblknum,
blknum
);
let old = inner
.page_versions
.append_or_update_last(gapblknum, lsn, zeropv);
// We already had an entry for this LSN. That's odd..
if old.is_some() {
warn!(
"Page version of rel {} blk {} at {} already exists",
self.seg.rel, blknum, lsn
);
}
}
inner.segsizes.append_or_update_last(lsn, newsize).unwrap();
return newsize - oldsize;
}
}
0
}
/// Remember that the relation was truncated at given LSN
pub fn put_truncation(&self, lsn: Lsn, segsize: u32) {
assert!(
self.seg.rel.is_blocky(),
"put_truncation() called on a non-blocky rel"
);
let mut inner = self.inner.write().unwrap();
inner.assert_writeable();
// check that we truncate to a smaller size than the segment had before the truncation
let oldsize = inner.get_seg_size(lsn);
assert!(segsize < oldsize);
let old = inner.segsizes.append_or_update_last(lsn, segsize).unwrap();
if old.is_some() {
// We already had an entry for this LSN. That's odd..
warn!("Inserting truncation, but had an entry for the LSN already");
}
}
/// Remember that the segment was dropped at given LSN
pub fn drop_segment(&self, lsn: Lsn) {
let mut inner = self.inner.write().unwrap();
assert!(inner.end_lsn.is_none());
assert!(!inner.dropped);
inner.dropped = true;
assert!(self.start_lsn < lsn);
inner.end_lsn = Some(lsn);
trace!("dropped segment {} at {}", self.seg, lsn);
}
///
/// Initialize a new InMemoryLayer by copying the state at the given
/// point in time from the given existing layer.
///
pub fn create_successor_layer(
conf: &'static PageServerConf,
src: Arc<dyn Layer>,
timelineid: ZTimelineId,
tenantid: ZTenantId,
start_lsn: Lsn,
oldest_pending_lsn: Lsn,
) -> Result<InMemoryLayer> {
let seg = src.get_seg_tag();
assert!(oldest_pending_lsn.is_aligned());
assert!(oldest_pending_lsn >= start_lsn);
trace!(
"initializing new InMemoryLayer for writing {} on timeline {} at {}",
seg,
timelineid,
start_lsn,
);
// Copy the segment size at the start LSN from the predecessor layer.
let mut segsizes = VecMap::default();
if seg.rel.is_blocky() {
let size = src.get_seg_size(start_lsn)?;
segsizes.append(start_lsn, size).unwrap();
}
Ok(InMemoryLayer {
conf,
timelineid,
tenantid,
seg,
start_lsn,
oldest_pending_lsn,
incremental: true,
inner: RwLock::new(InMemoryLayerInner {
end_lsn: None,
dropped: false,
page_versions: PageVersions::default(),
segsizes,
}),
})
}
pub fn is_writeable(&self) -> bool {
let inner = self.inner.read().unwrap();
inner.end_lsn.is_none()
}
/// Make the layer non-writeable. Only call once.
/// Records the end_lsn for non-dropped layers.
/// `end_lsn` is inclusive
pub fn freeze(&self, end_lsn: Lsn) {
let mut inner = self.inner.write().unwrap();
if inner.end_lsn.is_some() {
assert!(inner.dropped);
} else {
assert!(!inner.dropped);
assert!(self.start_lsn < end_lsn + 1);
inner.end_lsn = Some(Lsn(end_lsn.0 + 1));
if let Some((lsn, _)) = inner.segsizes.as_slice().last() {
assert!(lsn <= &end_lsn, "{:?} {:?}", lsn, end_lsn);
}
for (_blk, lsn, _pv) in inner.page_versions.ordered_page_version_iter(None) {
assert!(lsn <= end_lsn);
}
}
}
/// Write this frozen in-memory layer to disk.
///
/// Returns new layers that replace this one.
/// If not dropped, returns a new image layer containing the page versions
/// at the `end_lsn`. Can also return a DeltaLayer that includes all the
/// WAL records between start and end LSN. (The delta layer is not needed
/// when a new relish is created with a single LSN, so that the start and
/// end LSN are the same.)
pub fn write_to_disk(&self, timeline: &LayeredTimeline) -> Result<LayersOnDisk> {
trace!(
"write_to_disk {} get_end_lsn is {}",
self.filename().display(),
self.get_end_lsn()
);
// Grab the lock in read-mode. We hold it over the I/O, but because this
// layer is not writeable anymore, no one should be trying to acquire the
// write lock on it, so we shouldn't block anyone. There's one exception
// though: another thread might have grabbed a reference to this layer
// in `get_layer_for_write' just before the checkpointer called
// `freeze`, and then `write_to_disk` on it. When the thread gets the
// lock, it will see that it's not writeable anymore and retry, but it
// would have to wait until we release it. That race condition is very
// rare though, so we just accept the potential latency hit for now.
let inner = self.inner.read().unwrap();
let end_lsn_exclusive = inner.end_lsn.unwrap();
if inner.dropped {
let delta_layer = DeltaLayer::create(
self.conf,
self.timelineid,
self.tenantid,
self.seg,
self.start_lsn,
end_lsn_exclusive,
true,
inner.page_versions.ordered_page_version_iter(None),
inner.segsizes.clone(),
)?;
trace!(
"freeze: created delta layer for dropped segment {} {}-{}",
self.seg,
self.start_lsn,
end_lsn_exclusive
);
return Ok(LayersOnDisk {
delta_layers: vec![delta_layer],
image_layers: Vec::new(),
});
}
// The stored `end_lsn` is exclusive, so subtract 1 to get the last included LSN.
// We want to make an ImageLayer for the last included LSN,
// so the DeltaLayer should exclude that LSN.
let end_lsn_inclusive = Lsn(end_lsn_exclusive.0 - 1);
let mut page_versions = inner
.page_versions
.ordered_page_version_iter(Some(end_lsn_inclusive));
let mut delta_layers = Vec::new();
if self.start_lsn != end_lsn_inclusive {
let (segsizes, _) = inner.segsizes.split_at(&end_lsn_exclusive);
// Write the page versions before the cutoff to disk.
let delta_layer = DeltaLayer::create(
self.conf,
self.timelineid,
self.tenantid,
self.seg,
self.start_lsn,
end_lsn_inclusive,
false,
page_versions,
segsizes,
)?;
delta_layers.push(delta_layer);
trace!(
"freeze: created delta layer {} {}-{}",
self.seg,
self.start_lsn,
end_lsn_inclusive
);
} else {
assert!(page_versions.next().is_none());
}
drop(inner);
// Write a new base image layer at the cutoff point
let image_layer =
ImageLayer::create_from_src(self.conf, timeline, self, end_lsn_inclusive)?;
trace!(
"freeze: created image layer {} at {}",
self.seg,
end_lsn_inclusive
);
Ok(LayersOnDisk {
delta_layers,
image_layers: vec![image_layer],
})
}
}
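// Editor's note: the function below is an illustrative lifecycle sketch, not part of
// the original file. The conf, ids and segment are placeholders supplied by the
// caller (the segment is assumed to belong to a blocky relation); it only shows the
// order of calls: create an open layer, buffer a page version, then freeze it so the
// checkpointer can call write_to_disk().
#[allow(dead_code)]
fn in_memory_layer_lifecycle_example(
    conf: &'static PageServerConf,
    timelineid: ZTimelineId,
    tenantid: ZTenantId,
    seg: SegmentTag,
) -> Result<()> {
    let start_lsn = Lsn(0x0169_C348);
    let first_blknum = seg.segno * RELISH_SEG_SIZE;
    let layer = InMemoryLayer::create(conf, timelineid, tenantid, seg, start_lsn, start_lsn)?;
    assert!(layer.is_writeable());

    // Buffer a full page image for the first block of the segment. For a blocky
    // relation this grows the segment from 0 to 1 blocks, so the return value is 1.
    let grew = layer.put_page_image(first_blknum, Lsn(0x0169_C350), ZERO_PAGE.clone());
    assert_eq!(grew, 1);

    // Freeze at an inclusive end LSN. After this the layer rejects writes, and the
    // checkpointer may call layer.write_to_disk(timeline) to produce delta/image layers.
    layer.freeze(Lsn(0x0169_C350));
    assert!(!layer.is_writeable());
    Ok(())
}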

View File

@@ -1,468 +0,0 @@
///
/// IntervalTree is a data structure for holding intervals. It is generic
/// to make unit testing possible, but the only real user of it is the layer map.
///
/// It's inspired by the "segment tree" or a "statistic tree" as described in
/// https://en.wikipedia.org/wiki/Segment_tree. However, we use a B-tree to hold
/// the points instead of a binary tree. This is called an "interval tree" instead
/// of "segment tree" because the term "segment" is already using Zenith to mean
/// something else. To add to the confusion, there is another data structure known
/// as "interval tree" out there (see https://en.wikipedia.org/wiki/Interval_tree),
/// for storing intervals, but this isn't that.
///
/// The basic idea is to have a B-tree of "interesting Points". At each Point,
/// there is a list of intervals that contain the point. The Points are formed
/// from the start bounds of each interval; there is a Point for each distinct
/// start bound.
///
/// Operations:
///
/// To find intervals that contain a given point, you search the b-tree to find
/// the nearest Point <= search key. Then you just return the list of intervals.
///
/// To insert an interval, find the Point with start key equal to the inserted item.
/// If the Point doesn't exist yet, create it, by copying all the items from the
/// previous Point that cover the new Point. Then walk right, inserting the new
/// interval to all the Points that are contained by the new interval (including the
/// newly created Point).
///
/// To remove an interval, you scan the tree for all the Points that are contained by
/// the removed interval, and remove it from the list in each Point.
///
/// Requirements and assumptions:
///
/// - Can store overlapping items
/// - But there are not many overlapping items
/// - The interval bounds don't change after it is added to the tree
/// - Intervals are uniquely identified by pointer equality. You must not insert the
/// same interval object twice, and `remove` uses pointer equality to remove the right
/// interval. It is OK to have two intervals with the same bounds, however.
///
use std::collections::BTreeMap;
use std::fmt::Debug;
use std::ops::Range;
use std::sync::Arc;
pub struct IntervalTree<I: ?Sized>
where
I: IntervalItem,
{
points: BTreeMap<I::Key, Point<I>>,
}
struct Point<I: ?Sized> {
/// All intervals that contain this point, in no particular order.
///
/// We assume that there aren't a lot of overlapping intervals, so that this vector
/// never grows very large. If that assumption doesn't hold, we could keep this ordered
/// by the end bound, to speed up `search`. But as long as there are only a few elements,
/// a linear search is OK.
elements: Vec<Arc<I>>,
}
/// Abstraction for an interval that can be stored in the tree
///
/// The start bound is inclusive and the end bound is exclusive. End must be greater
/// than start.
pub trait IntervalItem {
type Key: Ord + Copy + Debug + Sized;
fn start_key(&self) -> Self::Key;
fn end_key(&self) -> Self::Key;
fn bounds(&self) -> Range<Self::Key> {
self.start_key()..self.end_key()
}
}
impl<I: ?Sized> IntervalTree<I>
where
I: IntervalItem,
{
/// Return an element that contains 'key', or precedes it.
///
/// If there are multiple candidates, returns the one with the highest 'end' key.
pub fn search(&self, key: I::Key) -> Option<Arc<I>> {
// Find the greatest point that precedes or is equal to the search key. If there is
// none, returns None.
let (_, p) = self.points.range(..=key).next_back()?;
// Find the element with the highest end key at this point
let highest_item = p
.elements
.iter()
.reduce(|a, b| {
// starting with Rust 1.53, could use `std::cmp::max_by_key` here
if a.end_key() > b.end_key() {
a
} else {
b
}
})
.unwrap();
Some(Arc::clone(highest_item))
}
/// Iterate over all items with start bound >= 'key'
pub fn iter_newer(&self, key: I::Key) -> IntervalIter<I> {
IntervalIter {
point_iter: self.points.range(key..),
elem_iter: None,
}
}
/// Iterate over all items
pub fn iter(&self) -> IntervalIter<I> {
IntervalIter {
point_iter: self.points.range(..),
elem_iter: None,
}
}
pub fn insert(&mut self, item: Arc<I>) {
let start_key = item.start_key();
let end_key = item.end_key();
assert!(start_key < end_key);
let bounds = start_key..end_key;
// Find the starting point and walk forward from there
let mut found_start_point = false;
let iter = self.points.range_mut(bounds);
for (point_key, point) in iter {
if *point_key == start_key {
found_start_point = true;
// It is an error to insert the same item to the tree twice.
assert!(
!point.elements.iter().any(|x| Arc::ptr_eq(x, &item)),
"interval is already in the tree"
);
}
point.elements.push(Arc::clone(&item));
}
if !found_start_point {
// Create a new Point for the starting point
// Look at the previous point, and copy over elements that overlap with this
// new point
let mut new_elements: Vec<Arc<I>> = Vec::new();
if let Some((_, prev_point)) = self.points.range(..start_key).next_back() {
let overlapping_prev_elements = prev_point
.elements
.iter()
.filter(|x| x.bounds().contains(&start_key))
.cloned();
new_elements.extend(overlapping_prev_elements);
}
new_elements.push(item);
let new_point = Point {
elements: new_elements,
};
self.points.insert(start_key, new_point);
}
}
pub fn remove(&mut self, item: &Arc<I>) {
// range search points
let start_key = item.start_key();
let end_key = item.end_key();
let bounds = start_key..end_key;
let mut points_to_remove: Vec<I::Key> = Vec::new();
let mut found_start_point = false;
for (point_key, point) in self.points.range_mut(bounds) {
if *point_key == start_key {
found_start_point = true;
}
let len_before = point.elements.len();
point.elements.retain(|other| !Arc::ptr_eq(other, item));
let len_after = point.elements.len();
assert_eq!(len_after + 1, len_before);
if len_after == 0 {
points_to_remove.push(*point_key);
}
}
assert!(found_start_point);
for k in points_to_remove {
self.points.remove(&k).unwrap();
}
}
}
pub struct IntervalIter<'a, I: ?Sized>
where
I: IntervalItem,
{
point_iter: std::collections::btree_map::Range<'a, I::Key, Point<I>>,
elem_iter: Option<(I::Key, std::slice::Iter<'a, Arc<I>>)>,
}
impl<'a, I> Iterator for IntervalIter<'a, I>
where
I: IntervalItem + ?Sized,
{
type Item = Arc<I>;
fn next(&mut self) -> Option<Self::Item> {
// Iterate over all elements in all the points in 'point_iter'. To avoid
// returning the same element twice, we only return each element at its
// starting point.
loop {
// Return next remaining element from the current point
if let Some((point_key, elem_iter)) = &mut self.elem_iter {
for elem in elem_iter {
if elem.start_key() == *point_key {
return Some(Arc::clone(elem));
}
}
}
// No more elements at this point. Move to next point.
if let Some((point_key, point)) = self.point_iter.next() {
self.elem_iter = Some((*point_key, point.elements.iter()));
continue;
} else {
// No more points, all done
return None;
}
}
}
}
impl<I: ?Sized> Default for IntervalTree<I>
where
I: IntervalItem,
{
fn default() -> Self {
IntervalTree {
points: BTreeMap::new(),
}
}
}
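// Editor's note: the module below is an illustrative sketch, not part of the original
// file. The layer map keys its intervals by LSN; this hypothetical LsnRangeItem shows
// what an IntervalItem implementation over an LSN range looks like.
#[cfg(test)]
mod lsn_interval_example {
    use super::*;
    use zenith_utils::lsn::Lsn;

    struct LsnRangeItem {
        start_lsn: Lsn,
        /// Exclusive; must be greater than start_lsn.
        end_lsn: Lsn,
    }

    impl IntervalItem for LsnRangeItem {
        type Key = Lsn;
        fn start_key(&self) -> Lsn {
            self.start_lsn
        }
        fn end_key(&self) -> Lsn {
            self.end_lsn
        }
    }

    #[test]
    fn search_by_lsn() {
        let mut tree: IntervalTree<LsnRangeItem> = IntervalTree::default();
        tree.insert(Arc::new(LsnRangeItem {
            start_lsn: Lsn(10),
            end_lsn: Lsn(20),
        }));
        // A key inside the interval finds it; a key past the end still returns the
        // interval that precedes it; a key before any interval returns None.
        assert!(tree.search(Lsn(15)).is_some());
        assert!(tree.search(Lsn(25)).is_some());
        assert!(tree.search(Lsn(5)).is_none());
    }
}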
#[cfg(test)]
mod tests {
use super::*;
use std::fmt;
#[derive(Debug)]
struct MockItem {
start_key: u32,
end_key: u32,
val: String,
}
impl IntervalItem for MockItem {
type Key = u32;
fn start_key(&self) -> u32 {
self.start_key
}
fn end_key(&self) -> u32 {
self.end_key
}
}
impl MockItem {
fn new(start_key: u32, end_key: u32) -> Self {
MockItem {
start_key,
end_key,
val: format!("{}-{}", start_key, end_key),
}
}
fn new_str(start_key: u32, end_key: u32, val: &str) -> Self {
MockItem {
start_key,
end_key,
val: format!("{}-{}: {}", start_key, end_key, val),
}
}
}
impl fmt::Display for MockItem {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.val)
}
}
#[rustfmt::skip]
fn assert_search(
tree: &IntervalTree<MockItem>,
key: u32,
expected: &[&str],
) -> Option<Arc<MockItem>> {
if let Some(v) = tree.search(key) {
let vstr = v.to_string();
assert!(!expected.is_empty(), "search with {} returned {}, expected None", key, v);
assert!(
expected.contains(&vstr.as_str()),
"search with {} returned {}, expected one of: {:?}",
key, v, expected,
);
Some(v)
} else {
assert!(
expected.is_empty(),
"search with {} returned None, expected one of {:?}",
key, expected
);
None
}
}
fn assert_contents(tree: &IntervalTree<MockItem>, expected: &[&str]) {
let mut contents: Vec<String> = tree.iter().map(|e| e.to_string()).collect();
contents.sort();
assert_eq!(contents, expected);
}
fn dump_tree(tree: &IntervalTree<MockItem>) {
for (point_key, point) in tree.points.iter() {
print!("{}:", point_key);
for e in point.elements.iter() {
print!(" {}", e);
}
println!();
}
}
#[test]
fn test_interval_tree_simple() {
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
// Simple, non-overlapping ranges.
tree.insert(Arc::new(MockItem::new(10, 11)));
tree.insert(Arc::new(MockItem::new(11, 12)));
tree.insert(Arc::new(MockItem::new(12, 13)));
tree.insert(Arc::new(MockItem::new(18, 19)));
tree.insert(Arc::new(MockItem::new(17, 18)));
tree.insert(Arc::new(MockItem::new(15, 16)));
assert_search(&tree, 9, &[]);
assert_search(&tree, 10, &["10-11"]);
assert_search(&tree, 11, &["11-12"]);
assert_search(&tree, 12, &["12-13"]);
assert_search(&tree, 13, &["12-13"]);
assert_search(&tree, 14, &["12-13"]);
assert_search(&tree, 15, &["15-16"]);
assert_search(&tree, 16, &["15-16"]);
assert_search(&tree, 17, &["17-18"]);
assert_search(&tree, 18, &["18-19"]);
assert_search(&tree, 19, &["18-19"]);
assert_search(&tree, 20, &["18-19"]);
// remove a few entries and search around them again
tree.remove(&assert_search(&tree, 10, &["10-11"]).unwrap()); // first entry
tree.remove(&assert_search(&tree, 12, &["12-13"]).unwrap()); // entry in the middle
tree.remove(&assert_search(&tree, 18, &["18-19"]).unwrap()); // last entry
assert_search(&tree, 9, &[]);
assert_search(&tree, 10, &[]);
assert_search(&tree, 11, &["11-12"]);
assert_search(&tree, 12, &["11-12"]);
assert_search(&tree, 14, &["11-12"]);
assert_search(&tree, 15, &["15-16"]);
assert_search(&tree, 17, &["17-18"]);
assert_search(&tree, 18, &["17-18"]);
}
#[test]
fn test_interval_tree_overlap() {
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
// Overlapping items
tree.insert(Arc::new(MockItem::new(22, 24)));
tree.insert(Arc::new(MockItem::new(23, 25)));
let x24_26 = Arc::new(MockItem::new(24, 26));
tree.insert(Arc::clone(&x24_26));
let x26_28 = Arc::new(MockItem::new(26, 28));
tree.insert(Arc::clone(&x26_28));
tree.insert(Arc::new(MockItem::new(25, 27)));
assert_search(&tree, 22, &["22-24"]);
assert_search(&tree, 23, &["22-24", "23-25"]);
assert_search(&tree, 24, &["23-25", "24-26"]);
assert_search(&tree, 25, &["24-26", "25-27"]);
assert_search(&tree, 26, &["25-27", "26-28"]);
assert_search(&tree, 27, &["26-28"]);
assert_search(&tree, 28, &["26-28"]);
assert_search(&tree, 29, &["26-28"]);
tree.remove(&x24_26);
tree.remove(&x26_28);
assert_search(&tree, 23, &["22-24", "23-25"]);
assert_search(&tree, 24, &["23-25"]);
assert_search(&tree, 25, &["25-27"]);
assert_search(&tree, 26, &["25-27"]);
assert_search(&tree, 27, &["25-27"]);
assert_search(&tree, 28, &["25-27"]);
assert_search(&tree, 29, &["25-27"]);
}
#[test]
fn test_interval_tree_nested() {
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
// Items containing other items
tree.insert(Arc::new(MockItem::new(31, 39)));
tree.insert(Arc::new(MockItem::new(32, 34)));
tree.insert(Arc::new(MockItem::new(33, 35)));
tree.insert(Arc::new(MockItem::new(30, 40)));
assert_search(&tree, 30, &["30-40"]);
assert_search(&tree, 31, &["30-40", "31-39"]);
assert_search(&tree, 32, &["30-40", "32-34", "31-39"]);
assert_search(&tree, 33, &["30-40", "32-34", "33-35", "31-39"]);
assert_search(&tree, 34, &["30-40", "33-35", "31-39"]);
assert_search(&tree, 35, &["30-40", "31-39"]);
assert_search(&tree, 36, &["30-40", "31-39"]);
assert_search(&tree, 37, &["30-40", "31-39"]);
assert_search(&tree, 38, &["30-40", "31-39"]);
assert_search(&tree, 39, &["30-40"]);
assert_search(&tree, 40, &["30-40"]);
assert_search(&tree, 41, &["30-40"]);
}
#[test]
fn test_interval_tree_duplicates() {
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
// Duplicate keys
let item_a = Arc::new(MockItem::new_str(55, 56, "a"));
tree.insert(Arc::clone(&item_a));
let item_b = Arc::new(MockItem::new_str(55, 56, "b"));
tree.insert(Arc::clone(&item_b));
let item_c = Arc::new(MockItem::new_str(55, 56, "c"));
tree.insert(Arc::clone(&item_c));
let item_d = Arc::new(MockItem::new_str(54, 56, "d"));
tree.insert(Arc::clone(&item_d));
let item_e = Arc::new(MockItem::new_str(55, 57, "e"));
tree.insert(Arc::clone(&item_e));
dump_tree(&tree);
assert_search(
&tree,
55,
&["55-56: a", "55-56: b", "55-56: c", "54-56: d", "55-57: e"],
);
tree.remove(&item_b);
dump_tree(&tree);
assert_contents(&tree, &["54-56: d", "55-56: a", "55-56: c", "55-57: e"]);
tree.remove(&item_d);
dump_tree(&tree);
assert_contents(&tree, &["55-56: a", "55-56: c", "55-57: e"]);
}
#[test]
#[should_panic]
fn test_interval_tree_insert_twice() {
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
// Inserting the same item twice is not cool
let item = Arc::new(MockItem::new(1, 2));
tree.insert(Arc::clone(&item));
tree.insert(Arc::clone(&item)); // fails assertion
}
}

View File

@@ -1,444 +0,0 @@
//!
//! The layer map tracks what layers exist for all the relishes in a timeline.
//!
//! When the timeline is first accessed, the server lists all the layer files
//! in the timelines/<timelineid> directory, and populates this map with
//! ImageLayer and DeltaLayer structs corresponding to each file. When new WAL
//! is received, we create InMemoryLayers to hold the incoming records. Now and
//! then, in the checkpoint() function, the in-memory layers are frozen, forming
//! new image and delta layers, and the corresponding files are written to disk.
//!
use crate::layered_repository::interval_tree::{IntervalItem, IntervalIter, IntervalTree};
use crate::layered_repository::storage_layer::{Layer, SegmentTag};
use crate::layered_repository::InMemoryLayer;
use crate::relish::*;
use anyhow::Result;
use lazy_static::lazy_static;
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap};
use std::sync::Arc;
use zenith_metrics::{register_int_gauge, IntGauge};
use zenith_utils::lsn::Lsn;
lazy_static! {
static ref NUM_INMEMORY_LAYERS: IntGauge =
register_int_gauge!("pageserver_inmemory_layers", "Number of layers in memory")
.expect("failed to define a metric");
static ref NUM_ONDISK_LAYERS: IntGauge =
register_int_gauge!("pageserver_ondisk_layers", "Number of layers on-disk")
.expect("failed to define a metric");
}
///
/// LayerMap tracks what layers exist on a timeline.
///
#[derive(Default)]
pub struct LayerMap {
/// All the layers keyed by segment tag
segs: HashMap<SegmentTag, SegEntry>,
/// All in-memory layers, ordered by 'oldest_pending_lsn' and generation
/// of each layer. This allows easy access to the in-memory layer that
/// contains the oldest WAL record.
open_layers: BinaryHeap<OpenLayerEntry>,
/// Generation number, used to distinguish newly inserted entries in the
/// binary heap from older entries during checkpoint.
current_generation: u64,
}
impl LayerMap {
///
/// Look up a layer using the given segment tag and LSN. This differs from a
/// plain key-value lookup in that if there is any layer that covers the
/// given LSN, or precedes the given LSN, it is returned. In other words,
/// you don't need to know the exact start LSN of the layer.
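/// For example, if the newest layer for a segment covers LSNs 0x100..0x200,
/// a get() at LSN 0x500 still returns that layer, since no newer layer exists.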
///
pub fn get(&self, tag: &SegmentTag, lsn: Lsn) -> Option<Arc<dyn Layer>> {
let segentry = self.segs.get(tag)?;
segentry.get(lsn)
}
///
/// Get the open layer for given segment for writing. Or None if no open
/// layer exists.
///
pub fn get_open(&self, tag: &SegmentTag) -> Option<Arc<InMemoryLayer>> {
let segentry = self.segs.get(tag)?;
segentry.open.as_ref().map(Arc::clone)
}
///
/// Insert an open in-memory layer
///
pub fn insert_open(&mut self, layer: Arc<InMemoryLayer>) {
let segentry = self.segs.entry(layer.get_seg_tag()).or_default();
segentry.update_open(Arc::clone(&layer));
let oldest_pending_lsn = layer.get_oldest_pending_lsn();
// After a crash and restart, 'oldest_pending_lsn' of the oldest in-memory
// layer becomes the WAL streaming starting point, so it had better not point
// in the middle of a WAL record.
assert!(oldest_pending_lsn.is_aligned());
// Also add it to the binary heap
let open_layer_entry = OpenLayerEntry {
oldest_pending_lsn: layer.get_oldest_pending_lsn(),
layer,
generation: self.current_generation,
};
self.open_layers.push(open_layer_entry);
NUM_INMEMORY_LAYERS.inc();
}
/// Remove the oldest in-memory layer
pub fn pop_oldest_open(&mut self) {
// Pop it from the binary heap
let oldest_entry = self.open_layers.pop().unwrap();
let segtag = oldest_entry.layer.get_seg_tag();
// Also remove it from the SegEntry of this segment
let mut segentry = self.segs.get_mut(&segtag).unwrap();
if Arc::ptr_eq(segentry.open.as_ref().unwrap(), &oldest_entry.layer) {
segentry.open = None;
} else {
// We could have already updated segentry.open for a
// dropped (non-writeable) layer. This is fine.
assert!(!oldest_entry.layer.is_writeable());
assert!(oldest_entry.layer.is_dropped());
}
NUM_INMEMORY_LAYERS.dec();
}
///
/// Insert an on-disk layer
///
pub fn insert_historic(&mut self, layer: Arc<dyn Layer>) {
let segentry = self.segs.entry(layer.get_seg_tag()).or_default();
segentry.insert_historic(layer);
NUM_ONDISK_LAYERS.inc();
}
///
/// Remove an on-disk layer from the map.
///
/// This should be called when the corresponding file on disk has been deleted.
///
pub fn remove_historic(&mut self, layer: Arc<dyn Layer>) {
let tag = layer.get_seg_tag();
if let Some(segentry) = self.segs.get_mut(&tag) {
segentry.historic.remove(&layer);
}
NUM_ONDISK_LAYERS.dec();
}
// List relations along with a flag that marks if they exist at the given lsn.
// spcnode 0 and dbnode 0 have special meanings and mean all tablespaces/databases.
// Pass a RelTag if we're only interested in specific relations.
pub fn list_relishes(&self, tag: Option<RelTag>, lsn: Lsn) -> Result<HashMap<RelishTag, bool>> {
let mut rels: HashMap<RelishTag, bool> = HashMap::new();
for (seg, segentry) in self.segs.iter() {
match seg.rel {
RelishTag::Relation(reltag) => {
if let Some(request_rel) = tag {
if (request_rel.spcnode == 0 || reltag.spcnode == request_rel.spcnode)
&& (request_rel.dbnode == 0 || reltag.dbnode == request_rel.dbnode)
{
if let Some(exists) = segentry.exists_at_lsn(lsn)? {
rels.insert(seg.rel, exists);
}
}
}
}
_ => {
if tag == None {
if let Some(exists) = segentry.exists_at_lsn(lsn)? {
rels.insert(seg.rel, exists);
}
}
}
}
}
Ok(rels)
}
/// Is there a newer image layer for given segment?
///
/// This is used for garbage collection, to determine if an old layer can
/// be deleted.
pub fn newer_image_layer_exists(&self, seg: SegmentTag, lsn: Lsn) -> bool {
if let Some(segentry) = self.segs.get(&seg) {
segentry.newer_image_layer_exists(lsn)
} else {
false
}
}
/// Is there any layer for given segment that is alive at the lsn?
///
/// This is a public wrapper for the SegEntry function,
/// used for garbage collection, to determine if some live layer
/// exists at the lsn. If so, we shouldn't delete a newer dropped layer
/// to avoid incorrectly making it visible.
pub fn layer_exists_at_lsn(&self, seg: SegmentTag, lsn: Lsn) -> Result<bool> {
Ok(if let Some(segentry) = self.segs.get(&seg) {
segentry.exists_at_lsn(lsn)?.unwrap_or(false)
} else {
false
})
}
/// Return the oldest in-memory layer, along with its generation number.
pub fn peek_oldest_open(&self) -> Option<(Arc<InMemoryLayer>, u64)> {
self.open_layers
.peek()
.map(|oldest_entry| (Arc::clone(&oldest_entry.layer), oldest_entry.generation))
}
/// Increment the generation number used to stamp open in-memory layers. Layers
/// added with `insert_open` after this call will be associated with the new
/// generation. Returns the new generation number.
pub fn increment_generation(&mut self) -> u64 {
self.current_generation += 1;
self.current_generation
}
pub fn iter_historic_layers(&self) -> HistoricLayerIter {
HistoricLayerIter {
seg_iter: self.segs.iter(),
iter: None,
}
}
/// debugging function to print out the contents of the layer map
#[allow(unused)]
pub fn dump(&self) -> Result<()> {
println!("Begin dump LayerMap");
for (seg, segentry) in self.segs.iter() {
if let Some(open) = &segentry.open {
open.dump()?;
}
for layer in segentry.historic.iter() {
layer.dump()?;
}
}
println!("End dump LayerMap");
Ok(())
}
}
impl IntervalItem for dyn Layer {
type Key = Lsn;
fn start_key(&self) -> Lsn {
self.get_start_lsn()
}
fn end_key(&self) -> Lsn {
self.get_end_lsn()
}
}
///
/// Per-segment entry in the LayerMap::segs hash map. Holds all the layers
/// associated with the segment.
///
/// The last layer that is open for writes is always an InMemoryLayer,
/// and is kept in a separate field, because there can be only one for
/// each segment. The older layers, stored on disk, are kept in an
/// IntervalTree.
#[derive(Default)]
struct SegEntry {
open: Option<Arc<InMemoryLayer>>,
historic: IntervalTree<dyn Layer>,
}
impl SegEntry {
/// Does the segment exist at the given LSN?
/// Return None if no layer in this SegEntry covers the given LSN.
fn exists_at_lsn(&self, lsn: Lsn) -> Result<Option<bool>> {
if let Some(layer) = self.get(lsn) {
Ok(Some(layer.get_seg_exists(lsn)?))
} else {
Ok(None)
}
}
pub fn get(&self, lsn: Lsn) -> Option<Arc<dyn Layer>> {
if let Some(open) = &self.open {
if open.get_start_lsn() <= lsn {
let x: Arc<dyn Layer> = Arc::clone(open) as _;
return Some(x);
}
}
self.historic.search(lsn)
}
pub fn newer_image_layer_exists(&self, lsn: Lsn) -> bool {
// We only check on-disk layers, because
// in-memory layers are not durable
self.historic
.iter_newer(lsn)
.any(|layer| !layer.is_incremental())
}
// Set a new open layer for a SegEntry.
// It's OK to replace the previous open layer,
// but only if it is no longer writeable.
pub fn update_open(&mut self, layer: Arc<InMemoryLayer>) {
if let Some(prev_open) = &self.open {
assert!(!prev_open.is_writeable());
}
self.open = Some(layer);
}
pub fn insert_historic(&mut self, layer: Arc<dyn Layer>) {
self.historic.insert(layer);
}
}
/// Entry held in LayerMap::open_layers, with boilerplate comparison routines
/// to implement a min-heap ordered by 'oldest_pending_lsn' and 'generation'
///
/// The generation number associated with each entry can be used to distinguish
/// recently-added entries (i.e. after the last call to increment_generation()) from older
/// entries with the same 'oldest_pending_lsn'.
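/// For example, a checkpoint pass can keep popping open layers until it reaches
/// one stamped with the current generation (see the sketch after the tests below).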
struct OpenLayerEntry {
pub oldest_pending_lsn: Lsn, // copy of layer.get_oldest_pending_lsn()
pub generation: u64,
pub layer: Arc<InMemoryLayer>,
}
impl Ord for OpenLayerEntry {
fn cmp(&self, other: &Self) -> Ordering {
// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
// to get that. Entries with identical oldest_pending_lsn are ordered by generation
other
.oldest_pending_lsn
.cmp(&self.oldest_pending_lsn)
.then_with(|| other.generation.cmp(&self.generation))
}
}
impl PartialOrd for OpenLayerEntry {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for OpenLayerEntry {
fn eq(&self, other: &Self) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for OpenLayerEntry {}
/// Iterator returned by LayerMap::iter_historic_layers()
pub struct HistoricLayerIter<'a> {
seg_iter: std::collections::hash_map::Iter<'a, SegmentTag, SegEntry>,
iter: Option<IntervalIter<'a, dyn Layer>>,
}
impl<'a> Iterator for HistoricLayerIter<'a> {
type Item = Arc<dyn Layer>;
fn next(&mut self) -> std::option::Option<<Self as std::iter::Iterator>::Item> {
loop {
if let Some(x) = &mut self.iter {
if let Some(x) = x.next() {
return Some(Arc::clone(&x));
}
}
if let Some((_tag, segentry)) = self.seg_iter.next() {
self.iter = Some(segentry.historic.iter());
continue;
} else {
return None;
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::PageServerConf;
use std::str::FromStr;
use zenith_utils::zid::{ZTenantId, ZTimelineId};
/// Arbitrary relation tag, for testing.
const TESTREL_A: RelishTag = RelishTag::Relation(RelTag {
spcnode: 0,
dbnode: 111,
relnode: 1000,
forknum: 0,
});
/// Construct a dummy InMemoryLayer for testing
fn dummy_inmem_layer(
conf: &'static PageServerConf,
segno: u32,
start_lsn: Lsn,
oldest_pending_lsn: Lsn,
) -> Arc<InMemoryLayer> {
Arc::new(
InMemoryLayer::create(
conf,
ZTimelineId::from_str("00000000000000000000000000000000").unwrap(),
ZTenantId::from_str("00000000000000000000000000000000").unwrap(),
SegmentTag {
rel: TESTREL_A,
segno,
},
start_lsn,
oldest_pending_lsn,
)
.unwrap(),
)
}
#[test]
fn test_open_layers() -> Result<()> {
let conf = PageServerConf::dummy_conf(PageServerConf::test_repo_dir("dummy_inmem_layer"));
let conf = Box::leak(Box::new(conf));
let mut layers = LayerMap::default();
let gen1 = layers.increment_generation();
layers.insert_open(dummy_inmem_layer(conf, 0, Lsn(0x100), Lsn(0x100)));
layers.insert_open(dummy_inmem_layer(conf, 1, Lsn(0x100), Lsn(0x200)));
layers.insert_open(dummy_inmem_layer(conf, 2, Lsn(0x100), Lsn(0x120)));
layers.insert_open(dummy_inmem_layer(conf, 3, Lsn(0x100), Lsn(0x110)));
let gen2 = layers.increment_generation();
layers.insert_open(dummy_inmem_layer(conf, 4, Lsn(0x100), Lsn(0x110)));
layers.insert_open(dummy_inmem_layer(conf, 5, Lsn(0x100), Lsn(0x100)));
// A helper function (closure) to pop the next oldest open entry from the layer map,
// and assert that it is what we'd expect
let mut assert_pop_layer = |expected_segno: u32, expected_generation: u64| {
let (l, generation) = layers.peek_oldest_open().unwrap();
assert!(l.get_seg_tag().segno == expected_segno);
assert!(generation == expected_generation);
layers.pop_oldest_open();
};
assert_pop_layer(0, gen1); // 0x100
assert_pop_layer(5, gen2); // 0x100
assert_pop_layer(3, gen1); // 0x110
assert_pop_layer(4, gen2); // 0x110
assert_pop_layer(2, gen1); // 0x120
assert_pop_layer(1, gen1); // 0x200
Ok(())
}
}
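The generation mechanism above is easiest to see in use. Below is a minimal sketch, not the actual checkpoint code (the checkpoint_sketch name and body are invented for illustration), of how a checkpoint pass could drain only the layers that were already open when it started:
#[allow(dead_code)]
fn checkpoint_sketch(layers: &mut LayerMap) {
    // Layers inserted after this call are stamped with 'new_gen'.
    let new_gen = layers.increment_generation();
    while let Some((layer, generation)) = layers.peek_oldest_open() {
        if generation == new_gen {
            // Opened after this checkpoint pass started; leave it for the next pass.
            break;
        }
        // ... freeze 'layer' and write it out as image/delta layers here ...
        let _ = layer;
        layers.pop_oldest_open();
    }
}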

View File

@@ -1,150 +0,0 @@
use std::{collections::HashMap, ops::RangeBounds, slice};
use zenith_utils::{lsn::Lsn, vec_map::VecMap};
use super::storage_layer::PageVersion;
const EMPTY_SLICE: &[(Lsn, PageVersion)] = &[];
#[derive(Debug, Default)]
pub struct PageVersions(HashMap<u32, VecMap<Lsn, PageVersion>>);
impl PageVersions {
pub fn append_or_update_last(
&mut self,
blknum: u32,
lsn: Lsn,
page_version: PageVersion,
) -> Option<PageVersion> {
let map = self.0.entry(blknum).or_insert_with(VecMap::default);
map.append_or_update_last(lsn, page_version).unwrap()
}
/// Get all [`PageVersion`]s in a block
pub fn get_block_slice(&self, blknum: u32) -> &[(Lsn, PageVersion)] {
self.0
.get(&blknum)
.map(VecMap::as_slice)
.unwrap_or(EMPTY_SLICE)
}
/// Get a range of [`PageVersions`] in a block
pub fn get_block_lsn_range<R: RangeBounds<Lsn>>(
&self,
blknum: u32,
range: R,
) -> &[(Lsn, PageVersion)] {
self.0
.get(&blknum)
.map(|vec_map| vec_map.slice_range(range))
.unwrap_or(EMPTY_SLICE)
}
/// Iterate through [`PageVersion`]s in (block, lsn) order.
/// If a [`cutoff_lsn`] is set, only show versions with `lsn < cutoff_lsn`
pub fn ordered_page_version_iter(&self, cutoff_lsn: Option<Lsn>) -> OrderedPageVersionIter<'_> {
let mut ordered_blocks: Vec<u32> = self.0.keys().cloned().collect();
ordered_blocks.sort_unstable();
let slice = ordered_blocks
.first()
.map(|&blknum| self.get_block_slice(blknum))
.unwrap_or(EMPTY_SLICE);
OrderedPageVersionIter {
page_versions: self,
ordered_blocks,
cur_block_idx: 0,
cutoff_lsn,
cur_slice_iter: slice.iter(),
}
}
}
pub struct OrderedPageVersionIter<'a> {
page_versions: &'a PageVersions,
ordered_blocks: Vec<u32>,
cur_block_idx: usize,
cutoff_lsn: Option<Lsn>,
cur_slice_iter: slice::Iter<'a, (Lsn, PageVersion)>,
}
impl OrderedPageVersionIter<'_> {
fn is_lsn_before_cutoff(&self, lsn: &Lsn) -> bool {
if let Some(cutoff_lsn) = self.cutoff_lsn.as_ref() {
lsn < cutoff_lsn
} else {
true
}
}
}
impl<'a> Iterator for OrderedPageVersionIter<'a> {
type Item = (u32, Lsn, &'a PageVersion);
fn next(&mut self) -> Option<Self::Item> {
loop {
if let Some((lsn, page_version)) = self.cur_slice_iter.next() {
if self.is_lsn_before_cutoff(lsn) {
let blknum = self.ordered_blocks[self.cur_block_idx];
return Some((blknum, *lsn, page_version));
}
}
let next_block_idx = self.cur_block_idx + 1;
let blknum: u32 = *self.ordered_blocks.get(next_block_idx)?;
self.cur_block_idx = next_block_idx;
self.cur_slice_iter = self.page_versions.get_block_slice(blknum).iter();
}
}
}
#[cfg(test)]
mod tests {
use super::*;
const EMPTY_PAGE_VERSION: PageVersion = PageVersion {
page_image: None,
record: None,
};
#[test]
fn test_ordered_iter() {
let mut page_versions = PageVersions::default();
const BLOCKS: u32 = 1000;
const LSNS: u64 = 50;
for blknum in 0..BLOCKS {
for lsn in 0..LSNS {
let old = page_versions.append_or_update_last(blknum, Lsn(lsn), EMPTY_PAGE_VERSION);
assert!(old.is_none());
}
}
let mut iter = page_versions.ordered_page_version_iter(None);
for blknum in 0..BLOCKS {
for lsn in 0..LSNS {
let (actual_blknum, actual_lsn, _pv) = iter.next().unwrap();
assert_eq!(actual_blknum, blknum);
assert_eq!(Lsn(lsn), actual_lsn);
}
}
assert!(iter.next().is_none());
assert!(iter.next().is_none()); // should be robust against excessive next() calls
const CUTOFF_LSN: Lsn = Lsn(30);
let mut iter = page_versions.ordered_page_version_iter(Some(CUTOFF_LSN));
for blknum in 0..BLOCKS {
for lsn in 0..CUTOFF_LSN.0 {
let (actual_blknum, actual_lsn, _pv) = iter.next().unwrap();
assert_eq!(actual_blknum, blknum);
assert_eq!(Lsn(lsn), actual_lsn);
}
}
assert!(iter.next().is_none());
assert!(iter.next().is_none()); // should be robust against excessive next() calls
}
}

View File

@@ -1,174 +0,0 @@
//!
//! Common traits and structs for layers
//!
use crate::relish::RelishTag;
use crate::repository::WALRecord;
use crate::ZTimelineId;
use anyhow::Result;
use bytes::Bytes;
use serde::{Deserialize, Serialize};
use std::fmt;
use std::path::PathBuf;
use zenith_utils::lsn::Lsn;
// Size of one segment in pages (10 MB)
pub const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192;
///
/// Each relish stored in the repository is divided into fixed-sized "segments",
/// with 10 MB of key-space, or 1280 8k pages each.
///
#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy, Serialize, Deserialize)]
pub struct SegmentTag {
pub rel: RelishTag,
pub segno: u32,
}
impl fmt::Display for SegmentTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}.{}", self.rel, self.segno)
}
}
impl SegmentTag {
pub const fn from_blknum(rel: RelishTag, blknum: u32) -> SegmentTag {
SegmentTag {
rel,
segno: blknum / RELISH_SEG_SIZE,
}
}
pub fn blknum_in_seg(&self, blknum: u32) -> bool {
blknum / RELISH_SEG_SIZE == self.segno
}
}
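// Worked example (illustrative): with RELISH_SEG_SIZE = 1280, block 3000 of a
// relish maps to segno 3000 / 1280 = 2, so SegmentTag::from_blknum(rel, 3000)
// yields segment 2, and seg.blknum_in_seg(3000) is true only for that segment.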
///
/// Represents a version of a page at a specific LSN. The LSN is the key of the
/// entry in the 'page_versions' hash, it is not duplicated here.
///
/// A page version can be stored as a full page image, or as WAL record that needs
/// to be applied over the previous page version to reconstruct this version.
///
/// It's also possible to have both a WAL record and a page image in the same
/// PageVersion. That happens if a page version is originally stored as a WAL record,
/// but is later reconstructed by a GetPage@LSN request by performing WAL
/// redo. The get_page_at_lsn() code will store the reconstructed page image next to
/// the WAL record in that case. TODO: That's pretty accidental, not the result
/// of any grand design. If we want to keep reconstructed page versions around, we
/// probably should have a separate buffer cache so that we could control the
/// replacement policy globally. Or if we keep a reconstructed page image, we
/// could throw away the WAL record.
///
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PageVersion {
/// an 8kb page image
pub page_image: Option<Bytes>,
/// WAL record to get from previous page version to this one.
pub record: Option<WALRecord>,
}
///
/// Data needed to reconstruct a page version
///
/// 'page_img' is the old base image of the page to start the WAL replay with.
/// It can be None, if the first WAL record initializes the page (will_init)
/// 'records' contains the records to apply over the base image.
///
pub struct PageReconstructData {
pub records: Vec<(Lsn, WALRecord)>,
pub page_img: Option<Bytes>,
}
/// Return value from Layer::get_page_reconstruct_data
pub enum PageReconstructResult {
/// Got all the data needed to reconstruct the requested page
Complete,
/// This layer didn't contain all the required data, the caller should look up
/// the predecessor layer at the returned LSN and collect more data from there.
Continue(Lsn),
/// This layer didn't contain data needed to reconstruct the page version at
/// the returned LSN. This is usually considered an error, but might be OK
/// in some circumstances.
Missing(Lsn),
}
///
/// A Layer corresponds to one RELISH_SEG_SIZE slice of a relish in a range of LSNs.
/// There are two kinds of layers, in-memory and on-disk layers. In-memory
/// layers are used to ingest incoming WAL, and provide fast access
/// to the recent page versions. On-disk layers are stored as files on disk, and
/// are immutable. This trait presents the common functionality of
/// in-memory and on-disk layers.
///
pub trait Layer: Send + Sync {
/// Identify the timeline this relish belongs to
fn get_timeline_id(&self) -> ZTimelineId;
/// Identify the relish segment
fn get_seg_tag(&self) -> SegmentTag;
/// Inclusive start bound of the LSN range that this layer holds
fn get_start_lsn(&self) -> Lsn;
/// Exclusive end bound of the LSN range that this layer holds.
///
/// - For an open in-memory layer, this is MAX_LSN.
/// - For a frozen in-memory layer or a delta layer, this is a valid end bound.
/// - An image layer represents a snapshot at one LSN, so end_lsn is always the snapshot LSN + 1
fn get_end_lsn(&self) -> Lsn;
/// Is the segment represented by this layer dropped by PostgreSQL?
fn is_dropped(&self) -> bool;
/// Filename used to store this layer on disk. (Even in-memory layers
/// implement this, to print a handy unique identifier for the layer in
/// log messages, even though they are never stored on disk.)
fn filename(&self) -> PathBuf;
///
/// Return data needed to reconstruct given page at LSN.
///
/// It is up to the caller to collect more data from previous layer and
/// perform WAL redo, if necessary.
///
/// Note that the 'blknum' is the offset of the page from the beginning
/// of the *relish*, not the beginning of the segment. The requested
/// 'blknum' must be covered by this segment.
///
/// See PageReconstructResult for possible return values. The collected data
/// is appended to reconstruct_data; the caller should pass an empty struct
/// on first call. If this returns PageReconstructResult::Continue, look up
/// the predecessor layer and call again with the same 'reconstruct_data'
/// to collect more data.
fn get_page_reconstruct_data(
&self,
blknum: u32,
lsn: Lsn,
reconstruct_data: &mut PageReconstructData,
) -> Result<PageReconstructResult>;
/// Return size of the segment at given LSN. (Only for blocky relations.)
fn get_seg_size(&self, lsn: Lsn) -> Result<u32>;
/// Does the segment exist at the given LSN? Or was it dropped before that?
fn get_seg_exists(&self, lsn: Lsn) -> Result<bool>;
/// Does this layer only contain some data for the segment (incremental),
/// or does it contain a version of every page? This is important to know
/// for garbage collecting old layers: an incremental layer depends on
/// the previous non-incremental layer.
fn is_incremental(&self) -> bool;
/// Release memory used by this layer. There is no corresponding 'load'
/// function, that's done implicitly when you call one of the get-functions.
fn unload(&self) -> Result<()>;
/// Permanently remove this layer from disk.
fn delete(&self) -> Result<()>;
/// Dump summary of the contents of the layer to stdout
fn dump(&self) -> Result<()>;
}
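To make the Continue protocol concrete, here is a minimal sketch of a caller driving get_page_reconstruct_data(). The collect_for_page name and the predecessor_at closure are invented for illustration; the real code that locates the preceding layer lives in the layered repository:
#[allow(dead_code)]
fn collect_for_page(
    mut layer: std::sync::Arc<dyn Layer>,
    // Hypothetical helper: find the layer preceding the given LSN for this segment.
    predecessor_at: impl Fn(Lsn) -> Option<std::sync::Arc<dyn Layer>>,
    blknum: u32,
    lsn: Lsn,
) -> anyhow::Result<PageReconstructData> {
    let mut data = PageReconstructData {
        records: Vec::new(),
        page_img: None,
    };
    let mut cur_lsn = lsn;
    loop {
        match layer.get_page_reconstruct_data(blknum, cur_lsn, &mut data)? {
            PageReconstructResult::Complete => return Ok(data),
            PageReconstructResult::Continue(prev_lsn) => {
                // More data is needed from an older layer; keep the same 'data'.
                layer = predecessor_at(prev_lsn)
                    .ok_or_else(|| anyhow::anyhow!("no predecessor layer at {}", prev_lsn))?;
                cur_lsn = prev_lsn;
            }
            PageReconstructResult::Missing(missing_lsn) => {
                anyhow::bail!("page version missing at {}", missing_lsn)
            }
        }
    }
}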

View File

@@ -1,209 +1,26 @@
use zenith_utils::postgres_backend::AuthType;
use zenith_utils::zid::{ZTenantId, ZTimelineId};
use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::Duration;
use lazy_static::lazy_static;
use zenith_metrics::{register_int_gauge_vec, IntGaugeVec};
pub mod basebackup;
pub mod branches;
pub mod http;
pub mod layered_repository;
pub mod controlfile;
pub mod page_cache;
pub mod page_service;
pub mod relish;
pub mod relish_storage;
pub mod repository;
pub mod restore_local_repo;
pub mod tenant_mgr;
#[allow(dead_code)]
pub mod pg_constants;
pub mod restore_s3;
pub mod tui;
pub mod tui_event;
mod tui_logger;
pub mod waldecoder;
pub mod walreceiver;
pub mod walredo;
pub mod defaults {
use const_format::formatcp;
use std::time::Duration;
pub const DEFAULT_PG_LISTEN_PORT: u16 = 64000;
pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}");
pub const DEFAULT_HTTP_LISTEN_PORT: u16 = 9898;
pub const DEFAULT_HTTP_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_HTTP_LISTEN_PORT}");
// FIXME: This current value is very low. I would imagine something like 1 GB or 10 GB
// would be more appropriate. But a low value forces the code to be exercised more,
// which is good for now to trigger bugs.
pub const DEFAULT_CHECKPOINT_DISTANCE: u64 = 256 * 1024 * 1024;
pub const DEFAULT_CHECKPOINT_PERIOD: Duration = Duration::from_secs(1);
pub const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024;
pub const DEFAULT_GC_PERIOD: Duration = Duration::from_secs(100);
pub const DEFAULT_SUPERUSER: &str = "zenith_admin";
pub const DEFAULT_RELISH_STORAGE_MAX_CONCURRENT_SYNC_LIMITS: usize = 100;
}
lazy_static! {
static ref LIVE_CONNECTIONS_COUNT: IntGaugeVec = register_int_gauge_vec!(
"pageserver_live_connections_count",
"Number of live network connections",
&["pageserver_connection_kind"]
)
.expect("failed to define a metric");
}
pub const LOG_FILE_NAME: &str = "pageserver.log";
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct PageServerConf {
pub data_dir: PathBuf,
pub daemonize: bool,
pub listen_pg_addr: String,
pub listen_http_addr: String,
// Flush out an inmemory layer, if it's holding WAL older than this
// This puts a backstop on how much WAL needs to be re-digested if the
// page server crashes.
pub checkpoint_distance: u64,
pub checkpoint_period: Duration,
pub gc_horizon: u64,
pub gc_period: Duration,
pub superuser: String,
// Repository directory, relative to current working directory.
// Normally, the page server changes the current working directory
// to the repository, and 'workdir' is always '.'. But we don't do
// that during unit testing, because the current directory is global
// to the process but different unit tests work on different
// repositories.
pub workdir: PathBuf,
pub pg_distrib_dir: PathBuf,
pub auth_type: AuthType,
pub auth_validation_public_key_path: Option<PathBuf>,
pub relish_storage_config: Option<RelishStorageConfig>,
}
impl PageServerConf {
//
// Repository paths, relative to workdir.
//
fn tenants_path(&self) -> PathBuf {
self.workdir.join("tenants")
}
fn tenant_path(&self, tenantid: &ZTenantId) -> PathBuf {
self.tenants_path().join(tenantid.to_string())
}
fn tags_path(&self, tenantid: &ZTenantId) -> PathBuf {
self.tenant_path(tenantid).join("refs").join("tags")
}
fn tag_path(&self, tag_name: &str, tenantid: &ZTenantId) -> PathBuf {
self.tags_path(tenantid).join(tag_name)
}
fn branches_path(&self, tenantid: &ZTenantId) -> PathBuf {
self.tenant_path(tenantid).join("refs").join("branches")
}
fn branch_path(&self, branch_name: &str, tenantid: &ZTenantId) -> PathBuf {
self.branches_path(tenantid).join(branch_name)
}
fn timelines_path(&self, tenantid: &ZTenantId) -> PathBuf {
self.tenant_path(tenantid).join("timelines")
}
fn timeline_path(&self, timelineid: &ZTimelineId, tenantid: &ZTenantId) -> PathBuf {
self.timelines_path(tenantid).join(timelineid.to_string())
}
fn ancestor_path(&self, timelineid: &ZTimelineId, tenantid: &ZTenantId) -> PathBuf {
self.timeline_path(timelineid, tenantid).join("ancestor")
}
//
// Postgres distribution paths
//
pub fn pg_bin_dir(&self) -> PathBuf {
self.pg_distrib_dir.join("bin")
}
pub fn pg_lib_dir(&self) -> PathBuf {
self.pg_distrib_dir.join("lib")
}
#[cfg(test)]
fn test_repo_dir(test_name: &str) -> PathBuf {
PathBuf::from(format!("../tmp_check/test_{}", test_name))
}
#[cfg(test)]
fn dummy_conf(repo_dir: PathBuf) -> Self {
PageServerConf {
daemonize: false,
checkpoint_distance: defaults::DEFAULT_CHECKPOINT_DISTANCE,
checkpoint_period: Duration::from_secs(10),
gc_horizon: defaults::DEFAULT_GC_HORIZON,
gc_period: Duration::from_secs(10),
listen_pg_addr: defaults::DEFAULT_PG_LISTEN_ADDR.to_string(),
listen_http_addr: defaults::DEFAULT_HTTP_LISTEN_ADDR.to_string(),
superuser: "zenith_admin".to_string(),
workdir: repo_dir,
pg_distrib_dir: "".into(),
auth_type: AuthType::Trust,
auth_validation_public_key_path: None,
relish_storage_config: None,
}
}
}
/// External relish storage configuration, enough for creating a client for that storage.
#[derive(Debug, Clone)]
pub struct RelishStorageConfig {
/// Limits the number of concurrent sync operations between pageserver and relish storage.
pub max_concurrent_sync: usize,
/// The storage connection configuration.
pub storage: RelishStorageKind,
}
/// A kind of a relish storage to connect to, with its connection configuration.
#[derive(Debug, Clone)]
pub enum RelishStorageKind {
/// Storage based on local file system.
/// Specify a root folder to place all stored relish data into.
LocalFs(PathBuf),
/// AWS S3 based storage, storing all relishes into the root
/// of the S3 bucket from the config.
AwsS3(S3Config),
}
/// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write).
#[derive(Clone)]
pub struct S3Config {
/// Name of the bucket to connect to.
pub bucket_name: String,
/// The region where the bucket is located at.
pub bucket_region: String,
/// "Login" to use when connecting to bucket.
/// Can be empty for cases like AWS k8s IAM
/// where we can allow certain pods to connect
/// to the bucket directly without any credentials.
pub access_key_id: Option<String>,
/// "Password" to use when connecting to bucket.
pub secret_access_key: Option<String>,
}
impl std::fmt::Debug for S3Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("S3Config")
.field("bucket_name", &self.bucket_name)
.field("bucket_region", &self.bucket_region)
.finish()
}
pub interactive: bool,
pub wal_producer_connstr: Option<String>,
pub listen_addr: SocketAddr,
pub skip_recovery: bool,
}

View File

@@ -0,0 +1,793 @@
//
// Page Cache holds all the different page versions and WAL records
//
// The Page Cache is a BTreeMap, keyed by the RelFileNode and block number, and the LSN.
// The BTreeMap is protected by a Mutex, and each cache entry is protected by another
// per-entry mutex.
//
use core::ops::Bound::Included;
use std::collections::{BTreeMap, HashMap};
use std::{convert::TryInto, ops::AddAssign};
use std::error::Error;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;
// use tokio::sync::RwLock;
use bytes::Bytes;
use lazy_static::lazy_static;
use log::*;
use rand::Rng;
use crate::{controlfile, walredo, PageServerConf};
use crossbeam_channel::unbounded;
use crossbeam_channel::{Receiver, Sender};
// Timeout when waiting for the WAL receiver to catch up to an LSN given in a GetPage@LSN call.
static TIMEOUT: Duration = Duration::from_secs(60);
pub struct PageCache {
shared: Mutex<PageCacheShared>,
// Channel for communicating with the WAL redo process here.
pub walredo_sender: Sender<Arc<CacheEntry>>,
pub walredo_receiver: Receiver<Arc<CacheEntry>>,
valid_lsn_condvar: Condvar,
// Counters, for metrics collection.
pub num_entries: AtomicU64,
pub num_page_images: AtomicU64,
pub num_wal_records: AtomicU64,
pub num_getpage_requests: AtomicU64,
// copies of shared.first/last_valid_lsn fields (copied here so
// that they can be read without acquiring the mutex).
pub first_valid_lsn: AtomicU64,
pub last_valid_lsn: AtomicU64,
pub last_record_lsn: AtomicU64,
}
#[derive(Clone)]
pub struct PageCacheStats {
pub num_entries: u64,
pub num_page_images: u64,
pub num_wal_records: u64,
pub num_getpage_requests: u64,
pub first_valid_lsn: u64,
pub last_valid_lsn: u64,
pub last_record_lsn: u64,
}
impl AddAssign for PageCacheStats {
fn add_assign(&mut self, other: Self) {
*self = Self {
num_entries: self.num_entries + other.num_entries,
num_page_images: self.num_page_images + other.num_page_images,
num_wal_records: self.num_wal_records + other.num_wal_records,
num_getpage_requests: self.num_getpage_requests + other.num_getpage_requests,
first_valid_lsn: self.first_valid_lsn + other.first_valid_lsn,
last_valid_lsn: self.last_valid_lsn + other.last_valid_lsn,
last_record_lsn: self.last_record_lsn + other.last_record_lsn,
}
}
}
//
// Shared data structure, holding page cache and related auxiliary information
//
struct PageCacheShared {
// The actual page cache
pagecache: BTreeMap<CacheKey, Arc<CacheEntry>>,
// Relation n_blocks cache
//
// This hashtable should be updated together with the pagecache. Currently it is
// accessed unreasonably often through smgr_nblocks(). It would be better to
// cache it in the postgres smgr and only ask here on restart.
relsize_cache: HashMap<RelTag, u32>,
// What page versions do we hold in the cache? If we get GetPage with
// LSN < first_valid_lsn, that's an error because we (no longer) hold that
// page version. If we get a request > last_valid_lsn, we need to wait until
// we receive all the WAL up to the request.
//
// last_record_lsn points to the end of last processed WAL record.
// It can lag behind last_valid_lsn, if the WAL receiver has received some WAL
// after the end of last record, but not the whole next record yet. In the
// page cache, we care about last_valid_lsn, but if the WAL receiver needs to
// restart the streaming, it needs to restart at the end of last record, so
// we track them separately. last_record_lsn should perhaps be in
// walreceiver.rs instead of here, but it seems convenient to keep all three
// values together.
//
first_valid_lsn: u64,
last_valid_lsn: u64,
last_record_lsn: u64,
controldata: controlfile::ControlFileDataZenith,
}
lazy_static! {
pub static ref PAGECACHES: Mutex<HashMap<u64, Arc<PageCache>>> = Mutex::new(HashMap::new());
}
pub fn get_pagecache(conf: PageServerConf, sys_id: u64) -> Arc<PageCache> {
let mut pcaches = PAGECACHES.lock().unwrap();
if !pcaches.contains_key(&sys_id) {
pcaches.insert(sys_id, Arc::new(init_page_cache()));
// Initialize the WAL redo thread
//
// The join_handle is currently not saved anywhere, so we won't restart the thread
// if it dies. We may later stop these threads after some inactivity period
// and restart them on demand.
let _walredo_thread = thread::Builder::new()
.name("WAL redo thread".into())
.spawn(move || {
walredo::wal_redo_main(conf, sys_id);
})
.unwrap();
}
pcaches.get(&sys_id).unwrap().clone()
}
fn init_page_cache() -> PageCache {
// Initialize the channel between the page cache and the WAL applicator
let (s, r) = unbounded();
PageCache {
shared: Mutex::new(PageCacheShared {
pagecache: BTreeMap::new(),
relsize_cache: HashMap::new(),
first_valid_lsn: 0,
last_valid_lsn: 0,
last_record_lsn: 0,
controldata: controlfile::ControlFileDataZenith::new(),
}),
valid_lsn_condvar: Condvar::new(),
walredo_sender: s,
walredo_receiver: r,
num_entries: AtomicU64::new(0),
num_page_images: AtomicU64::new(0),
num_wal_records: AtomicU64::new(0),
num_getpage_requests: AtomicU64::new(0),
first_valid_lsn: AtomicU64::new(0),
last_valid_lsn: AtomicU64::new(0),
last_record_lsn: AtomicU64::new(0),
}
}
//
// We store two kinds of entries in the page cache:
//
// 1. Ready-made images of the block
// 2. WAL records, to be applied on top of the "previous" entry
//
// Some WAL records will initialize the page from scratch. For such records,
// the 'will_init' flag is set. They don't need the previous page image before
// applying. The 'will_init' flag is set for records containing a full-page image,
// and for records with the BKPBLOCK_WILL_INIT flag. These differ from PageImages
// stored directly in the cache entry in that you still need to run the WAL redo
// routine to generate the page image.
//
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)]
pub struct CacheKey {
pub tag: BufferTag,
pub lsn: u64,
}
pub struct CacheEntry {
pub key: CacheKey,
pub content: Mutex<CacheEntryContent>,
// Condition variable used by the WAL redo service, to wake up
// requester.
//
// FIXME: this takes quite a lot of space. Consider using parking_lot::Condvar
// or something else.
pub walredo_condvar: Condvar,
}
pub struct CacheEntryContent {
pub page_image: Option<Bytes>,
pub wal_record: Option<WALRecord>,
pub apply_pending: bool,
}
impl CacheEntry {
fn new(key: CacheKey) -> CacheEntry {
CacheEntry {
key: key,
content: Mutex::new(CacheEntryContent {
page_image: None,
wal_record: None,
apply_pending: false,
}),
walredo_condvar: Condvar::new(),
}
}
}
#[derive(Eq, PartialEq, Hash, Clone, Copy, Debug)]
pub struct RelTag {
pub spcnode: u32,
pub dbnode: u32,
pub relnode: u32,
pub forknum: u8,
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
pub struct BufferTag {
pub spcnode: u32,
pub dbnode: u32,
pub relnode: u32,
pub forknum: u8,
pub blknum: u32,
}
#[derive(Clone)]
pub struct WALRecord {
pub lsn: u64, // LSN at the *end* of the record
pub will_init: bool,
pub rec: Bytes,
}
// Public interface functions
impl PageCache {
pub fn get_nonrel_page(&self, tag: BufferTag, _reqlsn: u64) -> Result<Bytes, Box<dyn Error>> {
self.num_getpage_requests.fetch_add(1, Ordering::Relaxed);
// We don't have versioning for non-rel pages yet.
// Also, at bootstrap we don't know the LSN for some files,
// so always request the very latest version.
// let lsn = reqlsn;
let lsn = u64::MAX;
let minkey = CacheKey { tag: tag, lsn: 0 };
// Look up to the largest lsn
let maxkey = CacheKey { tag: tag, lsn: lsn };
let entry_rc: Arc<CacheEntry>;
{
let shared = self.shared.lock().unwrap();
let pagecache = &shared.pagecache;
info!("got pagecache {}", pagecache.len());
let mut entries = pagecache.range((Included(&minkey), Included(&maxkey)));
let entry_opt = entries.next_back();
if entry_opt.is_none() {
return Err(format!(
"not found non-rel page with LSN {} for {}/{}/{}.{} blk {}",
lsn, tag.spcnode, tag.dbnode, tag.relnode, tag.forknum, tag.blknum
))?;
}
info!(
"found non-rel page with LSN {} for {}/{}/{}.{} blk {}",
lsn, tag.spcnode, tag.dbnode, tag.relnode, tag.forknum, tag.blknum
);
let (_key, entry) = entry_opt.unwrap();
entry_rc = entry.clone();
// Now that we have a reference to the cache entry, drop the lock on the map.
// It's important to do this before waiting on the condition variable below,
// and better to do it as soon as possible to maximize concurrency.
}
// Lock the cache entry and dig the page image out of it.
let page_img: Bytes;
{
let entry_content = entry_rc.content.lock().unwrap();
if let Some(img) = &entry_content.page_image {
assert!(!entry_content.apply_pending);
page_img = img.clone();
} else if entry_content.wal_record.is_some() {
return Err("non-rel WAL redo is not implemented yet".into());
//
// If this page needs to be reconstructed by applying some WAL,
// send a request to the WAL redo thread.
//
// if !entry_content.apply_pending {
// assert!(!entry_content.apply_pending);
// entry_content.apply_pending = true;
// let s = &self.walredo_sender;
// s.send(entry_rc.clone())?;
// }
// while entry_content.apply_pending {
// entry_content = entry_rc.walredo_condvar.wait(entry_content).unwrap();
//}
// We should now have a page image. If we don't, it means that WAL redo
// failed to reconstruct it. WAL redo should've logged that error already.
// page_img = match &entry_content.page_image {
// Some(p) => p.clone(),
// None => {
// error!("could not apply WAL to reconstruct page image for GetPage@LSN request");
// return Err("could not apply WAL to reconstruct page image".into());
// }
// };
} else {
// No base image, and no WAL record. Huh?
return Err(format!("no page image or WAL record for requested page"))?;
}
}
trace!(
"Returning page for {}/{}/{}.{} blk {}",
tag.spcnode,
tag.dbnode,
tag.relnode,
tag.forknum,
tag.blknum
);
return Ok(page_img);
}
//
// GetPage@LSN
//
// Returns an 8k page image
//
pub fn get_page_at_lsn(&self, tag: BufferTag, reqlsn: u64) -> Result<Bytes, Box<dyn Error>> {
let mut lsn = reqlsn;
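// Forknum values above 40 are the special non-rel forks defined in pg_constants.rs
// (pg_control, filenode map, clog and multixact SLRUs); route those requests
// to the non-versioned non-rel page path.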
if tag.forknum > 40 {
info!(
"get_page_at_lsn got request for page with LSN {} for {}/{}/{}.{} blk {}",
lsn, tag.spcnode, tag.dbnode, tag.relnode, tag.forknum, tag.blknum
);
return self.get_nonrel_page(tag, lsn);
}
if reqlsn == 0 {
let c = self.get_controldata();
lsn = c.checkPoint;
info!("update reqlsn get_page_at_lsn got request for page with LSN {} for {}/{}/{}.{} blk {}", lsn,
tag.spcnode, tag.dbnode, tag.relnode, tag.forknum, tag.blknum);
}
self.num_getpage_requests.fetch_add(1, Ordering::Relaxed);
// Look up cache entry. If it's a page image, return that. If it's a WAL record,
// ask the WAL redo service to reconstruct the page image from the WAL records.
let minkey = CacheKey { tag: tag, lsn: 0 };
let maxkey = CacheKey { tag: tag, lsn: lsn };
let entry_rc: Arc<CacheEntry>;
{
let mut shared = self.shared.lock().unwrap();
let mut waited = false;
// When the server has just started and created the checkpoint LSN,
// but we have not yet established the WAL receiver connection,
// the requested LSN may be larger than the one we have.
while lsn > shared.last_valid_lsn + 500 {
// TODO: Wait for the WAL receiver to catch up
waited = true;
trace!(
"not caught up yet: {}, requested {}",
shared.last_valid_lsn,
lsn
);
let wait_result = self
.valid_lsn_condvar
.wait_timeout(shared, TIMEOUT)
.unwrap();
shared = wait_result.0;
if wait_result.1.timed_out() {
return Err(format!(
"Timed out while waiting for WAL record at LSN {} to arrive",
lsn
))?;
}
}
if waited {
trace!("caught up now, continuing");
}
if lsn < shared.first_valid_lsn {
return Err(format!("LSN {} has already been removed", lsn))?;
}
let pagecache = &shared.pagecache;
let mut entries = pagecache.range((Included(&minkey), Included(&maxkey)));
let entry_opt = entries.next_back();
if entry_opt.is_none() {
//static ZERO_PAGE:[u8; 8192] = [0 as u8; 8192];
//return Ok(Bytes::from_static(&ZERO_PAGE));
return Err("could not find page image")?;
}
let (_key, entry) = entry_opt.unwrap();
entry_rc = entry.clone();
// Now that we have a reference to the cache entry, drop the lock on the map.
// It's important to do this before waiting on the condition variable below,
// and better to do it as soon as possible to maximize concurrency.
}
// Lock the cache entry and dig the page image out of it.
let page_img: Bytes;
{
let mut entry_content = entry_rc.content.lock().unwrap();
if let Some(img) = &entry_content.page_image {
assert!(!entry_content.apply_pending);
page_img = img.clone();
} else if entry_content.wal_record.is_some() {
//
// If this page needs to be reconstructed by applying some WAL,
// send a request to the WAL redo thread.
//
if !entry_content.apply_pending {
assert!(!entry_content.apply_pending);
entry_content.apply_pending = true;
let s = &self.walredo_sender;
s.send(entry_rc.clone())?;
}
while entry_content.apply_pending {
entry_content = entry_rc.walredo_condvar.wait(entry_content).unwrap();
}
// We should now have a page image. If we don't, it means that WAL redo
// failed to reconstruct it. WAL redo should've logged that error already.
page_img = match &entry_content.page_image {
Some(p) => p.clone(),
None => {
error!(
"could not apply WAL to reconstruct page image for GetPage@LSN request"
);
return Err("could not apply WAL to reconstruct page image".into());
}
};
} else {
// No base image, and no WAL record. Huh?
return Err(format!("no page image or WAL record for requested page"))?;
}
}
// FIXME: assumes little-endian. Only used for the debugging log though
let page_lsn_hi = u32::from_le_bytes(page_img.get(0..4).unwrap().try_into().unwrap());
let page_lsn_lo = u32::from_le_bytes(page_img.get(4..8).unwrap().try_into().unwrap());
trace!(
"Returning page with LSN {:X}/{:X} for {}/{}/{}.{} blk {}",
page_lsn_hi,
page_lsn_lo,
tag.spcnode,
tag.dbnode,
tag.relnode,
tag.forknum,
tag.blknum
);
return Ok(page_img);
}
//
// Collect all the WAL records that are needed to reconstruct a page
// image for the given cache entry.
//
// Returns an old page image (if any), and a vector of WAL records to apply
// over it.
//
pub fn collect_records_for_apply(&self, entry: &CacheEntry) -> (Option<Bytes>, Vec<WALRecord>) {
// Scan the BTreeMap backwards, starting from the given entry.
let shared = self.shared.lock().unwrap();
let pagecache = &shared.pagecache;
let minkey = CacheKey {
tag: entry.key.tag,
lsn: 0,
};
let maxkey = CacheKey {
tag: entry.key.tag,
lsn: entry.key.lsn,
};
let entries = pagecache.range((Included(&minkey), Included(&maxkey)));
// the last entry in the range should be the CacheEntry we were given
//let _last_entry = entries.next_back();
//assert!(last_entry == entry);
let mut base_img: Option<Bytes> = None;
let mut records: Vec<WALRecord> = Vec::new();
// Scan backwards, collecting the WAL records, until we hit an
// old page image.
for (_key, e) in entries.rev() {
let e = e.content.lock().unwrap();
if let Some(img) = &e.page_image {
// We have a base image. No need to dig deeper into the list of
// records
base_img = Some(img.clone());
break;
} else if let Some(rec) = &e.wal_record {
records.push(rec.clone());
// If this WAL record initializes the page, no need to dig deeper.
if rec.will_init {
break;
}
} else {
panic!("no base image and no WAL record on cache entry");
}
}
records.reverse();
return (base_img, records);
}
//
// Adds a WAL record to the page cache
//
pub fn put_wal_record(&self, tag: BufferTag, rec: WALRecord) {
let key = CacheKey {
tag: tag,
lsn: rec.lsn,
};
let entry = CacheEntry::new(key.clone());
entry.content.lock().unwrap().wal_record = Some(rec);
let mut shared = self.shared.lock().unwrap();
let rel_tag = RelTag {
spcnode: tag.spcnode,
dbnode: tag.dbnode,
relnode: tag.relnode,
forknum: tag.forknum,
};
let rel_entry = shared.relsize_cache.entry(rel_tag).or_insert(0);
if tag.blknum >= *rel_entry {
*rel_entry = tag.blknum + 1;
}
trace!("put_wal_record lsn: {}", key.lsn);
let oldentry = shared.pagecache.insert(key, Arc::new(entry));
self.num_entries.fetch_add(1, Ordering::Relaxed);
if oldentry.is_some() {
error!("overwriting WAL record in page cache");
}
self.num_wal_records.fetch_add(1, Ordering::Relaxed);
}
//
// Memorize a full image of a page version
//
pub fn put_page_image(&self, tag: BufferTag, lsn: u64, img: Bytes) {
let key = CacheKey { tag: tag, lsn: lsn };
let entry = CacheEntry::new(key.clone());
entry.content.lock().unwrap().page_image = Some(img);
let mut shared = self.shared.lock().unwrap();
let pagecache = &mut shared.pagecache;
let oldentry = pagecache.insert(key, Arc::new(entry));
self.num_entries.fetch_add(1, Ordering::Relaxed);
assert!(oldentry.is_none());
debug!(
"inserted page image for {}/{}/{}_{} blk {} at {}",
tag.spcnode, tag.dbnode, tag.relnode, tag.forknum, tag.blknum, lsn
);
self.num_page_images.fetch_add(1, Ordering::Relaxed);
}
//
pub fn advance_last_valid_lsn(&self, lsn: u64) {
let mut shared = self.shared.lock().unwrap();
// Can't move backwards.
assert!(lsn >= shared.last_valid_lsn);
shared.last_valid_lsn = lsn;
self.valid_lsn_condvar.notify_all();
self.last_valid_lsn.store(lsn, Ordering::Relaxed);
}
//
// NOTE: this updates last_valid_lsn as well.
//
pub fn advance_last_record_lsn(&self, lsn: u64) {
let mut shared = self.shared.lock().unwrap();
// Can't move backwards.
assert!(lsn >= shared.last_valid_lsn);
assert!(lsn >= shared.last_record_lsn);
shared.last_valid_lsn = lsn;
shared.last_record_lsn = lsn;
self.valid_lsn_condvar.notify_all();
self.last_valid_lsn.store(lsn, Ordering::Relaxed);
self.last_record_lsn.store(lsn, Ordering::Relaxed);
}
//
pub fn _advance_first_valid_lsn(&self, lsn: u64) {
let mut shared = self.shared.lock().unwrap();
// Can't move backwards.
assert!(lsn >= shared.first_valid_lsn);
// Can't overtake last_valid_lsn (except when we're
// initializing the system and last_valid_lsn hasn't been set yet).
assert!(shared.last_valid_lsn == 0 || lsn < shared.last_valid_lsn);
shared.first_valid_lsn = lsn;
self.first_valid_lsn.store(lsn, Ordering::Relaxed);
}
pub fn init_valid_lsn(&self, lsn: u64) {
let mut shared = self.shared.lock().unwrap();
assert!(shared.first_valid_lsn == 0);
assert!(shared.last_valid_lsn == 0);
assert!(shared.last_record_lsn == 0);
shared.first_valid_lsn = lsn;
shared.last_valid_lsn = lsn;
shared.last_record_lsn = lsn;
self.first_valid_lsn.store(lsn, Ordering::Relaxed);
self.last_valid_lsn.store(lsn, Ordering::Relaxed);
self.last_record_lsn.store(lsn, Ordering::Relaxed);
}
pub fn get_last_valid_lsn(&self) -> u64 {
let shared = self.shared.lock().unwrap();
return shared.last_record_lsn;
}
pub fn set_controldata(&self, c: controlfile::ControlFileDataZenith) {
let mut shared = self.shared.lock().unwrap();
shared.controldata = c;
}
pub fn get_controldata(&self) -> controlfile::ControlFileDataZenith {
let shared = self.shared.lock().unwrap();
return shared.controldata.clone();
}
//
// Simple test function for the WAL redo code:
//
// 1. Pick a page from the page cache at random.
// 2. Request that page with GetPage@LSN, using Max LSN (i.e. get the latest page version)
//
//
pub fn _test_get_page_at_lsn(&self) {
// for quick testing of the get_page_at_lsn() function.
//
// Get a random page from the page cache. Apply all its WAL, by requesting
// that page at the highest lsn.
let mut tag: Option<BufferTag> = None;
{
let shared = self.shared.lock().unwrap();
let pagecache = &shared.pagecache;
if pagecache.is_empty() {
info!("page cache is empty");
return;
}
// Find nth entry in the map, where n is picked at random
let n = rand::thread_rng().gen_range(0..pagecache.len());
let mut i = 0;
for (key, _e) in pagecache.iter() {
if i == n {
tag = Some(key.tag);
break;
}
i += 1;
}
}
info!("testing GetPage@LSN for block {}", tag.unwrap().blknum);
match self.get_page_at_lsn(tag.unwrap(), 0xffff_ffff_ffff_eeee) {
Ok(_img) => {
// This prints out the whole page image.
//println!("{:X?}", img);
}
Err(error) => {
error!("GetPage@LSN failed: {}", error);
}
}
}
// FIXME: Shouldn't relation size also be tracked with an LSN?
// If a replica is lagging behind, it needs to get the size as it was on
// the replica's current replay LSN.
pub fn relsize_inc(&self, rel: &RelTag, to: Option<u32>) {
let mut shared = self.shared.lock().unwrap();
let entry = shared.relsize_cache.entry(*rel).or_insert(0);
if let Some(to) = to {
if to >= *entry {
*entry = to + 1;
}
}
trace!("relsize_inc {:?} to {}", rel, entry);
}
pub fn relsize_get(&self, rel: &RelTag) -> u32 {
let mut shared = self.shared.lock().unwrap();
let entry = shared.relsize_cache.entry(*rel).or_insert(0);
*entry
}
pub fn relsize_exist(&self, rel: &RelTag) -> bool {
let shared = self.shared.lock().unwrap();
let relsize_cache = &shared.relsize_cache;
relsize_cache.contains_key(rel)
}
pub fn get_stats(&self) -> PageCacheStats {
PageCacheStats {
num_entries: self.num_entries.load(Ordering::Relaxed),
num_page_images: self.num_page_images.load(Ordering::Relaxed),
num_wal_records: self.num_wal_records.load(Ordering::Relaxed),
num_getpage_requests: self.num_getpage_requests.load(Ordering::Relaxed),
first_valid_lsn: self.first_valid_lsn.load(Ordering::Relaxed),
last_valid_lsn: self.last_valid_lsn.load(Ordering::Relaxed),
last_record_lsn: self.last_record_lsn.load(Ordering::Relaxed),
}
}
}
pub fn get_stats() -> PageCacheStats {
let pcaches = PAGECACHES.lock().unwrap();
let mut stats = PageCacheStats {
num_entries: 0,
num_page_images: 0,
num_wal_records: 0,
num_getpage_requests: 0,
first_valid_lsn: 0,
last_valid_lsn: 0,
last_record_lsn: 0,
};
pcaches.iter().for_each(|(_sys_id, pcache)| {
stats += pcache.get_stats();
});
stats
}
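For orientation, here is a minimal sketch of how a page image flows through the cache. It is illustrative only: the function name, tag values and LSN are invented, and the real producers and consumers are the WAL receiver and the page service. WAL records follow the same path via put_wal_record() and advance_last_record_lsn(), with WAL redo applied at read time.
#[allow(dead_code)]
fn page_cache_flow_sketch(cache: &PageCache) -> Result<(), Box<dyn std::error::Error>> {
    let tag = BufferTag {
        spcnode: 1663, // DEFAULTTABLESPACE_OID
        dbnode: 13000,
        relnode: 16384,
        forknum: 0,
        blknum: 0,
    };
    // Ingest side: store a full 8k page image at some LSN, then advance the
    // last valid LSN so that any reader waiting on the condvar is woken up.
    let lsn = 0x0100_0028u64;
    cache.put_page_image(tag, lsn, bytes::Bytes::from(vec![0u8; 8192]));
    cache.advance_last_valid_lsn(lsn);
    // GetPage@LSN side: the cached image is returned directly; if only WAL
    // records were cached, the request would go through the WAL redo thread.
    let _page = cache.get_page_at_lsn(tag, lsn)?;
    Ok(())
}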

File diff suppressed because it is too large

View File

@@ -0,0 +1,11 @@
// From pg_tablespace_d.h
//
pub const DEFAULTTABLESPACE_OID: u32 = 1663;
pub const GLOBALTABLESPACE_OID: u32 = 1664;
//Special values for non-rel files' tags
//TODO maybe use enum?
pub const PG_CONTROLFILE_FORKNUM: u32 = 42;
pub const PG_FILENODEMAP_FORKNUM: u32 = 43;
pub const PG_XACT_FORKNUM: u32 = 44;
pub const PG_MXACT_OFFSETS_FORKNUM: u32 = 45;
pub const PG_MXACT_MEMBERS_FORKNUM: u32 = 46;

View File

@@ -1,231 +0,0 @@
//!
//! Zenith stores PostgreSQL relations, and some other files, in the
//! repository. The relations (i.e. tables and indexes) take up most
//! of the space in a typical installation, while the other files are
//! small. We call each relation and other file that is stored in the
//! repository a "relish". It comes from "rel"-ish, as in "kind of a
//! rel", because it covers relations as well as other things that are
//! not relations, but are treated similarly for the purposes of the
//! storage layer.
//!
//! This source file contains the definition of the RelishTag struct,
//! which uniquely identifies a relish.
//!
//! Relishes come in two flavors: blocky and non-blocky. Relations and
//! SLRUs are blocky, that is, they are divided into 8k blocks, and
//! the repository tracks their size. Other relishes are non-blocky:
//! the content of the whole relish is stored as one blob. Block
//! number must be passed as 0 for all operations on a non-blocky
//! relish. The one "block" that you store in a non-blocky relish can
//! have arbitrary size, but it is expected to be small, or you
//! will run into performance issues.
//!
//! All relishes are versioned by LSN in the repository.
//!
use serde::{Deserialize, Serialize};
use std::fmt;
use postgres_ffi::relfile_utils::forknumber_to_name;
use postgres_ffi::{Oid, TransactionId};
///
/// RelishTag identifies one relish.
///
#[derive(Debug, Clone, Copy, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub enum RelishTag {
// Relations correspond to PostgreSQL relation forks. Each
// PostgreSQL relation fork is considered a separate relish.
Relation(RelTag),
// SLRUs include pg_clog, pg_multixact/members, and
// pg_multixact/offsets. There are other SLRUs in PostgreSQL, but
// they don't need to be stored permanently (e.g. pg_subtrans),
// or we do not support them in zenith yet (pg_commit_ts).
//
// These are currently never requested directly by the compute
// nodes, although in principle that would be possible. However,
// when a new compute node is created, these are included in the
// tarball that we send to the compute node to initialize the
// PostgreSQL data directory.
//
// Each SLRU segment in PostgreSQL is considered a separate
// relish. For example, pg_clog/0000, pg_clog/0001, and so forth.
//
// SLRU segments are divided into blocks, like relations.
Slru { slru: SlruKind, segno: u32 },
// Miscellaneous other files that need to be included in the
// tarball at compute node creation. These are non-blocky, and are
// expected to be small.
//
// FileNodeMap represents PostgreSQL's 'pg_filenode.map'
// files. They are needed to map catalog table OIDs to filenode
// numbers. Usually the mapping is done by looking up a relation's
// 'relfilenode' field in the 'pg_class' system table, but that
// doesn't work for 'pg_class' itself and a few other such system
// relations. See PostgreSQL relmapper.c for details.
//
// Each database has a map file for its local mapped catalogs,
// and there is a separate map file for shared catalogs.
//
// These files are always 512 bytes long (although we don't check
// or care about that in the page server).
//
FileNodeMap { spcnode: Oid, dbnode: Oid },
//
// State files for prepared transactions (e.g pg_twophase/1234)
//
TwoPhase { xid: TransactionId },
// The control file, stored in global/pg_control
ControlFile,
// Special entry that represents PostgreSQL checkpoint. It doesn't
// correspond to any physical file in PostgreSQL, but we use it
// to track fields needed to restore the checkpoint data in the
// control file, when a compute node is created.
Checkpoint,
}
impl RelishTag {
pub const fn is_blocky(&self) -> bool {
match self {
// These relishes work with blocks
RelishTag::Relation(_) | RelishTag::Slru { slru: _, segno: _ } => true,
// and these don't
RelishTag::FileNodeMap {
spcnode: _,
dbnode: _,
}
| RelishTag::TwoPhase { xid: _ }
| RelishTag::ControlFile
| RelishTag::Checkpoint => false,
}
}
// Physical relishes represent files and use
// RelationSizeEntry to track existing and dropped files.
// They can be both blocky and non-blocky.
pub const fn is_physical(&self) -> bool {
match self {
// These relishes represent physical files
RelishTag::Relation(_)
| RelishTag::Slru { .. }
| RelishTag::FileNodeMap { .. }
| RelishTag::TwoPhase { .. } => true,
// and these don't
RelishTag::ControlFile | RelishTag::Checkpoint => false,
}
}
// convenience function to check if this relish is a normal relation.
pub const fn is_relation(&self) -> bool {
matches!(self, RelishTag::Relation(_))
}
}
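// Illustrative sketch, not part of the original file: how a caller might branch on
// the blockiness and physicality of a relish. Everything below uses only types
// defined in this module; the OID values are arbitrary.
#[cfg(test)]
mod relish_tag_examples {
    use super::*;

    #[test]
    fn blocky_and_physical_flags() {
        let rel = RelishTag::Relation(RelTag {
            forknum: 0,
            spcnode: 1663,
            dbnode: 13010,
            relnode: 16384,
        });
        let clog = RelishTag::Slru {
            slru: SlruKind::Clog,
            segno: 0,
        };

        // Relations and SLRU segments are divided into 8k blocks and have a tracked size.
        assert!(rel.is_blocky() && clog.is_blocky());
        assert!(rel.is_relation());

        // The control file and the checkpoint entry are stored as single blobs and
        // do not correspond to physical files.
        assert!(!RelishTag::ControlFile.is_blocky());
        assert!(!RelishTag::Checkpoint.is_physical());
    }
}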
///
/// Relation data file segment id throughout the Postgres cluster.
///
/// Every data file in Postgres is uniquely identified by 4 numbers:
/// - relation id / node (`relnode`)
/// - database id (`dbnode`)
/// - tablespace id (`spcnode`), in short this is a unique id of a separate
/// directory to store data files.
/// - forknumber (`forknum`) is used to split different kinds of data of the same relation
/// between some set of files (`relnode`, `relnode_fsm`, `relnode_vm`).
///
/// In native Postgres code `RelFileNode` structure and individual `ForkNumber` value
/// are used for the same purpose.
/// [See more related comments here](https://github.com/postgres/postgres/blob/99c5852e20a0987eca1c38ba0c09329d4076b6a0/src/include/storage/relfilenode.h#L57).
///
#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy, Serialize, Deserialize)]
pub struct RelTag {
pub forknum: u8,
pub spcnode: Oid,
pub dbnode: Oid,
pub relnode: Oid,
}
/// Display RelTag in the same format that's used in most PostgreSQL debug messages:
///
/// <spcnode>/<dbnode>/<relnode>[_fsm|_vm|_init]
///
impl fmt::Display for RelTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(forkname) = forknumber_to_name(self.forknum) {
write!(
f,
"{}/{}/{}_{}",
self.spcnode, self.dbnode, self.relnode, forkname
)
} else {
write!(f, "{}/{}/{}", self.spcnode, self.dbnode, self.relnode)
}
}
}
/// Display RelishTag in a human-readable format: relations are rendered with the
/// RelTag format above, SLRU segments as their PostgreSQL path (e.g. pg_xact/0001),
/// and the remaining variants with short descriptive names.
///
impl fmt::Display for RelishTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
RelishTag::Relation(rel) => rel.fmt(f),
RelishTag::Slru { slru, segno } => {
// e.g. pg_clog/0001
write!(f, "{}/{:04X}", slru.to_str(), segno)
}
RelishTag::FileNodeMap { spcnode, dbnode } => {
write!(f, "relmapper file for spc {} db {}", spcnode, dbnode)
}
RelishTag::TwoPhase { xid } => {
write!(f, "pg_twophase/{:08X}", xid)
}
RelishTag::ControlFile => {
write!(f, "control file")
}
RelishTag::Checkpoint => {
write!(f, "checkpoint")
}
}
}
}
///
/// Non-relation transaction status files (clog (a.k.a. pg_xact) and
/// pg_multixact) in Postgres are handled by SLRU (Simple LRU) buffer,
/// hence the name.
///
/// These files are global for a postgres instance.
///
/// These files are divided into segments, which are divided into
/// pages of the same BLCKSZ as used for relation files.
///
#[derive(Debug, Clone, Copy, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub enum SlruKind {
Clog,
MultiXactMembers,
MultiXactOffsets,
}
impl SlruKind {
pub fn to_str(&self) -> &'static str {
match self {
Self::Clog => "pg_xact",
Self::MultiXactMembers => "pg_multixact/members",
Self::MultiXactOffsets => "pg_multixact/offsets",
}
}
}
pub const FIRST_NONREL_RELISH_TAG: RelishTag = RelishTag::Slru {
slru: SlruKind::Clog,
segno: 0,
};
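// Illustrative sketch, not part of the original file: what the Display impls above
// produce for a couple of relish tags, matching the PostgreSQL-style paths
// described in the doc comments.
#[cfg(test)]
mod relish_display_examples {
    use super::*;

    #[test]
    fn display_matches_postgres_style_paths() {
        // SlruKind::Clog renders as "pg_xact" and the segment number as 4 hex digits.
        assert_eq!(FIRST_NONREL_RELISH_TAG.to_string(), "pg_xact/0000");
        // Two-phase state files render like their path under pg_twophase.
        assert_eq!(
            RelishTag::TwoPhase { xid: 0x1234 }.to_string(),
            "pg_twophase/00001234"
        );
    }
}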

View File

@@ -1,87 +0,0 @@
//! Abstractions for the page server to store its relish layer data in the external storage.
//!
//! The main purpose of this module subtree is to provide a set of abstractions to manage the storage state
//! in a way that is optimal for the page server.
//!
//! The abstractions hide multiple custom external storage API implementations,
//! such as AWS S3, local filesystem, etc., located in the submodules.
mod local_fs;
mod rust_s3;
/// A queue-based storage with the background machinery behind it to synchronize
/// local page server layer files with external storage.
mod synced_storage;
use std::{path::Path, thread};
use anyhow::Context;
pub use self::synced_storage::schedule_timeline_upload;
use self::{local_fs::LocalFs, rust_s3::RustS3};
use crate::{PageServerConf, RelishStorageKind};
pub fn run_storage_sync_thread(
config: &'static PageServerConf,
) -> anyhow::Result<Option<thread::JoinHandle<anyhow::Result<()>>>> {
match &config.relish_storage_config {
Some(relish_storage_config) => {
let max_concurrent_sync = relish_storage_config.max_concurrent_sync;
match &relish_storage_config.storage {
RelishStorageKind::LocalFs(root) => synced_storage::run_storage_sync_thread(
config,
LocalFs::new(root.clone())?,
max_concurrent_sync,
),
RelishStorageKind::AwsS3(s3_config) => synced_storage::run_storage_sync_thread(
config,
RustS3::new(s3_config)?,
max_concurrent_sync,
),
}
}
None => Ok(None),
}
}
/// Storage (potentially remote) API to manage its state.
#[async_trait::async_trait]
pub trait RelishStorage: Send + Sync {
type RelishStoragePath;
fn derive_destination(
page_server_workdir: &Path,
relish_local_path: &Path,
) -> anyhow::Result<Self::RelishStoragePath>;
async fn list_relishes(&self) -> anyhow::Result<Vec<Self::RelishStoragePath>>;
async fn download_relish<W: 'static + std::io::Write + Send>(
&self,
from: &Self::RelishStoragePath,
// rust_s3's `get_object_stream` method requires a blocking `std::io::BufWriter` rather than the async counterpart,
// which forces us to consume and return the writer to satisfy the async wrapper around the blocking operation
to: std::io::BufWriter<W>,
) -> anyhow::Result<std::io::BufWriter<W>>;
async fn delete_relish(&self, path: &Self::RelishStoragePath) -> anyhow::Result<()>;
async fn upload_relish<R: tokio::io::AsyncRead + std::marker::Unpin + Send>(
&self,
from: &mut tokio::io::BufReader<R>,
to: &Self::RelishStoragePath,
) -> anyhow::Result<()>;
}
fn strip_workspace_prefix<'a>(
page_server_workdir: &'a Path,
relish_local_path: &'a Path,
) -> anyhow::Result<&'a Path> {
relish_local_path
.strip_prefix(page_server_workdir)
.with_context(|| {
format!(
"Unexpected: relish local path '{}' is not relevant to server workdir",
relish_local_path.display(),
)
})
}
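// Illustrative sketch, not part of the original file: strip_workspace_prefix() turns
// an absolute layer-file path into a path relative to the page server workdir, which
// the storage backends then map onto their own key space. The paths are made up.
#[cfg(test)]
mod strip_workspace_prefix_example {
    use super::*;

    #[test]
    fn strips_the_workdir_prefix() {
        let workdir = Path::new("/data/pageserver");
        let layer = Path::new("/data/pageserver/timelines/1122/pg_control_0");
        assert_eq!(
            strip_workspace_prefix(workdir, layer).unwrap(),
            Path::new("timelines/1122/pg_control_0")
        );

        // Paths outside the workdir are rejected.
        assert!(strip_workspace_prefix(Path::new("/elsewhere"), layer).is_err());
    }
}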

View File

@@ -1,189 +0,0 @@
//! Local filesystem relish storage.
//!
//! The page server already stores layer data on the server when freezing it.
//! This storage provides a way to
//!
//! * test things locally in a simple way
//! * compare the local and stored binary sets
//! * help validate the relish storage API
use std::{
future::Future,
io::Write,
path::{Path, PathBuf},
pin::Pin,
};
use anyhow::{bail, Context};
use tokio::{fs, io};
use super::{strip_workspace_prefix, RelishStorage};
pub struct LocalFs {
root: PathBuf,
}
impl LocalFs {
/// Attempts to create the local FS relish storage; also creates the provided directory if it does not exist.
pub fn new(root: PathBuf) -> anyhow::Result<Self> {
if !root.exists() {
std::fs::create_dir_all(&root).with_context(|| {
format!(
"Failed to create all directories in the given root path {}",
root.display(),
)
})?;
}
Ok(Self { root })
}
fn resolve_in_storage(&self, path: &Path) -> anyhow::Result<PathBuf> {
if path.is_relative() {
Ok(self.root.join(path))
} else if path.starts_with(&self.root) {
Ok(path.to_path_buf())
} else {
bail!(
"Path '{}' does not belong to the current storage",
path.display()
)
}
}
}
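// Illustrative sketch, not part of the original file: the three path cases that
// resolve_in_storage() above handles. The root path is made up and no filesystem
// access happens here.
#[cfg(test)]
mod resolve_in_storage_example {
    use super::*;

    #[test]
    fn resolves_relative_absolute_and_foreign_paths() {
        let storage = LocalFs {
            root: PathBuf::from("/tmp/relish-root"),
        };

        // Relative paths are joined onto the storage root.
        assert_eq!(
            storage
                .resolve_in_storage(Path::new("timelines/t1/layer"))
                .unwrap(),
            PathBuf::from("/tmp/relish-root/timelines/t1/layer")
        );

        // Absolute paths inside the root are accepted as-is.
        assert_eq!(
            storage
                .resolve_in_storage(Path::new("/tmp/relish-root/layer"))
                .unwrap(),
            PathBuf::from("/tmp/relish-root/layer")
        );

        // Absolute paths outside the root are rejected.
        assert!(storage.resolve_in_storage(Path::new("/etc/passwd")).is_err());
    }
}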
#[async_trait::async_trait]
impl RelishStorage for LocalFs {
type RelishStoragePath = PathBuf;
fn derive_destination(
page_server_workdir: &Path,
relish_local_path: &Path,
) -> anyhow::Result<Self::RelishStoragePath> {
Ok(strip_workspace_prefix(page_server_workdir, relish_local_path)?.to_path_buf())
}
async fn list_relishes(&self) -> anyhow::Result<Vec<Self::RelishStoragePath>> {
Ok(get_all_files(&self.root).await?.into_iter().collect())
}
async fn download_relish<W: 'static + std::io::Write + Send>(
&self,
from: &Self::RelishStoragePath,
mut to: std::io::BufWriter<W>,
) -> anyhow::Result<std::io::BufWriter<W>> {
let file_path = self.resolve_in_storage(from)?;
if file_path.exists() && file_path.is_file() {
let updated_buffer = tokio::task::spawn_blocking(move || {
let mut source = std::io::BufReader::new(
std::fs::OpenOptions::new()
.read(true)
.open(&file_path)
.with_context(|| {
format!(
"Failed to open source file '{}' to use in the download",
file_path.display()
)
})?,
);
std::io::copy(&mut source, &mut to)
.context("Failed to download the relish file")?;
to.flush().context("Failed to flush the download buffer")?;
Ok::<_, anyhow::Error>(to)
})
.await
.context("Failed to spawn a blocking task")??;
Ok(updated_buffer)
} else {
bail!(
"File '{}' either does not exist or is not a file",
file_path.display()
)
}
}
async fn delete_relish(&self, path: &Self::RelishStoragePath) -> anyhow::Result<()> {
let file_path = self.resolve_in_storage(path)?;
if file_path.exists() && file_path.is_file() {
Ok(tokio::fs::remove_file(file_path).await?)
} else {
bail!(
"File '{}' either does not exist or is not a file",
file_path.display()
)
}
}
async fn upload_relish<R: io::AsyncRead + std::marker::Unpin + Send>(
&self,
from: &mut io::BufReader<R>,
to: &Self::RelishStoragePath,
) -> anyhow::Result<()> {
let target_file_path = self.resolve_in_storage(to)?;
create_target_directory(&target_file_path).await?;
let mut destination = io::BufWriter::new(
fs::OpenOptions::new()
.write(true)
.create(true)
.open(&target_file_path)
.await
.with_context(|| {
format!(
"Failed to open target fs destination at '{}'",
target_file_path.display()
)
})?,
);
io::copy_buf(from, &mut destination)
.await
.context("Failed to upload relish to local storage")?;
Ok(())
}
}
fn get_all_files<'a, P>(
directory_path: P,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<PathBuf>>> + Send + Sync + 'a>>
where
P: AsRef<Path> + Send + Sync + 'a,
{
Box::pin(async move {
let directory_path = directory_path.as_ref();
if directory_path.exists() {
if directory_path.is_dir() {
let mut paths = Vec::new();
let mut dir_contents = tokio::fs::read_dir(directory_path).await?;
while let Some(dir_entry) = dir_contents.next_entry().await? {
let file_type = dir_entry.file_type().await?;
let entry_path = dir_entry.path();
if file_type.is_symlink() {
log::debug!("{:?} us a symlink, skipping", entry_path)
} else if file_type.is_dir() {
paths.extend(get_all_files(entry_path).await?.into_iter())
} else {
paths.push(dir_entry.path());
}
}
Ok(paths)
} else {
bail!("Path '{}' is not a directory", directory_path.display())
}
} else {
Ok(Vec::new())
}
})
}
async fn create_target_directory(target_file_path: &Path) -> anyhow::Result<()> {
let target_dir = match target_file_path.parent() {
Some(parent_dir) => parent_dir,
None => bail!(
"Relish path '{}' has no parent directory",
target_file_path.display()
),
};
if !target_dir.exists() {
tokio::fs::create_dir_all(target_dir).await?;
}
Ok(())
}

View File

@@ -1,149 +0,0 @@
//! A wrapper around the AWS S3 client library `rust_s3`, used as a relish storage.
use std::io::Write;
use std::path::Path;
use anyhow::Context;
use s3::{bucket::Bucket, creds::Credentials, region::Region};
use crate::{
relish_storage::{strip_workspace_prefix, RelishStorage},
S3Config,
};
const S3_FILE_SEPARATOR: char = '/';
#[derive(Debug)]
pub struct S3ObjectKey(String);
impl S3ObjectKey {
fn key(&self) -> &str {
&self.0
}
}
/// AWS S3 relish storage.
pub struct RustS3 {
bucket: Bucket,
}
impl RustS3 {
/// Creates the relish storage, errors if incorrect AWS S3 configuration provided.
pub fn new(aws_config: &S3Config) -> anyhow::Result<Self> {
let region = aws_config
.bucket_region
.parse::<Region>()
.context("Failed to parse the s3 region from config")?;
let credentials = Credentials::new(
aws_config.access_key_id.as_deref(),
aws_config.secret_access_key.as_deref(),
None,
None,
None,
)
.context("Failed to create the s3 credentials")?;
Ok(Self {
bucket: Bucket::new_with_path_style(
aws_config.bucket_name.as_str(),
region,
credentials,
)
.context("Failed to create the s3 bucket")?,
})
}
}
#[async_trait::async_trait]
impl RelishStorage for RustS3 {
type RelishStoragePath = S3ObjectKey;
fn derive_destination(
page_server_workdir: &Path,
relish_local_path: &Path,
) -> anyhow::Result<Self::RelishStoragePath> {
let relative_path = strip_workspace_prefix(page_server_workdir, relish_local_path)?;
let mut key = String::new();
for segment in relative_path {
key.push(S3_FILE_SEPARATOR);
key.push_str(&segment.to_string_lossy());
}
Ok(S3ObjectKey(key))
}
async fn list_relishes(&self) -> anyhow::Result<Vec<Self::RelishStoragePath>> {
let list_response = self
.bucket
.list(String::new(), None)
.await
.context("Failed to list s3 objects")?;
Ok(list_response
.into_iter()
.flat_map(|response| response.contents)
.map(|s3_object| S3ObjectKey(s3_object.key))
.collect())
}
async fn download_relish<W: 'static + std::io::Write + Send>(
&self,
from: &Self::RelishStoragePath,
mut to: std::io::BufWriter<W>,
) -> anyhow::Result<std::io::BufWriter<W>> {
let code = self
.bucket
.get_object_stream(from.key(), &mut to)
.await
.with_context(|| format!("Failed to download s3 object with key {}", from.key()))?;
if code != 200 {
Err(anyhow::format_err!(
"Received non-200 exit code during downloading object from directory, code: {}",
code
))
} else {
tokio::task::spawn_blocking(move || {
to.flush().context("Failed to fluch the downoad buffer")?;
Ok::<_, anyhow::Error>(to)
})
.await
.context("Failed to joim the download buffer flush task")?
}
}
async fn delete_relish(&self, path: &Self::RelishStoragePath) -> anyhow::Result<()> {
let (_, code) = self
.bucket
.delete_object(path.key())
.await
.with_context(|| format!("Failed to delete s3 object with key {}", path.key()))?;
if code != 200 {
Err(anyhow::format_err!(
"Received non-200 exit code during deleting object with key '{}', code: {}",
path.key(),
code
))
} else {
Ok(())
}
}
async fn upload_relish<R: tokio::io::AsyncRead + std::marker::Unpin + Send>(
&self,
from: &mut tokio::io::BufReader<R>,
to: &Self::RelishStoragePath,
) -> anyhow::Result<()> {
let code = self
.bucket
.put_object_stream(from, to.key())
.await
.with_context(|| format!("Failed to create s3 object with key {}", to.key()))?;
if code != 200 {
Err(anyhow::format_err!(
"Received non-200 exit code during creating object with key '{}', code: {}",
to.key(),
code
))
} else {
Ok(())
}
}
}
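// Illustrative sketch, not part of the original file: derive_destination() joins the
// workdir-relative path components with '/' to build the S3 object key. The paths
// are made up.
#[cfg(test)]
mod derive_destination_example {
    use super::*;

    #[test]
    fn builds_a_slash_separated_key() {
        let key = RustS3::derive_destination(
            Path::new("/data/pageserver"),
            Path::new("/data/pageserver/timelines/1122/pg_control_0"),
        )
        .unwrap();
        assert_eq!(key.key(), "/timelines/1122/pg_control_0");
    }
}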

View File

@@ -1,57 +0,0 @@
use std::time::Duration;
use std::{collections::BinaryHeap, sync::Mutex, thread};
use crate::tenant_mgr;
use crate::{relish_storage::RelishStorage, PageServerConf};
lazy_static::lazy_static! {
static ref UPLOAD_QUEUE: Mutex<BinaryHeap<SyncTask>> = Mutex::new(BinaryHeap::new());
}
pub fn schedule_timeline_upload(_local_timeline: ()) {
// UPLOAD_QUEUE
// .lock()
// .unwrap()
// .push(SyncTask::Upload(local_timeline))
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum SyncTask {}
pub fn run_storage_sync_thread<
P: std::fmt::Debug,
S: 'static + RelishStorage<RelishStoragePath = P>,
>(
config: &'static PageServerConf,
relish_storage: S,
max_concurrent_sync: usize,
) -> anyhow::Result<Option<thread::JoinHandle<anyhow::Result<()>>>> {
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
let handle = thread::Builder::new()
.name("Queue based relish storage sync".to_string())
.spawn(move || {
while !tenant_mgr::shutdown_requested() {
let mut queue_accessor = UPLOAD_QUEUE.lock().unwrap();
log::debug!("Upload queue length: {}", queue_accessor.len());
let next_task = queue_accessor.pop();
drop(queue_accessor);
match next_task {
Some(task) => runtime.block_on(async {
// suppress warnings
let _ = (config, task, &relish_storage, max_concurrent_sync);
todo!("omitted for brevity")
}),
None => {
thread::sleep(Duration::from_secs(1));
continue;
}
}
}
log::debug!("Queue based relish storage sync thread shut down");
Ok(())
})?;
Ok(Some(handle))
}

View File

@@ -1,849 +0,0 @@
use crate::relish::*;
use anyhow::Result;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::ops::{AddAssign, Deref};
use std::sync::Arc;
use std::time::Duration;
use zenith_utils::lsn::{Lsn, RecordLsn};
use zenith_utils::zid::ZTimelineId;
///
/// A repository corresponds to one .zenith directory. One repository holds multiple
/// timelines, forked off from the same initial call to 'initdb'.
pub trait Repository: Send + Sync {
fn shutdown(&self) -> Result<()>;
/// Get Timeline handle for given zenith timeline ID.
fn get_timeline(&self, timelineid: ZTimelineId) -> Result<Arc<dyn Timeline>>;
/// Create a new, empty timeline. The caller is responsible for loading data into it
fn create_empty_timeline(&self, timelineid: ZTimelineId) -> Result<Arc<dyn Timeline>>;
/// Branch a timeline
fn branch_timeline(&self, src: ZTimelineId, dst: ZTimelineId, start_lsn: Lsn) -> Result<()>;
/// perform one garbage collection iteration.
/// garbage collection is periodically performed by gc thread,
/// but it can be explicitly requested through page server api.
///
/// 'timelineid' specifies the timeline to GC, or None for all.
/// `horizon` specifies the delta from the last LSN within which all object versions are preserved (PITR interval).
/// `checkpoint_before_gc` parameter is used to force compaction of storage before GC
/// to make tests more deterministic.
/// TODO Do we still need it, or can we call checkpoint explicitly in tests where needed?
fn gc_iteration(
&self,
timelineid: Option<ZTimelineId>,
horizon: u64,
checkpoint_before_gc: bool,
) -> Result<GcResult>;
}
///
/// Result of performing GC
///
#[derive(Default)]
pub struct GcResult {
pub ondisk_relfiles_total: u64,
pub ondisk_relfiles_needed_by_cutoff: u64,
pub ondisk_relfiles_needed_by_branches: u64,
pub ondisk_relfiles_not_updated: u64,
pub ondisk_relfiles_needed_as_tombstone: u64,
pub ondisk_relfiles_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files.
pub ondisk_relfiles_dropped: u64, // # of layer files removed because the relation was dropped
pub ondisk_nonrelfiles_total: u64,
pub ondisk_nonrelfiles_needed_by_cutoff: u64,
pub ondisk_nonrelfiles_needed_by_branches: u64,
pub ondisk_nonrelfiles_not_updated: u64,
pub ondisk_nonrelfiles_needed_as_tombstone: u64,
pub ondisk_nonrelfiles_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files.
pub ondisk_nonrelfiles_dropped: u64, // # of layer files removed because the relation was dropped
pub elapsed: Duration,
}
impl AddAssign for GcResult {
fn add_assign(&mut self, other: Self) {
self.ondisk_relfiles_total += other.ondisk_relfiles_total;
self.ondisk_relfiles_needed_by_cutoff += other.ondisk_relfiles_needed_by_cutoff;
self.ondisk_relfiles_needed_by_branches += other.ondisk_relfiles_needed_by_branches;
self.ondisk_relfiles_not_updated += other.ondisk_relfiles_not_updated;
self.ondisk_relfiles_needed_as_tombstone += other.ondisk_relfiles_needed_as_tombstone;
self.ondisk_relfiles_removed += other.ondisk_relfiles_removed;
self.ondisk_relfiles_dropped += other.ondisk_relfiles_dropped;
self.ondisk_nonrelfiles_total += other.ondisk_nonrelfiles_total;
self.ondisk_nonrelfiles_needed_by_cutoff += other.ondisk_nonrelfiles_needed_by_cutoff;
self.ondisk_nonrelfiles_needed_by_branches += other.ondisk_nonrelfiles_needed_by_branches;
self.ondisk_nonrelfiles_not_updated += other.ondisk_nonrelfiles_not_updated;
self.ondisk_nonrelfiles_needed_as_tombstone += other.ondisk_nonrelfiles_needed_as_tombstone;
self.ondisk_nonrelfiles_removed += other.ondisk_nonrelfiles_removed;
self.ondisk_nonrelfiles_dropped += other.ondisk_nonrelfiles_dropped;
self.elapsed += other.elapsed;
}
}
pub trait Timeline: Send + Sync {
//------------------------------------------------------------------------------
// Public GET functions
//------------------------------------------------------------------------------
///
/// Wait until WAL has been received and processed up to this LSN.
///
/// You should call this before any of the other get_* or list_* functions. Calling
/// those functions with an LSN that has not been processed yet is an error.
///
fn wait_lsn(&self, lsn: Lsn) -> Result<()>;
/// Look up given page version.
fn get_page_at_lsn(&self, tag: RelishTag, blknum: u32, lsn: Lsn) -> Result<Bytes>;
/// Get size of a relish
fn get_relish_size(&self, tag: RelishTag, lsn: Lsn) -> Result<Option<u32>>;
/// Does relation exist?
fn get_rel_exists(&self, tag: RelishTag, lsn: Lsn) -> Result<bool>;
/// Get a list of all existing relations
/// Pass RelTag to get relation objects or None to get nonrels.
fn list_relishes(&self, tag: Option<RelTag>, lsn: Lsn) -> Result<HashSet<RelishTag>>;
/// Get a list of all existing relations in given tablespace and database.
fn list_rels(&self, spcnode: u32, dbnode: u32, lsn: Lsn) -> Result<HashSet<RelishTag>>;
/// Get a list of all existing non-relational objects
fn list_nonrels(&self, lsn: Lsn) -> Result<HashSet<RelishTag>>;
//------------------------------------------------------------------------------
// Public PUT functions, to update the repository with new page versions.
//
// These are called by the WAL receiver to digest WAL records.
//------------------------------------------------------------------------------
/// Atomically get both last and prev.
fn get_last_record_rlsn(&self) -> RecordLsn;
/// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
fn get_last_record_lsn(&self) -> Lsn;
fn get_prev_record_lsn(&self) -> Lsn;
fn get_start_lsn(&self) -> Lsn;
/// Mutate the timeline with a [`TimelineWriter`].
fn writer<'a>(&'a self) -> Box<dyn TimelineWriter + 'a>;
///
/// Flush to disk all data that was written with the put_* functions
///
/// NOTE: This has nothing to do with checkpoint in PostgreSQL. We don't
/// know anything about them here in the repository.
fn checkpoint(&self) -> Result<()>;
/// Retrieve current logical size of the timeline
///
/// NOTE: counted incrementally, includes ancestors,
/// doesn't support TwoPhase relishes yet
fn get_current_logical_size(&self) -> usize;
/// Does the same as get_current_logical_size but counted on demand.
/// Used in tests to ensure that the incremental and non-incremental variants match.
fn get_current_logical_size_non_incremental(&self, lsn: Lsn) -> Result<usize>;
}
/// Various functions to mutate the timeline.
// TODO Currently, Deref is used to allow easy access to read methods from this trait.
// This is probably considered bad practice in Rust and should be fixed eventually,
// but fixing it would require large code changes.
pub trait TimelineWriter: Deref<Target = dyn Timeline> {
/// Put a new page version that can be constructed from a WAL record
///
/// This will implicitly extend the relation, if the page is beyond the
/// current end-of-file.
fn put_wal_record(&self, lsn: Lsn, tag: RelishTag, blknum: u32, rec: WALRecord) -> Result<()>;
/// Like put_wal_record, but with ready-made image of the page.
fn put_page_image(&self, tag: RelishTag, blknum: u32, lsn: Lsn, img: Bytes) -> Result<()>;
/// Truncate relation
fn put_truncation(&self, rel: RelishTag, lsn: Lsn, nblocks: u32) -> Result<()>;
/// This method is used to mark dropped relations, truncated SLRU files, and aborted two-phase records
fn drop_relish(&self, tag: RelishTag, lsn: Lsn) -> Result<()>;
/// Track end of the latest digested WAL record.
///
/// Advance requires aligned LSN as an argument and would wake wait_lsn() callers.
/// Previous last record LSN is stored alongside the latest and can be read.
fn advance_last_record_lsn(&self, lsn: Lsn);
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct WALRecord {
pub will_init: bool,
pub rec: Bytes,
// Remember the offset of main_data in rec,
// so that we don't have to parse the record again.
// If record has no main_data, this offset equals rec.len().
pub main_data_offset: u32,
}
impl WALRecord {
pub fn pack(&self, buf: &mut BytesMut) {
buf.put_u8(self.will_init as u8);
buf.put_u32(self.main_data_offset);
buf.put_u32(self.rec.len() as u32);
buf.put_slice(&self.rec[..]);
}
pub fn unpack(buf: &mut Bytes) -> WALRecord {
let will_init = buf.get_u8() != 0;
let main_data_offset = buf.get_u32();
let rec_len = buf.get_u32() as usize;
let rec = buf.split_to(rec_len);
WALRecord {
will_init,
rec,
main_data_offset,
}
}
}
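// Illustrative sketch, not part of the original file: pack() and unpack() above
// round-trip a record through the on-wire layout
// [will_init u8][main_data_offset u32][rec_len u32][rec bytes].
#[cfg(test)]
mod walrecord_roundtrip_example {
    use super::*;

    #[test]
    fn pack_then_unpack_restores_the_record() {
        let original = WALRecord {
            will_init: true,
            rec: Bytes::from_static(b"some wal record payload"),
            main_data_offset: 8,
        };

        let mut buf = BytesMut::new();
        original.pack(&mut buf);

        let mut frozen = buf.freeze();
        assert_eq!(WALRecord::unpack(&mut frozen), original);
    }
}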
///
/// Tests that should work the same with any Repository/Timeline implementation.
///
#[allow(clippy::bool_assert_comparison)]
#[cfg(test)]
mod tests {
use super::*;
use crate::layered_repository::{LayeredRepository, METADATA_FILE_NAME};
use crate::walredo::{WalRedoError, WalRedoManager};
use crate::PageServerConf;
use hex_literal::hex;
use postgres_ffi::pg_constants;
use postgres_ffi::xlog_utils::SIZEOF_CHECKPOINT;
use std::fs;
use std::path::PathBuf;
use zenith_utils::zid::ZTenantId;
const TIMELINE_ID: ZTimelineId =
ZTimelineId::from_array(hex!("11223344556677881122334455667788"));
const NEW_TIMELINE_ID: ZTimelineId =
ZTimelineId::from_array(hex!("AA223344556677881122334455667788"));
/// Arbitrary relation tag, for testing.
const TESTREL_A: RelishTag = RelishTag::Relation(RelTag {
spcnode: 0,
dbnode: 111,
relnode: 1000,
forknum: 0,
});
const TESTREL_B: RelishTag = RelishTag::Relation(RelTag {
spcnode: 0,
dbnode: 111,
relnode: 1001,
forknum: 0,
});
/// Convenience function to create a page image with given string as the only content
#[allow(non_snake_case)]
fn TEST_IMG(s: &str) -> Bytes {
let mut buf = BytesMut::new();
buf.extend_from_slice(s.as_bytes());
buf.resize(8192, 0);
buf.freeze()
}
fn assert_current_logical_size(timeline: &Arc<dyn Timeline>, lsn: Lsn) {
let incremental = timeline.get_current_logical_size();
let non_incremental = timeline
.get_current_logical_size_non_incremental(lsn)
.unwrap();
assert_eq!(incremental, non_incremental);
}
static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]);
static ZERO_CHECKPOINT: Bytes = Bytes::from_static(&[0u8; SIZEOF_CHECKPOINT]);
struct RepoHarness {
conf: &'static PageServerConf,
tenant_id: ZTenantId,
}
impl RepoHarness {
fn create(test_name: &'static str) -> Result<Self> {
let repo_dir = PageServerConf::test_repo_dir(test_name);
let _ = fs::remove_dir_all(&repo_dir);
fs::create_dir_all(&repo_dir)?;
fs::create_dir_all(&repo_dir.join("timelines"))?;
let conf = PageServerConf::dummy_conf(repo_dir);
// Make a static copy of the config. This can never be freed, but that's
// OK in a test.
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
let tenant_id = ZTenantId::generate();
fs::create_dir_all(conf.tenant_path(&tenant_id))?;
Ok(Self { conf, tenant_id })
}
fn load(&self) -> Box<dyn Repository> {
let walredo_mgr = Arc::new(TestRedoManager);
Box::new(LayeredRepository::new(
self.conf,
walredo_mgr,
self.tenant_id,
false,
))
}
fn timeline_path(&self, timeline_id: &ZTimelineId) -> PathBuf {
self.conf.timeline_path(timeline_id, &self.tenant_id)
}
}
#[test]
fn test_relsize() -> Result<()> {
let repo = RepoHarness::create("test_relsize")?.load();
// get_timeline() with non-existent timeline id should fail
//repo.get_timeline("11223344556677881122334455667788");
// Create timeline to work on
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
let writer = tline.writer();
writer.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
writer.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
writer.put_page_image(TESTREL_A, 0, Lsn(0x30), TEST_IMG("foo blk 0 at 3"))?;
writer.put_page_image(TESTREL_A, 1, Lsn(0x40), TEST_IMG("foo blk 1 at 4"))?;
writer.put_page_image(TESTREL_A, 2, Lsn(0x50), TEST_IMG("foo blk 2 at 5"))?;
writer.advance_last_record_lsn(Lsn(0x50));
assert_current_logical_size(&tline, Lsn(0x50));
// The relation was created at LSN 2, not visible at LSN 1 yet.
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x10))?, false);
assert!(tline.get_relish_size(TESTREL_A, Lsn(0x10))?.is_none());
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x20))?, true);
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x20))?.unwrap(), 1);
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x50))?.unwrap(), 3);
// Check page contents at each LSN
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x20))?,
TEST_IMG("foo blk 0 at 2")
);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x30))?,
TEST_IMG("foo blk 0 at 3")
);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x40))?,
TEST_IMG("foo blk 0 at 3")
);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 1, Lsn(0x40))?,
TEST_IMG("foo blk 1 at 4")
);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x50))?,
TEST_IMG("foo blk 0 at 3")
);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 1, Lsn(0x50))?,
TEST_IMG("foo blk 1 at 4")
);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 2, Lsn(0x50))?,
TEST_IMG("foo blk 2 at 5")
);
// Truncate last block
writer.put_truncation(TESTREL_A, Lsn(0x60), 2)?;
writer.advance_last_record_lsn(Lsn(0x60));
assert_current_logical_size(&tline, Lsn(0x60));
// Check reported size and contents after truncation
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x60))?.unwrap(), 2);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x60))?,
TEST_IMG("foo blk 0 at 3")
);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 1, Lsn(0x60))?,
TEST_IMG("foo blk 1 at 4")
);
// should still see the truncated block with older LSN
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x50))?.unwrap(), 3);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 2, Lsn(0x50))?,
TEST_IMG("foo blk 2 at 5")
);
// Truncate to zero length
writer.put_truncation(TESTREL_A, Lsn(0x68), 0)?;
writer.advance_last_record_lsn(Lsn(0x68));
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x68))?.unwrap(), 0);
// Extend from 0 to 2 blocks, leaving a gap
writer.put_page_image(TESTREL_A, 1, Lsn(0x70), TEST_IMG("foo blk 1"))?;
writer.advance_last_record_lsn(Lsn(0x70));
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x70))?.unwrap(), 2);
assert_eq!(tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x70))?, ZERO_PAGE);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 1, Lsn(0x70))?,
TEST_IMG("foo blk 1")
);
// Extend a lot more, leaving a big gap that spans across segments
// FIXME: This is currently broken, see https://github.com/zenithdb/zenith/issues/500
/*
tline.put_page_image(TESTREL_A, 1500, Lsn(0x80), TEST_IMG("foo blk 1500"))?;
tline.advance_last_record_lsn(Lsn(0x80));
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x80))?.unwrap(), 1501);
for blk in 2..1500 {
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, blk, Lsn(0x80))?,
ZERO_PAGE);
}
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 1500, Lsn(0x80))?,
TEST_IMG("foo blk 1500"));
*/
Ok(())
}
// Test what happens if we dropped a relation
// and then created it again within the same layer.
#[test]
fn test_drop_extend() -> Result<()> {
let repo = RepoHarness::create("test_drop_extend")?.load();
// Create timeline to work on
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
let writer = tline.writer();
writer.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
writer.advance_last_record_lsn(Lsn(0x20));
// Check that rel exists and size is correct
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x20))?, true);
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x20))?.unwrap(), 1);
// Drop relish
writer.drop_relish(TESTREL_A, Lsn(0x30))?;
writer.advance_last_record_lsn(Lsn(0x30));
// Check that rel is not visible anymore
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x30))?, false);
assert!(tline.get_relish_size(TESTREL_A, Lsn(0x30))?.is_none());
// Extend it again
writer.put_page_image(TESTREL_A, 0, Lsn(0x40), TEST_IMG("foo blk 0 at 4"))?;
writer.advance_last_record_lsn(Lsn(0x40));
// Check that rel exists and size is correct
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x40))?, true);
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x40))?.unwrap(), 1);
Ok(())
}
// Test what happens if we truncated a relation
// so that one of its segments was dropped
// and then extended it again within the same layer.
#[test]
fn test_truncate_extend() -> Result<()> {
let repo = RepoHarness::create("test_truncate_extend")?.load();
// Create timeline to work on
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
let writer = tline.writer();
//from storage_layer.rs
const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192;
let relsize = RELISH_SEG_SIZE * 2;
// Create relation with relsize blocks
for blkno in 0..relsize {
let lsn = Lsn(0x20);
let data = format!("foo blk {} at {}", blkno, lsn);
writer.put_page_image(TESTREL_A, blkno, lsn, TEST_IMG(&data))?;
}
writer.advance_last_record_lsn(Lsn(0x20));
// The relation was created at LSN 2, not visible at LSN 1 yet.
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x10))?, false);
assert!(tline.get_relish_size(TESTREL_A, Lsn(0x10))?.is_none());
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x20))?, true);
assert_eq!(
tline.get_relish_size(TESTREL_A, Lsn(0x20))?.unwrap(),
relsize
);
// Check relation content
for blkno in 0..relsize {
let lsn = Lsn(0x20);
let data = format!("foo blk {} at {}", blkno, lsn);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, blkno, lsn)?,
TEST_IMG(&data)
);
}
// Truncate relation so that second segment was dropped
// - only leave one page
writer.put_truncation(TESTREL_A, Lsn(0x60), 1)?;
writer.advance_last_record_lsn(Lsn(0x60));
// Check reported size and contents after truncation
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x60))?.unwrap(), 1);
for blkno in 0..1 {
let lsn = Lsn(0x20);
let data = format!("foo blk {} at {}", blkno, lsn);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, blkno, Lsn(0x60))?,
TEST_IMG(&data)
);
}
// should still see all blocks with older LSN
assert_eq!(
tline.get_relish_size(TESTREL_A, Lsn(0x50))?.unwrap(),
relsize
);
for blkno in 0..relsize {
let lsn = Lsn(0x20);
let data = format!("foo blk {} at {}", blkno, lsn);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, blkno, Lsn(0x50))?,
TEST_IMG(&data)
);
}
// Extend relation again.
// Add enough blocks to create second segment
for blkno in 0..relsize {
let lsn = Lsn(0x80);
let data = format!("foo blk {} at {}", blkno, lsn);
writer.put_page_image(TESTREL_A, blkno, lsn, TEST_IMG(&data))?;
}
writer.advance_last_record_lsn(Lsn(0x80));
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x80))?, true);
assert_eq!(
tline.get_relish_size(TESTREL_A, Lsn(0x80))?.unwrap(),
relsize
);
// Check relation content
for blkno in 0..relsize {
let lsn = Lsn(0x80);
let data = format!("foo blk {} at {}", blkno, lsn);
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, blkno, Lsn(0x80))?,
TEST_IMG(&data)
);
}
Ok(())
}
/// Test get_relish_size() and truncation with a file larger than 1 GB, so that it's
/// split into multiple 1 GB segments in Postgres.
#[test]
fn test_large_rel() -> Result<()> {
let repo = RepoHarness::create("test_large_rel")?.load();
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
let writer = tline.writer();
let mut lsn = 0x10;
for blknum in 0..pg_constants::RELSEG_SIZE + 1 {
let img = TEST_IMG(&format!("foo blk {} at {}", blknum, Lsn(lsn)));
lsn += 0x10;
writer.put_page_image(TESTREL_A, blknum as u32, Lsn(lsn), img)?;
}
writer.advance_last_record_lsn(Lsn(lsn));
assert_current_logical_size(&tline, Lsn(lsn));
assert_eq!(
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
pg_constants::RELSEG_SIZE + 1
);
// Truncate one block
lsn += 0x10;
writer.put_truncation(TESTREL_A, Lsn(lsn), pg_constants::RELSEG_SIZE)?;
writer.advance_last_record_lsn(Lsn(lsn));
assert_eq!(
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
pg_constants::RELSEG_SIZE
);
assert_current_logical_size(&tline, Lsn(lsn));
// Truncate another block
lsn += 0x10;
writer.put_truncation(TESTREL_A, Lsn(lsn), pg_constants::RELSEG_SIZE - 1)?;
writer.advance_last_record_lsn(Lsn(lsn));
assert_eq!(
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
pg_constants::RELSEG_SIZE - 1
);
assert_current_logical_size(&tline, Lsn(lsn));
// Truncate to 3000, and then truncate all the way down to 0, one block at a time
// This tests the behavior at segment boundaries
let mut size: i32 = 3000;
while size >= 0 {
lsn += 0x10;
writer.put_truncation(TESTREL_A, Lsn(lsn), size as u32)?;
writer.advance_last_record_lsn(Lsn(lsn));
assert_eq!(
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
size as u32
);
size -= 1;
}
assert_current_logical_size(&tline, Lsn(lsn));
Ok(())
}
///
/// Test list_rels() function, with branches and dropped relations
///
#[test]
fn test_list_rels_drop() -> Result<()> {
let repo = RepoHarness::create("test_list_rels_drop")?.load();
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
let writer = tline.writer();
const TESTDB: u32 = 111;
// Import initial dummy checkpoint record, otherwise the get_timeline() call
// after branching fails below
writer.put_page_image(RelishTag::Checkpoint, 0, Lsn(0x10), ZERO_CHECKPOINT.clone())?;
// Create a relation on the timeline
writer.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
writer.advance_last_record_lsn(Lsn(0x30));
// Check that list_rels() lists it after LSN 2, but not before it
assert!(!tline.list_rels(0, TESTDB, Lsn(0x10))?.contains(&TESTREL_A));
assert!(tline.list_rels(0, TESTDB, Lsn(0x20))?.contains(&TESTREL_A));
assert!(tline.list_rels(0, TESTDB, Lsn(0x30))?.contains(&TESTREL_A));
// Create a branch, check that the relation is visible there
repo.branch_timeline(TIMELINE_ID, NEW_TIMELINE_ID, Lsn(0x30))?;
let newtline = repo.get_timeline(NEW_TIMELINE_ID)?;
let new_writer = newtline.writer();
assert!(newtline
.list_rels(0, TESTDB, Lsn(0x30))?
.contains(&TESTREL_A));
// Drop it on the branch
new_writer.drop_relish(TESTREL_A, Lsn(0x40))?;
new_writer.advance_last_record_lsn(Lsn(0x40));
drop(new_writer);
// Check that it's no longer listed on the branch after the point where it was dropped
assert!(newtline
.list_rels(0, TESTDB, Lsn(0x30))?
.contains(&TESTREL_A));
assert!(!newtline
.list_rels(0, TESTDB, Lsn(0x40))?
.contains(&TESTREL_A));
// Run checkpoint and garbage collection and check that it's still not visible
newtline.checkpoint()?;
repo.gc_iteration(Some(NEW_TIMELINE_ID), 0, true)?;
assert!(!newtline
.list_rels(0, TESTDB, Lsn(0x40))?
.contains(&TESTREL_A));
Ok(())
}
///
/// Test branch creation
///
#[test]
fn test_branch() -> Result<()> {
let repo = RepoHarness::create("test_branch")?.load();
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
let writer = tline.writer();
// Import initial dummy checkpoint record, otherwise the get_timeline() call
// after branching fails below
writer.put_page_image(RelishTag::Checkpoint, 0, Lsn(0x10), ZERO_CHECKPOINT.clone())?;
// Create a relation on the timeline
writer.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
writer.put_page_image(TESTREL_A, 0, Lsn(0x30), TEST_IMG("foo blk 0 at 3"))?;
writer.put_page_image(TESTREL_A, 0, Lsn(0x40), TEST_IMG("foo blk 0 at 4"))?;
// Create another relation
writer.put_page_image(TESTREL_B, 0, Lsn(0x20), TEST_IMG("foobar blk 0 at 2"))?;
writer.advance_last_record_lsn(Lsn(0x40));
assert_current_logical_size(&tline, Lsn(0x40));
// Branch the history, modify relation differently on the new timeline
repo.branch_timeline(TIMELINE_ID, NEW_TIMELINE_ID, Lsn(0x30))?;
let newtline = repo.get_timeline(NEW_TIMELINE_ID)?;
let new_writer = newtline.writer();
new_writer.put_page_image(TESTREL_A, 0, Lsn(0x40), TEST_IMG("bar blk 0 at 4"))?;
new_writer.advance_last_record_lsn(Lsn(0x40));
// Check page contents on both branches
assert_eq!(
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x40))?,
TEST_IMG("foo blk 0 at 4")
);
assert_eq!(
newtline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x40))?,
TEST_IMG("bar blk 0 at 4")
);
assert_eq!(
newtline.get_page_at_lsn(TESTREL_B, 0, Lsn(0x40))?,
TEST_IMG("foobar blk 0 at 2")
);
assert_eq!(newtline.get_relish_size(TESTREL_B, Lsn(0x40))?.unwrap(), 1);
assert_current_logical_size(&tline, Lsn(0x40));
Ok(())
}
#[test]
fn corrupt_metadata() -> Result<()> {
const TEST_NAME: &str = "corrupt_metadata";
let harness = RepoHarness::create(TEST_NAME)?;
let repo = harness.load();
repo.create_empty_timeline(TIMELINE_ID)?;
drop(repo);
let metadata_path = harness.timeline_path(&TIMELINE_ID).join(METADATA_FILE_NAME);
assert!(metadata_path.is_file());
let mut metadata_bytes = std::fs::read(&metadata_path)?;
assert_eq!(metadata_bytes.len(), 512);
metadata_bytes[512 - 4 - 2] ^= 1;
std::fs::write(metadata_path, metadata_bytes)?;
let new_repo = harness.load();
let err = new_repo.get_timeline(TIMELINE_ID).err().unwrap();
assert!(err.to_string().contains("checksum"));
Ok(())
}
#[test]
fn future_layerfiles() -> Result<()> {
const TEST_NAME: &str = "future_layerfiles";
let harness = RepoHarness::create(TEST_NAME)?;
let repo = harness.load();
repo.create_empty_timeline(TIMELINE_ID)?;
drop(repo);
let timeline_path = harness.timeline_path(&TIMELINE_ID);
let make_empty_file = |filename: &str| -> std::io::Result<()> {
let path = timeline_path.join(filename);
assert!(!path.exists());
std::fs::write(&path, &[])?;
Ok(())
};
let image_filename = format!("pg_control_0_{:016X}", 8000);
let delta_filename = format!("pg_control_0_{:016X}_{:016X}", 8000, 8008);
make_empty_file(&image_filename)?;
make_empty_file(&delta_filename)?;
let new_repo = harness.load();
new_repo.get_timeline(TIMELINE_ID).unwrap();
drop(new_repo);
let check_old = |filename: &str, num: u32| {
let path = timeline_path.join(filename);
assert!(!path.exists());
let backup_path = timeline_path.join(format!("{}.{}.old", filename, num));
assert!(backup_path.exists());
};
check_old(&image_filename, 0);
check_old(&delta_filename, 0);
make_empty_file(&image_filename)?;
make_empty_file(&delta_filename)?;
let new_repo = harness.load();
new_repo.get_timeline(TIMELINE_ID).unwrap();
drop(new_repo);
check_old(&image_filename, 0);
check_old(&delta_filename, 0);
check_old(&image_filename, 1);
check_old(&delta_filename, 1);
Ok(())
}
// Mock WAL redo manager that doesn't do much
struct TestRedoManager;
impl WalRedoManager for TestRedoManager {
fn request_redo(
&self,
rel: RelishTag,
blknum: u32,
lsn: Lsn,
base_img: Option<Bytes>,
records: Vec<(Lsn, WALRecord)>,
) -> Result<Bytes, WalRedoError> {
let s = format!(
"redo for {} blk {} to get to {}, with {} and {} records",
rel,
blknum,
lsn,
if base_img.is_some() {
"base image"
} else {
"no base image"
},
records.len()
);
println!("{}", s);
Ok(TEST_IMG(&s))
}
}
}

View File

@@ -1,906 +0,0 @@
//!
//! Import data and WAL from a PostgreSQL data directory and WAL segments into
//! zenith Timeline.
//!
use postgres_ffi::nonrelfile_utils::clogpage_precedes;
use postgres_ffi::nonrelfile_utils::slru_may_delete_clogsegment;
use std::cmp::min;
use std::fs;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use anyhow::{bail, Result};
use bytes::{Buf, Bytes};
use tracing::*;
use crate::relish::*;
use crate::repository::*;
use crate::waldecoder::*;
use postgres_ffi::nonrelfile_utils::mx_offset_to_member_segment;
use postgres_ffi::relfile_utils::*;
use postgres_ffi::xlog_utils::*;
use postgres_ffi::Oid;
use postgres_ffi::{pg_constants, CheckPoint, ControlFileData};
use zenith_utils::lsn::Lsn;
const MAX_MBR_BLKNO: u32 =
pg_constants::MAX_MULTIXACT_ID / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]);
///
/// Import all relation data pages from local disk into the repository.
///
pub fn import_timeline_from_postgres_datadir(
path: &Path,
writer: &dyn TimelineWriter,
lsn: Lsn,
) -> Result<()> {
// Scan 'global'
for direntry in fs::read_dir(path.join("global"))? {
let direntry = direntry?;
match direntry.file_name().to_str() {
None => continue,
Some("pg_control") => {
import_control_file(writer, lsn, &direntry.path())?;
}
Some("pg_filenode.map") => import_nonrel_file(
writer,
lsn,
RelishTag::FileNodeMap {
spcnode: pg_constants::GLOBALTABLESPACE_OID,
dbnode: 0,
},
&direntry.path(),
)?,
// Load any relation files into the page server
_ => import_relfile(
&direntry.path(),
writer,
lsn,
pg_constants::GLOBALTABLESPACE_OID,
0,
)?,
}
}
// Scan 'base'. It contains database dirs, the database OID is the filename.
// E.g. 'base/12345', where 12345 is the database OID.
for direntry in fs::read_dir(path.join("base"))? {
let direntry = direntry?;
//skip all temporary files
if direntry.file_name().to_str().unwrap() == "pgsql_tmp" {
continue;
}
let dboid = direntry.file_name().to_str().unwrap().parse::<u32>()?;
for direntry in fs::read_dir(direntry.path())? {
let direntry = direntry?;
match direntry.file_name().to_str() {
None => continue,
Some("PG_VERSION") => continue,
Some("pg_filenode.map") => import_nonrel_file(
writer,
lsn,
RelishTag::FileNodeMap {
spcnode: pg_constants::DEFAULTTABLESPACE_OID,
dbnode: dboid,
},
&direntry.path(),
)?,
// Load any relation files into the page server
_ => import_relfile(
&direntry.path(),
writer,
lsn,
pg_constants::DEFAULTTABLESPACE_OID,
dboid,
)?,
}
}
}
for entry in fs::read_dir(path.join("pg_xact"))? {
let entry = entry?;
import_slru_file(writer, lsn, SlruKind::Clog, &entry.path())?;
}
for entry in fs::read_dir(path.join("pg_multixact").join("members"))? {
let entry = entry?;
import_slru_file(writer, lsn, SlruKind::MultiXactMembers, &entry.path())?;
}
for entry in fs::read_dir(path.join("pg_multixact").join("offsets"))? {
let entry = entry?;
import_slru_file(writer, lsn, SlruKind::MultiXactOffsets, &entry.path())?;
}
for entry in fs::read_dir(path.join("pg_twophase"))? {
let entry = entry?;
let xid = u32::from_str_radix(entry.path().to_str().unwrap(), 16)?;
import_nonrel_file(writer, lsn, RelishTag::TwoPhase { xid }, &entry.path())?;
}
// TODO: Scan pg_tblspc
writer.advance_last_record_lsn(lsn);
Ok(())
}
// subroutine of import_timeline_from_postgres_datadir(), to load one relation file.
fn import_relfile(
path: &Path,
timeline: &dyn TimelineWriter,
lsn: Lsn,
spcoid: Oid,
dboid: Oid,
) -> Result<()> {
// Does it look like a relation file?
trace!("importing rel file {}", path.display());
let p = parse_relfilename(path.file_name().unwrap().to_str().unwrap());
if let Err(e) = p {
warn!("unrecognized file in postgres datadir: {:?} ({})", path, e);
return Err(e.into());
}
let (relnode, forknum, segno) = p.unwrap();
let mut file = File::open(path)?;
let mut buf: [u8; 8192] = [0u8; 8192];
let mut blknum: u32 = segno * (1024 * 1024 * 1024 / pg_constants::BLCKSZ as u32);
loop {
let r = file.read_exact(&mut buf);
match r {
Ok(_) => {
let rel = RelTag {
spcnode: spcoid,
dbnode: dboid,
relnode,
forknum,
};
let tag = RelishTag::Relation(rel);
timeline.put_page_image(tag, blknum, lsn, Bytes::copy_from_slice(&buf))?;
}
// TODO: UnexpectedEof is expected
Err(err) => match err.kind() {
std::io::ErrorKind::UnexpectedEof => {
// reached EOF. That's expected.
// FIXME: maybe check that we read the full length of the file?
break;
}
_ => {
bail!("error reading file {}: {:#}", path.display(), err);
}
},
};
blknum += 1;
}
Ok(())
}
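// Worked example, not part of the original file: with 8 KB pages, one 1 GB relation
// segment holds 131072 blocks, so import_relfile() above starts numbering segment
// `segno` at block segno * 131072.
#[cfg(test)]
mod relfile_block_numbering_example {
    use super::*;

    #[test]
    fn segment_number_maps_to_starting_block() {
        let blocks_per_segment = 1024 * 1024 * 1024 / pg_constants::BLCKSZ as u32;
        assert_eq!(blocks_per_segment, 131072);
        // Segment 2 of a relation therefore starts at this block number.
        assert_eq!(2 * blocks_per_segment, 262144);
    }
}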
///
/// Import a "non-blocky" file into the repository
///
/// This is used for small files like the control file, twophase files etc. that
/// are just slurped into the repository as one blob.
///
fn import_nonrel_file(
timeline: &dyn TimelineWriter,
lsn: Lsn,
tag: RelishTag,
path: &Path,
) -> Result<()> {
let mut file = File::open(path)?;
let mut buffer = Vec::new();
// read the whole file
file.read_to_end(&mut buffer)?;
trace!("importing non-rel file {}", path.display());
timeline.put_page_image(tag, 0, lsn, Bytes::copy_from_slice(&buffer[..]))?;
Ok(())
}
///
/// Import pg_control file into the repository.
///
/// The control file is imported as is, but we also extract the checkpoint record
/// from it and store it separately.
fn import_control_file(timeline: &dyn TimelineWriter, lsn: Lsn, path: &Path) -> Result<()> {
let mut file = File::open(path)?;
let mut buffer = Vec::new();
// read the whole file
file.read_to_end(&mut buffer)?;
trace!("importing control file {}", path.display());
// Import it as ControlFile
timeline.put_page_image(
RelishTag::ControlFile,
0,
lsn,
Bytes::copy_from_slice(&buffer[..]),
)?;
// Extract the checkpoint record and import it separately.
let pg_control = ControlFileData::decode(&buffer)?;
let checkpoint_bytes = pg_control.checkPointCopy.encode();
timeline.put_page_image(RelishTag::Checkpoint, 0, lsn, checkpoint_bytes)?;
Ok(())
}
///
/// Import an SLRU segment file
///
fn import_slru_file(
timeline: &dyn TimelineWriter,
lsn: Lsn,
slru: SlruKind,
path: &Path,
) -> Result<()> {
// Does it look like an SLRU file?
let mut file = File::open(path)?;
let mut buf: [u8; 8192] = [0u8; 8192];
let segno = u32::from_str_radix(path.file_name().unwrap().to_str().unwrap(), 16)?;
trace!("importing slru file {}", path.display());
let mut rpageno = 0;
loop {
let r = file.read_exact(&mut buf);
match r {
Ok(_) => {
timeline.put_page_image(
RelishTag::Slru { slru, segno },
rpageno,
lsn,
Bytes::copy_from_slice(&buf),
)?;
}
// TODO: UnexpectedEof is expected
Err(err) => match err.kind() {
std::io::ErrorKind::UnexpectedEof => {
// reached EOF. That's expected.
// FIXME: maybe check that we read the full length of the file?
break;
}
_ => {
bail!("error reading file {}: {:#}", path.display(), err);
}
},
};
rpageno += 1;
// TODO: Check that the file isn't unexpectedly large, not larger than SLRU_PAGES_PER_SEGMENT pages
}
Ok(())
}
///
/// Helper function to parse a WAL record and call the Timeline's PUT functions for all the
/// relations/pages that the record affects.
///
pub fn save_decoded_record(
checkpoint: &mut CheckPoint,
checkpoint_modified: &mut bool,
timeline: &dyn TimelineWriter,
decoded: &DecodedWALRecord,
recdata: Bytes,
lsn: Lsn,
) -> Result<()> {
if checkpoint.update_next_xid(decoded.xl_xid) {
*checkpoint_modified = true;
}
// Iterate through all the blocks that the record modifies, and
// "put" a separate copy of the record for each block.
for blk in decoded.blocks.iter() {
let tag = RelishTag::Relation(RelTag {
spcnode: blk.rnode_spcnode,
dbnode: blk.rnode_dbnode,
relnode: blk.rnode_relnode,
forknum: blk.forknum as u8,
});
let rec = WALRecord {
will_init: blk.will_init || blk.apply_image,
rec: recdata.clone(),
main_data_offset: decoded.main_data_offset as u32,
};
timeline.put_wal_record(lsn, tag, blk.blkno, rec)?;
}
let mut buf = decoded.record.clone();
buf.advance(decoded.main_data_offset);
// Handle a few special record types
if decoded.xl_rmid == pg_constants::RM_SMGR_ID
&& (decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK) == pg_constants::XLOG_SMGR_TRUNCATE
{
let truncate = XlSmgrTruncate::decode(&mut buf);
save_xlog_smgr_truncate(timeline, lsn, &truncate)?;
} else if decoded.xl_rmid == pg_constants::RM_DBASE_ID {
if (decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK) == pg_constants::XLOG_DBASE_CREATE {
let createdb = XlCreateDatabase::decode(&mut buf);
save_xlog_dbase_create(timeline, lsn, &createdb)?;
} else if (decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK)
== pg_constants::XLOG_DBASE_DROP
{
let dropdb = XlDropDatabase::decode(&mut buf);
// To drop the database, we need to drop all the relations in it. Like in
// save_xlog_dbase_create(), use the previous record's LSN in the list_rels() call
let req_lsn = min(timeline.get_last_record_lsn(), lsn);
for tablespace_id in dropdb.tablespace_ids {
let rels = timeline.list_rels(tablespace_id, dropdb.db_id, req_lsn)?;
for rel in rels {
timeline.drop_relish(rel, lsn)?;
}
trace!(
"Drop FileNodeMap {}, {} at lsn {}",
tablespace_id,
dropdb.db_id,
lsn
);
timeline.drop_relish(
RelishTag::FileNodeMap {
spcnode: tablespace_id,
dbnode: dropdb.db_id,
},
lsn,
)?;
}
}
} else if decoded.xl_rmid == pg_constants::RM_TBLSPC_ID {
trace!("XLOG_TBLSPC_CREATE/DROP is not handled yet");
} else if decoded.xl_rmid == pg_constants::RM_CLOG_ID {
let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK;
if info == pg_constants::CLOG_ZEROPAGE {
let pageno = buf.get_u32_le();
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
timeline.put_page_image(
RelishTag::Slru {
slru: SlruKind::Clog,
segno,
},
rpageno,
lsn,
ZERO_PAGE.clone(),
)?;
} else {
assert!(info == pg_constants::CLOG_TRUNCATE);
let xlrec = XlClogTruncate::decode(&mut buf);
save_clog_truncate_record(checkpoint, checkpoint_modified, timeline, lsn, &xlrec)?;
}
} else if decoded.xl_rmid == pg_constants::RM_XACT_ID {
let info = decoded.xl_info & pg_constants::XLOG_XACT_OPMASK;
if info == pg_constants::XLOG_XACT_COMMIT || info == pg_constants::XLOG_XACT_ABORT {
let parsed_xact = XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
save_xact_record(timeline, lsn, &parsed_xact, decoded)?;
} else if info == pg_constants::XLOG_XACT_COMMIT_PREPARED
|| info == pg_constants::XLOG_XACT_ABORT_PREPARED
{
let parsed_xact = XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
save_xact_record(timeline, lsn, &parsed_xact, decoded)?;
// Remove twophase file. see RemoveTwoPhaseFile() in postgres code
trace!(
"Drop twophaseFile for xid {} parsed_xact.xid {} here at {}",
decoded.xl_xid,
parsed_xact.xid,
lsn
);
timeline.drop_relish(
RelishTag::TwoPhase {
xid: parsed_xact.xid,
},
lsn,
)?;
} else if info == pg_constants::XLOG_XACT_PREPARE {
let mut buf = decoded.record.clone();
buf.advance(decoded.main_data_offset);
timeline.put_page_image(
RelishTag::TwoPhase {
xid: decoded.xl_xid,
},
0,
lsn,
Bytes::copy_from_slice(&buf[..]),
)?;
}
} else if decoded.xl_rmid == pg_constants::RM_MULTIXACT_ID {
let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE {
let pageno = buf.get_u32_le();
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
timeline.put_page_image(
RelishTag::Slru {
slru: SlruKind::MultiXactOffsets,
segno,
},
rpageno,
lsn,
ZERO_PAGE.clone(),
)?;
} else if info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE {
let pageno = buf.get_u32_le();
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
timeline.put_page_image(
RelishTag::Slru {
slru: SlruKind::MultiXactMembers,
segno,
},
rpageno,
lsn,
ZERO_PAGE.clone(),
)?;
} else if info == pg_constants::XLOG_MULTIXACT_CREATE_ID {
let xlrec = XlMultiXactCreate::decode(&mut buf);
save_multixact_create_record(
checkpoint,
checkpoint_modified,
timeline,
lsn,
&xlrec,
decoded,
)?;
} else if info == pg_constants::XLOG_MULTIXACT_TRUNCATE_ID {
let xlrec = XlMultiXactTruncate::decode(&mut buf);
save_multixact_truncate_record(checkpoint, checkpoint_modified, timeline, lsn, &xlrec)?;
}
} else if decoded.xl_rmid == pg_constants::RM_RELMAP_ID {
let xlrec = XlRelmapUpdate::decode(&mut buf);
save_relmap_page(timeline, lsn, &xlrec, decoded)?;
} else if decoded.xl_rmid == pg_constants::RM_XLOG_ID {
let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
if info == pg_constants::XLOG_NEXTOID {
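// XLOG_NEXTOID advances the OID counter; keep our cached CheckPoint in sync
// so that nextOid stays current.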
let next_oid = buf.get_u32_le();
if checkpoint.nextOid != next_oid {
checkpoint.nextOid = next_oid;
*checkpoint_modified = true;
}
} else if info == pg_constants::XLOG_CHECKPOINT_ONLINE
|| info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
{
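// For online and shutdown checkpoints the main data of the record is a full
// CheckPoint struct; decode it so we can pick up fields that postgres
// maintains there (e.g. oldestXid, used just below).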
let mut checkpoint_bytes = [0u8; SIZEOF_CHECKPOINT];
let mut buf = decoded.record.clone();
buf.advance(decoded.main_data_offset);
buf.copy_to_slice(&mut checkpoint_bytes);
let xlog_checkpoint = CheckPoint::decode(&checkpoint_bytes).unwrap();
trace!(
"xlog_checkpoint.oldestXid={}, checkpoint.oldestXid={}",
xlog_checkpoint.oldestXid,
checkpoint.oldestXid
);
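// XIDs wrap around, so compare them in circular (modulo 2^32) order: the
// wrapping subtraction interpreted as i32 is negative exactly when our
// oldestXid logically precedes the one in the checkpoint record, i.e. the
// record carries a newer value.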
if (checkpoint.oldestXid.wrapping_sub(xlog_checkpoint.oldestXid) as i32) < 0 {
checkpoint.oldestXid = xlog_checkpoint.oldestXid;
*checkpoint_modified = true;
}
}
}
Ok(())
}
/// Subroutine of save_decoded_record(), to handle an XLOG_DBASE_CREATE record.
fn save_xlog_dbase_create(
timeline: &dyn TimelineWriter,
lsn: Lsn,
rec: &XlCreateDatabase,
) -> Result<()> {
let db_id = rec.db_id;
let tablespace_id = rec.tablespace_id;
let src_db_id = rec.src_db_id;
let src_tablespace_id = rec.src_tablespace_id;
// Creating a database is implemented by copying the template (aka. source) database.
// To copy all the relations, we need to ask for the state as of the same LSN, but we
// cannot pass 'lsn' to the Timeline.get_* functions, or they will block waiting for
// the last valid LSN to advance up to it. So we use the previous record's LSN in the
// get calls instead.
let req_lsn = min(timeline.get_last_record_lsn(), lsn);
let rels = timeline.list_rels(src_tablespace_id, src_db_id, req_lsn)?;
trace!("save_create_database: {} rels", rels.len());
let mut num_rels_copied = 0;
let mut num_blocks_copied = 0;
for rel in rels {
if let RelishTag::Relation(src_rel) = rel {
assert_eq!(src_rel.spcnode, src_tablespace_id);
assert_eq!(src_rel.dbnode, src_db_id);
let nblocks = timeline.get_relish_size(rel, req_lsn)?.unwrap_or(0);
let dst_rel = RelTag {
spcnode: tablespace_id,
dbnode: db_id,
relnode: src_rel.relnode,
forknum: src_rel.forknum,
};
// Copy content
for blknum in 0..nblocks {
let content = timeline.get_page_at_lsn(rel, blknum, req_lsn)?;
debug!("copying block {} from {} to {}", blknum, src_rel, dst_rel);
timeline.put_page_image(RelishTag::Relation(dst_rel), blknum, lsn, content)?;
num_blocks_copied += 1;
}
if nblocks == 0 {
// make sure we have some trace of the relation, even if it's empty
timeline.put_truncation(RelishTag::Relation(dst_rel), lsn, 0)?;
}
num_rels_copied += 1;
}
}
// Copy relfilemap
// TODO This implementation is very inefficient -
// it scans all non-rels only to find FileNodeMaps
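// pg_filenode.map maps the OIDs of mapped catalog relations to their
// relfilenodes, so the new database needs its own copy of the template's map.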
for tag in timeline.list_nonrels(req_lsn)? {
if let RelishTag::FileNodeMap { spcnode, dbnode } = tag {
if spcnode == src_tablespace_id && dbnode == src_db_id {
let img = timeline.get_page_at_lsn(tag, 0, req_lsn)?;
let new_tag = RelishTag::FileNodeMap {
spcnode: tablespace_id,
dbnode: db_id,
};
timeline.put_page_image(new_tag, 0, lsn, img)?;
break;
}
}
}
info!(
"Created database {}/{}, copied {} blocks in {} rels at {}",
tablespace_id, db_id, num_blocks_copied, num_rels_copied, lsn
);
Ok(())
}
/// Subroutine of save_decoded_record(), to handle an XLOG_SMGR_TRUNCATE record.
///
/// This is the same logic as in PostgreSQL's smgr_redo() function.
fn save_xlog_smgr_truncate(
timeline: &dyn TimelineWriter,
lsn: Lsn,
rec: &XlSmgrTruncate,
) -> Result<()> {
let spcnode = rec.rnode.spcnode;
let dbnode = rec.rnode.dbnode;
let relnode = rec.rnode.relnode;
if (rec.flags & pg_constants::SMGR_TRUNCATE_HEAP) != 0 {
let rel = RelTag {
spcnode,
dbnode,
relnode,
forknum: pg_constants::MAIN_FORKNUM,
};
timeline.put_truncation(RelishTag::Relation(rel), lsn, rec.blkno)?;
}
if (rec.flags & pg_constants::SMGR_TRUNCATE_FSM) != 0 {
let rel = RelTag {
spcnode,
dbnode,
relnode,
forknum: pg_constants::FSM_FORKNUM,
};
// FIXME: 'blkno' stored in the WAL record is the new size of the
// heap. The formula for calculating the new size of the FSM is
// pretty complicated (see FreeSpaceMapPrepareTruncateRel() in
// PostgreSQL), and we should also clear bits in the tail FSM block,
// and update the upper level FSM pages. None of that has been
// implemented. What we do instead, is always just truncate the FSM
// to zero blocks. That's bad for performance, but safe. (The FSM
// isn't needed for correctness, so we could also leave garbage in
// it. Seems more tidy to zap it away.)
if rec.blkno != 0 {
info!("Partial truncation of FSM is not supported");
}
let num_fsm_blocks = 0;
timeline.put_truncation(RelishTag::Relation(rel), lsn, num_fsm_blocks)?;
}
if (rec.flags & pg_constants::SMGR_TRUNCATE_VM) != 0 {
let rel = RelTag {
spcnode,
dbnode,
relnode,
forknum: pg_constants::VISIBILITYMAP_FORKNUM,
};
// FIXME: Like with the FSM above, the logic to truncate the VM
// correctly has not been implemented. Just zap it away completely,
// always. Unlike the FSM, the VM must never have bits incorrectly
// set. From a correctness point of view, it's always OK to clear
// bits or remove it altogether, though.
if rec.blkno != 0 {
info!("Partial truncation of VM is not supported");
}
let num_vm_blocks = 0;
timeline.put_truncation(RelishTag::Relation(rel), lsn, num_vm_blocks)?;
}
Ok(())
}
/// Subroutine of save_decoded_record(), to handle XLOG_XACT_* records.
///
fn save_xact_record(
timeline: &dyn TimelineWriter,
lsn: Lsn,
parsed: &XlXactParsedRecord,
decoded: &DecodedWALRecord,
) -> Result<()> {
// Record update of CLOG page
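// The clog keeps two status bits per transaction, CLOG_XACTS_PER_PAGE of them
// per page and SLRU_PAGES_PER_SEGMENT pages per segment file. For example,
// with the default 8 KB block size (32768 xacts per page, 32 pages per
// segment), xid 100000 maps to page 3, i.e. block 3 of segment 0.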
let mut pageno = parsed.xid / pg_constants::CLOG_XACTS_PER_PAGE;
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
let rec = WALRecord {
will_init: false,
rec: decoded.record.clone(),
main_data_offset: decoded.main_data_offset as u32,
};
timeline.put_wal_record(
lsn,
RelishTag::Slru {
slru: SlruKind::Clog,
segno,
},
rpageno,
rec.clone(),
)?;
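// Subtransactions may have their commit status on different CLOG pages than
// the main XID; put the same WAL record against each additional page touched.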
for subxact in &parsed.subxacts {
let subxact_pageno = subxact / pg_constants::CLOG_XACTS_PER_PAGE;
if subxact_pageno != pageno {
pageno = subxact_pageno;
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
timeline.put_wal_record(
lsn,
RelishTag::Slru {
slru: SlruKind::Clog,
segno,
},
rpageno,
rec.clone(),
)?;
}
}
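// 'xnodes' lists the relations dropped by this transaction (xl_xact_relfilenodes
// in postgres). Once the commit/abort has been replayed they can be removed,
// so drop the main, FSM and visibility map forks of each one.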
for xnode in &parsed.xnodes {
for forknum in pg_constants::MAIN_FORKNUM..=pg_constants::VISIBILITYMAP_FORKNUM {
let rel = RelTag {
forknum,
spcnode: xnode.spcnode,
dbnode: xnode.dbnode,
relnode: xnode.relnode,
};
timeline.drop_relish(RelishTag::Relation(rel), lsn)?;
}
}
Ok(())
}
fn save_clog_truncate_record(
checkpoint: &mut CheckPoint,
checkpoint_modified: &mut bool,
timeline: &dyn TimelineWriter,
lsn: Lsn,
xlrec: &XlClogTruncate,
) -> Result<()> {
info!(
"RM_CLOG_ID truncate pageno {} oldestXid {} oldestXidDB {} lsn {}",
xlrec.pageno, xlrec.oldest_xid, xlrec.oldest_xid_db, lsn
);
// Here we treat oldestXid and oldestXidDB
// differently from the postgres redo routines.
// In postgres, checkpoint.oldestXid lags behind xlrec.oldest_xid
// until a checkpoint happens and updates the value.
// Here we can use the most recent value right away.
// It's just an optimization, though, and could be removed.
// TODO Figure out whether this causes any issues on replicas.
checkpoint.oldestXid = xlrec.oldest_xid;
checkpoint.oldestXidDB = xlrec.oldest_xid_db;
*checkpoint_modified = true;
// TODO Handle AdvanceOldestClogXid(), or document why we don't need it here.
let latest_page_number = checkpoint.nextXid.value as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
// Now delete all segments containing pages between xlrec.pageno
// and latest_page_number.
// First, make an important safety check:
// the current endpoint page must not be eligible for removal.
// See SimpleLruTruncate() in slru.c
if clogpage_precedes(latest_page_number, xlrec.pageno) {
info!("could not truncate directory pg_xact apparent wraparound");
return Ok(());
}
// Iterate over the SLRU CLOG segments and drop the ones that are ready to be truncated.
// TODO This implementation is very inefficient -
// it scans all non-rels only to find Clog
//
// We cannot pass 'lsn' to the Timeline.list_nonrels(), or it
// will block waiting for the last valid LSN to advance up to
// it. So we use the previous record's LSN in the get calls
// instead.
let req_lsn = min(timeline.get_last_record_lsn(), lsn);
for obj in timeline.list_nonrels(req_lsn)? {
if let RelishTag::Slru { slru, segno } = obj {
if slru == SlruKind::Clog {
let segpage = segno * pg_constants::SLRU_PAGES_PER_SEGMENT;
if slru_may_delete_clogsegment(segpage, xlrec.pageno) {
timeline.drop_relish(RelishTag::Slru { slru, segno }, lsn)?;
trace!("Drop CLOG segment {:>04X} at lsn {}", segno, lsn);
}
}
}
}
Ok(())
}
fn save_multixact_create_record(
checkpoint: &mut CheckPoint,
checkpoint_modified: &mut bool,
timeline: &dyn TimelineWriter,
lsn: Lsn,
xlrec: &XlMultiXactCreate,
decoded: &DecodedWALRecord,
) -> Result<()> {
let rec = WALRecord {
will_init: false,
rec: decoded.record.clone(),
main_data_offset: decoded.main_data_offset as u32,
};
let pageno = xlrec.mid / pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32;
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
timeline.put_wal_record(
lsn,
RelishTag::Slru {
slru: SlruKind::MultiXactOffsets,
segno,
},
rpageno,
rec.clone(),
)?;
let first_mbr_pageno = xlrec.moff / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
let last_mbr_pageno =
(xlrec.moff + xlrec.nmembers - 1) / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
// The members SLRU can, in contrast to the offsets one, be filled to almost
// the full range at once. So we need to handle wraparound.
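// The loop below walks every members page in [first_mbr_pageno, last_mbr_pageno],
// advancing one page at a time and wrapping from MAX_MBR_BLKNO back to block 0
// when the member range crosses the end of the members space.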
let mut pageno = first_mbr_pageno;
loop {
// Update members page
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
timeline.put_wal_record(
lsn,
RelishTag::Slru {
slru: SlruKind::MultiXactMembers,
segno,
},
rpageno,
rec.clone(),
)?;
if pageno == last_mbr_pageno {
// last block inclusive
break;
}
// handle wraparound
if pageno == MAX_MBR_BLKNO {
pageno = 0;
} else {
pageno += 1;
}
}
if xlrec.mid >= checkpoint.nextMulti {
checkpoint.nextMulti = xlrec.mid + 1;
*checkpoint_modified = true;
}
if xlrec.moff + xlrec.nmembers > checkpoint.nextMultiOffset {
checkpoint.nextMultiOffset = xlrec.moff + xlrec.nmembers;
*checkpoint_modified = true;
}
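// Find the largest member XID in circular XID order (same wrapping comparison
// as for oldestXid above) so nextXid can be advanced if this multixact
// references a newer transaction.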
let max_mbr_xid = xlrec.members.iter().fold(0u32, |acc, mbr| {
if mbr.xid.wrapping_sub(acc) as i32 > 0 {
mbr.xid
} else {
acc
}
});
if checkpoint.update_next_xid(max_mbr_xid) {
*checkpoint_modified = true;
}
Ok(())
}
fn save_multixact_truncate_record(
checkpoint: &mut CheckPoint,
checkpoint_modified: &mut bool,
timeline: &dyn TimelineWriter,
lsn: Lsn,
xlrec: &XlMultiXactTruncate,
) -> Result<()> {
checkpoint.oldestMulti = xlrec.end_trunc_off;
checkpoint.oldestMultiDB = xlrec.oldest_multi_db;
*checkpoint_modified = true;
// PerformMembersTruncation
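// This mirrors PerformMembersTruncation() in postgres: walk the member
// segments from startsegment towards endsegment, wrapping around at maxsegment.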
let maxsegment: i32 = mx_offset_to_member_segment(pg_constants::MAX_MULTIXACT_OFFSET);
let startsegment: i32 = mx_offset_to_member_segment(xlrec.start_trunc_memb);
let endsegment: i32 = mx_offset_to_member_segment(xlrec.end_trunc_memb);
let mut segment: i32 = startsegment;
// Delete all the segments except the last one. The last segment can still
// contain, possibly partially, valid data.
while segment != endsegment {
timeline.drop_relish(
RelishTag::Slru {
slru: SlruKind::MultiXactMembers,
segno: segment as u32,
},
lsn,
)?;
/* move to next segment, handling wraparound correctly */
if segment == maxsegment {
segment = 0;
} else {
segment += 1;
}
}
// Truncate offsets
// FIXME: this did not handle wraparound correctly
Ok(())
}
fn save_relmap_page(
timeline: &dyn TimelineWriter,
lsn: Lsn,
xlrec: &XlRelmapUpdate,
decoded: &DecodedWALRecord,
) -> Result<()> {
let tag = RelishTag::FileNodeMap {
spcnode: xlrec.tsid,
dbnode: xlrec.dbid,
};
let mut buf = decoded.record.clone();
buf.advance(decoded.main_data_offset);
// Skip the fixed-size xl_relmap_update header (12 bytes); the rest of the
// record is the new relmap file image.
buf.advance(12);
timeline.put_page_image(tag, 0, lsn, Bytes::copy_from_slice(&buf[..]))?;
Ok(())
}

Some files were not shown because too many files have changed in this diff.