Mirror of https://github.com/neondatabase/neon.git, synced 2026-02-03 02:30:37 +00:00
Compare commits: set_hints_ ... vecmap-del (175 commits)
@@ -24,6 +24,12 @@ jobs:
# A job to build postgres
build-postgres:
executor: zenith-build-executor
parameters:
build_type:
type: enum
enum: ["debug", "release"]
environment:
BUILD_TYPE: << parameters.build_type >>
steps:
# Checkout the git repo (circleci doesn't have a flag to enable submodules here)
- checkout
@@ -39,7 +45,7 @@ jobs:
name: Restore postgres cache
keys:
# Restore ONLY if the rev key matches exactly
- v03-postgres-cache-{{ checksum "/tmp/cache-key-postgres" }}
- v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}

# FIXME We could cache our own docker container, instead of installing packages every time.
- run:
@@ -59,12 +65,12 @@ jobs:
if [ ! -e tmp_install/bin/postgres ]; then
# "depth 1" saves some time by not cloning the whole repo
git submodule update --init --depth 1
make postgres
make postgres -j8
fi

- save_cache:
name: Save postgres cache
key: v03-postgres-cache-{{ checksum "/tmp/cache-key-postgres" }}
key: v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}
paths:
- tmp_install

@@ -96,7 +102,7 @@ jobs:
name: Restore postgres cache
keys:
# Restore ONLY if the rev key matches exactly
- v03-postgres-cache-{{ checksum "/tmp/cache-key-postgres" }}
- v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}

- restore_cache:
name: Restore rust cache
@@ -128,6 +134,14 @@ jobs:
- ~/.cargo/git
- target

# Run style checks
# has to run separately from cargo fmt section
# since needs to run with dependencies
- run:
name: clippy
command: |
./run_clippy.sh

# Run rust unit tests
- run: cargo test

@@ -193,6 +207,9 @@ jobs:
needs_postgres_source:
type: boolean
default: false
run_in_parallel:
type: boolean
default: true
steps:
- attach_workspace:
at: /tmp/zenith
@@ -221,6 +238,9 @@ jobs:
echo "test_selection must be set"
exit 1
fi
if << parameters.run_in_parallel >>; then
EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
fi;
# Run the tests.
#
# The junit.xml file allows CircleCI to display more fine-grained test information
@@ -230,6 +250,7 @@ jobs:
# --verbose prints name of each test (helpful when there are
# multiple tests in one file)
# -rA prints summary in the end
# -n4 uses four processes to run tests via pytest-xdist
pipenv run pytest --junitxml=$TEST_OUTPUT/junit.xml --tb=short -s --verbose -rA $TEST_SELECTION $EXTRA_PARAMS
- run:
# CircleCI artifacts are preserved one file at a time, so skipping
@@ -239,7 +260,7 @@ jobs:
when: always
command: |
du -sh /tmp/test_output/*
find /tmp/test_output -type f ! -name "pg.log" ! -name "pageserver.log" ! -name "wal_acceptor.log" ! -name "regression.diffs" ! -name "junit.xml" -delete
find /tmp/test_output -type f ! -name "pg.log" ! -name "pageserver.log" ! -name "wal_acceptor.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" -delete
du -sh /tmp/test_output/*
- store_artifacts:
path: /tmp/test_output
@@ -264,20 +285,69 @@ jobs:
echo $DOCKER_PWD | docker login -u $DOCKER_LOGIN --password-stdin
docker build -t zenithdb/zenith:latest . && docker push zenithdb/zenith:latest

# Trigger a new remote CI job
remote-ci-trigger:
docker:
- image: cimg/base:2021.04
parameters:
remote_repo:
type: string
environment:
REMOTE_REPO: << parameters.remote_repo >>
steps:
- run:
name: Set PR's status to pending
command: |
LOCAL_REPO=$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME

curl -f -X POST \
https://api.github.com/repos/$LOCAL_REPO/statuses/$CIRCLE_SHA1 \
-H "Accept: application/vnd.github.v3+json" \
--user "$CI_ACCESS_TOKEN" \
--data \
"{
\"state\": \"pending\",
\"context\": \"zenith-remote-ci\",
\"description\": \"[$REMOTE_REPO] Remote CI job is about to start\"
}"
- run:
name: Request a remote CI test
command: |
LOCAL_REPO=$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME

curl -f -X POST \
https://api.github.com/repos/$REMOTE_REPO/actions/workflows/testing.yml/dispatches \
-H "Accept: application/vnd.github.v3+json" \
--user "$CI_ACCESS_TOKEN" \
--data \
"{
\"ref\": \"main\",
\"inputs\": {
\"ci_job_name\": \"zenith-remote-ci\",
\"commit_hash\": \"$CIRCLE_SHA1\",
\"remote_repo\": \"$LOCAL_REPO\",
\"zenith_image_branch\": \"$CIRCLE_BRANCH\"
}
}"

workflows:
build_and_test:
jobs:
- check-codestyle
- build-postgres
- build-postgres:
name: build-postgres-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
- build-zenith:
name: build-zenith-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
requires:
- build-postgres
- build-postgres-<< matrix.build_type >>
- run-pytest:
name: pg_regress tests << matrix.build_type >>
name: pg_regress-tests-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
@@ -286,7 +356,7 @@ workflows:
requires:
- build-zenith-<< matrix.build_type >>
- run-pytest:
name: other tests << matrix.build_type >>
name: other-tests-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
@@ -297,6 +367,7 @@ workflows:
name: benchmarks
build_type: release
test_selection: performance
run_in_parallel: false
requires:
- build-zenith-release
- docker-image:
@@ -308,5 +379,16 @@ workflows:
only:
- main
requires:
- pg_regress tests release
- other tests release
- pg_regress-tests-release
- other-tests-release
- remote-ci-trigger:
# Context passes credentials for gh api
context: CI_ACCESS_TOKEN
remote_repo: "zenithdb/console"
requires:
# XXX: Successful build doesn't mean everything is OK, but
# the job to be triggered takes so much time to complete (~22 min)
# that it's better not to wait for the commented-out steps
- build-zenith-debug
# - pg_regress-tests-release
# - other-tests-release

@@ -2,12 +2,17 @@
**/__pycache__
**/.pytest_cache

/target
/tmp_check
/tmp_install
/tmp_check_cli
/test_output
/.vscode
/.zenith
/integration_tests/.zenith
/Dockerfile
.git
target
tmp_check
tmp_install
tmp_check_cli
test_output
.vscode
.zenith
integration_tests/.zenith
.mypy_cache

Dockerfile
.dockerignore
125 Cargo.lock (generated)
@@ -307,6 +307,26 @@ dependencies = [
"vec_map",
]

[[package]]
name = "const_format"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4556f63e28a78fa5e6f310cfea5647a25636def49a338ab69e33b34a3382057b"
dependencies = [
"const_format_proc_macros",
]

[[package]]
name = "const_format_proc_macros"
version = "0.2.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "552782506c398da94466b364973b563887e0ca078bf33a76d4163736165e3594"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]

[[package]]
name = "control_plane"
version = "0.1.0"
@@ -325,6 +345,7 @@ dependencies = [
"serde",
"serde_json",
"tar",
"thiserror",
"toml",
"url",
"walkeeper",
@@ -1172,33 +1193,33 @@ name = "pageserver"
version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
"bookfile",
"byteorder",
"bytes",
"chrono",
"clap",
"const_format",
"crc32c",
"daemonize",
"futures",
"hex",
"hex-literal",
"humantime",
"hyper",
"lazy_static",
"log",
"postgres",
"postgres-protocol 0.6.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)",
"postgres-types 0.2.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)",
"postgres-protocol",
"postgres-types",
"postgres_ffi",
"rand",
"regex",
"routerify",
"rust-s3",
"scopeguard",
"serde",
"serde_json",
"slog",
"slog-scope",
"slog-stdlog",
"slog-term",
"tar",
"thiserror",
"tokio",
@@ -1301,27 +1322,9 @@ dependencies = [
"fallible-iterator",
"futures",
"log",
"postgres-protocol 0.6.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)",
"postgres-protocol",
"tokio",
"tokio-postgres 0.7.1",
]

[[package]]
name = "postgres-protocol"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff3e0f70d32e20923cabf2df02913be7c1842d4c772db8065c00fcfdd1d1bff3"
dependencies = [
"base64 0.13.0",
"byteorder",
"bytes",
"fallible-iterator",
"hmac",
"md-5",
"memchr",
"rand",
"sha2",
"stringprep",
"tokio-postgres",
]

[[package]]
@@ -1342,17 +1345,6 @@ dependencies = [
"stringprep",
]

[[package]]
name = "postgres-types"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "430f4131e1b7657b0cd9a2b0c3408d77c9a43a042d300b8c77f981dffcc43a2f"
dependencies = [
"bytes",
"fallible-iterator",
"postgres-protocol 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
]

[[package]]
name = "postgres-types"
version = "0.2.1"
@@ -1360,7 +1352,7 @@ source = "git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b
dependencies = [
"bytes",
"fallible-iterator",
"postgres-protocol 0.6.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)",
"postgres-protocol",
]

[[package]]
@@ -1423,16 +1415,9 @@ dependencies = [
"lazy_static",
"memchr",
"parking_lot",
"protobuf",
"thiserror",
]

[[package]]
name = "protobuf"
version = "2.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db50e77ae196458ccd3dc58a31ea1a90b0698ab1b7928d89f644c25d72070267"

[[package]]
name = "proxy"
version = "0.1.0"
@@ -1443,11 +1428,12 @@ dependencies = [
"hex",
"md5",
"rand",
"reqwest",
"rustls",
"serde",
"serde_json",
"tokio",
"tokio-postgres 0.7.2",
"tokio-postgres",
"zenith_utils",
]

@@ -2135,9 +2121,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"

[[package]]
name = "tokio"
version = "1.8.1"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98c8b05dc14c75ea83d63dd391100353789f5f24b8b3866542a5e85c8be8e985"
checksum = "b4efe6fc2395938c8155973d7be49fe8d03a843726e285e100a8a383cc0154ce"
dependencies = [
"autocfg",
"bytes",
@@ -2146,7 +2132,6 @@ dependencies = [
"mio",
"num_cpus",
"once_cell",
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"tokio-macros",
@@ -2189,31 +2174,8 @@ dependencies = [
"percent-encoding",
"phf",
"pin-project-lite",
"postgres-protocol 0.6.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)",
"postgres-types 0.2.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)",
"socket2",
"tokio",
"tokio-util",
]

[[package]]
name = "tokio-postgres"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d2b1383c7e4fb9a09e292c7c6afb7da54418d53b045f1c1fac7a911411a2b8b"
dependencies = [
"async-trait",
"byteorder",
"bytes",
"fallible-iterator",
"futures",
"log",
"parking_lot",
"percent-encoding",
"phf",
"pin-project-lite",
"postgres-protocol 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"postgres-types 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"postgres-protocol",
"postgres-types",
"socket2",
"tokio",
"tokio-util",
@@ -2386,16 +2348,12 @@ dependencies = [
"log",
"pageserver",
"postgres",
"postgres-protocol 0.6.1 (git+https://github.com/zenithdb/rust-postgres.git?rev=9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858)",
"postgres-protocol",
"postgres_ffi",
"regex",
"rust-s3",
"serde",
"slog",
"slog-async",
"slog-scope",
"slog-stdlog",
"slog-term",
"serde_json",
"tokio",
"tokio-stream",
"walkdir",
@@ -2620,6 +2578,7 @@ version = "0.1.0"
dependencies = [
"lazy_static",
"libc",
"once_cell",
"prometheus",
]

@@ -2644,6 +2603,12 @@ dependencies = [
"rustls-split",
"serde",
"serde_json",
"slog",
"slog-async",
"slog-scope",
"slog-stdlog",
"slog-term",
"tempfile",
"thiserror",
"tokio",
"webpki",
34 Dockerfile
@@ -10,39 +10,21 @@ FROM zenithdb/build:buster AS pg-build
WORKDIR /zenith
COPY ./vendor/postgres vendor/postgres
COPY ./Makefile Makefile
ENV BUILD_TYPE release
RUN make -j $(getconf _NPROCESSORS_ONLN) -s postgres

#
# Calculate cargo dependencies.
# This will always run, but only generate recipe.json with list of dependencies without
# installing them.
#
FROM zenithdb/build:buster AS cargo-deps-inspect
WORKDIR /zenith
COPY . .
RUN cargo chef prepare --recipe-path /zenith/recipe.json

#
# Build cargo dependencies.
# This temp cantainner should be rebuilt only if recipe.json was changed.
#
FROM zenithdb/build:buster AS deps-build
WORKDIR /zenith
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY --from=cargo-deps-inspect /usr/local/cargo/bin/cargo-chef /usr/local/cargo/bin/
COPY --from=cargo-deps-inspect /zenith/recipe.json recipe.json
RUN ROCKSDB_LIB_DIR=/usr/lib/ cargo chef cook --release --recipe-path recipe.json
RUN rm -rf postgres_install/build

#
# Build zenith binaries
#
# TODO: build cargo deps as separate layer. We used cargo-chef before but that was
# net time waste in a lot of cases. Copying Cargo.lock with empty lib.rs should do the work.
#
FROM zenithdb/build:buster AS build
WORKDIR /zenith
COPY . .
# Copy cached dependencies
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY --from=deps-build /zenith/target target
COPY --from=deps-build /usr/local/cargo/ /usr/local/cargo/

COPY . .
RUN cargo build --release

#
@@ -51,7 +33,7 @@ RUN cargo build --release
FROM debian:buster-slim
WORKDIR /data

RUN apt-get update && apt-get -yq install librocksdb-dev libseccomp-dev openssl && \
RUN apt-get update && apt-get -yq install libreadline-dev libseccomp-dev openssl ca-certificates && \
mkdir zenith_install

COPY --from=build /zenith/target/release/pageserver /usr/local/bin

@@ -9,7 +9,7 @@ WORKDIR /zenith
# Install postgres and zenith build dependencies
# clang is for rocksdb
RUN apt-get update && apt-get -yq install automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
libseccomp-dev pkg-config libssl-dev librocksdb-dev clang
libseccomp-dev pkg-config libssl-dev clang

# Install rust tools
RUN rustup component add clippy && cargo install cargo-chef cargo-audit
RUN rustup component add clippy && cargo install cargo-audit
18 Makefile
@@ -6,6 +6,18 @@ else
SECCOMP =
endif

#
# We differentiate between release / debug build types using the BUILD_TYPE
# environment variable.
#
ifeq ($(BUILD_TYPE),release)
PG_CONFIGURE_OPTS = --enable-debug
PG_CFLAGS = -O2 -g3 ${CFLAGS}
else
PG_CONFIGURE_OPTS = --enable-debug --enable-cassert --enable-depend
PG_CFLAGS = -O0 -g3 ${CFLAGS}
endif

#
# Top level Makefile to build Zenith and PostgreSQL
#
@@ -30,10 +42,8 @@ tmp_install/build/config.status:
+@echo "Configuring postgres build"
mkdir -p tmp_install/build
(cd tmp_install/build && \
../../vendor/postgres/configure CFLAGS='-O0 -g3 $(CFLAGS)' \
--enable-cassert \
--enable-debug \
--enable-depend \
../../vendor/postgres/configure CFLAGS='$(PG_CFLAGS)' \
$(PG_CONFIGURE_OPTS) \
$(SECCOMP) \
--prefix=$(abspath tmp_install) > configure.log)
23 README.md
@@ -6,7 +6,7 @@ Zenith substitutes PostgreSQL storage layer and redistributes data across a clus

A Zenith installation consists of Compute nodes and Storage engine.

Compute nodes are stateles PostgreSQL nodes, backed by zenith storage.
Compute nodes are stateless PostgreSQL nodes, backed by zenith storage.

Zenith storage engine consists of two major components:
- Pageserver. Scalable storage backend for compute nodes.
@@ -25,7 +25,7 @@ Pageserver consists of:
On Ubuntu or Debian this set of packages should be sufficient to build the code:
```text
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
libssl-dev clang
libssl-dev clang pkg-config libpq-dev
```

[Rust] 1.52 or later is also required.
@@ -108,6 +108,13 @@ postgres=# insert into t values(2,2);
INSERT 0 1
```

6. If you want to run tests afterwards (see below), you have to stop pageserver and all postgres instances you have just started:
```sh
> ./target/debug/zenith pg stop migration_check
> ./target/debug/zenith pg stop main
> ./target/debug/zenith stop
```

## Running tests

```sh
@@ -125,9 +132,19 @@ Now we use README files to cover design ideas and overall architecture for each

To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`

### Postgres-specific terms

Due to Zenith's very close relation with PostgreSQL internals, there are numerous specific terms used.
Same applies to certain spelling: i.e. we use MB to denote 1024 * 1024 bytes, while MiB would be technically more correct, it's inconsistent with what PostgreSQL code and its documentation use.

To get more familiar with this aspect, refer to:

- [Zenith glossary](/docs/glossary.md)
- [PostgreSQL glossary](https://www.postgresql.org/docs/13/glossary.html)
- Other PostgreSQL documentation and sources (Zenith fork sources can be found [here](https://github.com/zenithdb/postgres))

## Join the development

- Read `CONTRIBUTING.md` to learn about project code style and practices.
- Use glossary in [/docs/glossary.md](/docs/glossary.md)
- To get familiar with a source tree layout, use [/docs/sourcetree.md](/docs/sourcetree.md).
- To learn more about PostgreSQL internals, check http://www.interdb.jp/pg/index.html

@@ -16,6 +16,7 @@ toml = "0.5"
lazy_static = "1.4"
regex = "1"
anyhow = "1.0"
thiserror = "1"
bytes = "1.0.1"
nix = "0.20"
url = "2.2.2"

@@ -1,18 +1,16 @@
use std::fs::{self, File, OpenOptions};
use std::collections::BTreeMap;
use std::fs::{self, File};
use std::io::Write;
use std::net::SocketAddr;
use std::net::TcpStream;
use std::os::unix::fs::PermissionsExt;
use std::process::Command;
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{collections::BTreeMap, path::PathBuf};

use anyhow::{Context, Result};
use lazy_static::lazy_static;
use postgres_ffi::pg_constants;
use regex::Regex;
use zenith_utils::connstring::connection_host_port;
use zenith_utils::lsn::Lsn;
use zenith_utils::postgres_backend::AuthType;
@@ -20,6 +18,7 @@ use zenith_utils::zid::ZTenantId;
use zenith_utils::zid::ZTimelineId;

use crate::local_env::LocalEnv;
use crate::postgresql_conf::PostgresConf;
use crate::storage::PageServerNode;

//
@@ -89,15 +88,17 @@ impl ComputeControlPlane {
&mut self,
tenantid: ZTenantId,
branch_name: &str,
port: Option<u16>,
) -> Result<Arc<PostgresNode>> {
let timeline_id = self
.pageserver
.branch_get_by_name(&tenantid, branch_name)?
.timeline_id;

let port = port.unwrap_or_else(|| self.get_port());
let node = Arc::new(PostgresNode {
name: branch_name.to_owned(),
address: SocketAddr::new("127.0.0.1".parse().unwrap(), self.get_port()),
address: SocketAddr::new("127.0.0.1".parse().unwrap(), port),
env: self.env.clone(),
pageserver: Arc::clone(&self.pageserver),
is_test: false,
@@ -143,76 +144,25 @@ impl PostgresNode {
);
}

lazy_static! {
static ref CONF_PORT_RE: Regex = Regex::new(r"(?m)^\s*port\s*=\s*(\d+)\s*$").unwrap();
static ref CONF_TIMELINE_RE: Regex =
Regex::new(r"(?m)^\s*zenith.zenith_timeline\s*=\s*'(\w+)'\s*$").unwrap();
static ref CONF_TENANT_RE: Regex =
Regex::new(r"(?m)^\s*zenith.zenith_tenant\s*=\s*'(\w+)'\s*$").unwrap();
}

// parse data directory name
let fname = entry.file_name();
let name = fname.to_str().unwrap().to_string();

// find out tcp port in config file
// Read config file into memory
let cfg_path = entry.path().join("postgresql.conf");
let config = fs::read_to_string(cfg_path.clone()).with_context(|| {
format!(
"failed to read config file in {}",
cfg_path.to_str().unwrap()
)
})?;
let cfg_path_str = cfg_path.to_string_lossy();
let mut conf_file = File::open(&cfg_path)
.with_context(|| format!("failed to open config file in {}", cfg_path_str))?;
let conf = PostgresConf::read(&mut conf_file)
.with_context(|| format!("failed to read config file in {}", cfg_path_str))?;

// parse port
let err_msg = format!(
"failed to find port definition in config file {}",
cfg_path.to_str().unwrap()
);
let port: u16 = CONF_PORT_RE
.captures(config.as_str())
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 1"))?
.iter()
.last()
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 2"))?
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 3"))?
.as_str()
.parse()
.with_context(|| err_msg)?;
// Read a few options from the config file
let context = format!("in config file {}", cfg_path_str);
let port: u16 = conf.parse_field("port", &context)?;
let timelineid: ZTimelineId = conf.parse_field("zenith.zenith_timeline", &context)?;
let tenantid: ZTenantId = conf.parse_field("zenith.zenith_tenant", &context)?;

// parse timeline
let err_msg = format!(
"failed to find timeline definition in config file {}",
cfg_path.to_str().unwrap()
);
let timelineid: ZTimelineId = CONF_TIMELINE_RE
.captures(config.as_str())
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 1"))?
.iter()
.last()
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 2"))?
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 3"))?
.as_str()
.parse()
.with_context(|| err_msg)?;

// parse tenant
let err_msg = format!(
"failed to find tenant definition in config file {}",
cfg_path.to_str().unwrap()
);
let tenantid = CONF_TENANT_RE
.captures(config.as_str())
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 1"))?
.iter()
.last()
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 2"))?
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 3"))?
.as_str()
.parse()
.with_context(|| err_msg)?;

let uses_wal_proposer = config.contains("wal_acceptors");
let uses_wal_proposer = conf.get("wal_acceptors").is_some();

// ok now
Ok(PostgresNode {
@@ -229,18 +179,24 @@ impl PostgresNode {

fn sync_walkeepers(&self) -> Result<Lsn> {
let pg_path = self.env.pg_bin_dir().join("postgres");
let sync_output = Command::new(pg_path)
let sync_handle = Command::new(pg_path)
.arg("--sync-safekeepers")
.env_clear()
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
.env("PGDATA", self.pgdata().to_str().unwrap())
.output()
.with_context(|| "sync-walkeepers failed")?;
.stdout(Stdio::piped())
// Comment this to avoid capturing stderr (useful if command hangs)
.stderr(Stdio::piped())
.spawn()
.expect("postgres --sync-safekeepers failed to start");

let sync_output = sync_handle
.wait_with_output()
.expect("postgres --sync-safekeepers failed");
if !sync_output.status.success() {
anyhow::bail!(
"sync-walkeepers failed: '{}'",
"sync-safekeepers failed: '{}'",
String::from_utf8_lossy(&sync_output.stderr)
);
}
@@ -301,84 +257,70 @@ impl PostgresNode {
// Connect to a page server, get base backup, and untar it to initialize a
// new data directory
fn setup_pg_conf(&self, auth_type: AuthType) -> Result<()> {
File::create(self.pgdata().join("postgresql.conf").to_str().unwrap())?;

let mut conf = PostgresConf::new();
conf.append("max_wal_senders", "10");
// wal_log_hints is mandatory when running against pageserver (see gh issue#192)
// TODO: is it possible to check wal_log_hints at pageserver side via XLOG_PARAMETER_CHANGE?
self.append_conf(
"postgresql.conf",
&format!(
"max_wal_senders = 10\n\
wal_log_hints = on\n\
max_replication_slots = 10\n\
hot_standby = on\n\
shared_buffers = 1MB\n\
fsync = off\n\
max_connections = 100\n\
wal_sender_timeout = 0\n\
wal_level = replica\n\
listen_addresses = '{address}'\n\
port = {port}\n",
address = self.address.ip(),
port = self.address.port()
),
)?;
conf.append("wal_log_hints", "on");
conf.append("max_replication_slots", "10");
conf.append("hot_standby", "on");
conf.append("shared_buffers", "1MB");
conf.append("fsync", "off");
conf.append("max_connections", "100");
conf.append("wal_sender_timeout", "0");
conf.append("wal_level", "replica");
conf.append("listen_addresses", &self.address.ip().to_string());
conf.append("port", &self.address.port().to_string());

// Never clean up old WAL. TODO: We should use a replication
// slot or something proper, to prevent the compute node
// from removing WAL that hasn't been streamed to the safekeeper or
// page server yet. (gh issue #349)
self.append_conf("postgresql.conf", "wal_keep_size='10TB'\n")?;
conf.append("wal_keep_size", "10TB");

// set up authentication
let password = if let AuthType::ZenithJWT = auth_type {
"$ZENITH_AUTH_TOKEN"
} else {
""
// Configure the node to fetch pages from pageserver
let pageserver_connstr = {
let (host, port) = connection_host_port(&self.pageserver.pg_connection_config);

// Set up authentication
//
// $ZENITH_AUTH_TOKEN will be replaced with value from environment
// variable during compute pg startup. It is done this way because
// otherwise user will be able to retrieve the value using SHOW
// command or pg_settings
let password = if let AuthType::ZenithJWT = auth_type {
"$ZENITH_AUTH_TOKEN"
} else {
""
};

format!("host={} port={} password={}", host, port, password)
};

// Configure that node to take pages from pageserver
let (host, port) = connection_host_port(&self.pageserver.pg_connection_config);
self.append_conf(
"postgresql.conf",
format!(
concat!(
"shared_preload_libraries = zenith\n",
// $ZENITH_AUTH_TOKEN will be replaced with value from environment variable during compute pg startup
// it is done this way because otherwise user will be able to retrieve the value using SHOW command or pg_settings
"zenith.page_server_connstring = 'host={} port={} password={}'\n",
"zenith.zenith_timeline='{}'\n",
"zenith.zenith_tenant='{}'\n",
),
host, port, password, self.timelineid, self.tenantid,
)
.as_str(),
)?;
conf.append("shared_preload_libraries", "zenith");
conf.append_line("");
conf.append("zenith.page_server_connstring", &pageserver_connstr);
conf.append("zenith.zenith_tenant", &self.tenantid.to_string());
conf.append("zenith.zenith_timeline", &self.timelineid.to_string());
conf.append_line("");

// Configure the node to stream WAL directly to the pageserver
self.append_conf(
"postgresql.conf",
format!(
concat!(
"synchronous_standby_names = 'pageserver'\n", // TODO: add a new function arg?
"zenith.callmemaybe_connstring = '{}'\n", // FIXME escaping
),
self.connstr(),
)
.as_str(),
)?;
conf.append("synchronous_standby_names", "pageserver"); // TODO: add a new function arg?
conf.append("zenith.callmemaybe_connstring", &self.connstr());

let mut file = File::create(self.pgdata().join("postgresql.conf"))?;
file.write_all(conf.to_string().as_bytes())?;

Ok(())
}

fn load_basebackup(&self) -> Result<()> {
let lsn = if self.uses_wal_proposer {
// LSN WAL_SEGMENT_SIZE means that it is bootstrap and we need to download just
// LSN 0 means that it is bootstrap and we need to download just
// latest data from the pageserver. That is a bit clumsy but whole bootstrap
// procedure evolves quite actively right now, so let's think about it again
// when things would be more stable (TODO).
let lsn = self.sync_walkeepers()?;
if lsn == Lsn(pg_constants::WAL_SEGMENT_SIZE as u64) {
if lsn == Lsn(0) {
None
} else {
Some(lsn)
@@ -409,14 +351,6 @@ impl PostgresNode {
}
}

pub fn append_conf(&self, config: &str, opts: &str) -> Result<()> {
OpenOptions::new()
.append(true)
.open(self.pgdata().join(config).to_str().unwrap())?
.write_all(opts.as_bytes())?;
Ok(())
}

fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
let pg_ctl_path = self.env.pg_bin_dir().join("pg_ctl");
let mut cmd = Command::new(pg_ctl_path);
@@ -482,13 +416,22 @@ impl PostgresNode {
}

pub fn stop(&self, destroy: bool) -> Result<()> {
self.pg_ctl(&["-m", "immediate", "stop"], &None)?;
// If we are going to destroy data directory,
// use immediate shutdown mode, otherwise,
// shutdown gracefully to leave the data directory sane.
//
// Compute node always starts from scratch, so stop
// without destroy only used for testing and debugging.
//
if destroy {
self.pg_ctl(&["-m", "immediate", "stop"], &None)?;
println!(
"Destroying postgres data directory '{}'",
self.pgdata().to_str().unwrap()
);
fs::remove_dir_all(&self.pgdata())?;
} else {
self.pg_ctl(&["stop"], &None)?;
}
Ok(())
}
@@ -509,9 +452,7 @@ impl PostgresNode {
.output()
.expect("failed to execute whoami");

if !output.status.success() {
panic!("whoami failed");
}
assert!(output.status.success(), "whoami failed");

String::from_utf8(output.stdout).unwrap().trim().to_string()
}

@@ -12,6 +12,7 @@ use std::path::Path;

pub mod compute;
pub mod local_env;
pub mod postgresql_conf;
pub mod storage;

/// Read a PID file

@@ -4,27 +4,24 @@
// Now it also provides init method which acts like a stub for proper installation
// script which will use local paths.
//
use anyhow::{anyhow, Context, Result};
use hex;
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::env;
use std::fs;
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::{collections::BTreeMap, env};
use url::Url;
use zenith_utils::auth::{encode_from_key_path, Claims, Scope};
use zenith_utils::postgres_backend::AuthType;
use zenith_utils::zid::ZTenantId;

pub type Remotes = BTreeMap<String, String>;

//
// This data structures represent deserialized zenith CLI config
//
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct LocalEnv {
// Pageserver connection strings
pub pageserver_connstring: String,
// Pageserver connection settings
pub pageserver_pg_port: u16,
pub pageserver_http_port: u16,

// Base directory for both pageserver and compute nodes
pub base_data_dir: PathBuf,
@@ -35,8 +32,8 @@ pub struct LocalEnv {
// to four separate paths and match OS-specific installation layout.
pub pg_distrib_dir: PathBuf,

// Path to pageserver binary. Empty for remote pageserver.
pub zenith_distrib_dir: Option<PathBuf>,
// Path to pageserver binary.
pub zenith_distrib_dir: PathBuf,

// keeping tenant id in config to reduce copy paste when running zenith locally with single tenant
#[serde(with = "hex")]
@@ -50,8 +47,6 @@ pub struct LocalEnv {

// used to issue tokens during e.g pg start
pub private_key_path: PathBuf,

pub remotes: Remotes,
}

impl LocalEnv {
@@ -64,11 +59,7 @@ impl LocalEnv {
}

pub fn pageserver_bin(&self) -> Result<PathBuf> {
Ok(self
.zenith_distrib_dir
.as_ref()
.ok_or_else(|| anyhow!("Can not manage remote pageserver"))?
.join("pageserver"))
Ok(self.zenith_distrib_dir.join("pageserver"))
}

pub fn pg_data_dirs_path(&self) -> PathBuf {
@@ -98,7 +89,8 @@ fn base_path() -> PathBuf {
// Initialize a new Zenith repository
//
pub fn init(
remote_pageserver: Option<&str>,
pageserver_pg_port: u16,
pageserver_http_port: u16,
tenantid: ZTenantId,
auth_type: AuthType,
) -> Result<()> {
@@ -165,39 +157,22 @@ pub fn init(
let auth_token =
encode_from_key_path(&Claims::new(None, Scope::PageServerApi), &private_key_path)?;

let conf = if let Some(addr) = remote_pageserver {
// check that addr is parsable
let _uri = Url::parse(addr).map_err(|e| anyhow!("{}: {}", addr, e))?;
// Find zenith binaries.
let zenith_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
if !zenith_distrib_dir.join("pageserver").exists() {
anyhow::bail!("Can't find pageserver binary.",);
}

LocalEnv {
pageserver_connstring: format!("postgresql://{}/", addr),
pg_distrib_dir,
zenith_distrib_dir: None,
base_data_dir: base_path,
remotes: BTreeMap::default(),
tenantid,
auth_token,
auth_type,
private_key_path,
}
} else {
// Find zenith binaries.
let zenith_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
if !zenith_distrib_dir.join("pageserver").exists() {
anyhow::bail!("Can't find pageserver binary.",);
}

LocalEnv {
pageserver_connstring: "postgresql://127.0.0.1:6400".to_string(),
pg_distrib_dir,
zenith_distrib_dir: Some(zenith_distrib_dir),
base_data_dir: base_path,
remotes: BTreeMap::default(),
tenantid,
auth_token,
auth_type,
private_key_path,
}
let conf = LocalEnv {
pageserver_pg_port,
pageserver_http_port,
pg_distrib_dir,
zenith_distrib_dir,
base_data_dir: base_path,
tenantid,
auth_token,
auth_type,
private_key_path,
};

fs::create_dir_all(conf.pg_data_dirs_path())?;
@@ -225,12 +200,3 @@ pub fn load_config() -> Result<LocalEnv> {
let config = fs::read_to_string(repopath.join("config"))?;
toml::from_str(config.as_str()).map_err(|e| e.into())
}

// Save config. We use that to change set of remotes from CLI itself.
pub fn save_config(conf: &LocalEnv) -> Result<()> {
let config_path = base_path().join("config");
let conf_str = toml::to_string_pretty(conf)?;

fs::write(config_path, conf_str)?;
Ok(())
}
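The `LocalEnv` struct above is written to the repo's `config` file with `toml::to_string_pretty` (`save_config`) and read back with `toml::from_str` (`load_config`). As a rough, hedged illustration of that round-trip pattern only (this is not the real `LocalEnv`; `ExampleEnv` and its field values are simplified stand-ins, and the `serde`/`toml`/`anyhow` crates are assumed, as this diff already uses them):

```rust
use serde::{Deserialize, Serialize};

// Simplified stand-in for LocalEnv; only the serialize/deserialize pattern matters here.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct ExampleEnv {
    pageserver_pg_port: u16,
    pageserver_http_port: u16,
    base_data_dir: std::path::PathBuf,
}

fn main() -> anyhow::Result<()> {
    let conf = ExampleEnv {
        pageserver_pg_port: 6400,
        pageserver_http_port: 9898,
        base_data_dir: ".zenith".into(),
    };

    // save_config-style serialization to TOML text...
    let text = toml::to_string_pretty(&conf)?;
    // ...and load_config-style deserialization back into the struct.
    let roundtrip: ExampleEnv = toml::from_str(&text)?;
    assert_eq!(conf, roundtrip);
    Ok(())
}
```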
212 control_plane/src/postgresql_conf.rs (new file)
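The new module below is a minimal `postgresql.conf` reader/writer. As orientation before the full source, here is a hedged usage sketch built only from the functions visible in this diff (`new`, `append`, `get`, `parse_field`, `read`, and the `Display` impl); the `control_plane::` import path, the example option values, and the `anyhow` error plumbing are assumptions for illustration:

```rust
// Import path assumed; PostgresConf lives in control_plane/src/postgresql_conf.rs per this diff.
use control_plane::postgresql_conf::PostgresConf;

fn main() -> anyhow::Result<()> {
    // Build a config in memory, the way setup_pg_conf() does in compute.rs above.
    let mut conf = PostgresConf::new();
    conf.append("port", "55432");
    conf.append("shared_buffers", "1MB");
    conf.append("zenith.zenith_timeline", "de200bd42b49cc1814412c7e592dd6e9"); // arbitrary example id

    // Render it to postgresql.conf text via the Display impl.
    let text = conf.to_string();

    // Read it back and pull out typed values, as the new compute.rs code does with parse_field().
    let parsed = PostgresConf::read(text.as_bytes())?;
    let port: u16 = parsed.parse_field("port", "in the example config")?;
    assert_eq!(port, 55432);
    assert_eq!(parsed.get("shared_buffers"), Some("1MB"));
    Ok(())
}
```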
@@ -0,0 +1,212 @@
///
/// Module for parsing postgresql.conf file.
///
/// NOTE: This doesn't implement the full, correct postgresql.conf syntax. Just
/// enough to extract a few settings we need in Zenith, assuming you don't do
/// funny stuff like include-directives or funny escaping.
use anyhow::{anyhow, bail, Context, Result};
use lazy_static::lazy_static;
use regex::Regex;
use std::collections::HashMap;
use std::fmt;
use std::io::BufRead;
use std::str::FromStr;

/// In-memory representation of a postgresql.conf file
#[derive(Default)]
pub struct PostgresConf {
lines: Vec<String>,
hash: HashMap<String, String>,
}

lazy_static! {
static ref CONF_LINE_RE: Regex = Regex::new(r"^((?:\w|\.)+)\s*=\s*(\S+)$").unwrap();
}

impl PostgresConf {
pub fn new() -> PostgresConf {
PostgresConf::default()
}

/// Read file into memory
pub fn read(read: impl std::io::Read) -> Result<PostgresConf> {
let mut result = Self::new();

for line in std::io::BufReader::new(read).lines() {
let line = line?;

// Store each line in a vector, in original format
result.lines.push(line.clone());

// Also parse each line and insert key=value lines into a hash map.
//
// FIXME: This doesn't match exactly the flex/bison grammar in PostgreSQL.
// But it's close enough for our usage.
let line = line.trim();
if line.starts_with('#') {
// comment, ignore
continue;
} else if let Some(caps) = CONF_LINE_RE.captures(line) {
let name = caps.get(1).unwrap().as_str();
let raw_val = caps.get(2).unwrap().as_str();

if let Ok(val) = deescape_str(raw_val) {
// Note: if there's already an entry in the hash map for
// this key, this will replace it. That's the behavior what
// we want; when PostgreSQL reads the file, each line
// overrides any previous value for the same setting.
result.hash.insert(name.to_string(), val.to_string());
}
}
}
Ok(result)
}

/// Return the current value of 'option'
pub fn get(&self, option: &str) -> Option<&str> {
self.hash.get(option).map(|x| x.as_ref())
}

/// Return the current value of a field, parsed to the right datatype.
///
/// This calls the FromStr::parse() function on the value of the field. If
/// the field does not exist, or parsing fails, returns an error.
///
pub fn parse_field<T>(&self, field_name: &str, context: &str) -> Result<T>
where
T: FromStr,
<T as FromStr>::Err: std::error::Error + Send + Sync + 'static,
{
self.get(field_name)
.ok_or_else(|| anyhow!("could not find '{}' option {}", field_name, context))?
.parse::<T>()
.with_context(|| format!("could not parse '{}' option {}", field_name, context))
}

///
/// Note: if you call this multiple times for the same option, the config
/// file will a line for each call. It would be nice to have a function
/// to change an existing line, but that's a TODO.
///
pub fn append(&mut self, option: &str, value: &str) {
self.lines
.push(format!("{}={}\n", option, escape_str(value)));
self.hash.insert(option.to_string(), value.to_string());
}

/// Append an arbitrary non-setting line to the config file
pub fn append_line(&mut self, line: &str) {
self.lines.push(line.to_string());
}
}

impl fmt::Display for PostgresConf {
/// Return the whole configuration file as a string
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for line in self.lines.iter() {
f.write_str(line)?;
}
Ok(())
}
}

/// Escape a value for putting in postgresql.conf.
fn escape_str(s: &str) -> String {
// If the string doesn't contain anything that needs quoting or escaping, return it
// as it is.
//
// The first part of the regex, before the '|', matches the INTEGER rule in the
// PostgreSQL flex grammar (guc-file.l). It matches plain integers like "123" and
// "-123", and also accepts units like "10MB". The second part of the regex matches
// the UNQUOTED_STRING rule, and accepts strings that contain a single word, beginning
// with a letter. That covers words like "off" or "posix". Everything else is quoted.
//
// This regex is a bit more conservative than the rules in guc-file.l, so we quote some
// strings that PostgreSQL would accept without quoting, but that's OK.
lazy_static! {
static ref UNQUOTED_RE: Regex =
Regex::new(r"(^[-+]?[0-9]+[a-zA-Z]*$)|(^[a-zA-Z][a-zA-Z0-9]*$)").unwrap();
}
if UNQUOTED_RE.is_match(s) {
s.to_string()
} else {
// Otherwise escape and quote it
let s = s
.replace('\\', "\\\\")
.replace('\n', "\\n")
.replace('\'', "''");

"\'".to_owned() + &s + "\'"
}
}

/// De-escape a possibly-quoted value.
///
/// See `DeescapeQuotedString` function in PostgreSQL sources for how PostgreSQL
/// does this.
fn deescape_str(s: &str) -> Result<String> {
// If the string has a quote at the beginning and end, strip them out.
if s.len() >= 2 && s.starts_with('\'') && s.ends_with('\'') {
let mut result = String::new();

let mut iter = s[1..(s.len() - 1)].chars().peekable();
while let Some(c) = iter.next() {
let newc = if c == '\\' {
match iter.next() {
Some('b') => '\x08',
Some('f') => '\x0c',
Some('n') => '\n',
Some('r') => '\r',
Some('t') => '\t',
Some('0'..='7') => {
// TODO
bail!("octal escapes not supported");
}
Some(n) => n,
None => break,
}
} else if c == '\'' && iter.peek() == Some(&'\'') {
// doubled quote becomes just one quote
iter.next().unwrap()
} else {
c
};

result.push(newc);
}
Ok(result)
} else {
Ok(s.to_string())
}
}

#[test]
fn test_postgresql_conf_escapes() -> Result<()> {
assert_eq!(escape_str("foo bar"), "'foo bar'");
// these don't need to be quoted
assert_eq!(escape_str("foo"), "foo");
assert_eq!(escape_str("123"), "123");
assert_eq!(escape_str("+123"), "+123");
assert_eq!(escape_str("-10"), "-10");
assert_eq!(escape_str("1foo"), "1foo");
assert_eq!(escape_str("foo1"), "foo1");
assert_eq!(escape_str("10MB"), "10MB");
assert_eq!(escape_str("-10kB"), "-10kB");

// these need quoting and/or escaping
assert_eq!(escape_str("foo bar"), "'foo bar'");
assert_eq!(escape_str("fo'o"), "'fo''o'");
assert_eq!(escape_str("fo\no"), "'fo\\no'");
assert_eq!(escape_str("fo\\o"), "'fo\\\\o'");
assert_eq!(escape_str("10 cats"), "'10 cats'");

// Test de-escaping
assert_eq!(deescape_str(&escape_str("foo"))?, "foo");
assert_eq!(deescape_str(&escape_str("fo'o\nba\\r"))?, "fo'o\nba\\r");
assert_eq!(deescape_str("'\\b\\f\\n\\r\\t'")?, "\x08\x0c\n\r\t");

// octal-escapes are currently not supported
assert!(deescape_str("'foo\\7\\07\\007'").is_err());

Ok(())
}

@@ -1,17 +1,19 @@
use std::collections::HashMap;
use std::io::Write;
use std::net::TcpStream;
use std::path::PathBuf;
use std::process::Command;
use std::thread;
use std::time::Duration;
use std::{io, result, thread};

use anyhow::{anyhow, bail, ensure, Result};
use anyhow::{anyhow, bail};
use nix::sys::signal::{kill, Signal};
use nix::unistd::Pid;
use pageserver::http::models::{BranchCreateRequest, TenantCreateRequest};
use postgres::{Config, NoTls};
use reqwest::blocking::{Client, RequestBuilder};
use reqwest::{IntoUrl, Method, StatusCode};
use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method};
use thiserror::Error;
use zenith_utils::http::error::HttpErrorBody;
use zenith_utils::postgres_backend::AuthType;
use zenith_utils::zid::ZTenantId;

@@ -20,7 +22,38 @@ use crate::read_pidfile;
use pageserver::branches::BranchInfo;
use zenith_utils::connstring::connection_address;

const HTTP_BASE_URL: &str = "http://127.0.0.1:9898/v1";
#[derive(Error, Debug)]
pub enum PageserverHttpError {
#[error("Reqwest error: {0}")]
Transport(#[from] reqwest::Error),

#[error("Error: {0}")]
Response(String),
}

type Result<T> = result::Result<T, PageserverHttpError>;

pub trait ResponseErrorMessageExt: Sized {
fn error_from_body(self) -> Result<Self>;
}

impl ResponseErrorMessageExt for Response {
fn error_from_body(self) -> Result<Self> {
let status = self.status();
if !(status.is_client_error() || status.is_server_error()) {
return Ok(self);
}

// reqwest do not export it's error construction utility functions, so lets craft the message ourselves
let url = self.url().to_owned();
Err(PageserverHttpError::Response(
match self.json::<HttpErrorBody>() {
Ok(err_body) => format!("Error: {}", err_body.msg),
Err(_) => format!("Http error ({}) at {}.", status.as_u16(), url),
},
))
}
}

//
// Control routines for pageserver.
@@ -46,27 +79,36 @@ impl PageServerNode {

PageServerNode {
kill_on_exit: false,
pg_connection_config: Self::default_config(password), // default
pg_connection_config: Self::pageserver_connection_config(
password,
env.pageserver_pg_port,
),
env: env.clone(),
http_client: Client::new(),
http_base_url: HTTP_BASE_URL.to_owned(),
http_base_url: format!("http://localhost:{}/v1", env.pageserver_http_port),
}
}

fn default_config(password: &str) -> Config {
format!("postgresql://no_user:{}@localhost:64000/no_db", password)
fn pageserver_connection_config(password: &str, port: u16) -> Config {
format!("postgresql://no_user:{}@localhost:{}/no_db", password, port)
.parse()
.unwrap()
}

pub fn init(&self, create_tenant: Option<&str>, enable_auth: bool) -> Result<()> {
pub fn init(&self, create_tenant: Option<&str>, enable_auth: bool) -> anyhow::Result<()> {
let mut cmd = Command::new(self.env.pageserver_bin()?);
let listen_pg = format!("localhost:{}", self.env.pageserver_pg_port);
let listen_http = format!("localhost:{}", self.env.pageserver_http_port);
let mut args = vec![
"--init",
"-D",
self.env.base_data_dir.to_str().unwrap(),
"--postgres-distrib",
self.env.pg_distrib_dir.to_str().unwrap(),
"--listen-pg",
&listen_pg,
"--listen-http",
&listen_http,
];

if enable_auth {
@@ -100,12 +142,13 @@ impl PageServerNode {
self.repo_path().join("pageserver.pid")
}

pub fn start(&self) -> Result<()> {
println!(
"Starting pageserver at '{}' in {}",
pub fn start(&self) -> anyhow::Result<()> {
print!(
"Starting pageserver at '{}' in '{}'",
connection_address(&self.pg_connection_config),
self.repo_path().display()
);
io::stdout().flush().unwrap();

let mut cmd = Command::new(self.env.pageserver_bin()?);
cmd.args(&["-D", self.repo_path().to_str().unwrap()])
@@ -122,25 +165,41 @@ impl PageServerNode {

// It takes a while for the page server to start up. Wait until it is
// open for business.
for retries in 1..15 {
const RETRIES: i8 = 15;
for retries in 1..RETRIES {
match self.check_status() {
Ok(_) => {
println!("Pageserver started");
println!("\nPageserver started");
return Ok(());
}
Err(err) => {
println!(
"Pageserver not responding yet, err {} retrying ({})...",
err, retries
);
match err {
PageserverHttpError::Transport(err) => {
if err.is_connect() && retries < 5 {
print!(".");
io::stdout().flush().unwrap();
} else {
if retries == 5 {
println!() // put a line break after dots for second message
}
println!(
"Pageserver not responding yet, err {} retrying ({})...",
err, retries
);
}
}
PageserverHttpError::Response(msg) => {
bail!("pageserver failed to start: {} ", msg)
}
}
thread::sleep(Duration::from_secs(1));
}
}
}
bail!("pageserver failed to start");
bail!("pageserver failed to start in {} seconds", RETRIES);
}

pub fn stop(&self) -> Result<()> {
pub fn stop(&self) -> anyhow::Result<()> {
let pid = read_pidfile(&self.pid_file())?;
let pid = Pid::from_raw(pid);
if kill(pid, Signal::SIGTERM).is_err() {
@@ -169,7 +228,7 @@ impl PageServerNode {
client.simple_query(sql).unwrap()
}

pub fn page_server_psql_client(&self) -> Result<postgres::Client, postgres::Error> {
pub fn page_server_psql_client(&self) -> result::Result<postgres::Client, postgres::Error> {
self.pg_connection_config.connect(NoTls)
}

@@ -182,14 +241,9 @@ impl PageServerNode {
}

pub fn check_status(&self) -> Result<()> {
let status = self
.http_request(Method::GET, format!("{}/{}", self.http_base_url, "status"))
self.http_request(Method::GET, format!("{}/{}", self.http_base_url, "status"))
.send()?
.status();
ensure!(
status == StatusCode::OK,
format!("got unexpected response status {}", status)
);
.error_from_body()?;
Ok(())
}

@@ -197,7 +251,7 @@ impl PageServerNode {
Ok(self
.http_request(Method::GET, format!("{}/{}", self.http_base_url, "tenant"))
.send()?
.error_for_status()?
.error_from_body()?
.json()?)
}

@@ -208,7 +262,7 @@ impl PageServerNode {
tenant_id: tenantid,
})
.send()?
.error_for_status()?
.error_from_body()?
.json()?)
}

@@ -219,7 +273,7 @@ impl PageServerNode {
format!("{}/branch/{}", self.http_base_url, tenantid),
)
.send()?
.error_for_status()?
.error_from_body()?
.json()?)
}

@@ -230,35 +284,30 @@ impl PageServerNode {
tenantid: &ZTenantId,
) -> Result<BranchInfo> {
Ok(self
.http_request(Method::POST, format!("{}/{}", self.http_base_url, "branch"))
.http_request(Method::POST, format!("{}/branch", self.http_base_url))
.json(&BranchCreateRequest {
tenant_id: tenantid.to_owned(),
name: branch_name.to_owned(),
start_point: startpoint.to_owned(),
})
.send()?
.error_for_status()?
.error_from_body()?
.json()?)
}

// TODO: make this a separate request type and avoid loading all the branches
pub fn branch_get_by_name(
&self,
tenantid: &ZTenantId,
branch_name: &str,
) -> Result<BranchInfo> {
let branch_infos = self.branch_list(tenantid)?;
let branch_by_name: Result<HashMap<String, BranchInfo>> = branch_infos
.into_iter()
.map(|branch_info| Ok((branch_info.name.clone(), branch_info)))
.collect();
let branch_by_name = branch_by_name?;

let branch = branch_by_name
.get(branch_name)
|
||||
.ok_or_else(|| anyhow!("Branch {} not found", branch_name))?;
|
||||
|
||||
Ok(branch.clone())
|
||||
Ok(self
|
||||
.http_request(
|
||||
Method::GET,
|
||||
format!("{}/branch/{}/{}", self.http_base_url, tenantid, branch_name),
|
||||
)
|
||||
.send()?
|
||||
.error_for_status()?
|
||||
.json()?)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,5 +10,5 @@
- [pageserver/README](/pageserver/README) — pageserver overview.
- [postgres_ffi/README](/postgres_ffi/README) — Postgres FFI overview.
- [test_runner/README.md](/test_runner/README.md) — tests infrastructure overview.
- [walkeeper/README](/walkeeper/README.md) — WAL service overview.
- [walkeeper/README](/walkeeper/README) — WAL service overview.
- [core_changes.md](core_changes.md) - Description of Zenith changes in Postgres core

@@ -30,12 +30,20 @@ writes out the changes from in-memory layers into new layer files[]. This proces
is called "checkpointing". The page server only creates layer files for
relations that have been modified since the last checkpoint.

Configuration parameter `checkpoint_distance` defines the distance
from the current LSN at which to perform a checkpoint of in-memory layers.
Default is `DEFAULT_CHECKPOINT_DISTANCE`.
Set this parameter to `0` to force a checkpoint of every layer.

Configuration parameter `checkpoint_period` defines the interval between checkpoint iterations.
Default is `DEFAULT_CHECKPOINT_PERIOD`.
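To make these two settings concrete, here is a minimal sketch of parsing them from a `pageserver.toml` fragment, assuming serde (derive), toml and humantime as dependencies; `CheckpointSettings` and the literal values are illustrative, not the pageserver's real config struct.

```rust
// A minimal sketch, assuming serde (derive), toml and humantime dependencies.
// The struct and values are illustrative, not the real pageserver config.
use serde::Deserialize;
use std::time::Duration;

#[derive(Deserialize, Debug)]
struct CheckpointSettings {
    checkpoint_distance: Option<u64>,  // bytes of WAL since the last checkpoint
    checkpoint_period: Option<String>, // humantime string, e.g. "1s"
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let cfg: CheckpointSettings =
        toml::from_str("checkpoint_distance = 268435456\ncheckpoint_period = '1s'\n")?;
    let period = cfg
        .checkpoint_period
        .as_deref()
        .map(humantime::parse_duration)
        .transpose()?
        .unwrap_or(Duration::from_secs(1));
    println!("distance = {:?}, period = {:?}", cfg.checkpoint_distance, period);
    Ok(())
}
```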
### Compute node

Stateless Postgres node that stores data in pageserver.

### Garbage collection

The process of removing old on-disk layers that are not needed by any timeline anymore.
### Fork

Each of the separate segmented file sets in which a relation is stored. The main fork is where the actual data resides. There also exist two secondary forks for metadata: the free space map and the visibility map.
@@ -43,7 +51,7 @@ Each PostgreSQL fork is considered a separate relish.

### Layer

Each layer corresponds to one RELISH_SEG_SIZE slice of a relish in a range of LSNs.
Each layer corresponds to the specific version of a relish Segment in a range of LSNs.
There are two kinds of layers, in-memory and on-disk layers. In-memory
layers are used to ingest incoming WAL, and provide fast access
to the recent page versions. On-disk layers are stored as files on disk, and
@@ -122,6 +130,20 @@ One repository corresponds to one Tenant.

How much history do we need to keep around for PITR and read-only nodes?

### Segment (PostgreSQL)

NOTE: This is an overloaded term.

A physical file that stores data for a given relation. File segments are
limited in size by a compile-time setting (1 gigabyte by default), so if a
relation exceeds that size, it is split into multiple segments.

### Segment (Layered Repository)

NOTE: This is an overloaded term.

Segment is a RELISH_SEG_SIZE slice of relish (identified by a SegmentTag).

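As a rough illustration of how a block number maps onto a layered-repository segment; the constant value and type shape below are assumptions for the example, not the pageserver's real definitions.

```rust
// Illustrative only: the real RELISH_SEG_SIZE value and SegmentTag fields
// live in the pageserver sources; the number below is a placeholder.
const RELISH_SEG_SIZE: u32 = 10 * 1024; // blocks per segment (assumed)

#[derive(Debug, PartialEq, Eq)]
struct SegmentTag {
    segno: u32, // which RELISH_SEG_SIZE slice of the relish this is
}

fn seg_for_block(blknum: u32) -> SegmentTag {
    SegmentTag { segno: blknum / RELISH_SEG_SIZE }
}

fn main() {
    assert_eq!(seg_for_block(0), SegmentTag { segno: 0 });
    assert_eq!(seg_for_block(RELISH_SEG_SIZE - 1), SegmentTag { segno: 0 });
    assert_eq!(seg_for_block(RELISH_SEG_SIZE), SegmentTag { segno: 1 });
}
```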
### SLRU

SLRUs include pg_clog, pg_multixact/members, and

@@ -4,8 +4,6 @@ version = "0.1.0"
|
||||
authors = ["Stas Kelvich <stas@zenith.tech>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
bookfile = "^0.3"
|
||||
chrono = "0.4.19"
|
||||
@@ -16,14 +14,10 @@ byteorder = "1.4.3"
|
||||
futures = "0.3.13"
|
||||
hyper = "0.14"
|
||||
lazy_static = "1.4.0"
|
||||
slog-stdlog = "4.1.0"
|
||||
slog-scope = "4.4.0"
|
||||
slog-term = "2.8.0"
|
||||
slog = "2.7.0"
|
||||
log = "0.4.14"
|
||||
clap = "2.33.0"
|
||||
daemonize = "0.4.1"
|
||||
tokio = { version = "1.5.0", features = ["full"] }
|
||||
tokio = { version = "1.11", features = ["process", "macros", "fs"] }
|
||||
postgres-types = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
|
||||
postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
|
||||
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
|
||||
@@ -38,8 +32,14 @@ serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
toml = "0.5"
|
||||
scopeguard = "1.1.0"
|
||||
rust-s3 = { version = "0.27.0-rc4", features = ["no-verify-ssl"] }
|
||||
async-trait = "0.1"
|
||||
const_format = "0.2.21"
|
||||
|
||||
postgres_ffi = { path = "../postgres_ffi" }
|
||||
zenith_metrics = { path = "../zenith_metrics" }
|
||||
zenith_utils = { path = "../zenith_utils" }
|
||||
workspace_hack = { path = "../workspace_hack" }
|
||||
|
||||
[dev-dependencies]
|
||||
hex-literal = "0.3"
|
||||
|
||||
@@ -7,8 +7,9 @@ The Page Server has a few different duties:
- Replay WAL that's applicable to the chunks that the Page Server maintains
- Backup to S3



S3 is the main fault-tolerant storage of all data, as there are no Page Server
replicas. We use a separate fault-tolerant WAL service to reduce latency. It
keeps track of WAL records which are not synced to S3 yet.

The Page Server consists of multiple threads that operate on a shared
repository of page versions:

@@ -10,6 +10,7 @@
|
||||
//! This module is responsible for creation of such tarball
|
||||
//! from data stored in object storage.
|
||||
//!
|
||||
use anyhow::Result;
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use log::*;
|
||||
use std::io;
|
||||
@@ -22,7 +23,7 @@ use crate::relish::*;
|
||||
use crate::repository::Timeline;
|
||||
use postgres_ffi::xlog_utils::*;
|
||||
use postgres_ffi::*;
|
||||
use zenith_utils::lsn::{Lsn, RecordLsn};
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
/// This is short-living object only for the time of tarball creation,
|
||||
/// created mostly to avoid passing a lot of parameters between various functions
|
||||
@@ -46,34 +47,38 @@ impl<'a> Basebackup<'a> {
|
||||
write: &'a mut dyn Write,
|
||||
timeline: &'a Arc<dyn Timeline>,
|
||||
req_lsn: Option<Lsn>,
|
||||
) -> Basebackup<'a> {
|
||||
// current_prev may be zero if we are at the start of timeline branched from old lsn
|
||||
let RecordLsn {
|
||||
last: current_last,
|
||||
prev: current_prev,
|
||||
} = timeline.get_last_record_rlsn();
|
||||
|
||||
// Compute postgres doesn't have any previous WAL files, but the first record that this
|
||||
// postgres is going to write need to have LSN of previous record (xl_prev). So we are
|
||||
// writing prev_lsn to "zenith.signal" file so that postgres can read it during the start.
|
||||
// In some cases we don't know prev_lsn (branch or basebackup @old_lsn) so pass Lsn(0)
|
||||
// instead and embrace the wrong xl_prev in this situations.
|
||||
) -> Result<Basebackup<'a>> {
|
||||
// Compute postgres doesn't have any previous WAL files, but the first
|
||||
// record that it's going to write needs to include the LSN of the
|
||||
// previous record (xl_prev). We include prev_record_lsn in the
|
||||
// "zenith.signal" file, so that postgres can read it during startup.
|
||||
//
|
||||
// We don't keep full history of record boundaries in the page server,
|
||||
// however, only the predecessor of the latest record on each
|
||||
// timeline. So we can only provide prev_record_lsn when you take a
|
||||
// base backup at the end of the timeline, i.e. at last_record_lsn.
|
||||
// Even at the end of the timeline, we sometimes don't have a valid
|
||||
// prev_lsn value; that happens if the timeline was just branched from
|
||||
// an old LSN and it doesn't have any WAL of its own yet. We will set
|
||||
// prev_lsn to Lsn(0) if we cannot provide the correct value.
|
||||
let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn {
|
||||
if req_lsn > current_last {
|
||||
// FIXME: now wait_lsn() is inside of list_nonrels() so we don't have a way
|
||||
// to get it from there. It is better to wait just here.
|
||||
(Lsn(0), req_lsn)
|
||||
} else if req_lsn < current_last {
|
||||
// we don't know prev already. We don't currently use basebackup@old_lsn
|
||||
// but may use it for read only replicas in future
|
||||
(Lsn(0), req_lsn)
|
||||
// Backup was requested at a particular LSN. Wait for it to arrive.
|
||||
timeline.wait_lsn(req_lsn)?;
|
||||
|
||||
// If the requested point is the end of the timeline, we can
|
||||
// provide prev_lsn. (get_last_record_rlsn() might return it as
|
||||
// zero, though, if no WAL has been generated on this timeline
|
||||
// yet.)
|
||||
let end_of_timeline = timeline.get_last_record_rlsn();
|
||||
if req_lsn == end_of_timeline.last {
|
||||
(end_of_timeline.prev, req_lsn)
|
||||
} else {
|
||||
// we are exactly at req_lsn and know prev
|
||||
(current_prev, req_lsn)
|
||||
(Lsn(0), req_lsn)
|
||||
}
|
||||
} else {
|
||||
// None in req_lsn means that we are branching from the latest LSN
|
||||
(current_prev, current_last)
|
||||
// Backup was requested at end of the timeline.
|
||||
let end_of_timeline = timeline.get_last_record_rlsn();
|
||||
(end_of_timeline.prev, end_of_timeline.last)
|
||||
};
|
||||
|
||||
info!(
|
||||
@@ -81,12 +86,12 @@ impl<'a> Basebackup<'a> {
|
||||
backup_prev, backup_lsn
|
||||
);
|
||||
|
||||
Basebackup {
|
||||
Ok(Basebackup {
|
||||
ar: Builder::new(write),
|
||||
timeline,
|
||||
lsn: backup_lsn,
|
||||
prev_record_lsn: backup_prev,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn send_tarball(&mut self) -> anyhow::Result<()> {
|
||||
@@ -154,11 +159,9 @@ impl<'a> Basebackup<'a> {
|
||||
let mut slru_buf: Vec<u8> =
|
||||
Vec::with_capacity(nblocks as usize * pg_constants::BLCKSZ as usize);
|
||||
for blknum in 0..nblocks {
|
||||
let img = self.timeline.get_page_at_lsn_nowait(
|
||||
RelishTag::Slru { slru, segno },
|
||||
blknum,
|
||||
self.lsn,
|
||||
)?;
|
||||
let img =
|
||||
self.timeline
|
||||
.get_page_at_lsn(RelishTag::Slru { slru, segno }, blknum, self.lsn)?;
|
||||
assert!(img.len() == pg_constants::BLCKSZ as usize);
|
||||
|
||||
slru_buf.extend_from_slice(&img);
|
||||
@@ -177,7 +180,7 @@ impl<'a> Basebackup<'a> {
|
||||
// Along with them also send PG_VERSION for each database.
|
||||
//
|
||||
fn add_relmap_file(&mut self, spcnode: u32, dbnode: u32) -> anyhow::Result<()> {
|
||||
let img = self.timeline.get_page_at_lsn_nowait(
|
||||
let img = self.timeline.get_page_at_lsn(
|
||||
RelishTag::FileNodeMap { spcnode, dbnode },
|
||||
0,
|
||||
self.lsn,
|
||||
@@ -219,7 +222,7 @@ impl<'a> Basebackup<'a> {
|
||||
fn add_twophase_file(&mut self, xid: TransactionId) -> anyhow::Result<()> {
|
||||
let img = self
|
||||
.timeline
|
||||
.get_page_at_lsn_nowait(RelishTag::TwoPhase { xid }, 0, self.lsn)?;
|
||||
.get_page_at_lsn(RelishTag::TwoPhase { xid }, 0, self.lsn)?;
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
buf.extend_from_slice(&img[..]);
|
||||
@@ -237,12 +240,12 @@ impl<'a> Basebackup<'a> {
|
||||
// Also send zenith.signal file with extra bootstrap data.
|
||||
//
|
||||
fn add_pgcontrol_file(&mut self) -> anyhow::Result<()> {
|
||||
let checkpoint_bytes =
|
||||
self.timeline
|
||||
.get_page_at_lsn_nowait(RelishTag::Checkpoint, 0, self.lsn)?;
|
||||
let checkpoint_bytes = self
|
||||
.timeline
|
||||
.get_page_at_lsn(RelishTag::Checkpoint, 0, self.lsn)?;
|
||||
let pg_control_bytes =
|
||||
self.timeline
|
||||
.get_page_at_lsn_nowait(RelishTag::ControlFile, 0, self.lsn)?;
|
||||
.get_page_at_lsn(RelishTag::ControlFile, 0, self.lsn)?;
|
||||
let mut pg_control = ControlFileData::decode(&pg_control_bytes)?;
|
||||
let mut checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
|
||||
|
||||
|
||||
@@ -3,43 +3,64 @@
|
||||
//
|
||||
|
||||
use log::*;
|
||||
use pageserver::defaults::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
env,
|
||||
net::TcpListener,
|
||||
path::{Path, PathBuf},
|
||||
process::exit,
|
||||
str::FromStr,
|
||||
thread,
|
||||
time::Duration,
|
||||
};
|
||||
use zenith_utils::{auth::JwtAuth, postgres_backend::AuthType};
|
||||
use zenith_utils::{auth::JwtAuth, logging, postgres_backend::AuthType};
|
||||
|
||||
use anyhow::{ensure, Result};
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use daemonize::Daemonize;
|
||||
|
||||
use pageserver::{branches, http, logger, page_service, tenant_mgr, PageServerConf};
|
||||
use pageserver::{
|
||||
branches,
|
||||
defaults::{
|
||||
DEFAULT_HTTP_LISTEN_ADDR, DEFAULT_PG_LISTEN_ADDR,
|
||||
DEFAULT_RELISH_STORAGE_MAX_CONCURRENT_SYNC_LIMITS,
|
||||
},
|
||||
http, page_service, relish_storage, tenant_mgr, PageServerConf, RelishStorageConfig,
|
||||
RelishStorageKind, S3Config, LOG_FILE_NAME,
|
||||
};
|
||||
use zenith_utils::http::endpoint;
|
||||
|
||||
const DEFAULT_LISTEN_ADDR: &str = "127.0.0.1:64000";
|
||||
const DEFAULT_HTTP_ENDPOINT_ADDR: &str = "127.0.0.1:9898";
|
||||
|
||||
const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024;
|
||||
const DEFAULT_GC_PERIOD: Duration = Duration::from_secs(10);
|
||||
|
||||
const DEFAULT_SUPERUSER: &str = "zenith_admin";
|
||||
use const_format::formatcp;
|
||||
|
||||
/// String arguments that can be declared via CLI or config file
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct CfgFileParams {
|
||||
listen_addr: Option<String>,
|
||||
http_endpoint_addr: Option<String>,
|
||||
listen_pg_addr: Option<String>,
|
||||
listen_http_addr: Option<String>,
|
||||
checkpoint_distance: Option<String>,
|
||||
checkpoint_period: Option<String>,
|
||||
gc_horizon: Option<String>,
|
||||
gc_period: Option<String>,
|
||||
pg_distrib_dir: Option<String>,
|
||||
auth_validation_public_key_path: Option<String>,
|
||||
auth_type: Option<String>,
|
||||
// see https://github.com/alexcrichton/toml-rs/blob/6c162e6562c3e432bf04c82a3d1d789d80761a86/examples/enum_external.rs for enum deserialisation examples
|
||||
relish_storage: Option<RelishStorage>,
|
||||
relish_storage_max_concurrent_sync: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone)]
|
||||
enum RelishStorage {
|
||||
Local {
|
||||
local_path: String,
|
||||
},
|
||||
AwsS3 {
|
||||
bucket_name: String,
|
||||
bucket_region: String,
|
||||
#[serde(skip_serializing)]
|
||||
access_key_id: Option<String>,
|
||||
#[serde(skip_serializing)]
|
||||
secret_access_key: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
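For reference, a minimal sketch of how an externally tagged enum like `RelishStorage` reads from TOML, which is what the linked toml-rs example demonstrates; the struct, file contents and values below are illustrative only, not the pageserver's actual config handling.

```rust
// Sketch only: shows the externally tagged TOML shape, not the real
// pageserver config code. Paths, bucket names and regions are made up.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
enum RelishStorage {
    Local { local_path: String },
    AwsS3 { bucket_name: String, bucket_region: String },
}

#[derive(Deserialize, Debug)]
struct Cfg {
    relish_storage: Option<RelishStorage>,
}

fn main() -> Result<(), toml::de::Error> {
    // With external tagging, the variant name becomes a nested table name.
    let local: Cfg =
        toml::from_str("[relish_storage.Local]\nlocal_path = '/tmp/relishes'\n")?;
    let s3: Cfg = toml::from_str(
        "[relish_storage.AwsS3]\nbucket_name = 'some-bucket'\nbucket_region = 'eu-north-1'\n",
    )?;
    println!("{:?}\n{:?}", local, s3);
    Ok(())
}
```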
impl CfgFileParams {
|
||||
@@ -49,14 +70,33 @@ impl CfgFileParams {
|
||||
arg_matches.value_of(arg_name).map(str::to_owned)
|
||||
};
|
||||
|
||||
let relish_storage = if let Some(local_path) = get_arg("relish-storage-local-path") {
|
||||
Some(RelishStorage::Local { local_path })
|
||||
} else if let Some((bucket_name, bucket_region)) =
|
||||
get_arg("relish-storage-s3-bucket").zip(get_arg("relish-storage-region"))
|
||||
{
|
||||
Some(RelishStorage::AwsS3 {
|
||||
bucket_name,
|
||||
bucket_region,
|
||||
access_key_id: get_arg("relish-storage-access-key"),
|
||||
secret_access_key: get_arg("relish-storage-secret-access-key"),
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Self {
|
||||
listen_addr: get_arg("listen"),
|
||||
http_endpoint_addr: get_arg("http_endpoint"),
|
||||
listen_pg_addr: get_arg("listen-pg"),
|
||||
listen_http_addr: get_arg("listen-http"),
|
||||
checkpoint_distance: get_arg("checkpoint_distance"),
|
||||
checkpoint_period: get_arg("checkpoint_period"),
|
||||
gc_horizon: get_arg("gc_horizon"),
|
||||
gc_period: get_arg("gc_period"),
|
||||
pg_distrib_dir: get_arg("postgres-distrib"),
|
||||
auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
|
||||
auth_type: get_arg("auth-type"),
|
||||
relish_storage,
|
||||
relish_storage_max_concurrent_sync: get_arg("relish-storage-max-concurrent-sync"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -64,8 +104,10 @@ impl CfgFileParams {
|
||||
fn or(self, other: CfgFileParams) -> Self {
|
||||
// TODO cleaner way to do this
|
||||
Self {
|
||||
listen_addr: self.listen_addr.or(other.listen_addr),
|
||||
http_endpoint_addr: self.http_endpoint_addr.or(other.http_endpoint_addr),
|
||||
listen_pg_addr: self.listen_pg_addr.or(other.listen_pg_addr),
|
||||
listen_http_addr: self.listen_http_addr.or(other.listen_http_addr),
|
||||
checkpoint_distance: self.checkpoint_distance.or(other.checkpoint_distance),
|
||||
checkpoint_period: self.checkpoint_period.or(other.checkpoint_period),
|
||||
gc_horizon: self.gc_horizon.or(other.gc_horizon),
|
||||
gc_period: self.gc_period.or(other.gc_period),
|
||||
pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
|
||||
@@ -73,6 +115,10 @@ impl CfgFileParams {
|
||||
.auth_validation_public_key_path
|
||||
.or(other.auth_validation_public_key_path),
|
||||
auth_type: self.auth_type.or(other.auth_type),
|
||||
relish_storage: self.relish_storage.or(other.relish_storage),
|
||||
relish_storage_max_concurrent_sync: self
|
||||
.relish_storage_max_concurrent_sync
|
||||
.or(other.relish_storage_max_concurrent_sync),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,14 +126,23 @@ impl CfgFileParams {
|
||||
fn try_into_config(&self) -> Result<PageServerConf> {
|
||||
let workdir = PathBuf::from(".");
|
||||
|
||||
let listen_addr = match self.listen_addr.as_ref() {
|
||||
let listen_pg_addr = match self.listen_pg_addr.as_ref() {
|
||||
Some(addr) => addr.clone(),
|
||||
None => DEFAULT_LISTEN_ADDR.to_owned(),
|
||||
None => DEFAULT_PG_LISTEN_ADDR.to_owned(),
|
||||
};
|
||||
|
||||
let http_endpoint_addr = match self.http_endpoint_addr.as_ref() {
|
||||
let listen_http_addr = match self.listen_http_addr.as_ref() {
|
||||
Some(addr) => addr.clone(),
|
||||
None => DEFAULT_HTTP_ENDPOINT_ADDR.to_owned(),
|
||||
None => DEFAULT_HTTP_LISTEN_ADDR.to_owned(),
|
||||
};
|
||||
|
||||
let checkpoint_distance: u64 = match self.checkpoint_distance.as_ref() {
|
||||
Some(checkpoint_distance_str) => checkpoint_distance_str.parse()?,
|
||||
None => DEFAULT_CHECKPOINT_DISTANCE,
|
||||
};
|
||||
let checkpoint_period = match self.checkpoint_period.as_ref() {
|
||||
Some(checkpoint_period_str) => humantime::parse_duration(checkpoint_period_str)?,
|
||||
None => DEFAULT_CHECKPOINT_PERIOD,
|
||||
};
|
||||
|
||||
let gc_horizon: u64 = match self.gc_horizon.as_ref() {
|
||||
@@ -117,7 +172,7 @@ impl CfgFileParams {
|
||||
})?;
|
||||
|
||||
if !pg_distrib_dir.join("bin/postgres").exists() {
|
||||
anyhow::bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
|
||||
bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
|
||||
}
|
||||
|
||||
if auth_type == AuthType::ZenithJWT {
|
||||
@@ -132,11 +187,42 @@ impl CfgFileParams {
|
||||
);
|
||||
}
|
||||
|
||||
let max_concurrent_sync = match self.relish_storage_max_concurrent_sync.as_deref() {
|
||||
Some(relish_storage_max_concurrent_sync) => {
|
||||
relish_storage_max_concurrent_sync.parse()?
|
||||
}
|
||||
None => DEFAULT_RELISH_STORAGE_MAX_CONCURRENT_SYNC_LIMITS,
|
||||
};
|
||||
let relish_storage_config = self.relish_storage.as_ref().map(|storage_params| {
|
||||
let storage = match storage_params.clone() {
|
||||
RelishStorage::Local { local_path } => {
|
||||
RelishStorageKind::LocalFs(PathBuf::from(local_path))
|
||||
}
|
||||
RelishStorage::AwsS3 {
|
||||
bucket_name,
|
||||
bucket_region,
|
||||
access_key_id,
|
||||
secret_access_key,
|
||||
} => RelishStorageKind::AwsS3(S3Config {
|
||||
bucket_name,
|
||||
bucket_region,
|
||||
access_key_id,
|
||||
secret_access_key,
|
||||
}),
|
||||
};
|
||||
RelishStorageConfig {
|
||||
max_concurrent_sync,
|
||||
storage,
|
||||
}
|
||||
});
|
||||
|
||||
Ok(PageServerConf {
|
||||
daemonize: false,
|
||||
|
||||
listen_addr,
|
||||
http_endpoint_addr,
|
||||
listen_pg_addr,
|
||||
listen_http_addr,
|
||||
checkpoint_distance,
|
||||
checkpoint_period,
|
||||
gc_horizon,
|
||||
gc_period,
|
||||
|
||||
@@ -148,19 +234,29 @@ impl CfgFileParams {
|
||||
|
||||
auth_validation_public_key_path,
|
||||
auth_type,
|
||||
relish_storage_config,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
zenith_metrics::set_common_metrics_prefix("pageserver");
|
||||
let arg_matches = App::new("Zenith page server")
|
||||
.about("Materializes WAL stream to pages and serves them to the postgres")
|
||||
.arg(
|
||||
Arg::with_name("listen")
|
||||
Arg::with_name("listen-pg")
|
||||
.short("l")
|
||||
.long("listen")
|
||||
.long("listen-pg")
|
||||
.alias("listen") // keep some compatibility
|
||||
.takes_value(true)
|
||||
.help("listen for incoming page requests on ip:port (default: 127.0.0.1:5430)"),
|
||||
.help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("listen-http")
|
||||
.long("listen-http")
|
||||
.alias("http_endpoint") // keep some compatibility
|
||||
.takes_value(true)
|
||||
.help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("daemonize")
|
||||
@@ -175,6 +271,18 @@ fn main() -> Result<()> {
|
||||
.takes_value(false)
|
||||
.help("Initialize pageserver repo"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("checkpoint_distance")
|
||||
.long("checkpoint_distance")
|
||||
.takes_value(true)
|
||||
.help("Distance from current LSN to perform checkpoint of in-memory layers"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("checkpoint_period")
|
||||
.long("checkpoint_period")
|
||||
.takes_value(true)
|
||||
.help("Interval between checkpoint iterations"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("gc_horizon")
|
||||
.long("gc_horizon")
|
||||
@@ -219,10 +327,56 @@ fn main() -> Result<()> {
|
||||
.takes_value(true)
|
||||
.help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("relish-storage-local-path")
|
||||
.long("relish-storage-local-path")
|
||||
.takes_value(true)
|
||||
.help("Path to the local directory, to be used as an external relish storage")
|
||||
.conflicts_with_all(&[
|
||||
"relish-storage-s3-bucket",
|
||||
"relish-storage-region",
|
||||
"relish-storage-access-key",
|
||||
"relish-storage-secret-access-key",
|
||||
]),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("relish-storage-s3-bucket")
|
||||
.long("relish-storage-s3-bucket")
|
||||
.takes_value(true)
|
||||
.help("Name of the AWS S3 bucket to use an external relish storage")
|
||||
.requires("relish-storage-region"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("relish-storage-region")
|
||||
.long("relish-storage-region")
|
||||
.takes_value(true)
|
||||
.help("Region of the AWS S3 bucket"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("relish-storage-access-key")
|
||||
.long("relish-storage-access-key")
|
||||
.takes_value(true)
|
||||
.help("Credentials to access the AWS S3 bucket"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("relish-storage-secret-access-key")
|
||||
.long("relish-storage-secret-access-key")
|
||||
.takes_value(true)
|
||||
.help("Credentials to access the AWS S3 bucket"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("relish-storage-max-concurrent-sync")
|
||||
.long("relish-storage-max-concurrent-sync")
|
||||
.takes_value(true)
|
||||
.help("Maximum allowed concurrent synchronisations with storage"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith"));
|
||||
let cfg_file_path = workdir.canonicalize()?.join("pageserver.toml");
|
||||
let cfg_file_path = workdir
|
||||
.canonicalize()
|
||||
.with_context(|| format!("Error opening workdir '{}'", workdir.display()))?
|
||||
.join("pageserver.toml");
|
||||
|
||||
let args_params = CfgFileParams::from_args(&arg_matches);
|
||||
|
||||
@@ -234,22 +388,37 @@ fn main() -> Result<()> {
|
||||
args_params
|
||||
} else {
|
||||
// Supplement the CLI arguments with the config file
|
||||
let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)?;
|
||||
let file_params: CfgFileParams = toml::from_str(&cfg_file_contents)?;
|
||||
let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)
|
||||
.with_context(|| format!("No pageserver config at '{}'", cfg_file_path.display()))?;
|
||||
let file_params: CfgFileParams = toml::from_str(&cfg_file_contents).with_context(|| {
|
||||
format!(
|
||||
"Failed to read '{}' as pageserver config",
|
||||
cfg_file_path.display()
|
||||
)
|
||||
})?;
|
||||
args_params.or(file_params)
|
||||
};
|
||||
|
||||
// Set CWD to workdir for non-daemon modes
|
||||
env::set_current_dir(&workdir)?;
|
||||
env::set_current_dir(&workdir).with_context(|| {
|
||||
format!(
|
||||
"Failed to set application's current dir to '{}'",
|
||||
workdir.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
// Ensure the config is valid, even if just init-ing
|
||||
let mut conf = params.try_into_config()?;
|
||||
let mut conf = params.try_into_config().with_context(|| {
|
||||
format!(
|
||||
"Pageserver config at '{}' is not valid",
|
||||
cfg_file_path.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
conf.daemonize = arg_matches.is_present("daemonize");
|
||||
|
||||
if init && conf.daemonize {
|
||||
eprintln!("--daemonize cannot be used with --init");
|
||||
exit(1);
|
||||
bail!("--daemonize cannot be used with --init")
|
||||
}
|
||||
|
||||
// The configuration is all set up now. Turn it into a 'static
|
||||
@@ -259,28 +428,42 @@ fn main() -> Result<()> {
|
||||
|
||||
// Create repo and exit if init was requested
|
||||
if init {
|
||||
branches::init_pageserver(conf, create_tenant)?;
|
||||
branches::init_pageserver(conf, create_tenant).context("Failed to init pageserver")?;
|
||||
// write the config file
|
||||
let cfg_file_contents = toml::to_string_pretty(¶ms)?;
|
||||
let cfg_file_contents = toml::to_string_pretty(¶ms)
|
||||
.context("Failed to create pageserver config contents for initialisation")?;
|
||||
// TODO support enable-auth flag
|
||||
std::fs::write(&cfg_file_path, cfg_file_contents)?;
|
||||
|
||||
return Ok(());
|
||||
std::fs::write(&cfg_file_path, cfg_file_contents).with_context(|| {
|
||||
format!(
|
||||
"Failed to initialize pageserver config at '{}'",
|
||||
cfg_file_path.display()
|
||||
)
|
||||
})?;
|
||||
Ok(())
|
||||
} else {
|
||||
start_pageserver(conf).context("Failed to start pageserver")
|
||||
}
|
||||
|
||||
start_pageserver(conf)
|
||||
}
|
||||
|
||||
fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
|
||||
// Initialize logger
|
||||
let (_scope_guard, log_file) = logger::init_logging(conf, "pageserver.log")?;
|
||||
let _log_guard = slog_stdlog::init()?;
|
||||
|
||||
// Note: this `info!(...)` macro comes from `log` crate
|
||||
info!("standard logging redirected to slog");
|
||||
let (_scope_guard, log_file) = logging::init(LOG_FILE_NAME, conf.daemonize)?;
|
||||
|
||||
// TODO: Check that it looks like a valid repository before going further
|
||||
|
||||
// bind sockets before daemonizing so we report errors early and do not return until we are listening
|
||||
info!(
|
||||
"Starting pageserver http handler on {}",
|
||||
conf.listen_http_addr
|
||||
);
|
||||
let http_listener = TcpListener::bind(conf.listen_http_addr.clone())?;
|
||||
|
||||
info!(
|
||||
"Starting pageserver pg protocol handler on {}",
|
||||
conf.listen_pg_addr
|
||||
);
|
||||
let pageserver_listener = TcpListener::bind(conf.listen_pg_addr.clone())?;
|
||||
|
||||
if conf.daemonize {
|
||||
info!("daemonizing...");
|
||||
|
||||
@@ -297,10 +480,20 @@ fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
|
||||
|
||||
match daemonize.start() {
|
||||
Ok(_) => info!("Success, daemonized"),
|
||||
Err(e) => error!("Error, {}", e),
|
||||
Err(e) => error!("could not daemonize: {:#}", e),
|
||||
}
|
||||
}
|
||||
|
||||
// keep join handles for spawned threads
|
||||
// don't spawn threads before daemonizing
|
||||
let mut join_handles = Vec::new();
|
||||
|
||||
if let Some(handle) = relish_storage::run_storage_sync_thread(conf)? {
|
||||
join_handles.push(handle);
|
||||
}
|
||||
// Initialize tenant manager.
|
||||
tenant_mgr::init(conf);
|
||||
|
||||
// initialize authentication for incoming connections
|
||||
let auth = match &conf.auth_type {
|
||||
AuthType::Trust | AuthType::MD5 => None,
|
||||
@@ -313,21 +506,16 @@ fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
|
||||
info!("Using auth: {:#?}", conf.auth_type);
|
||||
|
||||
// Spawn a new thread for the http endpoint
|
||||
// bind before launching separate thread so the error reported before startup exits
|
||||
let cloned = auth.clone();
|
||||
thread::Builder::new()
|
||||
let http_endpoint_thread = thread::Builder::new()
|
||||
.name("http_endpoint_thread".into())
|
||||
.spawn(move || {
|
||||
let router = http::make_router(conf, cloned);
|
||||
endpoint::serve_thread_main(router, conf.http_endpoint_addr.clone())
|
||||
endpoint::serve_thread_main(router, http_listener)
|
||||
})?;
|
||||
|
||||
// Check that we can bind to address before starting threads to simplify shutdown
|
||||
// sequence if port is occupied.
|
||||
info!("Starting pageserver on {}", conf.listen_addr);
|
||||
let pageserver_listener = TcpListener::bind(conf.listen_addr.clone())?;
|
||||
|
||||
// Initialize tenant manager.
|
||||
tenant_mgr::init(conf);
|
||||
join_handles.push(http_endpoint_thread);
|
||||
|
||||
// Spawn a thread to listen for connections. It will spawn further threads
|
||||
// for each connection.
|
||||
@@ -337,9 +525,13 @@ fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
|
||||
page_service::thread_main(conf, auth, pageserver_listener, conf.auth_type)
|
||||
})?;
|
||||
|
||||
page_service_thread
|
||||
.join()
|
||||
.expect("Page service thread has panicked")?;
|
||||
join_handles.push(page_service_thread);
|
||||
|
||||
for handle in join_handles.into_iter() {
|
||||
handle
|
||||
.join()
|
||||
.expect("thread panicked")
|
||||
.expect("thread exited with an error")
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -17,22 +17,78 @@ use std::{
|
||||
use zenith_utils::zid::{ZTenantId, ZTimelineId};
|
||||
|
||||
use log::*;
|
||||
use zenith_utils::crashsafe_dir;
|
||||
use zenith_utils::logging;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
use crate::logger;
|
||||
use crate::restore_local_repo;
|
||||
use crate::tenant_mgr;
|
||||
use crate::walredo::WalRedoManager;
|
||||
use crate::{repository::Repository, PageServerConf};
|
||||
use crate::{restore_local_repo, LOG_FILE_NAME};
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone)]
|
||||
pub struct BranchInfo {
|
||||
pub name: String,
|
||||
#[serde(with = "hex")]
|
||||
pub timeline_id: ZTimelineId,
|
||||
pub latest_valid_lsn: Option<Lsn>,
|
||||
pub latest_valid_lsn: Lsn,
|
||||
pub ancestor_id: Option<String>,
|
||||
pub ancestor_lsn: Option<String>,
|
||||
pub current_logical_size: usize,
|
||||
pub current_logical_size_non_incremental: usize,
|
||||
}
|
||||
|
||||
impl BranchInfo {
|
||||
pub fn from_path<T: AsRef<Path>>(
|
||||
path: T,
|
||||
conf: &PageServerConf,
|
||||
tenantid: &ZTenantId,
|
||||
repo: &Arc<dyn Repository>,
|
||||
) -> Result<Self> {
|
||||
let name = path
|
||||
.as_ref()
|
||||
.file_name()
|
||||
.unwrap()
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string();
|
||||
let timeline_id = std::fs::read_to_string(path)?.parse::<ZTimelineId>()?;
|
||||
|
||||
let timeline = repo.get_timeline(timeline_id)?;
|
||||
|
||||
let ancestor_path = conf.ancestor_path(&timeline_id, tenantid);
|
||||
let mut ancestor_id: Option<String> = None;
|
||||
let mut ancestor_lsn: Option<String> = None;
|
||||
|
||||
if ancestor_path.exists() {
|
||||
let ancestor = std::fs::read_to_string(ancestor_path)?;
|
||||
let mut strings = ancestor.split('@');
|
||||
|
||||
ancestor_id = Some(
|
||||
strings
|
||||
.next()
|
||||
.with_context(|| "wrong branch ancestor point in time format")?
|
||||
.to_owned(),
|
||||
);
|
||||
ancestor_lsn = Some(
|
||||
strings
|
||||
.next()
|
||||
.with_context(|| "wrong branch ancestor point in time format")?
|
||||
.to_owned(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(BranchInfo {
|
||||
name,
|
||||
timeline_id,
|
||||
latest_valid_lsn: timeline.get_last_record_lsn(),
|
||||
ancestor_id,
|
||||
ancestor_lsn,
|
||||
current_logical_size: timeline.get_current_logical_size(),
|
||||
current_logical_size_non_incremental: timeline
|
||||
.get_current_logical_size_non_incremental(timeline.get_last_record_lsn())?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
@@ -43,8 +99,8 @@ pub struct PointInTime {
|
||||
|
||||
pub fn init_pageserver(conf: &'static PageServerConf, create_tenant: Option<&str>) -> Result<()> {
|
||||
// Initialize logger
|
||||
let (_scope_guard, _log_file) = logger::init_logging(conf, "pageserver.log")?;
|
||||
let _log_guard = slog_stdlog::init()?;
|
||||
// use true as the daemonize parameter, because otherwise we pollute the zenith CLI output with several pages of info messages
|
||||
let (_scope_guard, _log_file) = logging::init(LOG_FILE_NAME, true)?;
|
||||
|
||||
// We don't use the real WAL redo manager, because we don't want to spawn the WAL redo
|
||||
// process during repository initialization.
|
||||
@@ -63,7 +119,7 @@ pub fn init_pageserver(conf: &'static PageServerConf, create_tenant: Option<&str
|
||||
println!("initializing tenantid {}", tenantid);
|
||||
create_repo(conf, tenantid, dummy_redo_mgr).with_context(|| "failed to create repo")?;
|
||||
}
|
||||
fs::create_dir_all(conf.tenants_path())?;
|
||||
crashsafe_dir::create_dir_all(conf.tenants_path())?;
|
||||
|
||||
println!("pageserver init succeeded");
|
||||
Ok(())
|
||||
@@ -80,15 +136,12 @@ pub fn create_repo(
|
||||
}
|
||||
|
||||
// top-level dir may exist if we are creating it through CLI
|
||||
fs::create_dir_all(&repo_dir)
|
||||
crashsafe_dir::create_dir_all(&repo_dir)
|
||||
.with_context(|| format!("could not create directory {}", repo_dir.display()))?;
|
||||
|
||||
// Note: this `info!(...)` macro comes from `log` crate
|
||||
info!("standard logging redirected to slog");
|
||||
|
||||
fs::create_dir(conf.timelines_path(&tenantid))?;
|
||||
fs::create_dir_all(conf.branches_path(&tenantid))?;
|
||||
fs::create_dir_all(conf.tags_path(&tenantid))?;
|
||||
crashsafe_dir::create_dir(conf.timelines_path(&tenantid))?;
|
||||
crashsafe_dir::create_dir_all(conf.branches_path(&tenantid))?;
|
||||
crashsafe_dir::create_dir_all(conf.tags_path(&tenantid))?;
|
||||
|
||||
info!("created directory structure in {}", repo_dir.display());
|
||||
|
||||
@@ -98,12 +151,13 @@ pub fn create_repo(
|
||||
conf,
|
||||
wal_redo_manager,
|
||||
tenantid,
|
||||
false,
|
||||
));
|
||||
|
||||
// Load data into pageserver
|
||||
// TODO To implement zenith import we need to
|
||||
// move data loading out of create_repo()
|
||||
bootstrap_timeline(conf, tenantid, tli, &*repo)?;
|
||||
bootstrap_timeline(conf, tenantid, tli, repo.as_ref())?;
|
||||
|
||||
Ok(repo)
|
||||
}
|
||||
@@ -118,7 +172,7 @@ fn get_lsn_from_controlfile(path: &Path) -> Result<Lsn> {
|
||||
Ok(Lsn(lsn))
|
||||
}
|
||||
|
||||
// Create the cluster temporarily in a initdbpath directory inside the repository
|
||||
// Create the cluster temporarily in 'initdbpath' directory inside the repository
|
||||
// to get bootstrap data for timeline initialization.
|
||||
//
|
||||
fn run_initdb(conf: &'static PageServerConf, initdbpath: &Path) -> Result<()> {
|
||||
@@ -166,11 +220,15 @@ fn bootstrap_timeline(
|
||||
|
||||
info!("bootstrap_timeline {:?} at lsn {}", pgdata_path, lsn);
|
||||
|
||||
let timeline = repo.create_empty_timeline(tli, lsn)?;
|
||||
restore_local_repo::import_timeline_from_postgres_datadir(&pgdata_path, &*timeline, lsn)?;
|
||||
|
||||
let wal_dir = pgdata_path.join("pg_wal");
|
||||
restore_local_repo::import_timeline_wal(&wal_dir, &*timeline, timeline.get_last_record_lsn())?;
|
||||
// Import the contents of the data directory at the initial checkpoint
|
||||
// LSN, and any WAL after that.
|
||||
let timeline = repo.create_empty_timeline(tli)?;
|
||||
restore_local_repo::import_timeline_from_postgres_datadir(
|
||||
&pgdata_path,
|
||||
timeline.as_ref(),
|
||||
lsn,
|
||||
)?;
|
||||
timeline.checkpoint()?;
|
||||
|
||||
println!(
|
||||
"created initial timeline {} timeline.lsn {}",
|
||||
@@ -201,7 +259,7 @@ pub(crate) fn get_tenants(conf: &PageServerConf) -> Result<Vec<String>> {
|
||||
}
|
||||
|
||||
pub(crate) fn get_branches(conf: &PageServerConf, tenantid: &ZTenantId) -> Result<Vec<BranchInfo>> {
|
||||
let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
|
||||
let repo = tenant_mgr::get_repository_for_tenant(*tenantid)?;
|
||||
|
||||
// Each branch has a corresponding record (text file) in the refs/branches
|
||||
// with timeline_id.
|
||||
@@ -210,43 +268,7 @@ pub(crate) fn get_branches(conf: &PageServerConf, tenantid: &ZTenantId) -> Resul
|
||||
std::fs::read_dir(&branches_dir)?
|
||||
.map(|dir_entry_res| {
|
||||
let dir_entry = dir_entry_res?;
|
||||
let name = dir_entry.file_name().to_str().unwrap().to_string();
|
||||
let timeline_id = std::fs::read_to_string(dir_entry.path())?.parse::<ZTimelineId>()?;
|
||||
|
||||
let latest_valid_lsn = repo
|
||||
.get_timeline(timeline_id)
|
||||
.map(|timeline| timeline.get_last_record_lsn())
|
||||
.ok();
|
||||
|
||||
let ancestor_path = conf.ancestor_path(&timeline_id, tenantid);
|
||||
let mut ancestor_id: Option<String> = None;
|
||||
let mut ancestor_lsn: Option<String> = None;
|
||||
|
||||
if ancestor_path.exists() {
|
||||
let ancestor = std::fs::read_to_string(ancestor_path)?;
|
||||
let mut strings = ancestor.split('@');
|
||||
|
||||
ancestor_id = Some(
|
||||
strings
|
||||
.next()
|
||||
.with_context(|| "wrong branch ancestor point in time format")?
|
||||
.to_owned(),
|
||||
);
|
||||
ancestor_lsn = Some(
|
||||
strings
|
||||
.next()
|
||||
.with_context(|| "wrong branch ancestor point in time format")?
|
||||
.to_owned(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(BranchInfo {
|
||||
name,
|
||||
timeline_id,
|
||||
latest_valid_lsn,
|
||||
ancestor_id,
|
||||
ancestor_lsn,
|
||||
})
|
||||
BranchInfo::from_path(dir_entry.path(), conf, tenantid, &repo)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
@@ -257,7 +279,7 @@ pub(crate) fn create_branch(
|
||||
startpoint_str: &str,
|
||||
tenantid: &ZTenantId,
|
||||
) -> Result<BranchInfo> {
|
||||
let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
|
||||
let repo = tenant_mgr::get_repository_for_tenant(*tenantid)?;
|
||||
|
||||
if conf.branch_path(branchname, tenantid).exists() {
|
||||
anyhow::bail!("branch {} already exists", branchname);
|
||||
@@ -270,6 +292,14 @@ pub(crate) fn create_branch(
|
||||
let end_of_wal = timeline.get_last_record_lsn();
|
||||
info!("branching at end of WAL: {}", end_of_wal);
|
||||
startpoint.lsn = end_of_wal;
|
||||
} else {
|
||||
// Wait for the WAL to arrive and be processed on the parent branch up
|
||||
// to the requested branch point. The repository code itself doesn't
|
||||
// require it, but if we start to receive WAL on the new timeline,
|
||||
// decoding the new WAL might need to look up previous pages, relation
|
||||
// sizes etc. and that would get confused if the previous page versions
|
||||
// are not in the repository yet.
|
||||
timeline.wait_lsn(startpoint.lsn)?;
|
||||
}
|
||||
startpoint.lsn = startpoint.lsn.align();
|
||||
if timeline.get_start_lsn() > startpoint.lsn {
|
||||
@@ -296,9 +326,11 @@ pub(crate) fn create_branch(
|
||||
Ok(BranchInfo {
|
||||
name: branchname.to_string(),
|
||||
timeline_id: newtli,
|
||||
latest_valid_lsn: Some(startpoint.lsn),
|
||||
latest_valid_lsn: startpoint.lsn,
|
||||
ancestor_id: None,
|
||||
ancestor_lsn: None,
|
||||
current_logical_size: 0,
|
||||
current_logical_size_non_incremental: 0,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -387,7 +419,6 @@ fn create_timeline(
|
||||
let timelinedir = conf.timeline_path(&timelineid, tenantid);
|
||||
|
||||
fs::create_dir(&timelinedir)?;
|
||||
fs::create_dir(&timelinedir.join("wal"))?;
|
||||
|
||||
if let Some(ancestor) = ancestor {
|
||||
let data = format!("{}@{}", ancestor.timelineid, ancestor.lsn);
|
||||
|
||||
@@ -54,7 +54,52 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ForbiddenError"
|
||||
|
||||
"500":
|
||||
description: Generic operation error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
/v1/branch/{tenant_id}/{branch_name}:
|
||||
parameters:
|
||||
- name: tenant_id
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: hex
|
||||
- name: branch_name
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
get:
|
||||
description: Get branch by name for tenant
|
||||
responses:
|
||||
"200":
|
||||
description: BranchInfo
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/BranchInfo"
|
||||
"400":
|
||||
description: Error when no tenant id found in path or no branch name
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
"401":
|
||||
description: Unauthorized Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/UnauthorizedError"
|
||||
"403":
|
||||
description: Forbidden Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ForbiddenError"
|
||||
"500":
|
||||
description: Generic operation error
|
||||
content:
|
||||
@@ -203,6 +248,9 @@ components:
|
||||
required:
|
||||
- name
|
||||
- timeline_id
|
||||
- latest_valid_lsn
|
||||
- current_logical_size
|
||||
- current_logical_size_non_incremental
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
@@ -213,6 +261,10 @@ components:
|
||||
type: string
|
||||
ancestor_lsn:
|
||||
type: string
|
||||
current_logical_size:
|
||||
type: integer
|
||||
current_logical_size_non_incremental:
|
||||
type: integer
|
||||
Error:
|
||||
type: object
|
||||
required:
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Result;
|
||||
@@ -18,10 +19,8 @@ use zenith_utils::http::{
|
||||
|
||||
use super::models::BranchCreateRequest;
|
||||
use super::models::TenantCreateRequest;
|
||||
use crate::{
|
||||
branches::{self},
|
||||
tenant_mgr, PageServerConf, ZTenantId,
|
||||
};
|
||||
use crate::branches::BranchInfo;
|
||||
use crate::{branches, tenant_mgr, PageServerConf, ZTenantId};
|
||||
|
||||
#[derive(Debug)]
|
||||
struct State {
|
||||
@@ -57,6 +56,33 @@ fn get_config(request: &Request<Body>) -> &'static PageServerConf {
|
||||
get_state(request).conf
|
||||
}
|
||||
|
||||
fn get_request_param<'a>(
|
||||
request: &'a Request<Body>,
|
||||
param_name: &str,
|
||||
) -> Result<&'a str, ApiError> {
|
||||
match request.param(param_name) {
|
||||
Some(arg) => Ok(arg),
|
||||
None => {
|
||||
return Err(ApiError::BadRequest(format!(
|
||||
"no {} specified in path param",
|
||||
param_name
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_request_param<T: FromStr>(
|
||||
request: &Request<Body>,
|
||||
param_name: &str,
|
||||
) -> Result<T, ApiError> {
|
||||
match get_request_param(request, param_name)?.parse() {
|
||||
Ok(v) => Ok(v),
|
||||
Err(_) => Err(ApiError::BadRequest(
|
||||
"failed to parse tenant id".to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
// healthcheck handler
|
||||
async fn status_handler(_: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
Ok(Response::builder()
|
||||
@@ -85,16 +111,7 @@ async fn branch_create_handler(mut request: Request<Body>) -> Result<Response<Bo
|
||||
}
|
||||
|
||||
async fn branch_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
let tenantid: ZTenantId = match request.param("tenant_id") {
|
||||
Some(arg) => arg
|
||||
.parse()
|
||||
.map_err(|_| ApiError::BadRequest("failed to parse tenant id".to_string()))?,
|
||||
None => {
|
||||
return Err(ApiError::BadRequest(
|
||||
"no tenant id specified in path param".to_string(),
|
||||
))
|
||||
}
|
||||
};
|
||||
let tenantid: ZTenantId = parse_request_param(&request, "tenant_id")?;
|
||||
|
||||
check_permission(&request, Some(tenantid))?;
|
||||
|
||||
@@ -106,6 +123,23 @@ async fn branch_list_handler(request: Request<Body>) -> Result<Response<Body>, A
|
||||
Ok(json_response(StatusCode::OK, response_data)?)
|
||||
}
|
||||
|
||||
// TODO add to swagger
|
||||
async fn branch_detail_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
let tenantid: ZTenantId = parse_request_param(&request, "tenant_id")?;
|
||||
let branch_name: &str = get_request_param(&request, "branch_name")?;
|
||||
let conf = get_state(&request).conf;
|
||||
let path = conf.branch_path(branch_name, &tenantid);
|
||||
|
||||
let response_data = tokio::task::spawn_blocking(move || {
|
||||
let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
|
||||
BranchInfo::from_path(path, conf, &tenantid, &repo)
|
||||
})
|
||||
.await
|
||||
.map_err(ApiError::from_err)??;
|
||||
|
||||
Ok(json_response(StatusCode::OK, response_data)?)
|
||||
}
|
||||
|
||||
async fn tenant_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
// check for management permission
|
||||
check_permission(&request, None)?;
|
||||
@@ -159,6 +193,7 @@ pub fn make_router(
|
||||
.data(Arc::new(State::new(conf, auth)))
|
||||
.get("/v1/status", status_handler)
|
||||
.get("/v1/branch/:tenant_id", branch_list_handler)
|
||||
.get("/v1/branch/:tenant_id/:branch_name", branch_detail_handler)
|
||||
.post("/v1/branch", branch_create_handler)
|
||||
.get("/v1/tenant", tenant_list_handler)
|
||||
.post("/v1/tenant", tenant_create_handler)
|
||||
|
||||
@@ -4,7 +4,60 @@ The on-disk format is based on immutable files. The page server
receives a stream of incoming WAL, parses the WAL records to determine
which pages they apply to, and accumulates the incoming changes in
memory. Every now and then, the accumulated changes are written out to
new files.
new immutable files. This process is called checkpointing. Old versions
of on-disk files that are not needed by any timeline are removed by the GC
process.

# Terms used in layered repository

- Relish - one PostgreSQL relation or similarly treated file.
- Segment - one slice of a Relish that is stored in a LayeredTimeline.
- Layer - specific version of a relish Segment in a range of LSNs.

Layers can be InMemory or OnDisk:
- InMemory layer is not durably stored and needs to be rebuilt from WAL on pageserver start.
- OnDisk layer is durably stored.

OnDisk layers can be Image or Delta:
- ImageLayer represents an image or a snapshot of a segment at one particular LSN.
- DeltaLayer represents a collection of WAL records or page images in a range of LSNs.

Dropped segments are always represented on disk by a DeltaLayer.

A layer's LSN range is defined by start_lsn and end_lsn:
- start_lsn is inclusive.
- end_lsn is exclusive.

For an open in-memory layer, the end_lsn is MAX_LSN. For a frozen
in-memory layer or a delta layer, it is a valid end bound. An image
layer represents a snapshot at one LSN, so end_lsn is always the
snapshot LSN + 1.

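A small self-contained sketch of that half-open [start_lsn, end_lsn) convention, using toy types rather than the pageserver's own `Lsn` and layer structs:

```rust
// Toy types; the pageserver has its own Lsn and layer structs.
#[derive(Clone, Copy, PartialEq, PartialOrd, Debug)]
struct Lsn(u64);

const MAX_LSN: Lsn = Lsn(u64::MAX);

struct LayerRange {
    start_lsn: Lsn, // inclusive
    end_lsn: Lsn,   // exclusive; MAX_LSN for an open in-memory layer
}

impl LayerRange {
    fn covers(&self, lsn: Lsn) -> bool {
        self.start_lsn <= lsn && lsn < self.end_lsn
    }
}

fn main() {
    // An image layer is a snapshot at one LSN, so its range is [lsn, lsn + 1).
    let image = LayerRange { start_lsn: Lsn(100), end_lsn: Lsn(101) };
    assert!(image.covers(Lsn(100)));
    assert!(!image.covers(Lsn(101)));

    // An open in-memory layer accepts everything from its start onwards.
    let open = LayerRange { start_lsn: Lsn(200), end_lsn: MAX_LSN };
    assert!(open.covers(Lsn(5000)));
}
```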
Layers can be open or historical:
- Open layer is a writeable one. Only an InMemory layer can be open.
FIXME: If an open layer is dropped, it is not writeable, so it should be turned into historical,
but this is not implemented yet - see bug #569.
- Historical layer is one that cannot be modified anymore. Currently only OnDisk layers can be historical.

- LayerMap - a map that tracks what layers exist for all the relishes in a timeline.

LayerMap consists of two data structures:
- segs - all the layers, keyed by segment tag.
- open_layers - a data structure that holds all open layers ordered by oldest_pending_lsn for quick access during checkpointing. oldest_pending_lsn is the LSN of the oldest page version stored in this layer.

All operations that update InMemory Layers should update both structures to keep them up-to-date.

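A toy sketch of those two structures, showing why open layers are keyed by oldest_pending_lsn; the real LayerMap in the pageserver has a different, richer interface:

```rust
// Toy sketch only; ties for equal oldest_pending_lsn and layer eviction
// are ignored here.
use std::collections::{BTreeMap, HashMap};

type SegmentTag = (u32, u32); // placeholder; a real struct in the pageserver
type Lsn = u64;

#[derive(Default)]
struct ToyLayerMap {
    // segs: all layers for a segment, newest last
    segs: HashMap<SegmentTag, Vec<String>>,
    // open_layers: open in-memory layers keyed by oldest_pending_lsn, so the
    // checkpointer can pick the layer holding the oldest dirty data first
    open_layers: BTreeMap<Lsn, SegmentTag>,
}

impl ToyLayerMap {
    fn insert_open(&mut self, seg: SegmentTag, oldest_pending_lsn: Lsn, name: &str) {
        // Both structures must be updated together, as the README notes.
        self.segs.entry(seg).or_default().push(name.to_string());
        self.open_layers.insert(oldest_pending_lsn, seg);
    }

    fn next_to_checkpoint(&self) -> Option<(&Lsn, &SegmentTag)> {
        self.open_layers.iter().next()
    }
}

fn main() {
    let mut map = ToyLayerMap::default();
    map.insert_open((1, 0), 0x100, "inmem-1");
    map.insert_open((2, 0), 0x80, "inmem-2");
    // The layer holding the oldest pending LSN is checkpointed first.
    assert_eq!(map.next_to_checkpoint().map(|(lsn, _)| *lsn), Some(0x80));
}
```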
- LayeredTimeline - implements Timeline interface.

All methods of LayeredTimeline are aware of its ancestors and return data taking them into account.
TODO: Are there any exceptions to this?
For example, timeline.list_rels(lsn) will return all segments that are visible in this timeline at the LSN,
including ones that were not modified in this timeline and thus don't have a layer in the timeline's LayerMap.

TODO:
Describe GC and checkpoint interval settings.

# Layer files (On-disk layers)

The files are called "layer files". Each layer file corresponds
to one RELISH_SEG_SIZE slice of a PostgreSQL relation fork or

@@ -16,14 +16,14 @@ pub fn read_blob(reader: &BoundedReader<&'_ File>, range: &BlobRange) -> Result<
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
pub struct BlobWriter {
|
||||
writer: ChapterWriter<File>,
|
||||
pub struct BlobWriter<W> {
|
||||
writer: ChapterWriter<W>,
|
||||
offset: u64,
|
||||
}
|
||||
|
||||
impl BlobWriter {
|
||||
impl<W: Write> BlobWriter<W> {
|
||||
// This function takes a BookWriter and creates a new chapter to ensure offset is 0.
|
||||
pub fn new(book_writer: BookWriter<File>, chapter_id: impl Into<ChapterId>) -> Self {
|
||||
pub fn new(book_writer: BookWriter<W>, chapter_id: impl Into<ChapterId>) -> Self {
|
||||
let writer = book_writer.new_chapter(chapter_id);
|
||||
Self { writer, offset: 0 }
|
||||
}
|
||||
@@ -39,7 +39,7 @@ impl BlobWriter {
|
||||
Ok(range)
|
||||
}
|
||||
|
||||
pub fn close(self) -> bookfile::Result<BookWriter<File>> {
|
||||
pub fn close(self) -> bookfile::Result<BookWriter<W>> {
|
||||
self.writer.close()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -42,24 +42,22 @@ use crate::layered_repository::filename::{DeltaFileName, PathOrConf};
|
||||
use crate::layered_repository::storage_layer::{
|
||||
Layer, PageReconstructData, PageReconstructResult, PageVersion, SegmentTag,
|
||||
};
|
||||
use crate::repository::WALRecord;
|
||||
use crate::waldecoder;
|
||||
use crate::PageServerConf;
|
||||
use crate::{ZTenantId, ZTimelineId};
|
||||
use anyhow::{bail, Result};
|
||||
use bytes::Bytes;
|
||||
use anyhow::{bail, ensure, Result};
|
||||
use log::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::BTreeMap;
|
||||
use zenith_utils::vec_map::VecMap;
|
||||
// avoid binding to Write (conflicts with std::io::Write)
|
||||
// while being able to use std::fmt::Write's methods
|
||||
use std::fmt::Write as _;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::io::{BufWriter, Write};
|
||||
use std::ops::Bound::Included;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex, MutexGuard};
|
||||
use std::sync::{Mutex, MutexGuard};
|
||||
|
||||
use bookfile::{Book, BookWriter};
|
||||
|
||||
@@ -67,9 +65,10 @@ use zenith_utils::bin_ser::BeSer;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
use super::blob::{read_blob, BlobRange};
|
||||
use super::page_versions::OrderedBlockIter;
|
||||
|
||||
// Magic constant to identify a Zenith delta file
|
||||
static DELTA_FILE_MAGIC: u32 = 0x5A616E01;
|
||||
pub const DELTA_FILE_MAGIC: u32 = 0x5A616E01;
|
||||
|
||||
/// Mapping from (block #, lsn) -> page/WAL record
|
||||
/// byte ranges in PAGE_VERSIONS_CHAPTER
|
||||
@@ -79,10 +78,34 @@ static PAGE_VERSION_METAS_CHAPTER: u64 = 1;
|
||||
static PAGE_VERSIONS_CHAPTER: u64 = 2;
|
||||
static REL_SIZES_CHAPTER: u64 = 3;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct PageVersionMeta {
|
||||
page_image_range: Option<BlobRange>,
|
||||
record_range: Option<BlobRange>,
|
||||
/// Contains the [`Summary`] struct
|
||||
static SUMMARY_CHAPTER: u64 = 4;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
struct Summary {
|
||||
tenantid: ZTenantId,
|
||||
timelineid: ZTimelineId,
|
||||
seg: SegmentTag,
|
||||
|
||||
start_lsn: Lsn,
|
||||
end_lsn: Lsn,
|
||||
|
||||
dropped: bool,
|
||||
}
|
||||
|
||||
impl From<&DeltaLayer> for Summary {
|
||||
fn from(layer: &DeltaLayer) -> Self {
|
||||
Self {
|
||||
tenantid: layer.tenantid,
|
||||
timelineid: layer.timelineid,
|
||||
seg: layer.seg,
|
||||
|
||||
start_lsn: layer.start_lsn,
|
||||
end_lsn: layer.end_lsn,
|
||||
|
||||
dropped: layer.dropped,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
@@ -109,9 +132,6 @@ pub struct DeltaLayer {
|
||||
|
||||
dropped: bool,
|
||||
|
||||
/// Predecessor layer
|
||||
predecessor: Option<Arc<dyn Layer>>,
|
||||
|
||||
inner: Mutex<DeltaLayerInner>,
|
||||
}
|
||||
|
||||
@@ -122,10 +142,10 @@ pub struct DeltaLayerInner {
|
||||
|
||||
/// All versions of all pages in the file are kept here.
|
||||
/// Indexed by block number and LSN.
|
||||
page_version_metas: BTreeMap<(u32, Lsn), PageVersionMeta>,
|
||||
page_version_metas: VecMap<(u32, Lsn), BlobRange>,
|
||||
|
||||
/// `relsizes` tracks the size of the relation at different points in time.
|
||||
relsizes: BTreeMap<Lsn, u32>,
|
||||
relsizes: VecMap<Lsn, u32>,
|
||||
}
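
page_version_metas and relsizes switch from BTreeMap to zenith_utils::vec_map::VecMap, whose implementation is not part of this diff. Assuming it is essentially a map stored as a key-sorted Vec (append-only at the tail, range lookups by binary search over a contiguous slice), a minimal self-contained sketch of that shape:

use std::ops::RangeBounds;

/// Minimal sketch of a VecMap-like structure: keys are kept in ascending order
/// in a flat Vec, so lookups are binary searches over a contiguous slice.
pub struct VecMap<K: Ord + Copy, V>(Vec<(K, V)>);

impl<K: Ord + Copy, V> Default for VecMap<K, V> {
    fn default() -> Self {
        VecMap(Vec::new())
    }
}

impl<K: Ord + Copy, V> VecMap<K, V> {
    /// Append a key/value pair; the key must be greater than the current last key.
    pub fn append(&mut self, key: K, value: V) -> Result<(), &'static str> {
        if let Some((last, _)) = self.0.last() {
            if key <= *last {
                return Err("keys must be appended in strictly increasing order");
            }
        }
        self.0.push((key, value));
        Ok(())
    }

    pub fn as_slice(&self) -> &[(K, V)] {
        &self.0
    }

    /// Return the sub-slice whose keys fall inside `range`.
    pub fn slice_range<R: RangeBounds<K>>(&self, range: R) -> &[(K, V)] {
        let start = self.0.partition_point(|(k, _)| match range.start_bound() {
            std::ops::Bound::Included(s) => k < s,
            std::ops::Bound::Excluded(s) => k <= s,
            std::ops::Bound::Unbounded => false,
        });
        let end = self.0.partition_point(|(k, _)| match range.end_bound() {
            std::ops::Bound::Included(e) => k <= e,
            std::ops::Bound::Excluded(e) => k < e,
            std::ops::Bound::Unbounded => true,
        });
        &self.0[start..end]
    }
}

fn main() {
    let mut relsizes: VecMap<u64, u32> = VecMap::default();
    relsizes.append(10, 1).unwrap();
    relsizes.append(20, 3).unwrap();
    relsizes.append(30, 8).unwrap();
    assert_eq!(relsizes.as_slice().len(), 3);
    // Latest size at or before LSN 25: last entry of the slice up to 25.
    let size = relsizes.slice_range(..=25u64).last().map(|(_, v)| *v);
    assert_eq!(size, Some(3));
}

Compared with a BTreeMap this trades flexible insertion for a flat memory layout and cheap range slicing, which fits data that is only ever appended in (blknum, lsn) order. The real VecMap used later in this diff also provides append_or_update_last, split_at and extend.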
|
||||
|
||||
impl Layer for DeltaLayer {
|
||||
@@ -161,6 +181,20 @@ impl Layer for DeltaLayer {
|
||||
)
|
||||
}
|
||||
|
||||
fn path(&self) -> Option<PathBuf> {
|
||||
Some(Self::path_for(
|
||||
&self.path_or_conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
&DeltaFileName {
|
||||
seg: self.seg,
|
||||
start_lsn: self.start_lsn,
|
||||
end_lsn: self.end_lsn,
|
||||
dropped: self.dropped,
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
/// Look up given page in the cache.
|
||||
fn get_page_reconstruct_data(
|
||||
&self,
|
||||
@@ -168,7 +202,7 @@ impl Layer for DeltaLayer {
|
||||
lsn: Lsn,
|
||||
reconstruct_data: &mut PageReconstructData,
|
||||
) -> Result<PageReconstructResult> {
|
||||
let mut cont_lsn: Option<Lsn> = Some(lsn);
|
||||
let mut need_image = true;
|
||||
|
||||
assert!(self.seg.blknum_in_seg(blknum));
|
||||
|
||||
@@ -182,27 +216,26 @@ impl Layer for DeltaLayer {
|
||||
// Scan the metadata BTreeMap backwards, starting from the given entry.
|
||||
let minkey = (blknum, Lsn(0));
|
||||
let maxkey = (blknum, lsn);
|
||||
let mut iter = inner
|
||||
let iter = inner
|
||||
.page_version_metas
|
||||
.range((Included(&minkey), Included(&maxkey)));
|
||||
while let Some(((_blknum, entry_lsn), entry)) = iter.next_back() {
|
||||
if let Some(img_range) = &entry.page_image_range {
|
||||
.slice_range((Included(&minkey), Included(&maxkey)))
|
||||
.iter()
|
||||
.rev();
|
||||
for ((_blknum, _lsn), blob_range) in iter {
|
||||
let pv = PageVersion::des(&read_blob(&page_version_reader, blob_range)?)?;
|
||||
|
||||
if let Some(img) = pv.page_image {
|
||||
// Found a page image, return it
|
||||
let img = Bytes::from(read_blob(&page_version_reader, img_range)?);
|
||||
reconstruct_data.page_img = Some(img);
|
||||
cont_lsn = None;
|
||||
need_image = false;
|
||||
break;
|
||||
} else if let Some(rec_range) = &entry.record_range {
|
||||
let rec = WALRecord::des(&read_blob(&page_version_reader, rec_range)?)?;
|
||||
} else if let Some(rec) = pv.record {
|
||||
let will_init = rec.will_init;
|
||||
reconstruct_data.records.push(rec);
|
||||
if will_init {
|
||||
// This WAL record initializes the page, so no need to go further back
|
||||
cont_lsn = None;
|
||||
need_image = false;
|
||||
break;
|
||||
} else {
|
||||
// This WAL record needs to be applied against an older page image
|
||||
cont_lsn = Some(*entry_lsn);
|
||||
}
|
||||
} else {
|
||||
// No base image, and no WAL record. Huh?
|
||||
@@ -214,16 +247,9 @@ impl Layer for DeltaLayer {
|
||||
}
|
||||
|
||||
// If an older page image is needed to reconstruct the page, let the
|
||||
// caller know about the predecessor layer.
|
||||
if let Some(cont_lsn) = cont_lsn {
|
||||
if let Some(cont_layer) = &self.predecessor {
|
||||
Ok(PageReconstructResult::Continue(
|
||||
cont_lsn,
|
||||
Arc::clone(cont_layer),
|
||||
))
|
||||
} else {
|
||||
Ok(PageReconstructResult::Missing(cont_lsn))
|
||||
}
|
||||
// caller know.
|
||||
if need_image {
|
||||
Ok(PageReconstructResult::Continue(self.start_lsn))
|
||||
} else {
|
||||
Ok(PageReconstructResult::Complete)
|
||||
}
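
The rewritten lookup no longer keeps separate image/record ranges per entry; it deserializes whole PageVersion blobs and walks them backwards from the requested LSN, collecting WAL records until it reaches a full image or a record with will_init (the real code also bails out if a version carries neither). A self-contained sketch of just that control flow, with simplified stand-in types:

/// Simplified stand-ins for the page-version types used above.
struct PageVersion {
    page_image: Option<Vec<u8>>,
    record: Option<WalRecord>,
}

#[derive(Clone)]
struct WalRecord {
    will_init: bool,
    rec: Vec<u8>,
}

enum ReconstructResult {
    /// Found everything needed inside this layer.
    Complete,
    /// Need an older image; the caller should continue in an older layer.
    Continue,
}

/// Walk versions (sorted by LSN ascending) backwards and collect what is needed
/// to reconstruct the page at the newest LSN in the slice.
fn collect_for_reconstruct(
    versions: &[(u64, PageVersion)],
    records_out: &mut Vec<WalRecord>,
    image_out: &mut Option<Vec<u8>>,
) -> ReconstructResult {
    for (_lsn, pv) in versions.iter().rev() {
        if let Some(img) = &pv.page_image {
            // A full image ends the search.
            *image_out = Some(img.clone());
            return ReconstructResult::Complete;
        } else if let Some(rec) = &pv.record {
            records_out.push(rec.clone());
            if rec.will_init {
                // This record initializes the page; nothing older is needed.
                return ReconstructResult::Complete;
            }
        }
    }
    // Ran out of versions without finding a base image.
    ReconstructResult::Continue
}

fn main() {
    let versions = vec![
        (10, PageVersion { page_image: Some(vec![0u8; 8]), record: None }),
        (20, PageVersion { page_image: None, record: Some(WalRecord { will_init: false, rec: vec![1] }) }),
    ];
    let mut records = Vec::new();
    let mut image = None;
    match collect_for_reconstruct(&versions, &mut records, &mut image) {
        ReconstructResult::Complete => assert!(image.is_some() && records.len() == 1),
        ReconstructResult::Continue => unreachable!(),
    }
}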
|
||||
@@ -232,21 +258,22 @@ impl Layer for DeltaLayer {
|
||||
/// Get size of the relation at given LSN
|
||||
fn get_seg_size(&self, lsn: Lsn) -> Result<u32> {
|
||||
assert!(lsn >= self.start_lsn);
|
||||
ensure!(
|
||||
self.seg.rel.is_blocky(),
|
||||
"get_seg_size() called on a non-blocky rel"
|
||||
);
|
||||
|
||||
// Scan the BTreeMap backwards, starting from the given entry.
|
||||
let inner = self.load()?;
|
||||
let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn)));
|
||||
let slice = inner
|
||||
.relsizes
|
||||
.slice_range((Included(&Lsn(0)), Included(&lsn)));
|
||||
|
||||
let result;
|
||||
if let Some((_entry_lsn, entry)) = iter.next_back() {
|
||||
result = *entry;
|
||||
// Use the base image if needed
|
||||
} else if let Some(predecessor) = &self.predecessor {
|
||||
result = predecessor.get_seg_size(lsn)?;
|
||||
if let Some((_entry_lsn, entry)) = slice.last() {
|
||||
Ok(*entry)
|
||||
} else {
|
||||
result = 0;
|
||||
Err(anyhow::anyhow!("could not find seg size in delta layer"))
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Does this segment exist at given LSN?
|
||||
@@ -266,15 +293,17 @@ impl Layer for DeltaLayer {
|
||||
///
|
||||
fn unload(&self) -> Result<()> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
inner.page_version_metas = BTreeMap::new();
|
||||
inner.relsizes = BTreeMap::new();
|
||||
inner.page_version_metas = VecMap::default();
|
||||
inner.relsizes = VecMap::default();
|
||||
inner.loaded = false;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn delete(&self) -> Result<()> {
|
||||
// delete underlying file
|
||||
fs::remove_file(self.path())?;
|
||||
if let Some(path) = self.path() {
|
||||
fs::remove_file(path)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -285,28 +314,28 @@ impl Layer for DeltaLayer {
|
||||
/// debugging function to print out the contents of the layer
|
||||
fn dump(&self) -> Result<()> {
|
||||
println!(
|
||||
"----- delta layer for tli {} seg {} {}-{} ----",
|
||||
self.timelineid, self.seg, self.start_lsn, self.end_lsn
|
||||
"----- delta layer for ten {} tli {} seg {} {}-{} ----",
|
||||
self.tenantid, self.timelineid, self.seg, self.start_lsn, self.end_lsn
|
||||
);
|
||||
|
||||
println!("--- relsizes ---");
|
||||
let inner = self.load()?;
|
||||
for (k, v) in inner.relsizes.iter() {
|
||||
for (k, v) in inner.relsizes.as_slice() {
|
||||
println!(" {}: {}", k, v);
|
||||
}
|
||||
println!("--- page versions ---");
|
||||
let (_path, book) = self.open_book()?;
|
||||
let chapter = book.chapter_reader(PAGE_VERSIONS_CHAPTER)?;
|
||||
for (k, v) in inner.page_version_metas.iter() {
|
||||
for ((blk, lsn), blob_range) in inner.page_version_metas.as_slice() {
|
||||
let mut desc = String::new();
|
||||
|
||||
if let Some(page_image_range) = v.page_image_range.as_ref() {
|
||||
let image = read_blob(&chapter, &page_image_range)?;
|
||||
write!(&mut desc, " img {} bytes", image.len())?;
|
||||
let buf = read_blob(&chapter, blob_range)?;
|
||||
let pv = PageVersion::des(&buf)?;
|
||||
|
||||
if let Some(img) = pv.page_image.as_ref() {
|
||||
write!(&mut desc, " img {} bytes", img.len())?;
|
||||
}
|
||||
if let Some(record_range) = v.record_range.as_ref() {
|
||||
let record_bytes = read_blob(&chapter, record_range)?;
|
||||
let rec = WALRecord::des(&record_bytes)?;
|
||||
if let Some(rec) = pv.record.as_ref() {
|
||||
let wal_desc = waldecoder::describe_wal_record(&rec.rec);
|
||||
write!(
|
||||
&mut desc,
|
||||
@@ -316,7 +345,7 @@ impl Layer for DeltaLayer {
|
||||
wal_desc
|
||||
)?;
|
||||
}
|
||||
println!(" blk {} at {}: {}", k.0, k.1, desc);
|
||||
println!(" blk {} at {}: {}", blk, lsn, desc);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -324,20 +353,6 @@ impl Layer for DeltaLayer {
|
||||
}
|
||||
|
||||
impl DeltaLayer {
|
||||
fn path(&self) -> PathBuf {
|
||||
Self::path_for(
|
||||
&self.path_or_conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
&DeltaFileName {
|
||||
seg: self.seg,
|
||||
start_lsn: self.start_lsn,
|
||||
end_lsn: self.end_lsn,
|
||||
dropped: self.dropped,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn path_for(
|
||||
path_or_conf: &PathOrConf,
|
||||
timelineid: ZTimelineId,
|
||||
@@ -352,8 +367,9 @@ impl DeltaLayer {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new delta file, using the given btreemaps containing the page versions and
|
||||
/// relsizes.
|
||||
/// Create a new delta file, using the given page versions and relsizes.
|
||||
/// The page versions are passed by an iterator; the iterator must return
|
||||
/// page versions in blknum+lsn order.
|
||||
///
|
||||
/// This is used to write the in-memory layer to disk. The in-memory layer uses the same
|
||||
/// data structure with two btreemaps as we do, so passing the btreemaps is currently
|
||||
@@ -367,11 +383,14 @@ impl DeltaLayer {
|
||||
start_lsn: Lsn,
|
||||
end_lsn: Lsn,
|
||||
dropped: bool,
|
||||
predecessor: Option<Arc<dyn Layer>>,
|
||||
page_versions: BTreeMap<(u32, Lsn), PageVersion>,
|
||||
relsizes: BTreeMap<Lsn, u32>,
|
||||
page_versions: OrderedBlockIter,
|
||||
relsizes: VecMap<Lsn, u32>,
|
||||
) -> Result<DeltaLayer> {
|
||||
let delta_layer = DeltaLayer {
|
||||
if seg.rel.is_blocky() {
|
||||
assert!(!relsizes.is_empty());
|
||||
}
|
||||
|
||||
let mut delta_layer = DeltaLayer {
|
||||
path_or_conf: PathOrConf::Conf(conf),
|
||||
timelineid,
|
||||
tenantid,
|
||||
@@ -381,68 +400,74 @@ impl DeltaLayer {
|
||||
dropped,
|
||||
inner: Mutex::new(DeltaLayerInner {
|
||||
loaded: true,
|
||||
page_version_metas: BTreeMap::new(),
|
||||
page_version_metas: VecMap::default(),
|
||||
relsizes,
|
||||
}),
|
||||
predecessor,
|
||||
};
|
||||
let mut inner = delta_layer.inner.lock().unwrap();
|
||||
|
||||
// Write the in-memory btreemaps into a file
|
||||
let path = delta_layer.path();
|
||||
let path = delta_layer
|
||||
.path()
|
||||
.expect("DeltaLayer is supposed to have a layer path on disk");
|
||||
|
||||
let inner = delta_layer.inner.get_mut().unwrap();
|
||||
|
||||
// Note: This overwrites any existing file. There shouldn't be any.
|
||||
// FIXME: throw an error instead?
|
||||
let file = File::create(&path)?;
|
||||
let book = BookWriter::new(file, DELTA_FILE_MAGIC)?;
|
||||
let buf_writer = BufWriter::new(file);
|
||||
let book = BookWriter::new(buf_writer, DELTA_FILE_MAGIC)?;
|
||||
|
||||
let mut page_version_writer = BlobWriter::new(book, PAGE_VERSIONS_CHAPTER);
|
||||
|
||||
for (key, page_version) in page_versions {
|
||||
let page_image_range = page_version
|
||||
.page_image
|
||||
.map(|page_image| page_version_writer.write_blob(page_image.as_ref()))
|
||||
.transpose()?;
|
||||
for (blknum, history) in page_versions {
|
||||
inner.page_version_metas.reserve(history.len());
|
||||
|
||||
let record_range = page_version
|
||||
.record
|
||||
.map(|record| {
|
||||
let buf = WALRecord::ser(&record)?;
|
||||
page_version_writer.write_blob(&buf)
|
||||
})
|
||||
.transpose()?;
|
||||
for (lsn, page_version) in history {
|
||||
let buf = PageVersion::ser(page_version)?;
|
||||
let blob_range = page_version_writer.write_blob(&buf)?;
|
||||
|
||||
let old = inner.page_version_metas.insert(
|
||||
key,
|
||||
PageVersionMeta {
|
||||
page_image_range,
|
||||
record_range,
|
||||
},
|
||||
);
|
||||
|
||||
assert!(old.is_none());
|
||||
inner
|
||||
.page_version_metas
|
||||
.append((blknum, *lsn), blob_range)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
let book = page_version_writer.close()?;
|
||||
|
||||
// Write out page versions
|
||||
let mut chapter = book.new_chapter(PAGE_VERSION_METAS_CHAPTER);
|
||||
let buf = BTreeMap::ser(&inner.page_version_metas)?;
|
||||
let buf = VecMap::ser(&inner.page_version_metas)?;
|
||||
chapter.write_all(&buf)?;
|
||||
let book = chapter.close()?;
|
||||
|
||||
// and relsizes to separate chapter
|
||||
let mut chapter = book.new_chapter(REL_SIZES_CHAPTER);
|
||||
let buf = BTreeMap::ser(&inner.relsizes)?;
|
||||
let buf = VecMap::ser(&inner.relsizes)?;
|
||||
chapter.write_all(&buf)?;
|
||||
let book = chapter.close()?;
|
||||
|
||||
book.close()?;
|
||||
let mut chapter = book.new_chapter(SUMMARY_CHAPTER);
|
||||
let summary = Summary {
|
||||
tenantid,
|
||||
timelineid,
|
||||
seg,
|
||||
|
||||
start_lsn,
|
||||
end_lsn,
|
||||
|
||||
dropped,
|
||||
};
|
||||
Summary::ser_into(&summary, &mut chapter)?;
|
||||
let book = chapter.close()?;
|
||||
|
||||
// This flushes the underlying 'buf_writer'.
|
||||
let writer = book.close()?;
|
||||
writer.get_ref().sync_all()?;
|
||||
|
||||
trace!("saved {}", &path.display());
|
||||
|
||||
drop(inner);
|
||||
|
||||
Ok(delta_layer)
|
||||
}
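
DeltaLayer::create now writes through a BufWriter, adds the SUMMARY_CHAPTER, lets book.close() flush the buffer, and then calls sync_all() so the new layer file is durable before it is trusted. Stripped of bookfile, the pattern is flush-then-fsync; a small sketch using only std (the path below is illustrative):

use std::fs::File;
use std::io::{BufWriter, Result, Write};
use std::path::Path;

/// Write a few sections through a BufWriter, then make sure both the in-process
/// buffer and the OS page cache are flushed before declaring the file durable.
fn write_durably(path: &Path, sections: &[&[u8]]) -> Result<()> {
    let file = File::create(path)?;
    let mut buf_writer = BufWriter::new(file);

    for section in sections {
        // In the real layer file each of these would be a bookfile chapter.
        buf_writer.write_all(section)?;
    }

    // Flush the in-process buffer first (book.close() does this in the diff)...
    buf_writer.flush()?;
    // ...then ask the OS to push the data to stable storage.
    buf_writer.get_ref().sync_all()?;
    Ok(())
}

fn main() -> Result<()> {
    // Path is illustrative; point it somewhere writable when trying this out.
    write_durably(Path::new("/tmp/example-layer"), &[b"metas", b"relsizes", b"summary"])
}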
|
||||
|
||||
@@ -478,11 +503,36 @@ impl DeltaLayer {
|
||||
|
||||
let (path, book) = self.open_book()?;
|
||||
|
||||
match &self.path_or_conf {
|
||||
PathOrConf::Conf(_) => {
|
||||
let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
|
||||
let actual_summary = Summary::des(&chapter)?;
|
||||
|
||||
let expected_summary = Summary::from(self);
|
||||
|
||||
if actual_summary != expected_summary {
|
||||
bail!("in-file summary does not match expected summary. actual = {:?} expected = {:?}", actual_summary, expected_summary);
|
||||
}
|
||||
}
|
||||
PathOrConf::Path(path) => {
|
||||
let actual_filename = Path::new(path.file_name().unwrap());
|
||||
let expected_filename = self.filename();
|
||||
|
||||
if actual_filename != expected_filename {
|
||||
println!(
|
||||
"warning: filename does not match what is expected from in-file summary"
|
||||
);
|
||||
println!("actual: {:?}", actual_filename);
|
||||
println!("expected: {:?}", expected_filename);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let chapter = book.read_chapter(PAGE_VERSION_METAS_CHAPTER)?;
|
||||
let page_version_metas = BTreeMap::des(&chapter)?;
|
||||
let page_version_metas = VecMap::des(&chapter)?;
|
||||
|
||||
let chapter = book.read_chapter(REL_SIZES_CHAPTER)?;
|
||||
let relsizes = BTreeMap::des(&chapter)?;
|
||||
let relsizes = VecMap::des(&chapter)?;
|
||||
|
||||
debug!("loaded from {}", &path.display());
|
||||
|
||||
@@ -501,7 +551,6 @@ impl DeltaLayer {
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
filename: &DeltaFileName,
|
||||
predecessor: Option<Arc<dyn Layer>>,
|
||||
) -> DeltaLayer {
|
||||
DeltaLayer {
|
||||
path_or_conf: PathOrConf::Conf(conf),
|
||||
@@ -513,36 +562,32 @@ impl DeltaLayer {
|
||||
dropped: filename.dropped,
|
||||
inner: Mutex::new(DeltaLayerInner {
|
||||
loaded: false,
|
||||
page_version_metas: BTreeMap::new(),
|
||||
relsizes: BTreeMap::new(),
|
||||
page_version_metas: VecMap::default(),
|
||||
relsizes: VecMap::default(),
|
||||
}),
|
||||
predecessor,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a DeltaLayer struct representing an existing file on disk.
|
||||
///
|
||||
/// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
|
||||
pub fn new_for_path(
|
||||
path: &Path,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
filename: &DeltaFileName,
|
||||
) -> DeltaLayer {
|
||||
DeltaLayer {
|
||||
pub fn new_for_path(path: &Path, book: &Book<File>) -> Result<Self> {
|
||||
let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
|
||||
let summary = Summary::des(&chapter)?;
|
||||
|
||||
Ok(DeltaLayer {
|
||||
path_or_conf: PathOrConf::Path(path.to_path_buf()),
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg: filename.seg,
|
||||
start_lsn: filename.start_lsn,
|
||||
end_lsn: filename.end_lsn,
|
||||
dropped: filename.dropped,
|
||||
timelineid: summary.timelineid,
|
||||
tenantid: summary.tenantid,
|
||||
seg: summary.seg,
|
||||
start_lsn: summary.start_lsn,
|
||||
end_lsn: summary.end_lsn,
|
||||
dropped: summary.dropped,
|
||||
inner: Mutex::new(DeltaLayerInner {
|
||||
loaded: false,
|
||||
page_version_metas: BTreeMap::new(),
|
||||
relsizes: BTreeMap::new(),
|
||||
page_version_metas: VecMap::default(),
|
||||
relsizes: VecMap::default(),
|
||||
}),
|
||||
predecessor: None,
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -290,7 +290,7 @@ pub fn list_files(
|
||||
deltafiles.push(deltafilename);
|
||||
} else if let Some(imgfilename) = ImageFileName::from_str(fname) {
|
||||
imgfiles.push(imgfilename);
|
||||
} else if fname == "wal" || fname == "metadata" || fname == "ancestor" {
|
||||
} else if fname == "metadata" || fname == "ancestor" || fname.ends_with(".old") {
|
||||
// ignore these
|
||||
} else {
|
||||
warn!("unrecognized filename in timeline dir: {}", fname);
|
||||
|
||||
@@ -29,27 +29,53 @@ use crate::layered_repository::LayeredTimeline;
|
||||
use crate::layered_repository::RELISH_SEG_SIZE;
|
||||
use crate::PageServerConf;
|
||||
use crate::{ZTenantId, ZTimelineId};
|
||||
use anyhow::{anyhow, ensure, Result};
|
||||
use anyhow::{anyhow, bail, ensure, Result};
|
||||
use bytes::Bytes;
|
||||
use log::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::convert::TryInto;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::io::{BufWriter, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Mutex, MutexGuard};
|
||||
|
||||
use bookfile::{Book, BookWriter};
|
||||
|
||||
use zenith_utils::bin_ser::BeSer;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
// Magic constant to identify a Zenith segment image file
|
||||
const IMAGE_FILE_MAGIC: u32 = 0x5A616E01 + 1;
|
||||
pub const IMAGE_FILE_MAGIC: u32 = 0x5A616E01 + 1;
|
||||
|
||||
/// Contains each block in block # order
|
||||
const BLOCKY_IMAGES_CHAPTER: u64 = 1;
|
||||
const NONBLOCKY_IMAGE_CHAPTER: u64 = 2;
|
||||
|
||||
/// Contains the [`Summary`] struct
|
||||
const SUMMARY_CHAPTER: u64 = 3;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
struct Summary {
|
||||
tenantid: ZTenantId,
|
||||
timelineid: ZTimelineId,
|
||||
seg: SegmentTag,
|
||||
|
||||
lsn: Lsn,
|
||||
}
|
||||
|
||||
impl From<&ImageLayer> for Summary {
|
||||
fn from(layer: &ImageLayer) -> Self {
|
||||
Self {
|
||||
tenantid: layer.tenantid,
|
||||
timelineid: layer.timelineid,
|
||||
seg: layer.seg,
|
||||
|
||||
lsn: layer.lsn,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const BLOCK_SIZE: usize = 8192;
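
BLOCKY_IMAGES_CHAPTER holds each 8 KiB block of a blocky segment in block-number order. Assuming the blocks are stored back to back (the read path is not part of this hunk, so the offset formula below is an inference, not something the diff confirms), block N then lives at byte offset N * BLOCK_SIZE inside the chapter:

const BLOCK_SIZE: usize = 8192;

/// Byte range of one block inside a chapter that stores blocks contiguously
/// in block-number order (an assumed layout, not shown in the diff).
fn block_range(blocknum_in_seg: usize) -> std::ops::Range<usize> {
    let start = blocknum_in_seg * BLOCK_SIZE;
    start..start + BLOCK_SIZE
}

fn main() {
    // The chapter for a 3-block segment is just three blocks end to end.
    let chapter: Vec<u8> = vec![0u8; 3 * BLOCK_SIZE];
    let block1 = &chapter[block_range(1)];
    assert_eq!(block1.len(), BLOCK_SIZE);
}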
|
||||
|
||||
///
|
||||
@@ -97,6 +123,18 @@ impl Layer for ImageLayer {
|
||||
)
|
||||
}
|
||||
|
||||
fn path(&self) -> Option<PathBuf> {
|
||||
Some(Self::path_for(
|
||||
&self.path_or_conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
&ImageFileName {
|
||||
seg: self.seg,
|
||||
lsn: self.lsn,
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
fn get_timeline_id(&self) -> ZTimelineId {
|
||||
self.timelineid
|
||||
}
|
||||
@@ -114,7 +152,8 @@ impl Layer for ImageLayer {
|
||||
}
|
||||
|
||||
fn get_end_lsn(&self) -> Lsn {
|
||||
self.lsn
|
||||
// End-bound is exclusive
|
||||
self.lsn + 1
|
||||
}
|
||||
|
||||
/// Look up given page in the file
|
||||
@@ -183,7 +222,9 @@ impl Layer for ImageLayer {
|
||||
|
||||
fn delete(&self) -> Result<()> {
|
||||
// delete underlying file
|
||||
fs::remove_file(self.path())?;
|
||||
if let Some(path) = self.path() {
|
||||
fs::remove_file(path)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -194,8 +235,8 @@ impl Layer for ImageLayer {
|
||||
/// debugging function to print out the contents of the layer
|
||||
fn dump(&self) -> Result<()> {
|
||||
println!(
|
||||
"----- image layer for tli {} seg {} at {} ----",
|
||||
self.timelineid, self.seg, self.lsn
|
||||
"----- image layer for ten {} tli {} seg {} at {} ----",
|
||||
self.tenantid, self.timelineid, self.seg, self.lsn
|
||||
);
|
||||
|
||||
let inner = self.load()?;
|
||||
@@ -214,18 +255,6 @@ impl Layer for ImageLayer {
|
||||
}
|
||||
|
||||
impl ImageLayer {
|
||||
fn path(&self) -> PathBuf {
|
||||
Self::path_for(
|
||||
&self.path_or_conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
&ImageFileName {
|
||||
seg: self.seg,
|
||||
lsn: self.lsn,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn path_for(
|
||||
path_or_conf: &PathOrConf,
|
||||
timelineid: ZTimelineId,
|
||||
@@ -271,12 +300,14 @@ impl ImageLayer {
|
||||
let inner = layer.inner.lock().unwrap();
|
||||
|
||||
// Write the images into a file
|
||||
let path = layer.path();
|
||||
|
||||
let path = layer
|
||||
.path()
|
||||
.expect("ImageLayer is supposed to have a layer path on disk");
|
||||
// Note: This overwrites any existing file. There shouldn't be any.
|
||||
// FIXME: throw an error instead?
|
||||
let file = File::create(&path)?;
|
||||
let book = BookWriter::new(file, IMAGE_FILE_MAGIC)?;
|
||||
let buf_writer = BufWriter::new(file);
|
||||
let book = BookWriter::new(buf_writer, IMAGE_FILE_MAGIC)?;
|
||||
|
||||
let book = match &image_type {
|
||||
ImageType::Blocky { .. } => {
|
||||
@@ -294,7 +325,20 @@ impl ImageLayer {
|
||||
}
|
||||
};
|
||||
|
||||
book.close()?;
|
||||
let mut chapter = book.new_chapter(SUMMARY_CHAPTER);
|
||||
let summary = Summary {
|
||||
tenantid,
|
||||
timelineid,
|
||||
seg,
|
||||
|
||||
lsn,
|
||||
};
|
||||
Summary::ser_into(&summary, &mut chapter)?;
|
||||
let book = chapter.close()?;
|
||||
|
||||
// This flushes the underlying 'buf_writer'.
|
||||
let writer = book.close()?;
|
||||
writer.get_ref().sync_all()?;
|
||||
|
||||
trace!("saved {}", &path.display());
|
||||
|
||||
@@ -354,6 +398,31 @@ impl ImageLayer {
|
||||
|
||||
let (path, book) = self.open_book()?;
|
||||
|
||||
match &self.path_or_conf {
|
||||
PathOrConf::Conf(_) => {
|
||||
let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
|
||||
let actual_summary = Summary::des(&chapter)?;
|
||||
|
||||
let expected_summary = Summary::from(self);
|
||||
|
||||
if actual_summary != expected_summary {
|
||||
bail!("in-file summary does not match expected summary. actual = {:?} expected = {:?}", actual_summary, expected_summary);
|
||||
}
|
||||
}
|
||||
PathOrConf::Path(path) => {
|
||||
let actual_filename = Path::new(path.file_name().unwrap());
|
||||
let expected_filename = self.filename();
|
||||
|
||||
if actual_filename != expected_filename {
|
||||
println!(
|
||||
"warning: filename does not match what is expected from in-file summary"
|
||||
);
|
||||
println!("actual: {:?}", actual_filename);
|
||||
println!("expected: {:?}", expected_filename);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let image_type = if self.seg.rel.is_blocky() {
|
||||
let chapter = book.chapter_reader(BLOCKY_IMAGES_CHAPTER)?;
|
||||
let images_len = chapter.len();
|
||||
@@ -415,22 +484,20 @@ impl ImageLayer {
|
||||
/// Create an ImageLayer struct representing an existing file on disk.
|
||||
///
|
||||
/// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
|
||||
pub fn new_for_path(
|
||||
path: &Path,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
filename: &ImageFileName,
|
||||
) -> ImageLayer {
|
||||
ImageLayer {
|
||||
pub fn new_for_path(path: &Path, book: &Book<File>) -> Result<ImageLayer> {
|
||||
let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
|
||||
let summary = Summary::des(&chapter)?;
|
||||
|
||||
Ok(ImageLayer {
|
||||
path_or_conf: PathOrConf::Path(path.to_path_buf()),
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg: filename.seg,
|
||||
lsn: filename.lsn,
|
||||
timelineid: summary.timelineid,
|
||||
tenantid: summary.tenantid,
|
||||
seg: summary.seg,
|
||||
lsn: summary.lsn,
|
||||
inner: Mutex::new(ImageLayerInner {
|
||||
loaded: false,
|
||||
image_type: ImageType::Blocky { num_blocks: 0 },
|
||||
}),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,17 +12,19 @@ use crate::layered_repository::{DeltaLayer, ImageLayer};
|
||||
use crate::repository::WALRecord;
|
||||
use crate::PageServerConf;
|
||||
use crate::{ZTenantId, ZTimelineId};
|
||||
use anyhow::{bail, Result};
|
||||
use anyhow::{bail, ensure, Result};
|
||||
use bytes::Bytes;
|
||||
use log::*;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BTreeMap;
|
||||
use std::ops::Bound::Included;
|
||||
use std::cmp::min;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use zenith_utils::vec_map::VecMap;
|
||||
|
||||
use zenith_utils::accum::Accum;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
use super::page_versions::PageVersions;
|
||||
|
||||
pub struct InMemoryLayer {
|
||||
conf: &'static PageServerConf,
|
||||
tenantid: ZTenantId,
|
||||
@@ -31,20 +33,22 @@ pub struct InMemoryLayer {
|
||||
|
||||
///
|
||||
/// This layer contains all the changes from 'start_lsn'. The
|
||||
/// start is inclusive. There is no end LSN; we only use in-memory
|
||||
/// layer at the end of a timeline.
|
||||
/// start is inclusive.
|
||||
///
|
||||
start_lsn: Lsn,
|
||||
|
||||
/// Frozen in-memory layers have an inclusive end LSN.
|
||||
end_lsn: Option<Lsn>,
|
||||
|
||||
/// LSN of the oldest page version stored in this layer
|
||||
oldest_pending_lsn: Lsn,
|
||||
|
||||
/// The above fields never change. The parts that do change are in 'inner',
|
||||
/// and protected by mutex.
|
||||
inner: Mutex<InMemoryLayerInner>,
|
||||
inner: RwLock<InMemoryLayerInner>,
|
||||
|
||||
/// Predecessor layer
|
||||
predecessor: Option<Arc<dyn Layer>>,
|
||||
/// Predecessor layer might be needed?
|
||||
incremental: bool,
|
||||
}
|
||||
|
||||
pub struct InMemoryLayerInner {
|
||||
@@ -55,23 +59,40 @@ pub struct InMemoryLayerInner {
|
||||
/// All versions of all pages in the layer are kept here.
|
||||
/// Indexed by block number and LSN.
|
||||
///
|
||||
page_versions: BTreeMap<(u32, Lsn), PageVersion>,
|
||||
page_versions: PageVersions,
|
||||
|
||||
///
|
||||
/// `segsizes` tracks the size of the segment at different points in time.
|
||||
///
|
||||
segsizes: BTreeMap<Lsn, u32>,
|
||||
/// For a blocky rel, there is always one entry, at the layer's start_lsn,
|
||||
/// so that determining the size never depends on the predecessor layer. For
|
||||
/// a non-blocky rel, 'segsizes' is not used and is always empty.
|
||||
///
|
||||
segsizes: VecMap<Lsn, u32>,
|
||||
|
||||
/// Writes are only allowed when true.
|
||||
/// Set to false when this layer is in the process of being replaced.
|
||||
writeable: bool,
|
||||
}
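
PageVersions (from super::page_versions) replaces the flat BTreeMap<(u32, Lsn), PageVersion>, but its definition is not included in this diff. Judging from how it is used below (append_or_update_last, get_block_lsn_range, ordered_block_iter, split_at), one plausible shape, sketched here purely as an assumption, is a per-block list of LSN-ordered versions:

use std::collections::BTreeMap;

/// Illustrative page-version payload.
#[derive(Clone)]
pub struct PageVersion {
    pub page_image: Option<Vec<u8>>,
}

/// Sketch of a PageVersions-like container: one LSN-ordered Vec per block.
#[derive(Default)]
pub struct PageVersions {
    blocks: BTreeMap<u32, Vec<(u64, PageVersion)>>,
}

impl PageVersions {
    /// Append a version for `blknum`; if the last entry has the same LSN, replace it
    /// and return the old value (mirroring `append_or_update_last` in the diff).
    pub fn append_or_update_last(
        &mut self,
        blknum: u32,
        lsn: u64,
        pv: PageVersion,
    ) -> Option<PageVersion> {
        let history = self.blocks.entry(blknum).or_default();
        if let Some((last_lsn, last_pv)) = history.last_mut() {
            if *last_lsn == lsn {
                return Some(std::mem::replace(last_pv, pv));
            }
            assert!(*last_lsn < lsn, "versions must be appended in LSN order");
        }
        history.push((lsn, pv));
        None
    }

    /// All versions of one block with LSN <= `max_lsn`, oldest first.
    pub fn get_block_lsn_range(&self, blknum: u32, max_lsn: u64) -> &[(u64, PageVersion)] {
        match self.blocks.get(&blknum) {
            Some(history) => {
                let end = history.partition_point(|(lsn, _)| *lsn <= max_lsn);
                &history[..end]
            }
            None => &[],
        }
    }
}

fn main() {
    let mut pv = PageVersions::default();
    let _ = pv.append_or_update_last(0, 10, PageVersion { page_image: None });
    let _ = pv.append_or_update_last(0, 20, PageVersion { page_image: Some(vec![1]) });
    assert_eq!(pv.get_block_lsn_range(0, 15).len(), 1);
}

The real get_block_lsn_range takes a RangeBounds (`..=lsn` below); the sketch hard-codes an inclusive upper bound to stay short.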
|
||||
|
||||
impl InMemoryLayerInner {
|
||||
fn check_writeable(&self) -> WriteResult<()> {
|
||||
if self.writeable {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(NonWriteableError)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_seg_size(&self, lsn: Lsn) -> u32 {
|
||||
// Scan the BTreeMap backwards, starting from the given entry.
|
||||
let mut iter = self.segsizes.range((Included(&Lsn(0)), Included(&lsn)));
|
||||
let slice = self.segsizes.slice_range(..=lsn);
|
||||
|
||||
if let Some((_entry_lsn, entry)) = iter.next_back() {
|
||||
// We make sure there is always at least one entry
|
||||
if let Some((_entry_lsn, entry)) = slice.last() {
|
||||
*entry
|
||||
} else {
|
||||
0
|
||||
panic!("could not find seg size in in-memory layer");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,7 +101,7 @@ impl Layer for InMemoryLayer {
|
||||
// An in-memory layer doesn't really have a filename as it's not stored on disk,
|
||||
// but we construct a filename as if it was a delta layer
|
||||
fn filename(&self) -> PathBuf {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
let inner = self.inner.read().unwrap();
|
||||
|
||||
let end_lsn;
|
||||
let dropped;
|
||||
@@ -103,6 +124,10 @@ impl Layer for InMemoryLayer {
|
||||
PathBuf::from(format!("inmem-{}", delta_filename))
|
||||
}
|
||||
|
||||
fn path(&self) -> Option<PathBuf> {
|
||||
None
|
||||
}
|
||||
|
||||
fn get_timeline_id(&self) -> ZTimelineId {
|
||||
self.timelineid
|
||||
}
|
||||
@@ -116,7 +141,11 @@ impl Layer for InMemoryLayer {
|
||||
}
|
||||
|
||||
fn get_end_lsn(&self) -> Lsn {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
if let Some(end_lsn) = self.end_lsn {
|
||||
return Lsn(end_lsn.0 + 1);
|
||||
}
|
||||
|
||||
let inner = self.inner.read().unwrap();
|
||||
|
||||
if let Some(drop_lsn) = inner.drop_lsn {
|
||||
drop_lsn
|
||||
@@ -126,7 +155,7 @@ impl Layer for InMemoryLayer {
|
||||
}
|
||||
|
||||
fn is_dropped(&self) -> bool {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
let inner = self.inner.read().unwrap();
|
||||
inner.drop_lsn.is_some()
|
||||
}
|
||||
|
||||
@@ -137,53 +166,46 @@ impl Layer for InMemoryLayer {
|
||||
lsn: Lsn,
|
||||
reconstruct_data: &mut PageReconstructData,
|
||||
) -> Result<PageReconstructResult> {
|
||||
let mut cont_lsn: Option<Lsn> = Some(lsn);
|
||||
let mut need_image = true;
|
||||
|
||||
assert!(self.seg.blknum_in_seg(blknum));
|
||||
|
||||
{
|
||||
let inner = self.inner.lock().unwrap();
|
||||
let inner = self.inner.read().unwrap();
|
||||
|
||||
// Scan the BTreeMap backwards, starting from reconstruct_data.lsn.
|
||||
let minkey = (blknum, Lsn(0));
|
||||
let maxkey = (blknum, lsn);
|
||||
let mut iter = inner
|
||||
// Scan the page versions backwards, starting from `lsn`.
|
||||
let iter = inner
|
||||
.page_versions
|
||||
.range((Included(&minkey), Included(&maxkey)));
|
||||
while let Some(((_blknum, entry_lsn), entry)) = iter.next_back() {
|
||||
.get_block_lsn_range(blknum, ..=lsn)
|
||||
.iter()
|
||||
.rev();
|
||||
for (_entry_lsn, entry) in iter {
|
||||
if let Some(img) = &entry.page_image {
|
||||
reconstruct_data.page_img = Some(img.clone());
|
||||
cont_lsn = None;
|
||||
need_image = false;
|
||||
break;
|
||||
} else if let Some(rec) = &entry.record {
|
||||
reconstruct_data.records.push(rec.clone());
|
||||
if rec.will_init {
|
||||
// This WAL record initializes the page, so no need to go further back
|
||||
cont_lsn = None;
|
||||
need_image = false;
|
||||
break;
|
||||
} else {
|
||||
// This WAL record needs to be applied against an older page image
|
||||
cont_lsn = Some(*entry_lsn);
|
||||
}
|
||||
} else {
|
||||
// No base image, and no WAL record. Huh?
|
||||
bail!("no page image or WAL record for requested page");
|
||||
}
|
||||
}
|
||||
|
||||
// release lock on 'inner'
|
||||
}
|
||||
|
||||
// If an older page image is needed to reconstruct the page, let the
|
||||
// caller know about the predecessor layer.
|
||||
if let Some(cont_lsn) = cont_lsn {
|
||||
if let Some(cont_layer) = &self.predecessor {
|
||||
Ok(PageReconstructResult::Continue(
|
||||
cont_lsn,
|
||||
Arc::clone(cont_layer),
|
||||
))
|
||||
// caller know
|
||||
if need_image {
|
||||
if self.incremental {
|
||||
Ok(PageReconstructResult::Continue(Lsn(self.start_lsn.0 - 1)))
|
||||
} else {
|
||||
Ok(PageReconstructResult::Missing(cont_lsn))
|
||||
Ok(PageReconstructResult::Missing(self.start_lsn))
|
||||
}
|
||||
} else {
|
||||
Ok(PageReconstructResult::Complete)
|
||||
@@ -193,14 +215,23 @@ impl Layer for InMemoryLayer {
|
||||
/// Get size of the relation at given LSN
|
||||
fn get_seg_size(&self, lsn: Lsn) -> Result<u32> {
|
||||
assert!(lsn >= self.start_lsn);
|
||||
ensure!(
|
||||
self.seg.rel.is_blocky(),
|
||||
"get_seg_size() called on a non-blocky rel"
|
||||
);
|
||||
|
||||
let inner = self.inner.lock().unwrap();
|
||||
let inner = self.inner.read().unwrap();
|
||||
Ok(inner.get_seg_size(lsn))
|
||||
}
|
||||
|
||||
/// Does this segment exist at given LSN?
|
||||
fn get_seg_exists(&self, lsn: Lsn) -> Result<bool> {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
let inner = self.inner.read().unwrap();
|
||||
|
||||
// If the segment created after requested LSN,
|
||||
// it doesn't exist in the layer. But we shouldn't
|
||||
// have requested it in the first place.
|
||||
assert!(lsn >= self.start_lsn);
|
||||
|
||||
// Is the requested LSN after the segment was dropped?
|
||||
if let Some(drop_lsn) = inner.drop_lsn {
|
||||
@@ -227,12 +258,12 @@ impl Layer for InMemoryLayer {
|
||||
}
|
||||
|
||||
fn is_incremental(&self) -> bool {
|
||||
self.predecessor.is_some()
|
||||
self.incremental
|
||||
}
|
||||
|
||||
/// debugging function to print out the contents of the layer
|
||||
fn dump(&self) -> Result<()> {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
let inner = self.inner.read().unwrap();
|
||||
|
||||
let end_str = inner
|
||||
.drop_lsn
|
||||
@@ -245,29 +276,46 @@ impl Layer for InMemoryLayer {
|
||||
self.timelineid, self.seg, self.start_lsn, end_str
|
||||
);
|
||||
|
||||
for (k, v) in inner.segsizes.iter() {
|
||||
for (k, v) in inner.segsizes.as_slice() {
|
||||
println!("segsizes {}: {}", k, v);
|
||||
}
|
||||
|
||||
for (k, v) in inner.page_versions.iter() {
|
||||
println!(
|
||||
"blk {} at {}: {}/{}\n",
|
||||
k.0,
|
||||
k.1,
|
||||
v.page_image.is_some(),
|
||||
v.record.is_some()
|
||||
);
|
||||
for (blknum, history) in inner.page_versions.ordered_block_iter(None) {
|
||||
for (lsn, pv) in history {
|
||||
println!(
|
||||
"blk {} at {}: {}/{}\n",
|
||||
blknum,
|
||||
lsn,
|
||||
pv.page_image.is_some(),
|
||||
pv.record.is_some()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Type alias to simplify InMemoryLayer::freeze signature
|
||||
//
|
||||
type SuccessorLayers = (Vec<Arc<dyn Layer>>, Option<Arc<InMemoryLayer>>);
|
||||
/// Write failed because the layer is in process of being replaced.
|
||||
/// See [`LayeredTimeline::perform_write_op`] for how to handle this error.
|
||||
#[derive(Debug)]
|
||||
pub struct NonWriteableError;
|
||||
|
||||
pub type WriteResult<T> = std::result::Result<T, NonWriteableError>;
|
||||
|
||||
/// Helper struct to cleanup `InMemoryLayer::freeze` return signature.
|
||||
pub struct FreezeLayers {
|
||||
/// Replacement layer for the layer which freeze was called on.
|
||||
pub frozen: Arc<InMemoryLayer>,
|
||||
/// New open layer containing leftover data.
|
||||
pub open: Option<Arc<InMemoryLayer>>,
|
||||
}
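
NonWriteableError and WriteResult turn "this layer is being frozen" into a typed error instead of a panic: every write path takes the lock, calls check_writeable(), and returns early once the layer has been handed over to the checkpointer, at which point the caller retries against the replacement layer. A compact, self-contained sketch of that guard pattern (field and method names simplified):

use std::sync::RwLock;

#[derive(Debug)]
pub struct NonWriteableError;

pub type WriteResult<T> = std::result::Result<T, NonWriteableError>;

struct Inner {
    writeable: bool,
    entries: Vec<u64>,
}

struct Layer {
    inner: RwLock<Inner>,
}

impl Layer {
    fn put(&self, value: u64) -> WriteResult<()> {
        let mut inner = self.inner.write().unwrap();
        // Refuse the write if the layer is already being replaced.
        if !inner.writeable {
            return Err(NonWriteableError);
        }
        inner.entries.push(value);
        Ok(())
    }

    /// Mark the layer read-only; from now on every `put` returns NonWriteableError
    /// and the caller is expected to retry against the replacement layer.
    fn seal(&self) {
        self.inner.write().unwrap().writeable = false;
    }
}

fn main() {
    let layer = Layer {
        inner: RwLock::new(Inner { writeable: true, entries: Vec::new() }),
    };
    assert!(layer.put(1).is_ok());
    layer.seal();
    assert!(layer.put(2).is_err());
}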
|
||||
|
||||
impl InMemoryLayer {
|
||||
fn assert_not_frozen(&self) {
|
||||
assert!(self.end_lsn.is_none());
|
||||
}
|
||||
|
||||
/// Return the oldest page version that's stored in this layer
|
||||
pub fn get_oldest_pending_lsn(&self) -> Lsn {
|
||||
self.oldest_pending_lsn
|
||||
@@ -291,26 +339,34 @@ impl InMemoryLayer {
|
||||
start_lsn
|
||||
);
|
||||
|
||||
// The segment is initially empty, so initialize 'segsizes' with 0.
|
||||
let mut segsizes = VecMap::default();
|
||||
if seg.rel.is_blocky() {
|
||||
segsizes.append(start_lsn, 0).unwrap();
|
||||
}
|
||||
|
||||
Ok(InMemoryLayer {
|
||||
conf,
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg,
|
||||
start_lsn,
|
||||
end_lsn: None,
|
||||
oldest_pending_lsn,
|
||||
inner: Mutex::new(InMemoryLayerInner {
|
||||
incremental: false,
|
||||
inner: RwLock::new(InMemoryLayerInner {
|
||||
drop_lsn: None,
|
||||
page_versions: BTreeMap::new(),
|
||||
segsizes: BTreeMap::new(),
|
||||
page_versions: PageVersions::default(),
|
||||
segsizes,
|
||||
writeable: true,
|
||||
}),
|
||||
predecessor: None,
|
||||
})
|
||||
}
|
||||
|
||||
// Write operations
|
||||
|
||||
/// Remember new page version, as a WAL record over previous version
|
||||
pub fn put_wal_record(&self, blknum: u32, rec: WALRecord) -> Result<()> {
|
||||
pub fn put_wal_record(&self, blknum: u32, rec: WALRecord) -> WriteResult<u32> {
|
||||
self.put_page_version(
|
||||
blknum,
|
||||
rec.lsn,
|
||||
@@ -322,7 +378,7 @@ impl InMemoryLayer {
|
||||
}
|
||||
|
||||
/// Remember new page version, as a full page image
|
||||
pub fn put_page_image(&self, blknum: u32, lsn: Lsn, img: Bytes) -> Result<()> {
|
||||
pub fn put_page_image(&self, blknum: u32, lsn: Lsn, img: Bytes) -> WriteResult<u32> {
|
||||
self.put_page_version(
|
||||
blknum,
|
||||
lsn,
|
||||
@@ -335,7 +391,8 @@ impl InMemoryLayer {
|
||||
|
||||
/// Common subroutine of the public put_wal_record() and put_page_image() functions.
|
||||
/// Adds the page version to the in-memory tree
|
||||
pub fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()> {
|
||||
pub fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> WriteResult<u32> {
|
||||
self.assert_not_frozen();
|
||||
assert!(self.seg.blknum_in_seg(blknum));
|
||||
|
||||
trace!(
|
||||
@@ -345,9 +402,11 @@ impl InMemoryLayer {
|
||||
self.timelineid,
|
||||
lsn
|
||||
);
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
let mut inner = self.inner.write().unwrap();
|
||||
|
||||
let old = inner.page_versions.insert((blknum, lsn), pv);
|
||||
inner.check_writeable()?;
|
||||
|
||||
let old = inner.page_versions.append_or_update_last(blknum, lsn, pv);
|
||||
|
||||
if old.is_some() {
|
||||
// We already had an entry for this LSN. That's odd..
|
||||
@@ -361,7 +420,7 @@ impl InMemoryLayer {
|
||||
if self.seg.rel.is_blocky() {
|
||||
let newsize = blknum - self.seg.segno * RELISH_SEG_SIZE + 1;
|
||||
|
||||
// use inner get_seg_size, since calling self.get_seg_size will try to acquire self.inner.lock
|
||||
// use inner get_seg_size, since calling self.get_seg_size will try to acquire the lock,
|
||||
// which we've just acquired above
|
||||
let oldsize = inner.get_seg_size(lsn);
|
||||
if newsize > oldsize {
|
||||
@@ -387,11 +446,14 @@ impl InMemoryLayer {
|
||||
page_image: Some(ZERO_PAGE.clone()),
|
||||
record: None,
|
||||
};
|
||||
println!(
|
||||
trace!(
|
||||
"filling gap blk {} with zeros for write of {}",
|
||||
gapblknum, blknum
|
||||
gapblknum,
|
||||
blknum
|
||||
);
|
||||
let old = inner.page_versions.insert((gapblknum, lsn), zeropv);
|
||||
let old = inner
|
||||
.page_versions
|
||||
.append_or_update_last(gapblknum, lsn, zeropv);
|
||||
// We already had an entry for this LSN. That's odd..
|
||||
|
||||
if old.is_some() {
|
||||
@@ -402,17 +464,29 @@ impl InMemoryLayer {
|
||||
}
|
||||
}
|
||||
|
||||
inner.segsizes.insert(lsn, newsize);
|
||||
inner.segsizes.append_or_update_last(lsn, newsize).unwrap();
|
||||
return Ok(newsize - oldsize);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
Ok(0)
|
||||
}
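
put_page_version now reports how much the segment grew (newsize - oldsize, or 0), and zero-fills any gap blocks between the old end of the segment and the block being written. The helper below sketches just that size/gap computation; block numbers are taken relative to the segment start, whereas the real code first subtracts seg.segno * RELISH_SEG_SIZE:

/// Given the current segment size (in blocks) and a write to `blknum`,
/// return which gap blocks must be zero-filled and the new size.
fn grow_for_write(oldsize: u32, blknum: u32) -> (Vec<u32>, u32) {
    let newsize = blknum + 1;
    if newsize <= oldsize {
        // Writing inside the existing size: nothing grows, no gaps.
        return (Vec::new(), oldsize);
    }
    // Blocks between the old end and the written block have never been
    // written; they get filled with zero pages.
    let gap_blocks: Vec<u32> = (oldsize..blknum).collect();
    (gap_blocks, newsize)
}

fn main() {
    // Segment currently has 3 blocks (0..=2); writing block 6 zero-fills 3, 4, 5.
    let (gaps, newsize) = grow_for_write(3, 6);
    assert_eq!(gaps, vec![3, 4, 5]);
    assert_eq!(newsize, 7);
    // The function in the diff returns the growth, i.e. newsize - oldsize = 4 here.
    assert_eq!(newsize - 3, 4);
}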
|
||||
|
||||
/// Remember that the relation was truncated at given LSN
|
||||
pub fn put_truncation(&self, lsn: Lsn, segsize: u32) -> anyhow::Result<()> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
let old = inner.segsizes.insert(lsn, segsize);
|
||||
pub fn put_truncation(&self, lsn: Lsn, segsize: u32) -> WriteResult<()> {
|
||||
assert!(
|
||||
self.seg.rel.is_blocky(),
|
||||
"put_truncation() called on a non-blocky rel"
|
||||
);
|
||||
self.assert_not_frozen();
|
||||
|
||||
let mut inner = self.inner.write().unwrap();
|
||||
inner.check_writeable()?;
|
||||
|
||||
// check that we truncate to a smaller size than the segment was before the truncation
|
||||
let oldsize = inner.get_seg_size(lsn);
|
||||
assert!(segsize < oldsize);
|
||||
|
||||
let old = inner.segsizes.append_or_update_last(lsn, segsize).unwrap();
|
||||
|
||||
if old.is_some() {
|
||||
// We already had an entry for this LSN. That's odd..
|
||||
@@ -423,13 +497,18 @@ impl InMemoryLayer {
|
||||
}
|
||||
|
||||
/// Remember that the segment was dropped at given LSN
|
||||
pub fn drop_segment(&self, lsn: Lsn) -> anyhow::Result<()> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
pub fn drop_segment(&self, lsn: Lsn) -> WriteResult<()> {
|
||||
self.assert_not_frozen();
|
||||
|
||||
let mut inner = self.inner.write().unwrap();
|
||||
|
||||
inner.check_writeable()?;
|
||||
|
||||
assert!(inner.drop_lsn.is_none());
|
||||
inner.drop_lsn = Some(lsn);
|
||||
inner.writeable = false;
|
||||
|
||||
info!("dropped segment {} at {}", self.seg, lsn);
|
||||
trace!("dropped segment {} at {}", self.seg, lsn);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -448,6 +527,9 @@ impl InMemoryLayer {
|
||||
) -> Result<InMemoryLayer> {
|
||||
let seg = src.get_seg_tag();
|
||||
|
||||
assert!(oldest_pending_lsn.is_aligned());
|
||||
assert!(oldest_pending_lsn >= start_lsn);
|
||||
|
||||
trace!(
|
||||
"initializing new InMemoryLayer for writing {} on timeline {} at {}",
|
||||
seg,
|
||||
@@ -455,11 +537,11 @@ impl InMemoryLayer {
|
||||
start_lsn,
|
||||
);
|
||||
|
||||
// For convenience, copy the segment size from the predecessor layer
|
||||
let mut segsizes = BTreeMap::new();
|
||||
// Copy the segment size at the start LSN from the predecessor layer.
|
||||
let mut segsizes = VecMap::default();
|
||||
if seg.rel.is_blocky() {
|
||||
let size = src.get_seg_size(start_lsn)?;
|
||||
segsizes.insert(start_lsn, size);
|
||||
segsizes.append(start_lsn, size).unwrap();
|
||||
}
|
||||
|
||||
Ok(InMemoryLayer {
|
||||
@@ -468,101 +550,177 @@ impl InMemoryLayer {
|
||||
tenantid,
|
||||
seg,
|
||||
start_lsn,
|
||||
end_lsn: None,
|
||||
oldest_pending_lsn,
|
||||
inner: Mutex::new(InMemoryLayerInner {
|
||||
incremental: true,
|
||||
inner: RwLock::new(InMemoryLayerInner {
|
||||
drop_lsn: None,
|
||||
page_versions: BTreeMap::new(),
|
||||
page_versions: PageVersions::default(),
|
||||
segsizes,
|
||||
writeable: true,
|
||||
}),
|
||||
predecessor: Some(src),
|
||||
})
|
||||
}
|
||||
|
||||
///
|
||||
/// Write this in-memory layer to disk.
|
||||
///
|
||||
/// The cutoff point for the layer that's written to disk is 'end_lsn'.
|
||||
///
|
||||
/// Returns new layers that replace this one. Always returns a new image
|
||||
/// layer containing the page versions at the cutoff LSN, that were written
|
||||
/// to disk, and usually also a DeltaLayer that includes all the WAL records
|
||||
/// between start LSN and the cutoff. (The delta layer is not needed when
|
||||
/// a new relish is created with a single LSN, so that the start and end LSN
|
||||
/// are the same.) If there were page versions newer than 'end_lsn', also
|
||||
/// returns a new in-memory layer containing those page versions. The caller
|
||||
/// replaces this layer with the returned layers in the layer map.
|
||||
///
|
||||
pub fn freeze(
|
||||
&self,
|
||||
cutoff_lsn: Lsn,
|
||||
// This is needed just to call materialize_page()
|
||||
timeline: &LayeredTimeline,
|
||||
) -> Result<SuccessorLayers> {
|
||||
pub fn is_writeable(&self) -> bool {
|
||||
let inner = self.inner.read().unwrap();
|
||||
inner.writeable
|
||||
}
|
||||
|
||||
/// Splits `self` into two InMemoryLayers: `frozen` and `open`.
|
||||
/// All data up to and including `cutoff_lsn`
|
||||
/// is copied to `frozen`, while the remaining data is copied to `open`.
|
||||
/// After completion, self is non-writeable, but not frozen.
|
||||
pub fn freeze(self: Arc<Self>, cutoff_lsn: Lsn) -> Result<FreezeLayers> {
|
||||
info!(
|
||||
"freezing in memory layer for {} on timeline {} at {}",
|
||||
self.seg, self.timelineid, cutoff_lsn
|
||||
"freezing in memory layer {} on timeline {} at {} (oldest {})",
|
||||
self.filename().display(),
|
||||
self.timelineid,
|
||||
cutoff_lsn,
|
||||
self.oldest_pending_lsn
|
||||
);
|
||||
|
||||
let inner = self.inner.lock().unwrap();
|
||||
self.assert_not_frozen();
|
||||
|
||||
// Normally, use the cutoff LSN as the end of the frozen layer.
|
||||
// But if the relation was dropped, we know that there are no
|
||||
// more changes coming in for it, and in particular we know that
|
||||
// there are no changes "in flight" for the LSN anymore, so we use
|
||||
// the drop LSN instead. The drop-LSN could be ahead of the
|
||||
// caller-specified LSN!
|
||||
let dropped = inner.drop_lsn.is_some();
|
||||
let end_lsn = if dropped {
|
||||
inner.drop_lsn.unwrap()
|
||||
} else {
|
||||
cutoff_lsn
|
||||
};
|
||||
let self_ref = self.clone();
|
||||
let mut inner = self_ref.inner.write().unwrap();
|
||||
// Dropped layers don't need any special freeze actions,
|
||||
// they are marked as non-writeable at drop and just
|
||||
// written out to disk by the checkpointer.
|
||||
if inner.drop_lsn.is_some() {
|
||||
assert!(!inner.writeable);
|
||||
info!(
|
||||
"freezing in memory layer for {} on timeline {} is dropped at {}",
|
||||
self.seg,
|
||||
self.timelineid,
|
||||
inner.drop_lsn.unwrap()
|
||||
);
|
||||
|
||||
// Divide all the page versions into old and new at the 'end_lsn' cutoff point.
|
||||
let mut before_page_versions;
|
||||
let mut before_segsizes;
|
||||
let mut after_page_versions;
|
||||
let mut after_segsizes;
|
||||
if !dropped {
|
||||
before_segsizes = BTreeMap::new();
|
||||
after_segsizes = BTreeMap::new();
|
||||
for (lsn, size) in inner.segsizes.iter() {
|
||||
if *lsn > end_lsn {
|
||||
after_segsizes.insert(*lsn, *size);
|
||||
} else {
|
||||
before_segsizes.insert(*lsn, *size);
|
||||
}
|
||||
}
|
||||
// There should be no newer layer that refers to this non-writeable layer,
|
||||
// because a layer created after a dropped one represents a new rel.
|
||||
return Ok(FreezeLayers {
|
||||
frozen: self,
|
||||
open: None,
|
||||
});
|
||||
}
|
||||
assert!(inner.writeable);
|
||||
inner.writeable = false;
|
||||
|
||||
before_page_versions = BTreeMap::new();
|
||||
after_page_versions = BTreeMap::new();
|
||||
for ((blknum, lsn), pv) in inner.page_versions.iter() {
|
||||
match lsn.cmp(&end_lsn) {
|
||||
Ordering::Less => {
|
||||
before_page_versions.insert((*blknum, *lsn), pv.clone());
|
||||
}
|
||||
Ordering::Equal => {
|
||||
// Page versions at the cutoff LSN will be stored in the
|
||||
// materialized image layer.
|
||||
}
|
||||
Ordering::Greater => {
|
||||
after_page_versions.insert((*blknum, *lsn), pv.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
before_page_versions = inner.page_versions.clone();
|
||||
before_segsizes = inner.segsizes.clone();
|
||||
after_segsizes = BTreeMap::new();
|
||||
after_page_versions = BTreeMap::new();
|
||||
// Divide all the page versions into old and new
|
||||
// at the 'cutoff_lsn' point.
|
||||
let mut after_oldest_lsn: Accum<Lsn> = Accum(None);
|
||||
|
||||
let cutoff_lsn_exclusive = Lsn(cutoff_lsn.0 + 1);
|
||||
|
||||
let (before_segsizes, mut after_segsizes) = inner.segsizes.split_at(&cutoff_lsn_exclusive);
|
||||
if let Some((lsn, _size)) = after_segsizes.as_slice().first() {
|
||||
after_oldest_lsn.accum(min, *lsn);
|
||||
}
|
||||
|
||||
// we can release the lock now.
|
||||
drop(inner);
|
||||
let (before_page_versions, after_page_versions) = inner
|
||||
.page_versions
|
||||
.split_at(cutoff_lsn_exclusive, &mut after_oldest_lsn);
|
||||
|
||||
let frozen = Arc::new(InMemoryLayer {
|
||||
conf: self.conf,
|
||||
tenantid: self.tenantid,
|
||||
timelineid: self.timelineid,
|
||||
seg: self.seg,
|
||||
start_lsn: self.start_lsn,
|
||||
end_lsn: Some(cutoff_lsn),
|
||||
oldest_pending_lsn: self.start_lsn,
|
||||
incremental: self.incremental,
|
||||
inner: RwLock::new(InMemoryLayerInner {
|
||||
drop_lsn: inner.drop_lsn,
|
||||
page_versions: before_page_versions,
|
||||
segsizes: before_segsizes,
|
||||
writeable: false,
|
||||
}),
|
||||
});
|
||||
|
||||
let open = if !after_segsizes.is_empty() || !after_page_versions.is_empty() {
|
||||
let mut new_open = Self::create_successor_layer(
|
||||
self.conf,
|
||||
frozen.clone(),
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
cutoff_lsn + 1,
|
||||
after_oldest_lsn.0.unwrap(),
|
||||
)?;
|
||||
|
||||
let new_inner = new_open.inner.get_mut().unwrap();
|
||||
// Ensure page_versions doesn't contain anything
|
||||
// so we can just replace it
|
||||
assert!(new_inner.page_versions.is_empty());
|
||||
new_inner.page_versions = after_page_versions;
|
||||
new_inner.segsizes.extend(&mut after_segsizes).unwrap();
|
||||
|
||||
Some(Arc::new(new_open))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(FreezeLayers { frozen, open })
|
||||
}
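
freeze() splits segsizes and page_versions at Lsn(cutoff_lsn.0 + 1): entries at or below the cutoff stay in the frozen layer, everything above seeds the new open layer. The split_at helpers themselves are not shown in this diff; assuming they split an LSN-sorted Vec at the first key >= the exclusive cutoff, the core operation looks like this:

/// Split an LSN-sorted Vec into (at_or_before_cutoff, after_cutoff).
/// `cutoff_exclusive` plays the role of `Lsn(cutoff_lsn.0 + 1)` in the diff.
fn split_at<V>(entries: Vec<(u64, V)>, cutoff_exclusive: u64) -> (Vec<(u64, V)>, Vec<(u64, V)>) {
    let mut before = entries;
    // First index whose LSN is >= the exclusive cutoff.
    let split = before.partition_point(|(lsn, _)| *lsn < cutoff_exclusive);
    let after = before.split_off(split);
    (before, after)
}

fn main() {
    let segsizes = vec![(10u64, 1u32), (20, 2), (30, 3)];
    // Freeze at cutoff_lsn = 20 -> exclusive cutoff 21.
    let (frozen, open) = split_at(segsizes, 21);
    assert_eq!(frozen, vec![(10, 1), (20, 2)]);
    assert_eq!(open, vec![(30, 3)]);
}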
|
||||
|
||||
/// Write this frozen in-memory layer to disk.
|
||||
///
|
||||
/// Returns new layers that replace this one.
|
||||
/// If not dropped, returns a new image layer containing the page versions
|
||||
/// at the `end_lsn`. Can also return a DeltaLayer that includes all the
|
||||
/// WAL records between start and end LSN. (The delta layer is not needed
|
||||
/// when a new relish is created with a single LSN, so that the start and
|
||||
/// end LSN are the same.)
|
||||
pub fn write_to_disk(&self, timeline: &LayeredTimeline) -> Result<Vec<Arc<dyn Layer>>> {
|
||||
trace!(
|
||||
"write_to_disk {} end_lsn is {} get_end_lsn is {}",
|
||||
self.filename().display(),
|
||||
self.end_lsn.unwrap_or(Lsn(0)),
|
||||
self.get_end_lsn()
|
||||
);
|
||||
|
||||
// Grab the lock in read-mode. We hold it over the I/O, but because this
|
||||
// layer is not writeable anymore, no one should be trying to acquire the
|
||||
// write lock on it, so we shouldn't block anyone. There's one exception
|
||||
// though: another thread might have grabbed a reference to this layer
|
||||
// in `get_layer_for_write' just before the checkpointer called
|
||||
// `freeze`, and then `write_to_disk` on it. When the thread gets the
|
||||
// lock, it will see that it's not writeable anymore and retry, but it
|
||||
// would have to wait until we release it. That race condition is very
|
||||
// rare though, so we just accept the potential latency hit for now.
|
||||
let inner = self.inner.read().unwrap();
|
||||
assert!(!inner.writeable);
|
||||
|
||||
if let Some(drop_lsn) = inner.drop_lsn {
|
||||
let delta_layer = DeltaLayer::create(
|
||||
self.conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
self.seg,
|
||||
self.start_lsn,
|
||||
drop_lsn,
|
||||
true,
|
||||
inner.page_versions.ordered_block_iter(None),
|
||||
inner.segsizes.clone(),
|
||||
)?;
|
||||
trace!(
|
||||
"freeze: created delta layer for dropped segment {} {}-{}",
|
||||
self.seg,
|
||||
self.start_lsn,
|
||||
drop_lsn
|
||||
);
|
||||
return Ok(vec![Arc::new(delta_layer)]);
|
||||
}
|
||||
|
||||
let end_lsn = self.end_lsn.unwrap();
|
||||
|
||||
let mut before_page_versions = inner.page_versions.ordered_block_iter(Some(end_lsn));
|
||||
|
||||
let mut frozen_layers: Vec<Arc<dyn Layer>> = Vec::new();
|
||||
|
||||
if self.start_lsn != end_lsn {
|
||||
let (before_segsizes, _after_segsizes) = inner.segsizes.split_at(&Lsn(end_lsn.0 + 1));
|
||||
|
||||
// Write the page versions before the cutoff to disk.
|
||||
let delta_layer = DeltaLayer::create(
|
||||
self.conf,
|
||||
@@ -571,13 +729,11 @@ impl InMemoryLayer {
|
||||
self.seg,
|
||||
self.start_lsn,
|
||||
end_lsn,
|
||||
dropped,
|
||||
self.predecessor.clone(),
|
||||
false,
|
||||
before_page_versions,
|
||||
before_segsizes,
|
||||
)?;
|
||||
let delta_layer_rc: Arc<dyn Layer> = Arc::new(delta_layer);
|
||||
frozen_layers.push(delta_layer_rc);
|
||||
frozen_layers.push(Arc::new(delta_layer));
|
||||
trace!(
|
||||
"freeze: created delta layer {} {}-{}",
|
||||
self.seg,
|
||||
@@ -585,38 +741,16 @@ impl InMemoryLayer {
|
||||
end_lsn
|
||||
);
|
||||
} else {
|
||||
assert!(before_page_versions.is_empty());
|
||||
assert!(before_page_versions.next().is_none());
|
||||
}
|
||||
|
||||
let mut new_open_rc = None;
|
||||
if !dropped {
|
||||
// Write a new base image layer at the cutoff point
|
||||
let imgfile = ImageLayer::create_from_src(self.conf, timeline, self, end_lsn)?;
|
||||
let imgfile_rc: Arc<dyn Layer> = Arc::new(imgfile);
|
||||
frozen_layers.push(Arc::clone(&imgfile_rc));
|
||||
trace!("freeze: created image layer {} at {}", self.seg, end_lsn);
|
||||
drop(inner);
|
||||
|
||||
// If there were any page versions newer than the cutoff, initialize a new in-memory
|
||||
// layer to hold them
|
||||
if !after_segsizes.is_empty() || !after_page_versions.is_empty() {
|
||||
let new_open = Self::create_successor_layer(
|
||||
self.conf,
|
||||
imgfile_rc,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
end_lsn,
|
||||
end_lsn,
|
||||
)?;
|
||||
let mut new_inner = new_open.inner.lock().unwrap();
|
||||
new_inner.page_versions.append(&mut after_page_versions);
|
||||
new_inner.segsizes.append(&mut after_segsizes);
|
||||
drop(new_inner);
|
||||
trace!("freeze: created new in-mem layer {} {}-", self.seg, end_lsn);
|
||||
// Write a new base image layer at the cutoff point
|
||||
let image_layer = ImageLayer::create_from_src(self.conf, timeline, self, end_lsn)?;
|
||||
frozen_layers.push(Arc::new(image_layer));
|
||||
trace!("freeze: created image layer {} at {}", self.seg, end_lsn);
|
||||
|
||||
new_open_rc = Some(Arc::new(new_open))
|
||||
}
|
||||
}
|
||||
|
||||
Ok((frozen_layers, new_open_rc))
|
||||
Ok(frozen_layers)
|
||||
}
|
||||
}
|
||||
|
||||
468
pageserver/src/layered_repository/interval_tree.rs
Normal file
@@ -0,0 +1,468 @@
|
||||
///
|
||||
/// IntervalTree is a data structure for holding intervals. It is generic
|
||||
/// to make unit testing possible, but the only real user of it is the layer map,
|
||||
///
|
||||
/// It's inspired by the "segment tree" or a "statistic tree" as described in
|
||||
/// https://en.wikipedia.org/wiki/Segment_tree. However, we use a B-tree to hold
|
||||
/// the points instead of a binary tree. This is called an "interval tree" instead
|
||||
/// of "segment tree" because the term "segment" is already using Zenith to mean
|
||||
/// something else. To add to the confusion, there is another data structure known
|
||||
/// as "interval tree" out there (see https://en.wikipedia.org/wiki/Interval_tree),
|
||||
/// for storing intervals, but this isn't that.
|
||||
///
|
||||
/// The basic idea is to have a B-tree of "interesting Points". At each Point,
|
||||
/// there is a list of intervals that contain the point. The Points are formed
|
||||
/// from the start bounds of each interval; there is a Point for each distinct
|
||||
/// start bound.
|
||||
///
|
||||
/// Operations:
|
||||
///
|
||||
/// To find intervals that contain a given point, you search the b-tree to find
|
||||
/// the nearest Point <= search key. Then you just return the list of intervals.
|
||||
///
|
||||
/// To insert an interval, find the Point with start key equal to the inserted item.
|
||||
/// If the Point doesn't exist yet, create it, by copying all the items from the
|
||||
/// previous Point that cover the new Point. Then walk right, inserting the new
|
||||
/// interval to all the Points that are contained by the new interval (including the
|
||||
/// newly created Point).
|
||||
///
|
||||
/// To remove an interval, you scan the tree for all the Points that are contained by
|
||||
/// the removed interval, and remove it from the list in each Point.
|
||||
///
|
||||
/// Requirements and assumptions:
|
||||
///
|
||||
/// - Can store overlapping items
|
||||
/// - But there are not many overlapping items
|
||||
/// - The interval bounds don't change after it is added to the tree
|
||||
/// - Intervals are uniquely identified by pointer equality. You must not insert the
|
||||
/// same interval object twice, and `remove` uses pointer equality to remove the right
|
||||
/// interval. It is OK to have two intervals with the same bounds, however.
|
||||
///
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct IntervalTree<I: ?Sized>
|
||||
where
|
||||
I: IntervalItem,
|
||||
{
|
||||
points: BTreeMap<I::Key, Point<I>>,
|
||||
}
|
||||
|
||||
struct Point<I: ?Sized> {
|
||||
/// All intervals that contain this point, in no particular order.
|
||||
///
|
||||
/// We assume that there aren't a lot of overlapping intervals, so that this vector
|
||||
/// never grows very large. If that assumption doesn't hold, we could keep this ordered
|
||||
/// by the end bound, to speed up `search`. But as long as there are only a few elements,
|
||||
/// a linear search is OK.
|
||||
elements: Vec<Arc<I>>,
|
||||
}
|
||||
|
||||
/// Abstraction for an interval that can be stored in the tree
|
||||
///
|
||||
/// The start bound is inclusive and the end bound is exclusive. End must be greater
|
||||
/// than start.
|
||||
pub trait IntervalItem {
|
||||
type Key: Ord + Copy + Debug + Sized;
|
||||
|
||||
fn start_key(&self) -> Self::Key;
|
||||
fn end_key(&self) -> Self::Key;
|
||||
|
||||
fn bounds(&self) -> Range<Self::Key> {
|
||||
self.start_key()..self.end_key()
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: ?Sized> IntervalTree<I>
|
||||
where
|
||||
I: IntervalItem,
|
||||
{
|
||||
/// Return an element that contains 'key', or precedes it.
|
||||
///
|
||||
/// If there are multiple candidates, returns the one with the highest 'end' key.
|
||||
pub fn search(&self, key: I::Key) -> Option<Arc<I>> {
|
||||
// Find the greatest point that precedes or is equal to the search key. If there is
|
||||
// none, returns None.
|
||||
let (_, p) = self.points.range(..=key).next_back()?;
|
||||
|
||||
// Find the element with the highest end key at this point
|
||||
let highest_item = p
|
||||
.elements
|
||||
.iter()
|
||||
.reduce(|a, b| {
|
||||
// starting with Rust 1.53, could use `std::cmp::max_by_key` here
|
||||
if a.end_key() > b.end_key() {
|
||||
a
|
||||
} else {
|
||||
b
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
Some(Arc::clone(highest_item))
|
||||
}
|
||||
|
||||
/// Iterate over all items with start bound >= 'key'
|
||||
pub fn iter_newer(&self, key: I::Key) -> IntervalIter<I> {
|
||||
IntervalIter {
|
||||
point_iter: self.points.range(key..),
|
||||
elem_iter: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterate over all items
|
||||
pub fn iter(&self) -> IntervalIter<I> {
|
||||
IntervalIter {
|
||||
point_iter: self.points.range(..),
|
||||
elem_iter: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, item: Arc<I>) {
|
||||
let start_key = item.start_key();
|
||||
let end_key = item.end_key();
|
||||
assert!(start_key < end_key);
|
||||
let bounds = start_key..end_key;
|
||||
|
||||
// Find the starting point and walk forward from there
|
||||
let mut found_start_point = false;
|
||||
let iter = self.points.range_mut(bounds);
|
||||
for (point_key, point) in iter {
|
||||
if *point_key == start_key {
|
||||
found_start_point = true;
|
||||
// It is an error to insert the same item into the tree twice.
|
||||
assert!(
|
||||
!point.elements.iter().any(|x| Arc::ptr_eq(x, &item)),
|
||||
"interval is already in the tree"
|
||||
);
|
||||
}
|
||||
point.elements.push(Arc::clone(&item));
|
||||
}
|
||||
if !found_start_point {
|
||||
// Create a new Point for the starting point
|
||||
|
||||
// Look at the previous point, and copy over elements that overlap with this
|
||||
// new point
|
||||
let mut new_elements: Vec<Arc<I>> = Vec::new();
|
||||
if let Some((_, prev_point)) = self.points.range(..start_key).next_back() {
|
||||
let overlapping_prev_elements = prev_point
|
||||
.elements
|
||||
.iter()
|
||||
.filter(|x| x.bounds().contains(&start_key))
|
||||
.cloned();
|
||||
|
||||
new_elements.extend(overlapping_prev_elements);
|
||||
}
|
||||
new_elements.push(item);
|
||||
|
||||
let new_point = Point {
|
||||
elements: new_elements,
|
||||
};
|
||||
self.points.insert(start_key, new_point);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn remove(&mut self, item: &Arc<I>) {
|
||||
// Scan all the Points covered by this interval's bounds
|
||||
let start_key = item.start_key();
|
||||
let end_key = item.end_key();
|
||||
let bounds = start_key..end_key;
|
||||
|
||||
let mut points_to_remove: Vec<I::Key> = Vec::new();
|
||||
let mut found_start_point = false;
|
||||
for (point_key, point) in self.points.range_mut(bounds) {
|
||||
if *point_key == start_key {
|
||||
found_start_point = true;
|
||||
}
|
||||
let len_before = point.elements.len();
|
||||
point.elements.retain(|other| !Arc::ptr_eq(other, item));
|
||||
let len_after = point.elements.len();
|
||||
assert_eq!(len_after + 1, len_before);
|
||||
if len_after == 0 {
|
||||
points_to_remove.push(*point_key);
|
||||
}
|
||||
}
|
||||
assert!(found_start_point);
|
||||
|
||||
for k in points_to_remove {
|
||||
self.points.remove(&k).unwrap();
|
||||
}
|
||||
}
|
||||
}
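// Illustrative usage sketch, not part of the original diff. It assumes the
// hypothetical `LsnRange` type from the sketch above. `insert` takes an Arc,
// `search` returns the interval covering (or preceding) the key with the
// highest end bound, and `remove` matches by pointer equality on the Arc.
#[allow(dead_code)]
fn interval_tree_usage_sketch() {
    let mut tree: IntervalTree<LsnRange> = IntervalTree::default();

    let a = Arc::new(LsnRange { start: 10, end: 20 });
    let b = Arc::new(LsnRange { start: 15, end: 30 });
    tree.insert(Arc::clone(&a));
    tree.insert(Arc::clone(&b));

    // Key 17 is covered by both intervals; `search` prefers the higher end key.
    let hit = tree.search(17).unwrap();
    assert!(Arc::ptr_eq(&hit, &b));

    // Removal uses pointer equality, so the exact same Arc must be passed back.
    tree.remove(&a);
    assert!(Arc::ptr_eq(&tree.search(17).unwrap(), &b));
}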
|
||||
|
||||
pub struct IntervalIter<'a, I: ?Sized>
|
||||
where
|
||||
I: IntervalItem,
|
||||
{
|
||||
point_iter: std::collections::btree_map::Range<'a, I::Key, Point<I>>,
|
||||
elem_iter: Option<(I::Key, std::slice::Iter<'a, Arc<I>>)>,
|
||||
}
|
||||
|
||||
impl<'a, I> Iterator for IntervalIter<'a, I>
|
||||
where
|
||||
I: IntervalItem + ?Sized,
|
||||
{
|
||||
type Item = Arc<I>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
// Iterate over all elements in all the points in 'point_iter'. To avoid
|
||||
// returning the same element twice, we only return each element at its
|
||||
// starting point.
|
||||
loop {
|
||||
// Return next remaining element from the current point
|
||||
if let Some((point_key, elem_iter)) = &mut self.elem_iter {
|
||||
for elem in elem_iter {
|
||||
if elem.start_key() == *point_key {
|
||||
return Some(Arc::clone(elem));
|
||||
}
|
||||
}
|
||||
}
|
||||
// No more elements at this point. Move to next point.
|
||||
if let Some((point_key, point)) = self.point_iter.next() {
|
||||
self.elem_iter = Some((*point_key, point.elements.iter()));
|
||||
continue;
|
||||
} else {
|
||||
// No more points, all done
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
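// Illustrative sketch, not part of the original diff (it reuses the hypothetical
// `LsnRange` type from above): although an interval is registered at every Point
// it covers, the iterator reports it only at its starting point, so each interval
// comes out exactly once.
#[allow(dead_code)]
fn interval_iter_dedup_sketch() {
    let mut tree: IntervalTree<LsnRange> = IntervalTree::default();
    tree.insert(Arc::new(LsnRange { start: 10, end: 30 }));
    tree.insert(Arc::new(LsnRange { start: 20, end: 40 }));

    // The 10..30 interval is stored at Points 10 and 20, but iter() yields it once.
    assert_eq!(tree.iter().count(), 2);
}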
|
||||
|
||||
impl<I: ?Sized> Default for IntervalTree<I>
|
||||
where
|
||||
I: IntervalItem,
|
||||
{
|
||||
fn default() -> Self {
|
||||
IntervalTree {
|
||||
points: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::fmt;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct MockItem {
|
||||
start_key: u32,
|
||||
end_key: u32,
|
||||
val: String,
|
||||
}
|
||||
impl IntervalItem for MockItem {
|
||||
type Key = u32;
|
||||
|
||||
fn start_key(&self) -> u32 {
|
||||
self.start_key
|
||||
}
|
||||
fn end_key(&self) -> u32 {
|
||||
self.end_key
|
||||
}
|
||||
}
|
||||
impl MockItem {
|
||||
fn new(start_key: u32, end_key: u32) -> Self {
|
||||
MockItem {
|
||||
start_key,
|
||||
end_key,
|
||||
val: format!("{}-{}", start_key, end_key),
|
||||
}
|
||||
}
|
||||
fn new_str(start_key: u32, end_key: u32, val: &str) -> Self {
|
||||
MockItem {
|
||||
start_key,
|
||||
end_key,
|
||||
val: format!("{}-{}: {}", start_key, end_key, val),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl fmt::Display for MockItem {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.val)
|
||||
}
|
||||
}
|
||||
#[rustfmt::skip]
|
||||
fn assert_search(
|
||||
tree: &IntervalTree<MockItem>,
|
||||
key: u32,
|
||||
expected: &[&str],
|
||||
) -> Option<Arc<MockItem>> {
|
||||
if let Some(v) = tree.search(key) {
|
||||
let vstr = v.to_string();
|
||||
|
||||
assert!(!expected.is_empty(), "search with {} returned {}, expected None", key, v);
|
||||
assert!(
|
||||
expected.contains(&vstr.as_str()),
|
||||
"search with {} returned {}, expected one of: {:?}",
|
||||
key, v, expected,
|
||||
);
|
||||
|
||||
Some(v)
|
||||
} else {
|
||||
assert!(
|
||||
expected.is_empty(),
|
||||
"search with {} returned None, expected one of {:?}",
|
||||
key, expected
|
||||
);
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn assert_contents(tree: &IntervalTree<MockItem>, expected: &[&str]) {
|
||||
let mut contents: Vec<String> = tree.iter().map(|e| e.to_string()).collect();
|
||||
contents.sort();
|
||||
assert_eq!(contents, expected);
|
||||
}
|
||||
|
||||
fn dump_tree(tree: &IntervalTree<MockItem>) {
|
||||
for (point_key, point) in tree.points.iter() {
|
||||
print!("{}:", point_key);
|
||||
for e in point.elements.iter() {
|
||||
print!(" {}", e);
|
||||
}
|
||||
println!();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_interval_tree_simple() {
|
||||
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
|
||||
|
||||
// Simple, non-overlapping ranges.
|
||||
tree.insert(Arc::new(MockItem::new(10, 11)));
|
||||
tree.insert(Arc::new(MockItem::new(11, 12)));
|
||||
tree.insert(Arc::new(MockItem::new(12, 13)));
|
||||
tree.insert(Arc::new(MockItem::new(18, 19)));
|
||||
tree.insert(Arc::new(MockItem::new(17, 18)));
|
||||
tree.insert(Arc::new(MockItem::new(15, 16)));
|
||||
|
||||
assert_search(&tree, 9, &[]);
|
||||
assert_search(&tree, 10, &["10-11"]);
|
||||
assert_search(&tree, 11, &["11-12"]);
|
||||
assert_search(&tree, 12, &["12-13"]);
|
||||
assert_search(&tree, 13, &["12-13"]);
|
||||
assert_search(&tree, 14, &["12-13"]);
|
||||
assert_search(&tree, 15, &["15-16"]);
|
||||
assert_search(&tree, 16, &["15-16"]);
|
||||
assert_search(&tree, 17, &["17-18"]);
|
||||
assert_search(&tree, 18, &["18-19"]);
|
||||
assert_search(&tree, 19, &["18-19"]);
|
||||
assert_search(&tree, 20, &["18-19"]);
|
||||
|
||||
// remove a few entries and search around them again
|
||||
tree.remove(&assert_search(&tree, 10, &["10-11"]).unwrap()); // first entry
|
||||
tree.remove(&assert_search(&tree, 12, &["12-13"]).unwrap()); // entry in the middle
|
||||
tree.remove(&assert_search(&tree, 18, &["18-19"]).unwrap()); // last entry
|
||||
assert_search(&tree, 9, &[]);
|
||||
assert_search(&tree, 10, &[]);
|
||||
assert_search(&tree, 11, &["11-12"]);
|
||||
assert_search(&tree, 12, &["11-12"]);
|
||||
assert_search(&tree, 14, &["11-12"]);
|
||||
assert_search(&tree, 15, &["15-16"]);
|
||||
assert_search(&tree, 17, &["17-18"]);
|
||||
assert_search(&tree, 18, &["17-18"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_interval_tree_overlap() {
|
||||
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
|
||||
|
||||
// Overlapping items
|
||||
tree.insert(Arc::new(MockItem::new(22, 24)));
|
||||
tree.insert(Arc::new(MockItem::new(23, 25)));
|
||||
let x24_26 = Arc::new(MockItem::new(24, 26));
|
||||
tree.insert(Arc::clone(&x24_26));
|
||||
let x26_28 = Arc::new(MockItem::new(26, 28));
|
||||
tree.insert(Arc::clone(&x26_28));
|
||||
tree.insert(Arc::new(MockItem::new(25, 27)));
|
||||
|
||||
assert_search(&tree, 22, &["22-24"]);
|
||||
assert_search(&tree, 23, &["22-24", "23-25"]);
|
||||
assert_search(&tree, 24, &["23-25", "24-26"]);
|
||||
assert_search(&tree, 25, &["24-26", "25-27"]);
|
||||
assert_search(&tree, 26, &["25-27", "26-28"]);
|
||||
assert_search(&tree, 27, &["26-28"]);
|
||||
assert_search(&tree, 28, &["26-28"]);
|
||||
assert_search(&tree, 29, &["26-28"]);
|
||||
|
||||
tree.remove(&x24_26);
|
||||
tree.remove(&x26_28);
|
||||
assert_search(&tree, 23, &["22-24", "23-25"]);
|
||||
assert_search(&tree, 24, &["23-25"]);
|
||||
assert_search(&tree, 25, &["25-27"]);
|
||||
assert_search(&tree, 26, &["25-27"]);
|
||||
assert_search(&tree, 27, &["25-27"]);
|
||||
assert_search(&tree, 28, &["25-27"]);
|
||||
assert_search(&tree, 29, &["25-27"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_interval_tree_nested() {
|
||||
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
|
||||
|
||||
// Items containing other items
|
||||
tree.insert(Arc::new(MockItem::new(31, 39)));
|
||||
tree.insert(Arc::new(MockItem::new(32, 34)));
|
||||
tree.insert(Arc::new(MockItem::new(33, 35)));
|
||||
tree.insert(Arc::new(MockItem::new(30, 40)));
|
||||
|
||||
assert_search(&tree, 30, &["30-40"]);
|
||||
assert_search(&tree, 31, &["30-40", "31-39"]);
|
||||
assert_search(&tree, 32, &["30-40", "32-34", "31-39"]);
|
||||
assert_search(&tree, 33, &["30-40", "32-34", "33-35", "31-39"]);
|
||||
assert_search(&tree, 34, &["30-40", "33-35", "31-39"]);
|
||||
assert_search(&tree, 35, &["30-40", "31-39"]);
|
||||
assert_search(&tree, 36, &["30-40", "31-39"]);
|
||||
assert_search(&tree, 37, &["30-40", "31-39"]);
|
||||
assert_search(&tree, 38, &["30-40", "31-39"]);
|
||||
assert_search(&tree, 39, &["30-40"]);
|
||||
assert_search(&tree, 40, &["30-40"]);
|
||||
assert_search(&tree, 41, &["30-40"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_interval_tree_duplicates() {
|
||||
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
|
||||
|
||||
// Duplicate keys
|
||||
let item_a = Arc::new(MockItem::new_str(55, 56, "a"));
|
||||
tree.insert(Arc::clone(&item_a));
|
||||
let item_b = Arc::new(MockItem::new_str(55, 56, "b"));
|
||||
tree.insert(Arc::clone(&item_b));
|
||||
let item_c = Arc::new(MockItem::new_str(55, 56, "c"));
|
||||
tree.insert(Arc::clone(&item_c));
|
||||
let item_d = Arc::new(MockItem::new_str(54, 56, "d"));
|
||||
tree.insert(Arc::clone(&item_d));
|
||||
let item_e = Arc::new(MockItem::new_str(55, 57, "e"));
|
||||
tree.insert(Arc::clone(&item_e));
|
||||
|
||||
dump_tree(&tree);
|
||||
|
||||
assert_search(
|
||||
&tree,
|
||||
55,
|
||||
&["55-56: a", "55-56: b", "55-56: c", "54-56: d", "55-57: e"],
|
||||
);
|
||||
tree.remove(&item_b);
|
||||
dump_tree(&tree);
|
||||
|
||||
assert_contents(&tree, &["54-56: d", "55-56: a", "55-56: c", "55-57: e"]);
|
||||
|
||||
tree.remove(&item_d);
|
||||
dump_tree(&tree);
|
||||
assert_contents(&tree, &["55-56: a", "55-56: c", "55-57: e"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_interval_tree_insert_twice() {
|
||||
let mut tree: IntervalTree<MockItem> = IntervalTree::default();
|
||||
|
||||
// Inserting the same item twice is not cool
|
||||
let item = Arc::new(MockItem::new(1, 2));
|
||||
tree.insert(Arc::clone(&item));
|
||||
tree.insert(Arc::clone(&item)); // fails assertion
|
||||
}
|
||||
}
|
||||
@@ -9,16 +9,14 @@
|
||||
//! new image and delta layers and corresponding files are written to disk.
|
||||
//!
|
||||
|
||||
use crate::layered_repository::interval_tree::{IntervalItem, IntervalIter, IntervalTree};
|
||||
use crate::layered_repository::storage_layer::{Layer, SegmentTag};
|
||||
use crate::layered_repository::InMemoryLayer;
|
||||
use crate::relish::*;
|
||||
use anyhow::Result;
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashSet;
|
||||
use std::collections::{BTreeMap, BinaryHeap, HashMap};
|
||||
use std::ops::Bound::Included;
|
||||
use std::collections::{BinaryHeap, HashMap};
|
||||
use std::sync::Arc;
|
||||
use zenith_metrics::{register_int_gauge, IntGauge};
|
||||
use zenith_utils::lsn::Lsn;
|
||||
@@ -35,68 +33,21 @@ lazy_static! {
|
||||
///
|
||||
/// LayerMap tracks what layers exist on a timeline.
|
||||
///
|
||||
#[derive(Default)]
|
||||
pub struct LayerMap {
|
||||
/// All the layers keyed by segment tag
|
||||
segs: HashMap<SegmentTag, SegEntry>,
|
||||
|
||||
/// All in-memory layers, ordered by 'oldest_pending_lsn' of each layer.
|
||||
/// This allows easy access to the in-memory layer that contains the
|
||||
/// oldest WAL record.
|
||||
open_segs: BinaryHeap<OpenSegEntry>,
|
||||
/// All in-memory layers, ordered by 'oldest_pending_lsn' and generation
|
||||
/// of each layer. This allows easy access to the in-memory layer that
|
||||
/// contains the oldest WAL record.
|
||||
open_layers: BinaryHeap<OpenLayerEntry>,
|
||||
|
||||
/// Generation number, used to distinguish newly inserted entries in the
|
||||
/// binary heap from older entries during checkpoint.
|
||||
current_generation: u64,
|
||||
}
|
||||
|
||||
///
|
||||
/// Per-segment entry in the LayerMap.segs hash map
|
||||
///
|
||||
/// The last layer that is open for writes is always an InMemoryLayer,
|
||||
/// and is kept in a separate field, because there can be only one for
|
||||
/// each segment. The older layers, stored on disk, are kept in a
|
||||
/// BTreeMap keyed by the layer's start LSN.
|
||||
struct SegEntry {
|
||||
pub open: Option<Arc<InMemoryLayer>>,
|
||||
pub historic: BTreeMap<Lsn, Arc<dyn Layer>>,
|
||||
}
|
||||
|
||||
/// Entry held LayerMap.open_segs, with boilerplate comparison
|
||||
/// routines to implement a min-heap ordered by 'oldest_pending_lsn'
|
||||
///
|
||||
/// Each entry also carries a generation number. It can be used to distinguish
|
||||
/// entries with the same 'oldest_pending_lsn'.
|
||||
struct OpenSegEntry {
|
||||
pub oldest_pending_lsn: Lsn,
|
||||
pub layer: Arc<InMemoryLayer>,
|
||||
pub generation: u64,
|
||||
}
|
||||
impl Ord for OpenSegEntry {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
|
||||
// to get that.
|
||||
other.oldest_pending_lsn.cmp(&self.oldest_pending_lsn)
|
||||
}
|
||||
}
|
||||
impl PartialOrd for OpenSegEntry {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
|
||||
// to get that. Entries with identical oldest_pending_lsn are ordered by generation
|
||||
Some(
|
||||
other
|
||||
.oldest_pending_lsn
|
||||
.cmp(&self.oldest_pending_lsn)
|
||||
.then_with(|| other.generation.cmp(&self.generation)),
|
||||
)
|
||||
}
|
||||
}
|
||||
impl PartialEq for OpenSegEntry {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.oldest_pending_lsn.eq(&other.oldest_pending_lsn)
|
||||
}
|
||||
}
|
||||
impl Eq for OpenSegEntry {}
|
||||
|
||||
impl LayerMap {
|
||||
///
|
||||
/// Look up a layer using the given segment tag and LSN. This differs from a
|
||||
@@ -107,23 +58,7 @@ impl LayerMap {
|
||||
pub fn get(&self, tag: &SegmentTag, lsn: Lsn) -> Option<Arc<dyn Layer>> {
|
||||
let segentry = self.segs.get(tag)?;
|
||||
|
||||
if let Some(open) = &segentry.open {
|
||||
if open.get_start_lsn() <= lsn {
|
||||
let x: Arc<dyn Layer> = Arc::clone(open) as _;
|
||||
return Some(x);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some((_k, v)) = segentry
|
||||
.historic
|
||||
.range((Included(Lsn(0)), Included(lsn)))
|
||||
.next_back()
|
||||
{
|
||||
let x: Arc<dyn Layer> = Arc::clone(v) as _;
|
||||
Some(x)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
segentry.get(lsn)
|
||||
}
|
||||
|
||||
///
|
||||
@@ -132,6 +67,7 @@ impl LayerMap {
|
||||
///
|
||||
pub fn get_open(&self, tag: &SegmentTag) -> Option<Arc<InMemoryLayer>> {
|
||||
let segentry = self.segs.get(tag)?;
|
||||
|
||||
segentry.open.as_ref().map(Arc::clone)
|
||||
}
|
||||
|
||||
@@ -139,38 +75,45 @@ impl LayerMap {
|
||||
/// Insert an open in-memory layer
|
||||
///
|
||||
pub fn insert_open(&mut self, layer: Arc<InMemoryLayer>) {
|
||||
let tag = layer.get_seg_tag();
|
||||
let segentry = self.segs.entry(layer.get_seg_tag()).or_default();
|
||||
|
||||
if let Some(segentry) = self.segs.get_mut(&tag) {
|
||||
if let Some(_old) = &segentry.open {
|
||||
// FIXME: shouldn't exist, but check
|
||||
}
|
||||
segentry.open = Some(Arc::clone(&layer));
|
||||
} else {
|
||||
let segentry = SegEntry {
|
||||
open: Some(Arc::clone(&layer)),
|
||||
historic: BTreeMap::new(),
|
||||
};
|
||||
self.segs.insert(tag, segentry);
|
||||
}
|
||||
segentry.update_open(Arc::clone(&layer));
|
||||
|
||||
let opensegentry = OpenSegEntry {
|
||||
let oldest_pending_lsn = layer.get_oldest_pending_lsn();
|
||||
|
||||
// After a crash and restart, 'oldest_pending_lsn' of the oldest in-memory
|
||||
// layer becomes the WAL streaming starting point, so it better not point
|
||||
// in the middle of a WAL record.
|
||||
assert!(oldest_pending_lsn.is_aligned());
|
||||
|
||||
// Also add it to the binary heap
|
||||
let open_layer_entry = OpenLayerEntry {
|
||||
oldest_pending_lsn: layer.get_oldest_pending_lsn(),
|
||||
layer,
|
||||
generation: self.current_generation,
|
||||
};
|
||||
self.open_segs.push(opensegentry);
|
||||
self.open_layers.push(open_layer_entry);
|
||||
|
||||
NUM_INMEMORY_LAYERS.inc();
|
||||
}
|
||||
|
||||
/// Remove the oldest in-memory layer
|
||||
pub fn pop_oldest_open(&mut self) {
|
||||
let opensegentry = self.open_segs.pop().unwrap();
|
||||
let segtag = opensegentry.layer.get_seg_tag();
|
||||
// Pop it from the binary heap
|
||||
let oldest_entry = self.open_layers.pop().unwrap();
|
||||
let segtag = oldest_entry.layer.get_seg_tag();
|
||||
|
||||
// Also remove it from the SegEntry of this segment
|
||||
let mut segentry = self.segs.get_mut(&segtag).unwrap();
|
||||
segentry.open = None;
|
||||
if Arc::ptr_eq(segentry.open.as_ref().unwrap(), &oldest_entry.layer) {
|
||||
segentry.open = None;
|
||||
} else {
|
||||
// We could have already updated segentry.open for
|
||||
// dropped (non-writeable) layer. This is fine.
|
||||
assert!(!oldest_entry.layer.is_writeable());
|
||||
assert!(oldest_entry.layer.is_dropped());
|
||||
}
|
||||
|
||||
NUM_INMEMORY_LAYERS.dec();
|
||||
}
|
||||
|
||||
@@ -178,21 +121,9 @@ impl LayerMap {
|
||||
/// Insert an on-disk layer
|
||||
///
|
||||
pub fn insert_historic(&mut self, layer: Arc<dyn Layer>) {
|
||||
let tag = layer.get_seg_tag();
|
||||
let start_lsn = layer.get_start_lsn();
|
||||
let segentry = self.segs.entry(layer.get_seg_tag()).or_default();
|
||||
segentry.insert_historic(layer);
|
||||
|
||||
if let Some(segentry) = self.segs.get_mut(&tag) {
|
||||
segentry.historic.insert(start_lsn, layer);
|
||||
} else {
|
||||
let mut historic = BTreeMap::new();
|
||||
historic.insert(start_lsn, layer);
|
||||
|
||||
let segentry = SegEntry {
|
||||
open: None,
|
||||
historic,
|
||||
};
|
||||
self.segs.insert(tag, segentry);
|
||||
}
|
||||
NUM_ONDISK_LAYERS.inc();
|
||||
}
|
||||
|
||||
@@ -201,62 +132,40 @@ impl LayerMap {
|
||||
///
|
||||
/// This should be called when the corresponding file on disk has been deleted.
|
||||
///
|
||||
pub fn remove_historic(&mut self, layer: &dyn Layer) {
|
||||
pub fn remove_historic(&mut self, layer: Arc<dyn Layer>) {
|
||||
let tag = layer.get_seg_tag();
|
||||
let start_lsn = layer.get_start_lsn();
|
||||
|
||||
if let Some(segentry) = self.segs.get_mut(&tag) {
|
||||
segentry.historic.remove(&start_lsn);
|
||||
segentry.historic.remove(&layer);
|
||||
}
|
||||
NUM_ONDISK_LAYERS.dec();
|
||||
}
|
||||
|
||||
// List relations that exist at the lsn
|
||||
pub fn list_rels(&self, spcnode: u32, dbnode: u32, lsn: Lsn) -> Result<HashSet<RelTag>> {
|
||||
let mut rels: HashSet<RelTag> = HashSet::new();
|
||||
// List relations along with a flag that marks if they exist at the given lsn.
|
||||
// spcnode 0 and dbnode 0 are special and mean all tablespaces/databases.
|
||||
// Pass a RelTag if we're only interested in matching relations.
|
||||
pub fn list_relishes(&self, tag: Option<RelTag>, lsn: Lsn) -> Result<HashMap<RelishTag, bool>> {
|
||||
let mut rels: HashMap<RelishTag, bool> = HashMap::new();
|
||||
|
||||
for (seg, segentry) in self.segs.iter() {
|
||||
if let RelishTag::Relation(reltag) = seg.rel {
|
||||
if (spcnode == 0 || reltag.spcnode == spcnode)
|
||||
&& (dbnode == 0 || reltag.dbnode == dbnode)
|
||||
{
|
||||
// Add only if it exists at the requested LSN.
|
||||
if let Some(open) = &segentry.open {
|
||||
if open.get_end_lsn() > lsn {
|
||||
rels.insert(reltag);
|
||||
match seg.rel {
|
||||
RelishTag::Relation(reltag) => {
|
||||
if let Some(request_rel) = tag {
|
||||
if (request_rel.spcnode == 0 || reltag.spcnode == request_rel.spcnode)
|
||||
&& (request_rel.dbnode == 0 || reltag.dbnode == request_rel.dbnode)
|
||||
{
|
||||
if let Some(exists) = segentry.exists_at_lsn(lsn)? {
|
||||
rels.insert(seg.rel, exists);
|
||||
}
|
||||
}
|
||||
} else if let Some((_k, _v)) = segentry
|
||||
.historic
|
||||
.range((Included(Lsn(0)), Included(lsn)))
|
||||
.next_back()
|
||||
{
|
||||
rels.insert(reltag);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(rels)
|
||||
}
|
||||
|
||||
// List non-relation relishes that exist at the lsn
|
||||
pub fn list_nonrels(&self, lsn: Lsn) -> Result<HashSet<RelishTag>> {
|
||||
let mut rels: HashSet<RelishTag> = HashSet::new();
|
||||
|
||||
// Scan the timeline directory to get all rels in this timeline.
|
||||
for (seg, segentry) in self.segs.iter() {
|
||||
if let RelishTag::Relation(_) = seg.rel {
|
||||
} else {
|
||||
// Add only if it exists at the requested LSN.
|
||||
if let Some(open) = &segentry.open {
|
||||
if open.get_end_lsn() > lsn {
|
||||
rels.insert(seg.rel);
|
||||
_ => {
|
||||
if tag == None {
|
||||
if let Some(exists) = segentry.exists_at_lsn(lsn)? {
|
||||
rels.insert(seg.rel, exists);
|
||||
}
|
||||
}
|
||||
} else if let Some((_k, _v)) = segentry
|
||||
.historic
|
||||
.range((Included(Lsn(0)), Included(lsn)))
|
||||
.next_back()
|
||||
{
|
||||
rels.insert(seg.rel);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -269,42 +178,31 @@ impl LayerMap {
|
||||
/// be deleted.
|
||||
pub fn newer_image_layer_exists(&self, seg: SegmentTag, lsn: Lsn) -> bool {
|
||||
if let Some(segentry) = self.segs.get(&seg) {
|
||||
// We only check on-disk layers, because
|
||||
// in-memory layers are not durable
|
||||
for (newer_lsn, layer) in segentry
|
||||
.historic
|
||||
.range((Included(lsn), Included(Lsn(u64::MAX))))
|
||||
{
|
||||
// Ignore layers that depend on an older layer.
|
||||
if layer.is_incremental() {
|
||||
continue;
|
||||
}
|
||||
if layer.get_end_lsn() > lsn {
|
||||
trace!(
|
||||
"found later layer for {}, {} {}-{}",
|
||||
seg,
|
||||
lsn,
|
||||
newer_lsn,
|
||||
layer.get_end_lsn()
|
||||
);
|
||||
return true;
|
||||
} else {
|
||||
trace!("found singleton layer for {}, {} {}", seg, lsn, newer_lsn);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
segentry.newer_image_layer_exists(lsn)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
trace!("no later layer found for {}, {}", seg, lsn);
|
||||
false
|
||||
}
|
||||
|
||||
/// Is there any layer for given segment that is alive at the lsn?
|
||||
///
|
||||
/// This is a public wrapper for a SegEntry function,
|
||||
/// used for garbage collection, to determine if some alive layer
|
||||
/// exists at the lsn. If so, we shouldn't delete a newer dropped layer
|
||||
/// to avoid incorrectly making it visible.
|
||||
pub fn layer_exists_at_lsn(&self, seg: SegmentTag, lsn: Lsn) -> Result<bool> {
|
||||
Ok(if let Some(segentry) = self.segs.get(&seg) {
|
||||
segentry.exists_at_lsn(lsn)?.unwrap_or(false)
|
||||
} else {
|
||||
false
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the oldest in-memory layer, along with its generation number.
|
||||
pub fn peek_oldest_open(&self) -> Option<(Arc<InMemoryLayer>, u64)> {
|
||||
if let Some(opensegentry) = self.open_segs.peek() {
|
||||
Some((Arc::clone(&opensegentry.layer), opensegentry.generation))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
self.open_layers
|
||||
.peek()
|
||||
.map(|oldest_entry| (Arc::clone(&oldest_entry.layer), oldest_entry.generation))
|
||||
}
|
||||
|
||||
/// Increment the generation number used to stamp open in-memory layers. Layers
|
||||
@@ -317,7 +215,7 @@ impl LayerMap {
|
||||
|
||||
pub fn iter_historic_layers(&self) -> HistoricLayerIter {
|
||||
HistoricLayerIter {
|
||||
segiter: self.segs.iter(),
|
||||
seg_iter: self.segs.iter(),
|
||||
iter: None,
|
||||
}
|
||||
}
|
||||
@@ -331,7 +229,7 @@ impl LayerMap {
|
||||
open.dump()?;
|
||||
}
|
||||
|
||||
for (_, layer) in segentry.historic.iter() {
|
||||
for layer in segentry.historic.iter() {
|
||||
layer.dump()?;
|
||||
}
|
||||
}
|
||||
@@ -340,19 +238,114 @@ impl LayerMap {
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LayerMap {
|
||||
fn default() -> Self {
|
||||
LayerMap {
|
||||
segs: HashMap::new(),
|
||||
open_segs: BinaryHeap::new(),
|
||||
current_generation: 0,
|
||||
}
|
||||
impl IntervalItem for dyn Layer {
|
||||
type Key = Lsn;
|
||||
|
||||
fn start_key(&self) -> Lsn {
|
||||
self.get_start_lsn()
|
||||
}
|
||||
fn end_key(&self) -> Lsn {
|
||||
self.get_end_lsn()
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Per-segment entry in the LayerMap::segs hash map. Holds all the layers
|
||||
/// associated with the segment.
|
||||
///
|
||||
/// The last layer that is open for writes is always an InMemoryLayer,
|
||||
/// and is kept in a separate field, because there can be only one for
|
||||
/// each segment. The older layers, stored on disk, are kept in an
|
||||
/// IntervalTree.
|
||||
#[derive(Default)]
|
||||
struct SegEntry {
|
||||
open: Option<Arc<InMemoryLayer>>,
|
||||
historic: IntervalTree<dyn Layer>,
|
||||
}
|
||||
|
||||
impl SegEntry {
|
||||
/// Does the segment exist at given LSN?
|
||||
/// Return None if object is not found in this SegEntry.
|
||||
fn exists_at_lsn(&self, lsn: Lsn) -> Result<Option<bool>> {
|
||||
if let Some(layer) = self.get(lsn) {
|
||||
Ok(Some(layer.get_seg_exists(lsn)?))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(&self, lsn: Lsn) -> Option<Arc<dyn Layer>> {
|
||||
if let Some(open) = &self.open {
|
||||
if open.get_start_lsn() <= lsn {
|
||||
let x: Arc<dyn Layer> = Arc::clone(open) as _;
|
||||
return Some(x);
|
||||
}
|
||||
}
|
||||
|
||||
self.historic.search(lsn)
|
||||
}
|
||||
|
||||
pub fn newer_image_layer_exists(&self, lsn: Lsn) -> bool {
|
||||
// We only check on-disk layers, because
|
||||
// in-memory layers are not durable
|
||||
|
||||
self.historic
|
||||
.iter_newer(lsn)
|
||||
.any(|layer| !layer.is_incremental())
|
||||
}
|
||||
|
||||
// Set a new open layer for this SegEntry.
|
||||
// It's OK to replace the previous open layer,
|
||||
// but only if it is not writeable anymore.
|
||||
pub fn update_open(&mut self, layer: Arc<InMemoryLayer>) {
|
||||
if let Some(prev_open) = &self.open {
|
||||
assert!(!prev_open.is_writeable());
|
||||
}
|
||||
self.open = Some(layer);
|
||||
}
|
||||
|
||||
pub fn insert_historic(&mut self, layer: Arc<dyn Layer>) {
|
||||
self.historic.insert(layer);
|
||||
}
|
||||
}
|
||||
|
||||
/// Entry held in LayerMap::open_layers, with boilerplate comparison routines
|
||||
/// to implement a min-heap ordered by 'oldest_pending_lsn' and 'generation'
|
||||
///
|
||||
/// The generation number associated with each entry can be used to distinguish
|
||||
/// recently-added entries (i.e after last call to increment_generation()) from older
|
||||
/// entries with the same 'oldest_pending_lsn'.
|
||||
struct OpenLayerEntry {
|
||||
pub oldest_pending_lsn: Lsn, // copy of layer.get_oldest_pending_lsn()
|
||||
pub generation: u64,
|
||||
pub layer: Arc<InMemoryLayer>,
|
||||
}
|
||||
impl Ord for OpenLayerEntry {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
|
||||
// to get that. Entries with identical oldest_pending_lsn are ordered by generation
|
||||
other
|
||||
.oldest_pending_lsn
|
||||
.cmp(&self.oldest_pending_lsn)
|
||||
.then_with(|| other.generation.cmp(&self.generation))
|
||||
}
|
||||
}
|
||||
impl PartialOrd for OpenLayerEntry {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
impl PartialEq for OpenLayerEntry {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.cmp(other) == Ordering::Equal
|
||||
}
|
||||
}
|
||||
impl Eq for OpenLayerEntry {}
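// Illustrative sketch, not part of the original diff: the same "reverse the
// comparison to turn BinaryHeap's max-heap into a min-heap" idea, shown with a
// plain (lsn, generation) tuple so it can stand alone without an InMemoryLayer.
// For simple Ord payloads, std::cmp::Reverse gives the reversed ordering without
// a hand-written impl; OpenLayerEntry needs the manual impl because it carries an
// Arc<InMemoryLayer>, which is not Ord.
#[allow(dead_code)]
fn min_heap_ordering_sketch() {
    use std::cmp::Reverse;

    let mut heap = BinaryHeap::new();
    heap.push(Reverse((0x200u64, 1u64)));
    heap.push(Reverse((0x100, 1)));
    heap.push(Reverse((0x100, 2)));

    // Entries pop out ordered by (lsn, generation), smallest first.
    assert_eq!(heap.pop(), Some(Reverse((0x100, 1))));
    assert_eq!(heap.pop(), Some(Reverse((0x100, 2))));
    assert_eq!(heap.pop(), Some(Reverse((0x200, 1))));
}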
|
||||
|
||||
/// Iterator returned by LayerMap::iter_historic_layers()
|
||||
pub struct HistoricLayerIter<'a> {
|
||||
segiter: std::collections::hash_map::Iter<'a, SegmentTag, SegEntry>,
|
||||
iter: Option<std::collections::btree_map::Iter<'a, Lsn, Arc<dyn Layer>>>,
|
||||
seg_iter: std::collections::hash_map::Iter<'a, SegmentTag, SegEntry>,
|
||||
iter: Option<IntervalIter<'a, dyn Layer>>,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for HistoricLayerIter<'a> {
|
||||
@@ -362,11 +355,11 @@ impl<'a> Iterator for HistoricLayerIter<'a> {
|
||||
loop {
|
||||
if let Some(x) = &mut self.iter {
|
||||
if let Some(x) = x.next() {
|
||||
return Some(Arc::clone(&*x.1));
|
||||
return Some(Arc::clone(&x));
|
||||
}
|
||||
}
|
||||
if let Some(seg) = self.segiter.next() {
|
||||
self.iter = Some(seg.1.historic.iter());
|
||||
if let Some((_tag, segentry)) = self.seg_iter.next() {
|
||||
self.iter = Some(segentry.historic.iter());
|
||||
continue;
|
||||
} else {
|
||||
return None;
|
||||
@@ -374,3 +367,78 @@ impl<'a> Iterator for HistoricLayerIter<'a> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::PageServerConf;
|
||||
use std::str::FromStr;
|
||||
use zenith_utils::zid::{ZTenantId, ZTimelineId};
|
||||
|
||||
/// Arbitrary relation tag, for testing.
|
||||
const TESTREL_A: RelishTag = RelishTag::Relation(RelTag {
|
||||
spcnode: 0,
|
||||
dbnode: 111,
|
||||
relnode: 1000,
|
||||
forknum: 0,
|
||||
});
|
||||
|
||||
/// Construct a dummy InMemoryLayer for testing
|
||||
fn dummy_inmem_layer(
|
||||
conf: &'static PageServerConf,
|
||||
segno: u32,
|
||||
start_lsn: Lsn,
|
||||
oldest_pending_lsn: Lsn,
|
||||
) -> Arc<InMemoryLayer> {
|
||||
Arc::new(
|
||||
InMemoryLayer::create(
|
||||
conf,
|
||||
ZTimelineId::from_str("00000000000000000000000000000000").unwrap(),
|
||||
ZTenantId::from_str("00000000000000000000000000000000").unwrap(),
|
||||
SegmentTag {
|
||||
rel: TESTREL_A,
|
||||
segno,
|
||||
},
|
||||
start_lsn,
|
||||
oldest_pending_lsn,
|
||||
)
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_open_layers() -> Result<()> {
|
||||
let conf = PageServerConf::dummy_conf(PageServerConf::test_repo_dir("dummy_inmem_layer"));
|
||||
let conf = Box::leak(Box::new(conf));
|
||||
|
||||
let mut layers = LayerMap::default();
|
||||
|
||||
let gen1 = layers.increment_generation();
|
||||
layers.insert_open(dummy_inmem_layer(conf, 0, Lsn(0x100), Lsn(0x100)));
|
||||
layers.insert_open(dummy_inmem_layer(conf, 1, Lsn(0x100), Lsn(0x200)));
|
||||
layers.insert_open(dummy_inmem_layer(conf, 2, Lsn(0x100), Lsn(0x120)));
|
||||
layers.insert_open(dummy_inmem_layer(conf, 3, Lsn(0x100), Lsn(0x110)));
|
||||
|
||||
let gen2 = layers.increment_generation();
|
||||
layers.insert_open(dummy_inmem_layer(conf, 4, Lsn(0x100), Lsn(0x110)));
|
||||
layers.insert_open(dummy_inmem_layer(conf, 5, Lsn(0x100), Lsn(0x100)));
|
||||
|
||||
// A helper function (closure) to pop the next oldest open entry from the layer map,
|
||||
// and assert that it is what we'd expect
|
||||
let mut assert_pop_layer = |expected_segno: u32, expected_generation: u64| {
|
||||
let (l, generation) = layers.peek_oldest_open().unwrap();
|
||||
assert!(l.get_seg_tag().segno == expected_segno);
|
||||
assert!(generation == expected_generation);
|
||||
layers.pop_oldest_open();
|
||||
};
|
||||
|
||||
assert_pop_layer(0, gen1); // 0x100
|
||||
assert_pop_layer(5, gen2); // 0x100
|
||||
assert_pop_layer(3, gen1); // 0x110
|
||||
assert_pop_layer(4, gen2); // 0x110
|
||||
assert_pop_layer(2, gen1); // 0x120
|
||||
assert_pop_layer(1, gen1); // 0x200
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
172
pageserver/src/layered_repository/page_versions.rs
Normal file
@@ -0,0 +1,172 @@
|
||||
use std::{collections::HashMap, ops::RangeBounds};
|
||||
|
||||
use zenith_utils::{accum::Accum, lsn::Lsn, vec_map::VecMap};
|
||||
|
||||
use super::storage_layer::PageVersion;
|
||||
|
||||
const EMPTY_SLICE: &[(Lsn, PageVersion)] = &[];
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct PageVersions(HashMap<u32, VecMap<Lsn, PageVersion>>);
|
||||
|
||||
impl PageVersions {
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.is_empty()
|
||||
}
|
||||
|
||||
pub fn append_or_update_last(
|
||||
&mut self,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
page_version: PageVersion,
|
||||
) -> Option<PageVersion> {
|
||||
let map = self.0.entry(blknum).or_insert_with(VecMap::default);
|
||||
map.append_or_update_last(lsn, page_version).unwrap()
|
||||
}
|
||||
|
||||
/// Get all [`PageVersion`]s in a block
|
||||
pub fn get_block_slice(&self, blknum: u32) -> &[(Lsn, PageVersion)] {
|
||||
self.0
|
||||
.get(&blknum)
|
||||
.map(VecMap::as_slice)
|
||||
.unwrap_or(EMPTY_SLICE)
|
||||
}
|
||||
|
||||
/// Get a range of [`PageVersions`] in a block
|
||||
pub fn get_block_lsn_range<R: RangeBounds<Lsn>>(
|
||||
&self,
|
||||
blknum: u32,
|
||||
range: R,
|
||||
) -> &[(Lsn, PageVersion)] {
|
||||
self.0
|
||||
.get(&blknum)
|
||||
.map(|vec_map| vec_map.slice_range(range))
|
||||
.unwrap_or(EMPTY_SLICE)
|
||||
}
|
||||
|
||||
/// Split the page version map into two.
|
||||
///
|
||||
/// Left contains everything up to and not including [`cutoff_lsn`].
|
||||
/// Right contains [`cutoff_lsn`] and everything after.
|
||||
pub fn split_at(&self, cutoff_lsn: Lsn, after_oldest_lsn: &mut Accum<Lsn>) -> (Self, Self) {
|
||||
let mut before_blocks = HashMap::new();
|
||||
let mut after_blocks = HashMap::new();
|
||||
|
||||
for (blknum, vec_map) in self.0.iter() {
|
||||
let (before_versions, after_versions) = vec_map.split_at(&cutoff_lsn);
|
||||
|
||||
if !before_versions.is_empty() {
|
||||
let old = before_blocks.insert(*blknum, before_versions);
|
||||
assert!(old.is_none());
|
||||
}
|
||||
|
||||
if !after_versions.is_empty() {
|
||||
let (first_lsn, _first_pv) = &after_versions.as_slice()[0];
|
||||
after_oldest_lsn.accum(std::cmp::min, *first_lsn);
|
||||
|
||||
let old = after_blocks.insert(*blknum, after_versions);
|
||||
assert!(old.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
(Self(before_blocks), Self(after_blocks))
|
||||
}
|
||||
|
||||
/// Iterate through block-history pairs in block order.
|
||||
/// If a [`cutoff_lsn`] is set, only include history with `lsn < cutoff_lsn`
|
||||
pub fn ordered_block_iter(&self, cutoff_lsn: Option<Lsn>) -> OrderedBlockIter<'_> {
|
||||
let mut ordered_blocks: Vec<u32> = self.0.keys().cloned().collect();
|
||||
ordered_blocks.sort_unstable();
|
||||
|
||||
OrderedBlockIter {
|
||||
page_versions: self,
|
||||
ordered_blocks,
|
||||
cur_block_idx: 0,
|
||||
cutoff_lsn,
|
||||
}
|
||||
}
|
||||
}
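// Illustrative sketch, not part of the original diff: per-block histories are
// kept in LSN order, and get_block_lsn_range() returns a borrowed slice of the
// versions whose LSN falls in the requested range (here, strictly below Lsn(3)).
#[allow(dead_code)]
fn page_versions_sketch() {
    let mut pv = PageVersions::default();
    for lsn in 0..5u64 {
        let empty = PageVersion {
            page_image: None,
            record: None,
        };
        let old = pv.append_or_update_last(7, Lsn(lsn), empty);
        assert!(old.is_none());
    }

    // Only the versions with LSN 0, 1 and 2 fall below the cutoff.
    assert_eq!(pv.get_block_lsn_range(7, ..Lsn(3)).len(), 3);
}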
|
||||
|
||||
pub struct OrderedBlockIter<'a> {
|
||||
page_versions: &'a PageVersions,
|
||||
|
||||
ordered_blocks: Vec<u32>,
|
||||
cur_block_idx: usize,
|
||||
|
||||
cutoff_lsn: Option<Lsn>,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for OrderedBlockIter<'a> {
|
||||
type Item = (u32, &'a [(Lsn, PageVersion)]);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
while self.cur_block_idx < self.ordered_blocks.len() {
|
||||
let blknum = self.ordered_blocks[self.cur_block_idx];
|
||||
self.cur_block_idx += 1;
|
||||
|
||||
if let Some(cutoff_lsn) = self.cutoff_lsn.as_ref() {
|
||||
let slice = self.page_versions.get_block_lsn_range(blknum, ..cutoff_lsn);
|
||||
if !slice.is_empty() {
|
||||
return Some((blknum, slice));
|
||||
}
|
||||
} else {
|
||||
return Some((blknum, self.page_versions.get_block_slice(blknum)));
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
const EMPTY_PAGE_VERSION: PageVersion = PageVersion {
|
||||
page_image: None,
|
||||
record: None,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_ordered_iter() {
|
||||
let mut page_versions = PageVersions::default();
|
||||
const BLOCKS: u32 = 1000;
|
||||
const LSNS: u64 = 50;
|
||||
|
||||
for blknum in 0..BLOCKS {
|
||||
for lsn in 0..LSNS {
|
||||
let old = page_versions.append_or_update_last(blknum, Lsn(lsn), EMPTY_PAGE_VERSION);
|
||||
assert!(old.is_none());
|
||||
}
|
||||
}
|
||||
|
||||
let mut iter = page_versions.ordered_block_iter(None);
|
||||
for blknum in 0..BLOCKS {
|
||||
let (actual_blknum, slice) = iter.next().unwrap();
|
||||
assert_eq!(actual_blknum, blknum);
|
||||
assert_eq!(slice.len(), LSNS as usize);
|
||||
for lsn in 0..LSNS {
|
||||
assert_eq!(Lsn(lsn), slice[lsn as usize].0);
|
||||
}
|
||||
}
|
||||
assert!(iter.next().is_none());
|
||||
assert!(iter.next().is_none()); // should be robust against excessive next() calls
|
||||
|
||||
const CUTOFF_LSN: Lsn = Lsn(30);
|
||||
let mut iter = page_versions.ordered_block_iter(Some(CUTOFF_LSN));
|
||||
for blknum in 0..BLOCKS {
|
||||
let (actual_blknum, slice) = iter.next().unwrap();
|
||||
assert_eq!(actual_blknum, blknum);
|
||||
assert_eq!(slice.len(), CUTOFF_LSN.0 as usize);
|
||||
for lsn in 0..CUTOFF_LSN.0 {
|
||||
assert_eq!(Lsn(lsn), slice[lsn as usize].0);
|
||||
}
|
||||
}
|
||||
assert!(iter.next().is_none());
|
||||
assert!(iter.next().is_none()); // should be robust against excessive next() calls
|
||||
|
||||
let mut iter = page_versions.ordered_block_iter(Some(Lsn(0)));
|
||||
assert!(iter.next().is_none());
|
||||
assert!(iter.next().is_none()); // should be robust against excessive next() calls
|
||||
}
|
||||
}
|
||||
@@ -10,7 +10,6 @@ use bytes::Bytes;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
@@ -21,7 +20,7 @@ pub const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192;
|
||||
/// Each relish stored in the repository is divided into fixed-sized "segments",
|
||||
/// with 10 MB of key-space, or 1280 8k pages each.
|
||||
///
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy)]
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy, Serialize, Deserialize)]
|
||||
pub struct SegmentTag {
|
||||
pub rel: RelishTag,
|
||||
pub segno: u32,
|
||||
@@ -87,9 +86,9 @@ pub struct PageReconstructData {
|
||||
pub enum PageReconstructResult {
|
||||
/// Got all the data needed to reconstruct the requested page
|
||||
Complete,
|
||||
/// This layer didn't contain all the required data, the caller should collect
|
||||
/// more data from the returned predecessor layer at the returned LSN.
|
||||
Continue(Lsn, Arc<dyn Layer>),
|
||||
/// This layer didn't contain all the required data, the caller should look up
|
||||
/// the predecessor layer at the returned LSN and collect more data from there.
|
||||
Continue(Lsn),
|
||||
/// This layer didn't contain data needed to reconstruct the page version at
|
||||
/// the returned LSN. This is usually considered an error, but might be OK
|
||||
/// in some circumstances.
|
||||
@@ -111,20 +110,23 @@ pub trait Layer: Send + Sync {
|
||||
/// Identify the relish segment
|
||||
fn get_seg_tag(&self) -> SegmentTag;
|
||||
|
||||
/// Inclusive start bound of the LSN range that this layer hold
|
||||
/// Inclusive start bound of the LSN range that this layer holds
|
||||
fn get_start_lsn(&self) -> Lsn;
|
||||
|
||||
/// 'end_lsn' meaning depends on the layer kind:
|
||||
/// - in-memory layer is either unbounded (end_lsn = MAX_LSN) or dropped (end_lsn = drop_lsn)
|
||||
/// - image layer represents snapshot at one LSN, so end_lsn = lsn
|
||||
/// - delta layer has end_lsn
|
||||
/// Exclusive end bound of the LSN range that this layer holds.
|
||||
///
|
||||
/// TODO Is end_lsn always exclusive for all layer kinds?
|
||||
/// - For an open in-memory layer, this is MAX_LSN.
|
||||
/// - For a frozen in-memory layer or a delta layer, this is a valid end bound.
|
||||
/// - An image layer represents snapshot at one LSN, so end_lsn is always the snapshot LSN + 1
|
||||
fn get_end_lsn(&self) -> Lsn;
|
||||
|
||||
/// Is the segment represented by this layer dropped by PostgreSQL?
|
||||
fn is_dropped(&self) -> bool;
|
||||
|
||||
/// Gets the physical location of the layer on disk.
|
||||
/// Some layers, such as in-memory, might not have the location.
|
||||
fn path(&self) -> Option<PathBuf>;
|
||||
|
||||
/// Filename used to store this layer on disk. (Even in-memory layers
|
||||
/// implement this, to print a handy unique identifier for the layer for
|
||||
/// log messages, even though they're never stored on disk.)
|
||||
@@ -142,8 +144,8 @@ pub trait Layer: Send + Sync {
|
||||
///
|
||||
/// See PageReconstructResult for possible return values. The collected data
|
||||
/// is appended to reconstruct_data; the caller should pass an empty struct
|
||||
/// on first call. If this returns PageReconstructResult::Continue, call
|
||||
/// again on the returned predecessor layer with the same 'reconstruct_data'
|
||||
/// on first call. If this returns PageReconstructResult::Continue, look up
|
||||
/// the predecessor layer and call again with the same 'reconstruct_data'
|
||||
/// to collect more data.
|
||||
fn get_page_reconstruct_data(
|
||||
&self,
|
||||
|
||||
@@ -11,9 +11,9 @@ pub mod basebackup;
|
||||
pub mod branches;
|
||||
pub mod http;
|
||||
pub mod layered_repository;
|
||||
pub mod logger;
|
||||
pub mod page_service;
|
||||
pub mod relish;
|
||||
pub mod relish_storage;
|
||||
pub mod repository;
|
||||
pub mod restore_local_repo;
|
||||
pub mod tenant_mgr;
|
||||
@@ -21,6 +21,28 @@ pub mod waldecoder;
|
||||
pub mod walreceiver;
|
||||
pub mod walredo;
|
||||
|
||||
pub mod defaults {
|
||||
use const_format::formatcp;
|
||||
use std::time::Duration;
|
||||
|
||||
pub const DEFAULT_PG_LISTEN_PORT: u16 = 64000;
|
||||
pub const DEFAULT_PG_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_PG_LISTEN_PORT}");
|
||||
pub const DEFAULT_HTTP_LISTEN_PORT: u16 = 9898;
|
||||
pub const DEFAULT_HTTP_LISTEN_ADDR: &str = formatcp!("127.0.0.1:{DEFAULT_HTTP_LISTEN_PORT}");
|
||||
|
||||
// FIXME: This current value is very low. I would imagine something like 1 GB or 10 GB
|
||||
// would be more appropriate. But a low value forces the code to be exercised more,
|
||||
// which is good for now to trigger bugs.
|
||||
pub const DEFAULT_CHECKPOINT_DISTANCE: u64 = 256 * 1024 * 1024;
|
||||
pub const DEFAULT_CHECKPOINT_PERIOD: Duration = Duration::from_secs(1);
|
||||
|
||||
pub const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024;
|
||||
pub const DEFAULT_GC_PERIOD: Duration = Duration::from_secs(100);
|
||||
|
||||
pub const DEFAULT_SUPERUSER: &str = "zenith_admin";
|
||||
pub const DEFAULT_RELISH_STORAGE_MAX_CONCURRENT_SYNC_LIMITS: usize = 100;
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref LIVE_CONNECTIONS_COUNT: IntGaugeVec = register_int_gauge_vec!(
|
||||
"pageserver_live_connections_count",
|
||||
@@ -30,11 +52,19 @@ lazy_static! {
|
||||
.expect("failed to define a metric");
|
||||
}
|
||||
|
||||
pub const LOG_FILE_NAME: &str = "pageserver.log";
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PageServerConf {
|
||||
pub daemonize: bool,
|
||||
pub listen_addr: String,
|
||||
pub http_endpoint_addr: String,
|
||||
pub listen_pg_addr: String,
|
||||
pub listen_http_addr: String,
|
||||
// Flush out an inmemory layer, if it's holding WAL older than this
|
||||
// This puts a backstop on how much WAL needs to be re-digested if the
|
||||
// page server crashes.
|
||||
pub checkpoint_distance: u64,
|
||||
pub checkpoint_period: Duration,
|
||||
|
||||
pub gc_horizon: u64,
|
||||
pub gc_period: Duration,
|
||||
pub superuser: String,
|
||||
@@ -52,6 +82,7 @@ pub struct PageServerConf {
|
||||
pub auth_type: AuthType,
|
||||
|
||||
pub auth_validation_public_key_path: Option<PathBuf>,
|
||||
pub relish_storage_config: Option<RelishStorageConfig>,
|
||||
}
|
||||
|
||||
impl PageServerConf {
|
||||
@@ -95,10 +126,6 @@ impl PageServerConf {
|
||||
self.timeline_path(timelineid, tenantid).join("ancestor")
|
||||
}
|
||||
|
||||
fn wal_dir_path(&self, timelineid: &ZTimelineId, tenantid: &ZTenantId) -> PathBuf {
|
||||
self.timeline_path(timelineid, tenantid).join("wal")
|
||||
}
|
||||
|
||||
//
|
||||
// Postgres distribution paths
|
||||
//
|
||||
@@ -110,4 +137,73 @@ impl PageServerConf {
|
||||
pub fn pg_lib_dir(&self) -> PathBuf {
|
||||
self.pg_distrib_dir.join("lib")
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn test_repo_dir(test_name: &str) -> PathBuf {
|
||||
PathBuf::from(format!("../tmp_check/test_{}", test_name))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn dummy_conf(repo_dir: PathBuf) -> Self {
|
||||
PageServerConf {
|
||||
daemonize: false,
|
||||
checkpoint_distance: defaults::DEFAULT_CHECKPOINT_DISTANCE,
|
||||
checkpoint_period: Duration::from_secs(10),
|
||||
gc_horizon: defaults::DEFAULT_GC_HORIZON,
|
||||
gc_period: Duration::from_secs(10),
|
||||
listen_pg_addr: defaults::DEFAULT_PG_LISTEN_ADDR.to_string(),
|
||||
listen_http_addr: defaults::DEFAULT_HTTP_LISTEN_ADDR.to_string(),
|
||||
superuser: "zenith_admin".to_string(),
|
||||
workdir: repo_dir,
|
||||
pg_distrib_dir: "".into(),
|
||||
auth_type: AuthType::Trust,
|
||||
auth_validation_public_key_path: None,
|
||||
relish_storage_config: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// External relish storage configuration, enough for creating a client for that storage.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RelishStorageConfig {
|
||||
/// Limits the number of concurrent sync operations between pageserver and relish storage.
|
||||
pub max_concurrent_sync: usize,
|
||||
/// The storage connection configuration.
|
||||
pub storage: RelishStorageKind,
|
||||
}
|
||||
|
||||
/// A kind of a relish storage to connect to, with its connection configuration.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum RelishStorageKind {
|
||||
/// Storage based on local file system.
|
||||
/// Specify a root folder to place all stored relish data into.
|
||||
LocalFs(PathBuf),
|
||||
/// AWS S3 based storage, storing all relishes into the root
|
||||
/// of the S3 bucket from the config.
|
||||
AwsS3(S3Config),
|
||||
}
|
||||
|
||||
/// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write).
|
||||
#[derive(Clone)]
|
||||
pub struct S3Config {
|
||||
/// Name of the bucket to connect to.
|
||||
pub bucket_name: String,
|
||||
/// The region where the bucket is located at.
|
||||
pub bucket_region: String,
|
||||
/// "Login" to use when connecting to bucket.
|
||||
/// Can be empty for cases like AWS k8s IAM
|
||||
/// where we can allow certain pods to connect
|
||||
/// to the bucket directly without any credentials.
|
||||
pub access_key_id: Option<String>,
|
||||
/// "Password" to use when connecting to bucket.
|
||||
pub secret_access_key: Option<String>,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for S3Config {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("S3Config")
|
||||
.field("bucket_name", &self.bucket_name)
|
||||
.field("bucket_region", &self.bucket_region)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
use crate::PageServerConf;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use slog::{Drain, FnValue};
|
||||
use std::fs::{File, OpenOptions};
|
||||
|
||||
pub fn init_logging(
|
||||
_conf: &PageServerConf,
|
||||
log_filename: &str,
|
||||
) -> Result<(slog_scope::GlobalLoggerGuard, File)> {
|
||||
// Don't open the same file for output multiple times;
|
||||
// the different fds could overwrite each other's output.
|
||||
let log_file = OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&log_filename)
|
||||
.with_context(|| format!("failed to open {:?}", &log_filename))?;
|
||||
|
||||
let logger_file = log_file.try_clone().unwrap();
|
||||
|
||||
let decorator = slog_term::PlainSyncDecorator::new(logger_file);
|
||||
let drain = slog_term::FullFormat::new(decorator).build();
|
||||
let drain = slog::Filter::new(drain, |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Info) {
|
||||
return true;
|
||||
}
|
||||
false
|
||||
});
|
||||
let drain = std::sync::Mutex::new(drain).fuse();
|
||||
let logger = slog::Logger::root(
|
||||
drain,
|
||||
slog::o!(
|
||||
"location" =>
|
||||
FnValue(move |record| {
|
||||
format!("{}, {}:{}",
|
||||
record.module(),
|
||||
record.file(),
|
||||
record.line()
|
||||
)
|
||||
}
|
||||
)
|
||||
),
|
||||
);
|
||||
Ok((slog_scope::set_global_logger(logger), log_file))
|
||||
}
|
||||
@@ -10,7 +10,7 @@
|
||||
// *callmemaybe <zenith timelineid> $url* -- ask pageserver to start walreceiver on $url
|
||||
//
|
||||
|
||||
use anyhow::{anyhow, bail, ensure, Context, Result};
|
||||
use anyhow::{anyhow, bail, ensure, Result};
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
@@ -35,72 +35,110 @@ use zenith_utils::zid::{ZTenantId, ZTimelineId};
|
||||
use crate::basebackup;
|
||||
use crate::branches;
|
||||
use crate::relish::*;
|
||||
use crate::repository::Timeline;
|
||||
use crate::tenant_mgr;
|
||||
use crate::walreceiver;
|
||||
use crate::PageServerConf;
|
||||
|
||||
// Wrapped in libpq CopyData
|
||||
enum PagestreamFeMessage {
|
||||
Exists(PagestreamRequest),
|
||||
Nblocks(PagestreamRequest),
|
||||
Read(PagestreamRequest),
|
||||
Exists(PagestreamExistsRequest),
|
||||
Nblocks(PagestreamNblocksRequest),
|
||||
GetPage(PagestreamGetPageRequest),
|
||||
}
|
||||
|
||||
// Wrapped in libpq CopyData
|
||||
enum PagestreamBeMessage {
|
||||
Status(PagestreamStatusResponse),
|
||||
Nblocks(PagestreamStatusResponse),
|
||||
Read(PagestreamReadResponse),
|
||||
Exists(PagestreamExistsResponse),
|
||||
Nblocks(PagestreamNblocksResponse),
|
||||
GetPage(PagestreamGetPageResponse),
|
||||
Error(PagestreamErrorResponse),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PagestreamRequest {
|
||||
spcnode: u32,
|
||||
dbnode: u32,
|
||||
relnode: u32,
|
||||
forknum: u8,
|
||||
blkno: u32,
|
||||
struct PagestreamExistsRequest {
|
||||
latest: bool,
|
||||
lsn: Lsn,
|
||||
rel: RelTag,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PagestreamStatusResponse {
|
||||
ok: bool,
|
||||
struct PagestreamNblocksRequest {
|
||||
latest: bool,
|
||||
lsn: Lsn,
|
||||
rel: RelTag,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PagestreamGetPageRequest {
|
||||
latest: bool,
|
||||
lsn: Lsn,
|
||||
rel: RelTag,
|
||||
blkno: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PagestreamExistsResponse {
|
||||
exists: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PagestreamNblocksResponse {
|
||||
n_blocks: u32,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PagestreamReadResponse {
|
||||
ok: bool,
|
||||
n_blocks: u32,
|
||||
struct PagestreamGetPageResponse {
|
||||
page: Bytes,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PagestreamErrorResponse {
|
||||
message: String,
|
||||
}
|
||||
|
||||
impl PagestreamFeMessage {
|
||||
fn parse(mut body: Bytes) -> anyhow::Result<PagestreamFeMessage> {
|
||||
// TODO these gets can fail
|
||||
|
||||
let smgr_tag = body.get_u8();
|
||||
let zreq = PagestreamRequest {
|
||||
spcnode: body.get_u32(),
|
||||
dbnode: body.get_u32(),
|
||||
relnode: body.get_u32(),
|
||||
forknum: body.get_u8(),
|
||||
blkno: body.get_u32(),
|
||||
lsn: Lsn::from(body.get_u64()),
|
||||
};
|
||||
|
||||
// these correspond to the ZenithMessageTag enum in pagestore_client.h
|
||||
//
|
||||
// TODO: consider using protobuf or serde bincode for less error prone
|
||||
// serialization.
|
||||
match smgr_tag {
|
||||
0 => Ok(PagestreamFeMessage::Exists(zreq)),
|
||||
1 => Ok(PagestreamFeMessage::Nblocks(zreq)),
|
||||
2 => Ok(PagestreamFeMessage::Read(zreq)),
|
||||
_ => Err(anyhow!(
|
||||
"unknown smgr message tag: {},'{:?}'",
|
||||
smgr_tag,
|
||||
body
|
||||
)),
|
||||
let msg_tag = body.get_u8();
|
||||
match msg_tag {
|
||||
0 => Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest {
|
||||
latest: body.get_u8() != 0,
|
||||
lsn: Lsn::from(body.get_u64()),
|
||||
rel: RelTag {
|
||||
spcnode: body.get_u32(),
|
||||
dbnode: body.get_u32(),
|
||||
relnode: body.get_u32(),
|
||||
forknum: body.get_u8(),
|
||||
},
|
||||
})),
|
||||
1 => Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
|
||||
latest: body.get_u8() != 0,
|
||||
lsn: Lsn::from(body.get_u64()),
|
||||
rel: RelTag {
|
||||
spcnode: body.get_u32(),
|
||||
dbnode: body.get_u32(),
|
||||
relnode: body.get_u32(),
|
||||
forknum: body.get_u8(),
|
||||
},
|
||||
})),
|
||||
2 => Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
|
||||
latest: body.get_u8() != 0,
|
||||
lsn: Lsn::from(body.get_u64()),
|
||||
rel: RelTag {
|
||||
spcnode: body.get_u32(),
|
||||
dbnode: body.get_u32(),
|
||||
relnode: body.get_u32(),
|
||||
forknum: body.get_u8(),
|
||||
},
|
||||
blkno: body.get_u32(),
|
||||
})),
|
||||
_ => bail!("unknown smgr message tag: {},'{:?}'", msg_tag, body),
|
||||
}
|
||||
}
|
||||
}
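// Illustrative sketch, not part of the original diff: the wire layout that
// parse() expects for a GetPage request, built by hand with the BytesMut/BufMut
// already imported above. The field order mirrors the reads in parse(): message
// tag, latest flag, LSN, the four RelTag fields, then the block number. The
// concrete numbers are arbitrary example values.
#[allow(dead_code)]
fn build_and_parse_getpage_sketch() -> anyhow::Result<()> {
    let mut buf = BytesMut::new();
    buf.put_u8(2); // message tag: GetPage
    buf.put_u8(1); // latest = true
    buf.put_u64(0x1234); // lsn
    buf.put_u32(1663); // rel.spcnode
    buf.put_u32(13010); // rel.dbnode
    buf.put_u32(16384); // rel.relnode
    buf.put_u8(0); // rel.forknum
    buf.put_u32(7); // blkno

    match PagestreamFeMessage::parse(buf.freeze())? {
        PagestreamFeMessage::GetPage(req) => {
            assert!(req.latest);
            assert_eq!(req.blkno, 7);
        }
        _ => unreachable!(),
    }
    Ok(())
}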
|
||||
@@ -110,24 +148,26 @@ impl PagestreamBeMessage {
|
||||
let mut bytes = BytesMut::new();
|
||||
|
||||
match self {
|
||||
Self::Status(resp) => {
|
||||
Self::Exists(resp) => {
|
||||
bytes.put_u8(100); /* tag from pagestore_client.h */
|
||||
bytes.put_u8(resp.ok as u8);
|
||||
bytes.put_u32(resp.n_blocks);
|
||||
bytes.put_u8(resp.exists as u8);
|
||||
}
|
||||
|
||||
Self::Nblocks(resp) => {
|
||||
bytes.put_u8(101); /* tag from pagestore_client.h */
|
||||
bytes.put_u8(resp.ok as u8);
|
||||
bytes.put_u32(resp.n_blocks);
|
||||
}
|
||||
|
||||
Self::Read(resp) => {
|
||||
Self::GetPage(resp) => {
|
||||
bytes.put_u8(102); /* tag from pagestore_client.h */
|
||||
bytes.put_u8(resp.ok as u8);
|
||||
bytes.put_u32(resp.n_blocks);
|
||||
bytes.put(&resp.page[..]);
|
||||
}
|
||||
|
||||
Self::Error(resp) => {
|
||||
bytes.put_u8(103); /* tag from pagestore_client.h */
|
||||
bytes.put(resp.message.as_bytes());
|
||||
bytes.put_u8(0); // null terminator
|
||||
}
|
||||
}
|
||||
|
||||
bytes.into()
|
||||
@@ -154,7 +194,7 @@ pub fn thread_main(
|
||||
let local_auth = auth.clone();
|
||||
thread::spawn(move || {
|
||||
if let Err(err) = page_service_conn_main(conf, local_auth, socket, auth_type) {
|
||||
error!("error: {}", err);
|
||||
error!("page server thread exiting with error: {:#}", err);
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -214,14 +254,6 @@ impl PageServerHandler {
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_controlfile(&self, pgb: &mut PostgresBackend) -> io::Result<()> {
|
||||
pgb.write_message_noflush(&SINGLE_COL_ROWDESC)?
|
||||
.write_message_noflush(&BeMessage::ControlFile)?
|
||||
.write_message(&BeMessage::CommandComplete(b"SELECT 1"))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_pagerequests(
|
||||
&self,
|
||||
pgb: &mut PostgresBackend,
|
||||
@@ -229,10 +261,7 @@ impl PageServerHandler {
|
||||
tenantid: ZTenantId,
|
||||
) -> anyhow::Result<()> {
|
||||
// Check that the timeline exists
|
||||
let repository = tenant_mgr::get_repository_for_tenant(&tenantid)?;
|
||||
let timeline = repository
|
||||
.get_timeline(timelineid)
|
||||
.context(format!("error fetching timeline {}", timelineid))?;
|
||||
let timeline = tenant_mgr::get_timeline_for_tenant(tenantid, timelineid)?;
|
||||
|
||||
/* switch client to COPYBOTH */
|
||||
pgb.write_message(&BeMessage::CopyBothResponse)?;
|
||||
@@ -248,88 +277,136 @@ impl PageServerHandler {
|
||||
let zenith_fe_msg = PagestreamFeMessage::parse(copy_data_bytes)?;
|
||||
|
||||
let response = match zenith_fe_msg {
|
||||
PagestreamFeMessage::Exists(req) => {
|
||||
let rel = RelTag {
|
||||
spcnode: req.spcnode,
|
||||
dbnode: req.dbnode,
|
||||
relnode: req.relnode,
|
||||
forknum: req.forknum,
|
||||
};
|
||||
let tag = RelishTag::Relation(rel);
|
||||
|
||||
let exist = SMGR_QUERY_TIME
|
||||
.with_label_values(&["get_rel_exists"])
|
||||
.observe_closure_duration(|| {
|
||||
timeline.get_rel_exists(tag, req.lsn).unwrap_or(false)
|
||||
});
|
||||
|
||||
PagestreamBeMessage::Status(PagestreamStatusResponse {
|
||||
ok: exist,
|
||||
n_blocks: 0,
|
||||
})
|
||||
}
|
||||
PagestreamFeMessage::Nblocks(req) => {
|
||||
let rel = RelTag {
|
||||
spcnode: req.spcnode,
|
||||
dbnode: req.dbnode,
|
||||
relnode: req.relnode,
|
||||
forknum: req.forknum,
|
||||
};
|
||||
let tag = RelishTag::Relation(rel);
|
||||
|
||||
let n_blocks = SMGR_QUERY_TIME
|
||||
.with_label_values(&["get_rel_size"])
|
||||
.observe_closure_duration(|| {
|
||||
// Return 0 if relation is not found.
|
||||
// This is what postgres smgr expects.
|
||||
timeline
|
||||
.get_relish_size(tag, req.lsn)
|
||||
.unwrap_or(Some(0))
|
||||
.unwrap_or(0)
|
||||
});
|
||||
|
||||
PagestreamBeMessage::Nblocks(PagestreamStatusResponse { ok: true, n_blocks })
|
||||
}
|
||||
PagestreamFeMessage::Read(req) => {
|
||||
let rel = RelTag {
|
||||
spcnode: req.spcnode,
|
||||
dbnode: req.dbnode,
|
||||
relnode: req.relnode,
|
||||
forknum: req.forknum,
|
||||
};
|
||||
let tag = RelishTag::Relation(rel);
|
||||
|
||||
let read_response = SMGR_QUERY_TIME
|
||||
.with_label_values(&["get_page_at_lsn"])
|
||||
.observe_closure_duration(|| {
|
||||
match timeline.get_page_at_lsn(tag, req.blkno, req.lsn) {
|
||||
Ok(p) => PagestreamReadResponse {
|
||||
ok: true,
|
||||
n_blocks: 0,
|
||||
page: p,
|
||||
},
|
||||
Err(e) => {
|
||||
const ZERO_PAGE: [u8; 8192] = [0; 8192];
|
||||
error!("get_page_at_lsn: {}", e);
|
||||
PagestreamReadResponse {
|
||||
ok: false,
|
||||
n_blocks: 0,
|
||||
page: Bytes::from_static(&ZERO_PAGE),
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
PagestreamBeMessage::Read(read_response)
|
||||
}
|
||||
PagestreamFeMessage::Exists(req) => SMGR_QUERY_TIME
|
||||
.with_label_values(&["get_rel_exists"])
|
||||
.observe_closure_duration(|| {
|
||||
self.handle_get_rel_exists_request(&*timeline, &req)
|
||||
}),
|
||||
PagestreamFeMessage::Nblocks(req) => SMGR_QUERY_TIME
|
||||
.with_label_values(&["get_rel_size"])
|
||||
.observe_closure_duration(|| self.handle_get_nblocks_request(&*timeline, &req)),
|
||||
PagestreamFeMessage::GetPage(req) => SMGR_QUERY_TIME
|
||||
.with_label_values(&["get_page_at_lsn"])
|
||||
.observe_closure_duration(|| {
|
||||
self.handle_get_page_at_lsn_request(&*timeline, &req)
|
||||
}),
|
||||
};
|
||||
|
||||
let response = response.unwrap_or_else(|e| {
|
||||
// print all the details to the log with {:#}, but for the client the
|
||||
// error message is enough
|
||||
error!("error reading relation or page version: {:#}", e);
|
||||
PagestreamBeMessage::Error(PagestreamErrorResponse {
|
||||
message: e.to_string(),
|
||||
})
|
||||
});
|
||||
|
||||
pgb.write_message(&BeMessage::CopyData(&response.serialize()))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Helper function to handle the LSN from client request.
|
||||
///
|
||||
/// Each GetPage (and Exists and Nblocks) request includes information about
|
||||
/// which version of the page is being requested. The client can request the
|
||||
/// latest version of the page, or the version that's valid at a particular
|
||||
/// LSN. The primary compute node will always request the latest page
|
||||
/// version, while a standby will request a version at the LSN that it's
|
||||
/// currently caught up to.
|
||||
///
|
||||
/// In either case, if the page server hasn't received the WAL up to the
|
||||
/// requested LSN yet, we will wait for it to arrive. The return value is
|
||||
/// the LSN that should be used to look up the page versions.
|
||||
fn wait_or_get_last_lsn(timeline: &dyn Timeline, lsn: Lsn, latest: bool) -> Result<Lsn> {
|
||||
if latest {
|
||||
// Latest page version was requested. If LSN is given, it is a hint
|
||||
// to the page server that there have been no modifications to the
|
||||
// page after that LSN. If we haven't received WAL up to that point,
|
||||
// wait until it arrives.
|
||||
let last_record_lsn = timeline.get_last_record_lsn();
|
||||
|
||||
// Note: this covers the special case that lsn == Lsn(0). That
|
||||
// special case means "return the latest version whatever it is",
|
||||
// and it's used for bootstrapping purposes, when the page server is
|
||||
// connected directly to the compute node. That is needed because
|
||||
// when you connect to the compute node, to receive the WAL, the
|
||||
// walsender process will do a look up in the pg_authid catalog
|
||||
// table for authentication. That poses a deadlock problem: the
|
||||
// catalog table lookup will send a GetPage request, but the GetPage
|
||||
// request will block in the page server because the recent WAL
|
||||
// hasn't been received yet, and it cannot be received until the
|
||||
// walsender completes the authentication and starts streaming the
|
||||
// WAL.
|
||||
if lsn <= last_record_lsn {
|
||||
Ok(last_record_lsn)
|
||||
} else {
|
||||
timeline.wait_lsn(lsn)?;
|
||||
// Since we waited for 'lsn' to arrive, that is now the last
|
||||
// record LSN. (Or close enough for our purposes; the
|
||||
// last-record LSN can advance immediately after we return
|
||||
// anyway)
|
||||
Ok(lsn)
|
||||
}
|
||||
} else {
|
||||
if lsn == Lsn(0) {
|
||||
bail!("invalid LSN(0) in request");
|
||||
}
|
||||
timeline.wait_lsn(lsn)?;
|
||||
Ok(lsn)
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_get_rel_exists_request(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
req: &PagestreamExistsRequest,
|
||||
) -> Result<PagestreamBeMessage> {
|
||||
let tag = RelishTag::Relation(req.rel);
|
||||
let lsn = Self::wait_or_get_last_lsn(timeline, req.lsn, req.latest)?;
|
||||
|
||||
let exists = timeline.get_rel_exists(tag, lsn)?;
|
||||
|
||||
Ok(PagestreamBeMessage::Exists(PagestreamExistsResponse {
|
||||
exists,
|
||||
}))
|
||||
}
|
||||
|
||||
fn handle_get_nblocks_request(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
req: &PagestreamNblocksRequest,
|
||||
) -> Result<PagestreamBeMessage> {
|
||||
let tag = RelishTag::Relation(req.rel);
|
||||
let lsn = Self::wait_or_get_last_lsn(timeline, req.lsn, req.latest)?;
|
||||
|
||||
let n_blocks = timeline.get_relish_size(tag, lsn)?;
|
||||
|
||||
// Return 0 if relation is not found.
|
||||
// This is what postgres smgr expects.
|
||||
let n_blocks = n_blocks.unwrap_or(0);
|
||||
|
||||
Ok(PagestreamBeMessage::Nblocks(PagestreamNblocksResponse {
|
||||
n_blocks,
|
||||
}))
|
||||
}
|
||||
|
||||
fn handle_get_page_at_lsn_request(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
req: &PagestreamGetPageRequest,
|
||||
) -> Result<PagestreamBeMessage> {
|
||||
let tag = RelishTag::Relation(req.rel);
|
||||
let lsn = Self::wait_or_get_last_lsn(timeline, req.lsn, req.latest)?;
|
||||
|
||||
let page = timeline.get_page_at_lsn(tag, req.blkno, lsn)?;
|
||||
|
||||
Ok(PagestreamBeMessage::GetPage(PagestreamGetPageResponse {
|
||||
page,
|
||||
}))
|
||||
}
|
||||
|
||||
fn handle_basebackup_request(
|
||||
&self,
|
||||
pgb: &mut PostgresBackend,
|
||||
@@ -338,10 +415,8 @@ impl PageServerHandler {
|
||||
tenantid: ZTenantId,
|
||||
) -> anyhow::Result<()> {
|
||||
// check that the timeline exists
|
||||
let repository = tenant_mgr::get_repository_for_tenant(&tenantid)?;
|
||||
let timeline = repository
|
||||
.get_timeline(timelineid)
|
||||
.context(format!("error fetching timeline {}", timelineid))?;
|
||||
let timeline = tenant_mgr::get_timeline_for_tenant(tenantid, timelineid)?;
|
||||
|
||||
/* switch client to COPYOUT */
|
||||
pgb.write_message(&BeMessage::CopyOutResponse)?;
|
||||
info!("sent CopyOut");
|
||||
@@ -349,7 +424,7 @@ impl PageServerHandler {
|
||||
/* Send a tarball of the latest layer on the timeline */
|
||||
{
|
||||
let mut writer = CopyDataSink { pgb };
|
||||
let mut basebackup = basebackup::Basebackup::new(&mut writer, &timeline, lsn);
|
||||
let mut basebackup = basebackup::Basebackup::new(&mut writer, &timeline, lsn)?;
|
||||
basebackup.send_tarball()?;
|
||||
}
|
||||
pgb.write_message(&BeMessage::CopyDone)?;
|
||||
@@ -421,9 +496,7 @@ impl postgres_backend::Handler for PageServerHandler {
|
||||
}
|
||||
let query_string = std::str::from_utf8(&query_string)?;
|
||||
|
||||
if query_string.starts_with("controlfile") {
|
||||
self.handle_controlfile(pgb)?;
|
||||
} else if query_string.starts_with("pagestream ") {
|
||||
if query_string.starts_with("pagestream ") {
|
||||
let (_, params_raw) = query_string.split_at("pagestream ".len());
|
||||
let params = params_raw.split(' ').collect::<Vec<_>>();
|
||||
ensure!(
|
||||
@@ -479,10 +552,7 @@ impl postgres_backend::Handler for PageServerHandler {
|
||||
self.check_permission(Some(tenantid))?;
|
||||
|
||||
// Check that the timeline exists
|
||||
let repository = tenant_mgr::get_repository_for_tenant(&tenantid)?;
|
||||
repository
|
||||
.get_timeline(timelineid)
|
||||
.context(format!("error fetching timeline {}", timelineid))?;
|
||||
tenant_mgr::get_timeline_for_tenant(tenantid, timelineid)?;
|
||||
|
||||
walreceiver::launch_wal_receiver(self.conf, timelineid, &connstr, tenantid.to_owned());
|
||||
|
||||
@@ -577,7 +647,7 @@ impl postgres_backend::Handler for PageServerHandler {
|
||||
.map(|h| h.as_str().parse())
|
||||
.unwrap_or(Ok(self.conf.gc_horizon))?;
|
||||
|
||||
let repo = tenant_mgr::get_repository_for_tenant(&tenantid)?;
|
||||
let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
|
||||
|
||||
let result = repo.gc_iteration(Some(timelineid), gc_horizon, true)?;
|
||||
|
||||
@@ -586,12 +656,14 @@ impl postgres_backend::Handler for PageServerHandler {
|
||||
RowDescriptor::int8_col(b"layer_relfiles_needed_by_cutoff"),
|
||||
RowDescriptor::int8_col(b"layer_relfiles_needed_by_branches"),
|
||||
RowDescriptor::int8_col(b"layer_relfiles_not_updated"),
|
||||
RowDescriptor::int8_col(b"layer_relfiles_needed_as_tombstone"),
|
||||
RowDescriptor::int8_col(b"layer_relfiles_removed"),
|
||||
RowDescriptor::int8_col(b"layer_relfiles_dropped"),
|
||||
RowDescriptor::int8_col(b"layer_nonrelfiles_total"),
|
||||
RowDescriptor::int8_col(b"layer_nonrelfiles_needed_by_cutoff"),
|
||||
RowDescriptor::int8_col(b"layer_nonrelfiles_needed_by_branches"),
|
||||
RowDescriptor::int8_col(b"layer_nonrelfiles_not_updated"),
|
||||
RowDescriptor::int8_col(b"layer_nonrelfiles_needed_as_tombstone"),
|
||||
RowDescriptor::int8_col(b"layer_nonrelfiles_removed"),
|
||||
RowDescriptor::int8_col(b"layer_nonrelfiles_dropped"),
|
||||
RowDescriptor::int8_col(b"elapsed"),
|
||||
@@ -611,6 +683,12 @@ impl postgres_backend::Handler for PageServerHandler {
|
||||
.as_bytes(),
|
||||
),
|
||||
Some(result.ondisk_relfiles_not_updated.to_string().as_bytes()),
|
||||
Some(
|
||||
result
|
||||
.ondisk_relfiles_needed_as_tombstone
|
||||
.to_string()
|
||||
.as_bytes(),
|
||||
),
|
||||
Some(result.ondisk_relfiles_removed.to_string().as_bytes()),
|
||||
Some(result.ondisk_relfiles_dropped.to_string().as_bytes()),
|
||||
Some(result.ondisk_nonrelfiles_total.to_string().as_bytes()),
|
||||
@@ -627,6 +705,12 @@ impl postgres_backend::Handler for PageServerHandler {
|
||||
.as_bytes(),
|
||||
),
|
||||
Some(result.ondisk_nonrelfiles_not_updated.to_string().as_bytes()),
|
||||
Some(
|
||||
result
|
||||
.ondisk_nonrelfiles_needed_as_tombstone
|
||||
.to_string()
|
||||
.as_bytes(),
|
||||
),
|
||||
Some(result.ondisk_nonrelfiles_removed.to_string().as_bytes()),
|
||||
Some(result.ondisk_nonrelfiles_dropped.to_string().as_bytes()),
|
||||
Some(result.elapsed.as_millis().to_string().as_bytes()),
|
||||
|
||||
pageserver/src/relish_storage.rs (new file, 83 lines)
@@ -0,0 +1,83 @@
|
||||
//! Abstractions for the page server to store its relish layer data in external storage.
//!
//! The main purpose of this module subtree is to provide a set of abstractions for managing
//! the storage state in a way that is optimal for the page server.
|
||||
//!
|
||||
//! The abstractions hide multiple custom external storage API implementations,
|
||||
//! such as AWS S3, local filesystem, etc., located in the submodules.
|
||||
|
||||
mod local_fs;
|
||||
mod rust_s3;
|
||||
/// A queue-based storage with the background machinery behind it to synchronize
|
||||
/// local page server layer files with external storage.
|
||||
mod synced_storage;
|
||||
|
||||
use std::path::Path;
|
||||
use std::thread;
|
||||
|
||||
use anyhow::Context;
|
||||
|
||||
use self::local_fs::LocalFs;
|
||||
pub use self::synced_storage::schedule_timeline_upload;
|
||||
use crate::relish_storage::rust_s3::RustS3;
|
||||
use crate::{PageServerConf, RelishStorageKind};
|
||||
|
||||
pub fn run_storage_sync_thread(
|
||||
config: &'static PageServerConf,
|
||||
) -> anyhow::Result<Option<thread::JoinHandle<anyhow::Result<()>>>> {
|
||||
match &config.relish_storage_config {
|
||||
Some(relish_storage_config) => {
|
||||
let max_concurrent_sync = relish_storage_config.max_concurrent_sync;
|
||||
match &relish_storage_config.storage {
|
||||
RelishStorageKind::LocalFs(root) => synced_storage::run_storage_sync_thread(
|
||||
config,
|
||||
LocalFs::new(root.clone())?,
|
||||
max_concurrent_sync,
|
||||
),
|
||||
RelishStorageKind::AwsS3(s3_config) => synced_storage::run_storage_sync_thread(
|
||||
config,
|
||||
RustS3::new(s3_config)?,
|
||||
max_concurrent_sync,
|
||||
),
|
||||
}
|
||||
}
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Storage (potentially remote) API to manage its state.
|
||||
#[async_trait::async_trait]
|
||||
pub trait RelishStorage: Send + Sync {
|
||||
type RelishStoragePath;
|
||||
|
||||
fn derive_destination(
|
||||
page_server_workdir: &Path,
|
||||
relish_local_path: &Path,
|
||||
) -> anyhow::Result<Self::RelishStoragePath>;
|
||||
|
||||
async fn list_relishes(&self) -> anyhow::Result<Vec<Self::RelishStoragePath>>;
|
||||
|
||||
async fn download_relish(
|
||||
&self,
|
||||
from: &Self::RelishStoragePath,
|
||||
to: &Path,
|
||||
) -> anyhow::Result<()>;
|
||||
|
||||
async fn delete_relish(&self, path: &Self::RelishStoragePath) -> anyhow::Result<()>;
|
||||
|
||||
async fn upload_relish(&self, from: &Path, to: &Self::RelishStoragePath) -> anyhow::Result<()>;
|
||||
}
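A minimal sketch, not part of this diff, of code generic over `RelishStorage`: it shows how `derive_destination` and `upload_relish` are meant to compose. It assumes the snippet lives in this module (so the trait and `Path` are in scope); the function name and paths are illustrative.

// Sketch: upload one local layer file through any RelishStorage backend.
async fn upload_one_relish<S: RelishStorage>(
    storage: &S,
    page_server_workdir: &Path,
    relish_local_path: &Path,
) -> anyhow::Result<()> {
    // Map the local layer file onto the backend-specific destination...
    let destination = S::derive_destination(page_server_workdir, relish_local_path)?;
    // ...and copy the bytes there.
    storage.upload_relish(relish_local_path, &destination).await
}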
|
||||
|
||||
fn strip_workspace_prefix<'a>(
|
||||
page_server_workdir: &'a Path,
|
||||
relish_local_path: &'a Path,
|
||||
) -> anyhow::Result<&'a Path> {
|
||||
relish_local_path
|
||||
.strip_prefix(page_server_workdir)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Unexpected: relish local path '{}' is not relevant to server workdir",
|
||||
relish_local_path.display(),
|
||||
)
|
||||
})
|
||||
}
|
||||
pageserver/src/relish_storage/local_fs.rs (new file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
//! Local filesystem relish storage.
|
||||
//!
|
||||
//! The page server already stores layer data locally when freezing it.
//! This storage provides a way to
//!
//! * test things locally with a simple setup
//! * compare the local and remote sets of files
//! * help validate the relish storage API
|
||||
|
||||
use std::{
|
||||
future::Future,
|
||||
path::{Path, PathBuf},
|
||||
pin::Pin,
|
||||
};
|
||||
|
||||
use anyhow::{bail, Context};
|
||||
|
||||
use super::{strip_workspace_prefix, RelishStorage};
|
||||
|
||||
pub struct LocalFs {
|
||||
root: PathBuf,
|
||||
}
|
||||
|
||||
impl LocalFs {
|
||||
/// Attempts to create the local FS relish storage; also creates the provided directory if it does not exist.
|
||||
pub fn new(root: PathBuf) -> anyhow::Result<Self> {
|
||||
if !root.exists() {
|
||||
std::fs::create_dir_all(&root).with_context(|| {
|
||||
format!(
|
||||
"Failed to create all directories in the given root path {}",
|
||||
root.display(),
|
||||
)
|
||||
})?;
|
||||
}
|
||||
Ok(Self { root })
|
||||
}
|
||||
|
||||
fn resolve_in_storage(&self, path: &Path) -> anyhow::Result<PathBuf> {
|
||||
if path.is_relative() {
|
||||
Ok(self.root.join(path))
|
||||
} else if path.starts_with(&self.root) {
|
||||
Ok(path.to_path_buf())
|
||||
} else {
|
||||
bail!(
|
||||
"Path '{}' does not belong to the current storage",
|
||||
path.display()
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl RelishStorage for LocalFs {
|
||||
type RelishStoragePath = PathBuf;
|
||||
|
||||
fn derive_destination(
|
||||
page_server_workdir: &Path,
|
||||
relish_local_path: &Path,
|
||||
) -> anyhow::Result<Self::RelishStoragePath> {
|
||||
Ok(strip_workspace_prefix(page_server_workdir, relish_local_path)?.to_path_buf())
|
||||
}
|
||||
|
||||
async fn list_relishes(&self) -> anyhow::Result<Vec<Self::RelishStoragePath>> {
|
||||
Ok(get_all_files(&self.root).await?.into_iter().collect())
|
||||
}
|
||||
|
||||
async fn download_relish(
|
||||
&self,
|
||||
from: &Self::RelishStoragePath,
|
||||
to: &Path,
|
||||
) -> anyhow::Result<()> {
|
||||
let file_path = self.resolve_in_storage(from)?;
|
||||
if file_path.exists() && file_path.is_file() {
|
||||
create_target_directory(to).await?;
|
||||
tokio::fs::copy(file_path, to).await?;
|
||||
Ok(())
|
||||
} else {
|
||||
bail!(
|
||||
"File '{}' either does not exist or is not a file",
|
||||
file_path.display()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_relish(&self, path: &Self::RelishStoragePath) -> anyhow::Result<()> {
|
||||
let file_path = self.resolve_in_storage(path)?;
|
||||
if file_path.exists() && file_path.is_file() {
|
||||
Ok(tokio::fs::remove_file(file_path).await?)
|
||||
} else {
|
||||
bail!(
|
||||
"File '{}' either does not exist or is not a file",
|
||||
file_path.display()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
async fn upload_relish(&self, from: &Path, to: &Self::RelishStoragePath) -> anyhow::Result<()> {
|
||||
let target_file_path = self.resolve_in_storage(to)?;
|
||||
create_target_directory(&target_file_path).await?;
|
||||
|
||||
tokio::fs::copy(&from, &target_file_path)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to upload relish '{}' to local storage",
|
||||
from.display(),
|
||||
)
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
}
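A hypothetical usage sketch of `LocalFs`, not part of this diff; it assumes an async context, and all paths are illustrative.

// Back up one layer file into a local directory acting as the "remote" storage.
let storage = LocalFs::new(PathBuf::from("/tmp/relish_backup"))?;
let local_layer = Path::new("/data/pageserver/tenants/t1/timelines/tl1/layer_0");
// The destination is the layer path relative to the page server workdir;
// upload_relish() re-roots it under the storage root before copying.
let dest = LocalFs::derive_destination(Path::new("/data/pageserver"), local_layer)?;
storage.upload_relish(local_layer, &dest).await?;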
|
||||
|
||||
fn get_all_files<'a, P>(
|
||||
directory_path: P,
|
||||
) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<PathBuf>>> + Send + Sync + 'a>>
|
||||
where
|
||||
P: AsRef<Path> + Send + Sync + 'a,
|
||||
{
|
||||
Box::pin(async move {
|
||||
let directory_path = directory_path.as_ref();
|
||||
if directory_path.exists() {
|
||||
if directory_path.is_dir() {
|
||||
let mut paths = Vec::new();
|
||||
let mut dir_contents = tokio::fs::read_dir(directory_path).await?;
|
||||
while let Some(dir_entry) = dir_contents.next_entry().await? {
|
||||
let file_type = dir_entry.file_type().await?;
|
||||
let entry_path = dir_entry.path();
|
||||
if file_type.is_symlink() {
|
||||
log::debug!("{:?} us a symlink, skipping", entry_path)
|
||||
} else if file_type.is_dir() {
|
||||
paths.extend(get_all_files(entry_path).await?.into_iter())
|
||||
} else {
|
||||
paths.push(dir_entry.path());
|
||||
}
|
||||
}
|
||||
Ok(paths)
|
||||
} else {
|
||||
bail!("Path '{}' is not a directory", directory_path.display())
|
||||
}
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async fn create_target_directory(target_file_path: &Path) -> anyhow::Result<()> {
|
||||
let target_dir = match target_file_path.parent() {
|
||||
Some(parent_dir) => parent_dir,
|
||||
None => bail!(
|
||||
"Relish path '{}' has no parent directory",
|
||||
target_file_path.display()
|
||||
),
|
||||
};
|
||||
if !target_dir.exists() {
|
||||
tokio::fs::create_dir_all(target_dir).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
pageserver/src/relish_storage/rust_s3.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
//! A wrapper around the AWS S3 client library `rust_s3`, used as a relish storage.
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use s3::{bucket::Bucket, creds::Credentials, region::Region};
|
||||
|
||||
use crate::{relish_storage::strip_workspace_prefix, S3Config};
|
||||
|
||||
use super::RelishStorage;
|
||||
|
||||
const S3_FILE_SEPARATOR: char = '/';
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct S3ObjectKey(String);
|
||||
|
||||
impl S3ObjectKey {
|
||||
fn key(&self) -> &str {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
/// AWS S3 relish storage.
|
||||
pub struct RustS3 {
|
||||
bucket: Bucket,
|
||||
}
|
||||
|
||||
impl RustS3 {
|
||||
/// Creates the relish storage; errors if an incorrect AWS S3 configuration is provided.
|
||||
pub fn new(aws_config: &S3Config) -> anyhow::Result<Self> {
|
||||
let region = aws_config
|
||||
.bucket_region
|
||||
.parse::<Region>()
|
||||
.context("Failed to parse the s3 region from config")?;
|
||||
let credentials = Credentials::new(
|
||||
aws_config.access_key_id.as_deref(),
|
||||
aws_config.secret_access_key.as_deref(),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.context("Failed to create the s3 credentials")?;
|
||||
Ok(Self {
|
||||
bucket: Bucket::new_with_path_style(
|
||||
aws_config.bucket_name.as_str(),
|
||||
region,
|
||||
credentials,
|
||||
)
|
||||
.context("Failed to create the s3 bucket")?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl RelishStorage for RustS3 {
|
||||
type RelishStoragePath = S3ObjectKey;
|
||||
|
||||
fn derive_destination(
|
||||
page_server_workdir: &Path,
|
||||
relish_local_path: &Path,
|
||||
) -> anyhow::Result<Self::RelishStoragePath> {
|
||||
let relative_path = strip_workspace_prefix(page_server_workdir, relish_local_path)?;
|
||||
let mut key = String::new();
|
||||
for segment in relative_path {
|
||||
key.push(S3_FILE_SEPARATOR);
|
||||
key.push_str(&segment.to_string_lossy());
|
||||
}
|
||||
Ok(S3ObjectKey(key))
|
||||
}
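A sketch of the key derivation above, not part of this diff, written as it might appear in a unit test inside this module; the paths are illustrative.

let key = RustS3::derive_destination(
    Path::new("/data/pageserver"),
    Path::new("/data/pageserver/tenants/t1/timelines/tl1/layer_0"),
)?;
// Every path segment below the workdir is prefixed with S3_FILE_SEPARATOR:
assert_eq!(key.key(), "/tenants/t1/timelines/tl1/layer_0");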
|
||||
|
||||
async fn list_relishes(&self) -> anyhow::Result<Vec<Self::RelishStoragePath>> {
|
||||
let list_response = self
|
||||
.bucket
|
||||
.list(String::new(), None)
|
||||
.await
|
||||
.context("Failed to list s3 objects")?;
|
||||
|
||||
Ok(list_response
|
||||
.into_iter()
|
||||
.flat_map(|response| response.contents)
|
||||
.map(|s3_object| S3ObjectKey(s3_object.key))
|
||||
.collect())
|
||||
}
|
||||
|
||||
async fn download_relish(
|
||||
&self,
|
||||
from: &Self::RelishStoragePath,
|
||||
to: &Path,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut target_file = std::fs::OpenOptions::new()
|
||||
.write(true)
|
||||
.open(to)
|
||||
.with_context(|| format!("Failed to open target s3 destination at {}", to.display()))?;
|
||||
let code = self
|
||||
.bucket
|
||||
.get_object_stream(from.key(), &mut target_file)
|
||||
.await
|
||||
.with_context(|| format!("Failed to download s3 object with key {}", from.key()))?;
|
||||
if code != 200 {
|
||||
Err(anyhow::format_err!(
|
||||
"Received non-200 exit code during downloading object from directory, code: {}",
|
||||
code
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_relish(&self, path: &Self::RelishStoragePath) -> anyhow::Result<()> {
|
||||
let (_, code) = self
|
||||
.bucket
|
||||
.delete_object(path.key())
|
||||
.await
|
||||
.with_context(|| format!("Failed to delete s3 object with key {}", path.key()))?;
|
||||
if code != 200 {
|
||||
Err(anyhow::format_err!(
|
||||
"Received non-200 exit code during deleting object with key '{}', code: {}",
|
||||
path.key(),
|
||||
code
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
async fn upload_relish(&self, from: &Path, to: &Self::RelishStoragePath) -> anyhow::Result<()> {
|
||||
let mut local_file = tokio::fs::OpenOptions::new().read(true).open(from).await?;
|
||||
|
||||
let code = self
|
||||
.bucket
|
||||
.put_object_stream(&mut local_file, to.key())
|
||||
.await
|
||||
.with_context(|| format!("Failed to create s3 object with key {}", to.key()))?;
|
||||
if code != 200 {
|
||||
Err(anyhow::format_err!(
|
||||
"Received non-200 exit code during creating object with key '{}', code: {}",
|
||||
to.key(),
|
||||
code
|
||||
))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
pageserver/src/relish_storage/synced_storage.rs (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
use std::time::Duration;
|
||||
use std::{collections::BinaryHeap, sync::Mutex, thread};
|
||||
|
||||
use crate::{relish_storage::RelishStorage, PageServerConf};
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref UPLOAD_QUEUE: Mutex<BinaryHeap<SyncTask>> = Mutex::new(BinaryHeap::new());
|
||||
}
|
||||
|
||||
pub fn schedule_timeline_upload(_local_timeline: ()) {
|
||||
// UPLOAD_QUEUE
|
||||
// .lock()
|
||||
// .unwrap()
|
||||
// .push(SyncTask::Upload(local_timeline))
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||
enum SyncTask {}
|
||||
|
||||
pub fn run_storage_sync_thread<
|
||||
P: std::fmt::Debug,
|
||||
S: 'static + RelishStorage<RelishStoragePath = P>,
|
||||
>(
|
||||
config: &'static PageServerConf,
|
||||
relish_storage: S,
|
||||
max_concurrent_sync: usize,
|
||||
) -> anyhow::Result<Option<thread::JoinHandle<anyhow::Result<()>>>> {
|
||||
let runtime = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()?;
|
||||
|
||||
let handle = thread::Builder::new()
|
||||
.name("Queue based relish storage sync".to_string())
|
||||
.spawn(move || loop {
|
||||
let mut queue_accessor = UPLOAD_QUEUE.lock().unwrap();
|
||||
log::debug!("Upload queue length: {}", queue_accessor.len());
|
||||
let next_task = queue_accessor.pop();
|
||||
drop(queue_accessor);
|
||||
match next_task {
|
||||
Some(task) => runtime.block_on(async {
|
||||
// suppress warnings
|
||||
let _ = (config, task, &relish_storage, max_concurrent_sync);
|
||||
todo!("omitted for brevity")
|
||||
}),
|
||||
None => {
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
})?;
|
||||
Ok(Some(handle))
|
||||
}
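A self-contained sketch, not part of this diff, of the queue pattern used above: a lazy_static Mutex-protected BinaryHeap filled by producers and drained by a dedicated worker thread. A plain u32 stands in for SyncTask, since that enum has no variants yet.

use std::{collections::BinaryHeap, sync::Mutex, thread, time::Duration};

lazy_static::lazy_static! {
    static ref QUEUE: Mutex<BinaryHeap<u32>> = Mutex::new(BinaryHeap::new());
}

// Producer side: push a task; BinaryHeap::pop returns the largest value first.
fn schedule(task: u32) {
    QUEUE.lock().unwrap().push(task);
}

// Consumer side: drain the queue, sleeping briefly whenever it is empty.
fn spawn_worker() -> thread::JoinHandle<()> {
    thread::spawn(|| loop {
        // Take the lock only long enough to pop one task.
        let next_task = QUEUE.lock().unwrap().pop();
        match next_task {
            Some(task) => println!("processing task {}", task),
            None => thread::sleep(Duration::from_secs(1)),
        }
    })
}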
|
||||
@@ -17,11 +17,7 @@ pub trait Repository: Send + Sync {
|
||||
fn get_timeline(&self, timelineid: ZTimelineId) -> Result<Arc<dyn Timeline>>;
|
||||
|
||||
/// Create a new, empty timeline. The caller is responsible for loading data into it
|
||||
fn create_empty_timeline(
|
||||
&self,
|
||||
timelineid: ZTimelineId,
|
||||
start_lsn: Lsn,
|
||||
) -> Result<Arc<dyn Timeline>>;
|
||||
fn create_empty_timeline(&self, timelineid: ZTimelineId) -> Result<Arc<dyn Timeline>>;
|
||||
|
||||
/// Branch a timeline
|
||||
fn branch_timeline(&self, src: ZTimelineId, dst: ZTimelineId, start_lsn: Lsn) -> Result<()>;
|
||||
@@ -32,22 +28,15 @@ pub trait Repository: Send + Sync {
|
||||
///
|
||||
/// 'timelineid' specifies the timeline to GC, or None for all.
|
||||
/// `horizon` specifies delta from last lsn to preserve all object versions (pitr interval).
|
||||
/// The `compact` parameter is used to force compaction of the storage.
/// Some storage implementations are based on an LSM tree and require periodic merges (compaction).
/// Usually the storage implementation decides by itself when compaction should be performed,
/// but for GC tests it may be useful to force compaction right after a GC iteration completes,
/// to make sure that all detected garbage is removed.
/// So right now `compact` is set to true when GC is explicitly requested through the page server API,
/// and is set to false in the GC threads, which repeat GC iterations in an infinite loop.
/// The `checkpoint_before_gc` parameter is used to force compaction of the storage before GC
/// to make tests more deterministic.
/// TODO Do we still need it, or can we call checkpoint explicitly in tests where needed?
|
||||
fn gc_iteration(
|
||||
&self,
|
||||
timelineid: Option<ZTimelineId>,
|
||||
horizon: u64,
|
||||
compact: bool,
|
||||
checkpoint_before_gc: bool,
|
||||
) -> Result<GcResult>;
|
||||
|
||||
// TODO get timelines?
|
||||
//fn get_stats(&self) -> RepositoryStats;
|
||||
}
|
||||
|
||||
///
|
||||
@@ -59,6 +48,7 @@ pub struct GcResult {
|
||||
pub ondisk_relfiles_needed_by_cutoff: u64,
|
||||
pub ondisk_relfiles_needed_by_branches: u64,
|
||||
pub ondisk_relfiles_not_updated: u64,
|
||||
pub ondisk_relfiles_needed_as_tombstone: u64,
|
||||
pub ondisk_relfiles_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files.
|
||||
pub ondisk_relfiles_dropped: u64, // # of layer files removed because the relation was dropped
|
||||
|
||||
@@ -66,6 +56,7 @@ pub struct GcResult {
|
||||
pub ondisk_nonrelfiles_needed_by_cutoff: u64,
|
||||
pub ondisk_nonrelfiles_needed_by_branches: u64,
|
||||
pub ondisk_nonrelfiles_not_updated: u64,
|
||||
pub ondisk_nonrelfiles_needed_as_tombstone: u64,
|
||||
pub ondisk_nonrelfiles_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files.
|
||||
pub ondisk_nonrelfiles_dropped: u64, // # of layer files removed because the relation was dropped
|
||||
|
||||
@@ -78,6 +69,7 @@ impl AddAssign for GcResult {
|
||||
self.ondisk_relfiles_needed_by_cutoff += other.ondisk_relfiles_needed_by_cutoff;
|
||||
self.ondisk_relfiles_needed_by_branches += other.ondisk_relfiles_needed_by_branches;
|
||||
self.ondisk_relfiles_not_updated += other.ondisk_relfiles_not_updated;
|
||||
self.ondisk_relfiles_needed_as_tombstone += other.ondisk_relfiles_needed_as_tombstone;
|
||||
self.ondisk_relfiles_removed += other.ondisk_relfiles_removed;
|
||||
self.ondisk_relfiles_dropped += other.ondisk_relfiles_dropped;
|
||||
|
||||
@@ -85,6 +77,7 @@ impl AddAssign for GcResult {
|
||||
self.ondisk_nonrelfiles_needed_by_cutoff += other.ondisk_nonrelfiles_needed_by_cutoff;
|
||||
self.ondisk_nonrelfiles_needed_by_branches += other.ondisk_nonrelfiles_needed_by_branches;
|
||||
self.ondisk_nonrelfiles_not_updated += other.ondisk_nonrelfiles_not_updated;
|
||||
self.ondisk_nonrelfiles_needed_as_tombstone += other.ondisk_nonrelfiles_needed_as_tombstone;
|
||||
self.ondisk_nonrelfiles_removed += other.ondisk_nonrelfiles_removed;
|
||||
self.ondisk_nonrelfiles_dropped += other.ondisk_nonrelfiles_dropped;
|
||||
|
||||
@@ -97,11 +90,16 @@ pub trait Timeline: Send + Sync {
|
||||
// Public GET functions
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/// Look up given page in the cache.
|
||||
fn get_page_at_lsn(&self, tag: RelishTag, blknum: u32, lsn: Lsn) -> Result<Bytes>;
|
||||
///
|
||||
/// Wait until WAL has been received and processed up to this LSN.
|
||||
///
|
||||
/// You should call this before any of the other get_* or list_* functions. Calling
|
||||
/// those functions with an LSN that has not been processed yet is an error.
|
||||
///
|
||||
fn wait_lsn(&self, lsn: Lsn) -> Result<()>;
|
||||
|
||||
/// Look up given page in the cache.
|
||||
fn get_page_at_lsn_nowait(&self, tag: RelishTag, blknum: u32, lsn: Lsn) -> Result<Bytes>;
|
||||
/// Look up given page version.
|
||||
fn get_page_at_lsn(&self, tag: RelishTag, blknum: u32, lsn: Lsn) -> Result<Bytes>;
|
||||
|
||||
/// Get size of a relish
|
||||
fn get_relish_size(&self, tag: RelishTag, lsn: Lsn) -> Result<Option<u32>>;
|
||||
@@ -109,10 +107,14 @@ pub trait Timeline: Send + Sync {
|
||||
/// Does relation exist?
|
||||
fn get_rel_exists(&self, tag: RelishTag, lsn: Lsn) -> Result<bool>;
|
||||
|
||||
/// Get a list of all distinct relations in given tablespace and database.
|
||||
fn list_rels(&self, spcnode: u32, dbnode: u32, lsn: Lsn) -> Result<HashSet<RelTag>>;
|
||||
/// Get a list of all existing relations
|
||||
/// Pass RelTag to get relation objects or None to get nonrels.
|
||||
fn list_relishes(&self, tag: Option<RelTag>, lsn: Lsn) -> Result<HashSet<RelishTag>>;
|
||||
|
||||
/// Get a list of non-relational objects
|
||||
/// Get a list of all existing relations in given tablespace and database.
|
||||
fn list_rels(&self, spcnode: u32, dbnode: u32, lsn: Lsn) -> Result<HashSet<RelishTag>>;
|
||||
|
||||
/// Get a list of all existing non-relational objects
|
||||
fn list_nonrels(&self, lsn: Lsn) -> Result<HashSet<RelishTag>>;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
@@ -133,7 +135,7 @@ pub trait Timeline: Send + Sync {
|
||||
/// Truncate relation
|
||||
fn put_truncation(&self, rel: RelishTag, lsn: Lsn, nblocks: u32) -> Result<()>;
|
||||
|
||||
/// This method is used for marking dropped relations and truncated SLRU files
|
||||
/// This method is used for marking dropped relations, truncated SLRU files, and aborted two-phase records
|
||||
fn drop_relish(&self, tag: RelishTag, lsn: Lsn) -> Result<()>;
|
||||
|
||||
/// Track end of the latest digested WAL record.
|
||||
@@ -154,14 +156,16 @@ pub trait Timeline: Send + Sync {
|
||||
/// NOTE: This has nothing to do with checkpoint in PostgreSQL. We don't
|
||||
/// know anything about them here in the repository.
|
||||
fn checkpoint(&self) -> Result<()>;
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RepositoryStats {
|
||||
pub num_entries: Lsn,
|
||||
pub num_page_images: Lsn,
|
||||
pub num_wal_records: Lsn,
|
||||
pub num_getpage_requests: Lsn,
|
||||
/// Retrieve current logical size of the timeline
|
||||
///
|
||||
/// NOTE: counted incrementally, includes ancestors,
|
||||
/// doesn't support TwoPhase relishes yet
|
||||
fn get_current_logical_size(&self) -> usize;
|
||||
|
||||
/// Does the same as get_current_logical_size but counted on demand.
|
||||
/// Used in tests to ensure that the incremental and non-incremental variants match.
|
||||
fn get_current_logical_size_non_incremental(&self, lsn: Lsn) -> Result<usize>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
@@ -208,14 +212,18 @@ mod tests {
|
||||
use crate::layered_repository::LayeredRepository;
|
||||
use crate::walredo::{WalRedoError, WalRedoManager};
|
||||
use crate::PageServerConf;
|
||||
use hex_literal::hex;
|
||||
use postgres_ffi::pg_constants;
|
||||
use postgres_ffi::xlog_utils::SIZEOF_CHECKPOINT;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use std::time::Duration;
|
||||
use zenith_utils::postgres_backend::AuthType;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
|
||||
const TIMELINE_ID: ZTimelineId =
|
||||
ZTimelineId::from_array(hex!("11223344556677881122334455667788"));
|
||||
const NEW_TIMELINE_ID: ZTimelineId =
|
||||
ZTimelineId::from_array(hex!("AA223344556677881122334455667788"));
|
||||
|
||||
/// Arbitrary relation tag, for testing.
|
||||
const TESTREL_A: RelishTag = RelishTag::Relation(RelTag {
|
||||
spcnode: 0,
|
||||
@@ -240,52 +248,64 @@ mod tests {
|
||||
buf.freeze()
|
||||
}
|
||||
|
||||
fn assert_current_logical_size(timeline: &Arc<dyn Timeline>, lsn: Lsn) {
|
||||
let incremental = timeline.get_current_logical_size();
|
||||
let non_incremental = timeline
|
||||
.get_current_logical_size_non_incremental(lsn)
|
||||
.unwrap();
|
||||
assert_eq!(incremental, non_incremental);
|
||||
}
|
||||
|
||||
static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]);
|
||||
static ZERO_CHECKPOINT: Bytes = Bytes::from_static(&[0u8; SIZEOF_CHECKPOINT]);
|
||||
|
||||
fn get_test_repo(test_name: &str) -> Result<Box<dyn Repository>> {
|
||||
let repo_dir = PathBuf::from(format!("../tmp_check/test_{}", test_name));
|
||||
let _ = fs::remove_dir_all(&repo_dir);
|
||||
fs::create_dir_all(&repo_dir)?;
|
||||
fs::create_dir_all(&repo_dir.join("timelines"))?;
|
||||
struct RepoHarness {
|
||||
conf: &'static PageServerConf,
|
||||
tenant_id: ZTenantId,
|
||||
}
|
||||
|
||||
let conf = PageServerConf {
|
||||
daemonize: false,
|
||||
gc_horizon: 64 * 1024 * 1024,
|
||||
gc_period: Duration::from_secs(10),
|
||||
listen_addr: "127.0.0.1:5430".to_string(),
|
||||
http_endpoint_addr: "127.0.0.1:9898".to_string(),
|
||||
superuser: "zenith_admin".to_string(),
|
||||
workdir: repo_dir,
|
||||
pg_distrib_dir: "".into(),
|
||||
auth_type: AuthType::Trust,
|
||||
auth_validation_public_key_path: None,
|
||||
};
|
||||
// Make a static copy of the config. This can never be free'd, but that's
|
||||
// OK in a test.
|
||||
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
|
||||
let tenantid = ZTenantId::generate();
|
||||
fs::create_dir_all(conf.tenant_path(&tenantid)).unwrap();
|
||||
impl RepoHarness {
|
||||
fn create(test_name: &'static str) -> Result<Self> {
|
||||
let repo_dir = PageServerConf::test_repo_dir(test_name);
|
||||
let _ = fs::remove_dir_all(&repo_dir);
|
||||
fs::create_dir_all(&repo_dir)?;
|
||||
fs::create_dir_all(&repo_dir.join("timelines"))?;
|
||||
|
||||
let walredo_mgr = TestRedoManager {};
|
||||
let conf = PageServerConf::dummy_conf(repo_dir);
|
||||
// Make a static copy of the config. This can never be free'd, but that's
|
||||
// OK in a test.
|
||||
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
|
||||
|
||||
let repo = Box::new(LayeredRepository::new(
|
||||
conf,
|
||||
Arc::new(walredo_mgr),
|
||||
tenantid,
|
||||
));
|
||||
let tenant_id = ZTenantId::generate();
|
||||
fs::create_dir_all(conf.tenant_path(&tenant_id))?;
|
||||
|
||||
Ok(repo)
|
||||
Ok(Self { conf, tenant_id })
|
||||
}
|
||||
|
||||
fn load(&self) -> Box<dyn Repository> {
|
||||
let walredo_mgr = Arc::new(TestRedoManager);
|
||||
|
||||
Box::new(LayeredRepository::new(
|
||||
self.conf,
|
||||
walredo_mgr,
|
||||
self.tenant_id,
|
||||
false,
|
||||
))
|
||||
}
|
||||
|
||||
fn timeline_path(&self, timeline_id: &ZTimelineId) -> PathBuf {
|
||||
self.conf.timeline_path(timeline_id, &self.tenant_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_relsize() -> Result<()> {
|
||||
let repo = get_test_repo("test_relsize")?;
|
||||
let repo = RepoHarness::create("test_relsize")?.load();
|
||||
// get_timeline() with non-existent timeline id should fail
|
||||
//repo.get_timeline("11223344556677881122334455667788");
|
||||
|
||||
// Create timeline to work on
|
||||
let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap();
|
||||
let tline = repo.create_empty_timeline(timelineid, Lsn(0x00))?;
|
||||
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
|
||||
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
|
||||
@@ -295,6 +315,8 @@ mod tests {
|
||||
|
||||
tline.advance_last_record_lsn(Lsn(0x50));
|
||||
|
||||
assert_current_logical_size(&tline, Lsn(0x50));
|
||||
|
||||
// The relation was created at LSN 2, not visible at LSN 1 yet.
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x10))?, false);
|
||||
assert!(tline.get_relish_size(TESTREL_A, Lsn(0x10))?.is_none());
|
||||
@@ -339,6 +361,7 @@ mod tests {
|
||||
// Truncate last block
|
||||
tline.put_truncation(TESTREL_A, Lsn(0x60), 2)?;
|
||||
tline.advance_last_record_lsn(Lsn(0x60));
|
||||
assert_current_logical_size(&tline, Lsn(0x60));
|
||||
|
||||
// Check reported size and contents after truncation
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x60))?.unwrap(), 2);
|
||||
@@ -359,9 +382,9 @@ mod tests {
|
||||
);
|
||||
|
||||
// Truncate to zero length
|
||||
tline.put_truncation(TESTREL_A, Lsn(0x60), 0)?;
|
||||
tline.advance_last_record_lsn(Lsn(0x60));
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x60))?.unwrap(), 0);
|
||||
tline.put_truncation(TESTREL_A, Lsn(0x68), 0)?;
|
||||
tline.advance_last_record_lsn(Lsn(0x68));
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x68))?.unwrap(), 0);
|
||||
|
||||
// Extend from 0 to 2 blocks, leaving a gap
|
||||
tline.put_page_image(TESTREL_A, 1, Lsn(0x70), TEST_IMG("foo blk 1"))?;
|
||||
@@ -392,13 +415,148 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Test what happens if we dropped a relation
|
||||
// and then created it again within the same layer.
|
||||
#[test]
|
||||
fn test_drop_extend() -> Result<()> {
|
||||
let repo = RepoHarness::create("test_drop_extend")?.load();
|
||||
|
||||
// Create timeline to work on
|
||||
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
|
||||
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
|
||||
tline.advance_last_record_lsn(Lsn(0x20));
|
||||
|
||||
// Check that rel exists and size is correct
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x20))?, true);
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x20))?.unwrap(), 1);
|
||||
|
||||
// Drop relish
|
||||
tline.drop_relish(TESTREL_A, Lsn(0x30))?;
|
||||
tline.advance_last_record_lsn(Lsn(0x30));
|
||||
|
||||
// Check that rel is not visible anymore
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x30))?, false);
|
||||
assert!(tline.get_relish_size(TESTREL_A, Lsn(0x30))?.is_none());
|
||||
|
||||
// Extend it again
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x40), TEST_IMG("foo blk 0 at 4"))?;
|
||||
tline.advance_last_record_lsn(Lsn(0x40));
|
||||
|
||||
// Check that rel exists and size is correct
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x40))?, true);
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x40))?.unwrap(), 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Test what happens if we truncated a relation
|
||||
// so that one of its segments was dropped
|
||||
// and then extended it again within the same layer.
|
||||
#[test]
|
||||
fn test_truncate_extend() -> Result<()> {
|
||||
let repo = RepoHarness::create("test_truncate_extend")?.load();
|
||||
|
||||
// Create timeline to work on
|
||||
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
|
||||
|
||||
//from storage_layer.rs
|
||||
const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192;
|
||||
let relsize = RELISH_SEG_SIZE * 2;
|
||||
|
||||
// Create relation with relsize blocks
|
||||
for blkno in 0..relsize {
|
||||
let lsn = Lsn(0x20);
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
tline.put_page_image(TESTREL_A, blkno, lsn, TEST_IMG(&data))?;
|
||||
}
|
||||
|
||||
tline.advance_last_record_lsn(Lsn(0x20));
|
||||
|
||||
// The relation was created at LSN 2, not visible at LSN 1 yet.
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x10))?, false);
|
||||
assert!(tline.get_relish_size(TESTREL_A, Lsn(0x10))?.is_none());
|
||||
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x20))?, true);
|
||||
assert_eq!(
|
||||
tline.get_relish_size(TESTREL_A, Lsn(0x20))?.unwrap(),
|
||||
relsize
|
||||
);
|
||||
|
||||
// Check relation content
|
||||
for blkno in 0..relsize {
|
||||
let lsn = Lsn(0x20);
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, blkno, lsn)?,
|
||||
TEST_IMG(&data)
|
||||
);
|
||||
}
|
||||
|
||||
// Truncate the relation so that its second segment is dropped,
// leaving only one page
|
||||
tline.put_truncation(TESTREL_A, Lsn(0x60), 1)?;
|
||||
tline.advance_last_record_lsn(Lsn(0x60));
|
||||
|
||||
// Check reported size and contents after truncation
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x60))?.unwrap(), 1);
|
||||
|
||||
for blkno in 0..1 {
|
||||
let lsn = Lsn(0x20);
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, blkno, Lsn(0x60))?,
|
||||
TEST_IMG(&data)
|
||||
);
|
||||
}
|
||||
|
||||
// should still see all blocks with older LSN
|
||||
assert_eq!(
|
||||
tline.get_relish_size(TESTREL_A, Lsn(0x50))?.unwrap(),
|
||||
relsize
|
||||
);
|
||||
for blkno in 0..relsize {
|
||||
let lsn = Lsn(0x20);
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, blkno, Lsn(0x50))?,
|
||||
TEST_IMG(&data)
|
||||
);
|
||||
}
|
||||
|
||||
// Extend relation again.
|
||||
// Add enough blocks to create second segment
|
||||
for blkno in 0..relsize {
|
||||
let lsn = Lsn(0x80);
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
tline.put_page_image(TESTREL_A, blkno, lsn, TEST_IMG(&data))?;
|
||||
}
|
||||
tline.advance_last_record_lsn(Lsn(0x80));
|
||||
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x80))?, true);
|
||||
assert_eq!(
|
||||
tline.get_relish_size(TESTREL_A, Lsn(0x80))?.unwrap(),
|
||||
relsize
|
||||
);
|
||||
// Check relation content
|
||||
for blkno in 0..relsize {
|
||||
let lsn = Lsn(0x80);
|
||||
let data = format!("foo blk {} at {}", blkno, lsn);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, blkno, Lsn(0x80))?,
|
||||
TEST_IMG(&data)
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Test get_relsize() and truncation with a file larger than 1 GB, so that it's
|
||||
/// split into multiple 1 GB segments in Postgres.
|
||||
#[test]
|
||||
fn test_large_rel() -> Result<()> {
|
||||
let repo = get_test_repo("test_large_rel")?;
|
||||
let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap();
|
||||
let tline = repo.create_empty_timeline(timelineid, Lsn(0x00))?;
|
||||
let repo = RepoHarness::create("test_large_rel")?.load();
|
||||
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
|
||||
|
||||
let mut lsn = 0x10;
|
||||
for blknum in 0..pg_constants::RELSEG_SIZE + 1 {
|
||||
@@ -408,6 +566,8 @@ mod tests {
|
||||
}
|
||||
tline.advance_last_record_lsn(Lsn(lsn));
|
||||
|
||||
assert_current_logical_size(&tline, Lsn(lsn));
|
||||
|
||||
assert_eq!(
|
||||
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
|
||||
pg_constants::RELSEG_SIZE + 1
|
||||
@@ -421,6 +581,7 @@ mod tests {
|
||||
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
|
||||
pg_constants::RELSEG_SIZE
|
||||
);
|
||||
assert_current_logical_size(&tline, Lsn(lsn));
|
||||
|
||||
// Truncate another block
|
||||
lsn += 0x10;
|
||||
@@ -430,6 +591,7 @@ mod tests {
|
||||
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
|
||||
pg_constants::RELSEG_SIZE - 1
|
||||
);
|
||||
assert_current_logical_size(&tline, Lsn(lsn));
|
||||
|
||||
// Truncate to 1500, and then truncate all the way down to 0, one block at a time
|
||||
// This tests the behavior at segment boundaries
|
||||
@@ -445,6 +607,61 @@ mod tests {
|
||||
|
||||
size -= 1;
|
||||
}
|
||||
assert_current_logical_size(&tline, Lsn(lsn));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
///
|
||||
/// Test list_rels() function, with branches and dropped relations
|
||||
///
|
||||
#[test]
|
||||
fn test_list_rels_drop() -> Result<()> {
|
||||
let repo = RepoHarness::create("test_list_rels_drop")?.load();
|
||||
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
|
||||
const TESTDB: u32 = 111;
|
||||
|
||||
// Import initial dummy checkpoint record, otherwise the get_timeline() call
|
||||
// after branching fails below
|
||||
tline.put_page_image(RelishTag::Checkpoint, 0, Lsn(0x10), ZERO_CHECKPOINT.clone())?;
|
||||
|
||||
// Create a relation on the timeline
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
|
||||
|
||||
tline.advance_last_record_lsn(Lsn(0x30));
|
||||
|
||||
// Check that list_rels() lists it after LSN 2, but not before it
|
||||
assert!(!tline.list_rels(0, TESTDB, Lsn(0x10))?.contains(&TESTREL_A));
|
||||
assert!(tline.list_rels(0, TESTDB, Lsn(0x20))?.contains(&TESTREL_A));
|
||||
assert!(tline.list_rels(0, TESTDB, Lsn(0x30))?.contains(&TESTREL_A));
|
||||
|
||||
// Create a branch, check that the relation is visible there
|
||||
repo.branch_timeline(TIMELINE_ID, NEW_TIMELINE_ID, Lsn(0x30))?;
|
||||
let newtline = repo.get_timeline(NEW_TIMELINE_ID)?;
|
||||
|
||||
assert!(newtline
|
||||
.list_rels(0, TESTDB, Lsn(0x30))?
|
||||
.contains(&TESTREL_A));
|
||||
|
||||
// Drop it on the branch
|
||||
newtline.drop_relish(TESTREL_A, Lsn(0x40))?;
|
||||
newtline.advance_last_record_lsn(Lsn(0x40));
|
||||
|
||||
// Check that it's no longer listed on the branch after the point where it was dropped
|
||||
assert!(newtline
|
||||
.list_rels(0, TESTDB, Lsn(0x30))?
|
||||
.contains(&TESTREL_A));
|
||||
assert!(!newtline
|
||||
.list_rels(0, TESTDB, Lsn(0x40))?
|
||||
.contains(&TESTREL_A));
|
||||
|
||||
// Run checkpoint and garbage collection and check that it's still not visible
|
||||
newtline.checkpoint()?;
|
||||
repo.gc_iteration(Some(NEW_TIMELINE_ID), 0, true)?;
|
||||
|
||||
assert!(!newtline
|
||||
.list_rels(0, TESTDB, Lsn(0x40))?
|
||||
.contains(&TESTREL_A));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -454,13 +671,12 @@ mod tests {
|
||||
///
|
||||
#[test]
|
||||
fn test_branch() -> Result<()> {
|
||||
let repo = get_test_repo("test_branch")?;
|
||||
let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap();
|
||||
let tline = repo.create_empty_timeline(timelineid, Lsn(0x00))?;
|
||||
let repo = RepoHarness::create("test_branch")?.load();
|
||||
let tline = repo.create_empty_timeline(TIMELINE_ID)?;
|
||||
|
||||
// Import initial dummy checkpoint record, otherwise the get_timeline() call
|
||||
// after branching fails below
|
||||
tline.put_page_image(RelishTag::Checkpoint, 0, Lsn(0x10), ZERO_PAGE.clone())?;
|
||||
tline.put_page_image(RelishTag::Checkpoint, 0, Lsn(0x10), ZERO_CHECKPOINT.clone())?;
|
||||
|
||||
// Create a relation on the timeline
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
|
||||
@@ -471,11 +687,11 @@ mod tests {
|
||||
tline.put_page_image(TESTREL_B, 0, Lsn(0x20), TEST_IMG("foobar blk 0 at 2"))?;
|
||||
|
||||
tline.advance_last_record_lsn(Lsn(0x40));
|
||||
assert_current_logical_size(&tline, Lsn(0x40));
|
||||
|
||||
// Branch the history, modify relation differently on the new timeline
|
||||
let newtimelineid = ZTimelineId::from_str("AA223344556677881122334455667788").unwrap();
|
||||
repo.branch_timeline(timelineid, newtimelineid, Lsn(0x30))?;
|
||||
let newtline = repo.get_timeline(newtimelineid)?;
|
||||
repo.branch_timeline(TIMELINE_ID, NEW_TIMELINE_ID, Lsn(0x30))?;
|
||||
let newtline = repo.get_timeline(NEW_TIMELINE_ID)?;
|
||||
|
||||
newtline.put_page_image(TESTREL_A, 0, Lsn(0x40), TEST_IMG("bar blk 0 at 4"))?;
|
||||
newtline.advance_last_record_lsn(Lsn(0x40));
|
||||
@@ -498,16 +714,98 @@ mod tests {
|
||||
|
||||
assert_eq!(newtline.get_relish_size(TESTREL_B, Lsn(0x40))?.unwrap(), 1);
|
||||
|
||||
assert_current_logical_size(&tline, Lsn(0x40));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn corrupt_metadata() -> Result<()> {
|
||||
const TEST_NAME: &str = "corrupt_metadata";
|
||||
let harness = RepoHarness::create(TEST_NAME)?;
|
||||
let repo = harness.load();
|
||||
|
||||
repo.create_empty_timeline(TIMELINE_ID)?;
|
||||
drop(repo);
|
||||
|
||||
let metadata_path = harness.timeline_path(&TIMELINE_ID).join("metadata");
|
||||
|
||||
assert!(metadata_path.is_file());
|
||||
|
||||
let mut metadata_bytes = std::fs::read(&metadata_path)?;
|
||||
assert_eq!(metadata_bytes.len(), 512);
|
||||
metadata_bytes[512 - 4 - 2] ^= 1;
|
||||
std::fs::write(metadata_path, metadata_bytes)?;
|
||||
|
||||
let new_repo = harness.load();
|
||||
let err = new_repo.get_timeline(TIMELINE_ID).err().unwrap();
|
||||
assert!(err.to_string().contains("checksum"));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn future_layerfiles() -> Result<()> {
|
||||
const TEST_NAME: &str = "future_layerfiles";
|
||||
let harness = RepoHarness::create(TEST_NAME)?;
|
||||
let repo = harness.load();
|
||||
|
||||
repo.create_empty_timeline(TIMELINE_ID)?;
|
||||
drop(repo);
|
||||
|
||||
let timeline_path = harness.timeline_path(&TIMELINE_ID);
|
||||
|
||||
let make_empty_file = |filename: &str| -> std::io::Result<()> {
|
||||
let path = timeline_path.join(filename);
|
||||
|
||||
assert!(!path.exists());
|
||||
std::fs::write(&path, &[])?;
|
||||
|
||||
Ok(())
|
||||
};
|
||||
|
||||
let image_filename = format!("pg_control_0_{:016X}", 8000);
|
||||
let delta_filename = format!("pg_control_0_{:016X}_{:016X}", 8000, 8008);
|
||||
|
||||
make_empty_file(&image_filename)?;
|
||||
make_empty_file(&delta_filename)?;
|
||||
|
||||
let new_repo = harness.load();
|
||||
new_repo.get_timeline(TIMELINE_ID).unwrap();
|
||||
drop(new_repo);
|
||||
|
||||
let check_old = |filename: &str, num: u32| {
|
||||
let path = timeline_path.join(filename);
|
||||
assert!(!path.exists());
|
||||
|
||||
let backup_path = timeline_path.join(format!("{}.{}.old", filename, num));
|
||||
assert!(backup_path.exists());
|
||||
};
|
||||
|
||||
check_old(&image_filename, 0);
|
||||
check_old(&delta_filename, 0);
|
||||
|
||||
make_empty_file(&image_filename)?;
|
||||
make_empty_file(&delta_filename)?;
|
||||
|
||||
let new_repo = harness.load();
|
||||
new_repo.get_timeline(TIMELINE_ID).unwrap();
|
||||
drop(new_repo);
|
||||
|
||||
check_old(&image_filename, 0);
|
||||
check_old(&delta_filename, 0);
|
||||
check_old(&image_filename, 1);
|
||||
check_old(&delta_filename, 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Mock WAL redo manager that doesn't do much
|
||||
struct TestRedoManager {}
|
||||
struct TestRedoManager;
|
||||
|
||||
impl WalRedoManager for TestRedoManager {
|
||||
fn request_redo(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
rel: RelishTag,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
|
||||
@@ -9,11 +9,9 @@ use std::cmp::min;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::io::Seek;
|
||||
use std::io::SeekFrom;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Result;
|
||||
use anyhow::{bail, Result};
|
||||
use bytes::{Buf, Bytes};
|
||||
|
||||
use crate::relish::*;
|
||||
@@ -46,13 +44,7 @@ pub fn import_timeline_from_postgres_datadir(
|
||||
None => continue,
|
||||
|
||||
Some("pg_control") => {
|
||||
import_nonrel_file(timeline, lsn, RelishTag::ControlFile, &direntry.path())?;
|
||||
// Extract checkpoint record from pg_control and store is as separate object
|
||||
let pg_control_bytes =
|
||||
timeline.get_page_at_lsn_nowait(RelishTag::ControlFile, 0, lsn)?;
|
||||
let pg_control = ControlFileData::decode(&pg_control_bytes)?;
|
||||
let checkpoint_bytes = pg_control.checkPointCopy.encode();
|
||||
timeline.put_page_image(RelishTag::Checkpoint, 0, lsn, checkpoint_bytes)?;
|
||||
import_control_file(timeline, lsn, &direntry.path())?;
|
||||
}
|
||||
Some("pg_filenode.map") => import_nonrel_file(
|
||||
timeline,
|
||||
@@ -133,8 +125,7 @@ pub fn import_timeline_from_postgres_datadir(
|
||||
}
|
||||
// TODO: Scan pg_tblspc
|
||||
|
||||
timeline.advance_last_record_lsn(lsn.align());
|
||||
timeline.checkpoint()?;
|
||||
timeline.advance_last_record_lsn(lsn);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -182,8 +173,7 @@ fn import_relfile(
|
||||
break;
|
||||
}
|
||||
_ => {
|
||||
error!("error reading file: {:?} ({})", path, e);
|
||||
break;
|
||||
bail!("error reading file {}: {:#}", path.display(), e);
|
||||
}
|
||||
},
|
||||
};
|
||||
@@ -216,6 +206,35 @@ fn import_nonrel_file(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
///
|
||||
/// Import pg_control file into the repository.
|
||||
///
|
||||
/// The control file is imported as is, but we also extract the checkpoint record
|
||||
/// from it and store it separately.
|
||||
fn import_control_file(timeline: &dyn Timeline, lsn: Lsn, path: &Path) -> Result<()> {
|
||||
let mut file = File::open(path)?;
|
||||
let mut buffer = Vec::new();
|
||||
// read the whole file
|
||||
file.read_to_end(&mut buffer)?;
|
||||
|
||||
info!("importing control file {}", path.display());
|
||||
|
||||
// Import it as ControlFile
|
||||
timeline.put_page_image(
|
||||
RelishTag::ControlFile,
|
||||
0,
|
||||
lsn,
|
||||
Bytes::copy_from_slice(&buffer[..]),
|
||||
)?;
|
||||
|
||||
// Extract the checkpoint record and import it separately.
|
||||
let pg_control = ControlFileData::decode(&buffer)?;
|
||||
let checkpoint_bytes = pg_control.checkPointCopy.encode();
|
||||
timeline.put_page_image(RelishTag::Checkpoint, 0, lsn, checkpoint_bytes)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
///
|
||||
/// Import an SLRU segment file
|
||||
///
|
||||
@@ -248,8 +267,7 @@ fn import_slru_file(timeline: &dyn Timeline, lsn: Lsn, slru: SlruKind, path: &Pa
|
||||
break;
|
||||
}
|
||||
_ => {
|
||||
error!("error reading file: {:?} ({})", path, e);
|
||||
break;
|
||||
bail!("error reading file {}: {:#}", path.display(), e);
|
||||
}
|
||||
},
|
||||
};
|
||||
@@ -261,86 +279,6 @@ fn import_slru_file(timeline: &dyn Timeline, lsn: Lsn, slru: SlruKind, path: &Pa
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Scan PostgreSQL WAL files in given directory
|
||||
/// and load all records >= 'startpoint' into the repository.
|
||||
pub fn import_timeline_wal(walpath: &Path, timeline: &dyn Timeline, startpoint: Lsn) -> Result<()> {
|
||||
let mut waldecoder = WalStreamDecoder::new(startpoint);
|
||||
|
||||
let mut segno = startpoint.segment_number(pg_constants::WAL_SEGMENT_SIZE);
|
||||
let mut offset = startpoint.segment_offset(pg_constants::WAL_SEGMENT_SIZE);
|
||||
let mut last_lsn = startpoint;
|
||||
|
||||
let checkpoint_bytes = timeline.get_page_at_lsn_nowait(RelishTag::Checkpoint, 0, startpoint)?;
|
||||
let mut checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
|
||||
|
||||
loop {
|
||||
// FIXME: assume postgresql tli 1 for now
|
||||
let filename = XLogFileName(1, segno, pg_constants::WAL_SEGMENT_SIZE);
|
||||
let mut buf = Vec::new();
|
||||
|
||||
// Read the local WAL file
|
||||
let mut path = walpath.join(&filename);
|
||||
|
||||
// The file might only exist with a .partial suffix
|
||||
if !PathBuf::from(&path).exists() {
|
||||
path = walpath.join(filename + ".partial");
|
||||
}
|
||||
|
||||
// Slurp the WAL file
|
||||
let open_result = File::open(&path);
|
||||
if let Err(e) = &open_result {
|
||||
if e.kind() == std::io::ErrorKind::NotFound {
|
||||
break;
|
||||
}
|
||||
}
|
||||
let mut file = open_result?;
|
||||
|
||||
if offset > 0 {
|
||||
file.seek(SeekFrom::Start(offset as u64))?;
|
||||
}
|
||||
|
||||
let nread = file.read_to_end(&mut buf)?;
|
||||
if nread != pg_constants::WAL_SEGMENT_SIZE - offset as usize {
|
||||
// Maybe allow this for .partial files?
|
||||
error!("read only {} bytes from WAL file", nread);
|
||||
}
|
||||
|
||||
waldecoder.feed_bytes(&buf);
|
||||
|
||||
let mut nrecords = 0;
|
||||
loop {
|
||||
let rec = waldecoder.poll_decode();
|
||||
if rec.is_err() {
|
||||
// Assume that an error means we've reached the end of
|
||||
// a partial WAL record. So that's ok.
|
||||
trace!("WAL decoder error {:?}", rec);
|
||||
break;
|
||||
}
|
||||
if let Some((lsn, recdata)) = rec.unwrap() {
|
||||
let decoded = decode_wal_record(recdata.clone());
|
||||
save_decoded_record(&mut checkpoint, timeline, &decoded, recdata, lsn)?;
|
||||
last_lsn = lsn;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
nrecords += 1;
|
||||
}
|
||||
|
||||
info!("imported {} records up to {}", nrecords, last_lsn);
|
||||
|
||||
segno += 1;
|
||||
offset = 0;
|
||||
}

info!("reached end of WAL at {}", last_lsn);
let checkpoint_bytes = checkpoint.encode();
timeline.put_page_image(RelishTag::Checkpoint, 0, last_lsn, checkpoint_bytes)?;

timeline.advance_last_record_lsn(last_lsn.align());
timeline.checkpoint()?;
Ok(())
}
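// A minimal sketch (illustration only, not part of the change set) of the segment
// arithmetic the import loop above relies on, assuming the stock 16 MiB WAL segment size:
//
//     let seg_size: u64 = 16 * 1024 * 1024;        // pg_constants::WAL_SEGMENT_SIZE
//     let lsn: u64 = 0x0000_0001_0040_0028;        // hypothetical LSN
//     let segno = lsn / seg_size;                  // which segment file holds this LSN
//     let offset = lsn % seg_size;                 // byte offset within that segment
//     assert_eq!(segno * seg_size + offset, lsn);  // segno and offset reconstruct the LSN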
|
||||
|
||||
///
|
||||
/// Helper function to parse a WAL record and call the Timeline's PUT functions for all the
|
||||
/// relations/pages that the record affects.
|
||||
@@ -392,10 +330,14 @@ pub fn save_decoded_record(
|
||||
{
|
||||
let dropdb = XlDropDatabase::decode(&mut buf);
|
||||
|
||||
// To drop the database, we need to drop all the relations in it. Like in
|
||||
// save_xlog_dbase_create(), use the previous record's LSN in the list_rels() call
|
||||
let req_lsn = min(timeline.get_last_record_lsn(), lsn);
|
||||
|
||||
for tablespace_id in dropdb.tablespace_ids {
|
||||
let rels = timeline.list_rels(tablespace_id, dropdb.db_id, lsn)?;
|
||||
let rels = timeline.list_rels(tablespace_id, dropdb.db_id, req_lsn)?;
|
||||
for rel in rels {
|
||||
timeline.drop_relish(RelishTag::Relation(rel), lsn)?;
|
||||
timeline.drop_relish(rel, lsn)?;
|
||||
}
|
||||
trace!(
|
||||
"Drop FileNodeMap {}, {} at lsn {}",
|
||||
@@ -532,9 +474,6 @@ pub fn save_decoded_record(
|
||||
}
|
||||
}
|
||||
}
|
||||
// Now that this record has been handled, let the repository know that
|
||||
// it is up-to-date to this LSN
|
||||
timeline.advance_last_record_lsn(lsn.align());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -558,37 +497,36 @@ fn save_xlog_dbase_create(timeline: &dyn Timeline, lsn: Lsn, rec: &XlCreateDatab
|
||||
|
||||
let mut num_rels_copied = 0;
|
||||
let mut num_blocks_copied = 0;
|
||||
for src_rel in rels {
|
||||
assert_eq!(src_rel.spcnode, src_tablespace_id);
|
||||
assert_eq!(src_rel.dbnode, src_db_id);
|
||||
for rel in rels {
|
||||
if let RelishTag::Relation(src_rel) = rel {
|
||||
assert_eq!(src_rel.spcnode, src_tablespace_id);
|
||||
assert_eq!(src_rel.dbnode, src_db_id);
|
||||
|
||||
let nblocks = timeline
|
||||
.get_relish_size(RelishTag::Relation(src_rel), req_lsn)?
|
||||
.unwrap_or(0);
|
||||
let dst_rel = RelTag {
|
||||
spcnode: tablespace_id,
|
||||
dbnode: db_id,
|
||||
relnode: src_rel.relnode,
|
||||
forknum: src_rel.forknum,
|
||||
};
|
||||
let nblocks = timeline.get_relish_size(rel, req_lsn)?.unwrap_or(0);
|
||||
let dst_rel = RelTag {
|
||||
spcnode: tablespace_id,
|
||||
dbnode: db_id,
|
||||
relnode: src_rel.relnode,
|
||||
forknum: src_rel.forknum,
|
||||
};
|
||||
|
||||
// Copy content
|
||||
for blknum in 0..nblocks {
|
||||
let content =
|
||||
timeline.get_page_at_lsn_nowait(RelishTag::Relation(src_rel), blknum, req_lsn)?;
|
||||
// Copy content
|
||||
for blknum in 0..nblocks {
|
||||
let content = timeline.get_page_at_lsn(rel, blknum, req_lsn)?;
|
||||
|
||||
debug!("copying block {} from {} to {}", blknum, src_rel, dst_rel);
|
||||
debug!("copying block {} from {} to {}", blknum, src_rel, dst_rel);
|
||||
|
||||
timeline.put_page_image(RelishTag::Relation(dst_rel), blknum, lsn, content)?;
|
||||
num_blocks_copied += 1;
|
||||
timeline.put_page_image(RelishTag::Relation(dst_rel), blknum, lsn, content)?;
|
||||
num_blocks_copied += 1;
|
||||
}
|
||||
|
||||
if nblocks == 0 {
|
||||
// make sure we have some trace of the relation, even if it's empty
|
||||
timeline.put_truncation(RelishTag::Relation(dst_rel), lsn, 0)?;
|
||||
}
|
||||
|
||||
num_rels_copied += 1;
|
||||
}
|
||||
|
||||
if nblocks == 0 {
|
||||
// make sure we have some trace of the relation, even if it's empty
|
||||
timeline.put_truncation(RelishTag::Relation(dst_rel), lsn, 0)?;
|
||||
}
|
||||
|
||||
num_rels_copied += 1;
|
||||
}
|
||||
|
||||
// Copy relfilemap
|
||||
@@ -597,7 +535,7 @@ fn save_xlog_dbase_create(timeline: &dyn Timeline, lsn: Lsn, rec: &XlCreateDatab
|
||||
for tag in timeline.list_nonrels(req_lsn)? {
|
||||
if let RelishTag::FileNodeMap { spcnode, dbnode } = tag {
|
||||
if spcnode == src_tablespace_id && dbnode == src_db_id {
|
||||
let img = timeline.get_page_at_lsn_nowait(tag, 0, req_lsn)?;
|
||||
let img = timeline.get_page_at_lsn(tag, 0, req_lsn)?;
|
||||
let new_tag = RelishTag::FileNodeMap {
|
||||
spcnode: tablespace_id,
|
||||
dbnode: db_id,
|
||||
|
||||
@@ -3,51 +3,90 @@
|
||||
|
||||
use crate::branches;
|
||||
use crate::layered_repository::LayeredRepository;
|
||||
use crate::repository::Repository;
|
||||
use crate::repository::{Repository, Timeline};
|
||||
use crate::walredo::PostgresRedoManager;
|
||||
use crate::PageServerConf;
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use lazy_static::lazy_static;
|
||||
use log::info;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::str::FromStr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
use std::sync::{Arc, Mutex, MutexGuard};
|
||||
use zenith_utils::zid::{ZTenantId, ZTimelineId};
|
||||
|
||||
lazy_static! {
|
||||
pub static ref REPOSITORY: Mutex<HashMap<ZTenantId, Arc<dyn Repository>>> =
|
||||
static ref REPOSITORY: Mutex<HashMap<ZTenantId, Arc<dyn Repository>>> =
|
||||
Mutex::new(HashMap::new());
|
||||
}
|
||||
|
||||
pub fn init(conf: &'static PageServerConf) {
|
||||
let mut m = REPOSITORY.lock().unwrap();
|
||||
fn access_repository() -> MutexGuard<'static, HashMap<ZTenantId, Arc<dyn Repository>>> {
|
||||
REPOSITORY.lock().unwrap()
|
||||
}
|
||||
|
||||
pub fn init(conf: &'static PageServerConf) {
|
||||
let mut m = access_repository();
|
||||
for dir_entry in fs::read_dir(conf.tenants_path()).unwrap() {
|
||||
let tenantid =
|
||||
ZTenantId::from_str(dir_entry.unwrap().file_name().to_str().unwrap()).unwrap();
|
||||
|
||||
// Set up a WAL redo manager, for applying WAL records.
|
||||
let walredo_mgr = PostgresRedoManager::new(conf, tenantid);
|
||||
|
||||
// Set up an object repository, for actual data storage.
|
||||
let repo = Arc::new(LayeredRepository::new(
|
||||
conf,
|
||||
Arc::new(walredo_mgr),
|
||||
tenantid,
|
||||
));
|
||||
LayeredRepository::launch_checkpointer_thread(conf, repo.clone());
|
||||
|
||||
let repo = init_repo(conf, tenantid);
|
||||
info!("initialized storage for tenant: {}", &tenantid);
|
||||
m.insert(tenantid, repo);
|
||||
}
|
||||
}
|
||||
|
||||
fn init_repo(conf: &'static PageServerConf, tenant_id: ZTenantId) -> Arc<LayeredRepository> {
|
||||
// Set up a WAL redo manager, for applying WAL records.
|
||||
let walredo_mgr = PostgresRedoManager::new(conf, tenant_id);
|
||||
|
||||
// Set up an object repository, for actual data storage.
|
||||
let repo = Arc::new(LayeredRepository::new(
|
||||
conf,
|
||||
Arc::new(walredo_mgr),
|
||||
tenant_id,
|
||||
true,
|
||||
));
|
||||
LayeredRepository::launch_checkpointer_thread(conf, repo.clone());
|
||||
LayeredRepository::launch_gc_thread(conf, repo.clone());
|
||||
repo
|
||||
}
|
||||
|
||||
// TODO kb Currently unused function, will later be used when the relish storage downloads a new layer.
|
||||
// Relevant PR: https://github.com/zenithdb/zenith/pull/686
|
||||
pub fn register_relish_download(
|
||||
conf: &'static PageServerConf,
|
||||
tenant_id: ZTenantId,
|
||||
timeline_id: ZTimelineId,
|
||||
) {
|
||||
log::info!(
|
||||
"Registering new download, tenant id {}, timeline id: {}",
|
||||
tenant_id,
|
||||
timeline_id
|
||||
);
|
||||
match access_repository().entry(tenant_id) {
|
||||
Entry::Occupied(o) => init_timeline(o.get().as_ref(), timeline_id),
|
||||
Entry::Vacant(v) => {
|
||||
log::info!("New repo initialized");
|
||||
let new_repo = init_repo(conf, tenant_id);
|
||||
init_timeline(new_repo.as_ref(), timeline_id);
|
||||
v.insert(new_repo);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn init_timeline(repo: &dyn Repository, timeline_id: ZTimelineId) {
|
||||
match repo.get_timeline(timeline_id) {
|
||||
Ok(_timeline) => log::info!("Successfully initialized timeline {}", timeline_id),
|
||||
Err(e) => log::error!("Failed to init timeline {}, reason: {:#}", timeline_id, e),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_repository_for_tenant(
|
||||
conf: &'static PageServerConf,
|
||||
tenantid: ZTenantId,
|
||||
) -> Result<()> {
|
||||
let mut m = REPOSITORY.lock().unwrap();
|
||||
let mut m = access_repository();
|
||||
|
||||
// First check that the tenant doesn't exist already
|
||||
if m.get(&tenantid).is_some() {
|
||||
@@ -61,14 +100,18 @@ pub fn create_repository_for_tenant(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn insert_repository_for_tenant(tenantid: ZTenantId, repo: Arc<dyn Repository>) {
|
||||
let o = &mut REPOSITORY.lock().unwrap();
|
||||
o.insert(tenantid, repo);
|
||||
}
|
||||
|
||||
pub fn get_repository_for_tenant(tenantid: &ZTenantId) -> Result<Arc<dyn Repository>> {
|
||||
let o = &REPOSITORY.lock().unwrap();
|
||||
o.get(tenantid)
|
||||
.map(|repo| Arc::clone(repo))
|
||||
pub fn get_repository_for_tenant(tenantid: ZTenantId) -> Result<Arc<dyn Repository>> {
|
||||
access_repository()
|
||||
.get(&tenantid)
|
||||
.map(Arc::clone)
|
||||
.ok_or_else(|| anyhow!("repository not found for tenant name {}", tenantid))
|
||||
}
|
||||
|
||||
pub fn get_timeline_for_tenant(
|
||||
tenantid: ZTenantId,
|
||||
timelineid: ZTimelineId,
|
||||
) -> Result<Arc<dyn Timeline>> {
|
||||
get_repository_for_tenant(tenantid)?
|
||||
.get_timeline(timelineid)
|
||||
.with_context(|| format!("cannot fetch timeline {}", timelineid))
|
||||
}
|
||||
|
||||
@@ -54,6 +54,11 @@ impl WalStreamDecoder {
|
||||
}
|
||||
}
|
||||
|
||||
// The latest LSN position fed to the decoder.
|
||||
pub fn available(&self) -> Lsn {
|
||||
self.lsn + self.inputbuf.remaining() as u64
|
||||
}
|
||||
|
||||
pub fn feed_bytes(&mut self, buf: &[u8]) {
|
||||
self.inputbuf.extend_from_slice(buf);
|
||||
}
|
||||
@@ -159,17 +164,10 @@ impl WalStreamDecoder {
|
||||
let recordbuf = recordbuf.freeze();
|
||||
let mut buf = recordbuf.clone();
|
||||
|
||||
let xlogrec = XLogRecord::from_bytes(&mut buf);
|
||||
|
||||
// XLOG_SWITCH records are special. If we see one, we need to skip
|
||||
// to the next WAL segment.
|
||||
let xlogrec = XLogRecord::from_bytes(&mut buf);
|
||||
let mut crc = crc32c_append(0, &recordbuf[XLOG_RECORD_CRC_OFFS + 4..]);
|
||||
crc = crc32c_append(crc, &recordbuf[0..XLOG_RECORD_CRC_OFFS]);
|
||||
if crc != xlogrec.xl_crc {
|
||||
return Err(WalDecodeError {
|
||||
msg: "WAL record crc mismatch".into(),
|
||||
lsn: self.lsn,
|
||||
});
|
||||
}
|
||||
if xlogrec.is_xlog_switch_record() {
|
||||
trace!("saw xlog switch record at {}", self.lsn);
|
||||
self.padlen =
|
||||
@@ -179,6 +177,15 @@ impl WalStreamDecoder {
|
||||
self.padlen = self.lsn.calc_padding(8u32) as u32;
|
||||
}
|
||||
|
||||
let mut crc = crc32c_append(0, &recordbuf[XLOG_RECORD_CRC_OFFS + 4..]);
|
||||
crc = crc32c_append(crc, &recordbuf[0..XLOG_RECORD_CRC_OFFS]);
|
||||
if crc != xlogrec.xl_crc {
|
||||
return Err(WalDecodeError {
|
||||
msg: "WAL record crc mismatch".into(),
|
||||
lsn: self.lsn,
|
||||
});
|
||||
}
|
||||
|
||||
// Always align resulting LSN on 0x8 boundary -- that is important for getPage()
|
||||
// and WalReceiver integration. Since this code is used both for WalReceiver and
|
||||
// initial WAL import let's force alignment right here.
|
||||
|
||||
@@ -10,25 +10,22 @@ use crate::restore_local_repo;
|
||||
use crate::tenant_mgr;
|
||||
use crate::waldecoder::*;
|
||||
use crate::PageServerConf;
|
||||
use anyhow::{Error, Result};
|
||||
use anyhow::{bail, Error, Result};
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use postgres::fallible_iterator::FallibleIterator;
|
||||
use postgres::replication::ReplicationIter;
|
||||
use postgres::{Client, NoTls, SimpleQueryMessage, SimpleQueryRow};
|
||||
use postgres_ffi::xlog_utils::*;
|
||||
use postgres_ffi::*;
|
||||
use postgres_protocol::message::backend::ReplicationMessage;
|
||||
use postgres_types::PgLsn;
|
||||
use std::cmp::{max, min};
|
||||
use std::cell::Cell;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::fs::{File, OpenOptions};
|
||||
use std::io::{Seek, SeekFrom, Write};
|
||||
use std::str::FromStr;
|
||||
use std::sync::Mutex;
|
||||
use std::thread;
|
||||
use std::thread::sleep;
|
||||
use std::thread_local;
|
||||
use std::time::{Duration, SystemTime};
|
||||
use zenith_utils::lsn::Lsn;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
@@ -46,6 +43,13 @@ lazy_static! {
|
||||
Mutex::new(HashMap::new());
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
// Boolean that is true only for WAL receiver threads
|
||||
//
|
||||
// This is used in `wait_lsn` to guard against usage that might lead to a deadlock.
|
||||
pub(crate) static IS_WAL_RECEIVER: Cell<bool> = Cell::new(false);
|
||||
}
|
||||
|
||||
// Launch a new WAL receiver, or tell one that's running about change in connection string
|
||||
pub fn launch_wal_receiver(
|
||||
conf: &'static PageServerConf,
|
||||
@@ -69,7 +73,8 @@ pub fn launch_wal_receiver(
|
||||
let _walreceiver_thread = thread::Builder::new()
|
||||
.name("WAL receiver thread".into())
|
||||
.spawn(move || {
|
||||
thread_main(conf, timelineid, &tenantid);
|
||||
IS_WAL_RECEIVER.with(|c| c.set(true));
|
||||
thread_main(conf, timelineid, tenantid);
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
@@ -90,7 +95,7 @@ fn get_wal_producer_connstr(timelineid: ZTimelineId) -> String {
|
||||
//
|
||||
// This is the entry point for the WAL receiver thread.
|
||||
//
|
||||
fn thread_main(conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid: &ZTenantId) {
|
||||
fn thread_main(conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid: ZTenantId) {
|
||||
info!(
|
||||
"WAL receiver thread started for timeline : '{}'",
|
||||
timelineid
|
||||
@@ -117,10 +122,10 @@ fn thread_main(conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid:
|
||||
}
|
||||
|
||||
fn walreceiver_main(
|
||||
conf: &PageServerConf,
|
||||
_conf: &PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
wal_producer_connstr: &str,
|
||||
tenantid: &ZTenantId,
|
||||
tenantid: ZTenantId,
|
||||
) -> Result<(), Error> {
|
||||
// Connect to the database in replication mode.
|
||||
info!("connecting to {:?}", wal_producer_connstr);
|
||||
@@ -146,8 +151,7 @@ fn walreceiver_main(
|
||||
let end_of_wal = Lsn::from(u64::from(identify.xlogpos));
|
||||
let mut caught_up = false;
|
||||
|
||||
let repository = tenant_mgr::get_repository_for_tenant(tenantid)?;
|
||||
let timeline = repository.get_timeline(timelineid).unwrap();
|
||||
let timeline = tenant_mgr::get_timeline_for_tenant(tenantid, timelineid)?;
|
||||
|
||||
//
|
||||
// Start streaming the WAL, from where we left off previously.
|
||||
@@ -158,7 +162,7 @@ fn walreceiver_main(
|
||||
let mut startpoint = last_rec_lsn;
|
||||
|
||||
if startpoint == Lsn(0) {
|
||||
error!("No previous WAL position");
|
||||
bail!("No previous WAL position");
|
||||
}
|
||||
|
||||
// There might be some padding after the last full record, skip it.
|
||||
@@ -176,7 +180,7 @@ fn walreceiver_main(
|
||||
|
||||
let mut waldecoder = WalStreamDecoder::new(startpoint);
|
||||
|
||||
let checkpoint_bytes = timeline.get_page_at_lsn_nowait(RelishTag::Checkpoint, 0, startpoint)?;
|
||||
let checkpoint_bytes = timeline.get_page_at_lsn(RelishTag::Checkpoint, 0, startpoint)?;
|
||||
let mut checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
|
||||
trace!("CheckPoint.nextXid = {}", checkpoint.nextXid.value);
|
||||
|
||||
@@ -188,16 +192,6 @@ fn walreceiver_main(
|
||||
let data = xlog_data.data();
|
||||
let startlsn = Lsn::from(xlog_data.wal_start());
|
||||
let endlsn = startlsn + data.len() as u64;
|
||||
let prev_last_rec_lsn = last_rec_lsn;
|
||||
|
||||
write_wal_file(
|
||||
conf,
|
||||
startlsn,
|
||||
&timelineid,
|
||||
pg_constants::WAL_SEGMENT_SIZE,
|
||||
data,
|
||||
tenantid,
|
||||
)?;
|
||||
|
||||
trace!("received XLogData between {} and {}", startlsn, endlsn);
|
||||
|
||||
@@ -220,7 +214,6 @@ fn walreceiver_main(
|
||||
recdata,
|
||||
lsn,
|
||||
)?;
|
||||
last_rec_lsn = lsn;
|
||||
|
||||
let new_checkpoint_bytes = checkpoint.encode();
|
||||
// Check if checkpoint data was updated by save_decoded_record
|
||||
@@ -232,34 +225,11 @@ fn walreceiver_main(
|
||||
new_checkpoint_bytes,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Somewhat arbitrarily, if we have at least 10 complete wal segments (16 MB each),
|
||||
// "checkpoint" the repository to flush all the changes from WAL we've processed
|
||||
// so far to disk. After this, we don't need the original WAL anymore, and it
|
||||
// can be removed. This is probably too aggressive for production, but it's useful
|
||||
// to expose bugs now.
|
||||
//
|
||||
// TODO: We don't actually dare to remove the WAL. It's useful for debugging,
|
||||
// and we might need it for logical decoding or other things in the future. Although
|
||||
// we should also be able to fetch it back from the WAL safekeepers or S3 if
|
||||
// needed.
|
||||
if prev_last_rec_lsn.segment_number(pg_constants::WAL_SEGMENT_SIZE)
|
||||
!= last_rec_lsn.segment_number(pg_constants::WAL_SEGMENT_SIZE)
|
||||
{
|
||||
info!("switched segment {} to {}", prev_last_rec_lsn, last_rec_lsn);
|
||||
let (oldest_segno, newest_segno) = find_wal_file_range(
|
||||
conf,
|
||||
&timelineid,
|
||||
pg_constants::WAL_SEGMENT_SIZE,
|
||||
last_rec_lsn,
|
||||
tenantid,
|
||||
)?;
|
||||
|
||||
if newest_segno - oldest_segno >= 10 {
|
||||
// TODO: This is where we could remove WAL older than last_rec_lsn.
|
||||
//remove_wal_files(timelineid, pg_constants::WAL_SEGMENT_SIZE, last_rec_lsn)?;
|
||||
}
|
||||
// Now that this record has been fully handled, including updating the
|
||||
// checkpoint data, let the repository know that it is up-to-date to this LSN
|
||||
timeline.advance_last_record_lsn(lsn);
|
||||
last_rec_lsn = lsn;
|
||||
}
|
||||
|
||||
if !caught_up && endlsn >= end_of_wal {
|
||||
@@ -283,7 +253,7 @@ fn walreceiver_main(
|
||||
);
|
||||
|
||||
if reply_requested {
|
||||
Some(timeline.get_last_record_lsn())
|
||||
Some(last_rec_lsn)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@@ -307,53 +277,15 @@ fn walreceiver_main(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn find_wal_file_range(
|
||||
conf: &PageServerConf,
|
||||
timeline: &ZTimelineId,
|
||||
wal_seg_size: usize,
|
||||
written_upto: Lsn,
|
||||
tenant: &ZTenantId,
|
||||
) -> Result<(u64, u64)> {
|
||||
let written_upto_segno = written_upto.segment_number(wal_seg_size);
|
||||
|
||||
let mut oldest_segno = written_upto_segno;
|
||||
let mut newest_segno = written_upto_segno;
|
||||
// Scan the WAL directory and count how many WAL files we could remove
|
||||
let wal_dir = conf.wal_dir_path(timeline, tenant);
|
||||
for entry in fs::read_dir(wal_dir)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
|
||||
if path.is_dir() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let filename = path.file_name().unwrap().to_str().unwrap();
|
||||
|
||||
if IsXLogFileName(filename) {
|
||||
let (segno, _tli) = XLogFromFileName(filename, wal_seg_size);
|
||||
|
||||
if segno > written_upto_segno {
|
||||
// that's strange.
|
||||
warn!("there is a WAL file from future at {}", path.display());
|
||||
continue;
|
||||
}
|
||||
|
||||
oldest_segno = min(oldest_segno, segno);
|
||||
newest_segno = max(newest_segno, segno);
|
||||
}
|
||||
}
|
||||
// FIXME: would be good to assert that there are no gaps in the WAL files
|
||||
|
||||
Ok((oldest_segno, newest_segno))
|
||||
}
|
||||
|
||||
/// Data returned from the postgres `IDENTIFY_SYSTEM` command
|
||||
///
|
||||
/// See the [postgres docs] for more details.
|
||||
///
|
||||
/// [postgres docs]: https://www.postgresql.org/docs/current/protocol-replication.html
|
||||
#[derive(Debug)]
|
||||
// As of nightly 2021-09-11, fields that are only read by the type's `Debug` impl still count as
|
||||
// unused. Relevant issue: https://github.com/rust-lang/rust/issues/88900
|
||||
#[allow(dead_code)]
|
||||
pub struct IdentifySystem {
|
||||
systemid: u64,
|
||||
timeline: u32,
|
||||
@@ -394,98 +326,3 @@ pub fn identify_system(client: &mut Client) -> Result<IdentifySystem, Error> {
|
||||
Err(IdentifyError.into())
|
||||
}
|
||||
}
|
||||
|
||||
fn write_wal_file(
|
||||
conf: &PageServerConf,
|
||||
startpos: Lsn,
|
||||
timelineid: &ZTimelineId,
|
||||
wal_seg_size: usize,
|
||||
buf: &[u8],
|
||||
tenantid: &ZTenantId,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut bytes_left: usize = buf.len();
|
||||
let mut bytes_written: usize = 0;
|
||||
let mut partial;
|
||||
let mut start_pos = startpos;
|
||||
const ZERO_BLOCK: &[u8] = &[0u8; XLOG_BLCKSZ];
|
||||
|
||||
let wal_dir = conf.wal_dir_path(timelineid, tenantid);
|
||||
|
||||
/* Extract WAL location for this block */
|
||||
let mut xlogoff = start_pos.segment_offset(wal_seg_size);
|
||||
|
||||
while bytes_left != 0 {
|
||||
let bytes_to_write;
|
||||
|
||||
/*
|
||||
* If crossing a WAL boundary, only write up until we reach wal
|
||||
* segment size.
|
||||
*/
|
||||
if xlogoff + bytes_left > wal_seg_size {
|
||||
bytes_to_write = wal_seg_size - xlogoff;
|
||||
} else {
|
||||
bytes_to_write = bytes_left;
|
||||
}
|
||||
|
||||
/* Open file */
|
||||
let segno = start_pos.segment_number(wal_seg_size);
|
||||
let wal_file_name = XLogFileName(
|
||||
1, // FIXME: always use Postgres timeline 1
|
||||
segno,
|
||||
wal_seg_size,
|
||||
);
|
||||
let wal_file_path = wal_dir.join(wal_file_name.clone());
|
||||
let wal_file_partial_path = wal_dir.join(wal_file_name.clone() + ".partial");
|
||||
|
||||
{
|
||||
let mut wal_file: File;
|
||||
/* Try to open already completed segment */
|
||||
if let Ok(file) = OpenOptions::new().write(true).open(&wal_file_path) {
|
||||
wal_file = file;
|
||||
partial = false;
|
||||
} else if let Ok(file) = OpenOptions::new().write(true).open(&wal_file_partial_path) {
|
||||
/* Try to open an existing partial file */
|
||||
wal_file = file;
|
||||
partial = true;
|
||||
} else {
|
||||
/* Create and fill new partial file */
|
||||
partial = true;
|
||||
match OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.open(&wal_file_partial_path)
|
||||
{
|
||||
Ok(mut file) => {
|
||||
for _ in 0..(wal_seg_size / XLOG_BLCKSZ) {
|
||||
file.write_all(ZERO_BLOCK)?;
|
||||
}
|
||||
wal_file = file;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to open log file {:?}: {}", &wal_file_path, e);
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
wal_file.seek(SeekFrom::Start(xlogoff as u64))?;
|
||||
wal_file.write_all(&buf[bytes_written..(bytes_written + bytes_to_write)])?;
|
||||
|
||||
// FIXME: Flush the file
|
||||
//wal_file.sync_all()?;
|
||||
}
|
||||
/* Write was successful, advance our position */
|
||||
bytes_written += bytes_to_write;
|
||||
bytes_left -= bytes_to_write;
|
||||
start_pos += bytes_to_write as u64;
|
||||
xlogoff += bytes_to_write;
|
||||
|
||||
/* Did we reach the end of a WAL segment? */
|
||||
if start_pos.segment_offset(wal_seg_size) == 0 {
|
||||
xlogoff = 0;
|
||||
if partial {
|
||||
fs::rename(&wal_file_partial_path, &wal_file_path)?;
|
||||
}
|
||||
}
|
||||
}
Ok(())
}
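// A minimal sketch (illustration only, hypothetical values) of the boundary split done
// in write_wal_file above: a single write never crosses a segment edge, so each loop
// iteration writes at most the bytes remaining in the current segment.
//
//     let wal_seg_size: usize = 16 * 1024 * 1024;
//     let xlogoff: usize = wal_seg_size - 100;       // 100 bytes left in this segment
//     let bytes_left: usize = 300;
//     let bytes_to_write = bytes_left.min(wal_seg_size - xlogoff);
//     assert_eq!(bytes_to_write, 100);               // the other 200 bytes go to the next file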
|
||||
|
||||
@@ -22,8 +22,7 @@ use byteorder::{ByteOrder, LittleEndian};
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::cell::RefCell;
|
||||
use serde::Serialize;
|
||||
use std::fs;
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::prelude::*;
|
||||
@@ -43,7 +42,7 @@ use zenith_utils::lsn::Lsn;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
|
||||
use crate::relish::*;
|
||||
use crate::repository::{Timeline, WALRecord};
|
||||
use crate::repository::WALRecord;
|
||||
use crate::waldecoder::XlMultiXactCreate;
|
||||
use crate::waldecoder::XlXactParsedRecord;
|
||||
use crate::PageServerConf;
|
||||
@@ -60,7 +59,7 @@ use postgres_ffi::XLogRecord;
|
||||
/// In Postgres `BufferTag` structure is used for exactly the same purpose.
|
||||
/// [See more related comments here](https://github.com/postgres/postgres/blob/99c5852e20a0987eca1c38ba0c09329d4076b6a0/src/include/storage/buf_internals.h#L91).
|
||||
///
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Serialize, Deserialize)]
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Serialize)]
|
||||
pub struct BufferTag {
|
||||
pub rel: RelTag,
|
||||
pub blknum: u32,
|
||||
@@ -79,7 +78,6 @@ pub trait WalRedoManager: Send + Sync {
|
||||
/// the records.
|
||||
fn request_redo(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
rel: RelishTag,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
@@ -97,7 +95,6 @@ pub struct DummyRedoManager {}
|
||||
impl crate::walredo::WalRedoManager for DummyRedoManager {
|
||||
fn request_redo(
|
||||
&self,
|
||||
_timeline: &dyn Timeline,
|
||||
_rel: RelishTag,
|
||||
_blknum: u32,
|
||||
_lsn: Lsn,
|
||||
@@ -178,7 +175,6 @@ impl WalRedoManager for PostgresRedoManager {
|
||||
///
|
||||
fn request_redo(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
rel: RelishTag,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
@@ -209,7 +205,7 @@ impl WalRedoManager for PostgresRedoManager {
|
||||
.block_on(PostgresRedoProcess::launch(self.conf, &self.tenantid))?;
|
||||
*process_guard = Some(p);
|
||||
}
|
||||
let process = (*process_guard).as_ref().unwrap();
|
||||
let process = process_guard.as_mut().unwrap();
|
||||
|
||||
self.runtime
|
||||
.block_on(self.handle_apply_request(process, &request))
|
||||
@@ -219,13 +215,6 @@ impl WalRedoManager for PostgresRedoManager {
|
||||
WAL_REDO_WAIT_TIME.observe(lock_time.duration_since(start_time).as_secs_f64());
|
||||
WAL_REDO_TIME.observe(end_time.duration_since(lock_time).as_secs_f64());
|
||||
|
||||
if let Ok(page) = result {
|
||||
let mut buf = BytesMut::new();
|
||||
buf.extend_from_slice(&page);
|
||||
self.set_hint_bits(timeline, &mut buf, lsn, &request.records);
|
||||
return Ok(buf.freeze());
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
}
|
||||
@@ -252,123 +241,12 @@ impl PostgresRedoManager {
|
||||
}
|
||||
}
|
||||
|
||||
fn xid_status(&self, timeline: &dyn Timeline, xid: u32, lsn: Lsn) -> u8 {
|
||||
let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
if let Ok(clog_page) = timeline.get_page_at_lsn_nowait(
|
||||
RelishTag::Slru {
|
||||
slru: SlruKind::Clog,
|
||||
segno,
|
||||
},
|
||||
rpageno,
|
||||
lsn,
|
||||
) {
|
||||
postgres_ffi::nonrelfile_utils::transaction_id_get_status(xid, &clog_page[..])
|
||||
} else {
|
||||
pg_constants::TRANSACTION_STATUS_IN_PROGRESS
|
||||
}
|
||||
}
|
||||
|
||||
fn set_hint_bits(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
page: &mut BytesMut,
|
||||
lsn: Lsn,
|
||||
records: &Vec<WALRecord>,
|
||||
) {
|
||||
let mut flags = LittleEndian::read_u16(
|
||||
&page[pg_constants::PD_FLAGS_OFFSET..pg_constants::PD_FLAGS_OFFSET + 2],
|
||||
);
|
||||
if (flags & (pg_constants::PD_HEAP_RELATION | pg_constants::PD_NONHEAP_RELATION)) == 0 {
|
||||
// If type of relation was not determined yet,
|
||||
// then do it now
|
||||
for r in records {
|
||||
let xl_rmid = r.rec[pg_constants::XL_RMID_OFFS];
|
||||
if xl_rmid == pg_constants::RM_HEAP_ID || xl_rmid == pg_constants::RM_HEAP2_ID {
|
||||
flags |= pg_constants::PD_HEAP_RELATION;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (flags & pg_constants::PD_HEAP_RELATION) == 0 {
|
||||
flags |= pg_constants::PD_NONHEAP_RELATION;
|
||||
}
|
||||
LittleEndian::write_u16(
|
||||
&mut page[pg_constants::PD_FLAGS_OFFSET..pg_constants::PD_FLAGS_OFFSET + 2],
|
||||
flags,
|
||||
);
|
||||
}
|
||||
if (flags & pg_constants::PD_HEAP_RELATION) != 0 {
|
||||
// Set hint bits for heap relation page
|
||||
let pd_lower = LittleEndian::read_u16(
|
||||
&page[pg_constants::PD_LOWER_OFFSET..pg_constants::PD_LOWER_OFFSET + 2],
|
||||
) as usize;
|
||||
let mut tid_offs = pg_constants::SIZE_OF_PAGE_HEADER_DATA;
|
||||
while tid_offs < pd_lower {
|
||||
let tid = LittleEndian::read_u32(&page[tid_offs..tid_offs + 4]);
|
||||
let lp_off = (tid & 0x7FFF) as usize;
|
||||
if ((tid >> 15) & 3) == pg_constants::LP_NORMAL {
|
||||
// normal item pointer
|
||||
let t_xmin = LittleEndian::read_u32(
|
||||
&page[lp_off + pg_constants::T_XMIN_OFFS
|
||||
..lp_off + pg_constants::T_XMIN_OFFS + 4],
|
||||
);
|
||||
let t_xmax = LittleEndian::read_u32(
|
||||
&page[lp_off + pg_constants::T_XMAX_OFFS
|
||||
..lp_off + pg_constants::T_XMAX_OFFS + 4],
|
||||
);
|
||||
let mut t_infomask = LittleEndian::read_u16(
|
||||
&page[lp_off + pg_constants::T_INFOMASK_OFFS
|
||||
..lp_off + pg_constants::T_INFOMASK_OFFS + 2],
|
||||
);
|
||||
if (t_infomask
|
||||
& (pg_constants::HEAP_XMIN_COMMITTED | pg_constants::HEAP_XMIN_INVALID))
|
||||
== 0
|
||||
&& t_xmin != 0
|
||||
{
|
||||
let status = self.xid_status(timeline, t_xmin, lsn);
|
||||
if status == pg_constants::TRANSACTION_STATUS_COMMITTED {
|
||||
t_infomask |= pg_constants::HEAP_XMIN_COMMITTED;
|
||||
} else if status == pg_constants::TRANSACTION_STATUS_ABORTED {
|
||||
t_infomask |= pg_constants::HEAP_XMIN_INVALID;
|
||||
}
|
||||
LittleEndian::write_u16(
|
||||
&mut page[lp_off + pg_constants::T_INFOMASK_OFFS
|
||||
..lp_off + pg_constants::T_INFOMASK_OFFS + 2],
|
||||
t_infomask,
|
||||
);
|
||||
}
|
||||
if (t_infomask
|
||||
& (pg_constants::HEAP_XMAX_COMMITTED
|
||||
| pg_constants::HEAP_XMAX_INVALID
|
||||
| pg_constants::HEAP_XMAX_IS_MULTI))
|
||||
== 0
|
||||
&& t_xmax != 0
|
||||
{
|
||||
let status = self.xid_status(timeline, t_xmax, lsn);
|
||||
if status == pg_constants::TRANSACTION_STATUS_COMMITTED {
|
||||
t_infomask |= pg_constants::HEAP_XMAX_COMMITTED;
|
||||
} else if status == pg_constants::TRANSACTION_STATUS_ABORTED {
|
||||
t_infomask |= pg_constants::HEAP_XMAX_INVALID;
|
||||
}
|
||||
LittleEndian::write_u16(
|
||||
&mut page[lp_off + pg_constants::T_INFOMASK_OFFS
|
||||
..lp_off + pg_constants::T_INFOMASK_OFFS + 2],
|
||||
t_infomask,
|
||||
);
|
||||
}
|
||||
}
|
||||
tid_offs += 4;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Process one request for WAL redo.
|
||||
///
|
||||
async fn handle_apply_request(
|
||||
&self,
|
||||
process: &PostgresRedoProcess,
|
||||
process: &mut PostgresRedoProcess,
|
||||
request: &WalRedoRequest,
|
||||
) -> Result<Bytes, WalRedoError> {
|
||||
let rel = request.rel;
|
||||
@@ -420,9 +298,11 @@ impl PostgresRedoManager {
|
||||
// Transaction manager stuff
|
||||
let rec_segno = match rel {
|
||||
RelishTag::Slru { slru, segno } => {
|
||||
if slru != SlruKind::Clog {
|
||||
panic!("Not valid XACT relish tag {:?}", rel);
|
||||
}
|
||||
assert!(
|
||||
slru == SlruKind::Clog,
|
||||
"Not valid XACT relish tag {:?}",
|
||||
rel
|
||||
);
|
||||
segno
|
||||
}
|
||||
_ => panic!("Not valid XACT relish tag {:?}", rel),
|
||||
@@ -542,7 +422,7 @@ impl PostgresRedoManager {
|
||||
);
|
||||
|
||||
if let Err(e) = apply_result {
|
||||
error!("could not apply WAL records: {}", e);
|
||||
error!("could not apply WAL records: {:#}", e);
|
||||
result = Err(WalRedoError::IoError(e));
|
||||
} else {
|
||||
let img = apply_result.unwrap();
|
||||
@@ -559,8 +439,8 @@ impl PostgresRedoManager {
|
||||
/// Handle to the Postgres WAL redo process
|
||||
///
|
||||
struct PostgresRedoProcess {
|
||||
stdin: RefCell<ChildStdin>,
|
||||
stdout: RefCell<ChildStdout>,
|
||||
stdin: ChildStdin,
|
||||
stdout: ChildStdout,
|
||||
}
|
||||
|
||||
impl PostgresRedoProcess {
|
||||
@@ -580,7 +460,7 @@ impl PostgresRedoProcess {
|
||||
if datadir.exists() {
|
||||
info!("directory {:?} exists, removing", &datadir);
|
||||
if let Err(e) = fs::remove_dir_all(&datadir) {
|
||||
error!("could not remove old wal-redo-datadir: {:?}", e);
|
||||
error!("could not remove old wal-redo-datadir: {:#}", e);
|
||||
}
|
||||
}
|
||||
info!("running initdb in {:?}", datadir.display());
|
||||
@@ -653,10 +533,7 @@ impl PostgresRedoProcess {
|
||||
};
|
||||
tokio::spawn(f_stderr);
|
||||
|
||||
Ok(PostgresRedoProcess {
|
||||
stdin: RefCell::new(stdin),
|
||||
stdout: RefCell::new(stdout),
|
||||
})
|
||||
Ok(PostgresRedoProcess { stdin, stdout })
|
||||
}
|
||||
|
||||
//
|
||||
@@ -664,13 +541,14 @@ impl PostgresRedoProcess {
|
||||
// new page image.
|
||||
//
|
||||
async fn apply_wal_records(
|
||||
&self,
|
||||
&mut self,
|
||||
tag: BufferTag,
|
||||
base_img: Option<Bytes>,
|
||||
records: &[WALRecord],
|
||||
) -> Result<Bytes, std::io::Error> {
|
||||
let mut stdin = self.stdin.borrow_mut();
|
||||
let mut stdout = self.stdout.borrow_mut();
|
||||
let stdout = &mut self.stdout;
|
||||
// Buffer the writes to avoid a lot of small syscalls.
|
||||
let mut stdin = tokio::io::BufWriter::new(&mut self.stdin);
|
||||
|
||||
// We do three things simultaneously: send the old base image and WAL records to
|
||||
// the child process's stdin, read the result from child's stdout, and forward any logging
|
||||
|
||||
@@ -58,7 +58,7 @@ impl ControlFileData {
|
||||
let expectedcrc = crc32c::crc32c(&buf[0..OFFSETOF_CRC]);
|
||||
|
||||
// Use serde to deserialize the input as a ControlFileData struct.
|
||||
let controlfile = ControlFileData::des(buf)?;
|
||||
let controlfile = ControlFileData::des_prefix(buf)?;
|
||||
|
||||
// Check the CRC
|
||||
if expectedcrc != controlfile.crc {
|
||||
|
||||
@@ -32,5 +32,5 @@ pub const fn transaction_id_precedes(id1: TransactionId, id2: TransactionId) ->
|
||||
}

let diff = id1.wrapping_sub(id2) as i32;
return diff < 0;
diff < 0
}
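// A minimal sketch (illustration only, hypothetical XIDs) of the wraparound comparison
// used above: interpreting the wrapping difference as a signed 32-bit value says which
// transaction id is logically older on the XID circle.
//
//     let id1: u32 = 5;
//     let id2: u32 = u32::MAX - 2;
//     let diff = id1.wrapping_sub(id2) as i32;       // 8, a small positive value
//     assert!(diff > 0);                             // so id1 does not precede id2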
|
||||
|
||||
@@ -46,7 +46,6 @@ pub const SIZE_OF_PAGE_HEADER: u16 = 24;
|
||||
pub const BITS_PER_HEAPBLOCK: u16 = 2;
|
||||
pub const HEAPBLOCKS_PER_PAGE: u16 = (BLCKSZ - SIZE_OF_PAGE_HEADER) * 8 / BITS_PER_HEAPBLOCK;
|
||||
|
||||
pub const TRANSACTION_STATUS_IN_PROGRESS: u8 = 0x00;
|
||||
pub const TRANSACTION_STATUS_COMMITTED: u8 = 0x01;
|
||||
pub const TRANSACTION_STATUS_ABORTED: u8 = 0x02;
|
||||
pub const TRANSACTION_STATUS_SUB_COMMITTED: u8 = 0x03;
|
||||
@@ -192,31 +191,6 @@ pub const XLP_LONG_HEADER: u16 = 0x0002;
|
||||
|
||||
pub const PG_MAJORVERSION: &str = "14";
|
||||
|
||||
// Zenith specific page flags used to distinguish heap and non-heap relations
|
||||
pub const PD_HEAP_RELATION: u16 = 0x10;
|
||||
pub const PD_NONHEAP_RELATION: u16 = 0x20;
|
||||
|
||||
// bufpage.h
|
||||
pub const PD_FLAGS_OFFSET: usize = 10; // PageHeaderData.pd_flags
|
||||
pub const PD_LOWER_OFFSET: usize = 12; // PageHeaderData.pd_lower
|
||||
|
||||
// itemid.h
|
||||
pub const LP_NORMAL: u32 = 1;
|
||||
|
||||
// htup_details.h
|
||||
pub const T_XMIN_OFFS: usize = 0;
|
||||
pub const T_XMAX_OFFS: usize = 4;
|
||||
pub const T_INFOMASK_OFFS: usize = 4 * 3 + 2 * 3 + 2;
|
||||
pub const HEAP_XMIN_COMMITTED: u16 = 0x0100; /* t_xmin committed */
|
||||
pub const HEAP_XMIN_INVALID: u16 = 0x0200; /* t_xmin invalid/aborted */
|
||||
pub const HEAP_XMAX_COMMITTED: u16 = 0x0400; /* t_xmax committed */
|
||||
pub const HEAP_XMAX_INVALID: u16 = 0x0800; /* t_xmax invalid/aborted */
|
||||
pub const HEAP_XMAX_IS_MULTI: u16 = 0x1000; /* t_xmax is a MultiXactId */
|
||||
pub const SIZE_OF_PAGE_HEADER_DATA: usize = 24;
|
||||
|
||||
// xlogrecord.h
|
||||
pub const XL_RMID_OFFS: usize = 17;
|
||||
|
||||
// List of subdirectories inside pgdata.
|
||||
// Copied from src/bin/initdb/initdb.c
|
||||
pub const PGDATA_SUBDIRS: [&str; 22] = [
|
||||
|
||||
@@ -16,14 +16,17 @@ use crate::XLogPageHeaderData;
|
||||
use crate::XLogRecord;
|
||||
use crate::XLOG_PAGE_MAGIC;
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
use bytes::{Buf, Bytes};
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use crc32c::*;
|
||||
use log::*;
|
||||
use std::cmp::max;
|
||||
use std::cmp::min;
|
||||
use std::fs::{self, File};
|
||||
use std::io::prelude::*;
|
||||
use std::io::SeekFrom;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::SystemTime;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
@@ -125,8 +128,10 @@ fn find_end_of_wal_segment(
|
||||
segno: XLogSegNo,
|
||||
tli: TimeLineID,
|
||||
wal_seg_size: usize,
|
||||
) -> u32 {
|
||||
let mut offs: usize = 0;
|
||||
start_offset: usize, // start reading at this point
|
||||
) -> Result<u32> {
|
||||
// step back to the beginning of the page to read it in...
|
||||
let mut offs: usize = start_offset - start_offset % XLOG_BLCKSZ;
|
||||
let mut contlen: usize = 0;
|
||||
let mut wal_crc: u32 = 0;
|
||||
let mut crc: u32 = 0;
|
||||
@@ -135,24 +140,33 @@ fn find_end_of_wal_segment(
|
||||
let file_name = XLogFileName(tli, segno, wal_seg_size);
|
||||
let mut last_valid_rec_pos: usize = 0;
|
||||
let mut file = File::open(data_dir.join(file_name.clone() + ".partial")).unwrap();
|
||||
file.seek(SeekFrom::Start(offs as u64))?;
|
||||
let mut rec_hdr = [0u8; XLOG_RECORD_CRC_OFFS];
|
||||
|
||||
while offs < wal_seg_size {
|
||||
// we are at the beginning of the page; read it in
|
||||
if offs % XLOG_BLCKSZ == 0 {
|
||||
if let Ok(bytes_read) = file.read(&mut buf) {
|
||||
if bytes_read != buf.len() {
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
let bytes_read = file.read(&mut buf)?;
|
||||
if bytes_read != buf.len() {
|
||||
bail!(
|
||||
"failed to read {} bytes from {} at {}",
|
||||
XLOG_BLCKSZ,
|
||||
file_name,
|
||||
offs
|
||||
);
|
||||
}
|
||||
|
||||
let xlp_magic = LittleEndian::read_u16(&buf[0..2]);
|
||||
let xlp_info = LittleEndian::read_u16(&buf[2..4]);
|
||||
let xlp_rem_len = LittleEndian::read_u32(&buf[XLP_REM_LEN_OFFS..XLP_REM_LEN_OFFS + 4]);
|
||||
// this is expected in current usage when valid WAL starts after page header
|
||||
if xlp_magic != XLOG_PAGE_MAGIC as u16 {
|
||||
info!("Invalid WAL file {}.partial magic {}", file_name, xlp_magic);
|
||||
break;
|
||||
trace!(
|
||||
"invalid WAL file {}.partial magic {} at {:?}",
|
||||
file_name,
|
||||
xlp_magic,
|
||||
Lsn(XLogSegNoOffsetToRecPtr(segno, offs as u32, wal_seg_size)),
|
||||
);
|
||||
}
|
||||
if offs == 0 {
|
||||
offs = XLOG_SIZE_OF_XLOG_LONG_PHD;
|
||||
@@ -162,11 +176,18 @@ fn find_end_of_wal_segment(
|
||||
} else {
|
||||
offs += XLOG_SIZE_OF_XLOG_SHORT_PHD;
|
||||
}
|
||||
// ... and step forward again if asked
|
||||
offs = max(offs, start_offset);
|
||||
|
||||
// beginning of the next record
|
||||
} else if contlen == 0 {
|
||||
let page_offs = offs % XLOG_BLCKSZ;
|
||||
let xl_tot_len = LittleEndian::read_u32(&buf[page_offs..page_offs + 4]) as usize;
|
||||
if xl_tot_len == 0 {
|
||||
info!(
|
||||
"find_end_of_wal_segment reached zeros at {:?}",
|
||||
Lsn(XLogSegNoOffsetToRecPtr(segno, offs as u32, wal_seg_size))
|
||||
);
|
||||
break; // zeros, reached the end
|
||||
}
|
||||
last_valid_rec_pos = offs;
|
||||
@@ -217,7 +238,7 @@ fn find_end_of_wal_segment(
|
||||
}
|
||||
}
|
||||
}
|
||||
last_valid_rec_pos as u32
|
||||
Ok(last_valid_rec_pos as u32)
|
||||
}
|
||||
|
||||
///
|
||||
@@ -230,7 +251,8 @@ pub fn find_end_of_wal(
|
||||
data_dir: &Path,
|
||||
wal_seg_size: usize,
|
||||
precise: bool,
|
||||
) -> (XLogRecPtr, TimeLineID) {
|
||||
start_lsn: Lsn, // start reading WAL at this point or later
|
||||
) -> Result<(XLogRecPtr, TimeLineID)> {
|
||||
let mut high_segno: XLogSegNo = 0;
|
||||
let mut high_tli: TimeLineID = 0;
|
||||
let mut high_ispartial = false;
|
||||
@@ -272,19 +294,32 @@ pub fn find_end_of_wal(
|
||||
high_segno += 1;
|
||||
} else if precise {
|
||||
/* otherwise locate last record in last partial segment */
|
||||
high_offs = find_end_of_wal_segment(data_dir, high_segno, high_tli, wal_seg_size);
|
||||
if start_lsn.segment_number(wal_seg_size) > high_segno {
|
||||
bail!(
|
||||
"provided start_lsn {:?} is beyond highest segno {:?} available",
|
||||
start_lsn,
|
||||
high_segno,
|
||||
);
|
||||
}
|
||||
high_offs = find_end_of_wal_segment(
|
||||
data_dir,
|
||||
high_segno,
|
||||
high_tli,
|
||||
wal_seg_size,
|
||||
start_lsn.segment_offset(wal_seg_size),
|
||||
)?;
|
||||
}
|
||||
let high_ptr = XLogSegNoOffsetToRecPtr(high_segno, high_offs, wal_seg_size);
|
||||
return (high_ptr, high_tli);
|
||||
return Ok((high_ptr, high_tli));
|
||||
}
|
||||
(0, 0)
|
||||
Ok((0, 0))
|
||||
}
|
||||
|
||||
pub fn main() {
|
||||
let mut data_dir = PathBuf::new();
|
||||
data_dir.push(".");
|
||||
let wal_seg_size = 16 * 1024 * 1024;
|
||||
let (wal_end, tli) = find_end_of_wal(&data_dir, wal_seg_size, true);
|
||||
let (wal_end, tli) = find_end_of_wal(&data_dir, wal_seg_size, true, Lsn(0)).unwrap();
|
||||
println!(
|
||||
"wal_end={:>08X}{:>08X}, tli={}",
|
||||
(wal_end >> 32) as u32,
|
||||
@@ -463,7 +498,7 @@ mod tests {
|
||||
let wal_seg_size = 16 * 1024 * 1024;
|
||||
|
||||
// 3. Check end_of_wal on non-partial WAL segment (we treat it as fully populated)
|
||||
let (wal_end, tli) = find_end_of_wal(&wal_dir, wal_seg_size, true);
|
||||
let (wal_end, tli) = find_end_of_wal(&wal_dir, wal_seg_size, true, Lsn(0)).unwrap();
|
||||
let wal_end = Lsn(wal_end);
|
||||
println!("wal_end={}, tli={}", wal_end, tli);
|
||||
assert_eq!(wal_end, "0/2000000".parse::<Lsn>().unwrap());
|
||||
@@ -489,7 +524,7 @@ mod tests {
|
||||
wal_dir.join("000000010000000000000001.partial"),
|
||||
)
|
||||
.unwrap();
|
||||
let (wal_end, tli) = find_end_of_wal(&wal_dir, wal_seg_size, true);
|
||||
let (wal_end, tli) = find_end_of_wal(&wal_dir, wal_seg_size, true, Lsn(0)).unwrap();
|
||||
let wal_end = Lsn(wal_end);
|
||||
println!("wal_end={}, tli={}", wal_end, tli);
|
||||
assert_eq!(wal_end, waldump_wal_end);
|
||||
|
||||
@@ -14,9 +14,10 @@ rand = "0.8.3"
|
||||
hex = "0.4.3"
|
||||
serde = "1"
|
||||
serde_json = "1"
|
||||
tokio = { version = "1.7.1", features = ["full"] }
|
||||
tokio-postgres = "0.7.2"
|
||||
tokio = "1.11"
|
||||
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
|
||||
clap = "2.33.0"
|
||||
rustls = "0.19.1"
|
||||
reqwest = { version = "0.11", features = ["blocking", "json"] }
|
||||
|
||||
zenith_utils = { path = "../zenith_utils" }
|
||||
|
||||
@@ -1,17 +1,14 @@
|
||||
use anyhow::{bail, Result};
|
||||
use anyhow::{bail, Context, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
net::{IpAddr, SocketAddr},
|
||||
};
|
||||
use std::net::{SocketAddr, ToSocketAddrs};
|
||||
|
||||
pub struct CPlaneApi {
|
||||
// address: SocketAddr,
|
||||
auth_endpoint: &'static str,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct DatabaseInfo {
|
||||
pub host: IpAddr, // TODO: allow host name here too
|
||||
pub host: String,
|
||||
pub port: u16,
|
||||
pub dbname: String,
|
||||
pub user: String,
|
||||
@@ -19,8 +16,13 @@ pub struct DatabaseInfo {
|
||||
}
|
||||
|
||||
impl DatabaseInfo {
|
||||
pub fn socket_addr(&self) -> SocketAddr {
|
||||
SocketAddr::new(self.host, self.port)
|
||||
pub fn socket_addr(&self) -> Result<SocketAddr> {
|
||||
let host_port = format!("{}:{}", self.host, self.port);
|
||||
host_port
|
||||
.to_socket_addrs()
|
||||
.with_context(|| format!("cannot resolve {} to SocketAddr", host_port))?
|
||||
.next()
|
||||
.ok_or_else(|| anyhow::Error::msg("cannot resolve at least one SocketAddr"))
|
||||
}
|
||||
|
||||
pub fn conn_string(&self) -> String {
|
||||
@@ -31,62 +33,35 @@ impl DatabaseInfo {
|
||||
}
|
||||
}
|
||||
|
||||
// mock cplane api
|
||||
impl CPlaneApi {
|
||||
pub fn new(_address: &SocketAddr) -> CPlaneApi {
|
||||
CPlaneApi {
|
||||
// address: address.clone(),
|
||||
}
|
||||
pub fn new(auth_endpoint: &'static str) -> CPlaneApi {
|
||||
CPlaneApi { auth_endpoint }
|
||||
}
|
||||
|
||||
pub fn check_auth(&self, user: &str, md5_response: &[u8], salt: &[u8; 4]) -> Result<()> {
|
||||
// passwords for both is "mypass"
|
||||
let auth_map: HashMap<_, &str> = vec![
|
||||
("stas@zenith", "716ee6e1c4a9364d66285452c47402b1"),
|
||||
("stas2@zenith", "3996f75df64c16a8bfaf01301b61d582"),
|
||||
]
|
||||
.into_iter()
|
||||
.collect();
|
||||
pub fn authenticate_proxy_request(
|
||||
&self,
|
||||
user: &str,
|
||||
database: &str,
|
||||
md5_response: &[u8],
|
||||
salt: &[u8; 4],
|
||||
) -> Result<DatabaseInfo> {
|
||||
let mut url = reqwest::Url::parse(self.auth_endpoint)?;
|
||||
url.query_pairs_mut()
|
||||
.append_pair("login", user)
|
||||
.append_pair("database", database)
|
||||
.append_pair("md5response", std::str::from_utf8(md5_response)?)
|
||||
.append_pair("salt", &hex::encode(salt));
|
||||
|
||||
let stored_hash = auth_map
|
||||
.get(&user)
|
||||
.ok_or_else(|| anyhow::Error::msg("user not found"))?;
|
||||
let salted_stored_hash = format!(
|
||||
"md5{:x}",
|
||||
md5::compute([stored_hash.as_bytes(), salt].concat())
|
||||
);
|
||||
println!("cplane request: {}", url.as_str());
|
||||
|
||||
let received_hash = std::str::from_utf8(md5_response)?;
|
||||
let resp = reqwest::blocking::get(url)?;
|
||||
|
||||
println!(
|
||||
"auth: {} rh={} sh={} ssh={} {:?}",
|
||||
user, received_hash, stored_hash, salted_stored_hash, salt
|
||||
);
|
||||
|
||||
if received_hash == salted_stored_hash {
|
||||
Ok(())
|
||||
if resp.status().is_success() {
|
||||
let conn_info: DatabaseInfo = serde_json::from_str(resp.text()?.as_str())?;
|
||||
println!("got conn info: #{:?}", conn_info);
|
||||
Ok(conn_info)
|
||||
} else {
|
||||
bail!("Auth failed")
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_database_uri(&self, _user: &str, _database: &str) -> Result<DatabaseInfo> {
|
||||
Ok(DatabaseInfo {
|
||||
host: "127.0.0.1".parse()?,
|
||||
port: 5432,
|
||||
dbname: "stas".to_string(),
|
||||
user: "stas".to_string(),
|
||||
password: "mypass".to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
// pub fn create_database(&self, _user: &String, _database: &String) -> Result<DatabaseInfo> {
|
||||
// Ok(DatabaseInfo {
|
||||
// host: "127.0.0.1".parse()?,
|
||||
// port: 5432,
|
||||
// dbname: "stas".to_string(),
|
||||
// user: "stas".to_string(),
|
||||
// password: "mypass".to_string(),
|
||||
// })
|
||||
// }
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ pub struct ProxyConf {
|
||||
pub redirect_uri: String,
|
||||
|
||||
/// control plane address where we would check auth.
|
||||
pub cplane_address: SocketAddr,
|
||||
pub auth_endpoint: String,
|
||||
|
||||
pub ssl_config: Option<Arc<ServerConfig>>,
|
||||
}
|
||||
@@ -56,8 +56,7 @@ fn configure_ssl(arg_matches: &ArgMatches) -> anyhow::Result<Option<Arc<ServerCo
|
||||
|
||||
let key = {
|
||||
let key_bytes = std::fs::read(key_path).context("SSL key file")?;
|
||||
let mut keys = pemfile::rsa_private_keys(&mut &key_bytes[..])
|
||||
.or_else(|_| pemfile::pkcs8_private_keys(&mut &key_bytes[..]))
|
||||
let mut keys = pemfile::pkcs8_private_keys(&mut &key_bytes[..])
|
||||
.map_err(|_| anyhow!("couldn't read TLS keys"))?;
|
||||
ensure!(keys.len() == 1, "keys.len() = {} (should be 1)", keys.len());
|
||||
keys.pop().unwrap()
|
||||
@@ -102,6 +101,14 @@ fn main() -> anyhow::Result<()> {
|
||||
.help("redirect unauthenticated users to given uri")
|
||||
.default_value("http://localhost:3000/psql_session/"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("auth-endpoint")
|
||||
.short("a")
|
||||
.long("auth-endpoint")
|
||||
.takes_value(true)
|
||||
.help("redirect unauthenticated users to given uri")
|
||||
.default_value("http://localhost:3000/authenticate_proxy_request/"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("ssl-key")
|
||||
.short("k")
|
||||
@@ -122,7 +129,7 @@ fn main() -> anyhow::Result<()> {
|
||||
proxy_address: arg_matches.value_of("proxy").unwrap().parse()?,
|
||||
mgmt_address: arg_matches.value_of("mgmt").unwrap().parse()?,
|
||||
redirect_uri: arg_matches.value_of("uri").unwrap().parse()?,
|
||||
cplane_address: "127.0.0.1:3000".parse()?,
|
||||
auth_endpoint: arg_matches.value_of("auth-endpoint").unwrap().parse()?,
|
||||
ssl_config: configure_ssl(&arg_matches)?,
|
||||
};
|
||||
let state = ProxyState {
|
||||
|
||||
@@ -5,7 +5,7 @@ use std::{
|
||||
|
||||
use anyhow::bail;
|
||||
use bytes::Bytes;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde::Deserialize;
|
||||
use zenith_utils::{
|
||||
postgres_backend::{self, query_from_cstring, AuthType, PostgresBackend},
|
||||
pq_proto::{BeMessage, SINGLE_COL_ROWDESC},
|
||||
@@ -49,7 +49,7 @@ struct MgmtHandler {
|
||||
// "host": "127.0.0.1",
|
||||
// "port": 5432,
|
||||
// "dbname": "stas",
|
||||
// "user": "stas"
|
||||
// "user": "stas",
|
||||
// "password": "mypass"
|
||||
// }
|
||||
// }
|
||||
@@ -60,13 +60,16 @@ struct MgmtHandler {
|
||||
// "Failure": "oops"
|
||||
// }
|
||||
// }
|
||||
#[derive(Serialize, Deserialize)]
|
||||
//
|
||||
// // to test manually by sending a query to mgmt interface:
|
||||
// psql -h 127.0.0.1 -p 9999 -c '{"session_id":"4f10dde522e14739","result":{"Success":{"host":"127.0.0.1","port":5432,"dbname":"stas","user":"stas","password":"stas"}}}'
|
||||
#[derive(Deserialize)]
|
||||
pub struct PsqlSessionResponse {
|
||||
session_id: String,
|
||||
result: PsqlSessionResult,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[derive(Deserialize)]
|
||||
pub enum PsqlSessionResult {
|
||||
Success(DatabaseInfo),
|
||||
Failure(String),
|
||||
@@ -78,34 +81,47 @@ impl postgres_backend::Handler for MgmtHandler {
|
||||
pgb: &mut PostgresBackend,
|
||||
query_string: Bytes,
|
||||
) -> anyhow::Result<()> {
|
||||
let query_string = query_from_cstring(query_string);
|
||||
let res = try_process_query(self, pgb, query_string);
|
||||
// intercept and log error message
|
||||
if res.is_err() {
|
||||
println!("Mgmt query failed: #{:?}", res);
|
||||
}
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
println!("Got mgmt query: '{}'", std::str::from_utf8(&query_string)?);
|
||||
fn try_process_query(
|
||||
mgmt: &mut MgmtHandler,
|
||||
pgb: &mut PostgresBackend,
|
||||
query_string: Bytes,
|
||||
) -> anyhow::Result<()> {
|
||||
let query_string = query_from_cstring(query_string);
|
||||
|
||||
let resp: PsqlSessionResponse = serde_json::from_slice(&query_string)?;
|
||||
println!("Got mgmt query: '{}'", std::str::from_utf8(&query_string)?);
|
||||
|
||||
let waiters = self.state.waiters.lock().unwrap();
|
||||
let resp: PsqlSessionResponse = serde_json::from_slice(&query_string)?;
|
||||
|
||||
let sender = waiters
|
||||
.get(&resp.session_id)
|
||||
.ok_or_else(|| anyhow::Error::msg("psql_session_id is not found"))?;
|
||||
let waiters = mgmt.state.waiters.lock().unwrap();
|
||||
|
||||
match resp.result {
|
||||
PsqlSessionResult::Success(db_info) => {
|
||||
sender.send(Ok(db_info))?;
|
||||
let sender = waiters
|
||||
.get(&resp.session_id)
|
||||
.ok_or_else(|| anyhow::Error::msg("psql_session_id is not found"))?;
|
||||
|
||||
pgb.write_message_noflush(&SINGLE_COL_ROWDESC)?
|
||||
.write_message_noflush(&BeMessage::DataRow(&[Some(b"ok")]))?
|
||||
.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
|
||||
pgb.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
match resp.result {
|
||||
PsqlSessionResult::Success(db_info) => {
|
||||
sender.send(Ok(db_info))?;
|
||||
|
||||
PsqlSessionResult::Failure(message) => {
|
||||
sender.send(Err(anyhow::Error::msg(message.clone())))?;
|
||||
pgb.write_message_noflush(&SINGLE_COL_ROWDESC)?
|
||||
.write_message_noflush(&BeMessage::DataRow(&[Some(b"ok")]))?
|
||||
.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
|
||||
pgb.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
bail!("psql session request failed: {}", message)
|
||||
}
|
||||
PsqlSessionResult::Failure(message) => {
|
||||
sender.send(Err(anyhow::Error::msg(message.clone())))?;
|
||||
|
||||
bail!("psql session request failed: {}", message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ pub fn proxy_conn_main(
|
||||
) -> anyhow::Result<()> {
|
||||
let mut conn = ProxyConnection {
|
||||
state,
|
||||
cplane: CPlaneApi::new(&state.conf.cplane_address),
|
||||
cplane: CPlaneApi::new(&state.conf.auth_endpoint),
|
||||
user: "".into(),
|
||||
database: "".into(),
|
||||
pgb: PostgresBackend::new(
|
||||
@@ -80,6 +80,8 @@ pub fn proxy_conn_main(
|
||||
conn.handle_new_user()?
|
||||
};
|
||||
|
||||
// XXX: move that inside handle_new_user/handle_existing_user to be able to
|
||||
// report wrong connection error.
|
||||
proxy_pass(conn.pgb, db_info)
|
||||
}
|
||||
|
||||
@@ -172,21 +174,31 @@ impl ProxyConnection {
|
||||
.split_last()
|
||||
.ok_or_else(|| anyhow::Error::msg("unexpected password message"))?;
|
||||
|
||||
if let Err(e) = self.check_auth_md5(md5_response) {
|
||||
self.pgb
|
||||
.write_message(&BeMessage::ErrorResponse(format!("{}", e)))?;
|
||||
bail!("auth failed: {}", e);
|
||||
} else {
|
||||
self.pgb
|
||||
.write_message_noflush(&BeMessage::AuthenticationOk)?;
|
||||
self.pgb
|
||||
.write_message_noflush(&BeMessage::ParameterStatus)?;
|
||||
self.pgb.write_message(&BeMessage::ReadyForQuery)?;
|
||||
}
|
||||
}
|
||||
match self.cplane.authenticate_proxy_request(
|
||||
self.user.as_str(),
|
||||
self.database.as_str(),
|
||||
md5_response,
|
||||
&self.md5_salt,
|
||||
) {
|
||||
Err(e) => {
|
||||
self.pgb
|
||||
.write_message(&BeMessage::ErrorResponse(format!("{}", e)))?;
|
||||
|
||||
// ok, we are authorized
|
||||
self.cplane.get_database_uri(&self.user, &self.database)
|
||||
bail!("auth failed: {}", e);
|
||||
}
|
||||
Ok(conn_info) => {
|
||||
self.pgb
|
||||
.write_message_noflush(&BeMessage::AuthenticationOk)?;
|
||||
self.pgb
|
||||
.write_message_noflush(&BeMessage::ParameterStatus)?;
|
||||
self.pgb.write_message(&BeMessage::ReadyForQuery)?;
|
||||
|
||||
Ok(conn_info)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
bail!("protocol violation");
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_new_user(&mut self) -> anyhow::Result<DatabaseInfo> {
|
||||
@@ -232,17 +244,11 @@ databases without opening the browser.
|
||||
|
||||
Ok(dbinfo)
|
||||
}
|
||||
|
||||
fn check_auth_md5(&self, md5_response: &[u8]) -> anyhow::Result<()> {
|
||||
assert!(self.is_existing_user());
|
||||
self.cplane
|
||||
.check_auth(self.user.as_str(), md5_response, &self.md5_salt)
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a TCP connection to a postgres database, authenticate with it, and receive the ReadyForQuery message
|
||||
async fn connect_to_db(db_info: DatabaseInfo) -> anyhow::Result<tokio::net::TcpStream> {
|
||||
let mut socket = tokio::net::TcpStream::connect(db_info.socket_addr()).await?;
|
||||
let mut socket = tokio::net::TcpStream::connect(db_info.socket_addr()?).await?;
|
||||
let config = db_info.conn_string().parse::<tokio_postgres::Config>()?;
|
||||
let _ = config.connect_raw(&mut socket, NoTls).await?;
|
||||
Ok(socket)
|
||||
@@ -273,7 +279,9 @@ fn proxy(
|
||||
|
||||
/// Proxy a client connection to a postgres database
|
||||
fn proxy_pass(pgb: PostgresBackend, db_info: DatabaseInfo) -> anyhow::Result<()> {
|
||||
let runtime = tokio::runtime::Builder::new_current_thread().build()?;
|
||||
let runtime = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()?;
|
||||
let db_stream = runtime.block_on(connect_to_db(db_info))?;
|
||||
let db_stream = db_stream.into_std()?;
|
||||
db_stream.set_nonblocking(false)?;
|
||||
|
||||
@@ -8,4 +8,8 @@
# warnings and errors right in the editor.
# In vscode, this setting is Rust-analyzer>Check On Save:Command

cargo clippy "${@:2}" -- -A clippy::new_without_default -A clippy::manual_range_contains -A clippy::comparison_chain

# * `-A unknown_lints` – do not warn about unknown lint suppressions
#   that people with newer toolchains might use
# * `-D warnings` - fail on any warnings (`cargo` returns non-zero exit status)
cargo clippy "${@:2}" --all-targets --all-features --all --tests -- -A unknown_lints -D warnings

@@ -9,6 +9,8 @@ psycopg2 = "*"
|
||||
typing-extensions = "*"
|
||||
pyjwt = {extras = ["crypto"], version = "*"}
|
||||
requests = "*"
|
||||
pytest-xdist = "*"
|
||||
asyncpg = "*"
|
||||
|
||||
[dev-packages]
|
||||
yapf = "*"
|
||||
|
||||
test_runner/Pipfile.lock (generated, 116 lines changed)
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"_meta": {
|
||||
"hash": {
|
||||
"sha256": "b666740289d9c82797e5c39b2a7f0074c865c9183ee878ce4fa5cda7928506ea"
|
||||
"sha256": "3cdc048691824d0b93912b6b78a0aa01dc98f278212c1badb0cc2edbd2103c3a"
|
||||
},
|
||||
"pipfile-spec": 6,
|
||||
"requires": {
|
||||
@@ -16,6 +16,25 @@
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"asyncpg": {
|
||||
"hashes": [
|
||||
"sha256:129d501f3d30616afd51eb8d3142ef51ba05374256bd5834cec3ef4956a9b317",
|
||||
"sha256:29ef6ae0a617fc13cc2ac5dc8e9b367bb83cba220614b437af9b67766f4b6b20",
|
||||
"sha256:41704c561d354bef01353835a7846e5606faabbeb846214dfcf666cf53319f18",
|
||||
"sha256:556b0e92e2b75dc028b3c4bc9bd5162ddf0053b856437cf1f04c97f9c6837d03",
|
||||
"sha256:8ff5073d4b654e34bd5eaadc01dc4d68b8a9609084d835acd364cd934190a08d",
|
||||
"sha256:a458fc69051fbb67d995fdda46d75a012b5d6200f91e17d23d4751482640ed4c",
|
||||
"sha256:a7095890c96ba36f9f668eb552bb020dddb44f8e73e932f8573efc613ee83843",
|
||||
"sha256:a738f4807c853623d3f93f0fea11f61be6b0e5ca16ea8aeb42c2c7ee742aa853",
|
||||
"sha256:c4fc0205fe4ddd5aeb3dfdc0f7bafd43411181e1f5650189608e5971cceacff1",
|
||||
"sha256:dd2fa063c3344823487d9ddccb40802f02622ddf8bf8a6cc53885ee7a2c1c0c6",
|
||||
"sha256:ddffcb85227bf39cd1bedd4603e0082b243cf3b14ced64dce506a15b05232b83",
|
||||
"sha256:e36c6806883786b19551bb70a4882561f31135dc8105a59662e0376cf5b2cbc5",
|
||||
"sha256:eed43abc6ccf1dc02e0d0efc06ce46a411362f3358847c6b0ec9a43426f91ece"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.24.0"
|
||||
},
|
||||
"attrs": {
|
||||
"hashes": [
|
||||
"sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1",
|
||||
@@ -83,30 +102,43 @@
|
||||
},
|
||||
"charset-normalizer": {
|
||||
"hashes": [
|
||||
"sha256:0c8911edd15d19223366a194a513099a302055a962bca2cec0f54b8b63175d8b",
|
||||
"sha256:f23667ebe1084be45f6ae0538e4a5a865206544097e4e8bbcacf42cd02a348f3"
|
||||
"sha256:5d209c0a931f215cee683b6445e2d77677e7e75e159f78def0db09d68fafcaa6",
|
||||
"sha256:5ec46d183433dcbd0ab716f2d7f29d8dee50505b3fdb40c6b985c7c4f5a3591f"
|
||||
],
|
||||
"markers": "python_version >= '3'",
|
||||
"version": "==2.0.4"
|
||||
"version": "==2.0.6"
|
||||
},
|
||||
"cryptography": {
|
||||
"hashes": [
|
||||
"sha256:0f1212a66329c80d68aeeb39b8a16d54ef57071bf22ff4e521657b27372e327d",
|
||||
"sha256:1e056c28420c072c5e3cb36e2b23ee55e260cb04eee08f702e0edfec3fb51959",
|
||||
"sha256:240f5c21aef0b73f40bb9f78d2caff73186700bf1bc6b94285699aff98cc16c6",
|
||||
"sha256:26965837447f9c82f1855e0bc8bc4fb910240b6e0d16a664bb722df3b5b06873",
|
||||
"sha256:37340614f8a5d2fb9aeea67fd159bfe4f5f4ed535b1090ce8ec428b2f15a11f2",
|
||||
"sha256:3d10de8116d25649631977cb37da6cbdd2d6fa0e0281d014a5b7d337255ca713",
|
||||
"sha256:3d8427734c781ea5f1b41d6589c293089704d4759e34597dce91014ac125aad1",
|
||||
"sha256:7ec5d3b029f5fa2b179325908b9cd93db28ab7b85bb6c1db56b10e0b54235177",
|
||||
"sha256:8e56e16617872b0957d1c9742a3f94b43533447fd78321514abbe7db216aa250",
|
||||
"sha256:b01fd6f2737816cb1e08ed4807ae194404790eac7ad030b34f2ce72b332f5586",
|
||||
"sha256:bf40af59ca2465b24e54f671b2de2c59257ddc4f7e5706dbd6930e26823668d3",
|
||||
"sha256:de4e5f7f68220d92b7637fc99847475b59154b7a1b3868fb7385337af54ac9ca",
|
||||
"sha256:eb8cc2afe8b05acbd84a43905832ec78e7b3873fb124ca190f574dca7389a87d",
|
||||
"sha256:ee77aa129f481be46f8d92a1a7db57269a2f23052d5f2433b4621bb457081cc9"
|
||||
"sha256:0a7dcbcd3f1913f664aca35d47c1331fce738d44ec34b7be8b9d332151b0b01e",
|
||||
"sha256:1eb7bb0df6f6f583dd8e054689def236255161ebbcf62b226454ab9ec663746b",
|
||||
"sha256:21ca464b3a4b8d8e86ba0ee5045e103a1fcfac3b39319727bc0fc58c09c6aff7",
|
||||
"sha256:34dae04a0dce5730d8eb7894eab617d8a70d0c97da76b905de9efb7128ad7085",
|
||||
"sha256:3520667fda779eb788ea00080124875be18f2d8f0848ec00733c0ec3bb8219fc",
|
||||
"sha256:3c4129fc3fdc0fa8e40861b5ac0c673315b3c902bbdc05fc176764815b43dd1d",
|
||||
"sha256:3fa3a7ccf96e826affdf1a0a9432be74dc73423125c8f96a909e3835a5ef194a",
|
||||
"sha256:5b0fbfae7ff7febdb74b574055c7466da334a5371f253732d7e2e7525d570498",
|
||||
"sha256:695104a9223a7239d155d7627ad912953b540929ef97ae0c34c7b8bf30857e89",
|
||||
"sha256:8695456444f277af73a4877db9fc979849cd3ee74c198d04fc0776ebc3db52b9",
|
||||
"sha256:94cc5ed4ceaefcbe5bf38c8fba6a21fc1d365bb8fb826ea1688e3370b2e24a1c",
|
||||
"sha256:94fff993ee9bc1b2440d3b7243d488c6a3d9724cc2b09cdb297f6a886d040ef7",
|
||||
"sha256:9965c46c674ba8cc572bc09a03f4c649292ee73e1b683adb1ce81e82e9a6a0fb",
|
||||
"sha256:a00cf305f07b26c351d8d4e1af84ad7501eca8a342dedf24a7acb0e7b7406e14",
|
||||
"sha256:a305600e7a6b7b855cd798e00278161b681ad6e9b7eca94c721d5f588ab212af",
|
||||
"sha256:cd65b60cfe004790c795cc35f272e41a3df4631e2fb6b35aa7ac6ef2859d554e",
|
||||
"sha256:d2a6e5ef66503da51d2110edf6c403dc6b494cc0082f85db12f54e9c5d4c3ec5",
|
||||
"sha256:d9ec0e67a14f9d1d48dd87a2531009a9b251c02ea42851c060b25c782516ff06",
|
||||
"sha256:f44d141b8c4ea5eb4dbc9b3ad992d45580c1d22bf5e24363f2fbf50c2d7ae8a7"
|
||||
],
|
||||
"version": "==3.4.7"
|
||||
"version": "==3.4.8"
|
||||
},
|
||||
"execnet": {
|
||||
"hashes": [
|
||||
"sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5",
|
||||
"sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
|
||||
"version": "==1.9.0"
|
||||
},
|
||||
"idna": {
|
||||
"hashes": [
|
||||
@@ -133,11 +165,11 @@
|
||||
},
|
||||
"pluggy": {
|
||||
"hashes": [
|
||||
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
|
||||
"sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
|
||||
"sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159",
|
||||
"sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==0.13.1"
|
||||
"markers": "python_version >= '3.6'",
|
||||
"version": "==1.0.0"
|
||||
},
|
||||
"psycopg2": {
|
||||
"hashes": [
|
||||
@@ -191,11 +223,27 @@
|
||||
},
|
||||
"pytest": {
|
||||
"hashes": [
|
||||
"sha256:50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b",
|
||||
"sha256:91ef2131a9bd6be8f76f1f08eac5c5317221d6ad1e143ae03894b862e8976890"
|
||||
"sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89",
|
||||
"sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==6.2.4"
|
||||
"version": "==6.2.5"
|
||||
},
|
||||
"pytest-forked": {
|
||||
"hashes": [
|
||||
"sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca",
|
||||
"sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
|
||||
"version": "==1.3.0"
|
||||
},
|
||||
"pytest-xdist": {
|
||||
"hashes": [
|
||||
"sha256:7b61ebb46997a0820a263553179d6d1e25a8c50d8a8620cd1aa1e20e3be99168",
|
||||
"sha256:89b330316f7fc475f999c81b577c2b926c9569f3d397ae432c0c2e2496d61ff9"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.4.0"
|
||||
},
|
||||
"requests": {
|
||||
"hashes": [
|
||||
@@ -215,12 +263,12 @@
|
||||
},
|
||||
"typing-extensions": {
|
||||
"hashes": [
|
||||
"sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497",
|
||||
"sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342",
|
||||
"sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"
|
||||
"sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e",
|
||||
"sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7",
|
||||
"sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.10.0.0"
|
||||
"version": "==3.10.0.2"
|
||||
},
|
||||
"urllib3": {
|
||||
"hashes": [
|
||||
@@ -309,12 +357,12 @@
|
||||
},
|
||||
"typing-extensions": {
|
||||
"hashes": [
|
||||
"sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497",
|
||||
"sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342",
|
||||
"sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"
|
||||
"sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e",
|
||||
"sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7",
|
||||
"sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.10.0.0"
|
||||
"version": "==3.10.0.2"
|
||||
},
|
||||
"yapf": {
|
||||
"hashes": [
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
|
||||
from contextlib import closing
|
||||
from typing import Iterator
|
||||
from uuid import uuid4
|
||||
import psycopg2
|
||||
from fixtures.zenith_fixtures import Postgres, ZenithCli, ZenithPageserver, PgBin
|
||||
from fixtures.zenith_fixtures import PortDistributor, Postgres, ZenithCli, ZenithPageserver, PgBin
|
||||
import pytest
|
||||
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
|
||||
|
||||
def test_pageserver_auth(pageserver_auth_enabled: ZenithPageserver):
|
||||
ps = pageserver_auth_enabled
|
||||
|
||||
@@ -42,7 +46,8 @@ def test_compute_auth_to_pageserver(
|
||||
pageserver_auth_enabled: ZenithPageserver,
|
||||
repo_dir: str,
|
||||
with_wal_acceptors: bool,
|
||||
pg_bin: PgBin
|
||||
pg_bin: PgBin,
|
||||
port_distributor: PortDistributor,
|
||||
):
|
||||
ps = pageserver_auth_enabled
|
||||
# since we are in progress of refactoring protocols between compute safekeeper and page server
|
||||
@@ -59,7 +64,7 @@ def test_compute_auth_to_pageserver(
|
||||
repo_dir=repo_dir,
|
||||
pg_bin=pg_bin,
|
||||
tenant_id=ps.initial_tenant,
|
||||
port=55432, # FIXME port distribution is hardcoded in tests and in cli
|
||||
port=port_distributor.get_port(),
|
||||
).create_start(
|
||||
branch,
|
||||
wal_acceptors=wa_factory.get_connstrs() if with_wal_acceptors else None,
|
||||
|
||||
@@ -33,11 +33,11 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
main_cur.execute('''
INSERT INTO foo
SELECT 'long string to consume some space' || g
FROM generate_series(1, 100000) g
FROM generate_series(1, 200000) g
''')
main_cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_b = main_cur.fetchone()[0]
print('LSN after 100100 rows: ' + lsn_b)
print('LSN after 200100 rows: ' + lsn_b)

# Branch at the point where only 100 rows were inserted
zenith_cli.run(["branch", "test_branch_behind_hundred", "test_branch_behind@" + lsn_a])
@@ -46,15 +46,15 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
main_cur.execute('''
INSERT INTO foo
SELECT 'long string to consume some space' || g
FROM generate_series(1, 100000) g
FROM generate_series(1, 200000) g
''')
main_cur.execute('SELECT pg_current_wal_insert_lsn()')

main_cur.execute('SELECT pg_current_wal_insert_lsn()')
lsn_c = main_cur.fetchone()[0]
print('LSN after 200100 rows: ' + lsn_c)
print('LSN after 400100 rows: ' + lsn_c)

# Branch at the point where only 200 rows were inserted
# Branch at the point where only 200100 rows were inserted
zenith_cli.run(["branch", "test_branch_behind_more", "test_branch_behind@" + lsn_b])

pg_hundred = postgres.create_start("test_branch_behind_hundred")
@@ -70,11 +70,11 @@ def test_branch_behind(zenith_cli, pageserver: ZenithPageserver, postgres: Postg
more_pg_conn = pg_more.connect()
more_cur = more_pg_conn.cursor()
more_cur.execute('SELECT count(*) FROM foo')
assert more_cur.fetchone() == (100100, )
assert more_cur.fetchone() == (200100, )

# All the rows are visible on the main branch
main_cur.execute('SELECT count(*) FROM foo')
assert main_cur.fetchone() == (200100, )
assert main_cur.fetchone() == (400100, )

# Check bad lsn's for branching

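The test above follows one pattern throughout: capture pg_current_wal_insert_lsn() after a batch of inserts, branch at that LSN, and later verify that the branch sees only the rows written before that point. A condensed sketch of the pattern, using the same zenith_cli and postgres fixtures (the helper name is hypothetical):

# Illustrative sketch, not part of the diff: capture an LSN, branch at it, and
# start a compute node on the new branch.
def branch_at_current_lsn(zenith_cli, postgres, main_cur, new_branch, parent):
    main_cur.execute('SELECT pg_current_wal_insert_lsn()')
    lsn = main_cur.fetchone()[0]
    zenith_cli.run(["branch", new_branch, f"{parent}@{lsn}"])
    return postgres.create_start(new_branch), lsn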
@@ -3,7 +3,7 @@ import os
|
||||
|
||||
from contextlib import closing
|
||||
|
||||
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver, check_restored_datadir_content
|
||||
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
|
||||
|
||||
@@ -48,6 +48,7 @@ def test_dropdb(
|
||||
pageserver: ZenithPageserver,
|
||||
postgres: PostgresFactory,
|
||||
pg_bin,
|
||||
test_output_dir
|
||||
):
|
||||
zenith_cli.run(["branch", "test_dropdb", "empty"])
|
||||
|
||||
@@ -98,4 +99,4 @@ def test_dropdb(
|
||||
assert os.path.isdir(dbpath) == False
|
||||
|
||||
# Check that we restore the content of the datadir correctly
|
||||
check_restored_datadir_content(zenith_cli, pg, lsn_after_drop, postgres)
|
||||
check_restored_datadir_content(zenith_cli, test_output_dir, pg, pageserver.service_port.pg)
|
||||
|
||||
@@ -9,7 +9,8 @@ pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
# it only checks next_multixact_id field in restored pg_control,
|
||||
# since we don't have functions to check multixact internals.
|
||||
#
|
||||
def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin, zenith_cli, base_dir):
|
||||
def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory,
|
||||
pg_bin, zenith_cli, base_dir, test_output_dir):
|
||||
# Create a branch for us
|
||||
zenith_cli.run(["branch", "test_multixact", "empty"])
|
||||
pg = postgres.create_start('test_multixact')
|
||||
@@ -65,4 +66,4 @@ def test_multixact(pageserver: ZenithPageserver, postgres: PostgresFactory, pg_b
|
||||
assert next_multixact_id_new == next_multixact_id
|
||||
|
||||
# Check that we restore the content of the datadir correctly
|
||||
check_restored_datadir_content(zenith_cli, pg, lsn, postgres)
|
||||
check_restored_datadir_content(zenith_cli, test_output_dir, pg_new, pageserver.service_port.pg)
|
||||
|
||||
test_runner/batch_others/test_old_request_lsn.py (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
from contextlib import closing
|
||||
|
||||
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
|
||||
#
|
||||
# Test where Postgres generates a lot of WAL, and it's garbage collected away, but
|
||||
# no pages are evicted so that Postgres uses an old LSN in a GetPage request.
|
||||
# We had a bug where the page server failed to find the page version because it
|
||||
# thought it was already garbage collected away, because the LSN in the GetPage
|
||||
# request was very old and the WAL from that time was indeed already removed.
|
||||
# In reality, the LSN on a GetPage request coming from a primary server is
|
||||
# just a hint that the page hasn't been modified since that LSN, and the page
|
||||
# server should return the latest page version regardless of the LSN.
|
||||
#
|
||||
def test_old_request_lsn(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin):
|
||||
# Create a branch for us
|
||||
zenith_cli.run(["branch", "test_old_request_lsn", "empty"])
|
||||
pg = postgres.create_start('test_old_request_lsn')
|
||||
print('postgres is running on test_old_request_lsn branch')
|
||||
|
||||
pg_conn = pg.connect()
|
||||
cur = pg_conn.cursor()
|
||||
|
||||
# Get the timeline ID of our branch. We need it for the 'do_gc' command
|
||||
cur.execute("SHOW zenith.zenith_timeline")
|
||||
timeline = cur.fetchone()[0]
|
||||
|
||||
psconn = pageserver.connect()
|
||||
pscur = psconn.cursor()
|
||||
|
||||
# Create table, and insert some rows. Make it big enough that it doesn't fit in
|
||||
# shared_buffers.
|
||||
cur.execute('CREATE TABLE foo (id int4 PRIMARY KEY, val int, t text)')
|
||||
cur.execute('''
|
||||
INSERT INTO foo
|
||||
SELECT g, 1, 'long string to consume some space' || g
|
||||
FROM generate_series(1, 100000) g
|
||||
''')
|
||||
|
||||
# Verify that the table is larger than shared_buffers, so that the SELECT below
|
||||
# will cause GetPage requests.
|
||||
cur.execute('''
|
||||
select setting::int * pg_size_bytes(unit) as shared_buffers, pg_relation_size('foo') as tbl_size
|
||||
from pg_settings where name = 'shared_buffers'
|
||||
''')
|
||||
row = cur.fetchone()
|
||||
print(f'shared_buffers is {row[0]}, table size {row[1]}');
|
||||
assert int(row[0]) < int(row[1])
|
||||
|
||||
cur.execute('VACUUM foo');
|
||||
|
||||
# Make a lot of updates on a single row, generating a lot of WAL. Trigger
|
||||
# garbage collections so that the page server will remove old page versions.
|
||||
for i in range(10):
|
||||
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
|
||||
for j in range(100):
|
||||
cur.execute('UPDATE foo SET val = val + 1 WHERE id = 1;');
|
||||
|
||||
# All (or at least most of) the updates should've been on the same page, so
|
||||
# that we haven't had to evict any dirty pages for a long time. Now run
|
||||
# a query that sends GetPage@LSN requests with the old LSN.
|
||||
cur.execute("SELECT COUNT(*), SUM(val) FROM foo");
|
||||
assert cur.fetchone() == (100000, 101000)
|
||||
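The comment block at the top of this new test states the contract the page server must honour: the LSN in a GetPage request from a primary is only a lower bound ("the page has not changed since this LSN"), so the latest page version must be returned even if WAL at that LSN has already been garbage collected. A conceptual illustration of that lookup rule (this is not page server code, just the semantics restated):

# Conceptual illustration only, not the page server implementation.
def get_page(versions, request_lsn):
    """versions: list of (lsn, page_image) pairs, sorted by lsn ascending."""
    # The request LSN only promises "unchanged since request_lsn"; the correct
    # answer is always the newest version we have, even if WAL at request_lsn
    # itself was already removed by GC.
    last_lsn, image = versions[-1]
    return image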
@@ -9,10 +9,7 @@ pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
#
|
||||
# Test restarting and recreating a postgres instance
|
||||
#
|
||||
# XXX: with_wal_acceptors=True fails now, would be fixed with
|
||||
# `postgres --sync-walkeepers` patches.
|
||||
#
|
||||
@pytest.mark.parametrize('with_wal_acceptors', [False])
|
||||
@pytest.mark.parametrize('with_wal_acceptors', [False, True])
|
||||
def test_restart_compute(
|
||||
zenith_cli,
|
||||
pageserver: ZenithPageserver,
|
||||
|
||||
@@ -6,8 +6,8 @@ pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
|
||||
def print_gc_result(row):
|
||||
print("GC duration {elapsed} ms".format_map(row));
|
||||
print(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
|
||||
print(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))
|
||||
print(" REL total: {layer_relfiles_total}, needed_by_cutoff {layer_relfiles_needed_by_cutoff}, needed_by_branches: {layer_relfiles_needed_by_branches}, not_updated: {layer_relfiles_not_updated}, needed_as_tombstone {layer_relfiles_needed_as_tombstone}, removed: {layer_relfiles_removed}, dropped: {layer_relfiles_dropped}".format_map(row))
|
||||
print(" NONREL total: {layer_nonrelfiles_total}, needed_by_cutoff {layer_nonrelfiles_needed_by_cutoff}, needed_by_branches: {layer_nonrelfiles_needed_by_branches}, not_updated: {layer_nonrelfiles_not_updated}, needed_as_tombstone {layer_nonrelfiles_needed_as_tombstone}, removed: {layer_nonrelfiles_removed}, dropped: {layer_nonrelfiles_dropped}".format_map(row))
|
||||
|
||||
|
||||
#
|
||||
@@ -58,19 +58,21 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
|
||||
layer_relfiles_remain = row['layer_relfiles_total'] - row['layer_relfiles_removed']
|
||||
assert layer_relfiles_remain > 0
|
||||
|
||||
# Insert a row.
|
||||
# Insert a row and run GC. Checkpoint should freeze the layer
|
||||
# so that there is only the most recent image layer left for the rel,
|
||||
# removing the old image and delta layer.
|
||||
print("Inserting one row and running GC")
|
||||
cur.execute("INSERT INTO foo VALUES (1)")
|
||||
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
|
||||
row = pscur.fetchone()
|
||||
print_gc_result(row);
|
||||
assert row['layer_relfiles_total'] == layer_relfiles_remain + 1
|
||||
assert row['layer_relfiles_removed'] == 1
|
||||
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
|
||||
assert row['layer_relfiles_removed'] == 2
|
||||
assert row['layer_relfiles_dropped'] == 0
|
||||
|
||||
# Insert two more rows and run GC.
|
||||
# This should create a new layer file with the new contents, and
|
||||
# remove the old one.
|
||||
# This should create new image and delta layer file with the new contents, and
|
||||
# then remove the old one image and the just-created delta layer.
|
||||
print("Inserting two more rows and running GC")
|
||||
cur.execute("INSERT INTO foo VALUES (2)")
|
||||
cur.execute("INSERT INTO foo VALUES (3)")
|
||||
@@ -78,11 +80,11 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
|
||||
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
|
||||
row = pscur.fetchone()
|
||||
print_gc_result(row);
|
||||
assert row['layer_relfiles_total'] == layer_relfiles_remain + 1
|
||||
assert row['layer_relfiles_removed'] == 1
|
||||
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
|
||||
assert row['layer_relfiles_removed'] == 2
|
||||
assert row['layer_relfiles_dropped'] == 0
|
||||
|
||||
# Do it again. Should again create a new layer file and remove old one.
|
||||
# Do it again. Should again create two new layer files and remove old ones.
|
||||
print("Inserting two more rows and running GC")
|
||||
cur.execute("INSERT INTO foo VALUES (2)")
|
||||
cur.execute("INSERT INTO foo VALUES (3)")
|
||||
@@ -90,8 +92,8 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
|
||||
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
|
||||
row = pscur.fetchone()
|
||||
print_gc_result(row);
|
||||
assert row['layer_relfiles_total'] == layer_relfiles_remain + 1
|
||||
assert row['layer_relfiles_removed'] == 1
|
||||
assert row['layer_relfiles_total'] == layer_relfiles_remain + 2
|
||||
assert row['layer_relfiles_removed'] == 2
|
||||
assert row['layer_relfiles_dropped'] == 0
|
||||
|
||||
# Run GC again, with no changes in the database. Should not remove anything.
|
||||
@@ -113,12 +115,19 @@ def test_layerfiles_gc(zenith_cli, pageserver, postgres, pg_bin):
|
||||
row = pscur.fetchone()
|
||||
print_gc_result(row);
|
||||
|
||||
# We still cannot remove the latest layers
|
||||
# because they serve as tombstones for earlier layers.
|
||||
assert row['layer_relfiles_dropped'] == 0
|
||||
# Each relation fork is counted separately, hence 3.
|
||||
assert row['layer_relfiles_dropped'] == 3
|
||||
assert row['layer_relfiles_needed_as_tombstone'] == 3
|
||||
|
||||
# The catalog updates also create new layer files of the catalogs, which
|
||||
# are counted as 'removed'
|
||||
assert row['layer_relfiles_removed'] > 0
|
||||
|
||||
# TODO Change the test to check actual GC of dropped layers.
|
||||
# Each relation fork is counted separately, hence 3.
|
||||
#assert row['layer_relfiles_dropped'] == 3
|
||||
|
||||
# TODO: perhaps we should count catalog and user relations separately,
|
||||
# to make this kind of testing more robust
|
||||
|
||||
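The assertions in test_layerfiles_gc all follow the same accounting: after a GC pass, the layers that remain equal total minus removed, and each user relation contributes one layer per fork (which is why the dropped counts come in threes, as the comment above notes). A small hypothetical helper capturing that arithmetic, using the field names printed by print_gc_result:

# Illustrative helper, not part of the diff.
def assert_gc_step(row, prev_remaining: int, new_layers: int, removed: int) -> int:
    """Check one do_gc result row and return the layer count that remains."""
    assert row['layer_relfiles_total'] == prev_remaining + new_layers
    assert row['layer_relfiles_removed'] == removed
    assert row['layer_relfiles_dropped'] == 0
    return row['layer_relfiles_total'] - row['layer_relfiles_removed']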
test_runner/batch_others/test_timeline_size.py (new file, 39 lines)
@@ -0,0 +1,39 @@
|
||||
from contextlib import closing
|
||||
from uuid import UUID
|
||||
import psycopg2.extras
|
||||
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
|
||||
|
||||
|
||||
def test_timeline_size(
|
||||
zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory, pg_bin
|
||||
):
|
||||
# Branch at the point where only 100 rows were inserted
|
||||
zenith_cli.run(["branch", "test_timeline_size", "empty"])
|
||||
|
||||
client = pageserver.http_client()
|
||||
res = client.branch_detail(UUID(pageserver.initial_tenant), "test_timeline_size")
|
||||
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
|
||||
|
||||
pgmain = postgres.create_start("test_timeline_size")
|
||||
print("postgres is running on 'test_timeline_size' branch")
|
||||
|
||||
with closing(pgmain.connect()) as conn:
|
||||
with conn.cursor() as cur:
|
||||
cur.execute("SHOW zenith.zenith_timeline")
|
||||
|
||||
# Create table, and insert the first 100 rows
|
||||
cur.execute("CREATE TABLE foo (t text)")
|
||||
cur.execute(
|
||||
"""
|
||||
INSERT INTO foo
|
||||
SELECT 'long string to consume some space' || g
|
||||
FROM generate_series(1, 10) g
|
||||
"""
|
||||
)
|
||||
|
||||
res = client.branch_detail(UUID(pageserver.initial_tenant), "test_timeline_size")
|
||||
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
|
||||
cur.execute("TRUNCATE foo")
|
||||
|
||||
res = client.branch_detail(UUID(pageserver.initial_tenant), "test_timeline_size")
|
||||
assert res["current_logical_size"] == res["current_logical_size_non_incremental"]
|
||||
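test_timeline_size repeatedly makes the same check: the incrementally maintained current_logical_size must match the non-incremental recomputation returned by the branch_detail endpoint added later in this diff. A compact helper for that assertion, assuming the same pageserver fixture (the helper name is hypothetical):

# Illustrative helper, not part of the diff.
from uuid import UUID

def assert_logical_size_consistent(pageserver, branch_name: str):
    client = pageserver.http_client()
    res = client.branch_detail(UUID(pageserver.initial_tenant), branch_name)
    assert res["current_logical_size"] == res["current_logical_size_non_incremental"]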
@@ -1,10 +1,14 @@
|
||||
import pytest
|
||||
import random
|
||||
import time
|
||||
import os
|
||||
import subprocess
|
||||
import uuid
|
||||
|
||||
from contextlib import closing
|
||||
from multiprocessing import Process, Value
|
||||
from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory
|
||||
from fixtures.zenith_fixtures import WalAcceptorFactory, ZenithPageserver, PostgresFactory, PgBin
|
||||
from fixtures.utils import lsn_to_hex, mkdir_if_needed
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
|
||||
@@ -198,3 +202,92 @@ def test_race_conditions(zenith_cli, pageserver: ZenithPageserver, postgres: Pos
|
||||
|
||||
stop_value.value = 1
|
||||
proc.join()
|
||||
|
||||
class ProposerPostgres:
|
||||
"""Object for running safekeepers sync with walproposer"""
|
||||
def __init__(self, pgdata_dir: str, pg_bin: PgBin, timeline_id: str, tenant_id: str):
|
||||
self.pgdata_dir: str = pgdata_dir
|
||||
self.pg_bin: PgBin = pg_bin
|
||||
self.timeline_id: str = timeline_id
|
||||
self.tenant_id: str = tenant_id
|
||||
|
||||
def pg_data_dir_path(self) -> str:
|
||||
""" Path to data directory """
|
||||
return self.pgdata_dir
|
||||
|
||||
def config_file_path(self) -> str:
|
||||
""" Path to postgresql.conf """
|
||||
return os.path.join(self.pgdata_dir, 'postgresql.conf')
|
||||
|
||||
def create_dir_config(self, wal_acceptors: str):
|
||||
""" Create dir and config for running --sync-safekeepers """
|
||||
|
||||
mkdir_if_needed(self.pg_data_dir_path())
|
||||
with open(self.config_file_path(), "w") as f:
|
||||
f.writelines([
|
||||
"synchronous_standby_names = 'walproposer'\n",
|
||||
f"zenith.zenith_timeline = '{self.timeline_id}'\n",
|
||||
f"zenith.zenith_tenant = '{self.tenant_id}'\n",
|
||||
f"wal_acceptors = '{wal_acceptors}'\n",
|
||||
])
|
||||
|
||||
def sync_safekeepers(self) -> str:
|
||||
"""
|
||||
Run 'postgres --sync-safekeepers'.
|
||||
Returns execution result, which is commit_lsn after sync.
|
||||
"""
|
||||
|
||||
command = ["postgres", "--sync-safekeepers"]
|
||||
env = {
|
||||
"PGDATA": self.pg_data_dir_path(),
|
||||
}
|
||||
|
||||
basepath = self.pg_bin.run_capture(command, env)
|
||||
stdout_filename = basepath + '.stdout'
|
||||
|
||||
with open(stdout_filename, 'r') as stdout_f:
|
||||
stdout = stdout_f.read()
|
||||
return stdout.strip("\n ")
|
||||
|
||||
|
||||
# insert wal in all safekeepers and run sync on proposer
|
||||
def test_sync_safekeepers(repo_dir: str, pg_bin: PgBin, wa_factory: WalAcceptorFactory):
|
||||
wa_factory.start_n_new(3)
|
||||
|
||||
timeline_id = uuid.uuid4().hex
|
||||
tenant_id = uuid.uuid4().hex
|
||||
|
||||
# write config for proposer
|
||||
pgdata_dir = os.path.join(repo_dir, "proposer_pgdata")
|
||||
pg = ProposerPostgres(pgdata_dir, pg_bin, timeline_id, tenant_id)
|
||||
pg.create_dir_config(wa_factory.get_connstrs())
|
||||
|
||||
# a valid LSN that is not at a segment start, nor in the zero segment
|
||||
epoch_start_lsn = 0x16B9188 # 0/16B9188
|
||||
begin_lsn = epoch_start_lsn
|
||||
|
||||
# append and commit WAL
|
||||
lsn_after_append = []
|
||||
for i in range(3):
|
||||
res = wa_factory.instances[i].append_logical_message(
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
{
|
||||
"lm_prefix": "prefix",
|
||||
"lm_message": "message",
|
||||
"set_commit_lsn": True,
|
||||
"term": 2,
|
||||
"begin_lsn": begin_lsn,
|
||||
"epoch_start_lsn": epoch_start_lsn,
|
||||
"truncate_lsn": epoch_start_lsn,
|
||||
},
|
||||
)
|
||||
lsn_hex = lsn_to_hex(res["inserted_wal"]["end_lsn"])
|
||||
lsn_after_append.append(lsn_hex)
|
||||
print(f"safekeeper[{i}] lsn after append: {lsn_hex}")
|
||||
|
||||
# run sync safekeepers
|
||||
lsn_after_sync = pg.sync_safekeepers()
|
||||
print(f"lsn after sync = {lsn_after_sync}")
|
||||
|
||||
assert all(lsn_after_sync == lsn for lsn in lsn_after_append)
|
||||
|
||||
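test_sync_safekeepers handles LSNs both as integers (epoch_start_lsn = 0x16B9188) and in the usual high/low hex notation that sync_safekeepers prints; lsn_to_hex from fixtures.utils (shown further down in this diff) does the conversion. A quick round-trip sketch, with a hypothetical inverse added only for illustration:

def lsn_to_hex(num: int) -> str:
    """As in fixtures.utils in this diff: int -> 'HI/LO' notation."""
    return "{:X}/{:X}".format(num >> 32, num & 0xffffffff)

def lsn_from_hex(lsn: str) -> int:
    # Hypothetical inverse, for illustration only.
    hi, lo = lsn.split("/")
    return (int(hi, 16) << 32) | int(lo, 16)

assert lsn_to_hex(0x16B9188) == "0/16B9188"
assert lsn_from_hex("0/16B9188") == 0x16B9188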
test_runner/batch_others/test_wal_acceptor_async.py (new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
import asyncio
|
||||
import asyncpg
|
||||
import random
|
||||
|
||||
from fixtures.zenith_fixtures import WalAcceptor, WalAcceptorFactory, ZenithPageserver, PostgresFactory, Postgres
|
||||
from typing import List
|
||||
from fixtures.utils import debug_print
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
|
||||
|
||||
class BankClient(object):
|
||||
def __init__(self, conn: asyncpg.Connection, n_accounts, init_amount):
|
||||
self.conn: asyncpg.Connection = conn
|
||||
self.n_accounts = n_accounts
|
||||
self.init_amount = init_amount
|
||||
|
||||
async def initdb(self):
|
||||
await self.conn.execute('DROP TABLE IF EXISTS bank_accs')
|
||||
await self.conn.execute('CREATE TABLE bank_accs(uid int primary key, amount int)')
|
||||
await self.conn.execute('''
|
||||
INSERT INTO bank_accs
|
||||
SELECT *, $1 FROM generate_series(0, $2)
|
||||
''', self.init_amount, self.n_accounts - 1)
|
||||
await self.conn.execute('DROP TABLE IF EXISTS bank_log')
|
||||
await self.conn.execute('CREATE TABLE bank_log(from_uid int, to_uid int, amount int)')
|
||||
|
||||
# TODO: Remove when https://github.com/zenithdb/zenith/issues/644 is fixed
|
||||
await self.conn.execute('ALTER TABLE bank_accs SET (autovacuum_enabled = false)')
|
||||
await self.conn.execute('ALTER TABLE bank_log SET (autovacuum_enabled = false)')
|
||||
|
||||
async def check_invariant(self):
|
||||
row = await self.conn.fetchrow('SELECT sum(amount) AS sum FROM bank_accs')
|
||||
assert row['sum'] == self.n_accounts * self.init_amount
|
||||
|
||||
async def bank_transfer(conn: asyncpg.Connection, from_uid, to_uid, amount):
|
||||
# avoid deadlocks by sorting uids
|
||||
if from_uid > to_uid:
|
||||
from_uid, to_uid, amount = to_uid, from_uid, -amount
|
||||
|
||||
async with conn.transaction():
|
||||
await conn.execute(
|
||||
'UPDATE bank_accs SET amount = amount + ($1) WHERE uid = $2',
|
||||
amount, to_uid,
|
||||
)
|
||||
await conn.execute(
|
||||
'UPDATE bank_accs SET amount = amount - ($1) WHERE uid = $2',
|
||||
amount, from_uid,
|
||||
)
|
||||
await conn.execute('INSERT INTO bank_log VALUES ($1, $2, $3)',
|
||||
from_uid, to_uid, amount,
|
||||
)
|
||||
|
||||
class WorkerStats(object):
|
||||
def __init__(self, n_workers):
|
||||
self.counters = [0] * n_workers
|
||||
self.running = True
|
||||
|
||||
def reset(self):
|
||||
self.counters = [0] * len(self.counters)
|
||||
|
||||
def inc_progress(self, worker_id):
|
||||
self.counters[worker_id] += 1
|
||||
|
||||
def check_progress(self):
|
||||
debug_print("Workers progress: {}".format(self.counters))
|
||||
|
||||
# every worker should finish at least one tx
|
||||
assert all(cnt > 0 for cnt in self.counters)
|
||||
|
||||
progress = sum(self.counters)
|
||||
print('All workers made {} transactions'.format(progress))
|
||||
|
||||
|
||||
async def run_random_worker(stats: WorkerStats, pg: Postgres, worker_id, n_accounts, max_transfer):
|
||||
pg_conn = await pg.connect_async()
|
||||
debug_print('Started worker {}'.format(worker_id))
|
||||
|
||||
while stats.running:
|
||||
from_uid = random.randint(0, n_accounts - 1)
|
||||
to_uid = (from_uid + random.randint(1, n_accounts - 1)) % n_accounts
|
||||
amount = random.randint(1, max_transfer)
|
||||
|
||||
await bank_transfer(pg_conn, from_uid, to_uid, amount)
|
||||
stats.inc_progress(worker_id)
|
||||
|
||||
debug_print('Executed transfer({}) {} => {}'.format(amount, from_uid, to_uid))
|
||||
|
||||
debug_print('Finished worker {}'.format(worker_id))
|
||||
|
||||
await pg_conn.close()
|
||||
|
||||
|
||||
# This test will run several iterations and check progress in each of them.
|
||||
# On each iteration 1 acceptor is stopped, and 2 others should allow
|
||||
# background workers execute transactions. In the end, state should remain
|
||||
# consistent.
|
||||
async def run_restarts_under_load(pg: Postgres, acceptors: List[WalAcceptor], n_workers=10):
|
||||
n_accounts = 100
|
||||
init_amount = 100000
|
||||
max_transfer = 100
|
||||
period_time = 10
|
||||
iterations = 6
|
||||
|
||||
pg_conn = await pg.connect_async()
|
||||
bank = BankClient(pg_conn, n_accounts=n_accounts, init_amount=init_amount)
|
||||
# create tables and initial balances
|
||||
await bank.initdb()
|
||||
|
||||
stats = WorkerStats(n_workers)
|
||||
workers = []
|
||||
for worker_id in range(n_workers):
|
||||
worker = run_random_worker(stats, pg, worker_id, bank.n_accounts, max_transfer)
|
||||
workers.append(asyncio.create_task(worker))
|
||||
|
||||
|
||||
for it in range(iterations):
|
||||
victim = acceptors[it % len(acceptors)]
|
||||
victim.stop()
|
||||
|
||||
# Wait till the previous victim recovers, so it is ready for the next
# iteration, by executing a writing transaction against it.
|
||||
conn = await pg.connect_async()
|
||||
await conn.execute(
|
||||
'UPDATE bank_accs SET amount = amount WHERE uid = 1',
|
||||
timeout=120
|
||||
)
|
||||
await conn.close()
|
||||
|
||||
stats.reset()
|
||||
await asyncio.sleep(period_time)
|
||||
# assert that at least one transaction has completed in every worker
|
||||
stats.check_progress()
|
||||
|
||||
victim.start()
|
||||
|
||||
print('Iterations are finished, exiting coroutines...')
|
||||
stats.running = False
|
||||
# await all workers
|
||||
await asyncio.gather(*workers)
|
||||
# assert balances sum hasn't changed
|
||||
await bank.check_invariant()
|
||||
await pg_conn.close()
|
||||
|
||||
|
||||
# restart acceptors one by one, while executing and validating bank transactions
|
||||
def test_restarts_under_load(zenith_cli, pageserver: ZenithPageserver, postgres: PostgresFactory,
|
||||
wa_factory: WalAcceptorFactory):
|
||||
|
||||
wa_factory.start_n_new(3)
|
||||
|
||||
zenith_cli.run(["branch", "test_wal_acceptors_restarts_under_load", "empty"])
|
||||
pg = postgres.create_start('test_wal_acceptors_restarts_under_load',
|
||||
wal_acceptors=wa_factory.get_connstrs())
|
||||
|
||||
asyncio.run(run_restarts_under_load(pg, wa_factory.instances))
|
||||
|
||||
# TODO: Remove when https://github.com/zenithdb/zenith/issues/644 is fixed
|
||||
pg.stop()
|
||||
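The restart test above rests on a single invariant: transfers move money between accounts but never create or destroy it, so the sum of bank_accs.amount must always equal n_accounts * init_amount. A minimal synchronous version of that check (psycopg2 via pg.connect() instead of asyncpg; the helper itself is hypothetical):

# Illustrative sketch, not part of the diff.
def check_bank_invariant(pg, n_accounts=100, init_amount=100000):
    with pg.connect() as conn:
        with conn.cursor() as cur:
            cur.execute('SELECT sum(amount) FROM bank_accs')
            assert cur.fetchone()[0] == n_accounts * init_amount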
@@ -55,4 +55,4 @@ def test_pg_regress(pageserver: ZenithPageserver, postgres: PostgresFactory, pg_
|
||||
lsn = pg.safe_psql('select pg_current_wal_insert_lsn()')[0][0]
|
||||
|
||||
# Check that we restore the content of the datadir correctly
|
||||
check_restored_datadir_content(zenith_cli, pg, lsn, postgres)
|
||||
check_restored_datadir_content(zenith_cli, test_output_dir, pg, pageserver.service_port.pg)
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
import os
|
||||
|
||||
from fixtures.utils import mkdir_if_needed
|
||||
from fixtures.zenith_fixtures import PostgresFactory, check_restored_datadir_content
|
||||
from fixtures.zenith_fixtures import PageserverPort, PostgresFactory, check_restored_datadir_content
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures")
|
||||
|
||||
|
||||
def test_zenith_regress(postgres: PostgresFactory, pg_bin, zenith_cli, test_output_dir, pg_distrib_dir,
|
||||
base_dir, capsys):
|
||||
base_dir, capsys, pageserver_port: PageserverPort):
|
||||
|
||||
# Create a branch for us
|
||||
zenith_cli.run(["branch", "test_zenith_regress", "empty"])
|
||||
@@ -56,4 +56,4 @@ def test_zenith_regress(postgres: PostgresFactory, pg_bin, zenith_cli, test_outp
|
||||
lsn = pg.safe_psql('select pg_current_wal_insert_lsn()')[0][0]
|
||||
|
||||
# Check that we restore the content of the datadir correctly
|
||||
check_restored_datadir_content(zenith_cli, pg, lsn, postgres)
|
||||
check_restored_datadir_content(zenith_cli, test_output_dir, pg, pageserver_port.pg)
|
||||
|
||||
@@ -136,9 +136,34 @@ class ZenithBenchmarker:
|
||||
# The metric should be an integer, as it's a number of bytes. But in general
|
||||
# all prometheus metrics are floats. So to be pedantic, read it as a float
|
||||
# and round to integer.
|
||||
matches = re.search(r'pageserver_disk_io_bytes{io_operation="write"} (\S+)', all_metrics)
|
||||
matches = re.search(r'^pageserver_disk_io_bytes{io_operation="write"} (\S+)$', all_metrics,
|
||||
re.MULTILINE)
|
||||
return int(round(float(matches.group(1))))
|
||||
|
||||
def get_peak_mem(self, pageserver) -> int:
|
||||
"""
|
||||
Fetch the "maxrss" metric from the pageserver
|
||||
"""
|
||||
# Fetch all the exposed prometheus metrics from page server
|
||||
all_metrics = pageserver.http_client().get_metrics()
|
||||
# See comment in get_io_writes()
|
||||
matches = re.search(r'^pageserver_maxrss_kb (\S+)$', all_metrics,
|
||||
re.MULTILINE)
|
||||
return int(round(float(matches.group(1))))
|
||||
|
||||
def get_timeline_size(self, repo_dir: str, tenantid: str, timelineid: str):
|
||||
"""
|
||||
Calculate the on-disk size of a timeline
|
||||
"""
|
||||
path = "{}/tenants/{}/timelines/{}".format(repo_dir, tenantid, timelineid)
|
||||
|
||||
totalbytes = 0
|
||||
for root, dirs, files in os.walk(path):
|
||||
for name in files:
|
||||
totalbytes += os.path.getsize(os.path.join(root, name))
|
||||
|
||||
return totalbytes
|
||||
|
||||
@contextmanager
|
||||
def record_pageserver_writes(self, pageserver, metric_name):
|
||||
"""
|
||||
|
||||
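The regex change above anchors the metric lookups with ^...$ and re.MULTILINE, so only a whole exposition line can match, presumably to avoid accidental partial matches elsewhere in the metrics dump. A small sketch of that parsing pattern (the read_metric helper is hypothetical):

import re

# Illustrative sketch, not part of the diff: read one metric from a Prometheus
# text exposition dump, anchored so only an entire line can match.
def read_metric(all_metrics: str, name: str) -> int:
    m = re.search(rf'^{re.escape(name)} (\S+)$', all_metrics, re.MULTILINE)
    # Metrics are exposed as floats; round to int for byte/kB counters.
    return int(round(float(m.group(1))))

# e.g. read_metric(all_metrics, 'pageserver_maxrss_kb')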
@@ -14,13 +14,14 @@ def mkdir_if_needed(path: str) -> None:
|
||||
|
||||
Note this won't try to create intermediate directories.
|
||||
"""
|
||||
if os.path.exists(path):
|
||||
assert os.path.isdir(path)
|
||||
return
|
||||
os.mkdir(path)
|
||||
try:
|
||||
os.mkdir(path)
|
||||
except FileExistsError:
|
||||
pass
|
||||
assert os.path.isdir(path)
|
||||
|
||||
|
||||
def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> None:
|
||||
def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> str:
|
||||
""" Run a process and capture its output
|
||||
|
||||
Output will go to files named "cmd_NNN.stdout" and "cmd_NNN.stderr"
|
||||
@@ -28,6 +29,7 @@ def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> None:
|
||||
counter.
|
||||
|
||||
If those files already exist, we will overwrite them.
|
||||
Returns basepath for files with captured output.
|
||||
"""
|
||||
assert type(cmd) is list
|
||||
base = os.path.basename(cmd[0]) + '_{}'.format(global_counter())
|
||||
@@ -40,6 +42,8 @@ def subprocess_capture(capture_dir: str, cmd: List[str], **kwargs: Any) -> None:
|
||||
print('(capturing output to "{}.stdout")'.format(base))
|
||||
subprocess.run(cmd, **kwargs, stdout=stdout_f, stderr=stderr_f)
|
||||
|
||||
return basepath
|
||||
|
||||
|
||||
_global_counter = 0
|
||||
|
||||
@@ -53,3 +57,15 @@ def global_counter() -> int:
|
||||
global _global_counter
|
||||
_global_counter += 1
|
||||
return _global_counter
|
||||
|
||||
def debug_print(*args, **kwargs) -> None:
|
||||
""" Print to the console if TEST_DEBUG_PRINT is set in env.
|
||||
|
||||
All parameters are passed to print().
|
||||
"""
|
||||
if os.environ.get('TEST_DEBUG_PRINT') is not None:
|
||||
print(*args, **kwargs)
|
||||
|
||||
def lsn_to_hex(num: int) -> str:
|
||||
""" Convert lsn from int to standard hex notation. """
|
||||
return "{:X}/{:X}".format(num >> 32, num & 0xffffffff)
|
||||
|
||||
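The mkdir_if_needed change in this hunk replaces "check, then mkdir" with "mkdir and tolerate FileExistsError": with pytest-xdist, two workers can pass the exists() check at the same time and then race on the mkdir() call. A self-contained sketch of why catching the error is the safe pattern (the function and path names here are only for illustration; os.makedirs(exist_ok=True) would be an equivalent alternative):

import multiprocessing
import os

# Illustrative sketch, not part of the diff: several workers may all see
# "path does not exist" and then call os.mkdir(); catching FileExistsError
# makes the helper idempotent without a check-then-create race.
def make_dir(path: str) -> None:
    try:
        os.mkdir(path)
    except FileExistsError:
        pass

if __name__ == '__main__':
    procs = [multiprocessing.Process(target=make_dir, args=('shared_dir',)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()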
@@ -1,9 +1,11 @@
|
||||
from dataclasses import dataclass
|
||||
from functools import cached_property
|
||||
import asyncpg
|
||||
import os
|
||||
import pathlib
|
||||
import uuid
|
||||
import jwt
|
||||
import json
|
||||
import psycopg2
|
||||
import pytest
|
||||
import shutil
|
||||
@@ -49,8 +51,29 @@ Fn = TypeVar('Fn', bound=Callable[..., Any])
|
||||
DEFAULT_OUTPUT_DIR = 'test_output'
|
||||
DEFAULT_POSTGRES_DIR = 'tmp_install'
|
||||
|
||||
DEFAULT_PAGESERVER_PG_PORT = 64000
|
||||
DEFAULT_PAGESERVER_HTTP_PORT = 9898
|
||||
BASE_PORT = 15000
|
||||
WORKER_PORT_NUM = 100
|
||||
|
||||
def pytest_configure(config):
|
||||
"""
|
||||
Ensure that no unwanted daemons are running before we start testing.
|
||||
Check that we do not overflow the available port range.
|
||||
"""
|
||||
numprocesses = config.getoption('numprocesses')
|
||||
if numprocesses is not None and BASE_PORT + numprocesses * WORKER_PORT_NUM > 32768: # do not use ephemeral ports
|
||||
raise Exception('Too many workers configured. Cannot distribute ports for services.')
|
||||
|
||||
# does not use -c as it is not supported on macOS
|
||||
cmd = ['pgrep', 'pageserver|postgres|wal_acceptor']
|
||||
result = subprocess.run(cmd, stdout=subprocess.DEVNULL)
|
||||
if result.returncode == 0:
|
||||
# returncode of 0 means it found something.
|
||||
# This is bad; we don't want any of those processes polluting the
|
||||
# result of the test.
|
||||
# NOTE this shows as an internal pytest error, there might be a better way
|
||||
raise Exception(
|
||||
'Found interfering processes running. Stop all Zenith pageservers, nodes, WALs, as well as stand-alone Postgres.'
|
||||
)
|
||||
|
||||
|
||||
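The pytest_configure check above rejects worker counts whose port windows would spill into the ephemeral range. With the constants introduced in this hunk (BASE_PORT = 15000, WORKER_PORT_NUM = 100, upper bound 32768) that caps a run at 177 xdist workers; the check is just this arithmetic:

# Worked example of the port-range check, using the same constants as the diff.
BASE_PORT = 15000
WORKER_PORT_NUM = 100

def max_workers(upper_bound: int = 32768) -> int:
    # largest n such that BASE_PORT + n * WORKER_PORT_NUM <= upper_bound
    return (upper_bound - BASE_PORT) // WORKER_PORT_NUM

assert max_workers() == 177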
def determine_scope(fixture_name: str, config: Any) -> str:
|
||||
@@ -75,20 +98,6 @@ def zenfixture(func: Fn) -> Fn:
|
||||
return pytest.fixture(func, scope=scope)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True, scope='session')
|
||||
def safety_check() -> None:
|
||||
""" Ensure that no unwanted daemons are running before we start testing. """
|
||||
|
||||
# does not use -c as it is not supported on macOS
|
||||
cmd = ['pgrep', 'pageserver|postgres|wal_acceptor']
|
||||
result = subprocess.run(cmd, stdout=subprocess.DEVNULL)
|
||||
if result.returncode == 0:
|
||||
# returncode of 0 means it found something.
|
||||
# This is bad; we don't want any of those processes polluting the
|
||||
# result of the test.
|
||||
raise Exception('found interfering processes running')
|
||||
|
||||
|
||||
class PgProtocol:
|
||||
""" Reusable connection logic """
|
||||
def __init__(self, host: str, port: int, username: Optional[str] = None):
|
||||
@@ -124,6 +133,21 @@ class PgProtocol:
|
||||
conn.autocommit = autocommit
|
||||
return conn
|
||||
|
||||
async def connect_async(self, *, dbname: str = 'postgres', username: Optional[str] = None, password: Optional[str] = None) -> asyncpg.Connection:
|
||||
"""
|
||||
Connect to the node from async python.
|
||||
Returns asyncpg's connection object.
|
||||
"""
|
||||
|
||||
conn = await asyncpg.connect(
|
||||
host=self.host,
|
||||
port=self.port,
|
||||
database=dbname,
|
||||
user=username or self.username,
|
||||
password=password,
|
||||
)
|
||||
return conn
|
||||
|
||||
def safe_psql(self, query: str, **kwargs: Any) -> List[Any]:
|
||||
"""
|
||||
Execute query against the node and return all rows.
|
||||
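connect_async gives tests an asyncpg connection alongside the existing psycopg2 path; the async bank workers earlier in this diff rely on it. A minimal usage sketch (the count_rows helper and the table name are hypothetical):

import asyncio

# Illustrative usage, not part of the diff.
async def count_rows(pg, table: str) -> int:
    conn = await pg.connect_async()
    try:
        return await conn.fetchval(f'SELECT count(*) FROM {table}')
    finally:
        await conn.close()

# asyncio.run(count_rows(pg, 'foo'))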
@@ -180,12 +204,16 @@ class ZenithCli:
|
||||
universal_newlines=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)
|
||||
except subprocess.CalledProcessError as err:
|
||||
print(f"Run failed: {err}")
|
||||
print(f" stdout: {err.stdout}")
|
||||
print(f" stderr: {err.stderr}")
|
||||
except subprocess.CalledProcessError as exc:
|
||||
# this way the command output will be recorded and shown in the CI failure message
|
||||
msg = f"""\
|
||||
Run failed: {exc}
|
||||
stdout: {exc.stdout}
|
||||
stderr: {exc.stderr}
|
||||
"""
|
||||
print(msg)
|
||||
|
||||
raise err
|
||||
raise Exception(msg) from exc
|
||||
|
||||
return res
|
||||
|
||||
@@ -224,6 +252,13 @@ class ZenithPageserverHttpClient(requests.Session):
|
||||
res.raise_for_status()
|
||||
return res.json()
|
||||
|
||||
def branch_detail(self, tenant_id: uuid.UUID, name: str) -> Dict:
|
||||
res = self.get(
|
||||
f"http://localhost:{self.port}/v1/branch/{tenant_id.hex}/{name}",
|
||||
)
|
||||
res.raise_for_status()
|
||||
return res.json()
|
||||
|
||||
def tenant_list(self) -> List[str]:
|
||||
res = self.get(f"http://localhost:{self.port}/v1/tenant")
|
||||
res.raise_for_status()
|
||||
@@ -270,24 +305,60 @@ class AuthKeys:
|
||||
return token
|
||||
|
||||
|
||||
@zenfixture
|
||||
def worker_seq_no(worker_id: str):
|
||||
# worker_id is a pytest-xdist fixture
|
||||
# it can be master or gw<number>
|
||||
# parse it to always get a number
|
||||
if worker_id == 'master':
|
||||
return 0
|
||||
assert worker_id.startswith('gw')
|
||||
return int(worker_id[2:])
|
||||
|
||||
|
||||
@zenfixture
|
||||
def worker_base_port(worker_seq_no: int):
|
||||
# We divide ports into ranges of 100 ports each,
# so workers get disjoint sets of ports for their services.
|
||||
return BASE_PORT + worker_seq_no * WORKER_PORT_NUM
|
||||
|
||||
class PortDistributor:
|
||||
def __init__(self, base_port: int, port_number: int) -> None:
|
||||
self.iterator = iter(range(base_port, base_port + port_number))
|
||||
|
||||
def get_port(self) -> int:
|
||||
try:
|
||||
return next(self.iterator)
|
||||
except StopIteration:
|
||||
raise RuntimeError('port range configured for test is exhausted, consider enlarging the range')
|
||||
|
||||
|
||||
@zenfixture
|
||||
def port_distributor(worker_base_port):
|
||||
return PortDistributor(base_port=worker_base_port, port_number=WORKER_PORT_NUM)
|
||||
|
||||
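Every fixture below (pageserver pg/http ports, compute nodes, wal acceptors) now draws from the same PortDistributor, so each test worker stays inside its own disjoint 100-port window. A quick usage sketch of the class defined just above:

# Illustrative usage, not part of the diff.
dist = PortDistributor(base_port=15000, port_number=100)
pageserver_pg = dist.get_port()    # 15000
pageserver_http = dist.get_port()  # 15001
compute_ports = [dist.get_port() for _ in range(3)]  # 15002..15004
# After 100 ports have been handed out, get_port() raises RuntimeError
# ("port range configured for test is exhausted ...").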
@dataclass
|
||||
class PageserverPort:
|
||||
pg: int
|
||||
http: int
|
||||
|
||||
|
||||
class ZenithPageserver(PgProtocol):
|
||||
""" An object representing a running pageserver. """
|
||||
def __init__(self, zenith_cli: ZenithCli, repo_dir: str):
|
||||
super().__init__(host='localhost', port=DEFAULT_PAGESERVER_PG_PORT)
|
||||
def __init__(self, zenith_cli: ZenithCli, repo_dir: str, port: PageserverPort):
|
||||
super().__init__(host='localhost', port=port.pg)
|
||||
self.zenith_cli = zenith_cli
|
||||
self.running = False
|
||||
self.initial_tenant = None
|
||||
self.repo_dir = repo_dir
|
||||
self.service_port = port # do not shadow PgProtocol.port which is just int
|
||||
|
||||
def init(self, enable_auth: bool = False) -> 'ZenithPageserver':
|
||||
"""
|
||||
Initialize the repository, i.e. run "zenith init".
|
||||
Returns self.
|
||||
"""
|
||||
cmd = ['init']
|
||||
cmd = ['init', f'--pageserver-pg-port={self.service_port.pg}', f'--pageserver-http-port={self.service_port.http}']
|
||||
if enable_auth:
|
||||
cmd.append('--enable-auth')
|
||||
self.zenith_cli.run(cmd)
|
||||
@@ -337,13 +408,23 @@ class ZenithPageserver(PgProtocol):
|
||||
|
||||
def http_client(self, auth_token: Optional[str] = None):
|
||||
return ZenithPageserverHttpClient(
|
||||
port=DEFAULT_PAGESERVER_HTTP_PORT,
|
||||
port=self.service_port.http,
|
||||
auth_token=auth_token,
|
||||
)
|
||||
|
||||
|
||||
|
||||
|
||||
@zenfixture
|
||||
def pageserver(zenith_cli: ZenithCli, repo_dir: str) -> Iterator[ZenithPageserver]:
|
||||
def pageserver_port(port_distributor: PortDistributor) -> PageserverPort:
|
||||
pg = port_distributor.get_port()
|
||||
http = port_distributor.get_port()
|
||||
print(f"pageserver_port: pg={pg} http={http}")
|
||||
return PageserverPort(pg=pg, http=http)
|
||||
|
||||
|
||||
@zenfixture
|
||||
def pageserver(zenith_cli: ZenithCli, repo_dir: str, pageserver_port: PageserverPort) -> Iterator[ZenithPageserver]:
|
||||
"""
|
||||
The 'pageserver' fixture provides a Page Server that's up and running.
|
||||
|
||||
@@ -355,8 +436,7 @@ def pageserver(zenith_cli: ZenithCli, repo_dir: str) -> Iterator[ZenithPageserve
|
||||
By convention, the test branches are named after the tests. For example,
|
||||
test called 'test_foo' would create and use branches with the 'test_foo' prefix.
|
||||
"""
|
||||
|
||||
ps = ZenithPageserver(zenith_cli, repo_dir).init().start()
|
||||
ps = ZenithPageserver(zenith_cli=zenith_cli, repo_dir=repo_dir, port=pageserver_port).init().start()
|
||||
# For convenience in tests, create a branch from the freshly-initialized cluster.
|
||||
zenith_cli.run(["branch", "empty", "main"])
|
||||
|
||||
@@ -408,17 +488,19 @@ class PgBin:
|
||||
def run_capture(self,
|
||||
command: List[str],
|
||||
env: Optional[Env] = None,
|
||||
cwd: Optional[str] = None) -> None:
|
||||
cwd: Optional[str] = None,
|
||||
**kwargs: Any) -> None:
|
||||
"""
|
||||
Run one of the postgres binaries, with stderr and stdout redirected to a file.
|
||||
|
||||
This is just like `run`, but for chatty programs.
|
||||
This is just like `run`, but for chatty programs. Returns basepath for files
|
||||
with captured output.
|
||||
"""
|
||||
|
||||
self._fixpath(command)
|
||||
print('Running command "{}"'.format(' '.join(command)))
|
||||
env = self._build_env(env)
|
||||
subprocess_capture(self.log_dir, command, env=env, cwd=cwd, check=True)
|
||||
return subprocess_capture(self.log_dir, command, env=env, cwd=cwd, check=True, **kwargs)
|
||||
|
||||
|
||||
@zenfixture
|
||||
@@ -426,8 +508,8 @@ def pg_bin(test_output_dir: str, pg_distrib_dir: str) -> PgBin:
|
||||
return PgBin(test_output_dir, pg_distrib_dir)
|
||||
|
||||
@pytest.fixture
|
||||
def pageserver_auth_enabled(zenith_cli: ZenithCli, repo_dir: str):
|
||||
with ZenithPageserver(zenith_cli, repo_dir).init(enable_auth=True).start() as ps:
|
||||
def pageserver_auth_enabled(zenith_cli: ZenithCli, repo_dir: str, pageserver_port: PageserverPort):
|
||||
with ZenithPageserver(zenith_cli=zenith_cli, repo_dir=repo_dir, port=pageserver_port).init(enable_auth=True).start() as ps:
|
||||
# For convenience in tests, create a branch from the freshly-initialized cluster.
|
||||
zenith_cli.run(["branch", "empty", "main"])
|
||||
yield ps
|
||||
@@ -463,7 +545,7 @@ class Postgres(PgProtocol):
|
||||
if not config_lines:
|
||||
config_lines = []
|
||||
|
||||
self.zenith_cli.run(['pg', 'create', branch, f'--tenantid={self.tenant_id}'])
|
||||
self.zenith_cli.run(['pg', 'create', branch, f'--tenantid={self.tenant_id}', f'--port={self.port}'])
|
||||
self.branch = branch
|
||||
path = pathlib.Path('pgdatadirs') / 'tenants' / self.tenant_id / self.branch
|
||||
self.pgdata_dir = os.path.join(self.repo_dir, path)
|
||||
@@ -486,13 +568,11 @@ class Postgres(PgProtocol):
|
||||
|
||||
print(f"Starting postgres on branch {self.branch}")
|
||||
|
||||
run_result = self.zenith_cli.run(['pg', 'start', self.branch, f'--tenantid={self.tenant_id}'])
|
||||
run_result = self.zenith_cli.run(['pg', 'start', self.branch, f'--tenantid={self.tenant_id}', f'--port={self.port}'])
|
||||
self.running = True
|
||||
|
||||
print(f"stdout: {run_result.stdout}")
|
||||
|
||||
self.pg_bin.run(['pg_controldata', self.pg_data_dir_path()])
|
||||
|
||||
return self
|
||||
|
||||
def pg_data_dir_path(self) -> str:
|
||||
@@ -598,33 +678,15 @@ class Postgres(PgProtocol):
|
||||
def __exit__(self, exc_type, exc, tb):
|
||||
self.stop()
|
||||
|
||||
|
||||
def list_files_to_compare(self):
|
||||
pgdata_files = []
|
||||
for root, _file, filenames in os.walk(self.pgdata_dir):
|
||||
for filename in filenames:
|
||||
rel_dir = os.path.relpath(root, self.pgdata_dir)
|
||||
# Skip some dirs and files we don't want to compare
|
||||
skip_dirs = ['pg_wal', 'pg_stat', 'pg_stat_tmp', 'pg_subtrans', 'pg_logical']
|
||||
skip_files = ['pg_internal.init', 'pg.log', 'zenith.signal', 'postgresql.conf',
|
||||
'postmaster.opts', 'postmaster.pid', 'pg_control']
|
||||
if rel_dir not in skip_dirs and filename not in skip_files:
|
||||
rel_file = os.path.join(rel_dir, filename)
|
||||
pgdata_files.append(rel_file)
|
||||
|
||||
pgdata_files.sort()
|
||||
print(pgdata_files)
|
||||
return pgdata_files
|
||||
|
||||
class PostgresFactory:
|
||||
""" An object representing multiple running postgres daemons. """
|
||||
def __init__(self, zenith_cli: ZenithCli, repo_dir: str, pg_bin: PgBin, initial_tenant: str, base_port: int = 55431):
|
||||
def __init__(self, zenith_cli: ZenithCli, repo_dir: str, pg_bin: PgBin, initial_tenant: str, port_distributor: PortDistributor):
|
||||
self.zenith_cli = zenith_cli
|
||||
self.repo_dir = repo_dir
|
||||
self.num_instances = 0
|
||||
self.instances: List[Postgres] = []
|
||||
self.initial_tenant: str = initial_tenant
|
||||
self.base_port = base_port
|
||||
self.port_distributor = port_distributor
|
||||
self.pg_bin = pg_bin
|
||||
|
||||
def create_start(
|
||||
@@ -634,15 +696,13 @@ class PostgresFactory:
|
||||
wal_acceptors: Optional[str] = None,
|
||||
config_lines: Optional[List[str]] = None
|
||||
) -> Postgres:
|
||||
|
||||
pg = Postgres(
|
||||
zenith_cli=self.zenith_cli,
|
||||
repo_dir=self.repo_dir,
|
||||
pg_bin=self.pg_bin,
|
||||
tenant_id=tenant_id or self.initial_tenant,
|
||||
port=self.base_port + self.num_instances + 1,
|
||||
port=self.port_distributor.get_port(),
|
||||
)
|
||||
|
||||
self.num_instances += 1
|
||||
self.instances.append(pg)
|
||||
|
||||
@@ -665,7 +725,7 @@ class PostgresFactory:
|
||||
repo_dir=self.repo_dir,
|
||||
pg_bin=self.pg_bin,
|
||||
tenant_id=tenant_id or self.initial_tenant,
|
||||
port=self.base_port + self.num_instances + 1,
|
||||
port=self.port_distributor.get_port(),
|
||||
)
|
||||
|
||||
self.num_instances += 1
|
||||
@@ -690,7 +750,7 @@ class PostgresFactory:
|
||||
repo_dir=self.repo_dir,
|
||||
pg_bin=self.pg_bin,
|
||||
tenant_id=tenant_id or self.initial_tenant,
|
||||
port=self.base_port + self.num_instances + 1,
|
||||
port=self.port_distributor.get_port(),
|
||||
)
|
||||
|
||||
self.num_instances += 1
|
||||
@@ -714,8 +774,14 @@ def initial_tenant(pageserver: ZenithPageserver):
|
||||
|
||||
|
||||
@zenfixture
|
||||
def postgres(zenith_cli: ZenithCli, initial_tenant: str, repo_dir: str, pg_bin: PgBin) -> Iterator[PostgresFactory]:
|
||||
pgfactory = PostgresFactory(zenith_cli, repo_dir, pg_bin, initial_tenant=initial_tenant)
|
||||
def postgres(zenith_cli: ZenithCli, initial_tenant: str, repo_dir: str, pg_bin: PgBin, port_distributor: PortDistributor) -> Iterator[PostgresFactory]:
|
||||
pgfactory = PostgresFactory(
|
||||
zenith_cli=zenith_cli,
|
||||
repo_dir=repo_dir,
|
||||
pg_bin=pg_bin,
|
||||
initial_tenant=initial_tenant,
|
||||
port_distributor=port_distributor,
|
||||
)
|
||||
|
||||
yield pgfactory
|
||||
|
||||
@@ -731,10 +797,11 @@ def read_pid(path: Path):
@dataclass
class WalAcceptor:
    """ An object representing a running wal acceptor daemon. """
    bin_path: Path
    wa_bin_path: Path
    data_dir: Path
    port: int
    num: int  # identifier for logging
    pageserver_port: int
    auth_token: Optional[str] = None

    def start(self) -> 'WalAcceptor':
@@ -742,13 +809,13 @@ class WalAcceptor:
        self.data_dir.mkdir(parents=True, exist_ok=True)
        self.pidfile.unlink(missing_ok=True)

        cmd = [str(self.bin_path)]
        cmd = [str(self.wa_bin_path)]
        cmd.extend(["-D", str(self.data_dir)])
        cmd.extend(["-l", "localhost:{}".format(self.port)])
        cmd.extend(["-l", f"localhost:{self.port}"])
        cmd.append("--daemonize")
        cmd.append("--no-sync")
        # Tell page server it can receive WAL from this WAL safekeeper
        cmd.extend(["--pageserver", "localhost:{}".format(DEFAULT_PAGESERVER_PG_PORT)])
        cmd.extend(["--pageserver", f"localhost:{self.pageserver_port}"])
        cmd.extend(["--recall", "1 second"])
        print('Running command "{}"'.format(' '.join(cmd)))
        env = {'PAGESERVER_AUTH_TOKEN': self.auth_token} if self.auth_token else None
@@ -792,27 +859,49 @@ class WalAcceptor:
            pass  # pidfile might be obsolete
        return self

    def append_logical_message(self, tenant_id: str, timeline_id: str, request: Dict[str, Any]) -> Dict[str, Any]:
        """
        Send JSON_CTRL query to append LogicalMessage to WAL and modify
        safekeeper state. It will construct LogicalMessage from provided
        prefix and message, and then will write it to WAL.
        """

        # "replication=0" hacks psycopg not to send additional queries
        # on startup, see https://github.com/psycopg/psycopg2/pull/482
        connstr = f"host=localhost port={self.port} replication=0 options='-c ztimelineid={timeline_id} ztenantid={tenant_id}'"

        with closing(psycopg2.connect(connstr)) as conn:
            # server doesn't support transactions
            conn.autocommit = True
            with conn.cursor() as cur:
                request_json = json.dumps(request)
                print(f"JSON_CTRL request on port {self.port}: {request_json}")
                cur.execute("JSON_CTRL " + request_json)
                all = cur.fetchall()
                print(f"JSON_CTRL response: {all[0][0]}")
                return json.loads(all[0][0])

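For reference, a sketch of how a test might call `append_logical_message`; the request keys mirror the `AppendLogicalMessage` struct added in `walkeeper/src/json_ctrl.rs` later in this diff, while the concrete term and LSN values (and their string format) are illustrative placeholders rather than known-good inputs:

```python
# Illustrative only: field values are placeholders, and the exact LSN wire
# format depends on how the safekeeper deserializes Lsn from JSON.
request = {
    "lm_prefix": "prefix",
    "lm_message": "message",
    "set_commit_lsn": True,
    "term": 2,
    "epoch_start_lsn": "0/16B9188",
    "begin_lsn": "0/16B9188",
    "truncate_lsn": "0/16B9188",
}
response = wa.append_logical_message(tenant_id, timeline_id, request)
# AppendResult comes back as JSON: the safekeeper state plus the inserted WAL range.
print(response["state"], response["inserted_wal"])
```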
class WalAcceptorFactory:
    """ An object representing multiple running wal acceptors. """
    def __init__(self, zenith_binpath: Path, data_dir: Path):
        self.wa_binpath = zenith_binpath / 'wal_acceptor'
    def __init__(self, zenith_binpath: Path, data_dir: Path, pageserver_port: int, port_distributor: PortDistributor):
        self.wa_bin_path = zenith_binpath / 'wal_acceptor'
        self.data_dir = data_dir
        self.instances: List[WalAcceptor] = []
        self.initial_port = 54321
        self.port_distributor = port_distributor
        self.pageserver_port = pageserver_port

    def start_new(self, auth_token: Optional[str] = None) -> WalAcceptor:
        """
        Start new wal acceptor.
        """

        wa_num = len(self.instances)
        wa = WalAcceptor(
            self.wa_binpath,
            self.data_dir / "wal_acceptor_{}".format(wa_num),
            self.initial_port + wa_num,
            wa_num,
            auth_token,
            wa_bin_path=self.wa_bin_path,
            data_dir=self.data_dir / "wal_acceptor_{}".format(wa_num),
            port=self.port_distributor.get_port(),
            num=wa_num,
            pageserver_port=self.pageserver_port,
            auth_token=auth_token,
        )
        wa.start()
        self.instances.append(wa)
@@ -837,9 +926,14 @@ class WalAcceptorFactory:


@zenfixture
def wa_factory(zenith_binpath: str, repo_dir: str) -> Iterator[WalAcceptorFactory]:
def wa_factory(zenith_binpath: str, repo_dir: str, pageserver_port: PageserverPort, port_distributor: PortDistributor) -> Iterator[WalAcceptorFactory]:
    """ Gives WalAcceptorFactory providing wal acceptors. """
    wafactory = WalAcceptorFactory(Path(zenith_binpath), Path(repo_dir) / "wal_acceptors")
    wafactory = WalAcceptorFactory(
        zenith_binpath=Path(zenith_binpath),
        data_dir=Path(repo_dir) / "wal_acceptors",
        pageserver_port=pageserver_port.pg,
        port_distributor=port_distributor,
    )
    yield wafactory
    # After the yield comes any cleanup code we need.
    print('Starting wal acceptors cleanup')

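Downstream, the two factories are wired together the way the bulk-tenant test later in this diff does it; a minimal usage sketch (the branch name here is made up):

```python
# Sketch of typical test wiring: three safekeepers plus one compute node.
zenith_cli.run(["branch", "my_test_branch", "main"])   # hypothetical branch name
wa_factory.start_n_new(3)                              # start three wal acceptors
pg = postgres.create_start(
    "my_test_branch",
    wal_acceptors=wa_factory.get_connstrs(),           # point the compute node at them
)
```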
@@ -944,37 +1038,67 @@ class TenantFactory:
def tenant_factory(zenith_cli: ZenithCli):
    return TenantFactory(zenith_cli)

#
# Test helpers
#
def list_files_to_compare(pgdata_dir: str):
    pgdata_files = []
    for root, _file, filenames in os.walk(pgdata_dir):
        for filename in filenames:
            rel_dir = os.path.relpath(root, pgdata_dir)
            # Skip some dirs and files we don't want to compare
            skip_dirs = ['pg_wal', 'pg_stat', 'pg_stat_tmp', 'pg_subtrans', 'pg_logical']
            skip_files = ['pg_internal.init', 'pg.log', 'zenith.signal', 'postgresql.conf',
                          'postmaster.opts', 'postmaster.pid', 'pg_control']
            if rel_dir not in skip_dirs and filename not in skip_files:
                rel_file = os.path.join(rel_dir, filename)
                pgdata_files.append(rel_file)

    pgdata_files.sort()
    print(pgdata_files)
    return pgdata_files

# pg is the existing and running compute node that we want to compare with a basebackup
def check_restored_datadir_content(zenith_cli: ZenithCli, test_output_dir: str, pg: Postgres, pageserver_pg_port: int):

    # Get the timeline ID of our branch. We need it for the 'basebackup' command
    with closing(pg.connect()) as conn:
        with conn.cursor() as cur:
            cur.execute("SHOW zenith.zenith_timeline")
            timeline = cur.fetchone()[0]

# pg is the existing compute node we want to compare our basebackup to
# lsn is the latest lsn of this node
def check_restored_datadir_content(zenith_cli, pg, lsn, postgres: PostgresFactory):
    # stop postgres to ensure that files won't change
    pg.stop()

    # Take a basebackup from pageserver
    restored_dir_path = os.path.join(test_output_dir, f"{pg.branch}_restored_datadir")
    mkdir_if_needed(restored_dir_path)

    psql_path = os.path.join(pg.pg_bin.pg_bin_path, 'psql')

    cmd = rf"""
    {psql_path}                                    \
        --no-psqlrc                                \
        postgres://localhost:{pageserver_pg_port}  \
        -c 'basebackup {pg.tenant_id} {timeline}'  \
     | tar -x -C {restored_dir_path}
    """

    subprocess.check_call(cmd, shell=True)

    # list files we're going to compare
    pgdata_files = pg.list_files_to_compare()

    # create new branch, but don't start postgres
    # We only need 'basebackup' result here.
    zenith_cli.run(
        ["branch", "check_restored_datadir", pg.branch + "@" + lsn])

    pg2 = postgres.create('check_restored_datadir')
    print('postgres is created on check_restored_datadir branch')

    print('files in a basebackup')
    # list files we're going to compare
    pgdata_files2 = pg2.list_files_to_compare()
    pgdata_files = list_files_to_compare(pg.pgdata_dir)
    restored_files = list_files_to_compare(restored_dir_path)

    # check that file sets are equal
    assert pgdata_files == pgdata_files2
    assert pgdata_files == restored_files

    # compare content of the files
    # filecmp returns (match, mismatch, error) lists
    # We've already filtered all mismatching files in list_files_to_compare(),
    # so here expect that the content is identical
    (match, mismatch, error) = filecmp.cmpfiles(pg.pgdata_dir,
                                                pg2.pgdata_dir,
                                                restored_dir_path,
                                                pgdata_files,
                                                shallow=False)
    print('filecmp result mismatch and error lists:')
@@ -984,8 +1108,8 @@ def check_restored_datadir_content(zenith_cli, pg, lsn, postgres: PostgresFactor
    for f in mismatch:

        f1 = os.path.join(pg.pgdata_dir, f)
        f2 = os.path.join(pg2.pgdata_dir, f)
        stdout_filename = "{}.diff".format(f2)
        f2 = os.path.join(restored_dir_path, f)
        stdout_filename = "{}.filediff".format(f2)

        with open(stdout_filename, 'w') as stdout_f:
            subprocess.run("xxd -b {} > {}.hex ".format(f1, f1), shell=True)

test_runner/performance/test_bulk_insert.py (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
import os
|
||||
from contextlib import closing
|
||||
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
|
||||
|
||||
#
|
||||
# Run bulk INSERT test.
|
||||
#
|
||||
# Collects metrics:
|
||||
#
|
||||
# 1. Time to INSERT 5 million rows
|
||||
# 2. Disk writes
|
||||
# 3. Disk space used
|
||||
# 4. Peak memory usage
|
||||
#
|
||||
def test_bulk_insert(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin, zenith_cli, zenbenchmark, repo_dir: str):
|
||||
# Create a branch for us
|
||||
zenith_cli.run(["branch", "test_bulk_insert", "empty"])
|
||||
|
||||
pg = postgres.create_start('test_bulk_insert')
|
||||
print("postgres is running on 'test_bulk_insert' branch")
|
||||
|
||||
# Open a connection directly to the page server that we'll use to force
|
||||
# flushing the layers to disk
|
||||
psconn = pageserver.connect()
|
||||
pscur = psconn.cursor()
|
||||
|
||||
# Get the timeline ID of our branch. We need it for the 'do_gc' command
|
||||
with closing(pg.connect()) as conn:
|
||||
with conn.cursor() as cur:
|
||||
cur.execute("SHOW zenith.zenith_timeline")
|
||||
timeline = cur.fetchone()[0]
|
||||
|
||||
cur.execute("create table huge (i int, j int);")
|
||||
|
||||
# Run INSERT, recording the time and I/O it takes
|
||||
with zenbenchmark.record_pageserver_writes(pageserver, 'pageserver_writes'):
|
||||
with zenbenchmark.record_duration('insert'):
|
||||
cur.execute("insert into huge values (generate_series(1, 5000000), 0);")
|
||||
|
||||
# Flush the layers from memory to disk. This is included in the reported
|
||||
# time and I/O
|
||||
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
|
||||
|
||||
# Record peak memory usage
|
||||
zenbenchmark.record("peak_mem", zenbenchmark.get_peak_mem(pageserver) / 1024, 'MB')
|
||||
|
||||
# Report disk space used by the repository
|
||||
timeline_size = zenbenchmark.get_timeline_size(repo_dir, pageserver.initial_tenant, timeline)
|
||||
zenbenchmark.record('size', timeline_size / (1024*1024), 'MB')
|
||||
test_runner/performance/test_bulk_tenant_create.py (new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
import timeit
|
||||
import pytest
|
||||
|
||||
from fixtures.zenith_fixtures import (
|
||||
TenantFactory,
|
||||
ZenithCli,
|
||||
PostgresFactory,
|
||||
)
|
||||
|
||||
pytest_plugins = ("fixtures.benchmark_fixture")
|
||||
|
||||
# Run bulk tenant creation test.
|
||||
#
|
||||
# Collects metrics:
|
||||
#
|
||||
# 1. Time to create {1,10,50} tenants
|
||||
# 2. Average creation time per tenant
|
||||
|
||||
|
||||
@pytest.mark.parametrize('tenants_count', [1, 5, 10])
|
||||
@pytest.mark.parametrize('use_wal_acceptors', ['with_wa', 'without_wa'])
|
||||
def test_bulk_tenant_create(
|
||||
zenith_cli: ZenithCli,
|
||||
tenant_factory: TenantFactory,
|
||||
postgres: PostgresFactory,
|
||||
wa_factory,
|
||||
use_wal_acceptors: str,
|
||||
tenants_count: int,
|
||||
zenbenchmark,
|
||||
):
|
||||
"""Measure tenant creation time (with and without wal acceptors)"""
|
||||
|
||||
time_slices = []
|
||||
|
||||
for i in range(tenants_count):
|
||||
start = timeit.default_timer()
|
||||
|
||||
tenant = tenant_factory.create()
|
||||
zenith_cli.run([
|
||||
"branch", f"test_bulk_tenant_create_{tenants_count}_{i}_{use_wal_acceptors}", "main",
|
||||
f"--tenantid={tenant}"
|
||||
])
|
||||
|
||||
if use_wal_acceptors == 'with_wa':
|
||||
wa_factory.start_n_new(3)
|
||||
|
||||
pg_tenant = postgres.create_start(
|
||||
f"test_bulk_tenant_create_{tenants_count}_{i}_{use_wal_acceptors}",
|
||||
tenant,
|
||||
wal_acceptors=wa_factory.get_connstrs() if use_wal_acceptors == 'with_wa' else None,
|
||||
)
|
||||
|
||||
end = timeit.default_timer()
|
||||
time_slices.append(end - start)
|
||||
|
||||
pg_tenant.stop()
|
||||
|
||||
zenbenchmark.record('tenant_creation_time', sum(time_slices) / len(time_slices), 's')
|
||||
@@ -4,19 +4,6 @@ from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
|
||||
|
||||
def get_timeline_size(repo_dir: str, tenantid: str, timelineid: str):
|
||||
path = "{}/tenants/{}/timelines/{}".format(repo_dir, tenantid, timelineid)
|
||||
|
||||
totalbytes = 0
|
||||
for root, dirs, files in os.walk(path):
|
||||
for name in files:
|
||||
totalbytes += os.path.getsize(os.path.join(root, name))
|
||||
|
||||
if 'wal' in dirs:
|
||||
dirs.remove('wal') # don't visit 'wal' subdirectory
|
||||
|
||||
return totalbytes
|
||||
|
||||
#
|
||||
# Run a very short pgbench test.
|
||||
#
|
||||
@@ -64,5 +51,5 @@ def test_pgbench(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin
|
||||
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
|
||||
|
||||
# Report disk space used by the repository
|
||||
timeline_size = get_timeline_size(repo_dir, pageserver.initial_tenant, timeline)
|
||||
timeline_size = zenbenchmark.get_timeline_size(repo_dir, pageserver.initial_tenant, timeline)
|
||||
zenbenchmark.record('size', timeline_size / (1024*1024), 'MB')
|
||||
|
||||
test_runner/performance/test_write_amplification.py (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
# Demonstrate Write Amplification with naive oldest-first layer checkpointing
|
||||
# algorithm.
|
||||
#
|
||||
# In each iteration of the test, we create a new table that's slightly under 10
|
||||
# MB in size (10 MB is the current "segment size" used by the page server). Then
|
||||
# we make a tiny update to all the tables already created. This creates a WAL
|
||||
# pattern where you have a lot of updates on one segment (the newly created
|
||||
# one), alternating with small updates on all relations. This is the worst
|
||||
# case scenario for the naive checkpointing policy where we write out the layers
|
||||
# in LSN order, writing the oldest layer first. That creates a new 10 MB image
|
||||
# layer to be created for each of those small updates. This is the Write
|
||||
# Amplification problem at its finest.
|
||||
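As a rough back-of-the-envelope for the scenario described above (the segment size and iteration count come from the comment and the loop in the test below; the rest is assumption): every small update to an already-written segment forces a fresh ~10 MB image layer, so total writes grow quadratically with the number of tables.

```python
# Not measured output -- just the arithmetic behind the O(n^2) claim above.
SEGMENT_MB = 10      # page server "segment size" mentioned in the comment
ITERATIONS = 25      # matches the loop in the test below
total_mb = sum(i * SEGMENT_MB for i in range(ITERATIONS))   # ~3000 MB written
live_mb = ITERATIONS * SEGMENT_MB                           # ~250 MB of live data
print(f"~{total_mb} MB written for ~{live_mb} MB of live data")
```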
import os
|
||||
from contextlib import closing
|
||||
from fixtures.zenith_fixtures import PostgresFactory, ZenithPageserver
|
||||
|
||||
pytest_plugins = ("fixtures.zenith_fixtures", "fixtures.benchmark_fixture")
|
||||
|
||||
def test_write_amplification(postgres: PostgresFactory, pageserver: ZenithPageserver, pg_bin, zenith_cli, zenbenchmark, repo_dir: str):
|
||||
# Create a branch for us
|
||||
zenith_cli.run(["branch", "test_write_amplification", "empty"])
|
||||
|
||||
pg = postgres.create_start('test_write_amplification')
|
||||
print("postgres is running on 'test_write_amplification' branch")
|
||||
|
||||
# Open a connection directly to the page server that we'll use to force
|
||||
# flushing the layers to disk
|
||||
psconn = pageserver.connect()
|
||||
pscur = psconn.cursor()
|
||||
|
||||
with closing(pg.connect()) as conn:
|
||||
with conn.cursor() as cur:
|
||||
# Get the timeline ID of our branch. We need it for the 'do_gc' command
|
||||
cur.execute("SHOW zenith.zenith_timeline")
|
||||
timeline = cur.fetchone()[0]
|
||||
|
||||
with zenbenchmark.record_pageserver_writes(pageserver, 'pageserver_writes'):
|
||||
with zenbenchmark.record_duration('run'):
|
||||
|
||||
# NOTE: Because each iteration updates every table already created,
|
||||
# the runtime and write amplification is O(n^2), where n is the
|
||||
# number of iterations.
|
||||
for i in range(25):
|
||||
cur.execute(f'''
|
||||
CREATE TABLE tbl{i} AS
|
||||
SELECT g as i, 'long string to consume some space' || g as t
|
||||
FROM generate_series(1, 100000) g
|
||||
''')
|
||||
cur.execute(f"create index on tbl{i} (i);")
|
||||
for j in range(1, i):
|
||||
cur.execute(f"delete from tbl{j} where i = {i}")
|
||||
|
||||
# Force checkpointing. As of this writing, we don't have
|
||||
# a back-pressure mechanism, and the page server cannot
|
||||
# keep up digesting and checkpointing the WAL at the
|
||||
# rate that it is generated. If we don't force a
|
||||
# checkpoint, the WAL will just accumulate in memory
|
||||
# until you hit OOM error. So in effect, we use much
|
||||
# more memory to hold the incoming WAL, and write them
|
||||
# out in larger batches than we'd really want. Using
|
||||
# more memory hides the write amplification problem this
|
||||
# test tries to demonstrate.
|
||||
#
|
||||
# The write amplification problem is real, and using
|
||||
# more memory isn't the right solution. We could
|
||||
# demonstrate the effect also by generating the WAL
|
||||
# slower, adding some delays in this loop. But forcing
|
||||
# the checkpointing and GC makes the test go faster,
|
||||
# with the same total I/O effect.
|
||||
pscur.execute(f"do_gc {pageserver.initial_tenant} {timeline} 0")
|
||||
|
||||
# Report disk space used by the repository
|
||||
timeline_size = zenbenchmark.get_timeline_size(repo_dir, pageserver.initial_tenant, timeline)
|
||||
zenbenchmark.record('size', timeline_size / (1024*1024), 'MB')
|
||||
Submodule vendor/postgres updated: 084b228b89...5387eb4a3b
@@ -13,16 +13,12 @@ bytes = "1.0.1"
byteorder = "1.4.3"
fs2 = "0.4.3"
lazy_static = "1.4.0"
slog-stdlog = "4.1.0"
slog-async = "2.6.0"
slog-scope = "4.4.0"
slog-term = "2.8.0"
slog = "2.7.0"
serde_json = "1"
log = "0.4.14"
clap = "2.33.0"
daemonize = "0.4.1"
rust-s3 = { version = "0.27.0-rc4", features = ["no-verify-ssl"] }
tokio = { version = "1.3.0", features = ["full"] }
tokio = "1.11"
tokio-stream = { version = "0.1.4" }
postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }

@@ -76,6 +76,43 @@ safekeepers.
See README_PROTO.md for a more detailed description of the consensus
protocol. spec/ contains a TLA+ specification of it.

# Q&A

Q: Why have a separate service instead of connecting the Page Server directly to a
primary PostgreSQL node?
A: The Page Server is a single server which can be lost. As our primary
fault-tolerant storage is S3, we do not want to wait for it before
committing a transaction. The WAL service acts as temporary fault-tolerant
storage for recent data before it gets to the Page Server and then finally
to S3. Whenever WAL and pages are committed to S3, the WAL service's storage
can be trimmed.

Q: What if the compute node evicts a page, needs it back, but the page is yet
to reach the Page Server?
A: If the compute node has evicted a page, all changes from that page are
already committed, i.e. they are saved on a majority of WAL safekeepers. These
WAL records will eventually reach the Page Server. The Page Server notes
that the compute node requests pages with a very recent LSN and will not
respond to the compute node until the corresponding WAL is received from the
WAL safekeepers.

Q: How long may the Page Server have to wait?
A: Not too long, hopefully. If a page is evicted, it probably was not used for
a while, so the WAL service has had enough time to push changes to the Page
Server. There may be issues if there is no backpressure and the compute node
and the WAL service run ahead of the Page Server, though.
There is no backpressure right now, so you may even see some spurious
timeouts in tests.

Q: How do WAL safekeepers communicate with each other?
A: They may only send each other messages via the compute node; they never
communicate directly with each other.

Q: Why have a consensus algorithm if there is only a single compute node?
A: There may actually be moments with multiple PostgreSQL nodes running at the
same time, e.g. while we are bringing one up and one down. We would like to
avoid simultaneous writes from different nodes, so there should be a consensus
on which node is the primary.

# Terminology

@@ -1,15 +1,14 @@
|
||||
//
|
||||
// Main entry point for the wal_acceptor executable
|
||||
//
|
||||
use anyhow::{Context, Result};
|
||||
use anyhow::Result;
|
||||
use clap::{App, Arg};
|
||||
use daemonize::Daemonize;
|
||||
use log::*;
|
||||
use slog::Drain;
|
||||
use std::env;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::thread;
|
||||
use std::{env, io};
|
||||
use std::{fs::File, fs::OpenOptions};
|
||||
use zenith_utils::logging;
|
||||
|
||||
use walkeeper::s3_offload;
|
||||
use walkeeper::wal_service;
|
||||
@@ -113,20 +112,7 @@ fn main() -> Result<()> {
|
||||
|
||||
fn start_wal_acceptor(conf: WalAcceptorConf) -> Result<()> {
|
||||
let log_filename = conf.data_dir.join("wal_acceptor.log");
|
||||
// Don't open the same file for output multiple times;
|
||||
// the different fds could overwrite each other's output.
|
||||
let log_file = OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&log_filename)
|
||||
.with_context(|| format!("failed to open {:?}", &log_filename))?;
|
||||
|
||||
// Initialize logger
|
||||
let logger_file = log_file.try_clone().unwrap();
|
||||
let _scope_guard = init_logging(&conf, logger_file)?;
|
||||
let _log_guard = slog_stdlog::init().unwrap();
|
||||
// Note: this `info!(...)` macro comes from `log` crate
|
||||
info!("standard logging redirected to slog");
|
||||
let (_scope_guard, log_file) = logging::init(log_filename, conf.daemonize)?;
|
||||
|
||||
if conf.daemonize {
|
||||
info!("daemonizing...");
|
||||
@@ -179,29 +165,3 @@ fn start_wal_acceptor(conf: WalAcceptorConf) -> Result<()> {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn init_logging(
|
||||
conf: &WalAcceptorConf,
|
||||
log_file: File,
|
||||
) -> Result<slog_scope::GlobalLoggerGuard, io::Error> {
|
||||
if conf.daemonize {
|
||||
let decorator = slog_term::PlainSyncDecorator::new(log_file);
|
||||
let drain = slog_term::CompactFormat::new(decorator).build();
|
||||
let drain = slog::Filter::new(drain, |record: &slog::Record| {
|
||||
record.level().is_at_least(slog::Level::Info)
|
||||
});
|
||||
let drain = std::sync::Mutex::new(drain).fuse();
|
||||
let logger = slog::Logger::root(drain, slog::o!());
|
||||
Ok(slog_scope::set_global_logger(logger))
|
||||
} else {
|
||||
let decorator = slog_term::TermDecorator::new().build();
|
||||
let drain = slog_term::FullFormat::new(decorator).build().fuse();
|
||||
let drain = slog::Filter::new(drain, |record: &slog::Record| {
|
||||
record.level().is_at_least(slog::Level::Info)
|
||||
})
|
||||
.fuse();
|
||||
let drain = slog_async::Async::new(drain).chan_size(1000).build().fuse();
|
||||
let logger = slog::Logger::root(drain, slog::o!());
|
||||
Ok(slog_scope::set_global_logger(logger))
|
||||
}
|
||||
}
|
||||
|
||||
walkeeper/src/json_ctrl.rs (new file, 249 lines)
@@ -0,0 +1,249 @@
|
||||
//!
|
||||
//! This module implements the JSON_CTRL protocol, which allows exchanging
|
||||
//! JSON messages over psql for testing purposes.
|
||||
//!
|
||||
//! Currently supports AppendLogicalMessage, which is used for WAL
|
||||
//! modifications in tests.
|
||||
//!
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use bytes::{BufMut, Bytes, BytesMut};
|
||||
use crc32c::crc32c_append;
|
||||
use log::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::safekeeper::{AcceptorProposerMessage, AppendResponse};
|
||||
use crate::safekeeper::{
|
||||
AppendRequest, AppendRequestHeader, ProposerAcceptorMessage, ProposerGreeting,
|
||||
};
|
||||
use crate::safekeeper::{SafeKeeperState, Term};
|
||||
use crate::send_wal::SendWalHandler;
|
||||
use crate::timeline::TimelineTools;
|
||||
use postgres_ffi::pg_constants;
|
||||
use postgres_ffi::xlog_utils;
|
||||
use postgres_ffi::{uint32, uint64, Oid, XLogRecord};
|
||||
use zenith_utils::lsn::Lsn;
|
||||
use zenith_utils::postgres_backend::PostgresBackend;
|
||||
use zenith_utils::pq_proto::{BeMessage, RowDescriptor, TEXT_OID};
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct AppendLogicalMessage {
|
||||
// prefix and message to build LogicalMessage
|
||||
lm_prefix: String,
|
||||
lm_message: String,
|
||||
|
||||
// if true, commit_lsn will match flush_lsn after append
|
||||
set_commit_lsn: bool,
|
||||
|
||||
// fields from AppendRequestHeader
|
||||
term: Term,
|
||||
epoch_start_lsn: Lsn,
|
||||
begin_lsn: Lsn,
|
||||
truncate_lsn: Lsn,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct AppendResult {
|
||||
// safekeeper state after append
|
||||
state: SafeKeeperState,
|
||||
// info about new record in the WAL
|
||||
inserted_wal: InsertedWAL,
|
||||
}
|
||||
|
||||
/// Handles command to craft logical message WAL record with given
|
||||
/// content, and then append it with specified term and lsn. This
|
||||
/// function is used to test safekeepers in different scenarios.
|
||||
pub fn handle_json_ctrl(
|
||||
swh: &mut SendWalHandler,
|
||||
pgb: &mut PostgresBackend,
|
||||
cmd: &Bytes,
|
||||
) -> Result<()> {
|
||||
let cmd = cmd
|
||||
.strip_prefix(b"JSON_CTRL")
|
||||
.ok_or_else(|| anyhow!("invalid prefix"))?;
|
||||
// trim zeroes in the end
|
||||
let cmd = cmd.strip_suffix(&[0u8]).unwrap_or(cmd);
|
||||
|
||||
let append_request: AppendLogicalMessage = serde_json::from_slice(cmd)?;
|
||||
info!("JSON_CTRL request: {:?}", append_request);
|
||||
|
||||
// need to init safekeeper state before AppendRequest
|
||||
prepare_safekeeper(swh)?;
|
||||
|
||||
let inserted_wal = append_logical_message(swh, append_request)?;
|
||||
let response = AppendResult {
|
||||
state: swh.timeline.get().get_info(),
|
||||
inserted_wal,
|
||||
};
|
||||
let response_data = serde_json::to_vec(&response)?;
|
||||
|
||||
pgb.write_message_noflush(&BeMessage::RowDescription(&[RowDescriptor {
|
||||
name: b"json",
|
||||
typoid: TEXT_OID,
|
||||
typlen: -1,
|
||||
..Default::default()
|
||||
}]))?
|
||||
.write_message_noflush(&BeMessage::DataRow(&[Some(&response_data)]))?
|
||||
.write_message(&BeMessage::CommandComplete(b"JSON_CTRL"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Prepare safekeeper to process append requests without crashes,
|
||||
/// by sending ProposerGreeting with default server.wal_seg_size.
|
||||
fn prepare_safekeeper(swh: &mut SendWalHandler) -> Result<()> {
|
||||
let greeting_request = ProposerAcceptorMessage::Greeting(ProposerGreeting {
|
||||
protocol_version: 1, // current protocol
|
||||
pg_version: 0, // unknown
|
||||
proposer_id: [0u8; 16],
|
||||
system_id: 0,
|
||||
ztli: swh.timelineid.unwrap(),
|
||||
tenant_id: swh.tenantid.unwrap(),
|
||||
tli: 0,
|
||||
wal_seg_size: pg_constants::WAL_SEGMENT_SIZE as u32, // 16MB, default for tests
|
||||
});
|
||||
|
||||
let response = swh.timeline.get().process_msg(&greeting_request)?;
|
||||
match response {
|
||||
AcceptorProposerMessage::Greeting(_) => Ok(()),
|
||||
_ => anyhow::bail!("not GreetingResponse"),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct InsertedWAL {
|
||||
begin_lsn: Lsn,
|
||||
end_lsn: Lsn,
|
||||
append_response: AppendResponse,
|
||||
}
|
||||
|
||||
/// Extend local WAL with new LogicalMessage record. To do that,
|
||||
/// create AppendRequest with new WAL and pass it to safekeeper.
|
||||
fn append_logical_message(
|
||||
swh: &mut SendWalHandler,
|
||||
msg: AppendLogicalMessage,
|
||||
) -> Result<InsertedWAL> {
|
||||
let wal_data = encode_logical_message(msg.lm_prefix, msg.lm_message);
|
||||
let sk_state = swh.timeline.get().get_info();
|
||||
|
||||
let begin_lsn = msg.begin_lsn;
|
||||
let end_lsn = begin_lsn + wal_data.len() as u64;
|
||||
|
||||
let commit_lsn = if msg.set_commit_lsn {
|
||||
end_lsn
|
||||
} else {
|
||||
sk_state.commit_lsn
|
||||
};
|
||||
|
||||
let append_request = ProposerAcceptorMessage::AppendRequest(AppendRequest {
|
||||
h: AppendRequestHeader {
|
||||
term: msg.term,
|
||||
epoch_start_lsn: begin_lsn,
|
||||
begin_lsn,
|
||||
end_lsn,
|
||||
commit_lsn,
|
||||
truncate_lsn: msg.truncate_lsn,
|
||||
proposer_uuid: [0u8; 16],
|
||||
},
|
||||
wal_data: Bytes::from(wal_data),
|
||||
});
|
||||
|
||||
let response = swh.timeline.get().process_msg(&append_request)?;
|
||||
|
||||
let append_response = match response {
|
||||
AcceptorProposerMessage::AppendResponse(resp) => resp,
|
||||
_ => anyhow::bail!("not AppendResponse"),
|
||||
};
|
||||
|
||||
Ok(InsertedWAL {
|
||||
begin_lsn,
|
||||
end_lsn,
|
||||
append_response,
|
||||
})
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
struct XlLogicalMessage {
|
||||
db_id: Oid,
|
||||
transactional: uint32, // bool, takes 4 bytes due to alignment in C structures
|
||||
prefix_size: uint64,
|
||||
message_size: uint64,
|
||||
}
|
||||
|
||||
impl XlLogicalMessage {
|
||||
pub fn encode(&self) -> Bytes {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
self.ser().unwrap().into()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new WAL record for non-transactional logical message.
|
||||
/// Used for creating artificial WAL for tests, as LogicalMessage
|
||||
/// record is basically no-op.
|
||||
fn encode_logical_message(prefix: String, message: String) -> Vec<u8> {
|
||||
let mut prefix_bytes = BytesMut::with_capacity(prefix.len() + 1);
|
||||
prefix_bytes.put(prefix.as_bytes());
|
||||
prefix_bytes.put_u8(0);
|
||||
|
||||
let message_bytes = message.as_bytes();
|
||||
|
||||
let logical_message = XlLogicalMessage {
|
||||
db_id: 0,
|
||||
transactional: 0,
|
||||
prefix_size: prefix_bytes.len() as u64,
|
||||
message_size: message_bytes.len() as u64,
|
||||
};
|
||||
|
||||
let mainrdata = logical_message.encode();
|
||||
let mainrdata_len: usize = mainrdata.len() + prefix_bytes.len() + message_bytes.len();
|
||||
// only short mainrdata is supported for now
|
||||
assert!(mainrdata_len <= 255);
|
||||
let mainrdata_len = mainrdata_len as u8;
|
||||
|
||||
let mut data: Vec<u8> = vec![pg_constants::XLR_BLOCK_ID_DATA_SHORT, mainrdata_len];
|
||||
data.extend_from_slice(&mainrdata);
|
||||
data.extend_from_slice(&prefix_bytes);
|
||||
data.extend_from_slice(message_bytes);
|
||||
|
||||
let total_len = xlog_utils::XLOG_SIZE_OF_XLOG_RECORD + data.len();
|
||||
|
||||
let mut header = XLogRecord {
|
||||
xl_tot_len: total_len as u32,
|
||||
xl_xid: 0,
|
||||
xl_prev: 0,
|
||||
xl_info: 0,
|
||||
xl_rmid: 21,
|
||||
__bindgen_padding_0: [0u8; 2usize],
|
||||
xl_crc: 0, // crc will be calculated later
|
||||
};
|
||||
|
||||
let header_bytes = header.encode();
|
||||
let crc = crc32c_append(0, &data);
|
||||
let crc = crc32c_append(crc, &header_bytes[0..xlog_utils::XLOG_RECORD_CRC_OFFS]);
|
||||
header.xl_crc = crc;
|
||||
|
||||
let mut wal: Vec<u8> = Vec::new();
|
||||
wal.extend_from_slice(&header.encode());
|
||||
wal.extend_from_slice(&data);
|
||||
|
||||
// WAL start position must be aligned at 8 bytes,
|
||||
// this will add padding for the next WAL record.
|
||||
const PADDING: usize = 8;
|
||||
let padding_rem = wal.len() % PADDING;
|
||||
if padding_rem != 0 {
|
||||
wal.resize(wal.len() + PADDING - padding_rem, 0);
|
||||
}
|
||||
|
||||
wal
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_encode_logical_message() {
|
||||
let expected = [
|
||||
64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 170, 34, 166, 227, 255, 38,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 112, 114, 101, 102,
|
||||
105, 120, 0, 109, 101, 115, 115, 97, 103, 101,
|
||||
];
|
||||
let actual = encode_logical_message("prefix".to_string(), "message".to_string());
|
||||
assert_eq!(expected, actual[..]);
|
||||
}
|
||||
@@ -2,6 +2,7 @@
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
pub mod json_ctrl;
|
||||
pub mod receive_wal;
|
||||
pub mod replication;
|
||||
pub mod s3_offload;
|
||||
|
||||
@@ -74,7 +74,7 @@ fn request_callback(conf: WalAcceptorConf, timelineid: ZTimelineId, tenantid: ZT
|
||||
|
||||
impl<'pg> ReceiveWalConn<'pg> {
|
||||
pub fn new(pg: &'pg mut PostgresBackend) -> Result<ReceiveWalConn<'pg>> {
|
||||
let peer_addr = pg.get_peer_addr().clone();
|
||||
let peer_addr = *pg.get_peer_addr();
|
||||
Ok(ReceiveWalConn {
|
||||
pg_backend: pg,
|
||||
peer_addr,
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
use crate::send_wal::SendWalHandler;
|
||||
use crate::timeline::{Timeline, TimelineTools};
|
||||
use anyhow::{anyhow, Result};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use bytes::Bytes;
|
||||
use log::*;
|
||||
use postgres_ffi::xlog_utils::{get_current_timestamp, TimestampTz, XLogFileName, MAX_SEND_SIZE};
|
||||
@@ -25,6 +25,10 @@ use zenith_utils::sock_split::ReadStream;
|
||||
|
||||
pub const END_REPLICATION_MARKER: Lsn = Lsn::MAX;
|
||||
|
||||
// See: https://www.postgresql.org/docs/13/protocol-replication.html
|
||||
const HOT_STANDBY_FEEDBACK_TAG_BYTE: u8 = b'h';
|
||||
const STANDBY_STATUS_UPDATE_TAG_BYTE: u8 = b'r';
|
||||
|
||||
type FullTransactionId = u64;
|
||||
|
||||
/// Hot standby feedback received from replica
|
||||
@@ -81,10 +85,22 @@ impl ReplicationConn {
|
||||
) -> Result<()> {
|
||||
// Wait for replica's feedback.
|
||||
while let Some(msg) = FeMessage::read(&mut stream_in)? {
|
||||
match msg {
|
||||
match &msg {
|
||||
FeMessage::CopyData(m) => {
|
||||
let feedback = HotStandbyFeedback::des(&m)?;
|
||||
subscriber.add_hs_feedback(feedback);
|
||||
// There's two possible data messages that the client is supposed to send here:
|
||||
// `HotStandbyFeedback` and `StandbyStatusUpdate`. We only handle hot standby
|
||||
// feedback.
|
||||
|
||||
match m.first().cloned() {
|
||||
Some(HOT_STANDBY_FEEDBACK_TAG_BYTE) => {
|
||||
// Note: deserializing is on m[1..] because we skip the tag byte.
|
||||
let feedback = HotStandbyFeedback::des(&m[1..])
|
||||
.context("failed to deserialize HotStandbyFeedback")?;
|
||||
subscriber.add_hs_feedback(feedback);
|
||||
}
|
||||
Some(STANDBY_STATUS_UPDATE_TAG_BYTE) => (),
|
||||
_ => warn!("unexpected message {:?}", msg),
|
||||
}
|
||||
}
|
||||
FeMessage::Sync => {}
|
||||
FeMessage::CopyFail => return Err(anyhow!("Copy failed")),
|
||||
@@ -105,7 +121,7 @@ impl ReplicationConn {
|
||||
let mut lsns = caps.map(|cap| cap[1].parse::<Lsn>());
|
||||
let start_pos = lsns
|
||||
.next()
|
||||
.ok_or_else(|| anyhow!("failed to find start LSN"))??;
|
||||
.ok_or_else(|| anyhow!("Failed to parse start LSN from command"))??;
|
||||
let stop_pos = lsns.next().transpose()?.unwrap_or(Lsn(0));
|
||||
Ok((start_pos, stop_pos))
|
||||
}
|
||||
@@ -120,13 +136,12 @@ impl ReplicationConn {
|
||||
}
|
||||
|
||||
// If that failed, try it without the .partial extension.
|
||||
match File::open(&wal_file_path) {
|
||||
Ok(opened_file) => Ok(opened_file),
|
||||
Err(e) => {
|
||||
error!("Failed to open log file {:?}: {}", &wal_file_path, e);
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
File::open(&wal_file_path)
|
||||
.with_context(|| format!("Failed to open WAL file {:?}", wal_file_path))
|
||||
.map_err(|e| {
|
||||
error!("{}", e);
|
||||
e
|
||||
})
|
||||
}
|
||||
|
||||
///
|
||||
@@ -144,7 +159,7 @@ impl ReplicationConn {
|
||||
|
||||
thread::spawn(move || {
|
||||
if let Err(err) = Self::background_thread(bg_stream_in, bg_timeline) {
|
||||
error!("socket error: {}", err);
|
||||
error!("Replication background thread failed: {}", err);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -154,7 +169,7 @@ impl ReplicationConn {
|
||||
loop {
|
||||
wal_seg_size = swh.timeline.get().get_info().server.wal_seg_size as usize;
|
||||
if wal_seg_size == 0 {
|
||||
error!("Can not start replication before connecting to wal_proposer");
|
||||
error!("Cannot start replication before connecting to wal_proposer");
|
||||
sleep(Duration::from_secs(1));
|
||||
} else {
|
||||
break;
|
||||
@@ -216,8 +231,9 @@ impl ReplicationConn {
|
||||
|
||||
// Read some data from the file.
|
||||
let mut file_buf = vec![0u8; send_size];
|
||||
file.seek(SeekFrom::Start(xlogoff as u64))?;
|
||||
file.read_exact(&mut file_buf)?;
|
||||
file.seek(SeekFrom::Start(xlogoff as u64))
|
||||
.and_then(|_| file.read_exact(&mut file_buf))
|
||||
.context("Failed to read data from WAL file")?;
|
||||
|
||||
// Write some data to the network socket.
|
||||
pgb.write_message(&BeMessage::XLogData(XLogDataBody {
|
||||
@@ -225,7 +241,8 @@ impl ReplicationConn {
|
||||
wal_end: end_pos.0,
|
||||
timestamp: get_current_timestamp(),
|
||||
data: &file_buf,
|
||||
}))?;
|
||||
}))
|
||||
.context("Failed to send XLogData")?;
|
||||
|
||||
start_pos += send_size as u64;
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ use byteorder::WriteBytesExt;
|
||||
use bytes::Buf;
|
||||
use bytes::Bytes;
|
||||
use log::*;
|
||||
use pageserver::waldecoder::WalStreamDecoder;
|
||||
use postgres_ffi::xlog_utils::TimeLineID;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::cmp::max;
|
||||
@@ -27,7 +28,7 @@ const SK_PROTOCOL_VERSION: u32 = 1;
|
||||
const UNKNOWN_SERVER_VERSION: u32 = 0;
|
||||
|
||||
/// Consensus logical timestamp.
|
||||
type Term = u64;
|
||||
pub type Term = u64;
|
||||
|
||||
/// Unique id of proposer. Not needed for correctness, used for monitoring.
|
||||
type PgUuid = [u8; 16];
|
||||
@@ -71,9 +72,12 @@ pub struct SafeKeeperState {
|
||||
pub proposer_uuid: PgUuid,
|
||||
/// part of WAL acknowledged by quorum and available locally
|
||||
pub commit_lsn: Lsn,
|
||||
/// minimal LSN which may be needed for recovery of some safekeeper (end lsn
|
||||
/// + 1 of last record streamed to everyone)
|
||||
/// minimal LSN which may be needed for recovery of some safekeeper (end_lsn
|
||||
/// of last record streamed to everyone)
|
||||
pub truncate_lsn: Lsn,
|
||||
// Safekeeper starts receiving WAL from this LSN, zeros before it ought to
|
||||
// be skipped during decoding.
|
||||
pub wal_start_lsn: Lsn,
|
||||
}
|
||||
|
||||
impl SafeKeeperState {
|
||||
@@ -93,6 +97,7 @@ impl SafeKeeperState {
|
||||
proposer_uuid: [0; 16],
|
||||
commit_lsn: Lsn(0), /* part of WAL acknowledged by quorum */
|
||||
truncate_lsn: Lsn(0), /* minimal LSN which may be needed for recovery of some safekeeper */
|
||||
wal_start_lsn: Lsn(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -106,7 +111,7 @@ impl Default for SafeKeeperState {
|
||||
// protocol messages
|
||||
|
||||
/// Initial Proposer -> Acceptor message
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct ProposerGreeting {
|
||||
/// proposer-acceptor protocol version
|
||||
pub protocol_version: u32,
|
||||
@@ -123,50 +128,50 @@ pub struct ProposerGreeting {
|
||||
|
||||
/// Acceptor -> Proposer initial response: the highest term known to me
|
||||
/// (acceptor voted for).
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct AcceptorGreeting {
|
||||
term: u64,
|
||||
}
|
||||
|
||||
/// Vote request sent from proposer to safekeepers
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct VoteRequest {
|
||||
term: Term,
|
||||
}
|
||||
|
||||
/// Vote itself, sent from safekeeper to proposer
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct VoteResponse {
|
||||
term: Term, // not really needed, just a sanity check
|
||||
vote_given: u64, // fixme u64 due to padding
|
||||
/// Safekeeper's log position, to let proposer choose the most advanced one
|
||||
epoch: Term,
|
||||
flush_lsn: Lsn,
|
||||
restart_lsn: Lsn,
|
||||
truncate_lsn: Lsn,
|
||||
}
|
||||
|
||||
/// Request with WAL message sent from proposer to safekeeper. Along the way it
|
||||
/// announces 1) successful election (with epoch_start_lsn); 2) commit_lsn.
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug)]
|
||||
pub struct AppendRequest {
|
||||
h: AppendRequestHeader,
|
||||
wal_data: Bytes,
|
||||
pub h: AppendRequestHeader,
|
||||
pub wal_data: Bytes,
|
||||
}
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct AppendRequestHeader {
|
||||
term: Term,
|
||||
pub term: Term,
|
||||
// LSN since the proposer appends WAL; determines epoch switch point.
|
||||
epoch_start_lsn: Lsn,
|
||||
pub epoch_start_lsn: Lsn,
|
||||
/// start position of message in WAL
|
||||
begin_lsn: Lsn,
|
||||
pub begin_lsn: Lsn,
|
||||
/// end position of message in WAL
|
||||
end_lsn: Lsn,
|
||||
pub end_lsn: Lsn,
|
||||
/// LSN committed by quorum of safekeepers
|
||||
commit_lsn: Lsn,
|
||||
/// restart LSN position (minimal LSN which may be needed by proposer to perform recovery)
|
||||
restart_lsn: Lsn,
|
||||
pub commit_lsn: Lsn,
|
||||
/// minimal LSN which may be needed by proposer to perform recovery of some safekeeper
|
||||
pub truncate_lsn: Lsn,
|
||||
// only for logging/debugging
|
||||
proposer_uuid: PgUuid,
|
||||
pub proposer_uuid: PgUuid,
|
||||
}
|
||||
|
||||
/// Report safekeeper state to proposer
|
||||
@@ -270,16 +275,15 @@ impl AcceptorProposerMessage {
|
||||
pub trait Storage {
|
||||
/// Persist safekeeper state on disk, optionally syncing it.
|
||||
fn persist(&mut self, s: &SafeKeeperState, sync: bool) -> Result<()>;
|
||||
/// Write piece of wal in buf to disk.
|
||||
fn write_wal(&mut self, s: &SafeKeeperState, startpos: Lsn, buf: &[u8]) -> Result<()>;
|
||||
/// Write piece of wal in buf to disk and sync it.
|
||||
fn write_wal(&mut self, server: &ServerInfo, startpos: Lsn, buf: &[u8]) -> Result<()>;
|
||||
}
|
||||
|
||||
/// SafeKeeper which consumes events (messages from compute) and provides
|
||||
/// replies.
|
||||
#[derive(Debug)]
|
||||
pub struct SafeKeeper<ST: Storage> {
|
||||
/// Locally flushed part of WAL (end_lsn of last record). Established by
|
||||
/// reading wal.
|
||||
/// Locally flushed part of WAL with full records (end_lsn of last record).
|
||||
/// Established by reading wal.
|
||||
pub flush_lsn: Lsn,
|
||||
pub tli: u32,
|
||||
/// not-yet-flushed pairs of same named fields in s.*
|
||||
@@ -288,6 +292,7 @@ pub struct SafeKeeper<ST: Storage> {
|
||||
pub storage: ST,
|
||||
pub s: SafeKeeperState, // persistent part
|
||||
pub elected_proposer_term: Term, // for monitoring/debugging
|
||||
decoder: WalStreamDecoder,
|
||||
}
|
||||
|
||||
impl<ST> SafeKeeper<ST>
|
||||
@@ -304,6 +309,7 @@ where
|
||||
storage,
|
||||
s: state,
|
||||
elected_proposer_term: 0,
|
||||
decoder: WalStreamDecoder::new(Lsn(0)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -347,7 +353,6 @@ where
|
||||
self.s.server.ztli = msg.ztli;
|
||||
self.s.server.tli = msg.tli;
|
||||
self.s.server.wal_seg_size = msg.wal_seg_size;
|
||||
self.s.proposer_uuid = msg.proposer_id;
|
||||
self.storage.persist(&self.s, true)?;
|
||||
|
||||
info!(
|
||||
@@ -367,7 +372,7 @@ where
|
||||
vote_given: false as u64,
|
||||
epoch: 0,
|
||||
flush_lsn: Lsn(0),
|
||||
restart_lsn: Lsn(0),
|
||||
truncate_lsn: Lsn(0),
|
||||
};
|
||||
if self.s.acceptor_state.term < msg.term {
|
||||
self.s.acceptor_state.term = msg.term;
|
||||
@@ -376,7 +381,7 @@ where
|
||||
resp.vote_given = true as u64;
|
||||
resp.epoch = self.s.acceptor_state.epoch;
|
||||
resp.flush_lsn = self.flush_lsn;
|
||||
resp.restart_lsn = self.s.truncate_lsn;
|
||||
resp.truncate_lsn = self.s.truncate_lsn;
|
||||
}
|
||||
info!("processed VoteRequest for term {}: {:?}", msg.term, &resp);
|
||||
Ok(AcceptorProposerMessage::VoteResponse(resp))
|
||||
@@ -411,10 +416,43 @@ where
|
||||
return Ok(AcceptorProposerMessage::AppendResponse(resp));
|
||||
}
|
||||
|
||||
// do the job
|
||||
self.storage
|
||||
.write_wal(&self.s, msg.h.begin_lsn, &msg.wal_data)?;
|
||||
self.s.proposer_uuid = msg.h.proposer_uuid;
|
||||
let mut sync_control_file = false;
|
||||
|
||||
// do the job
|
||||
let mut last_rec_lsn = Lsn(0);
|
||||
if !msg.wal_data.is_empty() {
|
||||
self.storage
|
||||
.write_wal(&self.s.server, msg.h.begin_lsn, &msg.wal_data)?;
|
||||
|
||||
// figure out last record's end lsn for reporting (if we got the
|
||||
// whole record)
|
||||
if self.decoder.available() != msg.h.begin_lsn {
|
||||
info!(
|
||||
"restart decoder from {} to {}",
|
||||
self.decoder.available(),
|
||||
msg.h.begin_lsn,
|
||||
);
|
||||
self.decoder = WalStreamDecoder::new(msg.h.begin_lsn);
|
||||
}
|
||||
self.decoder.feed_bytes(&msg.wal_data);
|
||||
loop {
|
||||
match self.decoder.poll_decode()? {
|
||||
None => break, // no full record yet
|
||||
Some((lsn, _rec)) => {
|
||||
last_rec_lsn = lsn;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If this was the first record we ever received, remember LSN to help
|
||||
// find_end_of_wal skip the hole in the beginning.
|
||||
if self.s.wal_start_lsn == Lsn(0) {
|
||||
self.s.wal_start_lsn = msg.h.begin_lsn;
|
||||
sync_control_file = true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Epoch switch happens when a written WAL record crosses the boundary.
|
||||
* The boundary is maximum of last WAL position at this node (FlushLSN) and global
|
||||
@@ -429,21 +467,28 @@ where
|
||||
if self.s.acceptor_state.epoch < msg.h.term
|
||||
&& msg.h.end_lsn >= max(self.flush_lsn, msg.h.epoch_start_lsn)
|
||||
{
|
||||
info!("switched to new epoch {}", msg.h.term);
|
||||
info!(
|
||||
"switched to new epoch {} on receival of request end_lsn={:?}, len={:?}",
|
||||
msg.h.term,
|
||||
msg.h.end_lsn,
|
||||
msg.wal_data.len(),
|
||||
);
|
||||
self.s.acceptor_state.epoch = msg.h.term; /* bump epoch */
|
||||
sync_control_file = true;
|
||||
}
|
||||
if msg.h.end_lsn > self.flush_lsn {
|
||||
self.flush_lsn = msg.h.end_lsn;
|
||||
if last_rec_lsn > self.flush_lsn {
|
||||
self.flush_lsn = last_rec_lsn;
|
||||
}
|
||||
|
||||
self.s.proposer_uuid = msg.h.proposer_uuid;
|
||||
// Advance commit_lsn taking into account what we have locally.
|
||||
// xxx this is wrapped into epoch check because we overwrite wal
|
||||
// instead of truncating it, so without it commit_lsn might include
|
||||
// wrong part. Anyway, nobody is much interested in our commit_lsn while
|
||||
// epoch switch hasn't happened, right?
|
||||
if self.s.acceptor_state.epoch == msg.h.term {
|
||||
// Advance commit_lsn taking into account what we have locally. xxx this
|
||||
// is wrapped into epoch check because we overwrite wal instead of
|
||||
// truncating it, so without it commit_lsn might include wrong part.
|
||||
// Anyway, nobody is much interested in our commit_lsn while epoch
|
||||
// switch hasn't happened, right?
|
||||
//
|
||||
// commit_lsn can be 0, being unknown to the new walproposer while it hasn't
|
||||
// collected majority of its epoch acks yet, ignore it in this case.
|
||||
if self.s.acceptor_state.epoch == msg.h.term && msg.h.commit_lsn != Lsn(0) {
|
||||
let commit_lsn = min(msg.h.commit_lsn, self.flush_lsn);
|
||||
// If new commit_lsn reached epoch switch, force sync of control file:
|
||||
// walproposer in sync mode is very interested when this happens.
|
||||
@@ -451,10 +496,10 @@ where
|
||||
commit_lsn >= msg.h.epoch_start_lsn && self.s.commit_lsn < msg.h.epoch_start_lsn;
|
||||
self.commit_lsn = commit_lsn;
|
||||
}
|
||||
self.truncate_lsn = msg.h.restart_lsn;
|
||||
|
||||
self.truncate_lsn = msg.h.truncate_lsn;
|
||||
/*
|
||||
* Update restart LSN in control file.
|
||||
* Update truncate and commit LSN in control file.
|
||||
* To avoid negative impact on performance of extra fsync, do it only
|
||||
* when restart_lsn delta exceeds WAL segment size.
|
||||
*/
|
||||
@@ -474,11 +519,12 @@ where
|
||||
// will be filled by caller code to avoid bothering safekeeper
|
||||
hs_feedback: HotStandbyFeedback::empty(),
|
||||
};
|
||||
debug!(
|
||||
"processed AppendRequest of len {}, end_lsn={:?}, commit_lsn={:?}, resp {:?}",
|
||||
info!(
|
||||
"processed AppendRequest of len {}, end_lsn={:?}, commit_lsn={:?}, truncate_lsn={:?}, resp {:?}",
|
||||
msg.wal_data.len(),
|
||||
msg.h.end_lsn,
|
||||
msg.h.commit_lsn,
|
||||
msg.h.truncate_lsn,
|
||||
&resp,
|
||||
);
|
||||
Ok(AcceptorProposerMessage::AppendResponse(resp))
|
||||
@@ -500,7 +546,7 @@ mod tests {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_wal(&mut self, _s: &SafeKeeperState, _startpos: Lsn, _buf: &[u8]) -> Result<()> {
|
||||
fn write_wal(&mut self, _server: &ServerInfo, _startpos: Lsn, _buf: &[u8]) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -548,7 +594,7 @@ mod tests {
|
||||
begin_lsn: Lsn(1),
|
||||
end_lsn: Lsn(2),
|
||||
commit_lsn: Lsn(0),
|
||||
restart_lsn: Lsn(0),
|
||||
truncate_lsn: Lsn(0),
|
||||
proposer_uuid: [0; 16],
|
||||
};
|
||||
let mut append_request = AppendRequest {
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
//! pageserver/any other consumer.
|
||||
//!
|
||||
|
||||
use crate::json_ctrl::handle_json_ctrl;
|
||||
use crate::receive_wal::ReceiveWalConn;
|
||||
use crate::replication::ReplicationConn;
|
||||
use crate::timeline::{Timeline, TimelineTools};
|
||||
@@ -33,13 +34,13 @@ impl postgres_backend::Handler for SendWalHandler {
|
||||
let ztimelineid = sm
|
||||
.params
|
||||
.get("ztimelineid")
|
||||
.ok_or(anyhow!("timelineid is required"))?;
|
||||
.ok_or_else(|| anyhow!("timelineid is required"))?;
|
||||
self.timelineid = Some(ZTimelineId::from_str(ztimelineid)?);
|
||||
|
||||
let ztenantid = sm
|
||||
.params
|
||||
.get("ztenantid")
|
||||
.ok_or(anyhow!("tenantid is required"))?;
|
||||
.ok_or_else(|| anyhow!("tenantid is required"))?;
|
||||
self.tenantid = Some(ZTenantId::from_str(ztenantid)?);
|
||||
|
||||
if let Some(app_name) = sm.params.get("application_name") {
|
||||
@@ -49,9 +50,11 @@ impl postgres_backend::Handler for SendWalHandler {
|
||||
}
|
||||
|
||||
fn process_query(&mut self, pgb: &mut PostgresBackend, query_string: Bytes) -> Result<()> {
|
||||
// START_WAL_PUSH is the only command that initializes the timeline
|
||||
// START_WAL_PUSH is the only command that initializes the timeline in production.
|
||||
// There is also JSON_CTRL command, which should initialize the timeline for testing.
|
||||
if self.timeline.is_none() {
|
||||
if query_string.starts_with(b"START_WAL_PUSH") {
|
||||
if query_string.starts_with(b"START_WAL_PUSH") || query_string.starts_with(b"JSON_CTRL")
|
||||
{
|
||||
self.timeline.set(
|
||||
&self.conf,
|
||||
self.tenantid.unwrap(),
|
||||
@@ -76,6 +79,9 @@ impl postgres_backend::Handler for SendWalHandler {
|
||||
} else if query_string.starts_with(b"START_WAL_PUSH") {
|
||||
ReceiveWalConn::new(pgb)?.run(self)?;
|
||||
Ok(())
|
||||
} else if query_string.starts_with(b"JSON_CTRL") {
|
||||
handle_json_ctrl(self, pgb, &query_string)?;
|
||||
Ok(())
|
||||
} else {
|
||||
bail!("Unexpected command {:?}", query_string);
|
||||
}
|
||||
|
||||
@@ -18,8 +18,8 @@ use zenith_utils::zid::{ZTenantId, ZTimelineId};
|
||||
|
||||
use crate::replication::{HotStandbyFeedback, END_REPLICATION_MARKER};
|
||||
use crate::safekeeper::{
|
||||
AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, SafeKeeperState, Storage,
|
||||
SK_FORMAT_VERSION, SK_MAGIC,
|
||||
AcceptorProposerMessage, ProposerAcceptorMessage, SafeKeeper, SafeKeeperState, ServerInfo,
|
||||
Storage, SK_FORMAT_VERSION, SK_MAGIC,
|
||||
};
|
||||
use crate::WalAcceptorConf;
|
||||
use postgres_ffi::xlog_utils::{XLogFileName, XLOG_BLCKSZ};
|
||||
@@ -27,15 +27,12 @@ use postgres_ffi::xlog_utils::{XLogFileName, XLOG_BLCKSZ};
|
||||
const CONTROL_FILE_NAME: &str = "safekeeper.control";
|
||||
|
||||
/// Shared state associated with database instance (tenant)
|
||||
#[derive(Debug)]
|
||||
struct SharedState {
|
||||
/// Safekeeper object
|
||||
sk: SafeKeeper<FileStorage>,
|
||||
/// opened control file handle (needed to hold an exclusive file lock)
|
||||
control_file: File,
|
||||
/// For receiving-sending wal cooperation
|
||||
/// quorum commit LSN we've notified walsenders about
|
||||
commit_lsn: Lsn,
|
||||
notified_commit_lsn: Lsn,
|
||||
/// combined hot standby feedback from all replicas
|
||||
hs_feedback: HotStandbyFeedback,
|
||||
}
|
||||
@@ -59,20 +56,24 @@ impl SharedState {
|
||||
) -> Result<Self> {
|
||||
let (cf, state) = SharedState::load_control_file(conf, timelineid, create)?;
|
||||
let storage = FileStorage {
|
||||
control_file: cf.try_clone()?,
|
||||
control_file: cf,
|
||||
conf: conf.clone(),
|
||||
};
|
||||
let (flush_lsn, tli) = if state.server.wal_seg_size != 0 {
|
||||
let wal_dir = conf.data_dir.join(format!("{}", timelineid));
|
||||
find_end_of_wal(&wal_dir, state.server.wal_seg_size as usize, true)
|
||||
find_end_of_wal(
|
||||
&wal_dir,
|
||||
state.server.wal_seg_size as usize,
|
||||
true,
|
||||
state.wal_start_lsn,
|
||||
)?
|
||||
} else {
|
||||
(0, 0)
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
commit_lsn: Lsn(0),
|
||||
notified_commit_lsn: Lsn(0),
|
||||
sk: SafeKeeper::new(Lsn(flush_lsn), tli, storage, state),
|
||||
control_file: cf,
|
||||
hs_feedback: HotStandbyFeedback {
|
||||
ts: 0,
|
||||
xmin: u64::MAX,
|
||||
@@ -161,7 +162,6 @@ impl SharedState {
|
||||
}
|
||||
|
||||
/// Database instance (tenant)
|
||||
#[derive(Debug)]
|
||||
pub struct Timeline {
|
||||
pub timelineid: ZTimelineId,
|
||||
mutex: Mutex<SharedState>,
|
||||
@@ -186,7 +186,7 @@ impl Timeline {
|
||||
pub fn wait_for_lsn(&self, lsn: Lsn) -> Lsn {
|
||||
let mut shared_state = self.mutex.lock().unwrap();
|
||||
loop {
|
||||
let commit_lsn = shared_state.commit_lsn;
|
||||
let commit_lsn = shared_state.notified_commit_lsn;
|
||||
// This must be `>`, not `>=`.
|
||||
if commit_lsn > lsn {
|
||||
return commit_lsn;
|
||||
@@ -198,8 +198,8 @@ impl Timeline {
|
||||
// Notify caught-up WAL senders about new WAL data received
|
||||
pub fn notify_wal_senders(&self, commit_lsn: Lsn) {
|
||||
let mut shared_state = self.mutex.lock().unwrap();
|
||||
if shared_state.commit_lsn < commit_lsn {
|
||||
shared_state.commit_lsn = commit_lsn;
|
||||
if shared_state.notified_commit_lsn < commit_lsn {
|
||||
shared_state.notified_commit_lsn = commit_lsn;
|
||||
self.cond.notify_all();
|
||||
}
|
||||
}
|
||||
@@ -337,14 +337,14 @@ impl Storage for FileStorage {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_wal(&mut self, s: &SafeKeeperState, startpos: Lsn, buf: &[u8]) -> Result<()> {
|
||||
fn write_wal(&mut self, server: &ServerInfo, startpos: Lsn, buf: &[u8]) -> Result<()> {
|
||||
let mut bytes_left: usize = buf.len();
|
||||
let mut bytes_written: usize = 0;
|
||||
let mut partial;
|
||||
let mut start_pos = startpos;
|
||||
const ZERO_BLOCK: &[u8] = &[0u8; XLOG_BLCKSZ];
|
||||
let wal_seg_size = s.server.wal_seg_size as usize;
|
||||
let ztli = s.server.ztli;
|
||||
let wal_seg_size = server.wal_seg_size as usize;
|
||||
let ztli = server.ztli;
|
||||
|
||||
/* Extract WAL location for this block */
|
||||
let mut xlogoff = start_pos.segment_offset(wal_seg_size) as usize;
|
||||
@@ -365,7 +365,7 @@ impl Storage for FileStorage {
|
||||
/* Open file */
|
||||
let segno = start_pos.segment_number(wal_seg_size);
|
||||
// note: we basically don't support changing pg timeline
|
||||
let wal_file_name = XLogFileName(s.server.tli, segno, wal_seg_size);
|
||||
let wal_file_name = XLogFileName(server.tli, segno, wal_seg_size);
|
||||
let wal_file_path = self
|
||||
.conf
|
||||
.data_dir
|
||||
|
||||
@@ -1,10 +1,10 @@
use anyhow::{anyhow, bail};
use anyhow::anyhow;
use anyhow::{Context, Result};
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use control_plane::compute::ComputeControlPlane;
use control_plane::local_env::{self, LocalEnv};
use control_plane::local_env;
use control_plane::storage::PageServerNode;
use std::collections::btree_map::Entry;
use pageserver::defaults::{DEFAULT_HTTP_LISTEN_PORT, DEFAULT_PG_LISTEN_PORT};
use std::collections::HashMap;
use std::process::exit;
use std::str::FromStr;
@@ -13,7 +13,6 @@ use zenith_utils::postgres_backend::AuthType;
use zenith_utils::zid::{ZTenantId, ZTimelineId};

use pageserver::branches::BranchInfo;
use zenith_utils::lsn::Lsn;

///
/// Branches tree element used as a value in the HashMap.
@@ -30,7 +29,7 @@ struct BranchTreeEl {
// This utility helps to manage zenith installation. That includes following:
// * Management of local postgres installations running on top of the
// pageserver.
// * Providing CLI api to the pageserver (local or remote)
// * Providing CLI api to the pageserver
// * TODO: export/import to/from usual postgres
fn main() -> Result<()> {
let timeline_arg = Arg::with_name("timeline")
@@ -45,16 +44,27 @@ fn main() -> Result<()> {
.takes_value(true)
.required(false);

let port_arg = Arg::with_name("port")
.long("port")
.required(false)
.value_name("port");

let matches = App::new("Zenith CLI")
.setting(AppSettings::ArgRequiredElseHelp)
.subcommand(
SubCommand::with_name("init")
.about("Initialize a new Zenith repository")
.arg(
Arg::with_name("remote-pageserver")
.long("remote-pageserver")
Arg::with_name("pageserver-pg-port")
.long("pageserver-pg-port")
.required(false)
.value_name("pageserver-url"),
.value_name("pageserver-pg-port"),
)
.arg(
Arg::with_name("pageserver-http-port")
.long("pageserver-http-port")
.required(false)
.value_name("pageserver-http-port"),
)
.arg(
Arg::with_name("enable-auth")
@@ -87,7 +97,7 @@ fn main() -> Result<()> {
.subcommand(SubCommand::with_name("list").arg(tenantid_arg.clone()))
.subcommand(SubCommand::with_name("create")
.about("Create a postgres compute node")
.arg(timeline_arg.clone()).arg(tenantid_arg.clone())
.arg(timeline_arg.clone()).arg(tenantid_arg.clone()).arg(port_arg.clone())
.arg(
Arg::with_name("config-only")
.help("Don't do basebackup, create compute node with only config files")
@@ -96,7 +106,11 @@ fn main() -> Result<()> {
))
.subcommand(SubCommand::with_name("start")
.about("Start a postgres compute node.\n This command actually creates new node from scratch, but preserves existing config files")
.arg(timeline_arg.clone()).arg(tenantid_arg.clone()))
.arg(
timeline_arg.clone()
).arg(
tenantid_arg.clone()
).arg(port_arg.clone()))
.subcommand(
SubCommand::with_name("stop")
.arg(timeline_arg.clone())
@@ -107,36 +121,36 @@ fn main() -> Result<()> {
.long("destroy")
.required(false)
)
)
)
.subcommand(
SubCommand::with_name("remote")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage remote pagerservers")
.subcommand(
SubCommand::with_name("add")
.about("Add a new remote pageserver")
.arg(Arg::with_name("name").required(true))
.arg(
Arg::with_name("url")
.help("PostgreSQL connection URI")
.required(true),
),
),
)

)
.get_matches();

// Create config file
if let ("init", Some(init_match)) = matches.subcommand() {
let tenantid = ZTenantId::generate();
let pageserver_uri = init_match.value_of("pageserver-url");
let pageserver_pg_port = match init_match.value_of("pageserver-pg-port") {
Some(v) => v.parse()?,
None => DEFAULT_PG_LISTEN_PORT,
};
let pageserver_http_port = match init_match.value_of("pageserver-http-port") {
Some(v) => v.parse()?,
None => DEFAULT_HTTP_LISTEN_PORT,
};

let auth_type = if init_match.is_present("enable-auth") {
AuthType::ZenithJWT
} else {
AuthType::Trust
};
local_env::init(pageserver_uri, tenantid, auth_type)
.with_context(|| "Failed to create config file")?;

local_env::init(
pageserver_pg_port,
pageserver_http_port,
tenantid,
auth_type,
)
.with_context(|| "Failed to create config file")?;
}

// all other commands would need config
@@ -214,13 +228,6 @@ fn main() -> Result<()> {
}
}

("remote", Some(remote_match)) => {
if let Err(e) = handle_remote(remote_match, &env) {
eprintln!("remote operation failed: {}", e);
exit(1);
}
}

_ => {}
};

@@ -393,9 +400,7 @@ fn handle_branch(branch_match: &ArgMatches, env: &local_env::LocalEnv) -> Result
let branch = pageserver.branch_create(branchname, startpoint_str, &tenantid)?;
println!(
"Created branch '{}' at {:?} for tenant: {}",
branch.name,
branch.latest_valid_lsn.unwrap_or(Lsn(0)),
tenantid,
branch.name, branch.latest_valid_lsn, tenantid,
);
} else {
let tenantid: ZTenantId = branch_match
@@ -434,9 +439,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
node.address,
branch_infos
.get(&node.timelineid)
.map(|bi| bi
.latest_valid_lsn
.map_or("?".to_string(), |lsn| lsn.to_string()))
.map(|bi| bi.latest_valid_lsn.to_string())
.unwrap_or_else(|| "?".to_string()),
node.status(),
);
@@ -448,7 +451,11 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
.map_or(Ok(env.tenantid), |value| value.parse())?;
let timeline_name = create_match.value_of("timeline").unwrap_or("main");

cplane.new_node(tenantid, timeline_name)?;
let port: Option<u16> = match create_match.value_of("port") {
Some(p) => Some(p.parse()?),
None => None,
};
cplane.new_node(tenantid, timeline_name, port)?;
}
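The `Option<u16>` port parsing introduced above can equivalently be written with `Option::map` plus `transpose`, a common idiom for "parse if present, otherwise keep `None`, and bubble up parse errors". A self-contained sketch (the function name is illustrative):

```rust
fn parse_port(raw: Option<&str>) -> anyhow::Result<Option<u16>> {
    // Equivalent to the match in the diff: keep None, parse Some, propagate errors.
    Ok(raw.map(|p| p.parse::<u16>()).transpose()?)
}

fn main() -> anyhow::Result<()> {
    assert_eq!(parse_port(None)?, None);
    assert_eq!(parse_port(Some("5432"))?, Some(5432));
    assert!(parse_port(Some("not-a-port")).is_err());
    Ok(())
}
```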
("start", Some(start_match)) => {
|
||||
let tenantid: ZTenantId = start_match
|
||||
@@ -456,6 +463,11 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
||||
.map_or(Ok(env.tenantid), |value| value.parse())?;
|
||||
let timeline_name = start_match.value_of("timeline").unwrap_or("main");
|
||||
|
||||
let port: Option<u16> = match start_match.value_of("port") {
|
||||
Some(p) => Some(p.parse()?),
|
||||
None => None,
|
||||
};
|
||||
|
||||
let node = cplane.nodes.get(&(tenantid, timeline_name.to_owned()));
|
||||
|
||||
let auth_token = if matches!(env.auth_type, AuthType::ZenithJWT) {
|
||||
@@ -473,7 +485,12 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
||||
if let Some(node) = node {
|
||||
node.start(&auth_token)?;
|
||||
} else {
|
||||
let node = cplane.new_node(tenantid, timeline_name)?;
|
||||
// when used with custom port this results in non obvious behaviour
|
||||
// port is remembered from first start command, i e
|
||||
// start --port X
|
||||
// stop
|
||||
// start <-- will also use port X even without explicit port argument
|
||||
let node = cplane.new_node(tenantid, timeline_name, port)?;
|
||||
node.start(&auth_token)?;
|
||||
}
|
||||
}
|
||||
@@ -496,29 +513,3 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_remote(remote_match: &ArgMatches, local_env: &LocalEnv) -> Result<()> {
|
||||
match remote_match.subcommand() {
|
||||
("add", Some(args)) => {
|
||||
let name = args.value_of("name").unwrap();
|
||||
let url = args.value_of("url").unwrap();
|
||||
|
||||
// validate the URL
|
||||
postgres::Config::from_str(url)?;
|
||||
|
||||
let mut new_local_env = local_env.clone();
|
||||
|
||||
match new_local_env.remotes.entry(name.to_string()) {
|
||||
Entry::Vacant(vacant) => {
|
||||
vacant.insert(url.to_string());
|
||||
}
|
||||
Entry::Occupied(_) => bail!("origin '{}' already exists", name),
|
||||
}
|
||||
|
||||
local_env::save_config(&new_local_env)?;
|
||||
}
|
||||
_ => bail!("unknown command"),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ version = "0.1.0"
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
prometheus = "0.12"
|
||||
prometheus = {version = "0.12", default_features=false} # removes protobuf dependency
|
||||
libc = "0.2"
|
||||
lazy_static = "1.4"
|
||||
once_cell = "1.8.0"
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
//! Otherwise, we might not see all metrics registered via
|
||||
//! a default registry.
|
||||
use lazy_static::lazy_static;
|
||||
use once_cell::race::OnceBox;
|
||||
pub use prometheus::{exponential_buckets, linear_buckets};
|
||||
pub use prometheus::{register_histogram, Histogram};
|
||||
pub use prometheus::{register_histogram_vec, HistogramVec};
|
||||
@@ -20,29 +21,67 @@ pub use wrappers::{CountedReader, CountedWriter};
|
||||
/// Metrics gathering is a relatively simple and standalone operation, so
|
||||
/// it might be fine to do it this way to keep things simple.
|
||||
pub fn gather() -> Vec<prometheus::proto::MetricFamily> {
|
||||
update_io_metrics();
|
||||
update_rusage_metrics();
|
||||
prometheus::gather()
|
||||
}
|
||||
|
||||
static COMMON_METRICS_PREFIX: OnceBox<&str> = OnceBox::new();
|
||||
|
||||
/// Sets a prefix which will be used for all common metrics, typically a service
|
||||
/// name like 'pageserver'. Should be executed exactly once in the beginning of
|
||||
/// any executable which uses common metrics.
|
||||
pub fn set_common_metrics_prefix(prefix: &'static str) {
|
||||
// Not unwrap() because metrics may be initialized after multiple threads have been started.
|
||||
COMMON_METRICS_PREFIX
|
||||
.set(prefix.into())
|
||||
.unwrap_or_else(|_| {
|
||||
eprintln!(
|
||||
"set_common_metrics_prefix() was called second time with '{}', exiting",
|
||||
prefix
|
||||
);
|
||||
std::process::exit(1);
|
||||
});
|
||||
}
|
||||
|
||||
/// Prepends a prefix to a common metric name so they are distinguished between
|
||||
/// different services, see https://github.com/zenithdb/zenith/pull/681
|
||||
/// A call to set_common_metrics_prefix() is necessary prior to calling this.
|
||||
pub fn new_common_metric_name(unprefixed_metric_name: &str) -> String {
|
||||
// Not unwrap() because metrics may be initialized after multiple threads have been started.
|
||||
format!(
|
||||
"{}_{}",
|
||||
COMMON_METRICS_PREFIX.get().unwrap_or_else(|| {
|
||||
eprintln!("set_common_metrics_prefix() was not called, but metrics are used, exiting");
|
||||
std::process::exit(1);
|
||||
}),
|
||||
unprefixed_metric_name
|
||||
)
|
||||
}
|
||||
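A short sketch of how the two functions above are meant to compose at process start-up; the prefix and metric name are taken from the hunks nearby, while the `main` wrapper is illustrative:

```rust
use zenith_metrics::{new_common_metric_name, set_common_metrics_prefix};

fn main() {
    // Must run once, before any lazy_static! metric is first touched.
    set_common_metrics_prefix("pageserver");

    // "disk_io_bytes" becomes "pageserver_disk_io_bytes", matching the
    // hard-coded name this patch replaces.
    assert_eq!(
        new_common_metric_name("disk_io_bytes"),
        "pageserver_disk_io_bytes"
    );
}
```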
|
lazy_static! {
static ref DISK_IO_BYTES: IntGaugeVec = register_int_gauge_vec!(
"pageserver_disk_io_bytes",
new_common_metric_name("disk_io_bytes"),
"Bytes written and read from disk, grouped by the operation (read|write)",
&["io_operation"]
)
.expect("Failed to register disk i/o bytes int gauge vec");
static ref MAXRSS_KB: IntGauge = register_int_gauge!(
new_common_metric_name("maxrss_kb"),
"Memory usage (Maximum Resident Set Size)"
)
.expect("Failed to register maxrss_kb int gauge");
}

// Records I/O stats in a "cross-platform" way.
// Compiles both on macOs and Linux, but current macOs implementation always returns 0 as values for I/O stats.
// An alternative is to read procfs (`/proc/[pid]/io`) which does not work under macOs at all, hence abandoned.
// Compiles both on macOS and Linux, but current macOS implementation always returns 0 as values for I/O stats.
// An alternative is to read procfs (`/proc/[pid]/io`) which does not work under macOS at all, hence abandoned.
//
// Uses https://www.freebsd.org/cgi/man.cgi?query=getrusage to retrieve the number of block operations
// performed by the process.
// We know the the size of the block, so we can determine the I/O bytes out of it.
// We know the size of the block, so we can determine the I/O bytes out of it.
// The value might be not 100% exact, but should be fine for Prometheus metrics in this case.
#[allow(clippy::unnecessary_cast)]
fn update_io_metrics() {
fn update_rusage_metrics() {
let rusage_stats = get_rusage_stats();

const BYTES_IN_BLOCK: i64 = 512;
@@ -52,6 +91,7 @@ fn update_io_metrics() {
DISK_IO_BYTES
.with_label_values(&["write"])
.set(rusage_stats.ru_oublock * BYTES_IN_BLOCK);
MAXRSS_KB.set(rusage_stats.ru_maxrss);
}

fn get_rusage_stats() -> libc::rusage {

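The diff cuts off at the `get_rusage_stats` signature. For context, a hedged sketch of what such a helper typically looks like with the `libc` crate, together with the block-to-byte arithmetic used above (`ru_inblock`/`ru_oublock` count 512-byte blocks); the exact body in the repository may differ:

```rust
fn get_rusage_stats() -> libc::rusage {
    // Safety: getrusage only writes into the buffer we hand it.
    // The return code is ignored here for brevity.
    let mut rusage = std::mem::MaybeUninit::<libc::rusage>::uninit();
    unsafe {
        libc::getrusage(libc::RUSAGE_SELF, rusage.as_mut_ptr());
        rusage.assume_init()
    }
}

fn main() {
    const BYTES_IN_BLOCK: i64 = 512;
    let stats = get_rusage_stats();
    // e.g. 100 read blocks => 100 * 512 = 51200 bytes reported to Prometheus.
    println!("read bytes: {}", stats.ru_inblock * BYTES_IN_BLOCK);
    println!("written bytes: {}", stats.ru_oublock * BYTES_IN_BLOCK);
    println!("maxrss (kilobytes on Linux): {}", stats.ru_maxrss);
}
```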
@@ -17,7 +17,13 @@ routerify = "2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
thiserror = "1.0"
tokio = { version = "1.5.0", features = ["full"] }
tokio = "1.11"

slog-async = "2.6.0"
slog-stdlog = "4.1.0"
slog-scope = "4.4.0"
slog-term = "2.8.0"
slog = "2.7.0"

zenith_metrics = { path = "../zenith_metrics" }
workspace_hack = { path = "../workspace_hack" }
@@ -32,3 +38,4 @@ rustls-split = "0.2.1"
hex-literal = "0.3"
bytes = "1.0"
webpki = "0.21"
tempfile = "3.2"

zenith_utils/src/accum.rs (new file, 33 lines)
@@ -0,0 +1,33 @@
/// A helper to "accumulate" a value similar to `Iterator::reduce`, but lets you
/// feed the accumulated values by calling the 'accum' function, instead of having an
/// iterator.
///
/// For example, to calculate the smallest value among some integers:
///
/// ```
/// use zenith_utils::accum::Accum;
///
/// let values = [1, 2, 3];
///
/// let mut min_value: Accum<u32> = Accum(None);
/// for new_value in &values {
/// min_value.accum(std::cmp::min, *new_value);
/// }
///
/// assert_eq!(min_value.0.unwrap(), 1);
/// ```
pub struct Accum<T>(pub Option<T>);
impl<T: Copy> Accum<T> {
pub fn accum<F>(&mut self, func: F, new_value: T)
where
F: FnOnce(T, T) -> T,
{
// If there is no previous value, just store the new value.
// Otherwise call the function to decide which one to keep.
self.0 = Some(if let Some(accum) = self.0 {
func(accum, new_value)
} else {
new_value
});
}
}
@@ -69,14 +69,13 @@ impl From<bincode::Error> for SerializeError {
/// Properties:
/// - Big endian
/// - Fixed integer encoding (i.e. 1u32 is 00000001 not 01)
/// - Allow trailing bytes: this means we don't throw an error
/// if the deserializer is passed a buffer with more data
/// past the end.
///
/// Does not allow trailing bytes in deserialization. If this is desired, you
/// may set [`Options::allow_trailing_bytes`] to explicitly accomodate this.
pub fn be_coder() -> impl Options {
bincode::DefaultOptions::new()
.with_big_endian()
.with_fixint_encoding()
.allow_trailing_bytes()
}
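As a concrete check of the "fixed integer encoding" property described in the doc comment, a small sketch that spells out the same bincode configuration as `be_coder()` and encodes `1u32`; the assertion shows the four-byte, most-significant-byte-first form:

```rust
use bincode::Options;

// Same configuration as `be_coder()` in the hunk above, spelled out inline.
fn coder() -> impl Options {
    bincode::DefaultOptions::new()
        .with_big_endian()
        .with_fixint_encoding()
}

fn main() {
    // Fixed integer encoding: a u32 always takes 4 bytes; big endian puts the
    // most significant byte first, so 1u32 encodes as 00 00 00 01.
    let encoded = coder().serialize(&1u32).unwrap();
    assert_eq!(encoded, vec![0x00, 0x00, 0x00, 0x01]);

    let decoded: u32 = coder().deserialize(&encoded).unwrap();
    assert_eq!(decoded, 1);
}
```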
|
/// A shortcut that configures little-ending binary serialization
@@ -84,21 +83,23 @@ pub fn be_coder() -> impl Options {
/// Properties:
/// - Little endian
/// - Fixed integer encoding (i.e. 1u32 is 00000001 not 01)
/// - Allow trailing bytes: this means we don't throw an error
/// if the deserializer is passed a buffer with more data
/// past the end.
///
/// Does not allow trailing bytes in deserialization. If this is desired, you
/// may set [`Options::allow_trailing_bytes`] to explicitly accomodate this.
pub fn le_coder() -> impl Options {
bincode::DefaultOptions::new()
.with_little_endian()
.with_fixint_encoding()
.allow_trailing_bytes()
}

/// Binary serialize/deserialize helper functions (Big Endian)
///
pub trait BeSer: Serialize + DeserializeOwned {
pub trait BeSer {
/// Serialize into a byte slice
fn ser_into_slice(&self, mut b: &mut [u8]) -> Result<(), SerializeError> {
fn ser_into_slice(&self, mut b: &mut [u8]) -> Result<(), SerializeError>
where
Self: Serialize,
{
// &mut [u8] implements Write, but `ser_into` needs a mutable
// reference to that. So we need the slightly awkward "mutable
// reference to a mutable reference.
@@ -109,24 +110,54 @@ pub trait BeSer: Serialize + DeserializeOwned {
///
/// This is useful for most `Write` types except `&mut [u8]`, which
/// can more easily use [`ser_into_slice`](Self::ser_into_slice).
fn ser_into<W: Write>(&self, w: &mut W) -> Result<(), SerializeError> {
fn ser_into<W: Write>(&self, w: &mut W) -> Result<(), SerializeError>
where
Self: Serialize,
{
be_coder().serialize_into(w, &self).map_err(|e| e.into())
}

/// Serialize into a new heap-allocated buffer
fn ser(&self) -> Result<Vec<u8>, SerializeError> {
fn ser(&self) -> Result<Vec<u8>, SerializeError>
where
Self: Serialize,
{
be_coder().serialize(&self).map_err(|e| e.into())
}

/// Deserialize from a byte slice
fn des(buf: &[u8]) -> Result<Self, DeserializeError> {
/// Deserialize from the full contents of a byte slice
///
/// See also: [`BeSer::des_prefix`]
fn des(buf: &[u8]) -> Result<Self, DeserializeError>
where
Self: DeserializeOwned,
{
be_coder()
.deserialize(buf)
.or(Err(DeserializeError::BadInput))
}

/// Deserialize from a prefix of the byte slice
///
/// Uses as much of the byte slice as is necessary to deserialize the
/// type, but does not guarantee that the entire slice is used.
///
/// See also: [`BeSer::des`]
fn des_prefix(buf: &[u8]) -> Result<Self, DeserializeError>
where
Self: DeserializeOwned,
{
be_coder()
.allow_trailing_bytes()
.deserialize(buf)
.or(Err(DeserializeError::BadInput))
}

/// Deserialize from a reader
fn des_from<R: Read>(r: &mut R) -> Result<Self, DeserializeError> {
fn des_from<R: Read>(r: &mut R) -> Result<Self, DeserializeError>
where
Self: DeserializeOwned,
{
be_coder().deserialize_from(r).map_err(|e| e.into())
}

@@ -134,16 +165,22 @@ pub trait BeSer: Serialize + DeserializeOwned {
///
/// Note: it may be faster to serialize to a buffer and then measure the
/// buffer length, than to call `serialized_size` and then `ser_into`.
fn serialized_size(&self) -> Result<u64, SerializeError> {
fn serialized_size(&self) -> Result<u64, SerializeError>
where
Self: Serialize,
{
be_coder().serialized_size(self).map_err(|e| e.into())
}
}

/// Binary serialize/deserialize helper functions (Little Endian)
///
pub trait LeSer: Serialize + DeserializeOwned {
pub trait LeSer {
/// Serialize into a byte slice
fn ser_into_slice(&self, mut b: &mut [u8]) -> Result<(), SerializeError> {
fn ser_into_slice(&self, mut b: &mut [u8]) -> Result<(), SerializeError>
where
Self: Serialize,
{
// &mut [u8] implements Write, but `ser_into` needs a mutable
// reference to that. So we need the slightly awkward "mutable
// reference to a mutable reference.
@@ -154,24 +191,54 @@ pub trait LeSer: Serialize + DeserializeOwned {
///
/// This is useful for most `Write` types except `&mut [u8]`, which
/// can more easily use [`ser_into_slice`](Self::ser_into_slice).
fn ser_into<W: Write>(&self, w: &mut W) -> Result<(), SerializeError> {
fn ser_into<W: Write>(&self, w: &mut W) -> Result<(), SerializeError>
where
Self: Serialize,
{
le_coder().serialize_into(w, &self).map_err(|e| e.into())
}

/// Serialize into a new heap-allocated buffer
fn ser(&self) -> Result<Vec<u8>, SerializeError> {
fn ser(&self) -> Result<Vec<u8>, SerializeError>
where
Self: Serialize,
{
le_coder().serialize(&self).map_err(|e| e.into())
}

/// Deserialize from a byte slice
fn des(buf: &[u8]) -> Result<Self, DeserializeError> {
/// Deserialize from the full contents of a byte slice
///
/// See also: [`LeSer::des_prefix`]
fn des(buf: &[u8]) -> Result<Self, DeserializeError>
where
Self: DeserializeOwned,
{
le_coder()
.deserialize(buf)
.or(Err(DeserializeError::BadInput))
}

/// Deserialize from a prefix of the byte slice
///
/// Uses as much of the byte slice as is necessary to deserialize the
/// type, but does not guarantee that the entire slice is used.
///
/// See also: [`LeSer::des`]
fn des_prefix(buf: &[u8]) -> Result<Self, DeserializeError>
where
Self: DeserializeOwned,
{
le_coder()
.allow_trailing_bytes()
.deserialize(buf)
.or(Err(DeserializeError::BadInput))
}

/// Deserialize from a reader
fn des_from<R: Read>(r: &mut R) -> Result<Self, DeserializeError> {
fn des_from<R: Read>(r: &mut R) -> Result<Self, DeserializeError>
where
Self: DeserializeOwned,
{
le_coder().deserialize_from(r).map_err(|e| e.into())
}

@@ -179,14 +246,18 @@ pub trait LeSer: Serialize + DeserializeOwned {
///
/// Note: it may be faster to serialize to a buffer and then measure the
/// buffer length, than to call `serialized_size` and then `ser_into`.
fn serialized_size(&self) -> Result<u64, SerializeError> {
fn serialized_size(&self) -> Result<u64, SerializeError>
where
Self: Serialize,
{
le_coder().serialized_size(self).map_err(|e| e.into())
}
}

impl<T> BeSer for T where T: Serialize + DeserializeOwned {}

impl<T> LeSer for T where T: Serialize + DeserializeOwned {}
// Because usage of `BeSer` or `LeSer` can be done with *either* a Serialize or
// DeserializeOwned implementation, the blanket implementation has to be for every type.
impl<T> BeSer for T {}
impl<T> LeSer for T {}
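A brief sketch of how the `des` / `des_prefix` split behaves from the caller's side, using a serde-derived struct; the struct, the byte payload, and the `zenith_utils::bin_ser` import path are illustrative assumptions, while the trait methods are the ones defined above:

```rust
use serde::{Deserialize, Serialize};
// Assumed module path for the BeSer/LeSer traits inside zenith_utils.
use zenith_utils::bin_ser::BeSer;

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Header {
    magic: u16,
    len: u16,
}

fn main() {
    let hdr = Header { magic: 0xABCD, len: 4 };
    let mut buf = hdr.ser().unwrap();   // 4 bytes: big endian, fixed width
    buf.extend_from_slice(b"body");     // trailing payload after the header

    // `des` insists on consuming the whole slice, so trailing bytes are an error...
    assert!(Header::des(&buf).is_err());
    // ...while `des_prefix` reads just the header and ignores the rest.
    assert_eq!(Header::des_prefix(&buf).unwrap(), hdr);
}
```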
|
#[cfg(test)]
mod tests {
@@ -247,8 +318,10 @@ mod tests {
assert_eq!(decoded, SHORT2);

// with trailing data
let decoded = ShortStruct::des(SHORT2_ENC_BE_TRAILING).unwrap();
let decoded = ShortStruct::des_prefix(SHORT2_ENC_BE_TRAILING).unwrap();
assert_eq!(decoded, SHORT2);
let err = ShortStruct::des(SHORT2_ENC_BE_TRAILING).unwrap_err();
assert!(matches!(err, DeserializeError::BadInput));

// serialize into a `Write` sink.
let mut buf = Cursor::new(vec![0xFF; 8]);
@@ -279,8 +352,10 @@ mod tests {
assert_eq!(decoded, SHORT2);

// with trailing data
let decoded = ShortStruct::des(SHORT2_ENC_LE_TRAILING).unwrap();
let decoded = ShortStruct::des_prefix(SHORT2_ENC_LE_TRAILING).unwrap();
assert_eq!(decoded, SHORT2);
let err = ShortStruct::des(SHORT2_ENC_LE_TRAILING).unwrap_err();
assert!(matches!(err, DeserializeError::BadInput));

// serialize into a `Write` sink.
let mut buf = Cursor::new(vec![0xFF; 8]);

zenith_utils/src/crashsafe_dir.rs (new file, 125 lines)
@@ -0,0 +1,125 @@
use std::{
fs::{self, File},
io,
path::Path,
};

/// Similar to [`std::fs::create_dir`], except we fsync the
/// created directory and its parent.
pub fn create_dir(path: impl AsRef<Path>) -> io::Result<()> {
let path = path.as_ref();

fs::create_dir(path)?;
File::open(path)?.sync_all()?;

if let Some(parent) = path.parent() {
File::open(parent)?.sync_all()
} else {
Err(io::Error::new(
io::ErrorKind::InvalidInput,
"can't find parent",
))
}
}

/// Similar to [`std::fs::create_dir_all`], except we fsync all
/// newly created directories and the pre-existing parent.
pub fn create_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
let mut path = path.as_ref();

let mut dirs_to_create = Vec::new();

// Figure out which directories we need to create.
loop {
match path.metadata() {
Ok(metadata) if metadata.is_dir() => break,
Ok(_) => {
return Err(io::Error::new(
io::ErrorKind::AlreadyExists,
format!("non-directory found in path: {:?}", path),
));
}
Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
Err(e) => return Err(e),
}

dirs_to_create.push(path);

match path.parent() {
Some(parent) => path = parent,
None => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"can't find parent",
))
}
}
}

// Create directories from parent to child.
for &path in dirs_to_create.iter().rev() {
fs::create_dir(path)?;
}

// Fsync the created directories from child to parent.
for &path in dirs_to_create.iter() {
File::open(path)?.sync_all()?;
}

// If we created any new directories, fsync the parent.
if !dirs_to_create.is_empty() {
File::open(path)?.sync_all()?;
}

Ok(())
}
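A short usage sketch for the new module, assuming it is consumed from another crate in the workspace; the directory paths are illustrative:

```rust
use zenith_utils::crashsafe_dir;

fn main() -> std::io::Result<()> {
    // Create a nested directory and fsync every newly created component,
    // plus the pre-existing parent, so the structure survives a crash.
    crashsafe_dir::create_dir_all("data/tenants/1234/timelines")?;

    // Creating a single directory works the same way, but fails if the
    // parent does not exist yet (just like std::fs::create_dir).
    crashsafe_dir::create_dir("data/tenants/1234/wal")?;
    Ok(())
}
```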
|
#[cfg(test)]
mod tests {
use tempfile::tempdir;

use super::*;

#[test]
fn test_create_dir_fsyncd() {
let dir = tempdir().unwrap();

let existing_dir_path = dir.path();
let err = create_dir(existing_dir_path).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::AlreadyExists);

let child_dir = existing_dir_path.join("child");
create_dir(child_dir).unwrap();

let nested_child_dir = existing_dir_path.join("child1").join("child2");
let err = create_dir(nested_child_dir).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::NotFound);
}

#[test]
fn test_create_dir_all_fsyncd() {
let dir = tempdir().unwrap();

let existing_dir_path = dir.path();
create_dir_all(existing_dir_path).unwrap();

let child_dir = existing_dir_path.join("child");
assert!(!child_dir.exists());
create_dir_all(&child_dir).unwrap();
assert!(child_dir.exists());

let nested_child_dir = existing_dir_path.join("child1").join("child2");
assert!(!nested_child_dir.exists());
create_dir_all(&nested_child_dir).unwrap();
assert!(nested_child_dir.exists());

let file_path = existing_dir_path.join("file");
std::fs::write(&file_path, b"").unwrap();

let err = create_dir_all(&file_path).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::AlreadyExists);

let invalid_dir_path = file_path.join("folder");
create_dir_all(&invalid_dir_path).unwrap_err();
}
}
@@ -8,14 +8,15 @@ use lazy_static::lazy_static;
use routerify::ext::RequestExt;
use routerify::RequestInfo;
use routerify::{Middleware, Router, RouterBuilder, RouterService};
use zenith_metrics::{register_int_counter, IntCounter};
use std::net::TcpListener;
use zenith_metrics::{new_common_metric_name, register_int_counter, IntCounter};
use zenith_metrics::{Encoder, TextEncoder};

use super::error::ApiError;

lazy_static! {
static ref SERVE_METRICS_COUNT: IntCounter = register_int_counter!(
"pageserver_serve_metrics_count",
new_common_metric_name("serve_metrics_count"),
"Number of metric requests made"
)
.expect("failed to define a metric");
@@ -144,10 +145,9 @@ pub fn check_permission(req: &Request<Body>, tenantid: Option<ZTenantId>) -> Res

pub fn serve_thread_main(
router_builder: RouterBuilder<hyper::Body, ApiError>,
addr: String,
listener: TcpListener,
) -> anyhow::Result<()> {
let addr = addr.parse()?;
log::info!("Starting an http endpoint at {}", addr);
log::info!("Starting a http endoint at {}", listener.local_addr()?);

// Create a Service from the router above to handle incoming requests.
let service = RouterService::new(router_builder.build().map_err(|err| anyhow!(err))?).unwrap();
@@ -159,7 +159,7 @@ pub fn serve_thread_main(

let _guard = runtime.enter();

let server = Server::bind(&addr).serve(service);
let server = Server::from_tcp(listener)?.serve(service);

runtime.block_on(server)?;

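With this change the caller binds the socket and hands the listener to `serve_thread_main`, which lets it bind port 0 and discover the real address afterwards. A minimal sketch of the calling side; the router construction is elided and the bind address is illustrative:

```rust
use std::net::TcpListener;

fn main() -> anyhow::Result<()> {
    // Bind in the caller, possibly to port 0 so the OS picks a free port;
    // the serving code can then report the real address via local_addr().
    let listener = TcpListener::bind("127.0.0.1:0")?;
    println!("http endpoint will listen on {}", listener.local_addr()?);

    // The listener is then handed to serve_thread_main(), which builds the
    // hyper server with Server::from_tcp(listener) as shown in the hunk above.
    // serve_thread_main(router_builder, listener)?;
    Ok(())
}
```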
@@ -1,7 +1,6 @@
use anyhow::anyhow;
use hyper::{header, Body, Response, StatusCode};
use serde::Serialize;
use serde_json;
use serde::{Deserialize, Serialize};
use thiserror::Error;

#[derive(Debug, Error)]
@@ -45,7 +44,7 @@ impl ApiError {
}
}

#[derive(Serialize)]
#[derive(Serialize, Deserialize)]
pub struct HttpErrorBody {
pub msg: String,
}
@@ -56,10 +55,10 @@ impl HttpErrorBody {
}

pub fn response_from_msg_and_status(msg: String, status: StatusCode) -> Response<Body> {
HttpErrorBody { msg }.into_response(status)
HttpErrorBody { msg }.to_response(status)
}

pub fn into_response(&self, status: StatusCode) -> Response<Body> {
pub fn to_response(&self, status: StatusCode) -> Response<Body> {
Response::builder()
.status(status)
.header(header::CONTENT_TYPE, "application/json")

@@ -1,7 +1,6 @@
use bytes::Buf;
use hyper::{header, Body, Request, Response, StatusCode};
use serde::{Deserialize, Serialize};
use serde_json;

use super::error::ApiError;


@@ -8,6 +8,9 @@ pub mod lsn;
/// SeqWait allows waiting for a future sequence number to arrive
pub mod seqwait;

/// append only ordered map implemented with a Vec
pub mod vec_map;

// Async version of SeqWait. Currently unused.
// pub mod seqwait_async;

@@ -18,6 +21,9 @@ pub mod pq_proto;
// dealing with connstring parsing and handy access to it's parts
pub mod connstring;

// helper functions for creating and fsyncing directories/trees
pub mod crashsafe_dir;

// common authentication routines
pub mod auth;

@@ -28,3 +34,9 @@ pub mod http;

// socket splitting utils
pub mod sock_split;

// common log initialisation routine
pub mod logging;

// Misc
pub mod accum;

Some files were not shown because too many files have changed in this diff.