Mirror of https://github.com/neondatabase/neon.git (synced 2026-01-31 01:00:36 +00:00)
Compare commits: embedded_w...set_hints_
500 commits, from 59ea3973a4 to 9ad99152b8.
.circleci/config.yml (new file, 312 lines)
@@ -0,0 +1,312 @@
version: 2.1

orbs:
  python: circleci/python@1.4.0

executors:
  zenith-build-executor:
    resource_class: xlarge
    docker:
      - image: cimg/rust:1.52.1

jobs:
  check-codestyle:
    executor: zenith-build-executor
    steps:
      - checkout

      - run:
          name: rustfmt
          when: always
          command: |
            cargo fmt --all -- --check

  # A job to build postgres
  build-postgres:
    executor: zenith-build-executor
    steps:
      # Checkout the git repo (circleci doesn't have a flag to enable submodules here)
      - checkout

      # Grab the postgres git revision to build a cache key.
      # Note this works even though the submodule hasn't been checked out yet.
      - run:
          name: Get postgres cache key
          command: |
            git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres

      - restore_cache:
          name: Restore postgres cache
          keys:
            # Restore ONLY if the rev key matches exactly
            - v03-postgres-cache-{{ checksum "/tmp/cache-key-postgres" }}

      # FIXME We could cache our own docker container, instead of installing packages every time.
      - run:
          name: apt install dependencies
          command: |
            if [ ! -e tmp_install/bin/postgres ]; then
              sudo apt update
              sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev
            fi

      # Build postgres if the restore_cache didn't find a build.
      # `make` can't figure out whether the cache is valid, since
      # it only compares file timestamps.
      - run:
          name: build postgres
          command: |
            if [ ! -e tmp_install/bin/postgres ]; then
              # "depth 1" saves some time by not cloning the whole repo
              git submodule update --init --depth 1
              make postgres
            fi

      - save_cache:
          name: Save postgres cache
          key: v03-postgres-cache-{{ checksum "/tmp/cache-key-postgres" }}
          paths:
            - tmp_install

  # A job to build zenith rust code
  build-zenith:
    executor: zenith-build-executor
    parameters:
      build_type:
        type: enum
        enum: ["debug", "release"]
    steps:
      - run:
          name: apt install dependencies
          command: |
            sudo apt update
            sudo apt install libssl-dev clang

      # Checkout the git repo (without submodules)
      - checkout

      # Grab the postgres git revision to build a cache key.
      # Note this works even though the submodule hasn't been checked out yet.
      - run:
          name: Get postgres cache key
          command: |
            git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres

      - restore_cache:
          name: Restore postgres cache
          keys:
            # Restore ONLY if the rev key matches exactly
            - v03-postgres-cache-{{ checksum "/tmp/cache-key-postgres" }}

      - restore_cache:
          name: Restore rust cache
          keys:
            # Require an exact match. While an out of date cache might speed up the build,
            # there's no way to clean out old packages, so the cache grows every time something
            # changes.
            - v03-rust-cache-deps-<< parameters.build_type >>-{{ checksum "Cargo.lock" }}

      # Build the rust code, including test binaries
      - run:
          name: Rust build << parameters.build_type >>
          command: |
            export CARGO_INCREMENTAL=0
            BUILD_TYPE="<< parameters.build_type >>"
            if [[ $BUILD_TYPE == "debug" ]]; then
              echo "Build in debug mode"
              cargo build --bins --tests
            elif [[ $BUILD_TYPE == "release" ]]; then
              echo "Build in release mode"
              cargo build --release --bins --tests
            fi

      - save_cache:
          name: Save rust cache
          key: v03-rust-cache-deps-<< parameters.build_type >>-{{ checksum "Cargo.lock" }}
          paths:
            - ~/.cargo/registry
            - ~/.cargo/git
            - target

      # Run rust unit tests
      - run: cargo test

      # Install the rust binaries, for use by test jobs
      # `--locked` is required; otherwise, `cargo install` will ignore Cargo.lock.
      # FIXME: this is a really silly way to install; maybe we should just output
      # a tarball as an artifact? Or a .deb package?
      - run:
          name: cargo install
          command: |
            export CARGO_INCREMENTAL=0
            BUILD_TYPE="<< parameters.build_type >>"
            if [[ $BUILD_TYPE == "debug" ]]; then
              echo "Install debug mode"
              CARGO_FLAGS="--debug"
            elif [[ $BUILD_TYPE == "release" ]]; then
              echo "Install release mode"
              # The default is release mode; there is no --release flag.
              CARGO_FLAGS=""
            fi
            cargo install $CARGO_FLAGS --locked --root /tmp/zenith --path pageserver
            cargo install $CARGO_FLAGS --locked --root /tmp/zenith --path walkeeper
            cargo install $CARGO_FLAGS --locked --root /tmp/zenith --path zenith

      # Install the postgres binaries, for use by test jobs
      # FIXME: this is a silly way to do "install"; maybe just output a standard
      # postgres package, whatever the favored form is (tarball? .deb package?)
      # Note that pg_regress needs some build artifacts that probably aren't
      # in the usual package...?
      - run:
          name: postgres install
          command: |
            cp -a tmp_install /tmp/zenith/pg_install

      # Save the rust output binaries for other jobs in this workflow.
      - persist_to_workspace:
          root: /tmp/zenith
          paths:
            - "*"

  run-pytest:
    #description: "Run pytest"
    executor: python/default
    parameters:
      # pytest args to specify the tests to run.
      #
      # This can be a test file name, e.g. 'test_pgbench.py', or a subdirectory,
      # or '-k foobar' to run tests containing string 'foobar'. See pytest man page
      # section SPECIFYING TESTS / SELECTING TESTS for details.
      #
      # Select the type of Rust build. Must be "release" or "debug".
      build_type:
        type: string
        default: "debug"
      # This parameter is required, to prevent the mistake of running all tests in one job.
      test_selection:
        type: string
        default: ""
      # Arbitrary parameters to pytest. For example "-s" to prevent capturing stdout/stderr
      extra_params:
        type: string
        default: ""
      needs_postgres_source:
        type: boolean
        default: false
    steps:
      - attach_workspace:
          at: /tmp/zenith
      - checkout
      - when:
          condition: << parameters.needs_postgres_source >>
          steps:
            - run: git submodule update --init --depth 1
      - run:
          name: Install pipenv & deps
          working_directory: test_runner
          command: |
            pip install pipenv
            pipenv install
      - run:
          name: Run pytest
          working_directory: test_runner
          environment:
            - ZENITH_BIN: /tmp/zenith/bin
            - POSTGRES_DISTRIB_DIR: /tmp/zenith/pg_install
            - TEST_OUTPUT: /tmp/test_output
          command: |
            TEST_SELECTION="<< parameters.test_selection >>"
            EXTRA_PARAMS="<< parameters.extra_params >>"
            if [ -z "$TEST_SELECTION" ]; then
              echo "test_selection must be set"
              exit 1
            fi
            # Run the tests.
            #
            # The junit.xml file allows CircleCI to display more fine-grained test information
            # in its "Tests" tab in the results page.
            # -s prevents pytest from capturing output, which helps to see
            # what's going on if the test hangs
            # --verbose prints name of each test (helpful when there are
            # multiple tests in one file)
            # -rA prints summary in the end
            pipenv run pytest --junitxml=$TEST_OUTPUT/junit.xml --tb=short -s --verbose -rA $TEST_SELECTION $EXTRA_PARAMS
      - run:
          # CircleCI artifacts are preserved one file at a time, so skipping
          # this step isn't a good idea. If you want to extract the
          # pageserver state, perhaps a tarball would be a better idea.
          name: Delete all data but logs
          when: always
          command: |
            du -sh /tmp/test_output/*
            find /tmp/test_output -type f ! -name "pg.log" ! -name "pageserver.log" ! -name "wal_acceptor.log" ! -name "regression.diffs" ! -name "junit.xml" -delete
            du -sh /tmp/test_output/*
      - store_artifacts:
          path: /tmp/test_output
      # The store_test_results step tells CircleCI where to find the junit.xml file.
      - store_test_results:
          path: /tmp/test_output

  # Build zenithdb/zenith:latest image and push it to Docker hub
  docker-image:
    docker:
      - image: cimg/base:2021.04
    steps:
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - run:
          name: Init postgres submodule
          command: git submodule update --init --depth 1
      - run:
          name: Build and push Docker image
          command: |
            echo $DOCKER_PWD | docker login -u $DOCKER_LOGIN --password-stdin
            docker build -t zenithdb/zenith:latest . && docker push zenithdb/zenith:latest

workflows:
  build_and_test:
    jobs:
      - check-codestyle
      - build-postgres
      - build-zenith:
          name: build-zenith-<< matrix.build_type >>
          matrix:
            parameters:
              build_type: ["debug", "release"]
          requires:
            - build-postgres
      - run-pytest:
          name: pg_regress tests << matrix.build_type >>
          matrix:
            parameters:
              build_type: ["debug", "release"]
          test_selection: batch_pg_regress
          needs_postgres_source: true
          requires:
            - build-zenith-<< matrix.build_type >>
      - run-pytest:
          name: other tests << matrix.build_type >>
          matrix:
            parameters:
              build_type: ["debug", "release"]
          test_selection: batch_others
          requires:
            - build-zenith-<< matrix.build_type >>
      - run-pytest:
          name: benchmarks
          build_type: release
          test_selection: performance
          requires:
            - build-zenith-release
      - docker-image:
          # Context gives an ability to login
          context: Docker Hub
          # Build image only for commits to main
          filters:
            branches:
              only:
                - main
          requires:
            - pg_regress tests release
            - other tests release
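For reference, the cache handling in `build-postgres` and `build-zenith` boils down to two shell steps: derive a key from the `vendor/postgres` submodule revision, then skip the Postgres build whenever a cached `tmp_install` is already present. A minimal sketch of that logic, using only commands that appear in the config above (running it outside CircleCI is purely illustrative):

```sh
# Sketch of the cache-key logic from the config (key prefix and paths taken
# verbatim from the jobs above; the echo is just for illustration).
cache_key="v03-postgres-cache-$(git rev-parse HEAD:vendor/postgres)"
echo "postgres cache key: $cache_key"

# The build step is a no-op whenever a cached tmp_install was restored.
if [ ! -e tmp_install/bin/postgres ]; then
    git submodule update --init --depth 1
    make postgres
fi
```

Because the key is the submodule revision itself, any bump of `vendor/postgres` misses the cache and triggers a full rebuild, which is what the "Restore ONLY if the rev key matches exactly" comment is about.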
.dockerignore (new file, 13 lines)
@@ -0,0 +1,13 @@
**/.git/
**/__pycache__
**/.pytest_cache

/target
/tmp_check
/tmp_install
/tmp_check_cli
/test_output
/.vscode
/.zenith
/integration_tests/.zenith
/Dockerfile
.github/workflows/testing.yml (vendored, 2 changed lines)
@@ -35,7 +35,7 @@ jobs:
      - name: Install postgres dependencies
        run: |
          sudo apt update
          sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libcurl4-openssl-dev
          sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev

      - name: Set pg revision for caching
        id: pg_ver
.gitignore (vendored, 5 changed lines)
@@ -2,5 +2,8 @@
/tmp_check
/tmp_install
/tmp_check_cli
__pycache__/
test_output/
.vscode
.zenith
/.zenith
/integration_tests/.zenith
.gitmodules (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
[submodule "vendor/postgres"]
	path = vendor/postgres
	url = https://github.com/libzenith/postgres
	url = https://github.com/zenithdb/postgres
	branch = main
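The submodule URL moves from `libzenith` to `zenithdb`. For an existing clone, the usual way to pick up a changed `.gitmodules` URL is a sync followed by an update; this is a generic git sketch, not part of the diff itself:

```sh
# Propagate the new submodule URL into .git/config, then refetch the submodule.
git submodule sync --recursive
git submodule update --init --recursive --depth 1
```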
CONTRIBUTING.md (new file, 31 lines)
@@ -0,0 +1,31 @@
# How to contribute

Howdy! Usual good software engineering practices apply. Write
tests. Write comments. Follow standard Rust coding practices where
possible. Use 'cargo fmt' and 'clippy' to tidy up formatting.

There are soft spots in the code, which could use cleanup,
refactoring, additional comments, and so forth. Let's try to raise the
bar, and clean things up as we go. Try to leave code in a better shape
than it was before.

## Submitting changes

1. Make a PR for every change.

Even seemingly trivial patches can break things in surprising ways.
Use of common sense is OK. If you're only fixing a typo in a comment,
it's probably fine to just push it. But if in doubt, open a PR.

2. Get at least one +1 on your PR before you push.

For simple patches, it will only take a minute for someone to review
it.

3. Always keep the CI green.

Do not push, if the CI failed on your PR. Even if you think it's not
your patch's fault. Help to fix the root cause if something else has
broken the CI, before pushing.

*Happy Hacking!*
COPYRIGHT (new file, 20 lines)
@@ -0,0 +1,20 @@
This software is licensed under the Apache 2.0 License:

----------------------------------------------------------------------------
Copyright 2021 Zenith Labs, Inc

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------------------------------------------------------------------------

The PostgreSQL submodule in vendor/postgres is licensed under the
PostgreSQL license. See vendor/postgres/COPYRIGHT.
Cargo.lock (generated, 1419 changed lines): file diff suppressed because it is too large.
Cargo.toml (15 changed lines)
@@ -1,10 +1,17 @@
[workspace]
members = [
    "integration_tests",
    "pageserver",
    "walkeeper",
    "zenith",
    "control_plane",
    "pageserver",
    "postgres_ffi",
    "proxy",
    "walkeeper",
    "workspace_hack",
    "zenith",
    "zenith_metrics",
    "zenith_utils",
]

[profile.release]
# This is useful for profiling and, to some extent, debug.
# Besides, debug info should not affect the performance.
debug = true
Dockerfile (new file, 78 lines)
@@ -0,0 +1,78 @@
#
# Docker image for console integration testing.
#

#
# Build Postgres separately --- this layer will be rebuilt only if one of
# mentioned paths will get any changes.
#
FROM zenithdb/build:buster AS pg-build
WORKDIR /zenith
COPY ./vendor/postgres vendor/postgres
COPY ./Makefile Makefile
RUN make -j $(getconf _NPROCESSORS_ONLN) -s postgres

#
# Calculate cargo dependencies.
# This will always run, but only generate recipe.json with list of dependencies without
# installing them.
#
FROM zenithdb/build:buster AS cargo-deps-inspect
WORKDIR /zenith
COPY . .
RUN cargo chef prepare --recipe-path /zenith/recipe.json

#
# Build cargo dependencies.
# This temp container should be rebuilt only if recipe.json was changed.
#
FROM zenithdb/build:buster AS deps-build
WORKDIR /zenith
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY --from=cargo-deps-inspect /usr/local/cargo/bin/cargo-chef /usr/local/cargo/bin/
COPY --from=cargo-deps-inspect /zenith/recipe.json recipe.json
RUN ROCKSDB_LIB_DIR=/usr/lib/ cargo chef cook --release --recipe-path recipe.json

#
# Build zenith binaries
#
FROM zenithdb/build:buster AS build
WORKDIR /zenith
COPY . .
# Copy cached dependencies
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY --from=deps-build /zenith/target target
COPY --from=deps-build /usr/local/cargo/ /usr/local/cargo/
RUN cargo build --release

#
# Copy binaries to resulting image.
#
FROM debian:buster-slim
WORKDIR /data

RUN apt-get update && apt-get -yq install librocksdb-dev libseccomp-dev openssl && \
    mkdir zenith_install

COPY --from=build /zenith/target/release/pageserver /usr/local/bin
COPY --from=build /zenith/target/release/wal_acceptor /usr/local/bin
COPY --from=build /zenith/target/release/proxy /usr/local/bin
COPY --from=pg-build /zenith/tmp_install postgres_install
COPY docker-entrypoint.sh /docker-entrypoint.sh

# Remove build artifacts (~ 500 MB)
RUN rm -rf postgres_install/build && \
    # 'Install' Postgres binaries locally
    cp -r postgres_install/* /usr/local/ && \
    # Prepare an archive of Postgres binaries (should be around 11 MB)
    # and keep it inside container for an ease of deploy pipeline.
    cd postgres_install && tar -czf /data/postgres_install.tar.gz . && cd .. && \
    rm -rf postgres_install

RUN useradd -d /data zenith && chown -R zenith:zenith /data

VOLUME ["/data"]
USER zenith
EXPOSE 6400
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["pageserver"]
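Both Dockerfiles lean on cargo-chef to keep the dependency layer cacheable: `prepare` records the dependency graph into `recipe.json`, `cook` builds only those dependencies, and the final `cargo build` then recompiles just the workspace crates. A rough sketch of the same three steps run by hand, outside Docker (assumes cargo-chef is installed, as `Dockerfile.build` below does with `cargo install cargo-chef`):

```sh
# Record the dependency list without building anything.
cargo chef prepare --recipe-path recipe.json

# Build only the dependencies; the Dockerfiles also set ROCKSDB_LIB_DIR=/usr/lib/
# here so librocksdb-sys links against the system rocksdb.
ROCKSDB_LIB_DIR=/usr/lib/ cargo chef cook --release --recipe-path recipe.json

# Finally build the zenith binaries themselves; only workspace crates rebuild.
cargo build --release
```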
Dockerfile.alpine (new file, 95 lines)
@@ -0,0 +1,95 @@
#
# Docker image for console integration testing.
#
# We may also reuse it in CI to unify installation process and as a general binaries building
# tool for production servers.
#
# Dynamic linking is used for librocksdb and libstdc++ because librocksdb-sys calls
# bindgen with "dynamic" feature flag. This also prevents usage of dockerhub alpine-rust
# images which are statically linked and have guards against any dlopen. I would rather
# prefer all static binaries so we may change the way librocksdb-sys builds or wait until
# we will have our own storage and drop rocksdb dependency.
#
# Cargo-chef is used to separate dependencies building from main binaries building. This
# way `docker build` will download and install dependencies only if there are changes to
# our Cargo.toml files.
#


#
# build postgres separately -- this layer will be rebuilt only if one of
# mentioned paths will get any changes
#
FROM alpine:3.13 as pg-build
RUN apk add --update clang llvm compiler-rt compiler-rt-static lld musl-dev binutils \
    make bison flex readline-dev zlib-dev perl linux-headers libseccomp-dev
WORKDIR zenith
COPY ./vendor/postgres vendor/postgres
COPY ./Makefile Makefile
# Build using clang and lld
RUN CC='clang' LD='lld' CFLAGS='-fuse-ld=lld --rtlib=compiler-rt' make postgres -j4

#
# Calculate cargo dependencies.
# This will always run, but only generate recipe.json with list of dependencies without
# installing them.
#
FROM alpine:20210212 as cargo-deps-inspect
RUN apk add --update rust cargo
RUN cargo install cargo-chef
WORKDIR zenith
COPY . .
RUN cargo chef prepare --recipe-path recipe.json

#
# Build cargo dependencies.
# This temp container would be rebuilt only if recipe.json was changed.
#
FROM alpine:20210212 as deps-build
RUN apk add --update rust cargo openssl-dev clang build-base
# rust-rocksdb can be built against system-wide rocksdb -- that saves about
# 10 minutes during build. Rocksdb apk package is in testing now, but use it
# anyway. In case of any troubles we can download and build rocksdb here manually
# (to cache it as a docker layer).
RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb-dev
WORKDIR zenith
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY --from=cargo-deps-inspect /root/.cargo/bin/cargo-chef /root/.cargo/bin/
COPY --from=cargo-deps-inspect /zenith/recipe.json recipe.json
RUN ROCKSDB_LIB_DIR=/usr/lib/ cargo chef cook --release --recipe-path recipe.json

#
# Build zenith binaries
#
FROM alpine:20210212 as build
RUN apk add --update rust cargo openssl-dev clang build-base
RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb-dev
WORKDIR zenith
COPY . .
# Copy cached dependencies
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY --from=deps-build /zenith/target target
COPY --from=deps-build /root/.cargo /root/.cargo
RUN cargo build --release

#
# Copy binaries to resulting image.
# build-base here to provide libstdc++ (it will also bring gcc, but leave it this way until we figure
# out how to statically link rocksdb or avoid it at all).
#
FROM alpine:3.13
RUN apk add --update openssl build-base libseccomp-dev
RUN apk --no-cache --update --repository https://dl-cdn.alpinelinux.org/alpine/edge/testing add rocksdb
COPY --from=build /zenith/target/release/pageserver /usr/local/bin
COPY --from=build /zenith/target/release/wal_acceptor /usr/local/bin
COPY --from=build /zenith/target/release/proxy /usr/local/bin
COPY --from=pg-build /zenith/tmp_install /usr/local
COPY docker-entrypoint.sh /docker-entrypoint.sh

RUN addgroup zenith && adduser -h /data -D -G zenith zenith
VOLUME ["/data"]
WORKDIR /data
USER zenith
EXPOSE 6400
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["pageserver"]
Dockerfile.build (new file, 15 lines)
@@ -0,0 +1,15 @@
#
# Image with all the required dependencies to build https://github.com/zenithdb/zenith
# and Postgres from https://github.com/zenithdb/postgres
# Also includes some rust development and build tools.
#
FROM rust:slim-buster
WORKDIR /zenith

# Install postgres and zenith build dependencies
# clang is for rocksdb
RUN apt-get update && apt-get -yq install automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
    libseccomp-dev pkg-config libssl-dev librocksdb-dev clang

# Install rust tools
RUN rustup component add clippy && cargo install cargo-chef cargo-audit
LICENSE (new file, 202 lines)
@@ -0,0 +1,202 @@
Apache License, Version 2.0, January 2004 (http://www.apache.org/licenses/): the standard, unmodified license text, including the appendix on how to apply the license to a work.
Makefile (35 changed lines)
@@ -1,3 +1,11 @@
# Seccomp BPF is only available for Linux
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
	SECCOMP = --with-libseccomp
else
	SECCOMP =
endif

#
# Top level Makefile to build Zenith and PostgreSQL
#
@@ -13,6 +21,7 @@ all: zenith postgres
### Zenith Rust bits
#
# The 'postgres_ffi' depends on the Postgres headers.
.PHONY: zenith
zenith: postgres-headers
	cargo build

@@ -21,21 +30,33 @@ tmp_install/build/config.status:
	+@echo "Configuring postgres build"
	mkdir -p tmp_install/build
	(cd tmp_install/build && \
	../../vendor/postgres/configure CFLAGS='-O0' --enable-debug --enable-cassert \
	--enable-depend --with-libxml --prefix=$(abspath tmp_install) > configure.log)
	../../vendor/postgres/configure CFLAGS='-O0 -g3 $(CFLAGS)' \
	--enable-cassert \
	--enable-debug \
	--enable-depend \
	$(SECCOMP) \
	--prefix=$(abspath tmp_install) > configure.log)

# nicer alias for running 'configure'
.PHONY: postgres-configure
postgres-configure: tmp_install/build/config.status

# Install the PostgreSQL header files into tmp_install/include
.PHONY: postgres-headers
postgres-headers: postgres-configure
	+@echo "Installing PostgreSQL headers"
	$(MAKE) -C tmp_install/build/src/include MAKELEVEL=0 install

# Compile and install PostgreSQL

# Compile and install PostgreSQL and contrib/zenith
.PHONY: postgres
postgres: postgres-configure
	+@echo "Compiling PostgreSQL"
	$(MAKE) -C tmp_install/build MAKELEVEL=0 install
	+@echo "Compiling contrib/zenith"
	$(MAKE) -C tmp_install/build/contrib/zenith install
	+@echo "Compiling contrib/zenith_test_utils"
	$(MAKE) -C tmp_install/build/contrib/zenith_test_utils install

postgres-clean:
	$(MAKE) -C tmp_install/build MAKELEVEL=0 clean
@@ -50,4 +71,10 @@ distclean:
	rm -rf tmp_install
	cargo clean

.PHONY: postgres-configure postgres postgres-headers zenith
.PHONY: fmt
fmt:
	./pre-commit.py --fix-inplace

.PHONY: setup-pre-commit-hook
setup-pre-commit-hook:
	ln -s -f ../../pre-commit.py .git/hooks/pre-commit
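The new Makefile prologue only passes `--with-libseccomp` to configure when building on Linux. A small sketch of what that conditional evaluates to on a given machine (illustration only; the Makefile itself does this with `ifeq` on `uname -s`):

```sh
# Mirror of the ifeq ($(UNAME_S),Linux) block above.
if [ "$(uname -s)" = "Linux" ]; then
    echo "SECCOMP=--with-libseccomp"
else
    echo "SECCOMP="
fi
```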
Pipfile.lock (generated, new symbolic link)
@@ -0,0 +1 @@
./test_runner/Pipfile.lock
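The new top-level `Pipfile.lock` is just a symlink into `test_runner/`; recreating it by hand from the repository root would look like this (sketch, assuming the link does not already exist):

```sh
ln -s ./test_runner/Pipfile.lock Pipfile.lock
```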
README.md (150 changed lines)
@@ -2,90 +2,132 @@

Zenith substitutes PostgreSQL storage layer and redistributes data across a cluster of nodes

## Architecture overview

A Zenith installation consists of Compute nodes and Storage engine.

Compute nodes are stateless PostgreSQL nodes, backed by zenith storage.

Zenith storage engine consists of two major components:
- Pageserver. Scalable storage backend for compute nodes.
- WAL service. The service that receives WAL from compute node and ensures that it is stored durably.

Pageserver consists of:
- Repository - Zenith storage implementation.
- WAL receiver - service that receives WAL from WAL service and stores it in the repository.
- Page service - service that communicates with compute nodes and responds with pages from the repository.
- WAL redo - service that builds pages from base images and WAL records on Page service request.

## Running local installation

1. Build zenith and patched postgres
```sh
git clone --recursive https://github.com/libzenith/zenith.git
cd zenith
make
1. Install build dependencies and other useful packages

On Ubuntu or Debian this set of packages should be sufficient to build the code:
```text
apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libseccomp-dev \
libssl-dev clang
```

2. Start pageserver and postgres on top of it (should be called from repo root):
[Rust] 1.52 or later is also required.

To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `tmp_install/bin` and `tmp_install/lib`, respectively.

To run the integration tests (not required to use the code), install
Python (3.6 or higher), and install python3 packages with `pipenv` using `pipenv install` in the project directory.

2. Build zenith and patched postgres
```sh
# Create ~/.zenith with proper paths to binaries and data
git clone --recursive https://github.com/zenithdb/zenith.git
cd zenith
make -j5
```

3. Start pageserver and postgres on top of it (should be called from repo root):
```sh
# Create repository in .zenith with proper paths to binaries and data
# Later that would be responsibility of a package install script
> ./target/debug/zenith init
> ./target/debug/zenith init
pageserver init succeeded

# start pageserver
> ./target/debug/zenith pageserver start
Starting pageserver at '127.0.0.1:64000'
> ./target/debug/zenith start
Starting pageserver at '127.0.0.1:64000' in .zenith
Pageserver started

# create and configure postgres data dir
> ./target/debug/zenith pg create
Creating new postgres: path=/Users/user/code/zenith/tmp_check_cli/compute/pg1 port=55432
Database initialized
# start postgres on top of the pageserver
> ./target/debug/zenith pg start main
Starting postgres node at 'host=127.0.0.1 port=55432 user=stas'
waiting for server to start.... done

# start it
> ./target/debug/zenith pg start pg1

# look up status and connection info
> ./target/debug/zenith pg list
NODE  ADDRESS          STATUS
pg1   127.0.0.1:55432  running
# check list of running postgres instances
> ./target/debug/zenith pg list
BRANCH  ADDRESS          LSN        STATUS
main    127.0.0.1:55432  0/1609610  running
```

3. Now it is possible to connect to postgres and run some queries:
```
> psql -p55432 -h 127.0.0.1 postgres
4. Now it is possible to connect to postgres and run some queries:
```text
> psql -p55432 -h 127.0.0.1 -U zenith_admin postgres
postgres=# CREATE TABLE t(key int primary key, value text);
CREATE TABLE
postgres=# insert into t values(1,1);
INSERT 0 1
postgres=# select * from t;
 key | value
 key | value
-----+-------
   1 | 1
(1 row)
```

5. And create branches and run postgres on them:
```sh
# create branch named migration_check
> ./target/debug/zenith branch migration_check main
Created branch 'migration_check' at 0/1609610

# check branches tree
> ./target/debug/zenith branch
 main
 ┗━ @0/1609610: migration_check

# start postgres on that branch
> ./target/debug/zenith pg start migration_check
Starting postgres node at 'host=127.0.0.1 port=55433 user=stas'
waiting for server to start.... done

# this new postgres instance will have all the data from 'main' postgres,
# but all modifications would not affect data in original postgres
> psql -p55433 -h 127.0.0.1 -U zenith_admin postgres
postgres=# select * from t;
 key | value
-----+-------
   1 | 1
(1 row)

postgres=# insert into t values(2,2);
INSERT 0 1
```

## Running tests

```sh
git clone --recursive https://github.com/libzenith/zenith.git
git clone --recursive https://github.com/zenithdb/zenith.git
make # builds also postgres and installs it to ./tmp_install
cargo test -- --test-threads=1
cd test_runner
pytest
```

## Source tree layout
## Documentation

/walkeeper:
Now we use README files to cover design ideas and overall architecture for each module and `rustdoc` style documentation comments. See also [/docs/](/docs/) for a top-level overview of all available markdown documentation.

WAL safekeeper. Written in Rust.

/pageserver:

Page Server. Written in Rust.

Depends on the modified 'postgres' binary for WAL redo.

/integration_tests:

Tests with different combinations of a Postgres compute node, WAL safekeeper and Page Server.

/mgmt-console:

Web UI to launch (modified) Postgres servers, using S3 as the backing store. Written in Python.
This is somewhat outdated, as it doesn't use the WAL safekeeper or Page Servers.

/vendor/postgres:

PostgreSQL source tree, with the modifications needed for Zenith.

/vendor/postgres/src/bin/safekeeper:

Extension (safekeeper_proxy) that runs in the compute node, and connects to the WAL safekeepers
and streams the WAL
- [/docs/sourcetree.md](/docs/sourcetree.md) contains overview of source tree layout.

To view your `rustdoc` documentation in a browser, try running `cargo doc --no-deps --open`

## Join the development

- Read `CONTRIBUTING.md` to learn about project code style and practices.
- Use glossary in [/docs/glossary.md](/docs/glossary.md)
- To get familiar with a source tree layout, use [/docs/sourcetree.md](/docs/sourcetree.md).
- To learn more about PostgreSQL internals, check http://www.interdb.jp/pg/index.html
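Taken together, the updated README quickstart amounts to the following sequence; the port (55432) and the `zenith_admin` user are the ones shown in the README examples above, so adjust them if your setup differs:

```sh
# Condensed from the quickstart above: build, init the repository, start the
# pageserver, create and start a compute node, then connect with psql.
git clone --recursive https://github.com/zenithdb/zenith.git
cd zenith
make -j5
./target/debug/zenith init
./target/debug/zenith start
./target/debug/zenith pg create
./target/debug/zenith pg start main
psql -p55432 -h 127.0.0.1 -U zenith_admin postgres
```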
@@ -9,19 +9,21 @@ edition = "2018"
[dependencies]
rand = "0.8.3"
tar = "0.4.33"
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="a0d067b66447951d1276a53fb09886539c3fa094" }
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="a0d067b66447951d1276a53fb09886539c3fa094" }

serde = ""
serde_derive = ""
toml = ""
lazy_static = ""
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
toml = "0.5"
lazy_static = "1.4"
regex = "1"
anyhow = "1.0"
hex = "0.4.3"
bytes = "1.0.1"
fs_extra = "1.2.0"
nix = "0.20"
url = "2.2.2"
hex = { version = "0.4.3", features = ["serde"] }
reqwest = { version = "0.11", features = ["blocking", "json"] }

pageserver = { path = "../pageserver" }
walkeeper = { path = "../walkeeper" }
postgres_ffi = { path = "../postgres_ffi" }
zenith_utils = { path = "../zenith_utils" }
workspace_hack = { path = "../workspace_hack" }
@@ -1,23 +1,26 @@
use std::fs::{self, File, OpenOptions};
use std::io::{Read, Write};
use std::io::Write;
use std::net::SocketAddr;
use std::net::TcpStream;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::process::{Command, ExitStatus};
use std::process::Command;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{collections::BTreeMap, path::PathBuf};

use anyhow::{Context, Result};
use lazy_static::lazy_static;
use postgres_ffi::pg_constants;
use regex::Regex;

use postgres::{Client, NoTls};
use zenith_utils::connstring::connection_host_port;
use zenith_utils::lsn::Lsn;
use zenith_utils::postgres_backend::AuthType;
use zenith_utils::zid::ZTenantId;
use zenith_utils::zid::ZTimelineId;

use crate::local_env::LocalEnv;
use crate::storage::{PageServerNode, WalProposerNode};
use pageserver::{zenith_repo_dir, ZTimelineId};
use crate::storage::PageServerNode;

//
|
||||
// ComputeControlPlane
|
||||
@@ -25,27 +28,36 @@ use pageserver::{zenith_repo_dir, ZTimelineId};
|
||||
pub struct ComputeControlPlane {
|
||||
base_port: u16,
|
||||
pageserver: Arc<PageServerNode>,
|
||||
pub nodes: BTreeMap<String, Arc<PostgresNode>>,
|
||||
pub nodes: BTreeMap<(ZTenantId, String), Arc<PostgresNode>>,
|
||||
env: LocalEnv,
|
||||
}
|
||||
|
||||
impl ComputeControlPlane {
|
||||
// Load current nodes with ports from data directories on disk
|
||||
// Directory structure has the following layout:
|
||||
// pgdatadirs
|
||||
// |- tenants
|
||||
// | |- <tenant_id>
|
||||
// | | |- <branch name>
|
||||
pub fn load(env: LocalEnv) -> Result<ComputeControlPlane> {
|
||||
// TODO: since the pageserver does not have a config file yet, we assume here that
// it is running on the default port. Change that once the pageserver has a config.
|
||||
let pageserver = Arc::new(PageServerNode::from_env(&env));
|
||||
|
||||
let pgdatadirspath = env.repo_path.join("pgdatadirs");
|
||||
let nodes: Result<BTreeMap<_, _>> = fs::read_dir(&pgdatadirspath)
|
||||
let mut nodes = BTreeMap::default();
|
||||
let pgdatadirspath = &env.pg_data_dirs_path();
|
||||
|
||||
for tenant_dir in fs::read_dir(&pgdatadirspath)
|
||||
.with_context(|| format!("failed to list {}", pgdatadirspath.display()))?
|
||||
.into_iter()
|
||||
.map(|f| {
|
||||
PostgresNode::from_dir_entry(f?, &env, &pageserver)
|
||||
.map(|node| (node.name.clone(), Arc::new(node)))
|
||||
})
|
||||
.collect();
|
||||
let nodes = nodes?;
|
||||
{
|
||||
let tenant_dir = tenant_dir?;
|
||||
for timeline_dir in fs::read_dir(tenant_dir.path())
|
||||
.with_context(|| format!("failed to list {}", tenant_dir.path().display()))?
|
||||
{
|
||||
let node = PostgresNode::from_dir_entry(timeline_dir?, &env, &pageserver)?;
|
||||
nodes.insert((node.tenantid, node.name.clone()), Arc::new(node));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ComputeControlPlane {
|
||||
base_port: 55431,
|
||||
@@ -73,71 +85,32 @@ impl ComputeControlPlane {
|
||||
}
|
||||
}
|
||||
|
||||
/// Connect to a page server, get base backup, and untar it to initialize a
|
||||
/// new data directory
|
||||
pub fn new_from_page_server(
|
||||
pub fn new_node(
|
||||
&mut self,
|
||||
is_test: bool,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
branch_name: &str,
|
||||
) -> Result<Arc<PostgresNode>> {
|
||||
let node_id = self.nodes.len() as u32 + 1;
|
||||
let timeline_id = self
|
||||
.pageserver
|
||||
.branch_get_by_name(&tenantid, branch_name)?
|
||||
.timeline_id;
|
||||
|
||||
let node = Arc::new(PostgresNode {
|
||||
name: format!("pg{}", node_id),
|
||||
name: branch_name.to_owned(),
|
||||
address: SocketAddr::new("127.0.0.1".parse().unwrap(), self.get_port()),
|
||||
env: self.env.clone(),
|
||||
pageserver: Arc::clone(&self.pageserver),
|
||||
is_test,
|
||||
timelineid,
|
||||
is_test: false,
|
||||
timelineid: timeline_id,
|
||||
tenantid,
|
||||
uses_wal_proposer: false,
|
||||
});
|
||||
|
||||
node.init_from_page_server()?;
|
||||
self.nodes.insert(node.name.clone(), Arc::clone(&node));
|
||||
node.create_pgdata()?;
|
||||
node.setup_pg_conf(self.env.auth_type)?;
|
||||
|
||||
Ok(node)
|
||||
}
|
||||
|
||||
pub fn new_test_node(&mut self, timelineid: ZTimelineId) -> Arc<PostgresNode> {
|
||||
let node = self.new_from_page_server(true, timelineid);
|
||||
assert!(node.is_ok());
|
||||
let node = node.unwrap();
|
||||
|
||||
// Configure the node to stream WAL directly to the pageserver
|
||||
node.append_conf(
|
||||
"postgresql.conf",
|
||||
format!(
|
||||
"callmemaybe_connstring = '{}'\n", // FIXME escaping
|
||||
node.connstr()
|
||||
)
|
||||
.as_str(),
|
||||
);
|
||||
|
||||
node
|
||||
}
|
||||
|
||||
pub fn new_test_master_node(&mut self, timelineid: ZTimelineId) -> Arc<PostgresNode> {
|
||||
let node = self.new_from_page_server(true, timelineid).unwrap();
|
||||
|
||||
node.append_conf(
|
||||
"postgresql.conf",
|
||||
"synchronous_standby_names = 'safekeeper_proxy'\n",
|
||||
);
|
||||
|
||||
node
|
||||
}
|
||||
|
||||
pub fn new_node(&mut self, timelineid: ZTimelineId) -> Result<Arc<PostgresNode>> {
|
||||
let node = self.new_from_page_server(false, timelineid).unwrap();
|
||||
|
||||
// Configure the node to stream WAL directly to the pageserver
|
||||
node.append_conf(
|
||||
"postgresql.conf",
|
||||
format!(
|
||||
"callmemaybe_connstring = '{}'\n", // FIXME escaping
|
||||
node.connstr()
|
||||
)
|
||||
.as_str(),
|
||||
);
|
||||
self.nodes
|
||||
.insert((tenantid, node.name.clone()), Arc::clone(&node));
|
||||
|
||||
Ok(node)
|
||||
}
|
||||
@@ -145,13 +118,16 @@ impl ComputeControlPlane {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PostgresNode {
|
||||
pub address: SocketAddr,
|
||||
name: String,
|
||||
pub env: LocalEnv,
|
||||
pageserver: Arc<PageServerNode>,
|
||||
is_test: bool,
|
||||
timelineid: ZTimelineId,
|
||||
pub timelineid: ZTimelineId,
|
||||
pub tenantid: ZTenantId,
|
||||
uses_wal_proposer: bool,
|
||||
}
|
||||
|
||||
impl PostgresNode {
|
||||
@@ -169,6 +145,10 @@ impl PostgresNode {
|
||||
|
||||
lazy_static! {
|
||||
static ref CONF_PORT_RE: Regex = Regex::new(r"(?m)^\s*port\s*=\s*(\d+)\s*$").unwrap();
|
||||
static ref CONF_TIMELINE_RE: Regex =
|
||||
Regex::new(r"(?m)^\s*zenith.zenith_timeline\s*=\s*'(\w+)'\s*$").unwrap();
|
||||
static ref CONF_TENANT_RE: Regex =
|
||||
Regex::new(r"(?m)^\s*zenith.zenith_tenant\s*=\s*'(\w+)'\s*$").unwrap();
|
||||
}
|
||||
|
||||
// parse data directory name
|
||||
@@ -184,6 +164,7 @@ impl PostgresNode {
|
||||
)
|
||||
})?;
|
||||
|
||||
// parse port
|
||||
let err_msg = format!(
|
||||
"failed to find port definition in config file {}",
|
||||
cfg_path.to_str().unwrap()
|
||||
@@ -199,11 +180,39 @@ impl PostgresNode {
|
||||
.parse()
|
||||
.with_context(|| err_msg)?;
|
||||
|
||||
// FIXME: What timeline is this server on? Would have to parse the postgresql.conf
|
||||
// file for that, too. It's currently not needed for anything, but it would be
|
||||
// nice to list the timeline in "zenith pg list"
|
||||
let timelineid_buf = [0u8; 16];
|
||||
let timelineid = ZTimelineId::from(timelineid_buf);
|
||||
// parse timeline
|
||||
let err_msg = format!(
|
||||
"failed to find timeline definition in config file {}",
|
||||
cfg_path.to_str().unwrap()
|
||||
);
|
||||
let timelineid: ZTimelineId = CONF_TIMELINE_RE
|
||||
.captures(config.as_str())
|
||||
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 1"))?
|
||||
.iter()
|
||||
.last()
|
||||
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 2"))?
|
||||
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 3"))?
|
||||
.as_str()
|
||||
.parse()
|
||||
.with_context(|| err_msg)?;
|
||||
|
||||
// parse tenant
|
||||
let err_msg = format!(
|
||||
"failed to find tenant definition in config file {}",
|
||||
cfg_path.to_str().unwrap()
|
||||
);
|
||||
let tenantid = CONF_TENANT_RE
|
||||
.captures(config.as_str())
|
||||
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 1"))?
|
||||
.iter()
|
||||
.last()
|
||||
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 2"))?
|
||||
.ok_or_else(|| anyhow::Error::msg(err_msg.clone() + " 3"))?
|
||||
.as_str()
|
||||
.parse()
|
||||
.with_context(|| err_msg)?;
|
||||
|
||||
let uses_wal_proposer = config.contains("wal_acceptors");
|
||||
|
||||
// ok now
|
||||
Ok(PostgresNode {
|
||||
@@ -213,106 +222,178 @@ impl PostgresNode {
|
||||
pageserver: Arc::clone(pageserver),
|
||||
is_test: false,
|
||||
timelineid,
|
||||
tenantid,
|
||||
uses_wal_proposer,
|
||||
})
|
||||
}
|
||||
|
||||
// Connect to a page server, get base backup, and untar it to initialize a
|
||||
// new data directory
|
||||
pub fn init_from_page_server(&self) -> Result<()> {
|
||||
let pgdata = self.pgdata();
|
||||
fn sync_walkeepers(&self) -> Result<Lsn> {
|
||||
let pg_path = self.env.pg_bin_dir().join("postgres");
|
||||
let sync_output = Command::new(pg_path)
|
||||
.arg("--sync-safekeepers")
|
||||
.env_clear()
|
||||
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("PGDATA", self.pgdata().to_str().unwrap())
|
||||
.output()
|
||||
.with_context(|| "sync-walkeepers failed")?;
|
||||
|
||||
if !sync_output.status.success() {
|
||||
anyhow::bail!(
|
||||
"sync-walkeepers failed: '{}'",
|
||||
String::from_utf8_lossy(&sync_output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
let lsn = Lsn::from_str(std::str::from_utf8(&sync_output.stdout)?.trim())?;
|
||||
println!("Walkeepers synced on {}", lsn);
|
||||
Ok(lsn)
|
||||
}
|
||||
|
||||
/// Get basebackup from the pageserver as a tar archive and extract it
|
||||
/// to the `self.pgdata()` directory.
|
||||
fn do_basebackup(&self, lsn: Option<Lsn>) -> Result<()> {
|
||||
println!(
|
||||
"Extracting base backup to create postgres instance: path={} port={}",
|
||||
pgdata.display(),
|
||||
self.pgdata().display(),
|
||||
self.address.port()
|
||||
);
|
||||
|
||||
// initialize data directory
|
||||
if self.is_test {
|
||||
fs::remove_dir_all(&pgdata).ok();
|
||||
}
|
||||
let sql = if let Some(lsn) = lsn {
|
||||
format!("basebackup {} {} {}", self.tenantid, self.timelineid, lsn)
|
||||
} else {
|
||||
format!("basebackup {} {}", self.tenantid, self.timelineid)
|
||||
};
|
||||
|
||||
let sql = format!("basebackup {}", self.timelineid);
|
||||
let mut client = self
|
||||
.pageserver
|
||||
.page_server_psql_client()
|
||||
.with_context(|| "connecting to page server failed")?;
|
||||
|
||||
fs::create_dir_all(&pgdata)
|
||||
.with_context(|| format!("could not create data directory {}", pgdata.display()))?;
|
||||
fs::set_permissions(pgdata.as_path(), fs::Permissions::from_mode(0o700)).with_context(
|
||||
|| {
|
||||
format!(
|
||||
"could not set permissions in data directory {}",
|
||||
pgdata.display()
|
||||
)
|
||||
},
|
||||
)?;
|
||||
|
||||
// FIXME: The compute node should be able to stream the WAL it needs from the WAL safekeepers or archive.
|
||||
// But that's not implemented yet. For now, 'pg_wal' is included in the base backup tarball that
|
||||
// we receive from the Page Server, so we don't need to create the empty 'pg_wal' directory here.
|
||||
//fs::create_dir_all(pgdata.join("pg_wal"))?;
|
||||
|
||||
let mut copyreader = client
|
||||
let copyreader = client
|
||||
.copy_out(sql.as_str())
|
||||
.with_context(|| "page server 'basebackup' command failed")?;
|
||||
|
||||
// FIXME: Currently, we slurp the whole tarball into memory, and then extract it,
|
||||
// but we really should do this:
|
||||
//let mut ar = tar::Archive::new(copyreader);
|
||||
let mut buf = vec![];
|
||||
copyreader
|
||||
.read_to_end(&mut buf)
|
||||
.with_context(|| "reading base backup from page server failed")?;
|
||||
let mut ar = tar::Archive::new(buf.as_slice());
|
||||
ar.unpack(&pgdata)
|
||||
// Read the archive directly from the `CopyOutReader`
|
||||
tar::Archive::new(copyreader)
|
||||
.unpack(&self.pgdata())
|
||||
.with_context(|| "extracting page backup failed")?;
|
||||
|
||||
// listen for selected port
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn create_pgdata(&self) -> Result<()> {
|
||||
fs::create_dir_all(&self.pgdata()).with_context(|| {
|
||||
format!(
|
||||
"could not create data directory {}",
|
||||
self.pgdata().display()
|
||||
)
|
||||
})?;
|
||||
fs::set_permissions(self.pgdata().as_path(), fs::Permissions::from_mode(0o700))
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"could not set permissions in data directory {}",
|
||||
self.pgdata().display()
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// Connect to a page server, get base backup, and untar it to initialize a
|
||||
// new data directory
|
||||
fn setup_pg_conf(&self, auth_type: AuthType) -> Result<()> {
|
||||
File::create(self.pgdata().join("postgresql.conf").to_str().unwrap())?;
|
||||
|
||||
// wal_log_hints is mandatory when running against pageserver (see gh issue#192)
|
||||
// TODO: is it possible to check wal_log_hints at pageserver side via XLOG_PARAMETER_CHANGE?
|
||||
self.append_conf(
|
||||
"postgresql.conf",
|
||||
&format!(
|
||||
"max_wal_senders = 10\n\
|
||||
wal_log_hints = on\n\
|
||||
max_replication_slots = 10\n\
|
||||
hot_standby = on\n\
|
||||
shared_buffers = 1MB\n\
|
||||
fsync = off\n\
|
||||
fsync = off\n\
|
||||
max_connections = 100\n\
|
||||
wal_sender_timeout = 0\n\
|
||||
wal_sender_timeout = 0\n\
|
||||
wal_level = replica\n\
|
||||
listen_addresses = '{address}'\n\
|
||||
port = {port}\n",
|
||||
address = self.address.ip(),
|
||||
port = self.address.port()
|
||||
),
|
||||
);
|
||||
)?;
|
||||
|
||||
// Never clean up old WAL. TODO: We should use a replication
|
||||
// slot or something proper, to prevent the compute node
|
||||
// from removing WAL that hasn't been streamed to the safekeepr or
|
||||
// page server yet. But this will do for now.
|
||||
self.append_conf("postgresql.conf", "wal_keep_size='10TB'\n");
|
||||
// from removing WAL that hasn't been streamed to the safekeeper or
|
||||
// page server yet. (gh issue #349)
|
||||
self.append_conf("postgresql.conf", "wal_keep_size='10TB'\n")?;
|
||||
|
||||
// Connect it to the page server.
|
||||
// set up authentication
|
||||
let password = if let AuthType::ZenithJWT = auth_type {
|
||||
"$ZENITH_AUTH_TOKEN"
|
||||
} else {
|
||||
""
|
||||
};
|
||||
|
||||
// Configure that node to take pages from pageserver
|
||||
let (host, port) = connection_host_port(&self.pageserver.pg_connection_config);
|
||||
self.append_conf(
|
||||
"postgresql.conf",
|
||||
&format!(
|
||||
"page_server_connstring = 'host={} port={}'\n\
|
||||
zenith_timeline='{}'\n",
|
||||
self.pageserver.address().ip(),
|
||||
self.pageserver.address().port(),
|
||||
self.timelineid
|
||||
),
|
||||
);
|
||||
format!(
|
||||
concat!(
|
||||
"shared_preload_libraries = zenith\n",
|
||||
// $ZENITH_AUTH_TOKEN will be replaced with the value from the environment variable during compute pg startup.
// It is done this way because otherwise the user would be able to retrieve the value using SHOW or pg_settings.
|
||||
"zenith.page_server_connstring = 'host={} port={} password={}'\n",
|
||||
"zenith.zenith_timeline='{}'\n",
|
||||
"zenith.zenith_tenant='{}'\n",
|
||||
),
|
||||
host, port, password, self.timelineid, self.tenantid,
|
||||
)
|
||||
.as_str(),
|
||||
)?;
|
||||
|
||||
// Configure the node to stream WAL directly to the pageserver
|
||||
self.append_conf(
|
||||
"postgresql.conf",
|
||||
format!(
|
||||
concat!(
|
||||
"synchronous_standby_names = 'pageserver'\n", // TODO: add a new function arg?
|
||||
"zenith.callmemaybe_connstring = '{}'\n", // FIXME escaping
|
||||
),
|
||||
self.connstr(),
|
||||
)
|
||||
.as_str(),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn pgdata(&self) -> PathBuf {
|
||||
self.env.repo_path.join("pgdatadirs").join(&self.name)
|
||||
fn load_basebackup(&self) -> Result<()> {
|
||||
let lsn = if self.uses_wal_proposer {
|
||||
// An LSN equal to WAL_SEGMENT_SIZE means that this is a bootstrap and we only need to
// download the latest data from the pageserver. That is a bit clumsy, but the whole
// bootstrap procedure is still evolving quite actively, so let's revisit it
// once things are more stable (TODO).
|
||||
let lsn = self.sync_walkeepers()?;
|
||||
if lsn == Lsn(pg_constants::WAL_SEGMENT_SIZE as u64) {
|
||||
None
|
||||
} else {
|
||||
Some(lsn)
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.do_basebackup(lsn)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn pgdata(&self) -> PathBuf {
|
||||
self.env.pg_data_dir(&self.tenantid, &self.name)
|
||||
}
|
||||
|
||||
pub fn status(&self) -> &str {
|
||||
@@ -328,61 +409,97 @@ impl PostgresNode {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn append_conf(&self, config: &str, opts: &str) {
|
||||
pub fn append_conf(&self, config: &str, opts: &str) -> Result<()> {
|
||||
OpenOptions::new()
|
||||
.append(true)
|
||||
.open(self.pgdata().join(config).to_str().unwrap())
|
||||
.unwrap()
|
||||
.write_all(opts.as_bytes())
|
||||
.unwrap();
|
||||
.open(self.pgdata().join(config).to_str().unwrap())?
|
||||
.write_all(opts.as_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn pg_ctl(&self, args: &[&str]) -> Result<()> {
|
||||
fn pg_ctl(&self, args: &[&str], auth_token: &Option<String>) -> Result<()> {
|
||||
let pg_ctl_path = self.env.pg_bin_dir().join("pg_ctl");
|
||||
let mut cmd = Command::new(pg_ctl_path);
|
||||
cmd.args(
|
||||
[
|
||||
&[
|
||||
"-D",
|
||||
self.pgdata().to_str().unwrap(),
|
||||
"-l",
|
||||
self.pgdata().join("pg.log").to_str().unwrap(),
|
||||
"-w", //wait till pg_ctl actually does what was asked
|
||||
],
|
||||
args,
|
||||
]
|
||||
.concat(),
|
||||
)
|
||||
.env_clear()
|
||||
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap());
|
||||
|
||||
if let Some(token) = auth_token {
|
||||
cmd.env("ZENITH_AUTH_TOKEN", token);
|
||||
}
|
||||
let pg_ctl = cmd.status().with_context(|| "pg_ctl failed")?;
|
||||
|
||||
let pg_ctl = Command::new(pg_ctl_path)
|
||||
.args(
|
||||
[
|
||||
&[
|
||||
"-D",
|
||||
self.pgdata().to_str().unwrap(),
|
||||
"-l",
|
||||
self.pgdata().join("log").to_str().unwrap(),
|
||||
],
|
||||
args,
|
||||
]
|
||||
.concat(),
|
||||
)
|
||||
.env_clear()
|
||||
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.status()
|
||||
.with_context(|| "pg_ctl failed")?;
|
||||
if !pg_ctl.success() {
|
||||
anyhow::bail!("pg_ctl failed");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn start(&self) -> Result<()> {
|
||||
pub fn start(&self, auth_token: &Option<String>) -> Result<()> {
|
||||
// Bail if the node is already running.
|
||||
if self.status() == "running" {
|
||||
anyhow::bail!("The node is already running");
|
||||
}
|
||||
|
||||
// 1. We always start compute node from scratch, so
|
||||
// if old dir exists, preserve 'postgresql.conf' and drop the directory
|
||||
let postgresql_conf_path = self.pgdata().join("postgresql.conf");
|
||||
let postgresql_conf = fs::read(&postgresql_conf_path).with_context(|| {
|
||||
format!(
|
||||
"failed to read config file in {}",
|
||||
postgresql_conf_path.to_str().unwrap()
|
||||
)
|
||||
})?;
|
||||
fs::remove_dir_all(&self.pgdata())?;
|
||||
self.create_pgdata()?;
|
||||
|
||||
// 2. Bring back config files
|
||||
fs::write(&postgresql_conf_path, postgresql_conf)?;
|
||||
|
||||
// 3. Load basebackup
|
||||
self.load_basebackup()?;
|
||||
|
||||
// 4. Finally start the compute node postgres
|
||||
println!("Starting postgres node at '{}'", self.connstr());
|
||||
self.pg_ctl(&["start"])
|
||||
self.pg_ctl(&["start"], auth_token)
|
||||
}
|
||||
|
||||
pub fn restart(&self) -> Result<()> {
|
||||
self.pg_ctl(&["restart"])
|
||||
pub fn restart(&self, auth_token: &Option<String>) -> Result<()> {
|
||||
self.pg_ctl(&["restart"], auth_token)
|
||||
}
|
||||
|
||||
pub fn stop(&self) -> Result<()> {
|
||||
self.pg_ctl(&["-m", "immediate", "stop"])
|
||||
pub fn stop(&self, destroy: bool) -> Result<()> {
|
||||
self.pg_ctl(&["-m", "immediate", "stop"], &None)?;
|
||||
if destroy {
|
||||
println!(
|
||||
"Destroying postgres data directory '{}'",
|
||||
self.pgdata().to_str().unwrap()
|
||||
);
|
||||
fs::remove_dir_all(&self.pgdata())?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn connstr(&self) -> String {
|
||||
format!(
|
||||
"host={} port={} user={}",
|
||||
"host={} port={} user={} dbname={}",
|
||||
self.address.ip(),
|
||||
self.address.port(),
|
||||
self.whoami()
|
||||
"zenith_admin",
|
||||
"postgres"
|
||||
)
|
||||
}
|
||||
|
||||
@@ -398,152 +515,6 @@ impl PostgresNode {
|
||||
|
||||
String::from_utf8(output.stdout).unwrap().trim().to_string()
|
||||
}
|
||||
|
||||
fn dump_log_file(&self) {
|
||||
if let Ok(mut file) = File::open(self.env.repo_path.join("pageserver.log")) {
|
||||
let mut buffer = String::new();
|
||||
file.read_to_string(&mut buffer).unwrap();
|
||||
println!("--------------- pageserver.log:\n{}", buffer);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn safe_psql(&self, db: &str, sql: &str) -> Vec<tokio_postgres::Row> {
|
||||
let connstring = format!(
|
||||
"host={} port={} dbname={} user={}",
|
||||
self.address.ip(),
|
||||
self.address.port(),
|
||||
db,
|
||||
self.whoami()
|
||||
);
|
||||
let mut client = Client::connect(connstring.as_str(), NoTls).unwrap();
|
||||
|
||||
println!("Running {}", sql);
|
||||
let result = client.query(sql, &[]);
|
||||
if result.is_err() {
|
||||
self.dump_log_file();
|
||||
}
|
||||
result.unwrap()
|
||||
}
|
||||
|
||||
pub fn open_psql(&self, db: &str) -> Client {
|
||||
let connstring = format!(
|
||||
"host={} port={} dbname={} user={}",
|
||||
self.address.ip(),
|
||||
self.address.port(),
|
||||
db,
|
||||
self.whoami()
|
||||
);
|
||||
Client::connect(connstring.as_str(), NoTls).unwrap()
|
||||
}
|
||||
|
||||
pub fn start_proxy(&self, wal_acceptors: &str) -> WalProposerNode {
|
||||
let proxy_path = self.env.pg_bin_dir().join("safekeeper_proxy");
|
||||
match Command::new(proxy_path.as_path())
|
||||
.args(&["--ztimelineid", &self.timelineid.to_string()])
|
||||
.args(&["-s", wal_acceptors])
|
||||
.args(&["-h", &self.address.ip().to_string()])
|
||||
.args(&["-p", &self.address.port().to_string()])
|
||||
.arg("-v")
|
||||
.stderr(
|
||||
OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(self.pgdata().join("safekeeper_proxy.log"))
|
||||
.unwrap(),
|
||||
)
|
||||
.spawn()
|
||||
{
|
||||
Ok(child) => WalProposerNode { pid: child.id() },
|
||||
Err(e) => panic!("Failed to launch {:?}: {}", proxy_path, e),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pg_regress(&self) -> ExitStatus {
|
||||
self.safe_psql("postgres", "CREATE DATABASE regression");
|
||||
let data_dir = zenith_repo_dir();
|
||||
let regress_run_path = data_dir.join("regress");
|
||||
fs::create_dir_all(®ress_run_path).unwrap();
|
||||
fs::create_dir_all(regress_run_path.join("testtablespace")).unwrap();
|
||||
std::env::set_current_dir(regress_run_path).unwrap();
|
||||
|
||||
let regress_build_path =
|
||||
Path::new(env!("CARGO_MANIFEST_DIR")).join("../tmp_install/build/src/test/regress");
|
||||
let regress_src_path =
|
||||
Path::new(env!("CARGO_MANIFEST_DIR")).join("../vendor/postgres/src/test/regress");
|
||||
|
||||
let regress_check = Command::new(regress_build_path.join("pg_regress"))
|
||||
.args(&[
|
||||
"--bindir=''",
|
||||
"--use-existing",
|
||||
format!("--bindir={}", self.env.pg_bin_dir().to_str().unwrap()).as_str(),
|
||||
format!("--dlpath={}", regress_build_path.to_str().unwrap()).as_str(),
|
||||
format!(
|
||||
"--schedule={}",
|
||||
regress_src_path.join("parallel_schedule").to_str().unwrap()
|
||||
)
|
||||
.as_str(),
|
||||
format!("--inputdir={}", regress_src_path.to_str().unwrap()).as_str(),
|
||||
])
|
||||
.env_clear()
|
||||
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("PGPORT", self.address.port().to_string())
|
||||
.env("PGUSER", self.whoami())
|
||||
.env("PGHOST", self.address.ip().to_string())
|
||||
.status()
|
||||
.expect("pg_regress failed");
|
||||
if !regress_check.success() {
|
||||
if let Ok(mut file) = File::open("regression.diffs") {
|
||||
let mut buffer = String::new();
|
||||
file.read_to_string(&mut buffer).unwrap();
|
||||
println!("--------------- regression.diffs:\n{}", buffer);
|
||||
}
|
||||
self.dump_log_file();
|
||||
if let Ok(mut file) = File::open(
|
||||
self.env
|
||||
.repo_path
|
||||
.join("pgdatadirs")
|
||||
.join("pg1")
|
||||
.join("log"),
|
||||
) {
|
||||
let mut buffer = String::new();
|
||||
file.read_to_string(&mut buffer).unwrap();
|
||||
println!("--------------- pgdatadirs/pg1/log:\n{}", buffer);
|
||||
}
|
||||
}
|
||||
regress_check
|
||||
}
|
||||
|
||||
pub fn pg_bench(&self, clients: u32, seconds: u32) -> ExitStatus {
|
||||
let port = self.address.port().to_string();
|
||||
let clients = clients.to_string();
|
||||
let seconds = seconds.to_string();
|
||||
let _pg_bench_init = Command::new(self.env.pg_bin_dir().join("pgbench"))
|
||||
.args(&["-i", "-p", port.as_str(), "postgres"])
|
||||
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.status()
|
||||
.expect("pgbench -i");
|
||||
let pg_bench_run = Command::new(self.env.pg_bin_dir().join("pgbench"))
|
||||
.args(&[
|
||||
"-p",
|
||||
port.as_str(),
|
||||
"-T",
|
||||
seconds.as_str(),
|
||||
"-P",
|
||||
"1",
|
||||
"-c",
|
||||
clients.as_str(),
|
||||
"-M",
|
||||
"prepared",
|
||||
"postgres",
|
||||
])
|
||||
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.status()
|
||||
.expect("pgbench run");
|
||||
pg_bench_run
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for PostgresNode {
|
||||
@@ -552,7 +523,7 @@ impl Drop for PostgresNode {
|
||||
// and checking it here. But let just clean datadirs on start.
|
||||
fn drop(&mut self) {
|
||||
if self.is_test {
|
||||
let _ = self.stop();
|
||||
let _ = self.stop(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,31 @@
|
||||
//
|
||||
// Local control plane.
|
||||
//
|
||||
// Can start, cofigure and stop postgres instances running as a local processes.
|
||||
// Can start, configure and stop postgres instances running as a local processes.
|
||||
//
|
||||
// Intended to be used in integration tests and in CLI tools for
|
||||
// local installations.
|
||||
//
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
pub mod compute;
|
||||
pub mod local_env;
|
||||
pub mod storage;
|
||||
|
||||
/// Read a PID file
|
||||
///
|
||||
/// We expect a file that contains a single integer.
|
||||
/// We return an i32 for compatibility with libc and nix.
|
||||
pub fn read_pidfile(pidfile: &Path) -> Result<i32> {
|
||||
let pid_str = fs::read_to_string(pidfile)
|
||||
.with_context(|| format!("failed to read pidfile {:?}", pidfile))?;
|
||||
let pid: i32 = pid_str
|
||||
.parse()
|
||||
.map_err(|_| anyhow!("failed to parse pidfile {:?}", pidfile))?;
|
||||
if pid < 1 {
|
||||
bail!("pidfile {:?} contained bad value '{}'", pidfile, pid);
|
||||
}
|
||||
Ok(pid)
|
||||
}
|
||||
|
||||
@@ -4,34 +4,30 @@
|
||||
// Now it also provides init method which acts like a stub for proper installation
|
||||
// script which will use local paths.
|
||||
//
|
||||
use anyhow::Context;
|
||||
use bytes::Bytes;
|
||||
use rand::Rng;
|
||||
use std::env;
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use hex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::PathBuf;
|
||||
use std::process::{Command, Stdio};
|
||||
use std::{collections::BTreeMap, env};
|
||||
use url::Url;
|
||||
use zenith_utils::auth::{encode_from_key_path, Claims, Scope};
|
||||
use zenith_utils::postgres_backend::AuthType;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
|
||||
use anyhow::Result;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
|
||||
use pageserver::zenith_repo_dir;
|
||||
use pageserver::ZTimelineId;
|
||||
use postgres_ffi::xlog_utils;
|
||||
pub type Remotes = BTreeMap<String, String>;
|
||||
|
||||
//
|
||||
// This data structure represents deserialized zenith config, which should be
|
||||
// located in ~/.zenith
|
||||
// These data structures represent the deserialized zenith CLI config
|
||||
//
|
||||
// TODO: should we also support ZENITH_CONF env var?
|
||||
//
|
||||
#[derive(Serialize, Deserialize, Clone)]
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct LocalEnv {
|
||||
// Path to the Repository. Here page server and compute nodes will create and store their data.
|
||||
pub repo_path: PathBuf,
|
||||
// Pageserver connection strings
|
||||
pub pageserver_connstring: String,
|
||||
|
||||
// System identifier, from the PostgreSQL control file
|
||||
pub systemid: u64,
|
||||
// Base directory for both pageserver and compute nodes
|
||||
pub base_data_dir: PathBuf,
|
||||
|
||||
// Path to postgres distribution. It's expected that "bin", "include",
|
||||
// "lib", "share" from postgres distribution are there. If at some point
|
||||
@@ -39,191 +35,183 @@ pub struct LocalEnv {
|
||||
// to four separate paths and match OS-specific installation layout.
|
||||
pub pg_distrib_dir: PathBuf,
|
||||
|
||||
// Path to pageserver binary.
|
||||
pub zenith_distrib_dir: PathBuf,
|
||||
// Path to pageserver binary. Empty for remote pageserver.
|
||||
pub zenith_distrib_dir: Option<PathBuf>,
|
||||
|
||||
// keeping tenant id in config to reduce copy paste when running zenith locally with single tenant
|
||||
#[serde(with = "hex")]
|
||||
pub tenantid: ZTenantId,
|
||||
|
||||
// jwt auth token used for communication with pageserver
|
||||
pub auth_token: String,
|
||||
|
||||
// used to determine which auth type is used
|
||||
pub auth_type: AuthType,
|
||||
|
||||
// used to issue tokens during e.g pg start
|
||||
pub private_key_path: PathBuf,
|
||||
|
||||
pub remotes: Remotes,
|
||||
}
|
||||
|
||||
impl LocalEnv {
|
||||
// postgres installation
|
||||
// postgres installation paths
|
||||
pub fn pg_bin_dir(&self) -> PathBuf {
|
||||
self.pg_distrib_dir.join("bin")
|
||||
}
|
||||
pub fn pg_lib_dir(&self) -> PathBuf {
|
||||
self.pg_distrib_dir.join("lib")
|
||||
}
|
||||
|
||||
pub fn pageserver_bin(&self) -> Result<PathBuf> {
|
||||
Ok(self
|
||||
.zenith_distrib_dir
|
||||
.as_ref()
|
||||
.ok_or_else(|| anyhow!("Can not manage remote pageserver"))?
|
||||
.join("pageserver"))
|
||||
}
|
||||
|
||||
pub fn pg_data_dirs_path(&self) -> PathBuf {
|
||||
self.base_data_dir.join("pgdatadirs").join("tenants")
|
||||
}
|
||||
|
||||
pub fn pg_data_dir(&self, tenantid: &ZTenantId, branch_name: &str) -> PathBuf {
|
||||
self.pg_data_dirs_path()
|
||||
.join(tenantid.to_string())
|
||||
.join(branch_name)
|
||||
}
|
||||
|
||||
// TODO: move pageserver files into ./pageserver
|
||||
pub fn pageserver_data_dir(&self) -> PathBuf {
|
||||
self.base_data_dir.clone()
|
||||
}
|
||||
}
|
||||
|
||||
fn base_path() -> PathBuf {
|
||||
match std::env::var_os("ZENITH_REPO_DIR") {
|
||||
Some(val) => PathBuf::from(val.to_str().unwrap()),
|
||||
None => ".zenith".into(),
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Initialize a new Zenith repository
|
||||
//
|
||||
pub fn init() -> Result<()> {
|
||||
pub fn init(
|
||||
remote_pageserver: Option<&str>,
|
||||
tenantid: ZTenantId,
|
||||
auth_type: AuthType,
|
||||
) -> Result<()> {
|
||||
// check if config already exists
|
||||
let repo_path = zenith_repo_dir();
|
||||
if repo_path.exists() {
|
||||
let base_path = base_path();
|
||||
if base_path.exists() {
|
||||
anyhow::bail!(
|
||||
"{} already exists. Perhaps already initialized?",
|
||||
repo_path.to_str().unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
// For now, 'init' can only be run from the crate directory, so check that the current dir is our crate.
// Use 'pageserver/Cargo.toml' existence as evidence.
|
||||
let cargo_path = env::current_dir()?;
|
||||
if !cargo_path.join("pageserver/Cargo.toml").exists() {
|
||||
anyhow::bail!(
|
||||
"Current directory does not look like a zenith repo. \
|
||||
Please, run 'init' from zenith repo root."
|
||||
base_path.to_str().unwrap()
|
||||
);
|
||||
}
|
||||
fs::create_dir(&base_path)?;
|
||||
|
||||
// ok, now check that expected binaries are present
|
||||
|
||||
// check postgres
|
||||
let pg_distrib_dir = cargo_path.join("tmp_install");
|
||||
let pg_path = pg_distrib_dir.join("bin/postgres");
|
||||
if !pg_path.exists() {
|
||||
anyhow::bail!(
|
||||
"Can't find postres binary at {}. \
|
||||
Perhaps 'make postgres' is needed to build it first.",
|
||||
pg_path.to_str().unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
// check pageserver
|
||||
let zenith_distrib_dir = cargo_path.join("target/debug/");
|
||||
let pageserver_path = zenith_distrib_dir.join("pageserver");
|
||||
if !pageserver_path.exists() {
|
||||
anyhow::bail!(
|
||||
"Can't find pageserver binary at {}. Please build it.",
|
||||
pageserver_path.to_str().unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
// ok, we are good to go
|
||||
let mut conf = LocalEnv {
|
||||
repo_path,
|
||||
pg_distrib_dir,
|
||||
zenith_distrib_dir,
|
||||
systemid: 0,
|
||||
// Find postgres binaries. Follow POSTGRES_DISTRIB_DIR if set, otherwise look in "tmp_install".
|
||||
let pg_distrib_dir: PathBuf = {
|
||||
if let Some(postgres_bin) = env::var_os("POSTGRES_DISTRIB_DIR") {
|
||||
postgres_bin.into()
|
||||
} else {
|
||||
let cwd = env::current_dir()?;
|
||||
cwd.join("tmp_install")
|
||||
}
|
||||
};
|
||||
init_repo(&mut conf)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn init_repo(local_env: &mut LocalEnv) -> Result<()> {
|
||||
let repopath = &local_env.repo_path;
|
||||
fs::create_dir(&repopath)
|
||||
.with_context(|| format!("could not create directory {}", repopath.display()))?;
|
||||
fs::create_dir(repopath.join("pgdatadirs"))?;
|
||||
fs::create_dir(repopath.join("timelines"))?;
|
||||
fs::create_dir(repopath.join("refs"))?;
|
||||
fs::create_dir(repopath.join("refs").join("branches"))?;
|
||||
fs::create_dir(repopath.join("refs").join("tags"))?;
|
||||
println!("created directory structure in {}", repopath.display());
|
||||
|
||||
// Create initial timeline
|
||||
let tli = create_timeline(&local_env, None)?;
|
||||
let timelinedir = repopath.join("timelines").join(tli.to_string());
|
||||
println!("created initial timeline {}", timelinedir.display());
|
||||
|
||||
// Run initdb
|
||||
//
|
||||
// We create the cluster temporarily in a "tmp" directory inside the repository,
|
||||
// and move it to the right location from there.
|
||||
let tmppath = repopath.join("tmp");
|
||||
|
||||
let initdb_path = local_env.pg_bin_dir().join("initdb");
|
||||
let initdb = Command::new(initdb_path)
|
||||
.args(&["-D", tmppath.to_str().unwrap()])
|
||||
.arg("--no-instructions")
|
||||
.env_clear()
|
||||
.env("LD_LIBRARY_PATH", local_env.pg_lib_dir().to_str().unwrap())
|
||||
.env(
|
||||
"DYLD_LIBRARY_PATH",
|
||||
local_env.pg_lib_dir().to_str().unwrap(),
|
||||
)
|
||||
.stdout(Stdio::null())
|
||||
.status()
|
||||
.with_context(|| "failed to execute initdb")?;
|
||||
if !initdb.success() {
|
||||
anyhow::bail!("initdb failed");
|
||||
if !pg_distrib_dir.join("bin/postgres").exists() {
|
||||
anyhow::bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
|
||||
}
|
||||
println!("initdb succeeded");
|
||||
|
||||
// Read control file to extract the LSN and system id
|
||||
let controlfile_path = tmppath.join("global").join("pg_control");
|
||||
let controlfile = postgres_ffi::decode_pg_control(Bytes::from(fs::read(controlfile_path)?))?;
|
||||
let systemid = controlfile.system_identifier;
|
||||
let lsn = controlfile.checkPoint;
|
||||
let lsnstr = format!("{:016X}", lsn);
|
||||
// generate keys for jwt
|
||||
// openssl genrsa -out private_key.pem 2048
|
||||
let private_key_path = base_path.join("auth_private_key.pem");
|
||||
let keygen_output = Command::new("openssl")
|
||||
.arg("genrsa")
|
||||
.args(&["-out", private_key_path.to_str().unwrap()])
|
||||
.arg("2048")
|
||||
.stdout(Stdio::null())
|
||||
.output()
|
||||
.with_context(|| "failed to generate auth private key")?;
|
||||
if !keygen_output.status.success() {
|
||||
anyhow::bail!(
|
||||
"openssl failed: '{}'",
|
||||
String::from_utf8_lossy(&keygen_output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
// Move the initial WAL file
|
||||
fs::rename(
|
||||
tmppath.join("pg_wal").join("000000010000000000000001"),
|
||||
timelinedir
|
||||
.join("wal")
|
||||
.join("000000010000000000000001.partial"),
|
||||
)?;
|
||||
println!("moved initial WAL file");
|
||||
let public_key_path = base_path.join("auth_public_key.pem");
|
||||
// openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
|
||||
let keygen_output = Command::new("openssl")
|
||||
.arg("rsa")
|
||||
.args(&["-in", private_key_path.to_str().unwrap()])
|
||||
.arg("-pubout")
|
||||
.args(&["-outform", "PEM"])
|
||||
.args(&["-out", public_key_path.to_str().unwrap()])
|
||||
.stdout(Stdio::null())
|
||||
.output()
|
||||
.with_context(|| "failed to generate auth private key")?;
|
||||
if !keygen_output.status.success() {
|
||||
anyhow::bail!(
|
||||
"openssl failed: '{}'",
|
||||
String::from_utf8_lossy(&keygen_output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
// Remove pg_wal
|
||||
fs::remove_dir_all(tmppath.join("pg_wal"))?;
|
||||
let auth_token =
|
||||
encode_from_key_path(&Claims::new(None, Scope::PageServerApi), &private_key_path)?;
|
||||
|
||||
force_crash_recovery(&tmppath)?;
|
||||
println!("updated pg_control");
|
||||
let conf = if let Some(addr) = remote_pageserver {
|
||||
// check that addr is parsable
|
||||
let _uri = Url::parse(addr).map_err(|e| anyhow!("{}: {}", addr, e))?;
|
||||
|
||||
let target = timelinedir.join("snapshots").join(&lsnstr);
|
||||
fs::rename(tmppath, &target)?;
|
||||
println!("moved 'tmp' to {}", target.display());
|
||||
LocalEnv {
|
||||
pageserver_connstring: format!("postgresql://{}/", addr),
|
||||
pg_distrib_dir,
|
||||
zenith_distrib_dir: None,
|
||||
base_data_dir: base_path,
|
||||
remotes: BTreeMap::default(),
|
||||
tenantid,
|
||||
auth_token,
|
||||
auth_type,
|
||||
private_key_path,
|
||||
}
|
||||
} else {
|
||||
// Find zenith binaries.
|
||||
let zenith_distrib_dir = env::current_exe()?.parent().unwrap().to_owned();
|
||||
if !zenith_distrib_dir.join("pageserver").exists() {
|
||||
anyhow::bail!("Can't find pageserver binary.",);
|
||||
}
|
||||
|
||||
// Create 'main' branch to refer to the initial timeline
|
||||
let data = tli.to_string();
|
||||
fs::write(repopath.join("refs").join("branches").join("main"), data)?;
|
||||
println!("created main branch");
|
||||
LocalEnv {
|
||||
pageserver_connstring: "postgresql://127.0.0.1:6400".to_string(),
|
||||
pg_distrib_dir,
|
||||
zenith_distrib_dir: Some(zenith_distrib_dir),
|
||||
base_data_dir: base_path,
|
||||
remotes: BTreeMap::default(),
|
||||
tenantid,
|
||||
auth_token,
|
||||
auth_type,
|
||||
private_key_path,
|
||||
}
|
||||
};
|
||||
|
||||
// Also update the system id in the LocalEnv
|
||||
local_env.systemid = systemid;
|
||||
fs::create_dir_all(conf.pg_data_dirs_path())?;
|
||||
|
||||
// write config
|
||||
let toml = toml::to_string(&local_env)?;
|
||||
fs::write(repopath.join("config"), toml)?;
|
||||
|
||||
println!(
|
||||
"new zenith repository was created in {}",
|
||||
repopath.display()
|
||||
);
|
||||
let toml = toml::to_string_pretty(&conf)?;
|
||||
fs::write(conf.base_data_dir.join("config"), toml)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// If control file says the cluster was shut down cleanly, modify it, to mark
|
||||
// it as crashed. That forces crash recovery when you start the cluster.
|
||||
//
|
||||
// FIXME:
|
||||
// We currently do this to the initial snapshot in "zenith init". It would
|
||||
// be more natural to do this when the snapshot is restored instead, but we
|
||||
// currently don't have any code to create new snapshots, so it doesn't matter
|
||||
// Or better yet, use a less hacky way of putting the cluster into recovery.
|
||||
// Perhaps create a backup label file in the data directory when it's restored.
|
||||
fn force_crash_recovery(datadir: &Path) -> Result<()> {
|
||||
// Read in the control file
|
||||
let controlfilepath = datadir.to_path_buf().join("global").join("pg_control");
|
||||
let mut controlfile =
|
||||
postgres_ffi::decode_pg_control(Bytes::from(fs::read(controlfilepath.as_path())?))?;
|
||||
// Locate and load config
|
||||
pub fn load_config() -> Result<LocalEnv> {
|
||||
let repopath = base_path();
|
||||
|
||||
controlfile.state = postgres_ffi::DBState_DB_IN_PRODUCTION;
|
||||
|
||||
fs::write(
|
||||
controlfilepath.as_path(),
|
||||
postgres_ffi::encode_pg_control(controlfile),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// check that config file is present
|
||||
pub fn load_config(repopath: &Path) -> Result<LocalEnv> {
|
||||
if !repopath.exists() {
|
||||
anyhow::bail!(
|
||||
"Zenith config is not found in {}. You need to run 'zenith init' first",
|
||||
@@ -231,159 +219,18 @@ pub fn load_config(repopath: &Path) -> Result<LocalEnv> {
|
||||
);
|
||||
}
|
||||
|
||||
// TODO: check that it looks like a zenith repository
|
||||
|
||||
// load and parse file
|
||||
let config = fs::read_to_string(repopath.join("config"))?;
|
||||
toml::from_str(config.as_str()).map_err(|e| e.into())
|
||||
}
|
||||
|
||||
// local env for tests
|
||||
pub fn test_env(testname: &str) -> LocalEnv {
|
||||
fs::create_dir_all("../tmp_check").expect("could not create directory ../tmp_check");
|
||||
|
||||
let repo_path = Path::new(env!("CARGO_MANIFEST_DIR"))
|
||||
.join("../tmp_check/")
|
||||
.join(testname);
|
||||
|
||||
// Remove remnants of old test repo
|
||||
let _ = fs::remove_dir_all(&repo_path);
|
||||
|
||||
let mut local_env = LocalEnv {
|
||||
repo_path,
|
||||
pg_distrib_dir: Path::new(env!("CARGO_MANIFEST_DIR")).join("../tmp_install"),
|
||||
zenith_distrib_dir: cargo_bin_dir(),
|
||||
systemid: 0,
|
||||
};
|
||||
init_repo(&mut local_env).expect("could not initialize zenith repository");
|
||||
local_env
|
||||
}
|
||||
|
||||
// Find the directory where the binaries were put (i.e. target/debug/)
|
||||
pub fn cargo_bin_dir() -> PathBuf {
|
||||
let mut pathbuf = std::env::current_exe().unwrap();
|
||||
|
||||
pathbuf.pop();
|
||||
if pathbuf.ends_with("deps") {
|
||||
pathbuf.pop();
|
||||
}
|
||||
|
||||
pathbuf
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct PointInTime {
|
||||
pub timelineid: ZTimelineId,
|
||||
pub lsn: u64,
|
||||
}
|
||||
|
||||
fn create_timeline(local_env: &LocalEnv, ancestor: Option<PointInTime>) -> Result<ZTimelineId> {
|
||||
let repopath = &local_env.repo_path;
|
||||
|
||||
// Create initial timeline
|
||||
let mut tli_buf = [0u8; 16];
|
||||
rand::thread_rng().fill(&mut tli_buf);
|
||||
let timelineid = ZTimelineId::from(tli_buf);
|
||||
|
||||
let timelinedir = repopath.join("timelines").join(timelineid.to_string());
|
||||
|
||||
fs::create_dir(&timelinedir)?;
|
||||
fs::create_dir(&timelinedir.join("snapshots"))?;
|
||||
fs::create_dir(&timelinedir.join("wal"))?;
|
||||
|
||||
if let Some(ancestor) = ancestor {
|
||||
let data = format!(
|
||||
"{}@{:X}/{:X}",
|
||||
ancestor.timelineid,
|
||||
ancestor.lsn >> 32,
|
||||
ancestor.lsn & 0xffffffff
|
||||
);
|
||||
fs::write(timelinedir.join("ancestor"), data)?;
|
||||
}
|
||||
|
||||
Ok(timelineid)
|
||||
}
|
||||
|
||||
// Parse an LSN in the format used in filenames
|
||||
//
|
||||
// For example: 00000000015D3DD8
|
||||
//
|
||||
fn parse_lsn(s: &str) -> std::result::Result<u64, std::num::ParseIntError> {
|
||||
u64::from_str_radix(s, 16)
|
||||
}
|
||||
|
||||
// Create a new branch in the repository (for the "zenith branch" subcommand)
|
||||
pub fn create_branch(
|
||||
local_env: &LocalEnv,
|
||||
branchname: &str,
|
||||
startpoint: PointInTime,
|
||||
) -> Result<()> {
|
||||
let repopath = &local_env.repo_path;
|
||||
|
||||
// create a new timeline for it
|
||||
let newtli = create_timeline(local_env, Some(startpoint))?;
|
||||
let newtimelinedir = repopath.join("timelines").join(newtli.to_string());
|
||||
|
||||
let data = newtli.to_string();
|
||||
fs::write(
|
||||
repopath.join("refs").join("branches").join(branchname),
|
||||
data,
|
||||
)?;
|
||||
|
||||
// Copy the latest snapshot (TODO: before the startpoint) and all WAL
|
||||
// TODO: be smarter and avoid the copying...
|
||||
let (_maxsnapshot, oldsnapshotdir) = find_latest_snapshot(local_env, startpoint.timelineid)?;
|
||||
let copy_opts = fs_extra::dir::CopyOptions::new();
|
||||
fs_extra::dir::copy(oldsnapshotdir, newtimelinedir.join("snapshots"), ©_opts)?;
|
||||
|
||||
let oldtimelinedir = repopath
|
||||
.join("timelines")
|
||||
.join(startpoint.timelineid.to_string());
|
||||
let mut copy_opts = fs_extra::dir::CopyOptions::new();
|
||||
copy_opts.content_only = true;
|
||||
fs_extra::dir::copy(
|
||||
oldtimelinedir.join("wal"),
|
||||
newtimelinedir.join("wal"),
|
||||
©_opts,
|
||||
)?;
|
||||
// Save the config. We use this to change the set of remotes from the CLI itself.
|
||||
pub fn save_config(conf: &LocalEnv) -> Result<()> {
|
||||
let config_path = base_path().join("config");
|
||||
let conf_str = toml::to_string_pretty(conf)?;
|
||||
|
||||
fs::write(config_path, conf_str)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Find the end of valid WAL in a wal directory
|
||||
pub fn find_end_of_wal(local_env: &LocalEnv, timeline: ZTimelineId) -> Result<u64> {
|
||||
let repopath = &local_env.repo_path;
|
||||
let waldir = repopath
|
||||
.join("timelines")
|
||||
.join(timeline.to_string())
|
||||
.join("wal");
|
||||
|
||||
let (lsn, _tli) = xlog_utils::find_end_of_wal(&waldir, 16 * 1024 * 1024, true);
|
||||
|
||||
Ok(lsn)
|
||||
}
|
||||
|
||||
// Find the latest snapshot for a timeline
|
||||
fn find_latest_snapshot(local_env: &LocalEnv, timeline: ZTimelineId) -> Result<(u64, PathBuf)> {
|
||||
let repopath = &local_env.repo_path;
|
||||
|
||||
let snapshotsdir = repopath
|
||||
.join("timelines")
|
||||
.join(timeline.to_string())
|
||||
.join("snapshots");
|
||||
let paths = fs::read_dir(&snapshotsdir)?;
|
||||
let mut maxsnapshot: u64 = 0;
|
||||
let mut snapshotdir: Option<PathBuf> = None;
|
||||
for path in paths {
|
||||
let path = path?;
|
||||
let filename = path.file_name().to_str().unwrap().to_owned();
|
||||
if let Ok(lsn) = parse_lsn(&filename) {
|
||||
maxsnapshot = std::cmp::max(lsn, maxsnapshot);
|
||||
snapshotdir = Some(path.path());
|
||||
}
|
||||
}
|
||||
if maxsnapshot == 0 {
|
||||
// TODO: check ancestor timeline
|
||||
anyhow::bail!("no snapshot found in {}", snapshotsdir.display());
|
||||
}
|
||||
|
||||
Ok((maxsnapshot, snapshotdir.unwrap()))
|
||||
}
|
||||
|
||||
@@ -1,177 +1,120 @@
|
||||
use anyhow::Result;
|
||||
use std::fs;
|
||||
use std::io;
|
||||
use std::net::SocketAddr;
|
||||
use std::collections::HashMap;
|
||||
use std::net::TcpStream;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
use std::str::FromStr;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use postgres::{Client, NoTls};
|
||||
use anyhow::{anyhow, bail, ensure, Result};
|
||||
use nix::sys::signal::{kill, Signal};
|
||||
use nix::unistd::Pid;
|
||||
use pageserver::http::models::{BranchCreateRequest, TenantCreateRequest};
|
||||
use postgres::{Config, NoTls};
|
||||
use reqwest::blocking::{Client, RequestBuilder};
|
||||
use reqwest::{IntoUrl, Method, StatusCode};
|
||||
use zenith_utils::postgres_backend::AuthType;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
|
||||
use crate::local_env::LocalEnv;
|
||||
use pageserver::ZTimelineId;
|
||||
use crate::read_pidfile;
|
||||
use pageserver::branches::BranchInfo;
|
||||
use zenith_utils::connstring::connection_address;
|
||||
|
||||
//
|
||||
// Collection of several example deployments useful for tests.
|
||||
//
|
||||
// I'm intentionally modelling the storage and compute control planes as separate entities,
|
||||
// as it is closer to the actual setup.
|
||||
//
|
||||
pub struct TestStorageControlPlane {
|
||||
pub wal_acceptors: Vec<WalAcceptorNode>,
|
||||
pub pageserver: Arc<PageServerNode>,
|
||||
pub test_done: AtomicBool,
|
||||
pub repopath: PathBuf,
|
||||
}
|
||||
|
||||
impl TestStorageControlPlane {
|
||||
// Peek into the repository, to grab the timeline ID of given branch
|
||||
pub fn get_branch_timeline(&self, branchname: &str) -> ZTimelineId {
|
||||
let branchpath = self.repopath.join("refs/branches/".to_owned() + branchname);
|
||||
|
||||
ZTimelineId::from_str(&(fs::read_to_string(&branchpath).unwrap())).unwrap()
|
||||
}
|
||||
|
||||
// postgres <-> page_server
|
||||
//
|
||||
// Initialize a new repository and configure a page server to run in it
|
||||
//
|
||||
pub fn one_page_server(local_env: &LocalEnv) -> TestStorageControlPlane {
|
||||
let repopath = local_env.repo_path.clone();
|
||||
|
||||
let pserver = Arc::new(PageServerNode {
|
||||
env: local_env.clone(),
|
||||
kill_on_exit: true,
|
||||
listen_address: None,
|
||||
});
|
||||
pserver.start().unwrap();
|
||||
|
||||
TestStorageControlPlane {
|
||||
wal_acceptors: Vec::new(),
|
||||
pageserver: pserver,
|
||||
test_done: AtomicBool::new(false),
|
||||
repopath,
|
||||
}
|
||||
}
|
||||
|
||||
// postgres <-> {wal_acceptor1, wal_acceptor2, ...}
|
||||
pub fn fault_tolerant(local_env: &LocalEnv, redundancy: usize) -> TestStorageControlPlane {
|
||||
let repopath = local_env.repo_path.clone();
|
||||
|
||||
let mut cplane = TestStorageControlPlane {
|
||||
wal_acceptors: Vec::new(),
|
||||
pageserver: Arc::new(PageServerNode {
|
||||
env: local_env.clone(),
|
||||
kill_on_exit: true,
|
||||
listen_address: None,
|
||||
}),
|
||||
test_done: AtomicBool::new(false),
|
||||
repopath,
|
||||
};
|
||||
cplane.pageserver.start().unwrap();
|
||||
|
||||
const WAL_ACCEPTOR_PORT: usize = 54321;
|
||||
|
||||
for i in 0..redundancy {
|
||||
let wal_acceptor = WalAcceptorNode {
|
||||
listen: format!("127.0.0.1:{}", WAL_ACCEPTOR_PORT + i)
|
||||
.parse()
|
||||
.unwrap(),
|
||||
data_dir: local_env.repo_path.join(format!("wal_acceptor_{}", i)),
|
||||
env: local_env.clone(),
|
||||
};
|
||||
wal_acceptor.init();
|
||||
wal_acceptor.start();
|
||||
cplane.wal_acceptors.push(wal_acceptor);
|
||||
}
|
||||
cplane
|
||||
}
|
||||
|
||||
pub fn stop(&self) {
|
||||
for wa in self.wal_acceptors.iter() {
|
||||
let _ = wa.stop();
|
||||
}
|
||||
self.test_done.store(true, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn get_wal_acceptor_conn_info(&self) -> String {
|
||||
self.wal_acceptors
|
||||
.iter()
|
||||
.map(|wa| wa.listen.to_string())
|
||||
.collect::<Vec<String>>()
|
||||
.join(",")
|
||||
}
|
||||
|
||||
pub fn is_running(&self) -> bool {
|
||||
self.test_done.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TestStorageControlPlane {
|
||||
fn drop(&mut self) {
|
||||
self.stop();
|
||||
}
|
||||
}
|
||||
const HTTP_BASE_URL: &str = "http://127.0.0.1:9898/v1";
|
||||
|
||||
//
|
||||
// Control routines for pageserver.
|
||||
//
|
||||
// Used in CLI and tests.
|
||||
//
|
||||
#[derive(Debug)]
|
||||
pub struct PageServerNode {
|
||||
kill_on_exit: bool,
|
||||
listen_address: Option<SocketAddr>,
|
||||
pub kill_on_exit: bool,
|
||||
pub pg_connection_config: Config,
|
||||
pub env: LocalEnv,
|
||||
pub http_client: Client,
|
||||
pub http_base_url: String,
|
||||
}
|
||||
|
||||
impl PageServerNode {
|
||||
pub fn from_env(env: &LocalEnv) -> PageServerNode {
|
||||
let password = if env.auth_type == AuthType::ZenithJWT {
|
||||
&env.auth_token
|
||||
} else {
|
||||
""
|
||||
};
|
||||
|
||||
PageServerNode {
|
||||
kill_on_exit: false,
|
||||
listen_address: None, // default
|
||||
pg_connection_config: Self::default_config(password), // default
|
||||
env: env.clone(),
|
||||
http_client: Client::new(),
|
||||
http_base_url: HTTP_BASE_URL.to_owned(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn address(&self) -> SocketAddr {
|
||||
match self.listen_address {
|
||||
Some(addr) => addr,
|
||||
None => "127.0.0.1:64000".parse().unwrap(),
|
||||
fn default_config(password: &str) -> Config {
|
||||
format!("postgresql://no_user:{}@localhost:64000/no_db", password)
|
||||
.parse()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn init(&self, create_tenant: Option<&str>, enable_auth: bool) -> Result<()> {
|
||||
let mut cmd = Command::new(self.env.pageserver_bin()?);
|
||||
let mut args = vec![
|
||||
"--init",
|
||||
"-D",
|
||||
self.env.base_data_dir.to_str().unwrap(),
|
||||
"--postgres-distrib",
|
||||
self.env.pg_distrib_dir.to_str().unwrap(),
|
||||
];
|
||||
|
||||
if enable_auth {
|
||||
args.extend(&["--auth-validation-public-key-path", "auth_public_key.pem"]);
|
||||
args.extend(&["--auth-type", "ZenithJWT"]);
|
||||
}
|
||||
|
||||
if let Some(tenantid) = create_tenant {
|
||||
args.extend(&["--create-tenant", tenantid])
|
||||
}
|
||||
|
||||
let status = cmd
|
||||
.args(args)
|
||||
.env_clear()
|
||||
.env("RUST_BACKTRACE", "1")
|
||||
.status()
|
||||
.expect("pageserver init failed");
|
||||
|
||||
if status.success() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!("pageserver init failed"))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn repo_path(&self) -> PathBuf {
|
||||
self.env.repo_path.clone()
|
||||
self.env.pageserver_data_dir()
|
||||
}
|
||||
|
||||
pub fn pid_file(&self) -> PathBuf {
|
||||
self.env.repo_path.join("pageserver.pid")
|
||||
self.repo_path().join("pageserver.pid")
|
||||
}
|
||||
|
||||
pub fn start(&self) -> Result<()> {
|
||||
println!(
|
||||
"Starting pageserver at '{}' in {}",
|
||||
self.address(),
|
||||
connection_address(&self.pg_connection_config),
|
||||
self.repo_path().display()
|
||||
);
|
||||
|
||||
let mut cmd = Command::new(self.env.zenith_distrib_dir.join("pageserver"));
|
||||
cmd.args(&["-l", self.address().to_string().as_str()])
|
||||
let mut cmd = Command::new(self.env.pageserver_bin()?);
|
||||
cmd.args(&["-D", self.repo_path().to_str().unwrap()])
|
||||
.arg("-d")
|
||||
.env_clear()
|
||||
.env("RUST_BACKTRACE", "1")
|
||||
.env("ZENITH_REPO_DIR", self.repo_path())
|
||||
.env("PATH", self.env.pg_bin_dir().to_str().unwrap()) // needs postres-wal-redo binary
|
||||
.env("LD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", self.env.pg_lib_dir().to_str().unwrap());
|
||||
.env("RUST_BACKTRACE", "1");
|
||||
|
||||
if !cmd.status()?.success() {
|
||||
anyhow::bail!(
|
||||
bail!(
|
||||
"Pageserver failed to start. See '{}' for details.",
|
||||
self.repo_path().join("pageserver.log").display()
|
||||
);
|
||||
@@ -180,74 +123,142 @@ impl PageServerNode {
|
||||
// It takes a while for the page server to start up. Wait until it is
|
||||
// open for business.
|
||||
for retries in 1..15 {
|
||||
let client = self.page_server_psql_client();
|
||||
if client.is_ok() {
|
||||
break;
|
||||
} else {
|
||||
println!("page server not responding yet, retrying ({})...", retries);
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
match self.check_status() {
|
||||
Ok(_) => {
|
||||
println!("Pageserver started");
|
||||
return Ok(());
|
||||
}
|
||||
Err(err) => {
|
||||
println!(
|
||||
"Pageserver not responding yet, err {} retrying ({})...",
|
||||
err, retries
|
||||
);
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
bail!("pageserver failed to start");
|
||||
}
|
||||
|
||||
pub fn stop(&self) -> Result<()> {
|
||||
let pidfile = self.pid_file();
|
||||
let pid = read_pidfile(&pidfile)?;
|
||||
|
||||
let status = Command::new("kill")
|
||||
.arg(&pid)
|
||||
.env_clear()
|
||||
.status()
|
||||
.expect("failed to execute kill");
|
||||
|
||||
if !status.success() {
|
||||
anyhow::bail!("Failed to kill pageserver with pid {}", pid);
|
||||
let pid = read_pidfile(&self.pid_file())?;
|
||||
let pid = Pid::from_raw(pid);
|
||||
if kill(pid, Signal::SIGTERM).is_err() {
|
||||
bail!("Failed to kill pageserver with pid {}", pid);
|
||||
}
|
||||
|
||||
// await for pageserver stop
|
||||
// wait for pageserver stop
|
||||
let address = connection_address(&self.pg_connection_config);
|
||||
for _ in 0..5 {
|
||||
let stream = TcpStream::connect(self.address());
|
||||
let stream = TcpStream::connect(&address);
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
if let Err(_e) = stream {
|
||||
println!("Pageserver stopped");
|
||||
return Ok(());
|
||||
}
|
||||
println!("Stopping pageserver on {}", self.address());
|
||||
thread::sleep(Duration::from_secs(1));
|
||||
println!("Stopping pageserver on {}", address);
|
||||
}
|
||||
|
||||
// ok, we failed to stop pageserver, let's panic
|
||||
if !status.success() {
|
||||
anyhow::bail!("Failed to stop pageserver with pid {}", pid);
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
bail!("Failed to stop pageserver with pid {}", pid);
|
||||
}
|
||||
|
||||
pub fn page_server_psql(&self, sql: &str) -> Vec<postgres::SimpleQueryMessage> {
|
||||
let connstring = format!(
|
||||
"host={} port={} dbname={} user={}",
|
||||
self.address().ip(),
|
||||
self.address().port(),
|
||||
"no_db",
|
||||
"no_user",
|
||||
);
|
||||
let mut client = Client::connect(connstring.as_str(), NoTls).unwrap();
|
||||
let mut client = self.pg_connection_config.connect(NoTls).unwrap();
|
||||
|
||||
println!("Pageserver query: '{}'", sql);
|
||||
client.simple_query(sql).unwrap()
|
||||
}
|
||||
|
||||
pub fn page_server_psql_client(
|
||||
&self,
|
||||
) -> std::result::Result<postgres::Client, postgres::Error> {
|
||||
let connstring = format!(
|
||||
"host={} port={} dbname={} user={}",
|
||||
self.address().ip(),
|
||||
self.address().port(),
|
||||
"no_db",
|
||||
"no_user",
|
||||
pub fn page_server_psql_client(&self) -> Result<postgres::Client, postgres::Error> {
|
||||
self.pg_connection_config.connect(NoTls)
|
||||
}
|
||||
|
||||
fn http_request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder {
|
||||
let mut builder = self.http_client.request(method, url);
|
||||
if self.env.auth_type == AuthType::ZenithJWT {
|
||||
builder = builder.bearer_auth(&self.env.auth_token)
|
||||
}
|
||||
builder
|
||||
}
|
||||
|
||||
pub fn check_status(&self) -> Result<()> {
|
||||
let status = self
|
||||
.http_request(Method::GET, format!("{}/{}", self.http_base_url, "status"))
|
||||
.send()?
|
||||
.status();
|
||||
ensure!(
|
||||
status == StatusCode::OK,
|
||||
format!("got unexpected response status {}", status)
|
||||
);
|
||||
Client::connect(connstring.as_str(), NoTls)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn tenant_list(&self) -> Result<Vec<String>> {
|
||||
Ok(self
|
||||
.http_request(Method::GET, format!("{}/{}", self.http_base_url, "tenant"))
|
||||
.send()?
|
||||
.error_for_status()?
|
||||
.json()?)
|
||||
}
|
||||
|
||||
pub fn tenant_create(&self, tenantid: ZTenantId) -> Result<()> {
|
||||
Ok(self
|
||||
.http_request(Method::POST, format!("{}/{}", self.http_base_url, "tenant"))
|
||||
.json(&TenantCreateRequest {
|
||||
tenant_id: tenantid,
|
||||
})
|
||||
.send()?
|
||||
.error_for_status()?
|
||||
.json()?)
|
||||
}
|
||||
|
||||
pub fn branch_list(&self, tenantid: &ZTenantId) -> Result<Vec<BranchInfo>> {
|
||||
Ok(self
|
||||
.http_request(
|
||||
Method::GET,
|
||||
format!("{}/branch/{}", self.http_base_url, tenantid),
|
||||
)
|
||||
.send()?
|
||||
.error_for_status()?
|
||||
.json()?)
|
||||
}
|
||||
|
||||
pub fn branch_create(
|
||||
&self,
|
||||
branch_name: &str,
|
||||
startpoint: &str,
|
||||
tenantid: &ZTenantId,
|
||||
) -> Result<BranchInfo> {
|
||||
Ok(self
|
||||
.http_request(Method::POST, format!("{}/{}", self.http_base_url, "branch"))
|
||||
.json(&BranchCreateRequest {
|
||||
tenant_id: tenantid.to_owned(),
|
||||
name: branch_name.to_owned(),
|
||||
start_point: startpoint.to_owned(),
|
||||
})
|
||||
.send()?
|
||||
.error_for_status()?
|
||||
.json()?)
|
||||
}
|
||||
|
||||
// TODO: make this a separate request type and avoid loading all the branches
|
||||
pub fn branch_get_by_name(
|
||||
&self,
|
||||
tenantid: &ZTenantId,
|
||||
branch_name: &str,
|
||||
) -> Result<BranchInfo> {
|
||||
let branch_infos = self.branch_list(tenantid)?;
|
||||
let branch_by_name: Result<HashMap<String, BranchInfo>> = branch_infos
|
||||
.into_iter()
|
||||
.map(|branch_info| Ok((branch_info.name.clone(), branch_info)))
|
||||
.collect();
|
||||
let branch_by_name = branch_by_name?;
|
||||
|
||||
let branch = branch_by_name
|
||||
.get(branch_name)
|
||||
.ok_or_else(|| anyhow!("Branch {} not found", branch_name))?;
|
||||
|
||||
Ok(branch.clone())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -258,106 +269,3 @@ impl Drop for PageServerNode {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Control routines for WalAcceptor.
|
||||
//
|
||||
// Now used only in test setups.
|
||||
//
|
||||
pub struct WalAcceptorNode {
|
||||
listen: SocketAddr,
|
||||
data_dir: PathBuf,
|
||||
env: LocalEnv,
|
||||
}
|
||||
|
||||
impl WalAcceptorNode {
|
||||
pub fn init(&self) {
|
||||
if self.data_dir.exists() {
|
||||
fs::remove_dir_all(self.data_dir.clone()).unwrap();
|
||||
}
|
||||
fs::create_dir_all(self.data_dir.clone()).unwrap();
|
||||
}
|
||||
|
||||
pub fn start(&self) {
|
||||
println!(
|
||||
"Starting wal_acceptor in {} listening '{}'",
|
||||
self.data_dir.to_str().unwrap(),
|
||||
self.listen
|
||||
);
|
||||
|
||||
let status = Command::new(self.env.zenith_distrib_dir.join("wal_acceptor"))
|
||||
.args(&["-D", self.data_dir.to_str().unwrap()])
|
||||
.args(&["-l", self.listen.to_string().as_str()])
|
||||
.args(&["--systemid", &self.env.systemid.to_string()])
|
||||
// Tell page server it can receive WAL from this WAL safekeeper
|
||||
// FIXME: If there are multiple safekeepers, they will all inform
|
||||
// the page server. Only the last "notification" will stay in effect.
|
||||
// So it's pretty random which safekeeper the page server will connect to
|
||||
.args(&["--pageserver", "127.0.0.1:64000"])
|
||||
.arg("-d")
|
||||
.arg("-n")
|
||||
.status()
|
||||
.expect("failed to start wal_acceptor");
|
||||
|
||||
if !status.success() {
|
||||
panic!("wal_acceptor start failed");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn stop(&self) -> std::result::Result<(), io::Error> {
|
||||
println!("Stopping wal acceptor on {}", self.listen);
|
||||
let pidfile = self.data_dir.join("wal_acceptor.pid");
|
||||
let pid = read_pidfile(&pidfile)?;
|
||||
// Ignores any failures when running this command
|
||||
let _status = Command::new("kill")
|
||||
.arg(pid)
|
||||
.env_clear()
|
||||
.status()
|
||||
.expect("failed to execute kill");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for WalAcceptorNode {
|
||||
fn drop(&mut self) {
|
||||
self.stop().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
pub struct WalProposerNode {
|
||||
pub pid: u32,
|
||||
}
|
||||
|
||||
impl WalProposerNode {
|
||||
pub fn stop(&self) {
|
||||
let status = Command::new("kill")
|
||||
.arg(self.pid.to_string())
|
||||
.env_clear()
|
||||
.status()
|
||||
.expect("failed to execute kill");
|
||||
|
||||
if !status.success() {
|
||||
panic!("kill start failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for WalProposerNode {
|
||||
fn drop(&mut self) {
|
||||
self.stop();
|
||||
}
|
||||
}
|
||||
|
||||
/// Read a PID file
|
||||
///
|
||||
/// This should contain an unsigned integer, but we return it as a String
|
||||
/// because our callers only want to pass it back into a subcommand.
|
||||
fn read_pidfile(pidfile: &Path) -> std::result::Result<String, io::Error> {
|
||||
fs::read_to_string(pidfile).map_err(|err| {
|
||||
eprintln!("failed to read pidfile {:?}: {:?}", pidfile, err);
|
||||
err
|
||||
})
|
||||
}
|
||||
|
||||
13
docker-entrypoint.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/sh
set -eux

if [ "$1" = 'pageserver' ]; then
    if [ ! -d "/data/tenants" ]; then
        echo "Initializing pageserver data directory"
        pageserver --init -D /data --postgres-distrib /usr/local
    fi
    echo "Starting pageserver at 0.0.0.0:6400"
    pageserver -l 0.0.0.0:6400 -D /data
else
    "$@"
fi
14
docs/README.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Zenith documentation
|
||||
|
||||
## Table of contents
|
||||
|
||||
- [authentication.md](authentication.md) — pageserver JWT authentication.
|
||||
- [docker.md](docker.md) — Docker images and building pipeline.
|
||||
- [glossary.md](glossary.md) — Glossary of all the terms used in codebase.
|
||||
- [multitenancy.md](multitenancy.md) — how multitenancy is organized in the pageserver and Zenith CLI.
|
||||
- [sourcetree.md](sourcetree.md) — Overview of the source tree layout.
|
||||
- [pageserver/README](/pageserver/README) — pageserver overview.
|
||||
- [postgres_ffi/README](/postgres_ffi/README) — Postgres FFI overview.
|
||||
- [test_runner/README.md](/test_runner/README.md) — tests infrastructure overview.
|
||||
- [walkeeper/README](/walkeeper/README.md) — WAL service overview.
|
||||
- [core_changes.md](core_changes.md) - Description of Zenith changes in Postgres core
|
||||
30
docs/authentication.md
Normal file
@@ -0,0 +1,30 @@
|
||||
## Authentication
|
||||
|
||||
### Overview
|
||||
|
||||
Authentication currently uses JWT tokens for communication between compute and pageserver and between the CLI and pageserver. The JWT is signed with an RSA key pair, which the CLI generates during `zenith init` using the following openssl commands:

```bash
openssl genrsa -out private_key.pem 2048
openssl rsa -in private_key.pem -pubout -outform PEM -out public_key.pem
```

The CLI also generates a signed token and saves it in its config for later access to the pageserver. Authentication is currently optional: the pageserver has two config settings, `auth_validation_public_key_path` and `auth_type`, and when `auth_type` is present and set to `ZenithJWT`, the pageserver requires authentication for connections. The JWT itself is passed in the password field of the connection string. One caveat for psql: it silently truncates passwords to 100 characters, so to pass a JWT through psql correctly you have to either use the `PGPASSWORD` environment variable or store the password in the psql config file.

There is currently no authentication between compute and safekeepers, because that communication layer is under heavy refactoring; support for authentication will be added there once the refactoring is done. For now the safekeeper supports a "hardcoded" token passed via an environment variable, so that the callmemaybe command in the pageserver can still be used.

Compute uses a token passed via an environment variable to communicate with the pageserver, and in the future with the safekeeper too.

JWT authentication currently supports two scopes: tenant and pageserverapi. The tenant scope is intended for tenant-related API calls, e.g. create_branch; a compute launched for a particular tenant also uses this scope. The pageserverapi scope is intended for the console to manage the pageserver; for now there is only one management operation, create tenant.

Example token generation in Python:

```python
# generate pageserverapi token
management_token = jwt.encode({"scope": "pageserverapi"}, auth_keys.priv, algorithm="RS256")

# generate tenant token
tenant_token = jwt.encode({"scope": "tenant", "tenant_id": ps.initial_tenant}, auth_keys.priv, algorithm="RS256")
```
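
For example, to hand such a token to `psql` without hitting the 100-character password truncation mentioned above, it can be supplied through the `PGPASSWORD` environment variable. The snippet below is only a sketch; the host, port, user and database names are placeholders:

```python
import os
import subprocess

# Placeholders for this sketch: adjust the host/port to your pageserver and use
# a real token generated as in the example above.
token = "<JWT generated as shown above>"
env = dict(os.environ, PGPASSWORD=token)  # avoids psql's 100-character password truncation

# The JWT travels in the password field of the connection; psql reads it from PGPASSWORD.
subprocess.run(["psql", "postgresql://no_user@127.0.0.1:64000/no_db"], env=env, check=True)
```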

Utility functions for working with JWTs in Rust are located in `zenith_utils/src/auth.rs`.
202
docs/core_changes.md
Normal file
@@ -0,0 +1,202 @@
|
||||
1. Add t_cid to XLOG record
|
||||
- Why?
|
||||
The cmin/cmax on a heap page is a real bummer. I don't see any other way to fix that than bite the bullet and modify the WAL-logging routine to include the cmin/cmax.
|
||||
|
||||
To recap, the problem is that the XLOG_HEAP_INSERT record does not include the command id of the inserted row, and the same applies to deletion/update. So in the primary, a row is inserted with the current xmin + cmin, but in the replica the cmin is always set to 1. That works because the command id is only relevant to the inserting transaction itself; after commit/abort, no one cares about it anymore.
|
||||
|
||||
- Alternatives?
|
||||
I don't know
|
||||
|
||||
2. Add PD_WAL_LOGGED.
|
||||
- Why?
|
||||
Postgres sometimes writes data to a page before it is WAL-logged. If such a page is swapped out, we would lose that change. The problem is currently solved by setting the PD_WAL_LOGGED bit in the page header. When a page without this bit set is written to the SMGR, it is forced out to the WAL as an FPI using the log_newpage_copy() function.

There was a wrong assumption that this can happen only during construction of some exotic indexes (like GiST). That is not true: the same situation can happen with COPY, VACUUM, and when record hint bits are set.
|
||||
|
||||
- Discussion:
|
||||
https://discord.com/channels/869525774699462656/882681420986851359
|
||||
|
||||
- Alternatives:
|
||||
Do not store this flag in the page header, but associate the bit with the shared buffer instead. Logically that is more correct, but in practice it gives no advantages, neither in space nor in CPU overhead.
|
||||
|
||||
|
||||
3. XLogReadBufferForRedo does not always load and pin the requested buffer, so we need extra checks that the buffer is really pinned. Also, do not use BufferGetBlockNumber on a buffer returned by XLogReadBufferForRedo.
|
||||
- Why?
|
||||
XLogReadBufferForRedo does not pin pages that were not requested by wal-redo. This is specific to the wal-redo Postgres.
|
||||
|
||||
- Alternatives?
|
||||
No
|
||||
|
||||
|
||||
4. Eliminate reporting of some warnings related to hint bits, for example
"page is not marked all-visible but visibility map bit is set in relation".
|
||||
- Why?
|
||||
Hint bits may not be WAL-logged.
|
||||
|
||||
- Alternative?
|
||||
Always wal log any page changes.
|
||||
|
||||
|
||||
5. Maintain last written LSN.
|
||||
- Why?
|
||||
When the compute node requests a page from the page server, it needs to specify an LSN. Ideally this should be the LSN
of the WAL record that last updated the page, but we do not know it, because we do not have the page.
We could use the current WAL flush position, but then there is a high probability that the page server
will block until that piece of WAL is delivered.
As a better approximation, we keep the max LSN of any written page. It would be better to take into account only the LSNs of evicted pages,
but the SMGR API doesn't provide that knowledge.
|
||||
|
||||
- Alternatives?
|
||||
Maintain map of LSNs of evicted pages.
|
||||
|
||||
|
||||
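As a purely conceptual sketch (the real change lives in the Postgres buffer manager and is written in C), the bookkeeping amounts to remembering the maximum LSN of any written-out page and using it as the request LSN:

```python
class LastWrittenLsnTracker:
    """Toy model of the 'last written LSN' approximation described in item 5."""

    def __init__(self) -> None:
        self.last_written_lsn = 0

    def on_page_write(self, page_lsn: int) -> None:
        # Called whenever a page is written out of the buffer cache.
        self.last_written_lsn = max(self.last_written_lsn, page_lsn)

    def request_lsn(self) -> int:
        # LSN to attach to a GetPage@LSN request: recent enough to be correct,
        # but usually behind the WAL flush position, so the page server does
        # not have to wait for the very latest WAL to arrive.
        return self.last_written_lsn
```
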
6. Launching Postgres without WAL.
|
||||
- Why?
|
||||
In the Zenith architecture the compute node is stateless, so when launching a
compute node we need to provide some dummy PG_DATADIR. Relation pages
can be requested on demand from the page server, but Postgres still needs some non-relational data:
control and configuration files, SLRUs, and so on.
This is currently implemented using a basebackup (not to be confused with pg_basebackup), created
by the pageserver. The tarball includes config/control files, SLRUs and the required directories.
Since the pageserver does not keep the original (non-scattered) WAL segments, it includes in
this tarball a dummy WAL segment that contains only a SHUTDOWN_CHECKPOINT record at the beginning of the segment,
whose redo field points to the end of WAL. This allows loading the checkpoint record in a more or less
standard way with minimal changes to Postgres, but some special handling is still needed,
including restoring the previous record position from the zenith.signal file.
We also have to correctly initialize the header of the last WAL page (pointed to by checkpoint.redo)
to pass the checks performed by XLogReader.
|
||||
|
||||
- Alternatives?
|
||||
We could omit the fake WAL segment from the tarball entirely and modify xlog.c to load the checkpoint record
in a special way, but that would only increase the number of changes in xlog.c.
|
||||
|
||||
7. Add redo_read_buffer_filter callback to XLogReadBufferForRedoExtended
|
||||
- Why?
|
||||
We need a way for the wal-redo Postgres to ignore pages that were not requested by the pageserver.
The wal-redo Postgres then reconstructs only the requested page and returns BLK_DONE for all others,
which means that no recovery is needed for them.
|
||||
|
||||
- Alternatives?
|
||||
No
|
||||
|
||||
8. Enforce WAL logging of sequence updates.
|
||||
- Why?
|
||||
For performance reasons Postgres does not want to log each fetch of a value from a sequence,
so it pre-logs a few fetches in advance. In the event of a crash we can lose
(skip over) as many values as were pre-logged.
This doesn't work with Zenith, because the page holding the sequence value can be evicted from the buffer cache
and we would get a gap in sequence values even without a crash.
|
||||
|
||||
- Alternatives:
|
||||
Do not try to preserve sequential order but avoid performance penalty.
|
||||
|
||||
|
||||
9. Treat unlogged tables as normal (permanent) tables.
|
||||
- Why?
|
||||
Unlogged tables are not transient, so they have to survive a node restart (unlike temporary tables).
But since the compute node is stateless, we need to persist their data to the storage node,
and that can only be done through the WAL.
|
||||
|
||||
- Alternatives?
|
||||
* Store unlogged tables locally (violates requirement of stateless compute nodes).
|
||||
* Prohibit unlogged tables at all.
|
||||
|
||||
|
||||
10. Support starting Postgres in wal-redo mode
|
||||
- Why?
|
||||
To be able to apply WAL record and reconstruct pages at page server.
|
||||
|
||||
- Alternatives?
|
||||
* Rewrite redo handlers in Rust
|
||||
* Do not reconstruct pages at page server at all and do it at compute node.
|
||||
|
||||
|
||||
11. WAL proposer
|
||||
- Why?
|
||||
The WAL proposer communicates with the safekeepers and ensures WAL durability via quorum writes.
It is currently implemented as a patch to the standard WAL sender.
|
||||
|
||||
- Alternatives?
|
||||
Could be moved to an extension if some extra callbacks were added to the WAL sender code.
|
||||
|
||||
|
||||
12. Secure Computing BPF API wrapper.
|
||||
- Why?
|
||||
Pageserver delegates complex WAL decoding duties to Postgres,
|
||||
which means that the latter might fall victim to carefully designed
|
||||
malicious WAL records and start doing harmful things to the system.
|
||||
To prevent this, it has been decided to limit possible interactions
|
||||
with the outside world using the Secure Computing BPF mode.
|
||||
|
||||
- Alternatives:
|
||||
* Rewrite redo handlers in Rust.
|
||||
* Add more checks to guarantee correctness of WAL records.
|
||||
* Move seccomp.c to extension
|
||||
* Many other discussed approaches to neutralize incorrect WAL records vulnerabilities.
|
||||
|
||||
|
||||
13. Callbacks for replica feedbacks
|
||||
- Why?
|
||||
Allowing the walproposer to interact with the walsender code.
|
||||
|
||||
- Alternatives
|
||||
Copy walsender code to walproposer.
|
||||
|
||||
|
||||
14. Support multiple SMGR implementations.
|
||||
- Why?
|
||||
Postgres provides an abstract API for the storage manager, but it has only one implementation
and provides no way to replace it with a custom storage manager.
|
||||
|
||||
- Alternatives?
|
||||
None.
|
||||
|
||||
|
||||
15. Calculate database size as sum of all database relations.
|
||||
- Why?
|
||||
Postgres calculates database size by traversing the data directory,
but since the Zenith compute node is stateless we cannot do that.
|
||||
|
||||
- Alternatives?
|
||||
Send the request directly to the pageserver and calculate the real (physical) size
of the Zenith representation of the database/timeline, rather than summing the logical size of all relations.
|
||||
|
||||
|
||||
-----------------------------------------------
|
||||
Not currently committed but proposed:
|
||||
|
||||
1. Disable ring buffer buffer manager strategies
|
||||
- Why?
|
||||
Postgres tries to avoid cache flushing caused by bulk operations (COPY, seqscan, vacuum, ...):
even if there is free space in the buffer cache, pages may be evicted.
The negative effect of this can be partly compensated by the file system cache, but in Zenith
the cost of requesting a page from the page server is much higher.
|
||||
|
||||
- Alternatives?
|
||||
Instead of just prohibiting ring buffer we may try to implement more flexible eviction policy,
|
||||
for example copy evicted page from ring buffer to some other buffer if there is free space
|
||||
in buffer cache.
|
||||
|
||||
2. Disable marking page as dirty when hint bits are set.
|
||||
- Why?
|
||||
Postgres has to modify a page twice: first when some tuple is updated, and a second time when
hint bits are set. WAL-logging hint bit updates requires an FPI, which significantly increases the size of the WAL.
|
||||
|
||||
- Alternatives?
|
||||
Add special WAL record for setting page hints.
|
||||
|
||||
3. Prefetching
|
||||
- Why?
|
||||
Since pages in Zenith are loaded on demand, to reduce node startup time
and also speed up some massive queries we need a mechanism for bulk loading that
reduces the page-request round-trip overhead.

Currently Postgres supports prefetching only for bitmap scans.
In Zenith we also use prefetch for sequential and index scans. For a sequential scan we prefetch
some number of following pages; for an index scan we prefetch the heap pages addressed by the TIDs.
|
||||
|
||||
4. Prewarming.
|
||||
- Why?
|
||||
Short downtime (in other words, fast compute node restart time) is one of the key features of Zenith,
but the request-response round-trip overhead of loading pages on demand can make the warm-up of a freshly started node quite slow.
We can capture the state of the compute node's buffer cache and send a bulk request for those pages at startup.
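
A minimal sketch of that idea, assuming a hypothetical bulk `request_pages` call on the page-server client (the names here are illustrative, not the real interface):

```python
def capture_buffer_cache_state(buffer_cache):
    # Remember which (relation, block) pairs were resident before shutdown.
    return [(buf.rel, buf.blkno) for buf in buffer_cache if buf.valid]

def prewarm(pageserver_client, saved_state, lsn):
    # On startup, ask the page server for all previously resident pages in one
    # bulk request instead of paying a round trip per page.
    pageserver_client.request_pages(saved_state, at_lsn=lsn)
```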
38
docs/docker.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# Docker images of Zenith
|
||||
|
||||
## Images
|
||||
|
||||
Currently we build two main images:
|
||||
|
||||
- [zenithdb/zenith](https://hub.docker.com/repository/docker/zenithdb/zenith) — image with pre-built `pageserver`, `wal_acceptor` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile).
|
||||
- [zenithdb/compute-node](https://hub.docker.com/repository/docker/zenithdb/compute-node) — compute node image with pre-built Postgres binaries from [zenithdb/postgres](https://github.com/zenithdb/postgres).
|
||||
|
||||
And two intermediate images used either to reduce build time or to deliver some additional binary tools from other repos:
|
||||
|
||||
- [zenithdb/build](https://hub.docker.com/repository/docker/zenithdb/build) — image with all the dependencies required to build Zenith and compute node images. This image is based on `rust:slim-buster`, so it also has a proper `rust` environment. Built from [/Dockerfile.build](/Dockerfile.build).
|
||||
- [zenithdb/compute-tools](https://hub.docker.com/repository/docker/zenithdb/compute-tools) — compute node configuration management tools.
|
||||
|
||||
## Building pipeline
|
||||
|
||||
1. Image `zenithdb/compute-tools` is re-built automatically.
|
||||
|
||||
2. Image `zenithdb/build` is built manually. If you want to introduce any new compile time dependencies to Zenith or compute node you have to update this image as well, build it and push to Docker Hub.
|
||||
|
||||
Build:
|
||||
```sh
|
||||
docker build -t zenithdb/build:buster -f Dockerfile.build .
|
||||
```
|
||||
|
||||
Login:
|
||||
```sh
|
||||
docker login
|
||||
```
|
||||
|
||||
Push to Docker Hub:
|
||||
```sh
|
||||
docker push zenithdb/build:buster
|
||||
```
|
||||
|
||||
3. Image `zenithdb/compute-node` is built independently in the [zenithdb/postgres](https://github.com/zenithdb/postgres) repo.
|
||||
|
||||
4. Image `zenithdb/zenith` is built in this repo after a successful `release` test run and is pushed to Docker Hub automatically.
|
||||
196
docs/glossary.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# Glossary
|
||||
|
||||
### Authentication
|
||||
|
||||
### Base image (page image)
|
||||
|
||||
### Basebackup
|
||||
|
||||
A tarball with files needed to bootstrap a compute node[] and a corresponding command to create it.
|
||||
NOTE: It has nothing to do with PostgreSQL pg_basebackup.
|
||||
|
||||
### Branch
|
||||
|
||||
We can create a branch at a certain LSN using the `zenith branch` command.
|
||||
Each Branch lives in a corresponding timeline[] and has an ancestor[].
|
||||
|
||||
|
||||
### Checkpoint (PostgreSQL)
|
||||
|
||||
NOTE: This is an overloaded term.
|
||||
|
||||
A checkpoint record in the WAL marks a point in the WAL sequence at which it is guaranteed that all data files have been updated with all information from shared memory modified before that checkpoint;
|
||||
|
||||
### Checkpoint (Layered repository)
|
||||
|
||||
NOTE: This is an overloaded term.
|
||||
|
||||
Whenever enough WAL has been accumulated in memory, the page server []
|
||||
writes out the changes from in-memory layers into new layer files[]. This process
|
||||
is called "checkpointing". The page server only creates layer files for
|
||||
relations that have been modified since the last checkpoint.
|
||||
|
||||
### Compute node
|
||||
|
||||
Stateless Postgres node that stores data in pageserver.
|
||||
|
||||
### Garbage collection
|
||||
|
||||
### Fork
|
||||
|
||||
Each of the separate segmented file sets in which a relation is stored. The main fork is where the actual data resides. There also exist two secondary forks for metadata: the free space map and the visibility map.
|
||||
Each PostgreSQL fork is considered a separate relish.
|
||||
|
||||
### Layer
|
||||
|
||||
Each layer corresponds to one RELISH_SEG_SIZE slice of a relish in a range of LSNs.
|
||||
There are two kinds of layers, in-memory and on-disk layers. In-memory
|
||||
layers are used to ingest incoming WAL, and provide fast access
|
||||
to the recent page versions. On-disk layers are stored as files on disk, and
|
||||
are immutable.
|
||||
### Layer file (on-disk layer)
|
||||
|
||||
Layered repository on-disk format is based on immutable files. The
|
||||
files are called "layer files". Each file corresponds to one RELISH_SEG_SIZE
|
||||
segment of a PostgreSQL relation fork. There are two kinds of layer
|
||||
files: image files and delta files. An image file contains a
|
||||
"snapshot" of the segment at a particular LSN, and a delta file
|
||||
contains WAL records applicable to the segment, in a range of LSNs.
|
||||
|
||||
### Layer map
|
||||
|
||||
The layer map tracks what layers exist for all the relishes in a timeline.
|
||||
### Layered repository
|
||||
|
||||
Zenith repository implementation that keeps data in layers.
|
||||
### LSN
|
||||
|
||||
|
||||
### Page (block)
|
||||
|
||||
The basic structure used to store relation data. All pages are of the same size.
|
||||
This is the unit of data exchange between compute node and pageserver.
|
||||
|
||||
### Pageserver
|
||||
|
||||
Zenith storage engine: repositories + wal receiver + page service + wal redo.
|
||||
|
||||
### Page service
|
||||
|
||||
The Page Service listens for GetPage@LSN requests from the Compute Nodes,
|
||||
and responds with pages from the repository.
|
||||
|
||||
|
||||
### PITR (Point-in-time-recovery)
|
||||
|
||||
PostgreSQL's ability to restore up to a specified LSN.
|
||||
|
||||
### Primary node
|
||||
|
||||
|
||||
### Proxy
|
||||
|
||||
Postgres protocol proxy/router.
|
||||
This service listens on the psql port, can check auth via an external service,
and can create new databases and accounts (the control plane API in our case).
|
||||
|
||||
### Relation
|
||||
|
||||
The generic term in PostgreSQL for all objects in a database that have a name and a list of attributes defined in a specific order.
|
||||
|
||||
### Relish
|
||||
|
||||
We call each relation and other file that is stored in the
|
||||
repository a "relish". It comes from "rel"-ish, as in "kind of a
|
||||
rel", because it covers relations as well as other things that are
|
||||
not relations, but are treated similarly for the purposes of the
|
||||
storage layer.
|
||||
|
||||
### Replication slot
|
||||
|
||||
|
||||
### Replica node
|
||||
|
||||
|
||||
### Repository
|
||||
|
||||
Repository stores multiple timelines, forked off from the same initial call to 'initdb'
|
||||
and has associated WAL redo service.
|
||||
One repository corresponds to one Tenant.
|
||||
|
||||
### Retention policy
|
||||
|
||||
How much history do we need to keep around for PITR and read-only nodes?
|
||||
|
||||
### SLRU
|
||||
|
||||
SLRUs include pg_clog, pg_multixact/members, and
|
||||
pg_multixact/offsets. There are other SLRUs in PostgreSQL, but
|
||||
they don't need to be stored permanently (e.g. pg_subtrans),
|
||||
or we do not support them in zenith yet (pg_commit_ts).
|
||||
Each SLRU segment is considered a separate relish[].
|
||||
|
||||
### Tenant (Multitenancy)
|
||||
Tenant represents a single customer, interacting with Zenith.
|
||||
Wal redo[] activity, timelines[], layers[] are managed for each tenant independently.
|
||||
One pageserver[] can serve multiple tenants at once.
|
||||
One safekeeper
|
||||
|
||||
See `docs/multitenancy.md` for more.
|
||||
|
||||
### Timeline
|
||||
|
||||
Timeline accepts page changes and serves get_page_at_lsn() and
|
||||
get_rel_size() requests. The term "timeline" is used internally
|
||||
in the system, but to users they are exposed as "branches", with
|
||||
human-friendly names.
|
||||
|
||||
NOTE: this has nothing to do with PostgreSQL WAL timelines.
|
||||
|
||||
### XLOG
|
||||
|
||||
PostgreSQL alias for WAL[].
|
||||
|
||||
### WAL (Write-ahead log)
|
||||
|
||||
The journal that keeps track of the changes in the database cluster as user- and system-invoked operations take place. It comprises many individual WAL records[] written sequentially to WAL files[].
|
||||
|
||||
### WAL acceptor, WAL proposer
|
||||
|
||||
In the context of the consensus algorithm, the Postgres
|
||||
compute node is also known as the WAL proposer, and the safekeeper is also known
|
||||
as the acceptor. Those are the standard terms in the Paxos algorithm.
|
||||
|
||||
### WAL receiver (WAL decoder)
|
||||
|
||||
The WAL receiver connects to the external WAL safekeeping service (or
|
||||
directly to the primary) using PostgreSQL physical streaming
|
||||
replication, and continuously receives WAL. It decodes the WAL records,
|
||||
and stores them to the repository.
|
||||
|
||||
We keep one WAL receiver active per timeline.
|
||||
|
||||
### WAL record
|
||||
|
||||
A low-level description of an individual data change.
|
||||
|
||||
### WAL redo
|
||||
|
||||
A service that runs PostgreSQL in a special wal_redo mode
|
||||
to apply given WAL records over an old page image and return new page image.
|
||||
|
||||
### WAL safekeeper
|
||||
|
||||
One node that participates in the quorum. All the safekeepers
|
||||
together form the WAL service.
|
||||
|
||||
### WAL segment (WAL file)
|
||||
|
||||
Also known as WAL segment or WAL segment file. Each of the sequentially-numbered files that provide storage space for WAL. The files are all of the same predefined size and are written in sequential order, interspersing changes as they occur in multiple simultaneous sessions.
|
||||
|
||||
### WAL service
|
||||
|
||||
The service as whole that ensures that WAL is stored durably.
|
||||
|
||||
### Web console
|
||||
|
||||
59
docs/multitenancy.md
Normal file
@@ -0,0 +1,59 @@
|
||||
## Multitenancy
|
||||
|
||||
### Overview
|
||||
|
||||
Zenith supports multitenancy: one pageserver can serve multiple tenants at once. Tenants can be managed via the zenith CLI. During page server setup a tenant can be created with `zenith init --create-tenant`. Tenants can also be added to the system on the fly, without a pageserver restart, using `zenith tenant create`. Tenants use random identifiers that can be represented as a 32-character hexadecimal string, so `zenith tenant create` accepts the desired tenant id as an optional argument. The concept of timelines/branches works independently per tenant.
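
For illustration only (the CLI generates these identifiers itself), a 32-character hexadecimal tenant id is simply 16 random bytes rendered as hex:

```python
import secrets

# 16 random bytes -> 32 hex characters, e.g. 'ee6016ec31116c1b7c33dfdfca38892f'
tenant_id = secrets.token_hex(16)
print(tenant_id)
```
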
### Tenants in other commands
|
||||
|
||||
By default, `zenith init` creates a new tenant on the pageserver. The newly created tenant's id is saved to the CLI config, so other commands can use it automatically when no explicit `--tenantid=<tenantid>` argument is provided. The tenantid therefore appears more often in the internal pageserver interface, whose commands take a tenantid argument to indicate which tenant an operation applies to. The CLI supports creation of new tenants.

Examples for the CLI:
|
||||
|
||||
```sh
|
||||
zenith tenant list
|
||||
|
||||
zenith tenant create // generates new id
|
||||
|
||||
zenith tenant create ee6016ec31116c1b7c33dfdfca38892f
|
||||
|
||||
zenith pg create main // default tenant from zenith init
|
||||
|
||||
zenith pg create main --tenantid=ee6016ec31116c1b7c33dfdfca38892f
|
||||
|
||||
zenith branch --tenantid=ee6016ec31116c1b7c33dfdfca38892f
|
||||
```
|
||||
|
||||
### Data layout
|
||||
|
||||
On the page server, tenants introduce one level of indirection, so the data directory is structured the following way:
|
||||
```
|
||||
<pageserver working directory>
|
||||
├── pageserver.log
|
||||
├── pageserver.pid
|
||||
├── pageserver.toml
|
||||
└── tenants
|
||||
├── 537cffa58a4fa557e49e19951b5a9d6b
|
||||
├── de182bc61fb11a5a6b390a8aed3a804a
|
||||
└── ee6016ec31116c1b7c33dfdfca38891f
|
||||
```
|
||||
Wal redo activity and timelines are managed for each tenant independently.
|
||||
|
||||
For the local environment used, for example, in tests, there is also a new level of indirection for tenants. It affects the `pgdatadirs` directory, which now contains a `tenants` subdirectory, so the structure looks the following way:
|
||||
|
||||
```
|
||||
pgdatadirs
|
||||
└── tenants
|
||||
├── de182bc61fb11a5a6b390a8aed3a804a
|
||||
│ └── main
|
||||
└── ee6016ec31116c1b7c33dfdfca38892f
|
||||
└── main
|
||||
```
|
||||
|
||||
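
For illustration only, the tenant ids known to a pageserver can be read back from this directory layout; the working-directory path below is a placeholder:

```python
from pathlib import Path

# Placeholder path to the pageserver working directory.
workdir = Path("<pageserver working directory>")

# Each subdirectory of 'tenants' is named after a tenant id.
tenant_ids = sorted(p.name for p in (workdir / "tenants").iterdir() if p.is_dir())
print(tenant_ids)
```
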
### Changes to postgres
|
||||
|
||||
The tenant id is passed to postgres via a GUC, the same way as the timeline. The tenant id is added to the commands issued to the pageserver, namely pagestream and callmemaybe. The tenant id also exists in the ServerInfo structure; this is needed to pass the value to the wal receiver so that it can forward it to the pageserver.
|
||||
|
||||
### Safety
|
||||
|
||||
For now, a particular tenant can only appear on a particular pageserver. The set of WAL acceptors is also pinned to a particular (tenantid, timeline) pair, so there can be only one writer for a given (tenantid, timeline).
|
||||
81
docs/sourcetree.md
Normal file
@@ -0,0 +1,81 @@
|
||||
## Source tree layout
|
||||
|
||||
Below you will find a brief overview of each subdir in the source tree in alphabetical order.
|
||||
|
||||
`/control_plane`:
|
||||
|
||||
Local control plane.
|
||||
Functions to start, configure and stop pageserver and postgres instances running as local processes.
|
||||
Intended to be used in integration tests and in CLI tools for local installations.
|
||||
|
||||
`/docs`:
|
||||
|
||||
Documentation of Zenith features and concepts.
|
||||
Now it is mostly dev documentation.
|
||||
|
||||
`/monitoring`:
|
||||
|
||||
TODO
|
||||
|
||||
`/pageserver`:
|
||||
|
||||
Zenith storage service.
|
||||
The pageserver has a few different duties:
|
||||
|
||||
- Store and manage the data.
|
||||
- Generate a tarball with files needed to bootstrap ComputeNode.
|
||||
- Respond to GetPage@LSN requests from the Compute Nodes.
|
||||
- Receive WAL from the WAL service and decode it.
|
||||
- Replay WAL that's applicable to the chunks that the Page Server maintains
|
||||
|
||||
For more detailed info, see `/pageserver/README`
|
||||
|
||||
`/postgres_ffi`:
|
||||
|
||||
Utility functions for interacting with PostgreSQL file formats.
|
||||
Misc constants, copied from PostgreSQL headers.
|
||||
|
||||
`/proxy`:
|
||||
|
||||
Postgres protocol proxy/router.
|
||||
This service listens on the psql port, can check auth via an external service,
and can create new databases and accounts (the control plane API in our case).
|
||||
|
||||
`/test_runner`:
|
||||
|
||||
Integration tests, written in Python using the `pytest` framework.
|
||||
|
||||
`/vendor/postgres`:
|
||||
|
||||
PostgreSQL source tree, with the modifications needed for Zenith.
|
||||
|
||||
`/vendor/postgres/contrib/zenith`:
|
||||
|
||||
PostgreSQL extension that implements storage manager API and network communications with remote page server.
|
||||
|
||||
`/vendor/postgres/contrib/zenith_test_utils`:
|
||||
|
||||
PostgreSQL extension that contains functions needed for testing and debugging.
|
||||
|
||||
`/walkeeper`:
|
||||
|
||||
The Zenith WAL service that receives WAL from primary compute nodes and streams it to the pageserver.
|
||||
It acts as a holding area and redistribution center for recently generated WAL.
|
||||
|
||||
For more detailed info, see `/walkeeper/README`
|
||||
|
||||
`/workspace_hack`:
|
||||
The workspace_hack crate exists only to pin down some dependencies.
|
||||
|
||||
`/zenith`
|
||||
|
||||
Main entry point for the 'zenith' CLI utility.
|
||||
TODO: Doesn't it belong to control_plane?
|
||||
|
||||
`/zenith_metrics`:
|
||||
|
||||
Helpers for exposing Prometheus metrics from the server.
|
||||
|
||||
`/zenith_utils`:
|
||||
|
||||
Helpers that are shared between other crates in this repository.
|
||||
1
integration_tests/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
tmp_check/
|
||||
@@ -1,17 +0,0 @@
|
||||
[package]
|
||||
name = "integration_tests"
|
||||
version = "0.1.0"
|
||||
authors = ["Stas Kelvich <stas@zenith.tech>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
lazy_static = "1.4.0"
|
||||
rand = "0.8.3"
|
||||
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="a0d067b66447951d1276a53fb09886539c3fa094" }
|
||||
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="a0d067b66447951d1276a53fb09886539c3fa094" }
|
||||
|
||||
pageserver = { path = "../pageserver" }
|
||||
walkeeper = { path = "../walkeeper" }
|
||||
control_plane = { path = "../control_plane" }
|
||||
@@ -1,11 +0,0 @@
|
||||
// test node resettlement to an empty datadir
|
||||
|
||||
// TODO
|
||||
/*
|
||||
#[test]
|
||||
fn test_resettlement() {}
|
||||
|
||||
// test seq scan of everythin after restart
|
||||
#[test]
|
||||
fn test_cold_seqscan() {}
|
||||
*/
|
||||
@@ -1,8 +0,0 @@
|
||||
// TODO
|
||||
/*
|
||||
#[test]
|
||||
fn test_actions() {}
|
||||
|
||||
#[test]
|
||||
fn test_regress() {}
|
||||
*/
|
||||
@@ -1,150 +0,0 @@
|
||||
// mod control_plane;
|
||||
use control_plane::compute::ComputeControlPlane;
|
||||
use control_plane::local_env;
|
||||
use control_plane::local_env::PointInTime;
|
||||
use control_plane::storage::TestStorageControlPlane;
|
||||
|
||||
// XXX: force all redo at the end
|
||||
// -- restart + seqscan won't read deleted stuff
|
||||
// -- pageserver api endpoint to check all rels
|
||||
#[test]
|
||||
fn test_redo_cases() {
|
||||
let local_env = local_env::test_env("test_redo_cases");
|
||||
|
||||
// Start pageserver that reads WAL directly from that postgres
|
||||
let storage_cplane = TestStorageControlPlane::one_page_server(&local_env);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
|
||||
// start postgres
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
let node = compute_cplane.new_test_node(maintli);
|
||||
node.start().unwrap();
|
||||
|
||||
// check basic work with table
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"INSERT INTO t SELECT generate_series(1,100000), 'payload'",
|
||||
);
|
||||
let count: i64 = node
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 5000050000);
|
||||
|
||||
// check 'create table as'
|
||||
node.safe_psql("postgres", "CREATE TABLE t2 AS SELECT * FROM t");
|
||||
let count: i64 = node
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 5000050000);
|
||||
}
|
||||
|
||||
// Runs pg_regress on a compute node
|
||||
#[test]
|
||||
fn test_regress() {
|
||||
let local_env = local_env::test_env("test_regress");
|
||||
|
||||
// Start pageserver that reads WAL directly from that postgres
|
||||
let storage_cplane = TestStorageControlPlane::one_page_server(&local_env);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
|
||||
// start postgres
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
let node = compute_cplane.new_test_node(maintli);
|
||||
node.start().unwrap();
|
||||
|
||||
let status = node.pg_regress();
|
||||
assert!(status.success());
|
||||
}
|
||||
|
||||
// Runs pg_bench on a compute node
|
||||
#[test]
|
||||
fn pgbench() {
|
||||
let local_env = local_env::test_env("pgbench");
|
||||
|
||||
// Start pageserver that reads WAL directly from that postgres
|
||||
let storage_cplane = TestStorageControlPlane::one_page_server(&local_env);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
|
||||
// start postgres
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
let node = compute_cplane.new_test_node(maintli);
|
||||
node.start().unwrap();
|
||||
|
||||
let status = node.pg_bench(10, 100);
|
||||
assert!(status.success());
|
||||
}
|
||||
|
||||
// Run two postgres instances on one pageserver, on different timelines
|
||||
#[test]
|
||||
fn test_pageserver_two_timelines() {
|
||||
let local_env = local_env::test_env("test_pageserver_two_timelines");
|
||||
|
||||
// Start pageserver that reads WAL directly from that postgres
|
||||
let storage_cplane = TestStorageControlPlane::one_page_server(&local_env);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
|
||||
// Create new branch at the end of 'main'
|
||||
let startpoint = local_env::find_end_of_wal(&local_env, maintli).unwrap();
|
||||
local_env::create_branch(
|
||||
&local_env,
|
||||
"experimental",
|
||||
PointInTime {
|
||||
timelineid: maintli,
|
||||
lsn: startpoint,
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
let experimentaltli = storage_cplane.get_branch_timeline("experimental");
|
||||
|
||||
// Launch postgres instances on both branches
|
||||
let node1 = compute_cplane.new_test_node(maintli);
|
||||
let node2 = compute_cplane.new_test_node(experimentaltli);
|
||||
node1.start().unwrap();
|
||||
node2.start().unwrap();
|
||||
|
||||
// check node1
|
||||
node1.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
node1.safe_psql(
|
||||
"postgres",
|
||||
"INSERT INTO t SELECT generate_series(1,100000), 'payload'",
|
||||
);
|
||||
let count: i64 = node1
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 5000050000);
|
||||
|
||||
// check node2
|
||||
node2.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
node2.safe_psql(
|
||||
"postgres",
|
||||
"INSERT INTO t SELECT generate_series(100000,200000), 'payload'",
|
||||
);
|
||||
let count: i64 = node2
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 15000150000);
|
||||
}
|
||||
@@ -1,355 +0,0 @@
|
||||
// Restart acceptors one by one while compute is under the load.
|
||||
use control_plane::compute::ComputeControlPlane;
|
||||
use control_plane::local_env;
|
||||
use control_plane::local_env::PointInTime;
|
||||
use control_plane::storage::TestStorageControlPlane;
|
||||
use pageserver::ZTimelineId;
|
||||
|
||||
use rand::Rng;
|
||||
use std::sync::Arc;
|
||||
use std::time::SystemTime;
|
||||
use std::{thread, time};
|
||||
|
||||
const DOWNTIME: u64 = 2;
|
||||
|
||||
#[test]
|
||||
//#[ignore]
|
||||
fn test_embedded_wal_proposer() {
|
||||
let local_env = local_env::test_env("test_embedded_wal_proposer");
|
||||
|
||||
const REDUNDANCY: usize = 3;
|
||||
let storage_cplane = TestStorageControlPlane::fault_tolerant(&local_env, REDUNDANCY);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
|
||||
|
||||
// start postgres
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
let node = compute_cplane.new_test_master_node(maintli);
|
||||
node.append_conf(
|
||||
"postgresql.conf",
|
||||
&format!("wal_acceptors='{}'\n", wal_acceptors),
|
||||
);
|
||||
node.start().unwrap();
|
||||
|
||||
// check basic work with table
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"INSERT INTO t SELECT generate_series(1,100000), 'payload'",
|
||||
);
|
||||
let count: i64 = node
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 5000050000);
|
||||
// check wal files equality
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_acceptors_normal_work() {
|
||||
let local_env = local_env::test_env("test_acceptors_normal_work");
|
||||
|
||||
const REDUNDANCY: usize = 3;
|
||||
let storage_cplane = TestStorageControlPlane::fault_tolerant(&local_env, REDUNDANCY);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
|
||||
|
||||
// start postgres
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
let node = compute_cplane.new_test_master_node(maintli);
|
||||
node.start().unwrap();
|
||||
|
||||
// start proxy
|
||||
let _proxy = node.start_proxy(&wal_acceptors);
|
||||
|
||||
// check basic work with table
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"INSERT INTO t SELECT generate_series(1,100000), 'payload'",
|
||||
);
|
||||
let count: i64 = node
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 5000050000);
|
||||
// check wal files equality
|
||||
}
|
||||
|
||||
// Run page server and multiple safekeepers, and multiple compute nodes running
|
||||
// against different timelines.
|
||||
#[test]
|
||||
fn test_many_timelines() {
|
||||
// Initialize a new repository, and set up WAL safekeepers and page server.
|
||||
const REDUNDANCY: usize = 3;
|
||||
const N_TIMELINES: usize = 5;
|
||||
let local_env = local_env::test_env("test_many_timelines");
|
||||
let storage_cplane = TestStorageControlPlane::fault_tolerant(&local_env, REDUNDANCY);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
|
||||
|
||||
// Create branches
|
||||
let mut timelines: Vec<ZTimelineId> = Vec::new();
|
||||
let maintli = storage_cplane.get_branch_timeline("main"); // main branch
|
||||
timelines.push(maintli);
|
||||
let startpoint = local_env::find_end_of_wal(&local_env, maintli).unwrap();
|
||||
for i in 1..N_TIMELINES {
|
||||
// additional branches
|
||||
let branchname = format!("experimental{}", i);
|
||||
local_env::create_branch(
|
||||
&local_env,
|
||||
&branchname,
|
||||
PointInTime {
|
||||
timelineid: maintli,
|
||||
lsn: startpoint,
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
let tli = storage_cplane.get_branch_timeline(&branchname);
|
||||
timelines.push(tli);
|
||||
}
|
||||
|
||||
// start postgres on each timeline
|
||||
let mut nodes = Vec::new();
|
||||
for tli in timelines {
|
||||
let node = compute_cplane.new_test_node(tli);
|
||||
nodes.push(node.clone());
|
||||
node.start().unwrap();
|
||||
node.start_proxy(&wal_acceptors);
|
||||
}
|
||||
|
||||
// create schema
|
||||
for node in &nodes {
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
}
|
||||
|
||||
// Populate data
|
||||
for node in &nodes {
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"INSERT INTO t SELECT generate_series(1,100000), 'payload'",
|
||||
);
|
||||
}
|
||||
|
||||
// Check data
|
||||
for node in &nodes {
|
||||
let count: i64 = node
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 5000050000);
|
||||
}
|
||||
}
|
||||
|
||||
// Majority is always alive
|
||||
#[test]
|
||||
fn test_acceptors_restarts() {
|
||||
let local_env = local_env::test_env("test_acceptors_restarts");
|
||||
|
||||
// Start pageserver that reads WAL directly from that postgres
|
||||
const REDUNDANCY: usize = 3;
|
||||
const FAULT_PROBABILITY: f32 = 0.01;
|
||||
|
||||
let storage_cplane = TestStorageControlPlane::fault_tolerant(&local_env, REDUNDANCY);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
// start postgres
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
let node = compute_cplane.new_test_master_node(maintli);
|
||||
node.start().unwrap();
|
||||
|
||||
// start proxy
|
||||
let _proxy = node.start_proxy(&wal_acceptors);
|
||||
let mut failed_node: Option<usize> = None;
|
||||
|
||||
// check basic work with table
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
let mut psql = node.open_psql("postgres");
|
||||
for i in 1..=1000 {
|
||||
psql.execute("INSERT INTO t values ($1, 'payload')", &[&i])
|
||||
.unwrap();
|
||||
let prob: f32 = rng.gen();
|
||||
if prob <= FAULT_PROBABILITY {
|
||||
if let Some(node) = failed_node {
|
||||
storage_cplane.wal_acceptors[node].start();
|
||||
failed_node = None;
|
||||
} else {
|
||||
let node: usize = rng.gen_range(0..REDUNDANCY);
|
||||
failed_node = Some(node);
|
||||
storage_cplane.wal_acceptors[node].stop().unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
let count: i64 = node
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 500500);
|
||||
}
|
||||
|
||||
fn start_acceptor(cplane: &Arc<TestStorageControlPlane>, no: usize) {
|
||||
let cp = cplane.clone();
|
||||
thread::spawn(move || {
|
||||
thread::sleep(time::Duration::from_secs(DOWNTIME));
|
||||
cp.wal_acceptors[no].start();
|
||||
});
|
||||
}
|
||||
|
||||
// Stop majority of acceptors while compute is under the load. Boot
|
||||
// them again and check that nothing was losed. Repeat.
|
||||
// N_CRASHES env var
|
||||
#[test]
|
||||
fn test_acceptors_unavailability() {
|
||||
let local_env = local_env::test_env("test_acceptors_unavailability");
|
||||
|
||||
// Start pageserver that reads WAL directly from that postgres
|
||||
const REDUNDANCY: usize = 2;
|
||||
|
||||
let storage_cplane = TestStorageControlPlane::fault_tolerant(&local_env, REDUNDANCY);
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
|
||||
|
||||
// start postgres
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
let node = compute_cplane.new_test_master_node(maintli);
|
||||
node.start().unwrap();
|
||||
|
||||
// start proxy
|
||||
let _proxy = node.start_proxy(&wal_acceptors);
|
||||
|
||||
// check basic work with table
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
let mut psql = node.open_psql("postgres");
|
||||
psql.execute("INSERT INTO t values (1, 'payload')", &[])
|
||||
.unwrap();
|
||||
|
||||
// Shut down all wal acceptors
|
||||
storage_cplane.wal_acceptors[0].stop().unwrap();
|
||||
let cp = Arc::new(storage_cplane);
|
||||
start_acceptor(&cp, 0);
|
||||
let now = SystemTime::now();
|
||||
psql.execute("INSERT INTO t values (2, 'payload')", &[])
|
||||
.unwrap();
|
||||
// Here we check that the query above was hanging
|
||||
// while wal_acceptor was unavailiable
|
||||
assert!(now.elapsed().unwrap().as_secs() >= DOWNTIME);
|
||||
psql.execute("INSERT INTO t values (3, 'payload')", &[])
|
||||
.unwrap();
|
||||
|
||||
cp.wal_acceptors[1].stop().unwrap();
|
||||
start_acceptor(&cp, 1);
|
||||
psql.execute("INSERT INTO t values (4, 'payload')", &[])
|
||||
.unwrap();
|
||||
// Here we check that the query above was hanging
|
||||
// while wal_acceptor was unavailiable
|
||||
assert!(now.elapsed().unwrap().as_secs() >= 2 * DOWNTIME);
|
||||
|
||||
psql.execute("INSERT INTO t values (5, 'payload')", &[])
|
||||
.unwrap();
|
||||
|
||||
let count: i64 = node
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
// Ensure that all inserts succeeded.
|
||||
// Including ones that were waiting for wal acceptor restart.
|
||||
assert_eq!(count, 15);
|
||||
}
|
||||
|
||||
fn simulate_failures(cplane: Arc<TestStorageControlPlane>) {
|
||||
let mut rng = rand::thread_rng();
|
||||
let n_acceptors = cplane.wal_acceptors.len();
|
||||
let failure_period = time::Duration::from_secs(1);
|
||||
while cplane.is_running() {
|
||||
thread::sleep(failure_period);
|
||||
let mask: u32 = rng.gen_range(0..(1 << n_acceptors));
|
||||
for i in 0..n_acceptors {
|
||||
if (mask & (1 << i)) != 0 {
|
||||
cplane.wal_acceptors[i].stop().unwrap();
|
||||
}
|
||||
}
|
||||
thread::sleep(failure_period);
|
||||
for i in 0..n_acceptors {
|
||||
if (mask & (1 << i)) != 0 {
|
||||
cplane.wal_acceptors[i].start();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Race condition test
|
||||
#[test]
|
||||
fn test_race_conditions() {
|
||||
let local_env = local_env::test_env("test_race_conditions");
|
||||
|
||||
// Start pageserver that reads WAL directly from that postgres
|
||||
const REDUNDANCY: usize = 3;
|
||||
|
||||
let storage_cplane = Arc::new(TestStorageControlPlane::fault_tolerant(
|
||||
&local_env, REDUNDANCY,
|
||||
));
|
||||
let mut compute_cplane = ComputeControlPlane::local(&local_env, &storage_cplane.pageserver);
|
||||
let wal_acceptors = storage_cplane.get_wal_acceptor_conn_info();
|
||||
|
||||
// start postgres
|
||||
let maintli = storage_cplane.get_branch_timeline("main");
|
||||
let node = compute_cplane.new_test_master_node(maintli);
|
||||
node.start().unwrap();
|
||||
|
||||
// start proxy
|
||||
let _proxy = node.start_proxy(&wal_acceptors);
|
||||
|
||||
// check basic work with table
|
||||
node.safe_psql(
|
||||
"postgres",
|
||||
"CREATE TABLE t(key int primary key, value text)",
|
||||
);
|
||||
|
||||
let cp = storage_cplane.clone();
|
||||
let failures_thread = thread::spawn(move || {
|
||||
simulate_failures(cp);
|
||||
});
|
||||
|
||||
let mut psql = node.open_psql("postgres");
|
||||
for i in 1..=1000 {
|
||||
psql.execute("INSERT INTO t values ($1, 'payload')", &[&i])
|
||||
.unwrap();
|
||||
}
|
||||
let count: i64 = node
|
||||
.safe_psql("postgres", "SELECT sum(key) FROM t")
|
||||
.first()
|
||||
.unwrap()
|
||||
.get(0);
|
||||
println!("sum = {}", count);
|
||||
assert_eq!(count, 500500);
|
||||
|
||||
storage_cplane.stop();
|
||||
failures_thread.join().unwrap();
|
||||
}
|
||||
23
mgmt-console/.gitignore
vendored
@@ -1,23 +0,0 @@
|
||||
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
|
||||
|
||||
# dependencies
|
||||
/node_modules
|
||||
/.pnp
|
||||
.pnp.js
|
||||
|
||||
# testing
|
||||
/coverage
|
||||
|
||||
# production
|
||||
/build
|
||||
|
||||
# misc
|
||||
.DS_Store
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
@@ -1,55 +0,0 @@
|
||||
Mock implementation of a management console.
|
||||
|
||||
See demo-howto.txt for usage.
|
||||
|
||||
Building and Installation
|
||||
-------------------------
|
||||
|
||||
To compile Postgres:
|
||||
sudo apt build-dep postgresql
|
||||
sudo apt install bison flex libz-dev libssl-dev
|
||||
sudo apt install ccache
|
||||
sudo apt install libcurl4-openssl-dev libxml2-dev
|
||||
|
||||
For the webapp:
|
||||
# NOTE: This requires at least version 1.1.0 of python3-flask. That's not
|
||||
# available in Debian Buster, need at least Bullseye.
|
||||
|
||||
sudo apt install python3 python3-flask python3-pip npm webpack
|
||||
pip3 install Flask-BasicAuth
|
||||
pip3 install boto3
|
||||
|
||||
Clone, compile and install the patched version of Postgres:
|
||||
|
||||
git clone https://github.com/libzenith/postgres.git
|
||||
cd postgres
|
||||
git checkout zenith-experiments
|
||||
./configure --enable-debug --enable-cassert --with-openssl --prefix=/home/heikki/pgsql-install --with-libxml CC="ccache gcc" CFLAGS="-O0"
|
||||
make -j4 -s install
|
||||
|
||||
Get the webapp:
|
||||
cd ~
|
||||
git clone https://github.com/libzenith/zenith-mgmt-console.git
|
||||
cd zenith-mgmt-console
|
||||
mkdir pgdatadirs
|
||||
|
||||
|
||||
openssl req -new -x509 -days 365 -nodes -text -out server.crt \
|
||||
-keyout server.key -subj "/CN=zenith-demo"
|
||||
|
||||
For Mock S3 server (unless you want to test against a real cloud service):
|
||||
sudo apt install python3-tornado
|
||||
|
||||
cd ~/zenith-mgmt-console
|
||||
git clone https://github.com/hlinnaka/ms3.git
|
||||
|
||||
Compile & run it:
|
||||
npm install
|
||||
webpack # compile React app
|
||||
|
||||
BASIC_AUTH_PASSWORD=<password> ./launch-local.sh
|
||||
|
||||
|
||||
You can view the contents of the S3 bucket with browser:
|
||||
|
||||
http://<server>/list_bucket
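The same HTTP endpoints that the web UI uses can also be driven directly. A
minimal sketch, assuming the console is reachable on flask's default port 5000
and that BASIC_AUTH_PASSWORD is exported as above; the endpoint paths and the
"walpos" form field come from app.py later in this diff, and the example WAL
position is made up:

    import os
    import requests

    BASE = "http://localhost:5000"
    AUTH = ("zenith", os.environ["BASIC_AUTH_PASSWORD"])

    # Initialize the primary and push a base image to the bucket.
    print(requests.post(BASE + "/api/init_primary", auth=AUTH).text)
    print(requests.post(BASE + "/api/zenith_push", auth=AUTH).text)

    # Create a read-only standby at a given WAL position.
    print(requests.post(BASE + "/api/create_standby", auth=AUTH,
                        data={"walpos": "0/D000000"}).text)

    # See which servers are running.
    print(requests.get(BASE + "/api/server_status", auth=AUTH).json())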
|
||||
@@ -1,340 +0,0 @@
|
||||
from flask import request
|
||||
from flask_basicauth import BasicAuth
|
||||
from flask import render_template
|
||||
from subprocess import PIPE, STDOUT, run, Popen
|
||||
import html
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import logging
|
||||
import time
|
||||
|
||||
import boto3
|
||||
from boto3.session import Session
|
||||
from botocore.client import Config
|
||||
from botocore.handlers import set_list_objects_encoding_type_url
|
||||
|
||||
from flask import Flask
|
||||
|
||||
import waldump
|
||||
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
app.config['BASIC_AUTH_USERNAME'] = 'zenith'
|
||||
app.config['BASIC_AUTH_PASSWORD'] = os.getenv('BASIC_AUTH_PASSWORD')
|
||||
app.config['BASIC_AUTH_FORCE'] = True
|
||||
|
||||
basic_auth = BasicAuth(app)
|
||||
|
||||
# S3 configuration:
|
||||
|
||||
ENDPOINT = os.getenv('S3_ENDPOINT', 'https://localhost:9000')
|
||||
ACCESS_KEY = os.getenv('S3_ACCESSKEY', 'minioadmin')
|
||||
SECRET = os.getenv('S3_SECRET', '')
|
||||
BUCKET = os.getenv('S3_BUCKET', 'foobucket')
|
||||
|
||||
print("Using bucket at " + ENDPOINT);
|
||||
|
||||
#boto3.set_stream_logger('botocore', logging.DEBUG)
|
||||
|
||||
session = Session(aws_access_key_id=ACCESS_KEY,
|
||||
aws_secret_access_key=SECRET,
|
||||
region_name=os.getenv('S3_REGION', 'auto'))
|
||||
|
||||
# needed for google cloud?
|
||||
session.events.unregister('before-parameter-build.s3.ListObjects',
|
||||
set_list_objects_encoding_type_url)
|
||||
|
||||
s3resource = session.resource('s3',
|
||||
endpoint_url=ENDPOINT,
|
||||
verify=False,
|
||||
config=Config(signature_version='s3v4'))
|
||||
s3bucket = s3resource.Bucket(BUCKET)
|
||||
|
||||
s3_client = boto3.client('s3',
|
||||
endpoint_url=ENDPOINT,
|
||||
verify=False,
|
||||
config=Config(signature_version='s3v4'),
|
||||
aws_access_key_id=ACCESS_KEY,
|
||||
aws_secret_access_key=SECRET)
|
||||
|
||||
|
||||
@app.route("/")
|
||||
def index():
|
||||
return render_template("index.html")
|
||||
|
||||
|
||||
@app.route("/api/waldump")
|
||||
def render_waldump():
|
||||
return render_template("waldump.html")
|
||||
|
||||
@app.route('/api/fetch_wal')
|
||||
def fetch_wal():
|
||||
return waldump.fetch_wal(request, s3bucket);
|
||||
|
||||
@app.route("/api/server_status")
|
||||
def server_status():
|
||||
dirs = os.listdir("pgdatadirs")
|
||||
dirs.sort()
|
||||
|
||||
primary = None
|
||||
standbys = []
|
||||
|
||||
for dirname in dirs:
|
||||
|
||||
result = run("pg_ctl status -D pgdatadirs/" + dirname, stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
|
||||
|
||||
srv = {
|
||||
'datadir': dirname,
|
||||
'status': result.stdout,
|
||||
'port': None
|
||||
}
|
||||
|
||||
if dirname == 'primary':
|
||||
primary = srv;
|
||||
primary['port'] = 5432;
|
||||
else:
|
||||
standby_match = re.search('standby_([0-9]+)', dirname)
|
||||
if standby_match:
|
||||
srv['port'] = int(standby_match.group(1))
|
||||
|
||||
standbys.append(srv);
|
||||
|
||||
return {'primary': primary, 'standbys': standbys}
|
||||
|
||||
@app.route('/api/list_bucket')
|
||||
def list_bucket():
|
||||
|
||||
response = 'cloud bucket contents:<br>\n'
|
||||
|
||||
for file in s3bucket.objects.all():
|
||||
response = response + html.escape(file.key) + '<br>\n'
|
||||
|
||||
return response
|
||||
|
||||
def walpos_str(walpos):
|
||||
return '{:X}/{:X}'.format(walpos >> 32, walpos & 0xFFFFFFFF)
|
||||
|
||||
@app.route('/api/bucket_summary')
|
||||
def bucket_summary():
|
||||
|
||||
nonrelimages = []
|
||||
minwal = int(0)
|
||||
maxwal = int(0)
|
||||
minseqwal = int(0)
|
||||
maxseqwal = int(0)
|
||||
|
||||
for file in s3bucket.objects.all():
|
||||
path = file.key
|
||||
match = re.search('nonreldata/nonrel_([0-9A-F]+).tar', path)
|
||||
if match:
|
||||
walpos = int(match.group(1), 16)
|
||||
nonrelimages.append(walpos_str(walpos))
|
||||
|
||||
match = re.search('nonreldata/nonrel_([0-9A-F]+)-([0-9A-F]+)', path)
|
||||
if match:
|
||||
endwal = int(match.group(2), 16)
|
||||
if endwal > maxwal:
|
||||
maxwal = endwal
|
||||
|
||||
match = re.search('walarchive/([0-9A-F]{8})([0-9A-F]{8})([0-9A-F]{8})', path)
|
||||
if match:
|
||||
tli = int(match.group(1), 16)
|
||||
logno = int(match.group(2), 16)
|
||||
segno = int(match.group(3), 16)
|
||||
# FIXME: this assumes default 16 MB wal segment size
|
||||
logsegno = logno * (0x100000000 / (16*1024*1024)) + segno
|
||||
|
||||
seqwal = int((logsegno + 1) * (16*1024*1024))
|
||||
|
||||
if seqwal > maxseqwal:
|
||||
maxseqwal = seqwal;
|
||||
if minseqwal == 0 or seqwal < minseqwal:
|
||||
minseqwal = seqwal;
|
||||
|
||||
return {
|
||||
'nonrelimages': nonrelimages,
|
||||
'minwal': walpos_str(minwal),
|
||||
'maxwal': walpos_str(maxwal),
|
||||
'minseqwal': walpos_str(minseqwal),
|
||||
'maxseqwal': walpos_str(maxseqwal)
|
||||
}
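# Illustrative helper, not part of the original app: convert a WAL segment file
# name to the LSN at which that segment ends, mirroring the arithmetic above and
# assuming the default 16 MB segment size flagged in the FIXME. For example,
# '00000001000000000000000C' has logno=0 and segno=0xC, so logsegno = 0*256 + 12
# and the segment ends at (12 + 1) * 16 MB = 0xD000000, i.e. '0/D000000'.
def wal_filename_to_end_lsn(fname, wal_seg_size=16 * 1024 * 1024):
    logno = int(fname[8:16], 16)
    segno = int(fname[16:24], 16)
    logsegno = logno * (0x100000000 // wal_seg_size) + segno
    return walpos_str((logsegno + 1) * wal_seg_size)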
|
||||
|
||||
def print_cmd_result(cmd_result):
|
||||
return print_cmd_result_ex(cmd_result.args, cmd_result.returncode, cmd_result.stdout)
|
||||
|
||||
def print_cmd_result_ex(cmd, returncode, stdout):
|
||||
res = ''
|
||||
res += 'ran command:\n' + str(cmd) + '\n'
|
||||
res += 'It returned code ' + str(returncode) + '\n'
|
||||
res += '\n'
|
||||
res += 'stdout/stderr:\n'
|
||||
res += stdout
|
||||
|
||||
return res
|
||||
|
||||
@app.route('/api/init_primary', methods=['GET', 'POST'])
|
||||
def init_primary():
|
||||
|
||||
initdb_result = run("initdb -D pgdatadirs/primary --username=zenith --pwfile=pg-password.txt", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
|
||||
if initdb_result.returncode != 0:
|
||||
return print_cmd_result(initdb_result)
|
||||
|
||||
# Append archive_mode and archive_command and port to postgresql.conf
|
||||
f=open("pgdatadirs/primary/postgresql.conf", "a+")
|
||||
f.write("listen_addresses='*'\n")
|
||||
f.write("archive_mode=on\n")
|
||||
f.write("archive_command='zenith_push --archive-wal-path=%p --archive-wal-fname=%f'\n")
|
||||
f.write("ssl=on\n")
|
||||
f.close()
|
||||
|
||||
f=open("pgdatadirs/primary/pg_hba.conf", "a+")
|
||||
f.write("# allow SSL connections with password from anywhere\n")
|
||||
f.write("hostssl all all 0.0.0.0/0 md5\n")
|
||||
f.write("hostssl all all ::0/0 md5\n")
|
||||
f.close()
|
||||
|
||||
shutil.copyfile("server.crt", "pgdatadirs/primary/server.crt")
|
||||
shutil.copyfile("server.key", "pgdatadirs/primary/server.key")
|
||||
os.chmod("pgdatadirs/primary/server.key", 0o0600)
|
||||
|
||||
start_proc = Popen(args=["pg_ctl", "start", "-D", "pgdatadirs/primary", "-l", "pgdatadirs/primary/log"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
|
||||
start_rc = start_proc.wait()
|
||||
start_stdout, start_stderr = start_proc.communicate()
|
||||
|
||||
responsestr = print_cmd_result(initdb_result) + '\n'
|
||||
responsestr += print_cmd_result_ex(start_proc.args, start_rc, start_stdout)
|
||||
|
||||
return responsestr
|
||||
|
||||
@app.route('/api/zenith_push', methods=['GET', 'POST'])
|
||||
def zenith_push():
|
||||
# Stop the primary if it's running
|
||||
stop_result = run(args=["pg_ctl", "stop", "-D", "pgdatadirs/primary"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
|
||||
|
||||
# Call zenith_push
|
||||
push_result = run("zenith_push -D pgdatadirs/primary", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
|
||||
|
||||
# Restart the primary
|
||||
start_proc = Popen(args=["pg_ctl", "start", "-D", "pgdatadirs/primary", "-l", "pgdatadirs/primary/log"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
|
||||
start_rc = start_proc.wait()
|
||||
start_stdout, start_stderr = start_proc.communicate()
|
||||
|
||||
responsestr = print_cmd_result(stop_result) + '\n'
|
||||
responsestr += print_cmd_result(push_result) + '\n'
|
||||
responsestr += print_cmd_result_ex(start_proc.args, start_rc, start_stdout) + '\n'
|
||||
|
||||
return responsestr
|
||||
|
||||
@app.route('/api/create_standby', methods=['GET', 'POST'])
|
||||
def create_standby():
|
||||
|
||||
walpos = request.form.get('walpos')
|
||||
if not walpos:
|
||||
return 'no walpos'
|
||||
|
||||
dirs = os.listdir("pgdatadirs")
|
||||
|
||||
last_port = 5432
|
||||
|
||||
for dirname in dirs:
|
||||
|
||||
standby_match = re.search('standby_([0-9]+)', dirname)
|
||||
if standby_match:
|
||||
port = int(standby_match.group(1))
|
||||
if port > last_port:
|
||||
last_port = port
|
||||
|
||||
standby_port = last_port + 1
|
||||
|
||||
standby_dir = "pgdatadirs/standby_" + str(standby_port)
|
||||
|
||||
# Call zenith_restore
|
||||
restore_result = run(["zenith_restore", "--end=" + walpos, "-D", standby_dir], stdout=PIPE, stderr=STDOUT, encoding='latin1')
|
||||
responsestr = print_cmd_result(restore_result)
|
||||
|
||||
if restore_result.returncode == 0:
|
||||
# Append hot_standby and port to postgresql.conf
|
||||
f=open(standby_dir + "/postgresql.conf", "a+")
|
||||
f.write("hot_standby=on\n")
|
||||
f.write("port=" + str(standby_port) + "\n")
|
||||
f.close()
|
||||
|
||||
start_proc = Popen(args=["pg_ctl", "start", "-D", standby_dir, "-l", standby_dir + "/log"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
|
||||
start_rc = start_proc.wait()
|
||||
start_stdout, start_stderr = start_proc.communicate()
|
||||
responsestr += '\n\n' + print_cmd_result_ex(start_proc.args, start_rc, start_stdout)
|
||||
|
||||
return responsestr
|
||||
|
||||
@app.route('/api/destroy_server', methods=['GET', 'POST'])
|
||||
def destroy_primary():
|
||||
|
||||
datadir = request.form.get('datadir')
|
||||
|
||||
# Check that the datadir parameter doesn't contain anything funny.
|
||||
if not re.match("^[A-Za-z0-9_-]+$", datadir):
|
||||
raise Exception('invalid datadir: ' + datadir)
|
||||
|
||||
# Stop the server if it's running
|
||||
stop_result = run(args=["pg_ctl", "stop", "-m", "immediate", "-D", "pgdatadirs/" + datadir], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
|
||||
|
||||
shutil.rmtree('pgdatadirs/' + datadir, ignore_errors=True)
|
||||
|
||||
responsestr = print_cmd_result(stop_result) + '\n'
|
||||
responsestr += 'Deleted datadir ' + datadir + '.\n'
|
||||
|
||||
return responsestr
|
||||
|
||||
@app.route('/api/restore_primary', methods=['GET', 'POST'])
|
||||
def restore_primary():
|
||||
|
||||
# Call zenith_restore
|
||||
restore_result = run(["zenith_restore", "-D", "pgdatadirs/primary"], stdout=PIPE, stderr=STDOUT, encoding='latin1')
|
||||
responsestr = print_cmd_result(restore_result)
|
||||
|
||||
# Append restore_command to postgresql.conf, so that it can find the last raw WAL segments
|
||||
f=open("pgdatadirs/primary/postgresql.conf", "a+")
|
||||
f.write("listen_addresses='*'\n")
|
||||
f.write("restore_command='zenith_restore --archive-wal-path=%p --archive-wal-fname=%f'\n")
|
||||
f.write("ssl=on\n")
|
||||
f.close()
|
||||
|
||||
if restore_result.returncode == 0:
|
||||
start_proc = Popen(args=["pg_ctl", "start", "-D", "pgdatadirs/primary", "-l", "pgdatadirs/primary/log"], stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=False, start_new_session=True, close_fds=True)
|
||||
start_rc = start_proc.wait()
|
||||
start_stdout, start_stderr = start_proc.communicate()
|
||||
responsestr += print_cmd_result_ex(start_proc.args, start_rc, start_stdout)
|
||||
|
||||
return responsestr
|
||||
|
||||
@app.route('/api/slicedice', methods=['GET', 'POST'])
|
||||
def run_slicedice():
|
||||
result = run("zenith_slicedice", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
|
||||
|
||||
responsestr = print_cmd_result(result)
|
||||
|
||||
return responsestr
|
||||
|
||||
@app.route('/api/reset_demo', methods=['POST'])
|
||||
def reset_all():
|
||||
result = run("pkill -9 postgres", stdout=PIPE, stderr=STDOUT, universal_newlines=True, shell=True)
|
||||
|
||||
dirs = os.listdir("pgdatadirs")
|
||||
for dirname in dirs:
|
||||
shutil.rmtree('pgdatadirs/' + dirname)
|
||||
|
||||
for file in s3bucket.objects.all():
|
||||
s3_client.delete_object(Bucket = BUCKET, Key = file.key)
|
||||
|
||||
responsestr = print_cmd_result(result) + '\n'
|
||||
responsestr += '''
|
||||
Deleted all Postgres datadirs.
|
||||
Deleted all files in object storage bucket.
|
||||
'''
|
||||
|
||||
return responsestr
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run()
|
||||
@@ -1,3 +0,0 @@
|
||||
module.exports = {
|
||||
presets: ["@babel/preset-env", "@babel/preset-react"],
|
||||
};
|
||||
@@ -1,67 +0,0 @@
|
||||
Mock implementation of a management console.
|
||||
|
||||
This isn't very different from a "normal" PostgreSQL installation with
|
||||
a base backup and WAL archive. The main user-visible difference is
|
||||
that when you create a standby server, we don't restore the whole data
|
||||
directory, but only the "non-relation" files. Relation files are
|
||||
restored on demand, when they're accessed the first time. That makes
the "create standby" operation very fast, but adds some delay when
you connect and start running queries. This is most visible if you
have a large database. (However, see the note below about large databases.)
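To make the idea concrete, a rough sketch of restore-on-first-access
(illustrative only: this is not how zenith_restore is implemented, and the
"relimages/" key layout is invented for the example; s3bucket is a boto3 Bucket
handle like the one used in mgmt-console/app.py):

    import os

    def read_relation_page(s3bucket, datadir, relpath, blkno, blocksize=8192):
        # e.g. relpath = "base/13690/16384"
        local = os.path.join(datadir, relpath)
        if not os.path.exists(local):
            # First access: pull the whole relation segment from the bucket.
            os.makedirs(os.path.dirname(local), exist_ok=True)
            s3bucket.download_file("relimages/" + relpath, local)
        with open(local, "rb") as f:
            f.seek(blkno * blocksize)
            return f.read(blocksize)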
|
||||
|
||||
Note: lots of things are broken/unsafe. Things will fail if a table is
|
||||
larger than 1 GB. Or if there are more than 1000 files in the cloud
|
||||
bucket.
|
||||
|
||||
How to use this demo:
|
||||
|
||||
1. If there are any leftovers from previous runs, reset by clicking
|
||||
the RESET DEMO button. This kills and deletes all Postgres servers,
|
||||
and empties the cloud storage bucket
|
||||
|
||||
2. Create primary server by clicking on the "Init primary" button
|
||||
|
||||
3. Push a base image of the primary to cloud storage, by clicking the
|
||||
"push base image" button. (This takes about 30 seconds, be
|
||||
patient)
|
||||
|
||||
4. Connect to primary with psql, and create a test table with a little data.
|
||||
|
||||
psql postgres -p5432 -U zenith -h<host>
|
||||
|
||||
create table mytable (i int4);
|
||||
|
||||
insert into mytable values (1);
|
||||
select pg_switch_wal();
|
||||
|
||||
The Postgres password is the same as for the management console.
|
||||
|
||||
5. Now that there's a new WAL segment in the archive, we can "slice &
|
||||
dice" it. Click on the "Slice & dice button".
|
||||
|
||||
6. Perform more updates on the primary, to generate more WAL.
|
||||
|
||||
insert into mytable values (2); select pg_switch_wal();
|
||||
insert into mytable values (3); select pg_switch_wal();
|
||||
insert into mytable values (4); select pg_switch_wal();
|
||||
insert into mytable values (5); select pg_switch_wal();
|
||||
|
||||
7. Slice & Dice the WAL again
|
||||
|
||||
8. Now you can create read-only standby servers at any point in the
|
||||
WAL. Type a WAL position in the text box (or use the slider), and
|
||||
click "Create new standby". The first standby is created at port 5433,
|
||||
the second at port 5434, and so forth.
|
||||
|
||||
9. Connect to the standby with "psql -p 5433". Note that it takes a
|
||||
few seconds until the connection is established. That's because the
|
||||
standby has to restore the basic system catalogs, like pg_database and
|
||||
pg_authid from the backup. After connecting, you can do "\d" to list
|
||||
tables, this will also take a few seconds, as more catalog tables are
|
||||
restored from backup. Subsequent commands will be faster.
|
||||
|
||||
Run queries in the standby:
|
||||
|
||||
select * from mytable;
|
||||
|
||||
the result depends on the LSN that you picked when you created the server.
|
||||
@@ -1,463 +0,0 @@
|
||||
import React, { useState, useEffect } from 'react';
|
||||
import ReactDOM from 'react-dom';
|
||||
import Loader from "react-loader-spinner";
|
||||
import { Router, Route, Link, IndexRoute, hashHistory, browserHistory } from 'react-router';
|
||||
|
||||
function ServerStatus(props) {
|
||||
const datadir = props.server.datadir;
|
||||
const status = props.server.status;
|
||||
const port = props.server.port;
|
||||
|
||||
return (
|
||||
<div>
|
||||
<h2>{ datadir == 'primary' ? 'Primary' : datadir }</h2>
|
||||
status: <div className='status'>{status}</div><br/>
|
||||
to connect: <span className='shellcommand'>psql -h { window.location.hostname } -p { port } -U zenith postgres</span><br/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function StandbyList(props) {
|
||||
const bucketSummary = props.bucketSummary;
|
||||
const standbys = props.standbys;
|
||||
const maxwalpos = bucketSummary.maxwal ? walpos_to_int(bucketSummary.maxwal) : 0;
|
||||
|
||||
const [walposInput, setWalposInput] = useState({ src: 'text', value: '0/0'});
|
||||
|
||||
// find earliest base image
|
||||
const minwalpos = bucketSummary.nonrelimages ? bucketSummary.nonrelimages.reduce((minpos, imgpos_str, index, array) => {
|
||||
const imgpos = walpos_to_int(imgpos_str);
|
||||
return (minpos == 0 || imgpos < minpos) ? imgpos : minpos;
|
||||
}, 0) : 0;
|
||||
|
||||
const can_create_standby = minwalpos > 0 && maxwalpos > 0 && maxwalpos >= minwalpos;
|
||||
var walpos_valid = true;
|
||||
|
||||
function create_standby() {
|
||||
const formdata = new FormData();
|
||||
formdata.append("walpos", walposStr);
|
||||
|
||||
props.startOperation('Creating new standby at ' + walposStr + '...',
|
||||
fetch("/api/create_standby", { method: 'POST', body: formdata }));
|
||||
}
|
||||
|
||||
function destroy_standby(datadir) {
|
||||
const formdata = new FormData();
|
||||
formdata.append("datadir", datadir);
|
||||
props.startOperation('Destroying ' + datadir + '...',
|
||||
fetch("/api/destroy_server", { method: 'POST', body: formdata }));
|
||||
}
|
||||
|
||||
const handleSliderChange = (event) => {
|
||||
setWalposInput({ src: 'slider', value: event.target.value });
|
||||
}
|
||||
|
||||
const handleWalposChange = (event) => {
|
||||
setWalposInput({ src: 'text', value: event.target.value });
|
||||
}
|
||||
|
||||
var sliderValue;
|
||||
var walposStr;
|
||||
if (walposInput.src == 'text')
|
||||
{
|
||||
const walpos = walpos_to_int(walposInput.value);
|
||||
|
||||
if (walpos >= minwalpos && walpos <= maxwalpos)
|
||||
walpos_valid = true;
|
||||
else
|
||||
walpos_valid = false;
|
||||
|
||||
sliderValue = Math.round((walpos - minwalpos) / (maxwalpos - minwalpos) * 100);
|
||||
walposStr = walposInput.value;
|
||||
}
|
||||
else
|
||||
{
|
||||
const slider = walposInput.value;
|
||||
const new_walpos = minwalpos + slider / 100 * (maxwalpos - minwalpos);
|
||||
|
||||
console.log('minwalpos: '+ minwalpos);
|
||||
console.log('maxwalpos: '+ maxwalpos);
|
||||
|
||||
walposStr = int_to_walpos(Math.round(new_walpos));
|
||||
walpos_valid = true;
|
||||
console.log(walposStr);
|
||||
}
|
||||
|
||||
var standbystatus = ''
|
||||
if (standbys)
|
||||
{
|
||||
standbystatus =
|
||||
<div>
|
||||
{
|
||||
standbys.length > 0 ?
|
||||
standbys.map((server) =>
|
||||
<>
|
||||
<ServerStatus key={ 'status_' + server.datadir} server={server}/>
|
||||
<button key={ 'destroy_' + server.datadir} onClick={e => destroy_standby(server.datadir)}>Destroy standby</button>
|
||||
</>
|
||||
) : "no standby servers"
|
||||
}
|
||||
</div>
|
||||
}
|
||||
|
||||
return (
|
||||
<div>
|
||||
<h2>Standbys</h2>
|
||||
<button onClick={create_standby} disabled={!can_create_standby || !walpos_valid}>Create new Standby</button> at LSN
|
||||
<input type="text" id="walpos_input" value={ walposStr } onChange={handleWalposChange} disabled={!can_create_standby}/>
|
||||
<input type="range" id="walpos_slider" min="0" max="100" steps="1" value={sliderValue} onChange={handleSliderChange} disabled={!can_create_standby}/>
|
||||
<br/>
|
||||
{ standbystatus }
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function ServerList(props) {
|
||||
const primary = props.serverStatus ? props.serverStatus.primary : null;
|
||||
const standbys = props.serverStatus ? props.serverStatus.standbys : [];
|
||||
const bucketSummary = props.bucketSummary;
|
||||
|
||||
var primarystatus = '';
|
||||
|
||||
function destroy_primary() {
|
||||
const formdata = new FormData();
|
||||
formdata.append("datadir", 'primary');
|
||||
props.startOperation('Destroying primary...',
|
||||
fetch("/api/destroy_server", { method: 'POST', body: formdata }));
|
||||
}
|
||||
|
||||
function restore_primary() {
|
||||
props.startOperation('Restoring primary...',
|
||||
fetch("/api/restore_primary", { method: 'POST' }));
|
||||
}
|
||||
|
||||
if (primary)
|
||||
{
|
||||
primarystatus =
|
||||
<div>
|
||||
<ServerStatus server={primary}/>
|
||||
<button onClick={destroy_primary}>Destroy primary</button>
|
||||
</div>
|
||||
}
|
||||
else
|
||||
{
|
||||
primarystatus =
|
||||
<div>
|
||||
no primary server<br/>
|
||||
<button onClick={restore_primary}>Restore primary</button>
|
||||
</div>
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
{ primarystatus }
|
||||
<StandbyList standbys={standbys} startOperation={props.startOperation} bucketSummary={props.bucketSummary}/>
|
||||
<p className="todo">
|
||||
Should we list the WAL safekeeper nodes here? Or are they part of the Storage? Or not visible to users at all?
|
||||
</p>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
function BucketSummary(props) {
|
||||
const bucketSummary = props.bucketSummary;
|
||||
const startOperation = props.startOperation;
|
||||
|
||||
function slicedice() {
|
||||
startOperation('Slicing sequential WAL to per-relation WAL...',
|
||||
fetch("/api/slicedice", { method: 'POST' }));
|
||||
}
|
||||
|
||||
if (!bucketSummary.nonrelimages)
|
||||
{
|
||||
return <>loading...</>
|
||||
}
|
||||
|
||||
return (
|
||||
<div>
|
||||
<div>Base images at following WAL positions:
|
||||
<ul>
|
||||
{bucketSummary.nonrelimages.map((img) => (
|
||||
<li key={img}>{img}</li>
|
||||
))}
|
||||
</ul>
|
||||
</div>
|
||||
Sliced WAL is available up to { bucketSummary.maxwal }<br/>
|
||||
Raw WAL is available up to { bucketSummary.maxseqwal }<br/>
|
||||
|
||||
<br/>
|
||||
<button onClick={slicedice}>Slice & Dice WAL</button>
|
||||
<p className="todo">
|
||||
Currently, the slicing or "sharding" of the WAL needs to be triggered manually, by clicking the above button.
|
||||
<br/>
|
||||
TODO: make it a continuous process that runs in the WAL safekeepers, or in the Page Servers, or as a standalone service.
|
||||
</p>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function ProgressIndicator()
|
||||
{
|
||||
return (
|
||||
<div>
|
||||
<Loader
|
||||
type="Puff"
|
||||
color="#00BFFF"
|
||||
height={100}
|
||||
width={100}
|
||||
/>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function walpos_to_int(walpos)
|
||||
{
|
||||
const [hi, lo] = walpos.split('/');
|
||||
|
||||
return parseInt(hi, 16) * 0x100000000 + parseInt(lo, 16);
|
||||
}
|
||||
|
||||
function int_to_walpos(x)
|
||||
{
|
||||
console.log('converting ' + x);
|
||||
return (Math.floor((x / 0x100000000)).toString(16) + '/' + (x % 0x100000000).toString(16)).toUpperCase();
|
||||
}
|
||||
|
||||
function OperationStatus(props) {
|
||||
const lastOperation = props.lastOperation;
|
||||
const inProgress = props.inProgress;
|
||||
const operationResult = props.operationResult;
|
||||
|
||||
if (lastOperation)
|
||||
{
|
||||
return (
|
||||
<div><h2>Last operation:</h2>
|
||||
<div>{lastOperation} { (!inProgress && lastOperation) ? 'done!' : '' }</div>
|
||||
<div className='result'>
|
||||
{inProgress ? <ProgressIndicator/> : <pre>{operationResult}</pre>}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
else
|
||||
return '';
|
||||
}
|
||||
|
||||
function ActionButtons(props) {
|
||||
|
||||
const startOperation = props.startOperation;
|
||||
const bucketSummary = props.bucketSummary;
|
||||
|
||||
function reset_demo() {
|
||||
startOperation('resetting everything...',
|
||||
fetch("/api/reset_demo", { method: 'POST' }));
|
||||
}
|
||||
|
||||
function init_primary() {
|
||||
startOperation('Initializing new primary...',
|
||||
fetch("/api/init_primary", { method: 'POST' }));
|
||||
}
|
||||
|
||||
function zenith_push() {
|
||||
startOperation('Pushing new base image...',
|
||||
fetch("/api/zenith_push", { method: 'POST' }));
|
||||
}
|
||||
|
||||
return (
|
||||
<div>
|
||||
<p className="todo">
|
||||
RESET DEMO deletes everything in the storage bucket, and stops and destroys all servers. This resets the whole demo environment to the initial state.
|
||||
</p>
|
||||
<button onClick={reset_demo}>RESET DEMO</button>
|
||||
<p className="todo">
|
||||
Init Primary runs initdb to create a new primary server. Click this after Resetting the demo.
|
||||
</p>
|
||||
|
||||
<button onClick={init_primary}>Init primary</button>
|
||||
|
||||
<p className="todo">
|
||||
Push Base Image stops the primary, copies the current state of the primary to the storage bucket as a new base backup, and restarts the primary.
|
||||
<br/>
|
||||
TODO: This should be handled by a continuous background process, probably running in the storage nodes. And without having to shut down the cluster, of course.
|
||||
</p>
|
||||
|
||||
<button onClick={zenith_push}>Push base image</button>
|
||||
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function Sidenav(props)
|
||||
{
|
||||
const toPage = (page) => (event) => {
|
||||
//event.preventDefault()
|
||||
props.switchPage(page);
|
||||
};
|
||||
return (
|
||||
<div>
|
||||
<h3 className="sidenav-item">Menu</h3>
|
||||
<a href="#servers" onClick={toPage('servers')} className="sidenav-item">Servers</a>
|
||||
<a href="#storage" onClick={toPage('storage')} className="sidenav-item">Storage</a>
|
||||
<a href="#snapshots" onClick={toPage('snapshots')} className="sidenav-item">Snapshots</a>
|
||||
<a href="#demo" onClick={toPage('demo')} className="sidenav-item">Demo</a>
|
||||
<a href="#import" onClick={toPage('import')} className="sidenav-item">Import / Export</a>
|
||||
<a href="#jobs" onClick={toPage('jobs')} className="sidenav-item">Jobs</a>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function App()
|
||||
{
|
||||
const [page, setPage] = useState('servers');
|
||||
const [serverStatus, setServerStatus] = useState({});
|
||||
const [bucketSummary, setBucketSummary] = useState({});
|
||||
const [lastOperation, setLastOperation] = useState('');
|
||||
const [inProgress, setInProgress] = useState('');
|
||||
const [operationResult, setOperationResult] = useState('');
|
||||
|
||||
useEffect(() => {
|
||||
reloadStatus();
|
||||
}, []);
|
||||
|
||||
function startOperation(operation, promise)
|
||||
{
|
||||
promise.then(result => result.text()).then(resultText => {
|
||||
operationFinished(resultText);
|
||||
});
|
||||
|
||||
setLastOperation(operation);
|
||||
setInProgress(true);
|
||||
setOperationResult('');
|
||||
}
|
||||
|
||||
function operationFinished(result)
|
||||
{
|
||||
setInProgress(false);
|
||||
setOperationResult(result);
|
||||
reloadStatus();
|
||||
}
|
||||
|
||||
function clearOperation()
|
||||
{
|
||||
setLastOperation('')
|
||||
setInProgress('');
|
||||
setOperationResult('');
|
||||
console.log("cleared");
|
||||
}
|
||||
|
||||
function reloadStatus()
|
||||
{
|
||||
fetch('/api/server_status').then(res => res.json()).then(data => {
|
||||
setServerStatus(data);
|
||||
});
|
||||
|
||||
fetch('/api/bucket_summary').then(res => res.json()).then(data => {
|
||||
setBucketSummary(data);
|
||||
});
|
||||
}
|
||||
|
||||
const content = () => {
|
||||
console.log(page);
|
||||
if (page === 'servers') {
|
||||
return (
|
||||
<>
|
||||
<h1>Server status</h1>
|
||||
<ServerList startOperation={ startOperation }
|
||||
serverStatus={ serverStatus }
|
||||
bucketSummary={ bucketSummary }/>
|
||||
</>
|
||||
);
|
||||
} else if (page === 'storage') {
|
||||
return (
|
||||
<>
|
||||
<h1>Storage bucket status</h1>
|
||||
<BucketSummary startOperation={ startOperation }
|
||||
bucketSummary={ bucketSummary }/>
|
||||
</>
|
||||
);
|
||||
} else if (page === 'snapshots') {
|
||||
return (
|
||||
<>
|
||||
<h1>Snapshots</h1>
|
||||
<p className="todo">
|
||||
In Zenith, snapshots are just specific points (LSNs) in the WAL history, with a label. A snapshot prevents garbage collecting old data that's still needed to reconstruct the database at that LSN.
|
||||
</p>
|
||||
<p className="todo">
|
||||
TODO:
|
||||
<ul>
|
||||
<li>List existing snapshots</li>
|
||||
<li>Create new snapshot manually, from current state or from a given LSN</li>
|
||||
<li>Drill into the WAL stream to see what has happened. Provide tools for e.g. finding the point where a table was dropped</li>
|
||||
<li>Create snapshots automatically based on events in the WAL, e.g. when pg_create_restore_point() is called in the primary</li>
|
||||
<li>Launch new reader instance at a snapshot</li>
|
||||
<li>Export snapshot</li>
|
||||
<li>Rollback cluster to a snapshot</li>
|
||||
</ul>
|
||||
</p>
|
||||
</>
|
||||
);
|
||||
} else if (page === 'demo') {
|
||||
return (
|
||||
<>
|
||||
<h1>Misc actions</h1>
|
||||
<ActionButtons startOperation={ startOperation }
|
||||
bucketSummary={ bucketSummary }/>
|
||||
</>
|
||||
);
|
||||
} else if (page === 'import') {
|
||||
return (
|
||||
<>
|
||||
<h1>Import & Export tools</h1>
|
||||
<p className="TODO">TODO:
|
||||
<ul>
|
||||
<li>Initialize database from existing backup (pg_basebackup, WAL-G, pgbackrest)</li>
|
||||
<li>Initialize from a pg_dump or other SQL script</li>
|
||||
<li>Launch batch job to import data files from S3</li>
|
||||
<li>Launch batch job to export database with pg_dump to S3</li>
|
||||
</ul>
|
||||
These jobs can be run against reader processing nodes. We can even
spawn a new reader node dedicated to a job, and destroy it when the job is done.
|
||||
</p>
|
||||
</>
|
||||
);
|
||||
} else if (page === 'jobs') {
|
||||
return (
|
||||
<>
|
||||
<h1>Batch jobs</h1>
|
||||
<p className="TODO">TODO:
|
||||
<ul>
|
||||
<li>List running jobs launched from Import & Export tools</li>
|
||||
<li>List other batch jobs launched by the user</li>
|
||||
<li>Launch new batch jobs</li>
|
||||
</ul>
|
||||
</p>
|
||||
</>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function switchPage(page)
|
||||
{
|
||||
console.log("topage " + page);
|
||||
setPage(page)
|
||||
clearOperation();
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="row">
|
||||
<div className="sidenav">
|
||||
<Sidenav switchPage={switchPage} className="column"/>
|
||||
</div>
|
||||
<div className="column">
|
||||
<div>
|
||||
{ content() }
|
||||
</div>
|
||||
<OperationStatus lastOperation={ lastOperation }
|
||||
inProgress = { inProgress }
|
||||
operationResult = { operationResult }/>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
ReactDOM.render(<App/>, document.getElementById('reactApp'));
|
||||
@@ -1,105 +0,0 @@
|
||||
import React, { useState, useEffect } from 'react';
|
||||
import ReactDOM from 'react-dom';
|
||||
import Loader from "react-loader-spinner";
|
||||
|
||||
function walpos_to_int(walpos)
|
||||
{
|
||||
const [hi, lo] = walpos.split('/');
|
||||
|
||||
return parseInt(hi, 16) * 0x100000000 + parseInt(lo, 16);
|
||||
}
|
||||
|
||||
const palette = [
|
||||
"#003f5c",
|
||||
"#2f4b7c",
|
||||
"#665191",
|
||||
"#a05195",
|
||||
"#d45087",
|
||||
"#f95d6a",
|
||||
"#ff7c43",
|
||||
"#ffa600"];
|
||||
|
||||
function WalRecord(props)
|
||||
{
|
||||
const firstwalpos = props.firstwalpos;
|
||||
const endwalpos = props.endwalpos;
|
||||
const record = props.record;
|
||||
const index = props.index;
|
||||
const xidmap = props.xidmap;
|
||||
|
||||
const startpos = walpos_to_int(record.start)
|
||||
const endpos = walpos_to_int(record.end)
|
||||
|
||||
const scale = 1000 / (16*1024*1024)
|
||||
const startx = (startpos - firstwalpos) * scale;
|
||||
const endx = (endpos - firstwalpos) * scale;
|
||||
|
||||
const xidindex = xidmap[record.xid];
|
||||
const color = palette[index % palette.length];
|
||||
|
||||
const y = 5 + (xidindex) * 20 + (index % 2) * 2;
|
||||
|
||||
return (
|
||||
<line x1={ startx } y1={y} x2={endx} y2={y} stroke={ color } strokeWidth="5">
|
||||
<title>
|
||||
start: { record.start } end: { record.end }
|
||||
</title>
|
||||
</line>
|
||||
)
|
||||
}
|
||||
|
||||
function WalFile(props)
|
||||
{
|
||||
const walContent = props.walContent;
|
||||
const firstwalpos = props.firstwalpos;
|
||||
const xidmap = props.xidmap;
|
||||
|
||||
return <svg width="1000" height="200">
|
||||
{
|
||||
walContent.records ?
|
||||
walContent.records.map((record, index) =>
|
||||
<WalRecord key={record.start} firstwalpos={firstwalpos} record={record} index={index} xidmap={xidmap}/>
|
||||
) : "no records"
|
||||
}
|
||||
</svg>
|
||||
}
|
||||
|
||||
function WalDumpApp()
|
||||
{
|
||||
const [walContent, setWalContent] = useState({});
|
||||
|
||||
const filename = '00000001000000000000000C';
|
||||
|
||||
useEffect(() => {
|
||||
fetch('/fetch_wal?filename='+filename).then(res => res.json()).then(data => {
|
||||
setWalContent(data);
|
||||
});
|
||||
}, []);
|
||||
|
||||
var firstwalpos = 0;
|
||||
var endwalpos = 0;
|
||||
var numxids = 0;
|
||||
var xidmap = {};
|
||||
if (walContent.records && walContent.records.length > 0)
|
||||
{
|
||||
firstwalpos = walpos_to_int(walContent.records[0].start);
|
||||
endwalpos = firstwalpos + 16*1024*1024;
|
||||
|
||||
walContent.records.forEach(rec => {
|
||||
if (!xidmap[rec.xid])
|
||||
{
|
||||
xidmap[rec.xid] = ++numxids;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<h2>{filename}</h2>
|
||||
<WalFile walContent={walContent} firstwalpos={firstwalpos} endwalpos={endwalpos} xidmap={xidmap}/>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
console.log('hey there');
|
||||
ReactDOM.render(<WalDumpApp/>, document.getElementById('waldump'));
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# NOTE: You must set the following environment variables before running this:
|
||||
# BASIC_AUTH_PASSWORD - basic http auth password
|
||||
# S3_ACCESSKEY
|
||||
# S3_SECRET
|
||||
|
||||
|
||||
S3_ENDPOINT=https://storage.googleapis.com S3_BUCKET=zenith-testbucket PATH=/home/heikki/pgsql-install/bin:$PATH flask run --host=0.0.0.0
|
||||
@@ -1,8 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# NOTE: You should set the BASIC_AUTH_PASSWORD environment variable before calling this script
|
||||
|
||||
# Launch S3 server
|
||||
(cd ms3 && python3 -m ms3.app --listen-address=localhost) &
|
||||
|
||||
FLASK_ENV=development S3_REGION=auto S3_ENDPOINT=http://localhost:9009 S3_BUCKET=zenith-testbucket PATH=/home/heikki/pgsql.fsmfork/bin:$PATH flask run --host=0.0.0.0
|
||||
6144 mgmt-console/package-lock.json generated
File diff suppressed because it is too large
@@ -1,27 +0,0 @@
|
||||
{
|
||||
"name": "starter-kit",
|
||||
"version": "1.1.0",
|
||||
"description": "",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1",
|
||||
"build": "webpack",
|
||||
"start": "python app.py"
|
||||
},
|
||||
"author": "",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"react": "^17.0.1",
|
||||
"react-dom": "^17.0.1",
|
||||
"react-loader-spinner": "^4.0.0",
|
||||
"react-router": "^5.2.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/core": "^7.13.1",
|
||||
"@babel/preset-env": "^7.13.5",
|
||||
"@babel/preset-react": "^7.12.13",
|
||||
"babel-loader": "^8.2.2",
|
||||
"webpack": "^5.24.2",
|
||||
"webpack-cli": "^4.5.0"
|
||||
}
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
<head>
|
||||
|
||||
<style>
|
||||
.status {
|
||||
font-family: monospace;
|
||||
background-color: lightgrey;
|
||||
}
|
||||
.shellcommand {
|
||||
font-family: monospace;
|
||||
background-color: lightgrey;
|
||||
}
|
||||
.result {
|
||||
font-family: monospace;
|
||||
background-color: lightgrey;
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
|
||||
.todo {font-style: italic;}
|
||||
|
||||
|
||||
h1 {color: blue;}
|
||||
|
||||
.column {
|
||||
float: left;
|
||||
width: 50%;
|
||||
padding: 10px;
|
||||
}
|
||||
/* Clear floats after the columns */
|
||||
.row:after {
|
||||
content: "";
|
||||
display: table;
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.sidenav {
|
||||
float: left;
|
||||
width: 150px;
|
||||
padding: 10px;
|
||||
background-color: pink;
|
||||
}
|
||||
|
||||
.sidenav-item {
|
||||
padding:10px 0px;
|
||||
border:none;
|
||||
display:block;
|
||||
}
|
||||
|
||||
</style>
|
||||
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="reactApp"></div>
|
||||
|
||||
<!-- Attach React components -->
|
||||
<script type="text/javascript" src="{{ url_for('static', filename='app_bundle.js') }}"></script>
|
||||
</body>
|
||||
@@ -1,46 +0,0 @@
|
||||
<head>
|
||||
|
||||
<style>
|
||||
.status {
|
||||
font-family: monospace;
|
||||
background-color: lightgrey;
|
||||
}
|
||||
.shellcommand {
|
||||
font-family: monospace;
|
||||
background-color: lightgrey;
|
||||
}
|
||||
.result {
|
||||
font-family: monospace;
|
||||
background-color: lightgrey;
|
||||
padding: 10px;
|
||||
}
|
||||
h1 {color: blue;}
|
||||
p {color: red;}
|
||||
|
||||
* {
|
||||
box-sizing: border-box;
|
||||
}
|
||||
|
||||
.row {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
/* Create two equal columns that sits next to each other */
|
||||
.column1 {
|
||||
flex: 30%;
|
||||
padding: 10px;
|
||||
}
|
||||
.column2 {
|
||||
flex: 70%;
|
||||
padding: 10px;
|
||||
}
|
||||
</style>
|
||||
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="waldump"></div>
|
||||
|
||||
<!-- Attach React components -->
|
||||
<script type="text/javascript" src="{{ url_for('static', filename='waldump_bundle.js') }}"></script>
|
||||
</body>
|
||||
@@ -1,25 +0,0 @@
|
||||
#
|
||||
# This file contains work-in-progress code to visualize WAL contents.
|
||||
#
|
||||
# This is the API endpoint that calls a 'zenith_wal_to_json' executable,
|
||||
# which is a hacked version of pg_waldump that prints information about the
|
||||
# records in JSON format. The code in js/waldump.js displays it.
|
||||
#
|
||||
|
||||
import os
|
||||
import re
|
||||
from subprocess import PIPE, STDOUT, run, Popen
|
||||
|
||||
def fetch_wal(request, s3bucket):
|
||||
filename = request.args.get('filename')
|
||||
if not re.match("^[A-Za-z0-9_]+$", filename):
|
||||
raise Exception('invalid WAL filename: ' + filename)
|
||||
|
||||
# FIXME: this downloads the WAL file to current dir. Use a temp dir? Pipe?
|
||||
s3bucket.download_file('walarchive/' + filename, filename)
|
||||
|
||||
result = run("zenith_wal_to_json " + filename, stdout=PIPE, universal_newlines=True, shell=True)
|
||||
|
||||
os.unlink(filename);
|
||||
|
||||
return result.stdout
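# Illustrative only, not part of the original module: zenith_wal_to_json is
# assumed to emit a top-level "records" array whose entries carry "start",
# "end" and "xid" fields -- the shape that js/waldump.js expects. A small
# sketch that summarizes such output by transaction id:

import json

def records_per_xid(waldump_json):
    counts = {}
    for rec in json.loads(waldump_json).get("records", []):
        counts[rec["xid"]] = counts.get(rec["xid"], 0) + 1
    return counts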
|
||||
@@ -1,27 +0,0 @@
|
||||
var webpack = require('webpack');
|
||||
module.exports = {
|
||||
entry: {
|
||||
app: './js/app.js',
|
||||
waldump: './js/waldump.js'
|
||||
},
|
||||
output: {
|
||||
filename: "[name]_bundle.js",
|
||||
path: __dirname + '/static'
|
||||
},
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
test: /\.js?$/,
|
||||
exclude: /node_modules/,
|
||||
use: {
|
||||
loader: 'babel-loader',
|
||||
options: {
|
||||
presets: ['@babel/preset-env']
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
plugins: [
|
||||
]
|
||||
};
|
||||
@@ -1,179 +0,0 @@
|
||||
#zenith.py
|
||||
import click
|
||||
import testgres
|
||||
import os
|
||||
|
||||
from testgres import PostgresNode
|
||||
from tabulate import tabulate
|
||||
|
||||
zenith_base_dir = '/home/anastasia/zenith/basedir'
|
||||
|
||||
@click.group()
|
||||
def main():
|
||||
"""Run the Zenith CLI."""
|
||||
|
||||
@click.group()
|
||||
def pg():
|
||||
"""Db operations
|
||||
|
||||
NOTE: 'database' here means one postgresql node
|
||||
"""
|
||||
|
||||
@click.command(name='create')
|
||||
@click.option('--name', required=True)
|
||||
@click.option('-s', '--storage-name', help='Name of the storage',
|
||||
default='zenith-local',
|
||||
show_default=True)
|
||||
@click.option('--snapshot', help='init from the snapshot. Snap is a name or URL')
|
||||
@click.option('--no-start', is_flag=True, help='Do not start created node',
|
||||
default=False, show_default=True)
|
||||
def pg_create(name, storage_name, snapshot, no_start):
|
||||
"""Initialize the database"""
|
||||
node = PostgresNode()
|
||||
base_dir = os.path.join(zenith_base_dir, 'pg', name)
|
||||
node = testgres.get_new_node(name, base_dir=base_dir)
|
||||
# TODO: skip init; instead, link the node with its storage or populate it from a snapshot
|
||||
node.init()
|
||||
if not no_start:
|
||||
node.start()
|
||||
|
||||
@click.command(name='start')
|
||||
@click.option('--name', required=True)
|
||||
@click.option('--snapshot')
|
||||
@click.option('--read-only', is_flag=True, help='Start read-only node', show_default=True)
|
||||
def pg_start(name, snapshot, read_only):
|
||||
"""Start the database"""
|
||||
node = PostgresNode()
|
||||
base_dir = os.path.join(zenith_base_dir, 'pg', name)
|
||||
node = testgres.get_new_node(name, base_dir=base_dir)
|
||||
# TODO pass snapshot as a parameter
|
||||
node.start()
|
||||
|
||||
@click.command(name='stop')
|
||||
@click.option('--name', required=True)
|
||||
def pg_stop(name):
|
||||
"""Stop the database"""
|
||||
node = PostgresNode()
|
||||
base_dir = os.path.join(zenith_base_dir, 'pg', name)
|
||||
node = testgres.get_new_node(name, base_dir=base_dir)
|
||||
node.stop()
|
||||
|
||||
@click.command(name='destroy')
|
||||
@click.option('--name', required=True)
|
||||
def pg_destroy(name):
|
||||
"""Drop the database"""
|
||||
node = PostgresNode()
|
||||
base_dir = os.path.join(zenith_base_dir, 'pg', name)
|
||||
node = testgres.get_new_node(name, base_dir=base_dir)
|
||||
node.cleanup()
|
||||
|
||||
@click.command(name='list')
|
||||
def pg_list():
|
||||
"""List existing databases"""
|
||||
dirs = os.listdir(os.path.join(zenith_base_dir, 'pg'))
|
||||
path={}
|
||||
status={}
|
||||
data=[]
|
||||
|
||||
for dirname in dirs:
|
||||
path[dirname] = os.path.join(zenith_base_dir, 'pg', dirname)
|
||||
fname = os.path.join( path[dirname], 'data/postmaster.pid')
|
||||
try:
|
||||
f = open(fname,'r')
|
||||
status[dirname] = f.readlines()[-1]
|
||||
except OSError as err:
|
||||
status[dirname]='inactive'
|
||||
data.append([dirname , status[dirname], path[dirname]])
|
||||
|
||||
print(tabulate(data, headers=['Name', 'Status', 'Path']))
|
||||
|
||||
pg.add_command(pg_create)
|
||||
pg.add_command(pg_destroy)
|
||||
pg.add_command(pg_start)
|
||||
pg.add_command(pg_stop)
|
||||
pg.add_command(pg_list)
|
||||
|
||||
|
||||
|
||||
@click.group()
|
||||
def storage():
|
||||
"""Storage operations"""
|
||||
|
||||
@click.command(name='attach')
|
||||
@click.option('--name')
|
||||
def storage_attach(name):
|
||||
"""Attach the storage"""
|
||||
|
||||
@click.command(name='detach')
|
||||
@click.option('--name')
|
||||
@click.option('--force', is_flag=True, show_default=True)
|
||||
def storage_detach(name):
|
||||
"""Detach the storage"""
|
||||
|
||||
@click.command(name='list')
|
||||
def storage_list():
|
||||
"""List existing storages"""
|
||||
|
||||
storage.add_command(storage_attach)
|
||||
storage.add_command(storage_detach)
|
||||
storage.add_command(storage_list)
|
||||
|
||||
@click.group()
|
||||
def snapshot():
|
||||
"""Snapshot operations"""
|
||||
|
||||
@click.command(name='create')
|
||||
def snapshot_create():
|
||||
"""Create new snapshot"""
|
||||
|
||||
@click.command(name='destroy')
|
||||
def snapshot_destroy():
|
||||
"""Destroy the snapshot"""
|
||||
|
||||
@click.command(name='pull')
|
||||
def snapshot_pull():
|
||||
"""Pull remote snapshot"""
|
||||
|
||||
@click.command(name='push')
|
||||
def snapshot_push():
|
||||
"""Push snapshot to remote"""
|
||||
|
||||
@click.command(name='import')
|
||||
def snapshot_import():
|
||||
"""Convert given format to zenith snapshot"""
|
||||
|
||||
@click.command(name='export')
|
||||
def snapshot_export():
|
||||
"""Convert zenith snapshot to PostgreSQL compatible format"""
|
||||
|
||||
snapshot.add_command(snapshot_create)
|
||||
snapshot.add_command(snapshot_destroy)
|
||||
snapshot.add_command(snapshot_pull)
|
||||
snapshot.add_command(snapshot_push)
|
||||
snapshot.add_command(snapshot_import)
|
||||
snapshot.add_command(snapshot_export)
|
||||
|
||||
@click.group()
|
||||
def wal():
|
||||
"""WAL operations"""
|
||||
|
||||
@click.command()
|
||||
def wallist(name="list"):
|
||||
"""List WAL files"""
|
||||
|
||||
wal.add_command(wallist)
|
||||
|
||||
|
||||
@click.command()
|
||||
def console():
|
||||
"""Open web console"""
|
||||
|
||||
main.add_command(pg)
|
||||
main.add_command(storage)
|
||||
main.add_command(snapshot)
|
||||
main.add_command(wal)
|
||||
main.add_command(console)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
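The command groups above can be exercised in-process with click's test runner;
a minimal sketch, assuming this file is importable as zenith.py and that
zenith_base_dir already exists:

    from click.testing import CliRunner
    from zenith import main

    runner = CliRunner()
    print(runner.invoke(main, ["pg", "create", "--name", "demo", "--no-start"]).output)
    print(runner.invoke(main, ["pg", "list"]).output)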
|
||||
25 monitoring/docker-compose.yml Normal file
@@ -0,0 +1,25 @@
|
||||
version: "3"
|
||||
services:
|
||||
|
||||
prometheus:
|
||||
container_name: prometheus
|
||||
image: prom/prometheus:latest
|
||||
volumes:
|
||||
- ./prometheus.yaml:/etc/prometheus/prometheus.yml
|
||||
# ports:
|
||||
# - "9090:9090"
|
||||
# TODO: find a proper portable solution
|
||||
network_mode: "host"
|
||||
|
||||
grafana:
|
||||
image: grafana/grafana:latest
|
||||
volumes:
|
||||
- ./grafana.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
|
||||
environment:
|
||||
- GF_AUTH_ANONYMOUS_ENABLED=true
|
||||
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
|
||||
- GF_AUTH_DISABLE_LOGIN_FORM=true
|
||||
# ports:
|
||||
# - "3000:3000"
|
||||
# TODO: find a proper portable solution
|
||||
network_mode: "host"
|
||||
12 monitoring/grafana.yaml Normal file
@@ -0,0 +1,12 @@
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
orgId: 1
|
||||
url: http://localhost:9090
|
||||
basicAuth: false
|
||||
isDefault: false
|
||||
version: 1
|
||||
editable: false
|
||||
5 monitoring/prometheus.yaml Normal file
@@ -0,0 +1,5 @@
|
||||
scrape_configs:
|
||||
- job_name: 'default'
|
||||
scrape_interval: 10s
|
||||
static_configs:
|
||||
- targets: ['localhost:9898']
|
||||
@@ -7,38 +7,39 @@ edition = "2018"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
bookfile = "^0.3"
|
||||
chrono = "0.4.19"
|
||||
rand = "0.8.3"
|
||||
regex = "1.4.5"
|
||||
bytes = "1.0.1"
|
||||
bytes = { version = "1.0.1", features = ['serde'] }
|
||||
byteorder = "1.4.3"
|
||||
futures = "0.3.13"
|
||||
hyper = "0.14"
|
||||
lazy_static = "1.4.0"
|
||||
slog-stdlog = "4.1.0"
|
||||
slog-async = "2.6.0"
|
||||
slog-scope = "4.4.0"
|
||||
slog-term = "2.8.0"
|
||||
slog = "2.7.0"
|
||||
log = "0.4.14"
|
||||
clap = "2.33.0"
|
||||
termion = "1.5.6"
|
||||
tui = "0.14.0"
|
||||
daemonize = "0.4.1"
|
||||
rust-s3 = { git = "https://github.com/hlinnaka/rust-s3", rev="7f15a24ec7daa0a5d9516da706212745f9042818", features = ["no-verify-ssl"] }
|
||||
tokio = { version = "1.3.0", features = ["full"] }
|
||||
tokio-stream = { version = "0.1.4" }
|
||||
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="a0d067b66447951d1276a53fb09886539c3fa094" }
|
||||
postgres-types = { git = "https://github.com/zenithdb/rust-postgres.git", rev="a0d067b66447951d1276a53fb09886539c3fa094" }
|
||||
postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="a0d067b66447951d1276a53fb09886539c3fa094" }
|
||||
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="a0d067b66447951d1276a53fb09886539c3fa094" }
|
||||
rocksdb = "0.16.0"
|
||||
tokio = { version = "1.5.0", features = ["full"] }
|
||||
postgres-types = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
|
||||
postgres-protocol = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
|
||||
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
|
||||
routerify = "2"
|
||||
anyhow = "1.0"
|
||||
crc32c = "0.6.0"
|
||||
walkdir = "2"
|
||||
thiserror = "1.0"
|
||||
hex = "0.4.3"
|
||||
hex = { version = "0.4.3", features = ["serde"] }
|
||||
tar = "0.4.33"
|
||||
parse_duration = "*"
|
||||
humantime = "2.1.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
toml = "0.5"
|
||||
scopeguard = "1.1.0"
|
||||
|
||||
postgres_ffi = { path = "../postgres_ffi" }
|
||||
zenith_metrics = { path = "../zenith_metrics" }
|
||||
zenith_utils = { path = "../zenith_utils" }
|
||||
workspace_hack = { path = "../workspace_hack" }
|
||||
|
||||
@@ -1,91 +1,4 @@
|
||||
Page Server
|
||||
===========
|
||||
|
||||
|
||||
How to test
|
||||
-----------
|
||||
|
||||
|
||||
1. Compile and install Postgres from this repository (there are
|
||||
modifications, so vanilla Postgres won't do)
|
||||
|
||||
./configure --prefix=/home/heikki/zenith-install
|
||||
|
||||
2. Compile the page server
|
||||
|
||||
cd pageserver
|
||||
cargo build
|
||||
|
||||
3. Create another "dummy" cluster that will be used by the page server when it applies
|
||||
the WAL records. (shouldn't really need this, getting rid of it is a TODO):
|
||||
|
||||
/home/heikki/zenith-install/bin/initdb -D /data/zenith-dummy
|
||||
|
||||
|
||||
4. Initialize and start a new postgres cluster
|
||||
|
||||
/home/heikki/zenith-install/bin/initdb -D /data/zenith-test-db --username=postgres
|
||||
/home/heikki/zenith-install/bin/postgres -D /data/zenith-test-db
|
||||
|
||||
5. In another terminal, start the page server.
|
||||
|
||||
PGDATA=/data/zenith-dummy PATH=/home/heikki/zenith-install/bin:$PATH ./target/debug/pageserver
|
||||
|
||||
It should connect to the postgres instance using streaming replication, and print something
|
||||
like this:
|
||||
|
||||
$ PGDATA=/data/zenith-dummy PATH=/home/heikki/zenith-install/bin:$PATH ./target/debug/pageserver
|
||||
Starting WAL receiver
|
||||
connecting...
|
||||
Starting page server on 127.0.0.1:5430
|
||||
connected!
|
||||
page cache is empty
|
||||
|
||||
6. You can now open another terminal and issue DDL commands. Generated WAL records will
be streamed to the page server, and attached to the blocks that they apply to in its
page cache.
|
||||
|
||||
$ psql postgres -U postgres
|
||||
psql (14devel)
|
||||
Type "help" for help.
|
||||
|
||||
postgres=# create table mydata (i int4);
|
||||
CREATE TABLE
|
||||
postgres=# insert into mydata select g from generate_series(1,100) g;
|
||||
INSERT 0 100
|
||||
postgres=#
|
||||
|
||||
7. The GetPage@LSN interface to the compute nodes isn't working yet, but to simulate
|
||||
that, the page server generates a test GetPage@LSN call every 5 seconds on a random
|
||||
block that's in the page cache. In a few seconds, you should see output from that:
|
||||
|
||||
testing GetPage@LSN for block 0
|
||||
WAL record at LSN 23584576 initializes the page
|
||||
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167DF40
|
||||
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167DF80
|
||||
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167DFC0
|
||||
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167E018
|
||||
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167E058
|
||||
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167E098
|
||||
2021-03-19 11:03:13.791 EET [11439] LOG: applied WAL record at 0/167E0D8
|
||||
2021-03-19 11:03:13.792 EET [11439] LOG: applied WAL record at 0/167E118
|
||||
2021-03-19 11:03:13.792 EET [11439] LOG: applied WAL record at 0/167E158
|
||||
2021-03-19 11:03:13.792 EET [11439] LOG: applied WAL record at 0/167E198
|
||||
applied 10 WAL records to produce page image at LSN 18446744073709547246
|
||||
|
||||
|
||||
|
||||
Architecture
|
||||
============
|
||||
|
||||
The Page Server is responsible for all operations on a number of
|
||||
"chunks" of relation data. A chunk corresponds to a PostgreSQL
|
||||
relation segment (i.e. one max. 1 GB file in the data directory), but
|
||||
it holds all the different versions of every page in the segment that
|
||||
are still needed by the system.
|
||||
|
||||
Determining which chunk each Page Server holds is handled elsewhere. (TODO:
|
||||
currently, there is only one Page Server which holds all chunks)
|
||||
## Page server architecture
|
||||
|
||||
The Page Server has a few different duties:
|
||||
|
||||
@@ -95,10 +8,11 @@ The Page Server has a few different duties:
|
||||
- Backup to S3
|
||||
|
||||
|
||||
The Page Server consists of multiple threads that operate on a shared
|
||||
cache of page versions:
|
||||
|
||||
|
||||
The Page Server consists of multiple threads that operate on a shared
|
||||
repository of page versions:
|
||||
|
||||
| WAL
|
||||
V
|
||||
+--------------+
|
||||
@@ -110,16 +24,14 @@ cache of page versions:
|
||||
+---------+ .......... | |
|
||||
| | . . | |
|
||||
GetPage@LSN | | . backup . -------> | S3 |
|
||||
-------------> | Page | page cache . . | |
|
||||
-------------> | Page | repository . . | |
|
||||
| Service | .......... | |
|
||||
page | | +----+
|
||||
<------------- | |
|
||||
+---------+
|
||||
|
||||
...................................
|
||||
. .
|
||||
. Garbage Collection / Compaction .
|
||||
...................................
|
||||
+---------+ +--------------------+
|
||||
| Checkpointing / |
|
||||
| Garbage collection |
|
||||
+--------------------+
|
||||
|
||||
Legend:
|
||||
|
||||
@@ -139,7 +51,7 @@ Page Service
|
||||
------------
|
||||
|
||||
The Page Service listens for GetPage@LSN requests from the Compute Nodes,
|
||||
and responds with pages from the page cache.
|
||||
and responds with pages from the repository.
|
||||
|
||||
|
||||
WAL Receiver
|
||||
@@ -148,25 +60,59 @@ WAL Receiver
|
||||
The WAL receiver connects to the external WAL safekeeping service (or
|
||||
directly to the primary) using PostgreSQL physical streaming
|
||||
replication, and continuously receives WAL. It decodes the WAL records,
|
||||
and stores them to the page cache.
|
||||
and stores them to the repository.
|
||||
|
||||
|
||||
Page Cache
|
||||
Repository
|
||||
----------
|
||||
|
||||
The Page Cache is a data structure, to hold all the different page versions.
|
||||
It is accessed by all the other threads, to perform their duties.
|
||||
The repository stores all the page versions, or WAL records needed to
|
||||
reconstruct them. Each tenant has a separate Repository, which is
|
||||
stored in the .zenith/tenants/<tenantid> directory.
|
||||
|
||||
Currently, the page cache is implemented fully in-memory. TODO: Store it
|
||||
on disk. Define a file format.
|
||||
Repository is an abstract trait, defined in `repository.rs`. It is
|
||||
implemented by the LayeredRepository object in
|
||||
`layered_repository.rs`. There is only that one implementation of the
|
||||
Repository trait, but it's still a useful abstraction that keeps the
|
||||
interface for the low-level storage functionality clean. The layered
|
||||
storage format is described in layered_repository/README.md.
|
||||
|
||||
Each repository consists of multiple Timelines. Timeline is a
|
||||
workhorse that accepts page changes from the WAL, and serves
|
||||
get_page_at_lsn() and get_rel_size() requests. Note: this has nothing
|
||||
to do with PostgreSQL WAL timeline. The term "timeline" is mostly
|
||||
interchangeable with "branch", there is a one-to-one mapping from
|
||||
branch to timeline. A timeline has a unique ID within the tenant,
|
||||
represented as 16-byte hex string that never changes, whereas a
|
||||
branch is a user-given name for a timeline.
|
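As a rough illustration of the shape described above (not the actual definitions in `repository.rs`; the real traits have more methods and different signatures, and error handling is simplified here):

```rust
use std::sync::Arc;

pub struct Lsn(pub u64);
pub struct ZTimelineId(pub [u8; 16]); // 16-byte timeline ID, unique within a tenant

pub trait Timeline {
    /// Materialize one page as it was at `lsn`.
    fn get_page_at_lsn(&self, rel: u32, blknum: u32, lsn: Lsn) -> anyhow::Result<Vec<u8>>;
    /// Relation size, in blocks, at `lsn`.
    fn get_rel_size(&self, rel: u32, lsn: Lsn) -> anyhow::Result<u32>;
}

pub trait Repository {
    /// Look up a timeline (branch) by its ID.
    fn get_timeline(&self, timelineid: ZTimelineId) -> anyhow::Result<Arc<dyn Timeline>>;
    /// Create a new timeline branched off `src` at `at_lsn`.
    fn branch_timeline(&self, src: ZTimelineId, dst: ZTimelineId, at_lsn: Lsn) -> anyhow::Result<()>;
}
```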
||||
|
||||
Each repository also has a WAL redo manager associated with it, see
|
||||
`walredo.rs`. The WAL redo manager is used to replay PostgreSQL WAL
|
||||
records, whenever we need to reconstruct a page version from WAL to
|
||||
satisfy a GetPage@LSN request, or to avoid accumulating too much WAL
|
||||
for a page. The WAL redo manager uses a Postgres process running in
|
||||
special zenith wal-redo mode to do the actual WAL redo, and
|
||||
communicates with the process using a pipe.
|
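Schematically, satisfying a GetPage@LSN request through the WAL redo manager looks roughly like this; a sketch with simplified types, since the real `walredo.rs` signatures differ and the replay itself happens in the external wal-redo Postgres process:

```rust
// Sketch only. `WalRedoManager` is the trait name used in the page server,
// but the argument types here are simplified placeholders.
pub trait WalRedoManager {
    /// Apply a batch of WAL records for one page on top of `base_img`
    /// (or an all-zeros page if no base image exists) and return the result.
    fn request_redo(
        &self,
        base_img: Option<Vec<u8>>,
        records: Vec<Vec<u8>>,
    ) -> anyhow::Result<Vec<u8>>;
}

/// Reconstruct one page version: start from the newest stored page image at
/// or before the requested LSN, then replay every WAL record stored for that
/// page between the image and the requested LSN.
fn reconstruct_page(
    redo: &dyn WalRedoManager,
    base_img: Option<Vec<u8>>,
    records_since_base: Vec<Vec<u8>>,
) -> anyhow::Result<Vec<u8>> {
    redo.request_redo(base_img, records_since_base)
}
```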
||||
|
||||
|
||||
TODO: Garbage Collection / Compaction
|
||||
-------------------------------------
|
||||
Checkpointing / Garbage Collection
|
||||
----------------------------------
|
||||
|
||||
Periodically, the Garbage Collection / Compaction thread runs
|
||||
and applies pending WAL records, and removes old page versions that
|
||||
are no longer needed.
|
||||
Periodically, the checkpointer thread wakes up and performs housekeeping
|
||||
duties on the repository. It has two duties:
|
||||
|
||||
### Checkpointing
|
||||
|
||||
Flush WAL that has accumulated in memory to disk, so that the old WAL
|
||||
can be truncated away in the WAL safekeepers. Also, to free up memory
|
||||
for receiving new WAL. This process is called "checkpointing". It's
|
||||
similar to checkpointing in PostgreSQL or other DBMSs, but in the page
|
||||
server, checkpointing happens on a per-segment basis.
|
||||
|
||||
### Garbage collection
|
||||
|
||||
Remove old on-disk layer files that are no longer needed according to the
|
||||
PITR retention policy.
|
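Taken together, one iteration of the checkpointer thread can be sketched like this (illustrative only; the names, interval handling and error handling are placeholders, not the actual implementation):

```rust
use std::time::Duration;

// Placeholder for the per-tenant repository housekeeping interface.
trait Housekeeping {
    /// Flush in-memory WAL/page versions to on-disk layer files, per segment,
    /// so the safekeepers can truncate old WAL and memory is freed.
    fn checkpoint(&self) -> anyhow::Result<()>;
    /// Remove on-disk layer files that fall outside the PITR retention window.
    fn gc(&self, retention: Duration) -> anyhow::Result<()>;
}

fn checkpointer_loop(repo: &dyn Housekeeping, period: Duration, retention: Duration) {
    loop {
        if let Err(e) = repo.checkpoint() {
            eprintln!("checkpoint failed: {:#}", e);
        }
        if let Err(e) = repo.gc(retention) {
            eprintln!("gc failed: {:#}", e);
        }
        std::thread::sleep(period);
    }
}
```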
||||
|
||||
|
||||
TODO: Backup service
|
||||
@@ -177,3 +123,7 @@ The backup service is responsible for periodically pushing the chunks to S3.
|
||||
TODO: How/when do we restore from S3? Whenever we get a GetPage@LSN request for
|
||||
a chunk we don't currently have? Or when an external Control Plane tells us?
|
||||
|
||||
TODO: Sharding
|
||||
--------------------
|
||||
|
||||
We should be able to run multiple Page Servers that handle sharded data.
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# Set up a simple Compute Node + Page Server combination locally.
|
||||
#
|
||||
# NOTE: This doesn't clean up between invocations. You'll need to manually:
|
||||
#
|
||||
# - Kill any previous 'postgres' and 'pageserver' processes
|
||||
# - Clear the S3 bucket
|
||||
# - Remove the 'zenith-pgdata' directory
|
||||
|
||||
|
||||
set -e
|
||||
|
||||
# Set up some config.
|
||||
#
|
||||
# CHANGE THESE ACCORDING TO YOUR S3 INSTALLATION
|
||||
export S3_REGION=auto
|
||||
export S3_ENDPOINT=https://localhost:9000
|
||||
export S3_ACCESSKEY=minioadmin
|
||||
export S3_SECRET=pikkunen
|
||||
export S3_BUCKET=zenith-testbucket
|
||||
|
||||
|
||||
COMPUTE_NODE_PGDATA=zenith-pgdata
|
||||
|
||||
|
||||
# 1. Initialize a cluster.
|
||||
initdb -D $COMPUTE_NODE_PGDATA -U zenith
|
||||
|
||||
echo "port=65432" >> $COMPUTE_NODE_PGDATA/postgresql.conf
|
||||
echo "log_connections=on" >> $COMPUTE_NODE_PGDATA/postgresql.conf
|
||||
|
||||
# Use a small shared_buffers, so that we hit the Page Server more
|
||||
# easily.
|
||||
echo "shared_buffers = 1MB" >> $COMPUTE_NODE_PGDATA/postgresql.conf
|
||||
|
||||
# TODO: page server should use a replication slot, or some other mechanism
|
||||
# to make sure that the primary doesn't lose data that the page server still
|
||||
# needs. (The WAL safekeepers should ensure that)
|
||||
echo "wal_keep_size=10GB" >> $COMPUTE_NODE_PGDATA/postgresql.conf
|
||||
|
||||
# Tell the Postgres server how to connect to the Page Server
|
||||
echo "page_server_connstring='host=localhost port=5430'" >> $COMPUTE_NODE_PGDATA/postgresql.conf
|
||||
|
||||
|
||||
# 2. Run zenith_push to push a base backup of the database to an S3 bucket. The
|
||||
# Page Server will read it from there
|
||||
zenith_push -D $COMPUTE_NODE_PGDATA
|
||||
|
||||
|
||||
# 3. Launch page server
|
||||
rm -rf /tmp/pgdata-dummy
|
||||
initdb -N -D /tmp/pgdata-dummy
|
||||
PGDATA=/tmp/pgdata-dummy ./target/debug/pageserver &
|
||||
|
||||
# 4. Start up the Postgres server
|
||||
postgres -D $COMPUTE_NODE_PGDATA &
|
||||
|
||||
|
||||
echo "ALL SET! You can now connect to Postgres with something like:"
|
||||
echo ""
|
||||
echo 'psql "dbname=postgres host=localhost user=zenith port=65432"'
|
||||
@@ -1,158 +1,333 @@
|
||||
use crate::ZTimelineId;
|
||||
//!
|
||||
//! Generate a tarball with files needed to bootstrap ComputeNode.
|
||||
//!
|
||||
//! TODO: this module has nothing to do with PostgreSQL pg_basebackup.
|
||||
//! It could use a better name.
|
||||
//!
|
||||
//! Stateless Postgres compute node is launched by sending a tarball
|
||||
//! which contains non-relational data (multixacts, clog, filenodemaps, twophase files),
|
||||
//! generated pg_control and dummy segment of WAL.
|
||||
//! This module is responsible for creation of such tarball
|
||||
//! from data stored in object storage.
|
||||
//!
|
||||
use bytes::{BufMut, BytesMut};
|
||||
use log::*;
|
||||
use postgres_ffi::FilePathError;
|
||||
use regex::Regex;
|
||||
use std::io;
|
||||
use std::io::Write;
|
||||
use tar::Builder;
|
||||
use walkdir::WalkDir;
|
||||
use std::sync::Arc;
|
||||
use std::time::SystemTime;
|
||||
use tar::{Builder, EntryType, Header};
|
||||
|
||||
pub fn send_snapshot_tarball(
|
||||
write: &mut dyn Write,
|
||||
timelineid: ZTimelineId,
|
||||
snapshotlsn: u64,
|
||||
) -> Result<(), std::io::Error> {
|
||||
let mut ar = Builder::new(write);
|
||||
use crate::relish::*;
|
||||
use crate::repository::Timeline;
|
||||
use postgres_ffi::xlog_utils::*;
|
||||
use postgres_ffi::*;
|
||||
use zenith_utils::lsn::{Lsn, RecordLsn};
|
||||
|
||||
let snappath = format!("timelines/{}/snapshots/{:016X}", timelineid, snapshotlsn);
|
||||
let walpath = format!("timelines/{}/wal", timelineid);
|
||||
/// This is a short-lived object that exists only for the time of tarball creation,
|
||||
/// created mostly to avoid passing a lot of parameters between various functions
|
||||
/// used for constructing tarball.
|
||||
pub struct Basebackup<'a> {
|
||||
ar: Builder<&'a mut dyn Write>,
|
||||
timeline: &'a Arc<dyn Timeline>,
|
||||
lsn: Lsn,
|
||||
prev_record_lsn: Lsn,
|
||||
}
|
||||
|
||||
debug!("sending tarball of snapshot in {}", snappath);
|
||||
//ar.append_dir_all("", &snappath)?;
|
||||
// Create basebackup with non-rel data in it. Omit relational data.
|
||||
//
|
||||
// Currently we use empty lsn in two cases:
|
||||
// * During the basebackup right after timeline creation
|
||||
// * When working without safekeepers. In this situation it is important to match the lsn
|
||||
// we are taking basebackup on with the lsn that is used in pageserver's walreceiver
|
||||
// to start the replication.
|
||||
impl<'a> Basebackup<'a> {
|
||||
pub fn new(
|
||||
write: &'a mut dyn Write,
|
||||
timeline: &'a Arc<dyn Timeline>,
|
||||
req_lsn: Option<Lsn>,
|
||||
) -> Basebackup<'a> {
|
||||
// current_prev may be zero if we are at the start of timeline branched from old lsn
|
||||
let RecordLsn {
|
||||
last: current_last,
|
||||
prev: current_prev,
|
||||
} = timeline.get_last_record_rlsn();
|
||||
|
||||
for entry in WalkDir::new(&snappath) {
|
||||
let entry = entry?;
|
||||
let fullpath = entry.path();
|
||||
let relpath = entry.path().strip_prefix(&snappath).unwrap();
|
||||
|
||||
if relpath.to_str().unwrap() == "" {
|
||||
continue;
|
||||
}
|
||||
|
||||
if entry.file_type().is_dir() {
|
||||
trace!(
|
||||
"sending dir {} as {}",
|
||||
fullpath.display(),
|
||||
relpath.display()
|
||||
);
|
||||
ar.append_dir(relpath, fullpath)?;
|
||||
} else if entry.file_type().is_symlink() {
|
||||
error!("ignoring symlink in snapshot dir");
|
||||
} else if entry.file_type().is_file() {
|
||||
// Shared catalogs are exempt
|
||||
if relpath.starts_with("global/") {
|
||||
trace!("sending shared catalog {}", relpath.display());
|
||||
ar.append_path_with_name(fullpath, relpath)?;
|
||||
} else if !is_rel_file_path(relpath.to_str().unwrap()) {
|
||||
trace!("sending {}", relpath.display());
|
||||
ar.append_path_with_name(fullpath, relpath)?;
|
||||
// Compute postgres doesn't have any previous WAL files, but the first record that this
|
||||
// postgres is going to write needs to have the LSN of the previous record (xl_prev). So we are
|
||||
// writing prev_lsn to "zenith.signal" file so that postgres can read it during the start.
|
||||
// In some cases we don't know prev_lsn (branch or basebackup @old_lsn) so pass Lsn(0)
|
||||
// instead and accept the wrong xl_prev in these situations.
|
||||
let (backup_prev, backup_lsn) = if let Some(req_lsn) = req_lsn {
|
||||
if req_lsn > current_last {
|
||||
// FIXME: now wait_lsn() is inside of list_nonrels() so we don't have a way
|
||||
// to get it from there. It is better to wait just here.
|
||||
(Lsn(0), req_lsn)
|
||||
} else if req_lsn < current_last {
|
||||
// we don't know prev already. We don't currently use basebackup@old_lsn
|
||||
// but may use it for read only replicas in future
|
||||
(Lsn(0), req_lsn)
|
||||
} else {
|
||||
trace!("not sending {}", relpath.display());
|
||||
// FIXME: send all files for now
|
||||
ar.append_path_with_name(fullpath, relpath)?;
|
||||
// we are exactly at req_lsn and know prev
|
||||
(current_prev, req_lsn)
|
||||
}
|
||||
} else {
|
||||
error!("unknown file type: {}", fullpath.display());
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME: also send all the WAL
|
||||
for entry in std::fs::read_dir(&walpath)? {
|
||||
let entry = entry?;
|
||||
let fullpath = &entry.path();
|
||||
let relpath = fullpath.strip_prefix(&walpath).unwrap();
|
||||
|
||||
if !entry.path().is_file() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let archive_fname = relpath.to_str().unwrap();
|
||||
let archive_fname = archive_fname
|
||||
.strip_suffix(".partial")
|
||||
.unwrap_or(&archive_fname);
|
||||
let archive_path = "pg_wal/".to_owned() + archive_fname;
|
||||
ar.append_path_with_name(fullpath, archive_path)?;
|
||||
}
|
||||
|
||||
ar.finish()?;
|
||||
debug!("all tarred up!");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// formats:
|
||||
// <oid>
|
||||
// <oid>_<fork name>
|
||||
// <oid>.<segment number>
|
||||
// <oid>_<fork name>.<segment number>
|
||||
|
||||
fn parse_filename(fname: &str) -> Result<(u32, u8, u32), FilePathError> {
|
||||
let re = Regex::new(r"^(?P<relnode>\d+)(_(?P<forkname>[a-z]+))?(\.(?P<segno>\d+))?$").unwrap();
|
||||
|
||||
let caps = re
|
||||
.captures(fname)
|
||||
.ok_or_else(|| FilePathError::new("invalid relation data file name"))?;
|
||||
|
||||
let relnode_str = caps.name("relnode").unwrap().as_str();
|
||||
let relnode = u32::from_str_radix(relnode_str, 10)?;
|
||||
|
||||
let forkname = caps.name("forkname").map(|f| f.as_str());
|
||||
let forknum = postgres_ffi::forkname_to_forknum(forkname)?;
|
||||
|
||||
let segno_match = caps.name("segno");
|
||||
let segno = if segno_match.is_none() {
|
||||
0
|
||||
} else {
|
||||
u32::from_str_radix(segno_match.unwrap().as_str(), 10)?
|
||||
};
|
||||
|
||||
Ok((relnode, forknum, segno))
|
||||
}
|
||||
|
||||
fn parse_rel_file_path(path: &str) -> Result<(), FilePathError> {
|
||||
/*
|
||||
* Relation data files can be in one of the following directories:
|
||||
*
|
||||
* global/
|
||||
* shared relations
|
||||
*
|
||||
* base/<db oid>/
|
||||
* regular relations, default tablespace
|
||||
*
|
||||
* pg_tblspc/<tblspc oid>/<tblspc version>/
|
||||
* within a non-default tablespace (the name of the directory
|
||||
* depends on version)
|
||||
*
|
||||
* And the relation data files themselves have a filename like:
|
||||
*
|
||||
* <oid>.<segment number>
|
||||
*/
|
||||
if let Some(fname) = path.strip_prefix("global/") {
|
||||
let (_relnode, _forknum, _segno) = parse_filename(fname)?;
|
||||
|
||||
Ok(())
|
||||
} else if let Some(dbpath) = path.strip_prefix("base/") {
|
||||
let mut s = dbpath.split('/');
|
||||
let dbnode_str = s
|
||||
.next()
|
||||
.ok_or_else(|| FilePathError::new("invalid relation data file name"))?;
|
||||
let _dbnode = u32::from_str_radix(dbnode_str, 10)?;
|
||||
let fname = s
|
||||
.next()
|
||||
.ok_or_else(|| FilePathError::new("invalid relation data file name"))?;
|
||||
if s.next().is_some() {
|
||||
return Err(FilePathError::new("invalid relation data file name"));
|
||||
// None in req_lsn means that we are branching from the latest LSN
|
||||
(current_prev, current_last)
|
||||
};
|
||||
|
||||
let (_relnode, _forknum, _segno) = parse_filename(fname)?;
|
||||
info!(
|
||||
"taking basebackup lsn={}, prev_lsn={}",
|
||||
backup_prev, backup_lsn
|
||||
);
|
||||
|
||||
Basebackup {
|
||||
ar: Builder::new(write),
|
||||
timeline,
|
||||
lsn: backup_lsn,
|
||||
prev_record_lsn: backup_prev,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_tarball(&mut self) -> anyhow::Result<()> {
|
||||
// Create pgdata subdirs structure
|
||||
for dir in pg_constants::PGDATA_SUBDIRS.iter() {
|
||||
info!("send subdir {:?}", *dir);
|
||||
let header = new_tar_header_dir(*dir)?;
|
||||
self.ar.append(&header, &mut io::empty())?;
|
||||
}
|
||||
|
||||
// Send empty config files.
|
||||
for filepath in pg_constants::PGDATA_SPECIAL_FILES.iter() {
|
||||
if *filepath == "pg_hba.conf" {
|
||||
let data = pg_constants::PG_HBA.as_bytes();
|
||||
let header = new_tar_header(filepath, data.len() as u64)?;
|
||||
self.ar.append(&header, data)?;
|
||||
} else {
|
||||
let header = new_tar_header(filepath, 0)?;
|
||||
self.ar.append(&header, &mut io::empty())?;
|
||||
}
|
||||
}
|
||||
|
||||
// Gather non-relational files from object storage pages.
|
||||
for obj in self.timeline.list_nonrels(self.lsn)? {
|
||||
match obj {
|
||||
RelishTag::Slru { slru, segno } => {
|
||||
self.add_slru_segment(slru, segno)?;
|
||||
}
|
||||
RelishTag::FileNodeMap { spcnode, dbnode } => {
|
||||
self.add_relmap_file(spcnode, dbnode)?;
|
||||
}
|
||||
RelishTag::TwoPhase { xid } => {
|
||||
self.add_twophase_file(xid)?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
// Generate pg_control and bootstrap WAL segment.
|
||||
self.add_pgcontrol_file()?;
|
||||
self.ar.finish()?;
|
||||
debug!("all tarred up!");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
//
|
||||
// Generate SLRU segment files from repository.
|
||||
//
|
||||
fn add_slru_segment(&mut self, slru: SlruKind, segno: u32) -> anyhow::Result<()> {
|
||||
let seg_size = self
|
||||
.timeline
|
||||
.get_relish_size(RelishTag::Slru { slru, segno }, self.lsn)?;
|
||||
|
||||
if seg_size == None {
|
||||
trace!(
|
||||
"SLRU segment {}/{:>04X} was truncated",
|
||||
slru.to_str(),
|
||||
segno
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let nblocks = seg_size.unwrap();
|
||||
|
||||
let mut slru_buf: Vec<u8> =
|
||||
Vec::with_capacity(nblocks as usize * pg_constants::BLCKSZ as usize);
|
||||
for blknum in 0..nblocks {
|
||||
let img = self.timeline.get_page_at_lsn_nowait(
|
||||
RelishTag::Slru { slru, segno },
|
||||
blknum,
|
||||
self.lsn,
|
||||
)?;
|
||||
assert!(img.len() == pg_constants::BLCKSZ as usize);
|
||||
|
||||
slru_buf.extend_from_slice(&img);
|
||||
}
|
||||
|
||||
let segname = format!("{}/{:>04X}", slru.to_str(), segno);
|
||||
let header = new_tar_header(&segname, slru_buf.len() as u64)?;
|
||||
self.ar.append(&header, slru_buf.as_slice())?;
|
||||
|
||||
trace!("Added to basebackup slru {} relsize {}", segname, nblocks);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
//
|
||||
// Extract pg_filenode.map files from repository
|
||||
// Along with them also send PG_VERSION for each database.
|
||||
//
|
||||
fn add_relmap_file(&mut self, spcnode: u32, dbnode: u32) -> anyhow::Result<()> {
|
||||
let img = self.timeline.get_page_at_lsn_nowait(
|
||||
RelishTag::FileNodeMap { spcnode, dbnode },
|
||||
0,
|
||||
self.lsn,
|
||||
)?;
|
||||
let path = if spcnode == pg_constants::GLOBALTABLESPACE_OID {
|
||||
let version_bytes = pg_constants::PG_MAJORVERSION.as_bytes();
|
||||
let header = new_tar_header("PG_VERSION", version_bytes.len() as u64)?;
|
||||
self.ar.append(&header, version_bytes)?;
|
||||
|
||||
let header = new_tar_header("global/PG_VERSION", version_bytes.len() as u64)?;
|
||||
self.ar.append(&header, version_bytes)?;
|
||||
|
||||
String::from("global/pg_filenode.map") // filenode map for global tablespace
|
||||
} else {
|
||||
// User defined tablespaces are not supported
|
||||
assert!(spcnode == pg_constants::DEFAULTTABLESPACE_OID);
|
||||
|
||||
// Append dir path for each database
|
||||
let path = format!("base/{}", dbnode);
|
||||
let header = new_tar_header_dir(&path)?;
|
||||
self.ar.append(&header, &mut io::empty())?;
|
||||
|
||||
let dst_path = format!("base/{}/PG_VERSION", dbnode);
|
||||
let version_bytes = pg_constants::PG_MAJORVERSION.as_bytes();
|
||||
let header = new_tar_header(&dst_path, version_bytes.len() as u64)?;
|
||||
self.ar.append(&header, version_bytes)?;
|
||||
|
||||
format!("base/{}/pg_filenode.map", dbnode)
|
||||
};
|
||||
assert!(img.len() == 512);
|
||||
let header = new_tar_header(&path, img.len() as u64)?;
|
||||
self.ar.append(&header, &img[..])?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
//
|
||||
// Extract twophase state files
|
||||
//
|
||||
fn add_twophase_file(&mut self, xid: TransactionId) -> anyhow::Result<()> {
|
||||
let img = self
|
||||
.timeline
|
||||
.get_page_at_lsn_nowait(RelishTag::TwoPhase { xid }, 0, self.lsn)?;
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
buf.extend_from_slice(&img[..]);
|
||||
let crc = crc32c::crc32c(&img[..]);
|
||||
buf.put_u32_le(crc);
|
||||
let path = format!("pg_twophase/{:>08X}", xid);
|
||||
let header = new_tar_header(&path, buf.len() as u64)?;
|
||||
self.ar.append(&header, &buf[..])?;
|
||||
|
||||
Ok(())
|
||||
} else if let Some(_) = path.strip_prefix("pg_tblspc/") {
|
||||
// TODO
|
||||
Err(FilePathError::new("tablespaces not supported"))
|
||||
} else {
|
||||
Err(FilePathError::new("invalid relation data file name"))
|
||||
}
|
||||
|
||||
//
|
||||
// Add generated pg_control file and bootstrap WAL segment.
|
||||
// Also send zenith.signal file with extra bootstrap data.
|
||||
//
|
||||
fn add_pgcontrol_file(&mut self) -> anyhow::Result<()> {
|
||||
let checkpoint_bytes =
|
||||
self.timeline
|
||||
.get_page_at_lsn_nowait(RelishTag::Checkpoint, 0, self.lsn)?;
|
||||
let pg_control_bytes =
|
||||
self.timeline
|
||||
.get_page_at_lsn_nowait(RelishTag::ControlFile, 0, self.lsn)?;
|
||||
let mut pg_control = ControlFileData::decode(&pg_control_bytes)?;
|
||||
let mut checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
|
||||
|
||||
// Generate new pg_control and WAL needed for bootstrap
|
||||
let checkpoint_segno = self.lsn.segment_number(pg_constants::WAL_SEGMENT_SIZE);
|
||||
let checkpoint_lsn = XLogSegNoOffsetToRecPtr(
|
||||
checkpoint_segno,
|
||||
XLOG_SIZE_OF_XLOG_LONG_PHD as u32,
|
||||
pg_constants::WAL_SEGMENT_SIZE,
|
||||
);
|
||||
checkpoint.redo = normalize_lsn(self.lsn, pg_constants::WAL_SEGMENT_SIZE).0;
|
||||
|
||||
//reset some fields we don't want to preserve
|
||||
//TODO Check this.
|
||||
//We may need to determine the value from twophase data.
|
||||
checkpoint.oldestActiveXid = 0;
|
||||
|
||||
//save new values in pg_control
|
||||
pg_control.checkPoint = checkpoint_lsn;
|
||||
pg_control.checkPointCopy = checkpoint;
|
||||
pg_control.state = pg_constants::DB_SHUTDOWNED;
|
||||
|
||||
// add zenith.signal file
|
||||
let xl_prev = if self.prev_record_lsn == Lsn(0) {
|
||||
0xBAD0 // magic value to indicate that we don't know prev_lsn
|
||||
} else {
|
||||
self.prev_record_lsn.0
|
||||
};
|
||||
self.ar.append(
|
||||
&new_tar_header("zenith.signal", 8)?,
|
||||
&xl_prev.to_le_bytes()[..],
|
||||
)?;
|
||||
|
||||
//send pg_control
|
||||
let pg_control_bytes = pg_control.encode();
|
||||
let header = new_tar_header("global/pg_control", pg_control_bytes.len() as u64)?;
|
||||
self.ar.append(&header, &pg_control_bytes[..])?;
|
||||
|
||||
//send wal segment
|
||||
let wal_file_name = XLogFileName(
|
||||
1, // FIXME: always use Postgres timeline 1
|
||||
checkpoint_segno,
|
||||
pg_constants::WAL_SEGMENT_SIZE,
|
||||
);
|
||||
let wal_file_path = format!("pg_wal/{}", wal_file_name);
|
||||
let header = new_tar_header(&wal_file_path, pg_constants::WAL_SEGMENT_SIZE as u64)?;
|
||||
let wal_seg = generate_wal_segment(&pg_control);
|
||||
assert!(wal_seg.len() == pg_constants::WAL_SEGMENT_SIZE);
|
||||
self.ar.append(&header, &wal_seg[..])?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn is_rel_file_path(path: &str) -> bool {
|
||||
parse_rel_file_path(path).is_ok()
|
||||
//
|
||||
// Create new tarball entry header
|
||||
//
|
||||
fn new_tar_header(path: &str, size: u64) -> anyhow::Result<Header> {
|
||||
let mut header = Header::new_gnu();
|
||||
header.set_size(size);
|
||||
header.set_path(path)?;
|
||||
header.set_mode(0b110000000); // -rw-------
|
||||
header.set_mtime(
|
||||
// use current time as last modified time
|
||||
SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs(),
|
||||
);
|
||||
header.set_cksum();
|
||||
Ok(header)
|
||||
}
|
||||
|
||||
fn new_tar_header_dir(path: &str) -> anyhow::Result<Header> {
|
||||
let mut header = Header::new_gnu();
|
||||
header.set_size(0);
|
||||
header.set_path(path)?;
|
||||
header.set_mode(0o755); // drwxr-xr-x
|
||||
header.set_entry_type(EntryType::dir());
|
||||
header.set_mtime(
|
||||
// use current time as last modified time
|
||||
SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_secs(),
|
||||
);
|
||||
header.set_cksum();
|
||||
Ok(header)
|
||||
}
|
||||
|
||||
25
pageserver/src/bin/dump_layerfile.rs
Normal file
@@ -0,0 +1,25 @@
|
||||
//! Main entry point for the dump_layerfile executable
|
||||
//!
|
||||
//! A handy tool for debugging, that's all.
|
||||
use anyhow::Result;
|
||||
use clap::{App, Arg};
|
||||
use pageserver::layered_repository::dump_layerfile_from_path;
|
||||
use std::path::PathBuf;
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let arg_matches = App::new("Zenith dump_layerfile utility")
|
||||
.about("Dump contents of one layer file, for debugging")
|
||||
.arg(
|
||||
Arg::with_name("path")
|
||||
.help("Path to file to dump")
|
||||
.required(true)
|
||||
.index(1),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let path = PathBuf::from(arg_matches.value_of("path").unwrap());
|
||||
|
||||
dump_layerfile_from_path(&path)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -3,25 +3,154 @@
|
||||
//
|
||||
|
||||
use log::*;
|
||||
use parse_duration::parse;
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::io;
|
||||
use std::process::exit;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
env,
|
||||
net::TcpListener,
|
||||
path::{Path, PathBuf},
|
||||
process::exit,
|
||||
str::FromStr,
|
||||
thread,
|
||||
time::Duration,
|
||||
};
|
||||
use zenith_utils::{auth::JwtAuth, postgres_backend::AuthType};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use clap::{App, Arg};
|
||||
use anyhow::{ensure, Result};
|
||||
use clap::{App, Arg, ArgMatches};
|
||||
use daemonize::Daemonize;
|
||||
|
||||
use slog::Drain;
|
||||
use pageserver::{branches, http, logger, page_service, tenant_mgr, PageServerConf};
|
||||
use zenith_utils::http::endpoint;
|
||||
|
||||
use pageserver::{page_service, tui, zenith_repo_dir, PageServerConf};
|
||||
const DEFAULT_LISTEN_ADDR: &str = "127.0.0.1:64000";
|
||||
const DEFAULT_HTTP_ENDPOINT_ADDR: &str = "127.0.0.1:9898";
|
||||
|
||||
const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024;
|
||||
const DEFAULT_GC_PERIOD_SEC: u64 = 10;
|
||||
//const DEFAULT_GC_HORIZON: u64 = 1024 * 1024 * 1024;
|
||||
//const DEFAULT_GC_PERIOD_SEC: u64 = 600;
|
||||
const DEFAULT_GC_PERIOD: Duration = Duration::from_secs(10);
|
||||
|
||||
const DEFAULT_SUPERUSER: &str = "zenith_admin";
|
||||
|
||||
/// String arguments that can be declared via CLI or config file
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct CfgFileParams {
|
||||
listen_addr: Option<String>,
|
||||
http_endpoint_addr: Option<String>,
|
||||
gc_horizon: Option<String>,
|
||||
gc_period: Option<String>,
|
||||
pg_distrib_dir: Option<String>,
|
||||
auth_validation_public_key_path: Option<String>,
|
||||
auth_type: Option<String>,
|
||||
}
|
||||
|
||||
impl CfgFileParams {
|
||||
/// Extract string arguments from CLI
|
||||
fn from_args(arg_matches: &ArgMatches) -> Self {
|
||||
let get_arg = |arg_name: &str| -> Option<String> {
|
||||
arg_matches.value_of(arg_name).map(str::to_owned)
|
||||
};
|
||||
|
||||
Self {
|
||||
listen_addr: get_arg("listen"),
|
||||
http_endpoint_addr: get_arg("http_endpoint"),
|
||||
gc_horizon: get_arg("gc_horizon"),
|
||||
gc_period: get_arg("gc_period"),
|
||||
pg_distrib_dir: get_arg("postgres-distrib"),
|
||||
auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
|
||||
auth_type: get_arg("auth-type"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Fill missing values in `self` with `other`
|
||||
fn or(self, other: CfgFileParams) -> Self {
|
||||
// TODO cleaner way to do this
|
||||
Self {
|
||||
listen_addr: self.listen_addr.or(other.listen_addr),
|
||||
http_endpoint_addr: self.http_endpoint_addr.or(other.http_endpoint_addr),
|
||||
gc_horizon: self.gc_horizon.or(other.gc_horizon),
|
||||
gc_period: self.gc_period.or(other.gc_period),
|
||||
pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
|
||||
auth_validation_public_key_path: self
|
||||
.auth_validation_public_key_path
|
||||
.or(other.auth_validation_public_key_path),
|
||||
auth_type: self.auth_type.or(other.auth_type),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a PageServerConf from these string parameters
|
||||
fn try_into_config(&self) -> Result<PageServerConf> {
|
||||
let workdir = PathBuf::from(".");
|
||||
|
||||
let listen_addr = match self.listen_addr.as_ref() {
|
||||
Some(addr) => addr.clone(),
|
||||
None => DEFAULT_LISTEN_ADDR.to_owned(),
|
||||
};
|
||||
|
||||
let http_endpoint_addr = match self.http_endpoint_addr.as_ref() {
|
||||
Some(addr) => addr.clone(),
|
||||
None => DEFAULT_HTTP_ENDPOINT_ADDR.to_owned(),
|
||||
};
|
||||
|
||||
let gc_horizon: u64 = match self.gc_horizon.as_ref() {
|
||||
Some(horizon_str) => horizon_str.parse()?,
|
||||
None => DEFAULT_GC_HORIZON,
|
||||
};
|
||||
let gc_period = match self.gc_period.as_ref() {
|
||||
Some(period_str) => humantime::parse_duration(period_str)?,
|
||||
None => DEFAULT_GC_PERIOD,
|
||||
};
|
||||
|
||||
let pg_distrib_dir = match self.pg_distrib_dir.as_ref() {
|
||||
Some(pg_distrib_dir_str) => PathBuf::from(pg_distrib_dir_str),
|
||||
None => env::current_dir()?.join("tmp_install"),
|
||||
};
|
||||
|
||||
let auth_validation_public_key_path = self
|
||||
.auth_validation_public_key_path
|
||||
.as_ref()
|
||||
.map(PathBuf::from);
|
||||
|
||||
let auth_type = self
|
||||
.auth_type
|
||||
.as_ref()
|
||||
.map_or(Ok(AuthType::Trust), |auth_type| {
|
||||
AuthType::from_str(auth_type)
|
||||
})?;
|
||||
|
||||
if !pg_distrib_dir.join("bin/postgres").exists() {
|
||||
anyhow::bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
|
||||
}
|
||||
|
||||
if auth_type == AuthType::ZenithJWT {
|
||||
ensure!(
|
||||
auth_validation_public_key_path.is_some(),
|
||||
"Missing auth_validation_public_key_path when auth_type is ZenithJWT"
|
||||
);
|
||||
let path_ref = auth_validation_public_key_path.as_ref().unwrap();
|
||||
ensure!(
|
||||
path_ref.exists(),
|
||||
format!("Can't find auth_validation_public_key at {:?}", path_ref)
|
||||
);
|
||||
}
|
||||
|
||||
Ok(PageServerConf {
|
||||
daemonize: false,
|
||||
|
||||
listen_addr,
|
||||
http_endpoint_addr,
|
||||
gc_horizon,
|
||||
gc_period,
|
||||
|
||||
superuser: String::from(DEFAULT_SUPERUSER),
|
||||
|
||||
workdir,
|
||||
|
||||
pg_distrib_dir,
|
||||
|
||||
auth_validation_public_key_path,
|
||||
auth_type,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let arg_matches = App::new("Zenith page server")
|
||||
@@ -33,13 +162,6 @@ fn main() -> Result<()> {
|
||||
.takes_value(true)
|
||||
.help("listen for incoming page requests on ip:port (default: 127.0.0.1:5430)"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("interactive")
|
||||
.short("i")
|
||||
.long("interactive")
|
||||
.takes_value(false)
|
||||
.help("Interactive mode"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("daemonize")
|
||||
.short("d")
|
||||
@@ -47,6 +169,12 @@ fn main() -> Result<()> {
|
||||
.takes_value(false)
|
||||
.help("Run in the background"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("init")
|
||||
.long("init")
|
||||
.takes_value(false)
|
||||
.help("Initialize pageserver repo"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("gc_horizon")
|
||||
.long("gc_horizon")
|
||||
@@ -59,90 +187,111 @@ fn main() -> Result<()> {
|
||||
.takes_value(true)
|
||||
.help("Interval between garbage collector iterations"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("workdir")
|
||||
.short("D")
|
||||
.long("workdir")
|
||||
.takes_value(true)
|
||||
.help("Working directory for the pageserver"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("postgres-distrib")
|
||||
.long("postgres-distrib")
|
||||
.takes_value(true)
|
||||
.help("Postgres distribution directory"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("create-tenant")
|
||||
.long("create-tenant")
|
||||
.takes_value(true)
|
||||
.help("Create tenant during init")
|
||||
.requires("init"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("auth-validation-public-key-path")
|
||||
.long("auth-validation-public-key-path")
|
||||
.takes_value(true)
|
||||
.help("Path to public key used to validate jwt signature"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("auth-type")
|
||||
.long("auth-type")
|
||||
.takes_value(true)
|
||||
.help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
let mut conf = PageServerConf {
|
||||
daemonize: false,
|
||||
interactive: false,
|
||||
gc_horizon: DEFAULT_GC_HORIZON,
|
||||
gc_period: Duration::from_secs(DEFAULT_GC_PERIOD_SEC),
|
||||
listen_addr: "127.0.0.1:5430".parse().unwrap(),
|
||||
let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith"));
|
||||
let cfg_file_path = workdir.canonicalize()?.join("pageserver.toml");
|
||||
|
||||
let args_params = CfgFileParams::from_args(&arg_matches);
|
||||
|
||||
let init = arg_matches.is_present("init");
|
||||
let create_tenant = arg_matches.value_of("create-tenant");
|
||||
|
||||
let params = if init {
|
||||
// We're initializing the repo, so there's no config file yet
|
||||
args_params
|
||||
} else {
|
||||
// Supplement the CLI arguments with the config file
|
||||
let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)?;
|
||||
let file_params: CfgFileParams = toml::from_str(&cfg_file_contents)?;
|
||||
args_params.or(file_params)
|
||||
};
|
||||
|
||||
if arg_matches.is_present("daemonize") {
|
||||
conf.daemonize = true;
|
||||
}
|
||||
// Set CWD to workdir for non-daemon modes
|
||||
env::set_current_dir(&workdir)?;
|
||||
|
||||
if arg_matches.is_present("interactive") {
|
||||
conf.interactive = true;
|
||||
}
|
||||
// Ensure the config is valid, even if just init-ing
|
||||
let mut conf = params.try_into_config()?;
|
||||
|
||||
if conf.daemonize && conf.interactive {
|
||||
eprintln!("--daemonize is not allowed with --interactive: choose one");
|
||||
conf.daemonize = arg_matches.is_present("daemonize");
|
||||
|
||||
if init && conf.daemonize {
|
||||
eprintln!("--daemonize cannot be used with --init");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if let Some(addr) = arg_matches.value_of("listen") {
|
||||
conf.listen_addr = addr.parse()?;
|
||||
// The configuration is all set up now. Turn it into a 'static
|
||||
// that can be freely stored in structs and passed across threads
|
||||
// as a ref.
|
||||
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
|
||||
|
||||
// Create repo and exit if init was requested
|
||||
if init {
|
||||
branches::init_pageserver(conf, create_tenant)?;
|
||||
// write the config file
|
||||
let cfg_file_contents = toml::to_string_pretty(¶ms)?;
|
||||
// TODO support enable-auth flag
|
||||
std::fs::write(&cfg_file_path, cfg_file_contents)?;
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Some(horizon) = arg_matches.value_of("gc_horizon") {
|
||||
conf.gc_horizon = horizon.parse()?;
|
||||
}
|
||||
|
||||
if let Some(period) = arg_matches.value_of("gc_period") {
|
||||
conf.gc_period = parse(period)?;
|
||||
}
|
||||
|
||||
start_pageserver(&conf)
|
||||
start_pageserver(conf)
|
||||
}
|
||||
|
||||
fn start_pageserver(conf: &PageServerConf) -> Result<()> {
|
||||
fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
|
||||
// Initialize logger
|
||||
let _scope_guard = init_logging(&conf)?;
|
||||
let (_scope_guard, log_file) = logger::init_logging(conf, "pageserver.log")?;
|
||||
let _log_guard = slog_stdlog::init()?;
|
||||
|
||||
// Note: this `info!(...)` macro comes from `log` crate
|
||||
info!("standard logging redirected to slog");
|
||||
|
||||
let tui_thread: Option<thread::JoinHandle<()>>;
|
||||
if conf.interactive {
|
||||
// Initialize the UI
|
||||
tui_thread = Some(
|
||||
thread::Builder::new()
|
||||
.name("UI thread".into())
|
||||
.spawn(|| {
|
||||
let _ = tui::ui_main();
|
||||
})
|
||||
.unwrap(),
|
||||
);
|
||||
//threads.push(tui_thread);
|
||||
} else {
|
||||
tui_thread = None;
|
||||
}
|
||||
// TODO: Check that it looks like a valid repository before going further
|
||||
|
||||
if conf.daemonize {
|
||||
info!("daemonizing...");
|
||||
|
||||
let repodir = zenith_repo_dir();
|
||||
|
||||
// There should'n be any logging to stdin/stdout. Redirect it to the main log so
|
||||
// There shouldn't be any logging to stdin/stdout. Redirect it to the main log so
|
||||
// that we will see any accidental manual fprintf's or backtraces.
|
||||
let log_filename = repodir.join("pageserver.log");
|
||||
let stdout = OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&log_filename)
|
||||
.with_context(|| format!("failed to open {:?}", &log_filename))?;
|
||||
let stderr = OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&log_filename)
|
||||
.with_context(|| format!("failed to open {:?}", &log_filename))?;
|
||||
let stdout = log_file.try_clone().unwrap();
|
||||
let stderr = log_file;
|
||||
|
||||
let daemonize = Daemonize::new()
|
||||
.pid_file(repodir.join("pageserver.pid"))
|
||||
.working_directory(repodir)
|
||||
.pid_file("pageserver.pid")
|
||||
.working_directory(".")
|
||||
.stdout(stdout)
|
||||
.stderr(stderr);
|
||||
|
||||
@@ -150,95 +299,47 @@ fn start_pageserver(conf: &PageServerConf) -> Result<()> {
|
||||
Ok(_) => info!("Success, daemonized"),
|
||||
Err(e) => error!("Error, {}", e),
|
||||
}
|
||||
} else {
|
||||
// change into the repository directory. In daemon mode, Daemonize
|
||||
// does this for us.
|
||||
let repodir = zenith_repo_dir();
|
||||
std::env::set_current_dir(&repodir)?;
|
||||
info!("Changed current directory to repository in {:?}", &repodir);
|
||||
}
|
||||
|
||||
let mut threads = Vec::new();
|
||||
// initialize authentication for incoming connections
|
||||
let auth = match &conf.auth_type {
|
||||
AuthType::Trust | AuthType::MD5 => None,
|
||||
AuthType::ZenithJWT => {
|
||||
// unwrap is ok because check is performed when creating config, so path is set and file exists
|
||||
let key_path = conf.auth_validation_public_key_path.as_ref().unwrap();
|
||||
Some(JwtAuth::from_key_path(key_path)?.into())
|
||||
}
|
||||
};
|
||||
info!("Using auth: {:#?}", conf.auth_type);
|
||||
|
||||
// TODO: Check that it looks like a valid repository before going further
|
||||
// Spawn a new thread for the http endpoint
|
||||
let cloned = auth.clone();
|
||||
thread::Builder::new()
|
||||
.name("http_endpoint_thread".into())
|
||||
.spawn(move || {
|
||||
let router = http::make_router(conf, cloned);
|
||||
endpoint::serve_thread_main(router, conf.http_endpoint_addr.clone())
|
||||
})?;
|
||||
|
||||
// Create directory for wal-redo datadirs
|
||||
match fs::create_dir("wal-redo") {
|
||||
Ok(_) => {}
|
||||
Err(e) => match e.kind() {
|
||||
io::ErrorKind::AlreadyExists => {}
|
||||
_ => {
|
||||
anyhow::bail!("Failed to create wal-redo data directory: {}", e);
|
||||
}
|
||||
},
|
||||
}
|
||||
// Check that we can bind to address before starting threads to simplify shutdown
|
||||
// sequence if port is occupied.
|
||||
info!("Starting pageserver on {}", conf.listen_addr);
|
||||
let pageserver_listener = TcpListener::bind(conf.listen_addr.clone())?;
|
||||
|
||||
// GetPage@LSN requests are served by another thread. (It uses async I/O,
|
||||
// but the code in page_service sets up its own thread pool for that)
|
||||
let conf_copy = conf.clone();
|
||||
let page_server_thread = thread::Builder::new()
|
||||
// Initialize tenant manager.
|
||||
tenant_mgr::init(conf);
|
||||
|
||||
// Spawn a thread to listen for connections. It will spawn further threads
|
||||
// for each connection.
|
||||
let page_service_thread = thread::Builder::new()
|
||||
.name("Page Service thread".into())
|
||||
.spawn(move || {
|
||||
// thread code
|
||||
page_service::thread_main(&conf_copy);
|
||||
})
|
||||
.unwrap();
|
||||
threads.push(page_server_thread);
|
||||
page_service::thread_main(conf, auth, pageserver_listener, conf.auth_type)
|
||||
})?;
|
||||
|
||||
page_service_thread
|
||||
.join()
|
||||
.expect("Page service thread has panicked")?;
|
||||
|
||||
if let Some(tui_thread) = tui_thread {
|
||||
// The TUI thread exits when the user asks to Quit.
|
||||
tui_thread.join().unwrap();
|
||||
} else {
|
||||
// In non-interactive mode, wait forever.
|
||||
for t in threads {
|
||||
t.join().unwrap()
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn init_logging(conf: &PageServerConf) -> Result<slog_scope::GlobalLoggerGuard, io::Error> {
|
||||
if conf.interactive {
|
||||
Ok(tui::init_logging())
|
||||
} else if conf.daemonize {
|
||||
let log = zenith_repo_dir().join("pageserver.log");
|
||||
let log_file = OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&log)
|
||||
.map_err(|err| {
|
||||
// We failed to initialize logging, so we can't log this message with error!
|
||||
eprintln!("Could not create log file {:?}: {}", log, err);
|
||||
err
|
||||
})?;
|
||||
let decorator = slog_term::PlainSyncDecorator::new(log_file);
|
||||
let drain = slog_term::CompactFormat::new(decorator).build();
|
||||
let drain = slog::Filter::new(drain, |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Info) {
|
||||
return true;
|
||||
}
|
||||
false
|
||||
});
|
||||
let drain = std::sync::Mutex::new(drain).fuse();
|
||||
let logger = slog::Logger::root(drain, slog::o!());
|
||||
Ok(slog_scope::set_global_logger(logger))
|
||||
} else {
|
||||
let decorator = slog_term::TermDecorator::new().build();
|
||||
let drain = slog_term::FullFormat::new(decorator).build().fuse();
|
||||
let drain = slog_async::Async::new(drain).chan_size(1000).build().fuse();
|
||||
let drain = slog::Filter::new(drain, |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Info) {
|
||||
return true;
|
||||
}
|
||||
if record.level().is_at_least(slog::Level::Debug)
|
||||
&& record.module().starts_with("pageserver")
|
||||
{
|
||||
return true;
|
||||
}
|
||||
false
|
||||
})
|
||||
.fuse();
|
||||
let logger = slog::Logger::root(drain, slog::o!());
|
||||
Ok(slog_scope::set_global_logger(logger))
|
||||
}
|
||||
}
|
||||
|
||||
398
pageserver/src/branches.rs
Normal file
@@ -0,0 +1,398 @@
|
||||
//!
|
||||
//! Branch management code
|
||||
//!
|
||||
// TODO: move all paths construction to conf impl
|
||||
//
|
||||
|
||||
use anyhow::{bail, ensure, Context, Result};
|
||||
use postgres_ffi::ControlFileData;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
fs,
|
||||
path::Path,
|
||||
process::{Command, Stdio},
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
};
|
||||
use zenith_utils::zid::{ZTenantId, ZTimelineId};
|
||||
|
||||
use log::*;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
use crate::logger;
|
||||
use crate::restore_local_repo;
|
||||
use crate::tenant_mgr;
|
||||
use crate::walredo::WalRedoManager;
|
||||
use crate::{repository::Repository, PageServerConf};
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone)]
|
||||
pub struct BranchInfo {
|
||||
pub name: String,
|
||||
#[serde(with = "hex")]
|
||||
pub timeline_id: ZTimelineId,
|
||||
pub latest_valid_lsn: Option<Lsn>,
|
||||
pub ancestor_id: Option<String>,
|
||||
pub ancestor_lsn: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct PointInTime {
|
||||
pub timelineid: ZTimelineId,
|
||||
pub lsn: Lsn,
|
||||
}
|
||||
|
||||
pub fn init_pageserver(conf: &'static PageServerConf, create_tenant: Option<&str>) -> Result<()> {
|
||||
// Initialize logger
|
||||
let (_scope_guard, _log_file) = logger::init_logging(conf, "pageserver.log")?;
|
||||
let _log_guard = slog_stdlog::init()?;
|
||||
|
||||
// We don't use the real WAL redo manager, because we don't want to spawn the WAL redo
|
||||
// process during repository initialization.
|
||||
//
|
||||
// FIXME: That caused trouble, because the WAL redo manager spawned a thread that launched
|
||||
// initdb in the background, and it kept running even after the "zenith init" had exited.
|
||||
// In tests, we started the page server immediately after that, so that initdb was still
|
||||
// running in the background, and we failed to run initdb again in the same directory. This
|
||||
// has been solved for the rapid init+start case now, but the general race condition remains
|
||||
// if you restart the server quickly. The WAL redo manager doesn't use a separate thread
|
||||
// anymore, but I think that could still happen.
|
||||
let dummy_redo_mgr = Arc::new(crate::walredo::DummyRedoManager {});
|
||||
|
||||
if let Some(tenantid) = create_tenant {
|
||||
let tenantid = ZTenantId::from_str(tenantid)?;
|
||||
println!("initializing tenantid {}", tenantid);
|
||||
create_repo(conf, tenantid, dummy_redo_mgr).with_context(|| "failed to create repo")?;
|
||||
}
|
||||
fs::create_dir_all(conf.tenants_path())?;
|
||||
|
||||
println!("pageserver init succeeded");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn create_repo(
|
||||
conf: &'static PageServerConf,
|
||||
tenantid: ZTenantId,
|
||||
wal_redo_manager: Arc<dyn WalRedoManager + Send + Sync>,
|
||||
) -> Result<Arc<dyn Repository>> {
|
||||
let repo_dir = conf.tenant_path(&tenantid);
|
||||
if repo_dir.exists() {
|
||||
bail!("repo for {} already exists", tenantid)
|
||||
}
|
||||
|
||||
// top-level dir may exist if we are creating it through CLI
|
||||
fs::create_dir_all(&repo_dir)
|
||||
.with_context(|| format!("could not create directory {}", repo_dir.display()))?;
|
||||
|
||||
// Note: this `info!(...)` macro comes from `log` crate
|
||||
info!("standard logging redirected to slog");
|
||||
|
||||
fs::create_dir(conf.timelines_path(&tenantid))?;
|
||||
fs::create_dir_all(conf.branches_path(&tenantid))?;
|
||||
fs::create_dir_all(conf.tags_path(&tenantid))?;
|
||||
|
||||
info!("created directory structure in {}", repo_dir.display());
|
||||
|
||||
let tli = create_timeline(conf, None, &tenantid)?;
|
||||
|
||||
let repo = Arc::new(crate::layered_repository::LayeredRepository::new(
|
||||
conf,
|
||||
wal_redo_manager,
|
||||
tenantid,
|
||||
));
|
||||
|
||||
// Load data into pageserver
|
||||
// TODO To implement zenith import we need to
|
||||
// move data loading out of create_repo()
|
||||
bootstrap_timeline(conf, tenantid, tli, &*repo)?;
|
||||
|
||||
Ok(repo)
|
||||
}
|
||||
|
||||
// Returns checkpoint LSN from controlfile
|
||||
fn get_lsn_from_controlfile(path: &Path) -> Result<Lsn> {
|
||||
// Read control file to extract the LSN
|
||||
let controlfile_path = path.join("global").join("pg_control");
|
||||
let controlfile = ControlFileData::decode(&fs::read(controlfile_path)?)?;
|
||||
let lsn = controlfile.checkPoint;
|
||||
|
||||
Ok(Lsn(lsn))
|
||||
}
|
||||
|
||||
// Create the cluster temporarily in an initdbpath directory inside the repository
|
||||
// to get bootstrap data for timeline initialization.
|
||||
//
|
||||
fn run_initdb(conf: &'static PageServerConf, initdbpath: &Path) -> Result<()> {
|
||||
info!("running initdb... ");
|
||||
|
||||
let initdb_path = conf.pg_bin_dir().join("initdb");
|
||||
let initdb_output = Command::new(initdb_path)
|
||||
.args(&["-D", initdbpath.to_str().unwrap()])
|
||||
.args(&["-U", &conf.superuser])
|
||||
.arg("--no-instructions")
|
||||
.env_clear()
|
||||
.env("LD_LIBRARY_PATH", conf.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", conf.pg_lib_dir().to_str().unwrap())
|
||||
.stdout(Stdio::null())
|
||||
.output()
|
||||
.with_context(|| "failed to execute initdb")?;
|
||||
if !initdb_output.status.success() {
|
||||
anyhow::bail!(
|
||||
"initdb failed: '{}'",
|
||||
String::from_utf8_lossy(&initdb_output.stderr)
|
||||
);
|
||||
}
|
||||
info!("initdb succeeded");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
//
|
||||
// - run initdb to init temporary instance and get bootstrap data
|
||||
// - after initialization complete, remove the temp dir.
|
||||
//
|
||||
fn bootstrap_timeline(
|
||||
conf: &'static PageServerConf,
|
||||
tenantid: ZTenantId,
|
||||
tli: ZTimelineId,
|
||||
repo: &dyn Repository,
|
||||
) -> Result<()> {
|
||||
let initdb_path = conf.tenant_path(&tenantid).join("tmp");
|
||||
|
||||
// Init a temporary repo to get bootstrap data
|
||||
run_initdb(conf, &initdb_path)?;
|
||||
let pgdata_path = initdb_path;
|
||||
|
||||
let lsn = get_lsn_from_controlfile(&pgdata_path)?.align();
|
||||
|
||||
info!("bootstrap_timeline {:?} at lsn {}", pgdata_path, lsn);
|
||||
|
||||
let timeline = repo.create_empty_timeline(tli, lsn)?;
|
||||
restore_local_repo::import_timeline_from_postgres_datadir(&pgdata_path, &*timeline, lsn)?;
|
||||
|
||||
let wal_dir = pgdata_path.join("pg_wal");
|
||||
restore_local_repo::import_timeline_wal(&wal_dir, &*timeline, timeline.get_last_record_lsn())?;
|
||||
|
||||
println!(
|
||||
"created initial timeline {} timeline.lsn {}",
|
||||
tli,
|
||||
timeline.get_last_record_lsn()
|
||||
);
|
||||
|
||||
let data = tli.to_string();
|
||||
fs::write(conf.branch_path("main", &tenantid), data)?;
|
||||
println!("created main branch");
|
||||
|
||||
// Remove temp dir. We don't need it anymore
|
||||
fs::remove_dir_all(pgdata_path)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn get_tenants(conf: &PageServerConf) -> Result<Vec<String>> {
|
||||
let tenants_dir = conf.tenants_path();
|
||||
|
||||
std::fs::read_dir(&tenants_dir)?
|
||||
.map(|dir_entry_res| {
|
||||
let dir_entry = dir_entry_res?;
|
||||
ensure!(dir_entry.file_type()?.is_dir());
|
||||
Ok(dir_entry.file_name().to_str().unwrap().to_owned())
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn get_branches(conf: &PageServerConf, tenantid: &ZTenantId) -> Result<Vec<BranchInfo>> {
|
||||
let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
|
||||
|
||||
// Each branch has a corresponding record (text file) in the refs/branches
|
||||
// with timeline_id.
|
||||
let branches_dir = conf.branches_path(tenantid);
|
||||
|
||||
std::fs::read_dir(&branches_dir)?
|
||||
.map(|dir_entry_res| {
|
||||
let dir_entry = dir_entry_res?;
|
||||
let name = dir_entry.file_name().to_str().unwrap().to_string();
|
||||
let timeline_id = std::fs::read_to_string(dir_entry.path())?.parse::<ZTimelineId>()?;
|
||||
|
||||
let latest_valid_lsn = repo
|
||||
.get_timeline(timeline_id)
|
||||
.map(|timeline| timeline.get_last_record_lsn())
|
||||
.ok();
|
||||
|
||||
let ancestor_path = conf.ancestor_path(&timeline_id, tenantid);
|
||||
let mut ancestor_id: Option<String> = None;
|
||||
let mut ancestor_lsn: Option<String> = None;
|
||||
|
||||
if ancestor_path.exists() {
|
||||
let ancestor = std::fs::read_to_string(ancestor_path)?;
|
||||
let mut strings = ancestor.split('@');
|
||||
|
||||
ancestor_id = Some(
|
||||
strings
|
||||
.next()
|
||||
.with_context(|| "wrong branch ancestor point in time format")?
|
||||
.to_owned(),
|
||||
);
|
||||
ancestor_lsn = Some(
|
||||
strings
|
||||
.next()
|
||||
.with_context(|| "wrong branch ancestor point in time format")?
|
||||
.to_owned(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(BranchInfo {
|
||||
name,
|
||||
timeline_id,
|
||||
latest_valid_lsn,
|
||||
ancestor_id,
|
||||
ancestor_lsn,
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub(crate) fn create_branch(
|
||||
conf: &PageServerConf,
|
||||
branchname: &str,
|
||||
startpoint_str: &str,
|
||||
tenantid: &ZTenantId,
|
||||
) -> Result<BranchInfo> {
|
||||
let repo = tenant_mgr::get_repository_for_tenant(tenantid)?;
|
||||
|
||||
if conf.branch_path(branchname, tenantid).exists() {
|
||||
anyhow::bail!("branch {} already exists", branchname);
|
||||
}
|
||||
|
||||
let mut startpoint = parse_point_in_time(conf, startpoint_str, tenantid)?;
|
||||
let timeline = repo.get_timeline(startpoint.timelineid)?;
|
||||
if startpoint.lsn == Lsn(0) {
|
||||
// Find end of WAL on the old timeline
|
||||
let end_of_wal = timeline.get_last_record_lsn();
|
||||
info!("branching at end of WAL: {}", end_of_wal);
|
||||
startpoint.lsn = end_of_wal;
|
||||
}
|
||||
startpoint.lsn = startpoint.lsn.align();
|
||||
if timeline.get_start_lsn() > startpoint.lsn {
|
||||
anyhow::bail!(
|
||||
"invalid startpoint {} for the branch {}: less than timeline start {}",
|
||||
startpoint.lsn,
|
||||
branchname,
|
||||
timeline.get_start_lsn()
|
||||
);
|
||||
}
|
||||
|
||||
// create a new timeline directory for it
|
||||
let newtli = create_timeline(conf, Some(startpoint), tenantid)?;
|
||||
|
||||
// Let the Repository backend do its initialization
|
||||
repo.branch_timeline(startpoint.timelineid, newtli, startpoint.lsn)?;
|
||||
|
||||
// Remember the human-readable branch name for the new timeline.
|
||||
// FIXME: there's a race condition, if you create a branch with the same
|
||||
// name concurrently.
|
||||
let data = newtli.to_string();
|
||||
fs::write(conf.branch_path(branchname, tenantid), data)?;
|
||||
|
||||
Ok(BranchInfo {
|
||||
name: branchname.to_string(),
|
||||
timeline_id: newtli,
|
||||
latest_valid_lsn: Some(startpoint.lsn),
|
||||
ancestor_id: None,
|
||||
ancestor_lsn: None,
|
||||
})
|
||||
}
|
||||
|
||||
//
|
||||
// Parse user-given string that represents a point-in-time.
|
||||
//
|
||||
// We support multiple variants:
|
||||
//
|
||||
// Raw timeline id in hex, meaning the end of that timeline:
|
||||
// bc62e7d612d0e6fe8f99a6dd2f281f9d
|
||||
//
|
||||
// A specific LSN on a timeline:
|
||||
// bc62e7d612d0e6fe8f99a6dd2f281f9d@2/15D3DD8
|
||||
//
|
||||
// Same, with a human-friendly branch name:
|
||||
// main
|
||||
// main@2/15D3DD8
|
||||
//
|
||||
// Human-friendly tag name:
|
||||
// mytag
|
||||
//
|
||||
//
|
||||
fn parse_point_in_time(
|
||||
conf: &PageServerConf,
|
||||
s: &str,
|
||||
tenantid: &ZTenantId,
|
||||
) -> Result<PointInTime> {
|
||||
let mut strings = s.split('@');
|
||||
let name = strings.next().unwrap();
|
||||
|
||||
let lsn: Option<Lsn>;
|
||||
if let Some(lsnstr) = strings.next() {
|
||||
lsn = Some(
|
||||
Lsn::from_str(lsnstr).with_context(|| "invalid LSN in point-in-time specification")?,
|
||||
);
|
||||
} else {
|
||||
lsn = None
|
||||
}
|
||||
|
||||
// Check if it's a tag
|
||||
if lsn.is_none() {
|
||||
let tagpath = conf.tag_path(name, tenantid);
|
||||
if tagpath.exists() {
|
||||
let pointstr = fs::read_to_string(tagpath)?;
|
||||
|
||||
return parse_point_in_time(conf, &pointstr, tenantid);
|
||||
}
|
||||
}
|
||||
|
||||
// Check if it's a branch
|
||||
// Check if it's branch @ LSN
|
||||
let branchpath = conf.branch_path(name, tenantid);
|
||||
if branchpath.exists() {
|
||||
let pointstr = fs::read_to_string(branchpath)?;
|
||||
|
||||
let mut result = parse_point_in_time(conf, &pointstr, tenantid)?;
|
||||
|
||||
result.lsn = lsn.unwrap_or(Lsn(0));
|
||||
return Ok(result);
|
||||
}
|
||||
|
||||
// Check if it's a timelineid
|
||||
// Check if it's timelineid @ LSN
|
||||
if let Ok(timelineid) = ZTimelineId::from_str(name) {
|
||||
let tlipath = conf.timeline_path(&timelineid, tenantid);
|
||||
if tlipath.exists() {
|
||||
return Ok(PointInTime {
|
||||
timelineid,
|
||||
lsn: lsn.unwrap_or(Lsn(0)),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
bail!("could not parse point-in-time {}", s);
|
||||
}
|
||||
|
||||
fn create_timeline(
|
||||
conf: &PageServerConf,
|
||||
ancestor: Option<PointInTime>,
|
||||
tenantid: &ZTenantId,
|
||||
) -> Result<ZTimelineId> {
|
||||
// Create initial timeline
|
||||
|
||||
let timelineid = ZTimelineId::generate();
|
||||
|
||||
let timelinedir = conf.timeline_path(&timelineid, tenantid);
|
||||
|
||||
fs::create_dir(&timelinedir)?;
|
||||
fs::create_dir(&timelinedir.join("wal"))?;
|
||||
|
||||
if let Some(ancestor) = ancestor {
|
||||
let data = format!("{}@{}", ancestor.timelineid, ancestor.lsn);
|
||||
fs::write(timelinedir.join("ancestor"), data)?;
|
||||
}
|
||||
|
||||
Ok(timelineid)
|
||||
}
|
||||
3
pageserver/src/http/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod models;
|
||||
pub mod routes;
|
||||
pub use routes::make_router;
|
||||
17
pageserver/src/http/models.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::ZTenantId;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct BranchCreateRequest {
|
||||
#[serde(with = "hex")]
|
||||
pub tenant_id: ZTenantId,
|
||||
pub name: String,
|
||||
pub start_point: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct TenantCreateRequest {
|
||||
#[serde(with = "hex")]
|
||||
pub tenant_id: ZTenantId,
|
||||
}
|
||||
239
pageserver/src/http/openapi_spec.yml
Normal file
@@ -0,0 +1,239 @@
|
||||
openapi: "3.0.2"
|
||||
info:
|
||||
title: Page Server API
|
||||
version: "1.0"
|
||||
servers:
|
||||
- url: ""
|
||||
paths:
|
||||
/v1/status:
|
||||
description: Healthcheck endpoint
|
||||
get:
|
||||
description: Healthcheck
|
||||
security: []
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
/v1/branch/{tenant_id}:
|
||||
parameters:
|
||||
- name: tenant_id
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
format: hex
|
||||
get:
|
||||
description: Get branches for tenant
|
||||
responses:
|
||||
"200":
|
||||
description: BranchInfo
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: "#/components/schemas/BranchInfo"
|
||||
"400":
|
||||
description: Error when no tenant id found in path
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
"401":
|
||||
description: Unauthorized Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/UnauthorizedError"
|
||||
"403":
|
||||
description: Forbidden Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ForbiddenError"
|
||||
|
||||
"500":
|
||||
description: Generic operation error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
/v1/branch/:
|
||||
post:
|
||||
description: Create branch
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- "tenant_id"
|
||||
- "name"
|
||||
- "start_point"
|
||||
properties:
|
||||
tenant_id:
|
||||
type: string
|
||||
format: hex
|
||||
name:
|
||||
type: string
|
||||
start_point:
|
||||
type: string
|
||||
responses:
|
||||
"201":
|
||||
description: BranchInfo
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: "#/components/schemas/BranchInfo"
|
||||
"400":
|
||||
description: Malformed branch create request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
"401":
|
||||
description: Unauthorized Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/UnauthorizedError"
|
||||
"403":
|
||||
description: Forbidden Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ForbiddenError"
|
||||
"500":
|
||||
description: Generic operation error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
/v1/tenant/:
|
||||
get:
|
||||
description: Get tenants list
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
"401":
|
||||
description: Unauthorized Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/UnauthorizedError"
|
||||
"403":
|
||||
description: Forbidden Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ForbiddenError"
|
||||
"500":
|
||||
description: Generic operation error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
post:
|
||||
description: Create tenant
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- "tenant_id"
|
||||
properties:
|
||||
tenant_id:
|
||||
type: string
|
||||
format: hex
|
||||
responses:
|
||||
"201":
|
||||
description: CREATED
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
"400":
|
||||
description: Malformed tenant create request
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
"401":
|
||||
description: Unauthorized Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/UnauthorizedError"
|
||||
"403":
|
||||
description: Forbidden Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ForbiddenError"
|
||||
"500":
|
||||
description: Generic operation error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Error"
|
||||
|
||||
components:
|
||||
securitySchemes:
|
||||
JWT:
|
||||
type: http
|
||||
scheme: bearer
|
||||
bearerFormat: JWT
|
||||
schemas:
|
||||
BranchInfo:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
- timeline_id
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
timeline_id:
|
||||
type: string
|
||||
format: hex
|
||||
ancestor_id:
|
||||
type: string
|
||||
ancestor_lsn:
|
||||
type: string
|
||||
Error:
|
||||
type: object
|
||||
required:
|
||||
- msg
|
||||
properties:
|
||||
msg:
|
||||
type: string
|
||||
UnauthorizedError:
|
||||
type: object
|
||||
required:
|
||||
- msg
|
||||
properties:
|
||||
msg:
|
||||
type: string
|
||||
ForbiddenError:
|
||||
type: object
|
||||
required:
|
||||
- msg
|
||||
properties:
|
||||
msg:
|
||||
type: string
|
||||
|
||||
security:
|
||||
- JWT: []
|
||||
166
pageserver/src/http/routes.rs
Normal file
@@ -0,0 +1,166 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Result;
|
||||
use hyper::header;
|
||||
use hyper::StatusCode;
|
||||
use hyper::{Body, Request, Response, Uri};
|
||||
use routerify::{ext::RequestExt, RouterBuilder};
|
||||
use zenith_utils::auth::JwtAuth;
|
||||
use zenith_utils::http::endpoint::attach_openapi_ui;
|
||||
use zenith_utils::http::endpoint::auth_middleware;
|
||||
use zenith_utils::http::endpoint::check_permission;
|
||||
use zenith_utils::http::error::ApiError;
|
||||
use zenith_utils::http::{
|
||||
endpoint,
|
||||
error::HttpErrorBody,
|
||||
json::{json_request, json_response},
|
||||
};
|
||||
|
||||
use super::models::BranchCreateRequest;
|
||||
use super::models::TenantCreateRequest;
|
||||
use crate::{
|
||||
branches::{self},
|
||||
tenant_mgr, PageServerConf, ZTenantId,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
struct State {
|
||||
conf: &'static PageServerConf,
|
||||
auth: Option<Arc<JwtAuth>>,
|
||||
allowlist_routes: Vec<Uri>,
|
||||
}
|
||||
|
||||
impl State {
|
||||
fn new(conf: &'static PageServerConf, auth: Option<Arc<JwtAuth>>) -> Self {
|
||||
let allowlist_routes = ["/v1/status", "/v1/doc", "/swagger.yml"]
|
||||
.iter()
|
||||
.map(|v| v.parse().unwrap())
|
||||
.collect::<Vec<_>>();
|
||||
Self {
|
||||
conf,
|
||||
auth,
|
||||
allowlist_routes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn get_state(request: &Request<Body>) -> &State {
|
||||
request
|
||||
.data::<Arc<State>>()
|
||||
.expect("unknown state type")
|
||||
.as_ref()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn get_config(request: &Request<Body>) -> &'static PageServerConf {
|
||||
get_state(request).conf
|
||||
}
|
||||
|
||||
// healthcheck handler
|
||||
async fn status_handler(_: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(header::CONTENT_TYPE, "application/json")
|
||||
.body(Body::from("{}"))
|
||||
.map_err(ApiError::from_err)?)
|
||||
}
|
||||
|
||||
async fn branch_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
let request_data: BranchCreateRequest = json_request(&mut request).await?;
|
||||
|
||||
check_permission(&request, Some(request_data.tenant_id))?;
|
||||
|
||||
let response_data = tokio::task::spawn_blocking(move || {
|
||||
branches::create_branch(
|
||||
get_config(&request),
|
||||
&request_data.name,
|
||||
&request_data.start_point,
|
||||
&request_data.tenant_id,
|
||||
)
|
||||
})
|
||||
.await
|
||||
.map_err(ApiError::from_err)??;
|
||||
Ok(json_response(StatusCode::CREATED, response_data)?)
|
||||
}
|
||||
|
||||
async fn branch_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
let tenantid: ZTenantId = match request.param("tenant_id") {
|
||||
Some(arg) => arg
|
||||
.parse()
|
||||
.map_err(|_| ApiError::BadRequest("failed to parse tenant id".to_string()))?,
|
||||
None => {
|
||||
return Err(ApiError::BadRequest(
|
||||
"no tenant id specified in path param".to_string(),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
check_permission(&request, Some(tenantid))?;
|
||||
|
||||
let response_data = tokio::task::spawn_blocking(move || {
|
||||
crate::branches::get_branches(get_config(&request), &tenantid)
|
||||
})
|
||||
.await
|
||||
.map_err(ApiError::from_err)??;
|
||||
Ok(json_response(StatusCode::OK, response_data)?)
|
||||
}
|
||||
|
||||
async fn tenant_list_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
// check for management permission
|
||||
check_permission(&request, None)?;
|
||||
|
||||
let response_data =
|
||||
tokio::task::spawn_blocking(move || crate::branches::get_tenants(get_config(&request)))
|
||||
.await
|
||||
.map_err(ApiError::from_err)??;
|
||||
Ok(json_response(StatusCode::OK, response_data)?)
|
||||
}
|
||||
|
||||
async fn tenant_create_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
// check for management permission
|
||||
check_permission(&request, None)?;
|
||||
|
||||
let request_data: TenantCreateRequest = json_request(&mut request).await?;
|
||||
|
||||
let response_data = tokio::task::spawn_blocking(move || {
|
||||
tenant_mgr::create_repository_for_tenant(get_config(&request), request_data.tenant_id)
|
||||
})
|
||||
.await
|
||||
.map_err(ApiError::from_err)??;
|
||||
Ok(json_response(StatusCode::CREATED, response_data)?)
|
||||
}
|
||||
|
||||
async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
|
||||
json_response(
|
||||
StatusCode::NOT_FOUND,
|
||||
HttpErrorBody::from_msg("page not found".to_owned()),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn make_router(
|
||||
conf: &'static PageServerConf,
|
||||
auth: Option<Arc<JwtAuth>>,
|
||||
) -> RouterBuilder<hyper::Body, ApiError> {
|
||||
let spec = include_bytes!("openapi_spec.yml");
|
||||
let mut router = attach_openapi_ui(endpoint::make_router(), spec, "/swagger.yml", "/v1/doc");
|
||||
if auth.is_some() {
|
||||
router = router.middleware(auth_middleware(|request| {
|
||||
let state = get_state(request);
|
||||
if state.allowlist_routes.contains(request.uri()) {
|
||||
None
|
||||
} else {
|
||||
state.auth.as_deref()
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
router
|
||||
.data(Arc::new(State::new(conf, auth)))
|
||||
.get("/v1/status", status_handler)
|
||||
.get("/v1/branch/:tenant_id", branch_list_handler)
|
||||
.post("/v1/branch", branch_create_handler)
|
||||
.get("/v1/tenant", tenant_list_handler)
|
||||
.post("/v1/tenant", tenant_create_handler)
|
||||
.any(handler_404)
|
||||
}
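As a rough usage sketch (not part of this diff): the returned RouterBuilder could be turned into a running HTTP endpoint roughly as below, assuming routerify's RouterService and a hyper 0.14-style Server; the bind address is an arbitrary example value.

// Sketch only: build the router and serve it over HTTP.
async fn serve_http_api(
    conf: &'static PageServerConf,
    auth: Option<Arc<JwtAuth>>,
) -> anyhow::Result<()> {
    let router = make_router(conf, auth).build().expect("failed to build router");
    let service = routerify::RouterService::new(router).expect("failed to create router service");
    let addr: std::net::SocketAddr = "127.0.0.1:9898".parse()?;
    hyper::Server::bind(&addr).serve(service).await?;
    Ok(())
}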
|
||||
1479
pageserver/src/layered_repository.rs
Normal file
File diff suppressed because it is too large
366
pageserver/src/layered_repository/README.md
Normal file
@@ -0,0 +1,366 @@
|
||||
# Overview
|
||||
|
||||
The on-disk format is based on immutable files. The page server
|
||||
receives a stream of incoming WAL, parses the WAL records to determine
|
||||
which pages they apply to, and accumulates the incoming changes in
|
||||
memory. Every now and then, the accumulated changes are written out to
|
||||
new files.
|
||||
|
||||
The files are called "layer files". Each layer file corresponds
|
||||
to one RELISH_SEG_SIZE slice of a PostgreSQL relation fork or
|
||||
non-rel file in a range of LSNs. The layer files
|
||||
for each timeline are stored in the timeline's subdirectory under
|
||||
.zenith/tenants/<tenantid>/timelines.
|
||||
|
||||
There are two kinds of layer files: base images and deltas. A base
image file contains an image of a segment as it was at one LSN,
whereas a delta file contains modifications to a segment - mostly in
the form of WAL records - in a range of LSNs.
|
||||
|
||||
base image file:
|
||||
|
||||
rel_<spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>
|
||||
|
||||
delta file:
|
||||
|
||||
rel_<spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>_<end LSN>
|
||||
|
||||
For example:
|
||||
|
||||
rel_1663_13990_2609_0_10_000000000169C348
|
||||
rel_1663_13990_2609_0_10_000000000169C348_0000000001702000
|
||||
|
||||
In addition to the relations, with "rel_*" prefix, we use the same
|
||||
format for storing various smaller files from the PostgreSQL data
|
||||
directory. They use different prefixes, and the part of the naming scheme
before the LSNs varies. The Zenith source code uses the term "relish" to
mean "a relation, or other file that's treated like a relation in the
storage". For example, a base image of a CLOG segment would be named
like this:
|
||||
|
||||
pg_xact_0000_0_00000000198B06B0
|
||||
|
||||
There is no difference in how the relation and non-relation files are
|
||||
managed, except that the first part of file names is different.
|
||||
Internally, the relations and non-relation files that are managed in
|
||||
the versioned store are together called "relishes".
|
||||
|
||||
If a file has been dropped, the last layer file for it is created
|
||||
with the _DROPPED suffix, e.g.
|
||||
|
||||
rel_1663_13990_2609_0_10_000000000169C348_0000000001702000_DROPPED
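A minimal sketch of how these names are put together (simplified types, illustrative only; the actual code lives in layered_repository/filename.rs):

    // `rel_prefix` is e.g. "rel_1663_13990_2609_0" or "pg_xact_0000".
    fn base_image_name(rel_prefix: &str, segno: u32, lsn: u64) -> String {
        format!("{}_{}_{:016X}", rel_prefix, segno, lsn)
    }

    fn delta_name(rel_prefix: &str, segno: u32, start: u64, end: u64, dropped: bool) -> String {
        format!(
            "{}_{}_{:016X}_{:016X}{}",
            rel_prefix, segno, start, end,
            if dropped { "_DROPPED" } else { "" }
        )
    }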
|
||||
|
||||
|
||||
## Notation used in this document
|
||||
|
||||
The full path of a delta file looks like this:
|
||||
|
||||
.zenith/tenants/941ddc8604413b88b3d208bddf90396c/timelines/4af489b06af8eed9e27a841775616962/rel_1663_13990_2609_0_10_000000000169C348_0000000001702000
|
||||
|
||||
For simplicity, the examples below use a simplified notation for the
|
||||
paths. The tenant ID is left out, the timeline ID is replaced with
|
||||
the human-readable branch name, and spcnode+dbnode+relnode+forknum+segno
|
||||
with a human-readable table name. The LSNs are also shorter. For
|
||||
example, a base image file at LSN 100 and a delta file between 100-200
|
||||
for 'orders' table on 'main' branch is represented like this:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
|
||||
|
||||
# Creating layer files
|
||||
|
||||
Let's start with a simple example with a system that contains one
|
||||
branch called 'main' and two tables, 'orders' and 'customers'. The end
|
||||
of WAL is currently at LSN 250. In this starting situation, you would
|
||||
have these files on disk:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/customers_100
|
||||
main/customers_100_200
|
||||
main/customers_200
|
||||
|
||||
In addition to those files, the recent changes between LSN 200 and the
|
||||
end of WAL at 250 are kept in memory. If the page server crashes, the
|
||||
latest records between 200-250 need to be re-read from the WAL.
|
||||
|
||||
Whenever enough WAL has been accumulated in memory, the page server
|
||||
writes out the changes in memory into new layer files. This process
|
||||
is called "checkpointing" (not to be confused with the PostgreSQL
|
||||
checkpoints, that's a different thing). The page server only creates
|
||||
layer files for relations that have been modified since the last
|
||||
checkpoint. For example, if the current end of WAL is at LSN 450, and
|
||||
the last checkpoint happened at LSN 400 but there hasn't been any
|
||||
recent changes to 'customers' table, you would have these files on
|
||||
disk:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
main/orders_300
|
||||
main/orders_300_400
|
||||
main/orders_400
|
||||
main/customers_100
|
||||
main/customers_100_200
|
||||
main/customers_200
|
||||
|
||||
If the customers table is modified later, a new file is created for it
|
||||
at the next checkpoint. The new file will cover the "gap" from the
|
||||
last layer file, so the LSN ranges are always contiguous:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
main/orders_300
|
||||
main/orders_300_400
|
||||
main/orders_400
|
||||
main/customers_100
|
||||
main/customers_100_200
|
||||
main/customers_200
|
||||
main/customers_200_500
|
||||
main/customers_500
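In other words, the LSN range of a newly written delta layer starts at the end LSN of the relation's previous layer, not at the previous checkpoint. A tiny sketch of that rule (illustrative only):

    /// `last_end_lsn` is the end LSN of the newest existing layer for the
    /// relation; `checkpoint_lsn` is the LSN we are checkpointing up to.
    /// E.g. customers_200 + a checkpoint at LSN 500 -> customers_200_500.
    fn new_delta_range(last_end_lsn: u64, checkpoint_lsn: u64) -> (u64, u64) {
        (last_end_lsn, checkpoint_lsn)
    }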
|
||||
|
||||
## Reading page versions
|
||||
|
||||
Whenever a GetPage@LSN request comes in from the compute node, the
|
||||
page server needs to reconstruct the requested page, as it was at the
|
||||
requested LSN. To do that, the page server first checks the recent
|
||||
in-memory layer; if the requested page version is found there, it can
|
||||
be returned immediately without looking at the files on
|
||||
disk. Otherwise the page server needs to locate the layer file that
|
||||
contains the requested page version.
|
||||
|
||||
For example, if a request comes in for table 'orders' at LSN 250, the
|
||||
page server would load the 'main/orders_200_300' file into memory, and
|
||||
reconstruct and return the requested page from it, as it was at
|
||||
LSN 250. Because the layer file consists of a full image of the
|
||||
relation at the start LSN and the WAL, reconstructing the page
|
||||
involves replaying any WAL records applicable to the page between LSNs
|
||||
200-250, starting from the base image at LSN 200.
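In sketch form (simplified, not the actual pageserver code; `apply_wal_record` stands in for the WAL redo step), the reconstruction walks the page's versions backwards from the requested LSN:

    enum PageVersion {
        Image(Vec<u8>),                              // full page image
        WalRecord { rec: Vec<u8>, will_init: bool }, // delta against an older version
    }

    fn apply_wal_record(_page: &mut Vec<u8>, _rec: &[u8]) {
        // stand-in for the real WAL redo machinery
    }

    /// `versions` is sorted by LSN. Collect WAL records going backwards until
    /// a base image (or a will_init record) is found, then replay them forwards.
    fn reconstruct(versions: &[(u64, PageVersion)], lsn: u64) -> Option<Vec<u8>> {
        let mut records: Vec<&[u8]> = Vec::new();
        for (v_lsn, v) in versions.iter().rev() {
            if *v_lsn > lsn {
                continue;
            }
            match v {
                PageVersion::Image(img) => {
                    let mut page = img.clone();
                    for rec in records.iter().rev() {
                        apply_wal_record(&mut page, rec);
                    }
                    return Some(page);
                }
                PageVersion::WalRecord { rec, will_init } => {
                    records.push(rec);
                    if *will_init {
                        let mut page = Vec::new();
                        for rec in records.iter().rev() {
                            apply_wal_record(&mut page, rec);
                        }
                        return Some(page);
                    }
                }
            }
        }
        None // an older layer (or the parent branch) has to be consulted
    }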
|
||||
|
||||
# Multiple branches
|
||||
|
||||
Imagine that a child branch is created at LSN 250:
|
||||
|
||||
           @250
----main--+-------------------------->
           \
            +---child-------------->
|
||||
|
||||
|
||||
Then, the 'orders' table is updated differently on the 'main' and
|
||||
'child' branches. You now have this situation on disk:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
main/orders_300
|
||||
main/orders_300_400
|
||||
main/orders_400
|
||||
main/customers_100
|
||||
main/customers_100_200
|
||||
main/customers_200
|
||||
child/orders_250_300
|
||||
child/orders_300
|
||||
child/orders_300_400
|
||||
child/orders_400
|
||||
|
||||
Because the 'customers' table hasn't been modified on the child
|
||||
branch, there is no file for it there. If you request a page for it on
|
||||
the 'child' branch, the page server will not find any layer file
|
||||
for it in the 'child' directory, so it will recurse to look into the
|
||||
parent 'main' branch instead.
|
||||
|
||||
From the 'child' branch's point of view, the history for each relation
|
||||
is linear, and the request's LSN identifies unambiguously which file
|
||||
you need to look at. For example, the history for the 'orders' table
|
||||
on the 'main' branch consists of these files:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
main/orders_300
|
||||
main/orders_300_400
|
||||
main/orders_400
|
||||
|
||||
And from the 'child' branch's point of view, it consists of these
|
||||
files:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
child/orders_250_300
|
||||
child/orders_300
|
||||
child/orders_300_400
|
||||
child/orders_400
|
||||
|
||||
The branch metadata includes the point where the child branch was
|
||||
created, LSN 250. If a page request comes with LSN 275, we read the
|
||||
page version from the 'child/orders_250_300' file. We might also
|
||||
need to reconstruct the page version as it was at LSN 250, in order
|
||||
to replay the WAL up to LSN 275, using 'main/orders_200_300' and
|
||||
'main/orders_200'. The page versions between 250-300 in the
|
||||
'main/orders_200_300' file are ignored when operating on the child
|
||||
branch.
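A sketch of that lookup (illustrative types only, not the actual pageserver code):

    struct Branch<'a> {
        /// Layer files on this branch: (relation, start LSN, end LSN, file name).
        layers: Vec<(&'a str, u64, u64, &'a str)>,
        /// Parent branch and the LSN at which this branch was created.
        ancestor: Option<(&'a Branch<'a>, u64)>,
    }

    impl<'a> Branch<'a> {
        fn local_layer(&self, rel: &str, lsn: u64) -> Option<&'a str> {
            self.layers
                .iter()
                .find(|(r, start, end, _)| *r == rel && *start <= lsn && lsn <= *end)
                .map(|(_, _, _, name)| *name)
        }

        /// Search this branch first; if nothing covers the relation at this LSN,
        /// recurse into the ancestor, capping the LSN at the branch point so that
        /// parent-side page versions past the branch point are ignored.
        fn find_layer(&self, rel: &str, lsn: u64) -> Option<&'a str> {
            if let Some(name) = self.local_layer(rel, lsn) {
                return Some(name);
            }
            let (parent, branch_point) = self.ancestor?;
            parent.find_layer(rel, lsn.min(branch_point))
        }
    }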
|
||||
|
||||
Note: It doesn't make any difference if the child branch is created
|
||||
when the end of the main branch was at LSN 250, or later when the tip of
|
||||
the main branch had already moved on. The latter case, creating a
|
||||
branch at a historic LSN, is how we support PITR in Zenith.
|
||||
|
||||
|
||||
# Garbage collection
|
||||
|
||||
In this scheme, we keep creating new layer files over time. We also
|
||||
need a mechanism to remove old files that are no longer needed,
|
||||
because disk space isn't infinite.
|
||||
|
||||
What files are still needed? Currently, the page server supports PITR
|
||||
and branching from any branch at any LSN that is "recent enough" from
|
||||
the tip of the branch. "Recent enough" is defined as an LSN horizon,
|
||||
which by default is 64 MB. (See DEFAULT_GC_HORIZON). For this
|
||||
example, let's assume that the LSN horizon is 150 units.
|
||||
|
||||
Let's look at the single branch scenario again. Imagine that the end
|
||||
of the branch is LSN 525, so that the GC horizon is currently at
|
||||
525-150 = 375
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
main/orders_300
|
||||
main/orders_300_400
|
||||
main/orders_400
|
||||
main/orders_400_500
|
||||
main/orders_500
|
||||
main/customers_100
|
||||
main/customers_100_200
|
||||
main/customers_200
|
||||
|
||||
We can remove the following files because the end LSNs of those files are
|
||||
older than GC horizon 375, and there are more recent layer files for the
|
||||
table:
|
||||
|
||||
main/orders_100 DELETE
|
||||
main/orders_100_200 DELETE
|
||||
main/orders_200 DELETE
|
||||
main/orders_200_300 DELETE
|
||||
main/orders_300 STILL NEEDED BY orders_300_400
|
||||
main/orders_300_400 KEEP, NEWER THAN GC HORIZON
|
||||
main/orders_400 ..
|
||||
main/orders_400_500 ..
|
||||
main/orders_500 ..
|
||||
main/customers_100 DELETE
|
||||
main/customers_100_200 DELETE
|
||||
main/customers_200 KEEP, NO NEWER VERSION
|
||||
|
||||
'main/customers_100_200' is old enough, but it cannot be
|
||||
removed because there is no newer layer file for the table.
|
||||
|
||||
Things get slightly more complicated with multiple branches. All of
|
||||
the above still holds, but in addition to recent files we must also
|
||||
retain older snapshot files that are still needed by child branches.
For example, if a child branch is created at LSN 150, and the 'customers'
|
||||
table is updated on the branch, you would have these files:
|
||||
|
||||
main/orders_100 KEEP, NEEDED BY child BRANCH
|
||||
main/orders_100_200 KEEP, NEEDED BY child BRANCH
|
||||
main/orders_200 DELETE
|
||||
main/orders_200_300 DELETE
|
||||
main/orders_300 KEEP, NEWER THAN GC HORIZON
|
||||
main/orders_300_400 KEEP, NEWER THAN GC HORIZON
|
||||
main/orders_400 KEEP, NEWER THAN GC HORIZON
|
||||
main/orders_400_500 KEEP, NEWER THAN GC HORIZON
|
||||
main/orders_500 KEEP, NEWER THAN GC HORIZON
|
||||
main/customers_100 DELETE
|
||||
main/customers_100_200 DELETE
|
||||
main/customers_200 KEEP, NO NEWER VERSION
|
||||
child/customers_150_300 DELETE
|
||||
child/customers_300 KEEP, NO NEWER VERSION
|
||||
|
||||
In this situation, 'main/orders_100' and 'main/orders_100_200' cannot
|
||||
be removed, even though they are older than the GC horizon, because
|
||||
they are still needed by the child branch. 'main/orders_200'
|
||||
and 'main/orders_200_300' can still be removed.
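The per-file decision can be sketched like this (simplified; the caller is assumed to have computed the flags by inspecting the other layer files and the child branch points, as in the tables above):

    fn can_remove(
        end_lsn: u64,
        gc_horizon: u64,
        has_newer_layer: bool,
        needed_as_base_by_retained_delta: bool,
        needed_by_child_branch: bool,
    ) -> bool {
        end_lsn < gc_horizon                   // old enough
            && has_newer_layer                 // something newer can serve reads
            && !needed_as_base_by_retained_delta
            && !needed_by_child_branch
    }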
|
||||
|
||||
If 'orders' is modified later on the 'child' branch, we will create a
|
||||
new base image and delta file for it on the child:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
|
||||
main/orders_300
|
||||
main/orders_300_400
|
||||
main/orders_400
|
||||
main/orders_400_500
|
||||
main/orders_500
|
||||
main/customers_200
|
||||
child/customers_300
|
||||
child/orders_150_400
|
||||
child/orders_400
|
||||
|
||||
After this, the 'main/orders_100' and 'main/orders_100_200' files could
be removed. They are no longer needed by the child branch, because there
|
||||
is a newer layer file there. TODO: This optimization hasn't been
|
||||
implemented! The GC algorithm will currently keep the file on the
|
||||
'main' branch anyway, for as long as the child branch exists.
|
||||
|
||||
|
||||
# TODO: On LSN ranges
|
||||
|
||||
In principle, each relation can be checkpointed separately, i.e. the
|
||||
LSN ranges of the files don't need to line up. So this would be legal:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
main/orders_300
|
||||
main/orders_300_400
|
||||
main/orders_400
|
||||
main/customers_150
|
||||
main/customers_150_250
|
||||
main/customers_250
|
||||
main/customers_250_500
|
||||
main/customers_500
|
||||
|
||||
However, the code currently always checkpoints all relations together.
|
||||
So that situation doesn't arise in practice.
|
||||
|
||||
It would also be OK to have overlapping LSN ranges for the same relation:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
main/orders_300
|
||||
main/orders_250_350
|
||||
main/orders_350
|
||||
main/orders_300_400
|
||||
main/orders_400
|
||||
|
||||
The code that reads the layer files should cope with this, but this
|
||||
situation doesn't arise either, because the checkpointing code never
|
||||
does that. It could be useful, however, as a transient state when
|
||||
garbage collecting around branch points, or explicit recovery
|
||||
points. For example, if we start with this:
|
||||
|
||||
main/orders_100
|
||||
main/orders_100_200
|
||||
main/orders_200
|
||||
main/orders_200_300
|
||||
main/orders_300
|
||||
|
||||
And there is a branch or explicit recovery point at LSN 150, we could
|
||||
replace 'main/orders_100_200' with 'main/orders_150' to keep a
|
||||
layer only at that exact point that's still needed, removing the
|
||||
other page versions around it. But such compaction has not been
|
||||
implemented yet.
|
||||
45
pageserver/src/layered_repository/blob.rs
Normal file
@@ -0,0 +1,45 @@
|
||||
use std::{fs::File, io::Write};
|
||||
|
||||
use anyhow::Result;
|
||||
use bookfile::{BookWriter, BoundedReader, ChapterId, ChapterWriter};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct BlobRange {
|
||||
offset: u64,
|
||||
size: usize,
|
||||
}
|
||||
|
||||
pub fn read_blob(reader: &BoundedReader<&'_ File>, range: &BlobRange) -> Result<Vec<u8>> {
|
||||
let mut buf = vec![0u8; range.size];
|
||||
reader.read_exact_at(&mut buf, range.offset)?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
pub struct BlobWriter {
|
||||
writer: ChapterWriter<File>,
|
||||
offset: u64,
|
||||
}
|
||||
|
||||
impl BlobWriter {
|
||||
// This function takes a BookWriter and creates a new chapter to ensure offset is 0.
|
||||
pub fn new(book_writer: BookWriter<File>, chapter_id: impl Into<ChapterId>) -> Self {
|
||||
let writer = book_writer.new_chapter(chapter_id);
|
||||
Self { writer, offset: 0 }
|
||||
}
|
||||
|
||||
pub fn write_blob(&mut self, blob: &[u8]) -> Result<BlobRange> {
|
||||
self.writer.write_all(blob)?;
|
||||
|
||||
let range = BlobRange {
|
||||
offset: self.offset,
|
||||
size: blob.len(),
|
||||
};
|
||||
self.offset += blob.len() as u64;
|
||||
Ok(range)
|
||||
}
|
||||
|
||||
pub fn close(self) -> bookfile::Result<BookWriter<File>> {
|
||||
self.writer.close()
|
||||
}
|
||||
}
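A rough usage sketch (not part of this diff) of how BlobWriter and read_blob fit together; the magic number and chapter id are arbitrary example values, and the file is assumed to be reopened for reading once the book has been written:

// Sketch only: write blobs into one chapter, then read one back by its BlobRange.
fn example(write_file: std::fs::File, reopened_file: std::fs::File) -> anyhow::Result<()> {
    let book_writer = bookfile::BookWriter::new(write_file, 0xDEADBEEF)?;
    let mut blobs = BlobWriter::new(book_writer, 1u64);
    let range_a = blobs.write_blob(b"first blob")?;
    let _range_b = blobs.write_blob(b"second blob")?;
    let book_writer = blobs.close()?;
    book_writer.close()?;

    let book = bookfile::Book::new(reopened_file)?;
    let chapter = book.chapter_reader(1u64)?;
    let bytes = read_blob(&chapter, &range_a)?;
    assert_eq!(bytes, b"first blob".to_vec());
    Ok(())
}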
|
||||
548
pageserver/src/layered_repository/delta_layer.rs
Normal file
@@ -0,0 +1,548 @@
|
||||
//!
|
||||
//! A DeltaLayer represents a collection of WAL records or page images in a range of
|
||||
//! LSNs, for one segment. It is stored on a file on disk.
|
||||
//!
|
||||
//! Usually a delta layer only contains differences - in the form of WAL records against
|
||||
//! a base LSN. However, if a segment is newly created, by creating a new relation or
|
||||
//! extending an old one, there might be no base image. In that case, all the entries in
|
||||
//! the delta layer must be page images or WAL records with the 'will_init' flag set, so
|
||||
//! that they can be replayed without referring to an older page version. Also in some
|
||||
//! circumstances, the predecessor layer might actually be another delta layer. That
|
||||
//! can happen when you create a new branch in the middle of a delta layer, and the WAL
|
||||
//! records on the new branch are put in a new delta layer.
|
||||
//!
|
||||
//! When a delta file needs to be accessed, we slurp the metadata and relsize chapters
|
||||
//! into memory, into the DeltaLayerInner struct. See load() and unload() functions.
|
||||
//! To access a page/WAL record, we search `page_version_metas` for the block # and LSN.
|
||||
//! The byte ranges in the metadata can be used to find the page/WAL record in
|
||||
//! PAGE_VERSIONS_CHAPTER.
|
||||
//!
|
||||
//! On disk, the delta files are stored in timelines/<timelineid> directory.
|
||||
//! Currently, there are no subdirectories, and each delta file is named like this:
|
||||
//!
|
||||
//! <spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>_<end LSN>
|
||||
//!
|
||||
//! For example:
|
||||
//!
|
||||
//! 1663_13990_2609_0_5_000000000169C348_000000000169C349
|
||||
//!
|
||||
//! If a relation is dropped, we add a '_DROPPED' to the end of the filename to indicate that.
|
||||
//! So the above example would become:
|
||||
//!
|
||||
//! 1663_13990_2609_0_5_000000000169C348_000000000169C349_DROPPED
|
||||
//!
|
||||
//! The end LSN indicates when it was dropped in that case; we don't store it in the
|
||||
//! file contents in any way.
|
||||
//!
|
||||
//! A delta file is constructed using the 'bookfile' crate. Each file consists of two
|
||||
//! parts: the page versions and the relation sizes. They are stored as separate chapters.
|
||||
//!
|
||||
use crate::layered_repository::blob::BlobWriter;
|
||||
use crate::layered_repository::filename::{DeltaFileName, PathOrConf};
|
||||
use crate::layered_repository::storage_layer::{
|
||||
Layer, PageReconstructData, PageReconstructResult, PageVersion, SegmentTag,
|
||||
};
|
||||
use crate::repository::WALRecord;
|
||||
use crate::waldecoder;
|
||||
use crate::PageServerConf;
|
||||
use crate::{ZTenantId, ZTimelineId};
|
||||
use anyhow::{bail, Result};
|
||||
use bytes::Bytes;
|
||||
use log::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::BTreeMap;
|
||||
// avoid binding to Write (conflicts with std::io::Write)
|
||||
// while being able to use std::fmt::Write's methods
|
||||
use std::fmt::Write as _;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::ops::Bound::Included;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex, MutexGuard};
|
||||
|
||||
use bookfile::{Book, BookWriter};
|
||||
|
||||
use zenith_utils::bin_ser::BeSer;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
use super::blob::{read_blob, BlobRange};
|
||||
|
||||
// Magic constant to identify a Zenith delta file
|
||||
static DELTA_FILE_MAGIC: u32 = 0x5A616E01;
|
||||
|
||||
/// Mapping from (block #, lsn) -> page/WAL record
|
||||
/// byte ranges in PAGE_VERSIONS_CHAPTER
|
||||
static PAGE_VERSION_METAS_CHAPTER: u64 = 1;
|
||||
/// Page/WAL bytes - cannot be interpreted
|
||||
/// without PAGE_VERSION_METAS_CHAPTER
|
||||
static PAGE_VERSIONS_CHAPTER: u64 = 2;
|
||||
static REL_SIZES_CHAPTER: u64 = 3;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct PageVersionMeta {
|
||||
page_image_range: Option<BlobRange>,
|
||||
record_range: Option<BlobRange>,
|
||||
}
|
||||
|
||||
///
|
||||
/// DeltaLayer is the in-memory data structure associated with an
|
||||
/// on-disk delta file. We keep a DeltaLayer in memory for each
|
||||
/// file, in the LayerMap. If a layer is in "loaded" state, we have a
|
||||
/// copy of the file in memory, in 'inner'. Otherwise the struct is
|
||||
/// just a placeholder for a file that exists on disk, and it needs to
|
||||
/// be loaded before using it in queries.
|
||||
///
|
||||
pub struct DeltaLayer {
|
||||
path_or_conf: PathOrConf,
|
||||
|
||||
pub tenantid: ZTenantId,
|
||||
pub timelineid: ZTimelineId,
|
||||
pub seg: SegmentTag,
|
||||
|
||||
//
|
||||
// This entry contains all the changes from 'start_lsn' to 'end_lsn'. The
|
||||
// start is inclusive, and end is exclusive.
|
||||
//
|
||||
pub start_lsn: Lsn,
|
||||
pub end_lsn: Lsn,
|
||||
|
||||
dropped: bool,
|
||||
|
||||
/// Predecessor layer
|
||||
predecessor: Option<Arc<dyn Layer>>,
|
||||
|
||||
inner: Mutex<DeltaLayerInner>,
|
||||
}
|
||||
|
||||
pub struct DeltaLayerInner {
|
||||
/// If false, the 'page_version_metas' and 'relsizes' have not been
|
||||
/// loaded into memory yet.
|
||||
loaded: bool,
|
||||
|
||||
/// All versions of all pages in the file are kept here.
|
||||
/// Indexed by block number and LSN.
|
||||
page_version_metas: BTreeMap<(u32, Lsn), PageVersionMeta>,
|
||||
|
||||
/// `relsizes` tracks the size of the relation at different points in time.
|
||||
relsizes: BTreeMap<Lsn, u32>,
|
||||
}
|
||||
|
||||
impl Layer for DeltaLayer {
|
||||
fn get_timeline_id(&self) -> ZTimelineId {
|
||||
self.timelineid
|
||||
}
|
||||
|
||||
fn get_seg_tag(&self) -> SegmentTag {
|
||||
self.seg
|
||||
}
|
||||
|
||||
fn is_dropped(&self) -> bool {
|
||||
self.dropped
|
||||
}
|
||||
|
||||
fn get_start_lsn(&self) -> Lsn {
|
||||
self.start_lsn
|
||||
}
|
||||
|
||||
fn get_end_lsn(&self) -> Lsn {
|
||||
self.end_lsn
|
||||
}
|
||||
|
||||
fn filename(&self) -> PathBuf {
|
||||
PathBuf::from(
|
||||
DeltaFileName {
|
||||
seg: self.seg,
|
||||
start_lsn: self.start_lsn,
|
||||
end_lsn: self.end_lsn,
|
||||
dropped: self.dropped,
|
||||
}
|
||||
.to_string(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Look up given page in the cache.
|
||||
fn get_page_reconstruct_data(
|
||||
&self,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
reconstruct_data: &mut PageReconstructData,
|
||||
) -> Result<PageReconstructResult> {
|
||||
let mut cont_lsn: Option<Lsn> = Some(lsn);
|
||||
|
||||
assert!(self.seg.blknum_in_seg(blknum));
|
||||
|
||||
{
|
||||
// Open the file and lock the metadata in memory
|
||||
// TODO: avoid opening the file for each read
|
||||
let (_path, book) = self.open_book()?;
|
||||
let page_version_reader = book.chapter_reader(PAGE_VERSIONS_CHAPTER)?;
|
||||
let inner = self.load()?;
|
||||
|
||||
// Scan the metadata BTreeMap backwards, starting from the given entry.
|
||||
let minkey = (blknum, Lsn(0));
|
||||
let maxkey = (blknum, lsn);
|
||||
let mut iter = inner
|
||||
.page_version_metas
|
||||
.range((Included(&minkey), Included(&maxkey)));
|
||||
while let Some(((_blknum, entry_lsn), entry)) = iter.next_back() {
|
||||
if let Some(img_range) = &entry.page_image_range {
|
||||
// Found a page image, return it
|
||||
let img = Bytes::from(read_blob(&page_version_reader, img_range)?);
|
||||
reconstruct_data.page_img = Some(img);
|
||||
cont_lsn = None;
|
||||
break;
|
||||
} else if let Some(rec_range) = &entry.record_range {
|
||||
let rec = WALRecord::des(&read_blob(&page_version_reader, rec_range)?)?;
|
||||
let will_init = rec.will_init;
|
||||
reconstruct_data.records.push(rec);
|
||||
if will_init {
|
||||
// This WAL record initializes the page, so no need to go further back
|
||||
cont_lsn = None;
|
||||
break;
|
||||
} else {
|
||||
// This WAL record needs to be applied against an older page image
|
||||
cont_lsn = Some(*entry_lsn);
|
||||
}
|
||||
} else {
|
||||
// No base image, and no WAL record. Huh?
|
||||
bail!("no page image or WAL record for requested page");
|
||||
}
|
||||
}
|
||||
|
||||
// release metadata lock and close the file
|
||||
}
|
||||
|
||||
// If an older page image is needed to reconstruct the page, let the
|
||||
// caller know about the predecessor layer.
|
||||
if let Some(cont_lsn) = cont_lsn {
|
||||
if let Some(cont_layer) = &self.predecessor {
|
||||
Ok(PageReconstructResult::Continue(
|
||||
cont_lsn,
|
||||
Arc::clone(cont_layer),
|
||||
))
|
||||
} else {
|
||||
Ok(PageReconstructResult::Missing(cont_lsn))
|
||||
}
|
||||
} else {
|
||||
Ok(PageReconstructResult::Complete)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get size of the relation at given LSN
|
||||
fn get_seg_size(&self, lsn: Lsn) -> Result<u32> {
|
||||
assert!(lsn >= self.start_lsn);
|
||||
|
||||
// Scan the BTreeMap backwards, starting from the given entry.
|
||||
let inner = self.load()?;
|
||||
let mut iter = inner.relsizes.range((Included(&Lsn(0)), Included(&lsn)));
|
||||
|
||||
let result;
|
||||
if let Some((_entry_lsn, entry)) = iter.next_back() {
|
||||
result = *entry;
|
||||
// Use the base image if needed
|
||||
} else if let Some(predecessor) = &self.predecessor {
|
||||
result = predecessor.get_seg_size(lsn)?;
|
||||
} else {
|
||||
result = 0;
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Does this segment exist at given LSN?
|
||||
fn get_seg_exists(&self, lsn: Lsn) -> Result<bool> {
|
||||
// Is the requested LSN after the rel was dropped?
|
||||
if self.dropped && lsn >= self.end_lsn {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Otherwise, it exists.
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
///
|
||||
/// Release most of the memory used by this layer. If it's accessed again later,
|
||||
/// it will need to be loaded back.
|
||||
///
|
||||
fn unload(&self) -> Result<()> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
inner.page_version_metas = BTreeMap::new();
|
||||
inner.relsizes = BTreeMap::new();
|
||||
inner.loaded = false;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn delete(&self) -> Result<()> {
|
||||
// delete underlying file
|
||||
fs::remove_file(self.path())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_incremental(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
/// debugging function to print out the contents of the layer
|
||||
fn dump(&self) -> Result<()> {
|
||||
println!(
|
||||
"----- delta layer for tli {} seg {} {}-{} ----",
|
||||
self.timelineid, self.seg, self.start_lsn, self.end_lsn
|
||||
);
|
||||
|
||||
println!("--- relsizes ---");
|
||||
let inner = self.load()?;
|
||||
for (k, v) in inner.relsizes.iter() {
|
||||
println!(" {}: {}", k, v);
|
||||
}
|
||||
println!("--- page versions ---");
|
||||
let (_path, book) = self.open_book()?;
|
||||
let chapter = book.chapter_reader(PAGE_VERSIONS_CHAPTER)?;
|
||||
for (k, v) in inner.page_version_metas.iter() {
|
||||
let mut desc = String::new();
|
||||
|
||||
if let Some(page_image_range) = v.page_image_range.as_ref() {
|
||||
let image = read_blob(&chapter, &page_image_range)?;
|
||||
write!(&mut desc, " img {} bytes", image.len())?;
|
||||
}
|
||||
if let Some(record_range) = v.record_range.as_ref() {
|
||||
let record_bytes = read_blob(&chapter, record_range)?;
|
||||
let rec = WALRecord::des(&record_bytes)?;
|
||||
let wal_desc = waldecoder::describe_wal_record(&rec.rec);
|
||||
write!(
|
||||
&mut desc,
|
||||
" rec {} bytes will_init: {} {}",
|
||||
rec.rec.len(),
|
||||
rec.will_init,
|
||||
wal_desc
|
||||
)?;
|
||||
}
|
||||
println!(" blk {} at {}: {}", k.0, k.1, desc);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl DeltaLayer {
|
||||
fn path(&self) -> PathBuf {
|
||||
Self::path_for(
|
||||
&self.path_or_conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
&DeltaFileName {
|
||||
seg: self.seg,
|
||||
start_lsn: self.start_lsn,
|
||||
end_lsn: self.end_lsn,
|
||||
dropped: self.dropped,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn path_for(
|
||||
path_or_conf: &PathOrConf,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
fname: &DeltaFileName,
|
||||
) -> PathBuf {
|
||||
match path_or_conf {
|
||||
PathOrConf::Path(path) => path.clone(),
|
||||
PathOrConf::Conf(conf) => conf
|
||||
.timeline_path(&timelineid, &tenantid)
|
||||
.join(fname.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new delta file, using the given btreemaps containing the page versions and
|
||||
/// relsizes.
|
||||
///
|
||||
/// This is used to write the in-memory layer to disk. The in-memory layer uses the same
|
||||
/// data structure with two btreemaps as we do, so passing the btreemaps is currently
|
||||
/// expedient.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn create(
|
||||
conf: &'static PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
seg: SegmentTag,
|
||||
start_lsn: Lsn,
|
||||
end_lsn: Lsn,
|
||||
dropped: bool,
|
||||
predecessor: Option<Arc<dyn Layer>>,
|
||||
page_versions: BTreeMap<(u32, Lsn), PageVersion>,
|
||||
relsizes: BTreeMap<Lsn, u32>,
|
||||
) -> Result<DeltaLayer> {
|
||||
let delta_layer = DeltaLayer {
|
||||
path_or_conf: PathOrConf::Conf(conf),
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg,
|
||||
start_lsn,
|
||||
end_lsn,
|
||||
dropped,
|
||||
inner: Mutex::new(DeltaLayerInner {
|
||||
loaded: true,
|
||||
page_version_metas: BTreeMap::new(),
|
||||
relsizes,
|
||||
}),
|
||||
predecessor,
|
||||
};
|
||||
let mut inner = delta_layer.inner.lock().unwrap();
|
||||
|
||||
// Write the in-memory btreemaps into a file
|
||||
let path = delta_layer.path();
|
||||
|
||||
// Note: This overwrites any existing file. There shouldn't be any.
|
||||
// FIXME: throw an error instead?
|
||||
let file = File::create(&path)?;
|
||||
let book = BookWriter::new(file, DELTA_FILE_MAGIC)?;
|
||||
|
||||
let mut page_version_writer = BlobWriter::new(book, PAGE_VERSIONS_CHAPTER);
|
||||
|
||||
for (key, page_version) in page_versions {
|
||||
let page_image_range = page_version
|
||||
.page_image
|
||||
.map(|page_image| page_version_writer.write_blob(page_image.as_ref()))
|
||||
.transpose()?;
|
||||
|
||||
let record_range = page_version
|
||||
.record
|
||||
.map(|record| {
|
||||
let buf = WALRecord::ser(&record)?;
|
||||
page_version_writer.write_blob(&buf)
|
||||
})
|
||||
.transpose()?;
|
||||
|
||||
let old = inner.page_version_metas.insert(
|
||||
key,
|
||||
PageVersionMeta {
|
||||
page_image_range,
|
||||
record_range,
|
||||
},
|
||||
);
|
||||
|
||||
assert!(old.is_none());
|
||||
}
|
||||
|
||||
let book = page_version_writer.close()?;
|
||||
|
||||
// Write out page versions
|
||||
let mut chapter = book.new_chapter(PAGE_VERSION_METAS_CHAPTER);
|
||||
let buf = BTreeMap::ser(&inner.page_version_metas)?;
|
||||
chapter.write_all(&buf)?;
|
||||
let book = chapter.close()?;
|
||||
|
||||
// and relsizes to separate chapter
|
||||
let mut chapter = book.new_chapter(REL_SIZES_CHAPTER);
|
||||
let buf = BTreeMap::ser(&inner.relsizes)?;
|
||||
chapter.write_all(&buf)?;
|
||||
let book = chapter.close()?;
|
||||
|
||||
book.close()?;
|
||||
|
||||
trace!("saved {}", &path.display());
|
||||
|
||||
drop(inner);
|
||||
|
||||
Ok(delta_layer)
|
||||
}
|
||||
|
||||
fn open_book(&self) -> Result<(PathBuf, Book<File>)> {
|
||||
let path = Self::path_for(
|
||||
&self.path_or_conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
&DeltaFileName {
|
||||
seg: self.seg,
|
||||
start_lsn: self.start_lsn,
|
||||
end_lsn: self.end_lsn,
|
||||
dropped: self.dropped,
|
||||
},
|
||||
);
|
||||
|
||||
let file = File::open(&path)?;
|
||||
let book = Book::new(file)?;
|
||||
|
||||
Ok((path, book))
|
||||
}
|
||||
|
||||
///
|
||||
/// Load the contents of the file into memory
|
||||
///
|
||||
fn load(&self) -> Result<MutexGuard<DeltaLayerInner>> {
|
||||
// quick exit if already loaded
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
if inner.loaded {
|
||||
return Ok(inner);
|
||||
}
|
||||
|
||||
let (path, book) = self.open_book()?;
|
||||
|
||||
let chapter = book.read_chapter(PAGE_VERSION_METAS_CHAPTER)?;
|
||||
let page_version_metas = BTreeMap::des(&chapter)?;
|
||||
|
||||
let chapter = book.read_chapter(REL_SIZES_CHAPTER)?;
|
||||
let relsizes = BTreeMap::des(&chapter)?;
|
||||
|
||||
debug!("loaded from {}", &path.display());
|
||||
|
||||
*inner = DeltaLayerInner {
|
||||
loaded: true,
|
||||
page_version_metas,
|
||||
relsizes,
|
||||
};
|
||||
|
||||
Ok(inner)
|
||||
}
|
||||
|
||||
/// Create a DeltaLayer struct representing an existing file on disk.
|
||||
pub fn new(
|
||||
conf: &'static PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
filename: &DeltaFileName,
|
||||
predecessor: Option<Arc<dyn Layer>>,
|
||||
) -> DeltaLayer {
|
||||
DeltaLayer {
|
||||
path_or_conf: PathOrConf::Conf(conf),
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg: filename.seg,
|
||||
start_lsn: filename.start_lsn,
|
||||
end_lsn: filename.end_lsn,
|
||||
dropped: filename.dropped,
|
||||
inner: Mutex::new(DeltaLayerInner {
|
||||
loaded: false,
|
||||
page_version_metas: BTreeMap::new(),
|
||||
relsizes: BTreeMap::new(),
|
||||
}),
|
||||
predecessor,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a DeltaLayer struct representing an existing file on disk.
|
||||
///
|
||||
/// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
|
||||
pub fn new_for_path(
|
||||
path: &Path,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
filename: &DeltaFileName,
|
||||
) -> DeltaLayer {
|
||||
DeltaLayer {
|
||||
path_or_conf: PathOrConf::Path(path.to_path_buf()),
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg: filename.seg,
|
||||
start_lsn: filename.start_lsn,
|
||||
end_lsn: filename.end_lsn,
|
||||
dropped: filename.dropped,
|
||||
inner: Mutex::new(DeltaLayerInner {
|
||||
loaded: false,
|
||||
page_version_metas: BTreeMap::new(),
|
||||
relsizes: BTreeMap::new(),
|
||||
}),
|
||||
predecessor: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
313
pageserver/src/layered_repository/filename.rs
Normal file
@@ -0,0 +1,313 @@
|
||||
//!
|
||||
//! Helper functions for dealing with filenames of the image and delta layer files.
|
||||
//!
|
||||
use crate::layered_repository::storage_layer::SegmentTag;
|
||||
use crate::relish::*;
|
||||
use crate::PageServerConf;
|
||||
use crate::{ZTenantId, ZTimelineId};
|
||||
use std::fmt;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::Result;
|
||||
use log::*;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
// Note: LayeredTimeline::load_layer_map() relies on this sort order
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||
pub struct DeltaFileName {
|
||||
pub seg: SegmentTag,
|
||||
pub start_lsn: Lsn,
|
||||
pub end_lsn: Lsn,
|
||||
pub dropped: bool,
|
||||
}
|
||||
|
||||
/// Represents the filename of a DeltaLayer
|
||||
///
|
||||
/// <spcnode>_<dbnode>_<relnode>_<forknum>_<seg>_<start LSN>_<end LSN>
|
||||
///
|
||||
/// or if it was dropped:
|
||||
///
|
||||
/// <spcnode>_<dbnode>_<relnode>_<forknum>_<seg>_<start LSN>_<end LSN>_DROPPED
|
||||
///
|
||||
impl DeltaFileName {
|
||||
///
|
||||
/// Parse a string as a delta file name. Returns None if the filename does not
|
||||
/// match the expected pattern.
|
||||
///
|
||||
pub fn from_str(fname: &str) -> Option<Self> {
|
||||
let rel;
|
||||
let mut parts;
|
||||
if let Some(rest) = fname.strip_prefix("rel_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Relation(RelTag {
|
||||
spcnode: parts.next()?.parse::<u32>().ok()?,
|
||||
dbnode: parts.next()?.parse::<u32>().ok()?,
|
||||
relnode: parts.next()?.parse::<u32>().ok()?,
|
||||
forknum: parts.next()?.parse::<u8>().ok()?,
|
||||
});
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_xact_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Slru {
|
||||
slru: SlruKind::Clog,
|
||||
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Slru {
|
||||
slru: SlruKind::MultiXactMembers,
|
||||
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Slru {
|
||||
slru: SlruKind::MultiXactOffsets,
|
||||
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::FileNodeMap {
|
||||
spcnode: parts.next()?.parse::<u32>().ok()?,
|
||||
dbnode: parts.next()?.parse::<u32>().ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_twophase_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::TwoPhase {
|
||||
xid: parts.next()?.parse::<u32>().ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Checkpoint;
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_control_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::ControlFile;
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
|
||||
let segno = parts.next()?.parse::<u32>().ok()?;
|
||||
|
||||
let seg = SegmentTag { rel, segno };
|
||||
|
||||
let start_lsn = Lsn::from_hex(parts.next()?).ok()?;
|
||||
let end_lsn = Lsn::from_hex(parts.next()?).ok()?;
|
||||
|
||||
let mut dropped = false;
|
||||
if let Some(suffix) = parts.next() {
|
||||
if suffix == "DROPPED" {
|
||||
dropped = true;
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
if parts.next().is_some() {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(DeltaFileName {
|
||||
seg,
|
||||
start_lsn,
|
||||
end_lsn,
|
||||
dropped,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for DeltaFileName {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let basename = match self.seg.rel {
|
||||
RelishTag::Relation(reltag) => format!(
|
||||
"rel_{}_{}_{}_{}",
|
||||
reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum
|
||||
),
|
||||
RelishTag::Slru {
|
||||
slru: SlruKind::Clog,
|
||||
segno,
|
||||
} => format!("pg_xact_{:04X}", segno),
|
||||
RelishTag::Slru {
|
||||
slru: SlruKind::MultiXactMembers,
|
||||
segno,
|
||||
} => format!("pg_multixact_members_{:04X}", segno),
|
||||
RelishTag::Slru {
|
||||
slru: SlruKind::MultiXactOffsets,
|
||||
segno,
|
||||
} => format!("pg_multixact_offsets_{:04X}", segno),
|
||||
RelishTag::FileNodeMap { spcnode, dbnode } => {
|
||||
format!("pg_filenodemap_{}_{}", spcnode, dbnode)
|
||||
}
|
||||
RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid),
|
||||
RelishTag::Checkpoint => "pg_control_checkpoint".to_string(),
|
||||
RelishTag::ControlFile => "pg_control".to_string(),
|
||||
};
|
||||
|
||||
write!(
|
||||
f,
|
||||
"{}_{}_{:016X}_{:016X}{}",
|
||||
basename,
|
||||
self.seg.segno,
|
||||
u64::from(self.start_lsn),
|
||||
u64::from(self.end_lsn),
|
||||
if self.dropped { "_DROPPED" } else { "" }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||
pub struct ImageFileName {
|
||||
pub seg: SegmentTag,
|
||||
pub lsn: Lsn,
|
||||
}
|
||||
|
||||
///
|
||||
/// Represents the filename of an ImageLayer
|
||||
///
|
||||
/// <spcnode>_<dbnode>_<relnode>_<forknum>_<seg>_<LSN>
|
||||
///
|
||||
impl ImageFileName {
|
||||
///
|
||||
/// Parse a string as an image file name. Returns None if the filename does not
|
||||
/// match the expected pattern.
|
||||
///
|
||||
pub fn from_str(fname: &str) -> Option<Self> {
|
||||
let rel;
|
||||
let mut parts;
|
||||
if let Some(rest) = fname.strip_prefix("rel_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Relation(RelTag {
|
||||
spcnode: parts.next()?.parse::<u32>().ok()?,
|
||||
dbnode: parts.next()?.parse::<u32>().ok()?,
|
||||
relnode: parts.next()?.parse::<u32>().ok()?,
|
||||
forknum: parts.next()?.parse::<u8>().ok()?,
|
||||
});
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_xact_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Slru {
|
||||
slru: SlruKind::Clog,
|
||||
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_multixact_members_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Slru {
|
||||
slru: SlruKind::MultiXactMembers,
|
||||
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_multixact_offsets_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Slru {
|
||||
slru: SlruKind::MultiXactOffsets,
|
||||
segno: u32::from_str_radix(parts.next()?, 16).ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_filenodemap_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::FileNodeMap {
|
||||
spcnode: parts.next()?.parse::<u32>().ok()?,
|
||||
dbnode: parts.next()?.parse::<u32>().ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_twophase_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::TwoPhase {
|
||||
xid: parts.next()?.parse::<u32>().ok()?,
|
||||
};
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_control_checkpoint_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::Checkpoint;
|
||||
} else if let Some(rest) = fname.strip_prefix("pg_control_") {
|
||||
parts = rest.split('_');
|
||||
rel = RelishTag::ControlFile;
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
|
||||
let segno = parts.next()?.parse::<u32>().ok()?;
|
||||
|
||||
let seg = SegmentTag { rel, segno };
|
||||
|
||||
let lsn = Lsn::from_hex(parts.next()?).ok()?;
|
||||
|
||||
if parts.next().is_some() {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(ImageFileName { seg, lsn })
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ImageFileName {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let basename = match self.seg.rel {
|
||||
RelishTag::Relation(reltag) => format!(
|
||||
"rel_{}_{}_{}_{}",
|
||||
reltag.spcnode, reltag.dbnode, reltag.relnode, reltag.forknum
|
||||
),
|
||||
RelishTag::Slru {
|
||||
slru: SlruKind::Clog,
|
||||
segno,
|
||||
} => format!("pg_xact_{:04X}", segno),
|
||||
RelishTag::Slru {
|
||||
slru: SlruKind::MultiXactMembers,
|
||||
segno,
|
||||
} => format!("pg_multixact_members_{:04X}", segno),
|
||||
RelishTag::Slru {
|
||||
slru: SlruKind::MultiXactOffsets,
|
||||
segno,
|
||||
} => format!("pg_multixact_offsets_{:04X}", segno),
|
||||
RelishTag::FileNodeMap { spcnode, dbnode } => {
|
||||
format!("pg_filenodemap_{}_{}", spcnode, dbnode)
|
||||
}
|
||||
RelishTag::TwoPhase { xid } => format!("pg_twophase_{}", xid),
|
||||
RelishTag::Checkpoint => "pg_control_checkpoint".to_string(),
|
||||
RelishTag::ControlFile => "pg_control".to_string(),
|
||||
};
|
||||
|
||||
write!(
|
||||
f,
|
||||
"{}_{}_{:016X}",
|
||||
basename,
|
||||
self.seg.segno,
|
||||
u64::from(self.lsn),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Scan timeline directory and create ImageFileName and DeltaFilename
|
||||
/// structs representing all files on disk
|
||||
///
|
||||
/// TODO: returning an Iterator would be more idiomatic
|
||||
pub fn list_files(
|
||||
conf: &'static PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
) -> Result<(Vec<ImageFileName>, Vec<DeltaFileName>)> {
|
||||
let path = conf.timeline_path(&timelineid, &tenantid);
|
||||
|
||||
let mut deltafiles: Vec<DeltaFileName> = Vec::new();
|
||||
let mut imgfiles: Vec<ImageFileName> = Vec::new();
|
||||
for direntry in fs::read_dir(path)? {
|
||||
let fname = direntry?.file_name();
|
||||
let fname = fname.to_str().unwrap();
|
||||
|
||||
if let Some(deltafilename) = DeltaFileName::from_str(fname) {
|
||||
deltafiles.push(deltafilename);
|
||||
} else if let Some(imgfilename) = ImageFileName::from_str(fname) {
|
||||
imgfiles.push(imgfilename);
|
||||
} else if fname == "wal" || fname == "metadata" || fname == "ancestor" {
|
||||
// ignore these
|
||||
} else {
|
||||
warn!("unrecognized filename in timeline dir: {}", fname);
|
||||
}
|
||||
}
|
||||
Ok((imgfiles, deltafiles))
|
||||
}
|
||||
|
||||
/// Helper enum to hold a PageServerConf, or a path
|
||||
///
|
||||
/// This is used by DeltaLayer and ImageLayer. Normally, this holds a reference to the
|
||||
/// global config, and paths to layer files are constructed using the tenant/timeline
|
||||
/// path from the config. But in the 'dump_layerfile' binary, we need to construct a Layer
|
||||
/// struct for a file on disk, without having a page server running, so that we have no
|
||||
/// config. In that case, we use the Path variant to hold the full path to the file on
|
||||
/// disk.
|
||||
pub enum PathOrConf {
|
||||
Path(PathBuf),
|
||||
Conf(&'static PageServerConf),
|
||||
}
|
||||
436
pageserver/src/layered_repository/image_layer.rs
Normal file
@@ -0,0 +1,436 @@
|
||||
//! An ImageLayer represents an image or a snapshot of a segment at one particular LSN.
|
||||
//! It is stored in a file on disk.
|
||||
//!
|
||||
//! On disk, the image files are stored in timelines/<timelineid> directory.
|
||||
//! Currently, there are no subdirectories, and each image layer file is named like this:
|
||||
//!
|
||||
//! Note that segno is also included in the file name:
|
||||
//! <spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<LSN>
|
||||
//!
|
||||
//! For example:
|
||||
//!
|
||||
//! 1663_13990_2609_0_5_000000000169C348
|
||||
//!
|
||||
//! An image file is constructed using the 'bookfile' crate.
|
||||
//!
|
||||
//! Only metadata is loaded into memory by the load function.
|
||||
//! When images are needed, they are read directly from disk.
|
||||
//!
|
||||
//! For blocky relishes, the images are stored in BLOCKY_IMAGES_CHAPTER.
|
||||
//! All the images are required to be BLOCK_SIZE, which allows for random access.
|
||||
//!
|
||||
//! For non-blocky relishes, the image can be found in NONBLOCKY_IMAGE_CHAPTER.
|
||||
//!
|
||||
use crate::layered_repository::filename::{ImageFileName, PathOrConf};
|
||||
use crate::layered_repository::storage_layer::{
|
||||
Layer, PageReconstructData, PageReconstructResult, SegmentTag,
|
||||
};
|
||||
use crate::layered_repository::LayeredTimeline;
|
||||
use crate::layered_repository::RELISH_SEG_SIZE;
|
||||
use crate::PageServerConf;
|
||||
use crate::{ZTenantId, ZTimelineId};
|
||||
use anyhow::{anyhow, ensure, Result};
|
||||
use bytes::Bytes;
|
||||
use log::*;
|
||||
use std::convert::TryInto;
|
||||
use std::fs;
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Mutex, MutexGuard};
|
||||
|
||||
use bookfile::{Book, BookWriter};
|
||||
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
// Magic constant to identify a Zenith segment image file
|
||||
const IMAGE_FILE_MAGIC: u32 = 0x5A616E01 + 1;
|
||||
|
||||
/// Contains each block in block # order
|
||||
const BLOCKY_IMAGES_CHAPTER: u64 = 1;
|
||||
const NONBLOCKY_IMAGE_CHAPTER: u64 = 2;
|
||||
|
||||
const BLOCK_SIZE: usize = 8192;
|
||||
|
||||
///
|
||||
/// ImageLayer is the in-memory data structure associated with an on-disk image
|
||||
/// file. We keep an ImageLayer in memory for each file, in the LayerMap. If a
|
||||
/// layer is in "loaded" state, we have a copy of the file in memory, in 'inner'.
|
||||
/// Otherwise the struct is just a placeholder for a file that exists on disk,
|
||||
/// and it needs to be loaded before using it in queries.
|
||||
///
|
||||
pub struct ImageLayer {
|
||||
path_or_conf: PathOrConf,
|
||||
pub tenantid: ZTenantId,
|
||||
pub timelineid: ZTimelineId,
|
||||
pub seg: SegmentTag,
|
||||
|
||||
// This entry contains an image of all pages as of this LSN
|
||||
pub lsn: Lsn,
|
||||
|
||||
inner: Mutex<ImageLayerInner>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
enum ImageType {
|
||||
Blocky { num_blocks: u32 },
|
||||
NonBlocky,
|
||||
}
|
||||
|
||||
pub struct ImageLayerInner {
|
||||
/// If false, the 'image_type' has not been
|
||||
/// loaded into memory yet.
|
||||
loaded: bool,
|
||||
|
||||
/// Derived from filename and bookfile chapter metadata
|
||||
image_type: ImageType,
|
||||
}
|
||||
|
||||
impl Layer for ImageLayer {
|
||||
fn filename(&self) -> PathBuf {
|
||||
PathBuf::from(
|
||||
ImageFileName {
|
||||
seg: self.seg,
|
||||
lsn: self.lsn,
|
||||
}
|
||||
.to_string(),
|
||||
)
|
||||
}
|
||||
|
||||
fn get_timeline_id(&self) -> ZTimelineId {
|
||||
self.timelineid
|
||||
}
|
||||
|
||||
fn get_seg_tag(&self) -> SegmentTag {
|
||||
self.seg
|
||||
}
|
||||
|
||||
fn is_dropped(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
fn get_start_lsn(&self) -> Lsn {
|
||||
self.lsn
|
||||
}
|
||||
|
||||
fn get_end_lsn(&self) -> Lsn {
|
||||
self.lsn
|
||||
}
|
||||
|
||||
/// Look up given page in the file
|
||||
fn get_page_reconstruct_data(
|
||||
&self,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
reconstruct_data: &mut PageReconstructData,
|
||||
) -> Result<PageReconstructResult> {
|
||||
assert!(lsn >= self.lsn);
|
||||
|
||||
let inner = self.load()?;
|
||||
|
||||
let base_blknum = blknum % RELISH_SEG_SIZE;
|
||||
|
||||
let (_path, book) = self.open_book()?;
|
||||
|
||||
let buf = match &inner.image_type {
|
||||
ImageType::Blocky { num_blocks } => {
|
||||
if base_blknum >= *num_blocks {
|
||||
return Ok(PageReconstructResult::Missing(lsn));
|
||||
}
|
||||
|
||||
let mut buf = vec![0u8; BLOCK_SIZE];
|
||||
let offset = BLOCK_SIZE as u64 * base_blknum as u64;
|
||||
|
||||
let chapter = book.chapter_reader(BLOCKY_IMAGES_CHAPTER)?;
|
||||
chapter.read_exact_at(&mut buf, offset)?;
|
||||
|
||||
buf
|
||||
}
|
||||
ImageType::NonBlocky => {
|
||||
ensure!(base_blknum == 0);
|
||||
book.read_chapter(NONBLOCKY_IMAGE_CHAPTER)?.into_vec()
|
||||
}
|
||||
};
|
||||
|
||||
reconstruct_data.page_img = Some(Bytes::from(buf));
|
||||
Ok(PageReconstructResult::Complete)
|
||||
}
|
||||
|
||||
/// Get size of the segment
|
||||
fn get_seg_size(&self, _lsn: Lsn) -> Result<u32> {
|
||||
let inner = self.load()?;
|
||||
match inner.image_type {
|
||||
ImageType::Blocky { num_blocks } => Ok(num_blocks),
|
||||
ImageType::NonBlocky => Err(anyhow!("get_seg_size called for non-blocky segment")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Does this segment exist at given LSN?
|
||||
fn get_seg_exists(&self, _lsn: Lsn) -> Result<bool> {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
///
|
||||
/// Release most of the memory used by this layer. If it's accessed again later,
|
||||
/// it will need to be loaded back.
|
||||
///
|
||||
fn unload(&self) -> Result<()> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
inner.image_type = ImageType::Blocky { num_blocks: 0 };
|
||||
inner.loaded = false;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn delete(&self) -> Result<()> {
|
||||
// delete underlying file
|
||||
fs::remove_file(self.path())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_incremental(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
/// debugging function to print out the contents of the layer
|
||||
fn dump(&self) -> Result<()> {
|
||||
println!(
|
||||
"----- image layer for tli {} seg {} at {} ----",
|
||||
self.timelineid, self.seg, self.lsn
|
||||
);
|
||||
|
||||
let inner = self.load()?;
|
||||
|
||||
match inner.image_type {
|
||||
ImageType::Blocky { num_blocks } => println!("({}) blocks ", num_blocks),
|
||||
ImageType::NonBlocky => {
|
||||
let (_path, book) = self.open_book()?;
|
||||
let chapter = book.read_chapter(NONBLOCKY_IMAGE_CHAPTER)?;
|
||||
println!("non-blocky ({} bytes)", chapter.len());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ImageLayer {
|
||||
fn path(&self) -> PathBuf {
|
||||
Self::path_for(
|
||||
&self.path_or_conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
&ImageFileName {
|
||||
seg: self.seg,
|
||||
lsn: self.lsn,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn path_for(
|
||||
path_or_conf: &PathOrConf,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
fname: &ImageFileName,
|
||||
) -> PathBuf {
|
||||
match path_or_conf {
|
||||
PathOrConf::Path(path) => path.to_path_buf(),
|
||||
PathOrConf::Conf(conf) => conf
|
||||
.timeline_path(&timelineid, &tenantid)
|
||||
.join(fname.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new image file, using the given array of pages.
|
||||
fn create(
|
||||
conf: &'static PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
seg: SegmentTag,
|
||||
lsn: Lsn,
|
||||
base_images: Vec<Bytes>,
|
||||
) -> Result<ImageLayer> {
|
||||
let image_type = if seg.rel.is_blocky() {
|
||||
let num_blocks: u32 = base_images.len().try_into()?;
|
||||
ImageType::Blocky { num_blocks }
|
||||
} else {
|
||||
assert_eq!(base_images.len(), 1);
|
||||
ImageType::NonBlocky
|
||||
};
|
||||
|
||||
let layer = ImageLayer {
|
||||
path_or_conf: PathOrConf::Conf(conf),
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg,
|
||||
lsn,
|
||||
inner: Mutex::new(ImageLayerInner {
|
||||
loaded: true,
|
||||
image_type: image_type.clone(),
|
||||
}),
|
||||
};
|
||||
let inner = layer.inner.lock().unwrap();
|
||||
|
||||
// Write the images into a file
|
||||
let path = layer.path();
|
||||
|
||||
// Note: This overwrites any existing file. There shouldn't be any.
|
||||
// FIXME: throw an error instead?
|
||||
let file = File::create(&path)?;
|
||||
let book = BookWriter::new(file, IMAGE_FILE_MAGIC)?;
|
||||
|
||||
let book = match &image_type {
|
||||
ImageType::Blocky { .. } => {
|
||||
let mut chapter = book.new_chapter(BLOCKY_IMAGES_CHAPTER);
|
||||
for block_bytes in base_images {
|
||||
assert_eq!(block_bytes.len(), BLOCK_SIZE);
|
||||
chapter.write_all(&block_bytes)?;
|
||||
}
|
||||
chapter.close()?
|
||||
}
|
||||
ImageType::NonBlocky => {
|
||||
let mut chapter = book.new_chapter(NONBLOCKY_IMAGE_CHAPTER);
|
||||
chapter.write_all(&base_images[0])?;
|
||||
chapter.close()?
|
||||
}
|
||||
};
|
||||
|
||||
book.close()?;
|
||||
|
||||
trace!("saved {}", &path.display());
|
||||
|
||||
drop(inner);
|
||||
|
||||
Ok(layer)
|
||||
}
|
||||
|
||||
// Create a new image file by materializing every page in a source layer
|
||||
// at given LSN.
|
||||
pub fn create_from_src(
|
||||
conf: &'static PageServerConf,
|
||||
timeline: &LayeredTimeline,
|
||||
src: &dyn Layer,
|
||||
lsn: Lsn,
|
||||
) -> Result<ImageLayer> {
|
||||
let seg = src.get_seg_tag();
|
||||
let timelineid = timeline.timelineid;
|
||||
|
||||
let startblk;
|
||||
let size;
|
||||
if seg.rel.is_blocky() {
|
||||
size = src.get_seg_size(lsn)?;
|
||||
startblk = seg.segno * RELISH_SEG_SIZE;
|
||||
} else {
|
||||
size = 1;
|
||||
startblk = 0;
|
||||
}
|
||||
|
||||
trace!(
|
||||
"creating new ImageLayer for {} on timeline {} at {}",
|
||||
seg,
|
||||
timelineid,
|
||||
lsn,
|
||||
);
|
||||
|
||||
let mut base_images: Vec<Bytes> = Vec::new();
|
||||
for blknum in startblk..(startblk + size) {
|
||||
let img = timeline.materialize_page(seg, blknum, lsn, &*src)?;
|
||||
|
||||
base_images.push(img);
|
||||
}
|
||||
|
||||
Self::create(conf, timelineid, timeline.tenantid, seg, lsn, base_images)
|
||||
}
|
||||
|
||||
///
|
||||
/// Load the contents of the file into memory
|
||||
///
|
||||
fn load(&self) -> Result<MutexGuard<ImageLayerInner>> {
|
||||
// quick exit if already loaded
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
if inner.loaded {
|
||||
return Ok(inner);
|
||||
}
|
||||
|
||||
let (path, book) = self.open_book()?;
|
||||
|
||||
let image_type = if self.seg.rel.is_blocky() {
|
||||
let chapter = book.chapter_reader(BLOCKY_IMAGES_CHAPTER)?;
|
||||
let images_len = chapter.len();
|
||||
ensure!(images_len % BLOCK_SIZE as u64 == 0);
|
||||
let num_blocks: u32 = (images_len / BLOCK_SIZE as u64).try_into()?;
|
||||
ImageType::Blocky { num_blocks }
|
||||
} else {
|
||||
let _chapter = book.chapter_reader(NONBLOCKY_IMAGE_CHAPTER)?;
|
||||
ImageType::NonBlocky
|
||||
};
|
||||
|
||||
debug!("loaded from {}", &path.display());
|
||||
|
||||
*inner = ImageLayerInner {
|
||||
loaded: true,
|
||||
image_type,
|
||||
};
|
||||
|
||||
Ok(inner)
|
||||
}
|
||||
|
||||
fn open_book(&self) -> Result<(PathBuf, Book<File>)> {
|
||||
let path = Self::path_for(
|
||||
&self.path_or_conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
&ImageFileName {
|
||||
seg: self.seg,
|
||||
lsn: self.lsn,
|
||||
},
|
||||
);
|
||||
|
||||
let file = File::open(&path)?;
|
||||
let book = Book::new(file)?;
|
||||
|
||||
Ok((path, book))
|
||||
}
|
||||
|
||||
/// Create an ImageLayer struct representing an existing file on disk
|
||||
pub fn new(
|
||||
conf: &'static PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
filename: &ImageFileName,
|
||||
) -> ImageLayer {
|
||||
ImageLayer {
|
||||
path_or_conf: PathOrConf::Conf(conf),
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg: filename.seg,
|
||||
lsn: filename.lsn,
|
||||
inner: Mutex::new(ImageLayerInner {
|
||||
loaded: false,
|
||||
image_type: ImageType::Blocky { num_blocks: 0 },
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create an ImageLayer struct representing an existing file on disk.
|
||||
///
|
||||
/// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
|
||||
pub fn new_for_path(
|
||||
path: &Path,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
filename: &ImageFileName,
|
||||
) -> ImageLayer {
|
||||
ImageLayer {
|
||||
path_or_conf: PathOrConf::Path(path.to_path_buf()),
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg: filename.seg,
|
||||
lsn: filename.lsn,
|
||||
inner: Mutex::new(ImageLayerInner {
|
||||
loaded: false,
|
||||
image_type: ImageType::Blocky { num_blocks: 0 },
|
||||
}),
|
||||
}
|
||||
}
|
||||
}
|
||||
pageserver/src/layered_repository/inmemory_layer.rs (new file, 622 lines)
@@ -0,0 +1,622 @@
|
||||
//!
|
||||
//! An in-memory layer stores recently received page versions in memory. The page versions
|
||||
//! are held in a BTreeMap, and there's another BTreeMap to track the size of the relation.
|
||||
//!
|
||||
use crate::layered_repository::filename::DeltaFileName;
|
||||
use crate::layered_repository::storage_layer::{
|
||||
Layer, PageReconstructData, PageReconstructResult, PageVersion, SegmentTag, RELISH_SEG_SIZE,
|
||||
};
|
||||
use crate::layered_repository::LayeredTimeline;
|
||||
use crate::layered_repository::ZERO_PAGE;
|
||||
use crate::layered_repository::{DeltaLayer, ImageLayer};
|
||||
use crate::repository::WALRecord;
|
||||
use crate::PageServerConf;
|
||||
use crate::{ZTenantId, ZTimelineId};
|
||||
use anyhow::{bail, Result};
|
||||
use bytes::Bytes;
|
||||
use log::*;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BTreeMap;
|
||||
use std::ops::Bound::Included;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
pub struct InMemoryLayer {
|
||||
conf: &'static PageServerConf,
|
||||
tenantid: ZTenantId,
|
||||
timelineid: ZTimelineId,
|
||||
seg: SegmentTag,
|
||||
|
||||
///
|
||||
/// This layer contains all the changes from 'start_lsn'. The
|
||||
/// start is inclusive. There is no end LSN; we only use an in-memory
/// layer at the end of a timeline.
|
||||
///
|
||||
start_lsn: Lsn,
|
||||
|
||||
/// LSN of the oldest page version stored in this layer
|
||||
oldest_pending_lsn: Lsn,
|
||||
|
||||
/// The above fields never change. The parts that do change are in 'inner',
|
||||
/// and protected by mutex.
|
||||
inner: Mutex<InMemoryLayerInner>,
|
||||
|
||||
/// Predecessor layer
|
||||
predecessor: Option<Arc<dyn Layer>>,
|
||||
}
|
||||
|
||||
pub struct InMemoryLayerInner {
|
||||
/// If this relation was dropped, remember when that happened.
|
||||
drop_lsn: Option<Lsn>,
|
||||
|
||||
///
|
||||
/// All versions of all pages in the layer are kept here.
|
||||
/// Indexed by block number and LSN.
|
||||
///
|
||||
page_versions: BTreeMap<(u32, Lsn), PageVersion>,
|
||||
|
||||
///
|
||||
/// `segsizes` tracks the size of the segment at different points in time.
|
||||
///
|
||||
segsizes: BTreeMap<Lsn, u32>,
|
||||
}
|
||||
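A minimal, self-contained illustration of why the composite (blknum, LSN) key above works well (u64 stands in for Lsn here; not code from this commit): a bounded range scan visits only one block's versions and yields the newest last.
fn latest_version_demo() {
    use std::collections::BTreeMap;
    let mut versions: BTreeMap<(u32, u64), &str> = BTreeMap::new();
    versions.insert((7, 10), "older image");
    versions.insert((7, 20), "newer image");
    versions.insert((8, 5), "a different block");
    // Latest version of block 7 at or below "LSN" 25:
    let hit = versions.range((7u32, 0u64)..=(7, 25)).next_back();
    assert_eq!(hit.map(|(_, v)| *v), Some("newer image"));
}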
|
||||
impl InMemoryLayerInner {
|
||||
fn get_seg_size(&self, lsn: Lsn) -> u32 {
|
||||
// Scan the BTreeMap backwards, starting from the given entry.
|
||||
let mut iter = self.segsizes.range((Included(&Lsn(0)), Included(&lsn)));
|
||||
|
||||
if let Some((_entry_lsn, entry)) = iter.next_back() {
|
||||
*entry
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Layer for InMemoryLayer {
|
||||
// An in-memory layer doesn't really have a filename as it's not stored on disk,
|
||||
// but we construct a filename as if it was a delta layer
|
||||
fn filename(&self) -> PathBuf {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
|
||||
let end_lsn;
|
||||
let dropped;
|
||||
if let Some(drop_lsn) = inner.drop_lsn {
|
||||
end_lsn = drop_lsn;
|
||||
dropped = true;
|
||||
} else {
|
||||
end_lsn = Lsn(u64::MAX);
|
||||
dropped = false;
|
||||
}
|
||||
|
||||
let delta_filename = DeltaFileName {
|
||||
seg: self.seg,
|
||||
start_lsn: self.start_lsn,
|
||||
end_lsn,
|
||||
dropped,
|
||||
}
|
||||
.to_string();
|
||||
|
||||
PathBuf::from(format!("inmem-{}", delta_filename))
|
||||
}
|
||||
|
||||
fn get_timeline_id(&self) -> ZTimelineId {
|
||||
self.timelineid
|
||||
}
|
||||
|
||||
fn get_seg_tag(&self) -> SegmentTag {
|
||||
self.seg
|
||||
}
|
||||
|
||||
fn get_start_lsn(&self) -> Lsn {
|
||||
self.start_lsn
|
||||
}
|
||||
|
||||
fn get_end_lsn(&self) -> Lsn {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
|
||||
if let Some(drop_lsn) = inner.drop_lsn {
|
||||
drop_lsn
|
||||
} else {
|
||||
Lsn(u64::MAX)
|
||||
}
|
||||
}
|
||||
|
||||
fn is_dropped(&self) -> bool {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
inner.drop_lsn.is_some()
|
||||
}
|
||||
|
||||
/// Look up given page in the cache.
|
||||
fn get_page_reconstruct_data(
|
||||
&self,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
reconstruct_data: &mut PageReconstructData,
|
||||
) -> Result<PageReconstructResult> {
|
||||
let mut cont_lsn: Option<Lsn> = Some(lsn);
|
||||
|
||||
assert!(self.seg.blknum_in_seg(blknum));
|
||||
|
||||
{
|
||||
let inner = self.inner.lock().unwrap();
|
||||
|
||||
// Scan the BTreeMap backwards, starting from the given 'lsn'.
|
||||
let minkey = (blknum, Lsn(0));
|
||||
let maxkey = (blknum, lsn);
|
||||
let mut iter = inner
|
||||
.page_versions
|
||||
.range((Included(&minkey), Included(&maxkey)));
|
||||
while let Some(((_blknum, entry_lsn), entry)) = iter.next_back() {
|
||||
if let Some(img) = &entry.page_image {
|
||||
reconstruct_data.page_img = Some(img.clone());
|
||||
cont_lsn = None;
|
||||
break;
|
||||
} else if let Some(rec) = &entry.record {
|
||||
reconstruct_data.records.push(rec.clone());
|
||||
if rec.will_init {
|
||||
// This WAL record initializes the page, so no need to go further back
|
||||
cont_lsn = None;
|
||||
break;
|
||||
} else {
|
||||
// This WAL record needs to be applied against an older page image
|
||||
cont_lsn = Some(*entry_lsn);
|
||||
}
|
||||
} else {
|
||||
// No base image, and no WAL record. Huh?
|
||||
bail!("no page image or WAL record for requested page");
|
||||
}
|
||||
}
|
||||
|
||||
// release lock on 'inner'
|
||||
}
|
||||
|
||||
// If an older page image is needed to reconstruct the page, let the
|
||||
// caller know about the predecessor layer.
|
||||
if let Some(cont_lsn) = cont_lsn {
|
||||
if let Some(cont_layer) = &self.predecessor {
|
||||
Ok(PageReconstructResult::Continue(
|
||||
cont_lsn,
|
||||
Arc::clone(cont_layer),
|
||||
))
|
||||
} else {
|
||||
Ok(PageReconstructResult::Missing(cont_lsn))
|
||||
}
|
||||
} else {
|
||||
Ok(PageReconstructResult::Complete)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get size of the relation at given LSN
|
||||
fn get_seg_size(&self, lsn: Lsn) -> Result<u32> {
|
||||
assert!(lsn >= self.start_lsn);
|
||||
|
||||
let inner = self.inner.lock().unwrap();
|
||||
Ok(inner.get_seg_size(lsn))
|
||||
}
|
||||
|
||||
/// Does this segment exist at given LSN?
|
||||
fn get_seg_exists(&self, lsn: Lsn) -> Result<bool> {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
|
||||
// Is the requested LSN after the segment was dropped?
|
||||
if let Some(drop_lsn) = inner.drop_lsn {
|
||||
if lsn >= drop_lsn {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, it exists
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Cannot unload anything in an in-memory layer, since there's no backing
|
||||
/// store. To release memory used by an in-memory layer, use 'freeze' to turn
|
||||
/// it into an on-disk layer.
|
||||
fn unload(&self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Nothing to do here. When you drop the last reference to the layer, it will
|
||||
/// be deallocated.
|
||||
fn delete(&self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_incremental(&self) -> bool {
|
||||
self.predecessor.is_some()
|
||||
}
|
||||
|
||||
/// debugging function to print out the contents of the layer
|
||||
fn dump(&self) -> Result<()> {
|
||||
let inner = self.inner.lock().unwrap();
|
||||
|
||||
let end_str = inner
|
||||
.drop_lsn
|
||||
.as_ref()
|
||||
.map(|drop_lsn| drop_lsn.to_string())
|
||||
.unwrap_or_default();
|
||||
|
||||
println!(
|
||||
"----- in-memory layer for tli {} seg {} {}-{} ----",
|
||||
self.timelineid, self.seg, self.start_lsn, end_str
|
||||
);
|
||||
|
||||
for (k, v) in inner.segsizes.iter() {
|
||||
println!("segsizes {}: {}", k, v);
|
||||
}
|
||||
|
||||
for (k, v) in inner.page_versions.iter() {
|
||||
println!(
|
||||
"blk {} at {}: {}/{}\n",
|
||||
k.0,
|
||||
k.1,
|
||||
v.page_image.is_some(),
|
||||
v.record.is_some()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Type alias to simplify InMemoryLayer::freeze signature
|
||||
//
|
||||
type SuccessorLayers = (Vec<Arc<dyn Layer>>, Option<Arc<InMemoryLayer>>);
|
||||
|
||||
impl InMemoryLayer {
|
||||
/// Return the oldest page version that's stored in this layer
|
||||
pub fn get_oldest_pending_lsn(&self) -> Lsn {
|
||||
self.oldest_pending_lsn
|
||||
}
|
||||
|
||||
///
|
||||
/// Create a new, empty, in-memory layer
|
||||
///
|
||||
pub fn create(
|
||||
conf: &'static PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
seg: SegmentTag,
|
||||
start_lsn: Lsn,
|
||||
oldest_pending_lsn: Lsn,
|
||||
) -> Result<InMemoryLayer> {
|
||||
trace!(
|
||||
"initializing new empty InMemoryLayer for writing {} on timeline {} at {}",
|
||||
seg,
|
||||
timelineid,
|
||||
start_lsn
|
||||
);
|
||||
|
||||
Ok(InMemoryLayer {
|
||||
conf,
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg,
|
||||
start_lsn,
|
||||
oldest_pending_lsn,
|
||||
inner: Mutex::new(InMemoryLayerInner {
|
||||
drop_lsn: None,
|
||||
page_versions: BTreeMap::new(),
|
||||
segsizes: BTreeMap::new(),
|
||||
}),
|
||||
predecessor: None,
|
||||
})
|
||||
}
|
||||
|
||||
// Write operations
|
||||
|
||||
/// Remember new page version, as a WAL record over previous version
|
||||
pub fn put_wal_record(&self, blknum: u32, rec: WALRecord) -> Result<()> {
|
||||
self.put_page_version(
|
||||
blknum,
|
||||
rec.lsn,
|
||||
PageVersion {
|
||||
page_image: None,
|
||||
record: Some(rec),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/// Remember new page version, as a full page image
|
||||
pub fn put_page_image(&self, blknum: u32, lsn: Lsn, img: Bytes) -> Result<()> {
|
||||
self.put_page_version(
|
||||
blknum,
|
||||
lsn,
|
||||
PageVersion {
|
||||
page_image: Some(img),
|
||||
record: None,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/// Common subroutine of the public put_wal_record() and put_page_image() functions.
|
||||
/// Adds the page version to the in-memory tree
|
||||
pub fn put_page_version(&self, blknum: u32, lsn: Lsn, pv: PageVersion) -> Result<()> {
|
||||
assert!(self.seg.blknum_in_seg(blknum));
|
||||
|
||||
trace!(
|
||||
"put_page_version blk {} of {} at {}/{}",
|
||||
blknum,
|
||||
self.seg.rel,
|
||||
self.timelineid,
|
||||
lsn
|
||||
);
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
let old = inner.page_versions.insert((blknum, lsn), pv);
|
||||
|
||||
if old.is_some() {
|
||||
// We already had an entry for this LSN. That's odd..
|
||||
warn!(
|
||||
"Page version of rel {} blk {} at {} already exists",
|
||||
self.seg.rel, blknum, lsn
|
||||
);
|
||||
}
|
||||
|
||||
// Also update the relation size, if this extended the relation.
|
||||
if self.seg.rel.is_blocky() {
|
||||
let newsize = blknum - self.seg.segno * RELISH_SEG_SIZE + 1;
|
||||
|
||||
// use inner get_seg_size, since calling self.get_seg_size will try to acquire self.inner.lock
|
||||
// which we've just acquired above
|
||||
let oldsize = inner.get_seg_size(lsn);
|
||||
if newsize > oldsize {
|
||||
trace!(
|
||||
"enlarging segment {} from {} to {} blocks at {}",
|
||||
self.seg,
|
||||
oldsize,
|
||||
newsize,
|
||||
lsn
|
||||
);
|
||||
|
||||
// If we are extending the relation by more than one page, initialize the "gap"
|
||||
// with zeros
|
||||
//
|
||||
// XXX: What if the caller initializes the gap with subsequent call with same LSN?
|
||||
// I don't think that can happen currently, but that is highly dependent on how
|
||||
// PostgreSQL writes its WAL records and there's no guarantee of it. If it does
|
||||
// happen, we would hit the "page version already exists" warning above on the
|
||||
// subsequent call to initialize the gap page.
|
||||
let gapstart = self.seg.segno * RELISH_SEG_SIZE + oldsize;
|
||||
for gapblknum in gapstart..blknum {
|
||||
let zeropv = PageVersion {
|
||||
page_image: Some(ZERO_PAGE.clone()),
|
||||
record: None,
|
||||
};
|
||||
println!(
|
||||
"filling gap blk {} with zeros for write of {}",
|
||||
gapblknum, blknum
|
||||
);
|
||||
let old = inner.page_versions.insert((gapblknum, lsn), zeropv);
|
||||
// We already had an entry for this LSN. That's odd..
|
||||
|
||||
if old.is_some() {
|
||||
warn!(
|
||||
"Page version of rel {} blk {} at {} already exists",
|
||||
self.seg.rel, blknum, lsn
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
inner.segsizes.insert(lsn, newsize);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remember that the relation was truncated at given LSN
|
||||
pub fn put_truncation(&self, lsn: Lsn, segsize: u32) -> anyhow::Result<()> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
let old = inner.segsizes.insert(lsn, segsize);
|
||||
|
||||
if old.is_some() {
|
||||
// We already had an entry for this LSN. That's odd..
|
||||
warn!("Inserting truncation, but had an entry for the LSN already");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remember that the segment was dropped at given LSN
|
||||
pub fn drop_segment(&self, lsn: Lsn) -> anyhow::Result<()> {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
assert!(inner.drop_lsn.is_none());
|
||||
inner.drop_lsn = Some(lsn);
|
||||
|
||||
info!("dropped segment {} at {}", self.seg, lsn);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
///
|
||||
/// Initialize a new InMemoryLayer by copying the state at the given
/// point in time from the given existing layer.
|
||||
///
|
||||
pub fn create_successor_layer(
|
||||
conf: &'static PageServerConf,
|
||||
src: Arc<dyn Layer>,
|
||||
timelineid: ZTimelineId,
|
||||
tenantid: ZTenantId,
|
||||
start_lsn: Lsn,
|
||||
oldest_pending_lsn: Lsn,
|
||||
) -> Result<InMemoryLayer> {
|
||||
let seg = src.get_seg_tag();
|
||||
|
||||
trace!(
|
||||
"initializing new InMemoryLayer for writing {} on timeline {} at {}",
|
||||
seg,
|
||||
timelineid,
|
||||
start_lsn,
|
||||
);
|
||||
|
||||
// For convenience, copy the segment size from the predecessor layer
|
||||
let mut segsizes = BTreeMap::new();
|
||||
if seg.rel.is_blocky() {
|
||||
let size = src.get_seg_size(start_lsn)?;
|
||||
segsizes.insert(start_lsn, size);
|
||||
}
|
||||
|
||||
Ok(InMemoryLayer {
|
||||
conf,
|
||||
timelineid,
|
||||
tenantid,
|
||||
seg,
|
||||
start_lsn,
|
||||
oldest_pending_lsn,
|
||||
inner: Mutex::new(InMemoryLayerInner {
|
||||
drop_lsn: None,
|
||||
page_versions: BTreeMap::new(),
|
||||
segsizes,
|
||||
}),
|
||||
predecessor: Some(src),
|
||||
})
|
||||
}
|
||||
|
||||
///
|
||||
/// Write this in-memory layer to disk.
|
||||
///
|
||||
/// The cutoff point for the layer that's written to disk is 'end_lsn'.
|
||||
///
|
||||
/// Returns new layers that replace this one. Always returns a new image
|
||||
/// layer containing the page versions at the cutoff LSN, that were written
|
||||
/// to disk, and usually also a DeltaLayer that includes all the WAL records
|
||||
/// between start LSN and the cutoff. (The delta layer is not needed when
|
||||
/// a new relish is created with a single LSN, so that the start and end LSN
|
||||
/// are the same.) If there were page versions newer than 'end_lsn', also
|
||||
/// returns a new in-memory layer containing those page versions. The caller
|
||||
/// replaces this layer with the returned layers in the layer map.
|
||||
///
|
||||
pub fn freeze(
|
||||
&self,
|
||||
cutoff_lsn: Lsn,
|
||||
// This is needed just to call materialize_page()
|
||||
timeline: &LayeredTimeline,
|
||||
) -> Result<SuccessorLayers> {
|
||||
info!(
|
||||
"freezing in memory layer for {} on timeline {} at {}",
|
||||
self.seg, self.timelineid, cutoff_lsn
|
||||
);
|
||||
|
||||
let inner = self.inner.lock().unwrap();
|
||||
|
||||
// Normally, use the cutoff LSN as the end of the frozen layer.
|
||||
// But if the relation was dropped, we know that there are no
|
||||
// more changes coming in for it, and in particular we know that
|
||||
// there are no changes "in flight" for the LSN anymore, so we use
|
||||
// the drop LSN instead. The drop-LSN could be ahead of the
|
||||
// caller-specified LSN!
|
||||
let dropped = inner.drop_lsn.is_some();
|
||||
let end_lsn = if dropped {
|
||||
inner.drop_lsn.unwrap()
|
||||
} else {
|
||||
cutoff_lsn
|
||||
};
|
||||
|
||||
// Divide all the page versions into old and new at the 'end_lsn' cutoff point.
|
||||
let mut before_page_versions;
|
||||
let mut before_segsizes;
|
||||
let mut after_page_versions;
|
||||
let mut after_segsizes;
|
||||
if !dropped {
|
||||
before_segsizes = BTreeMap::new();
|
||||
after_segsizes = BTreeMap::new();
|
||||
for (lsn, size) in inner.segsizes.iter() {
|
||||
if *lsn > end_lsn {
|
||||
after_segsizes.insert(*lsn, *size);
|
||||
} else {
|
||||
before_segsizes.insert(*lsn, *size);
|
||||
}
|
||||
}
|
||||
|
||||
before_page_versions = BTreeMap::new();
|
||||
after_page_versions = BTreeMap::new();
|
||||
for ((blknum, lsn), pv) in inner.page_versions.iter() {
|
||||
match lsn.cmp(&end_lsn) {
|
||||
Ordering::Less => {
|
||||
before_page_versions.insert((*blknum, *lsn), pv.clone());
|
||||
}
|
||||
Ordering::Equal => {
|
||||
// Page versions at the cutoff LSN will be stored in the
|
||||
// materialized image layer.
|
||||
}
|
||||
Ordering::Greater => {
|
||||
after_page_versions.insert((*blknum, *lsn), pv.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
before_page_versions = inner.page_versions.clone();
|
||||
before_segsizes = inner.segsizes.clone();
|
||||
after_segsizes = BTreeMap::new();
|
||||
after_page_versions = BTreeMap::new();
|
||||
}
|
||||
|
||||
// we can release the lock now.
|
||||
drop(inner);
|
||||
|
||||
let mut frozen_layers: Vec<Arc<dyn Layer>> = Vec::new();
|
||||
|
||||
if self.start_lsn != end_lsn {
|
||||
// Write the page versions before the cutoff to disk.
|
||||
let delta_layer = DeltaLayer::create(
|
||||
self.conf,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
self.seg,
|
||||
self.start_lsn,
|
||||
end_lsn,
|
||||
dropped,
|
||||
self.predecessor.clone(),
|
||||
before_page_versions,
|
||||
before_segsizes,
|
||||
)?;
|
||||
let delta_layer_rc: Arc<dyn Layer> = Arc::new(delta_layer);
|
||||
frozen_layers.push(delta_layer_rc);
|
||||
trace!(
|
||||
"freeze: created delta layer {} {}-{}",
|
||||
self.seg,
|
||||
self.start_lsn,
|
||||
end_lsn
|
||||
);
|
||||
} else {
|
||||
assert!(before_page_versions.is_empty());
|
||||
}
|
||||
|
||||
let mut new_open_rc = None;
|
||||
if !dropped {
|
||||
// Write a new base image layer at the cutoff point
|
||||
let imgfile = ImageLayer::create_from_src(self.conf, timeline, self, end_lsn)?;
|
||||
let imgfile_rc: Arc<dyn Layer> = Arc::new(imgfile);
|
||||
frozen_layers.push(Arc::clone(&imgfile_rc));
|
||||
trace!("freeze: created image layer {} at {}", self.seg, end_lsn);
|
||||
|
||||
// If there were any page versions newer than the cutoff, initialize a new in-memory
|
||||
// layer to hold them
|
||||
if !after_segsizes.is_empty() || !after_page_versions.is_empty() {
|
||||
let new_open = Self::create_successor_layer(
|
||||
self.conf,
|
||||
imgfile_rc,
|
||||
self.timelineid,
|
||||
self.tenantid,
|
||||
end_lsn,
|
||||
end_lsn,
|
||||
)?;
|
||||
let mut new_inner = new_open.inner.lock().unwrap();
|
||||
new_inner.page_versions.append(&mut after_page_versions);
|
||||
new_inner.segsizes.append(&mut after_segsizes);
|
||||
drop(new_inner);
|
||||
trace!("freeze: created new in-mem layer {} {}-", self.seg, end_lsn);
|
||||
|
||||
new_open_rc = Some(Arc::new(new_open))
|
||||
}
|
||||
}
|
||||
|
||||
Ok((frozen_layers, new_open_rc))
|
||||
}
|
||||
}
|
||||
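A sketch of the glue a checkpoint might use to install the SuccessorLayers returned by freeze() into the LayerMap defined in the next file; this is assumed shape, not code from this commit:
fn install_frozen_layers(
    layers: &mut LayerMap,
    frozen: Vec<Arc<dyn Layer>>,
    new_open: Option<Arc<InMemoryLayer>>,
) {
    // Assumes the layer just frozen was the oldest open entry in the map.
    layers.pop_oldest_open();
    // The delta/image layers written to disk become historic layers.
    for layer in frozen {
        layers.insert_historic(layer);
    }
    // Page versions newer than the cutoff continue in a fresh open layer, if any.
    if let Some(open) = new_open {
        layers.insert_open(open);
    }
}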
pageserver/src/layered_repository/layer_map.rs (new file, 376 lines)
@@ -0,0 +1,376 @@
|
||||
//!
//! The layer map tracks what layers exist for all the relishes in a timeline.
//!
//! When the timeline is first accessed, the server lists all layer files
//! in the timelines/<timelineid> directory, and populates this map with
//! ImageLayer and DeltaLayer structs corresponding to each file. When new WAL
//! is received, we create InMemoryLayers to hold the incoming records. Now and
//! then, in the checkpoint() function, the in-memory layers are frozen, forming
//! new image and delta layers, and the corresponding files are written to disk.
//!
use crate::layered_repository::storage_layer::{Layer, SegmentTag};
|
||||
use crate::layered_repository::InMemoryLayer;
|
||||
use crate::relish::*;
|
||||
use anyhow::Result;
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::HashSet;
|
||||
use std::collections::{BTreeMap, BinaryHeap, HashMap};
|
||||
use std::ops::Bound::Included;
|
||||
use std::sync::Arc;
|
||||
use zenith_metrics::{register_int_gauge, IntGauge};
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
lazy_static! {
|
||||
static ref NUM_INMEMORY_LAYERS: IntGauge =
|
||||
register_int_gauge!("pageserver_inmemory_layers", "Number of layers in memory")
|
||||
.expect("failed to define a metric");
|
||||
static ref NUM_ONDISK_LAYERS: IntGauge =
|
||||
register_int_gauge!("pageserver_ondisk_layers", "Number of layers on-disk")
|
||||
.expect("failed to define a metric");
|
||||
}
|
||||
|
||||
///
|
||||
/// LayerMap tracks what layers exist on a timeline.
|
||||
///
|
||||
pub struct LayerMap {
|
||||
/// All the layers keyed by segment tag
|
||||
segs: HashMap<SegmentTag, SegEntry>,
|
||||
|
||||
/// All in-memory layers, ordered by 'oldest_pending_lsn' of each layer.
|
||||
/// This allows easy access to the in-memory layer that contains the
|
||||
/// oldest WAL record.
|
||||
open_segs: BinaryHeap<OpenSegEntry>,
|
||||
|
||||
/// Generation number, used to distinguish newly inserted entries in the
|
||||
/// binary heap from older entries during checkpoint.
|
||||
current_generation: u64,
|
||||
}
|
||||
|
||||
///
|
||||
/// Per-segment entry in the LayerMap.segs hash map
|
||||
///
|
||||
/// The last layer that is open for writes is always an InMemoryLayer,
|
||||
/// and is kept in a separate field, because there can be only one for
|
||||
/// each segment. The older layers, stored on disk, are kept in a
|
||||
/// BTreeMap keyed by the layer's start LSN.
|
||||
struct SegEntry {
|
||||
pub open: Option<Arc<InMemoryLayer>>,
|
||||
pub historic: BTreeMap<Lsn, Arc<dyn Layer>>,
|
||||
}
|
||||
|
||||
/// Entry held in LayerMap.open_segs, with boilerplate comparison
|
||||
/// routines to implement a min-heap ordered by 'oldest_pending_lsn'
|
||||
///
|
||||
/// Each entry also carries a generation number. It can be used to distinguish
|
||||
/// entries with the same 'oldest_pending_lsn'.
|
||||
struct OpenSegEntry {
|
||||
pub oldest_pending_lsn: Lsn,
|
||||
pub layer: Arc<InMemoryLayer>,
|
||||
pub generation: u64,
|
||||
}
|
||||
impl Ord for OpenSegEntry {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
// to get that. Tie-break on generation, matching partial_cmp below, so that
// Ord and PartialOrd agree with each other.
other
.oldest_pending_lsn
.cmp(&self.oldest_pending_lsn)
.then_with(|| other.generation.cmp(&self.generation))
}
|
||||
}
|
||||
impl PartialOrd for OpenSegEntry {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
// BinaryHeap is a max-heap, and we want a min-heap. Reverse the ordering here
|
||||
// to get that. Entries with identical oldest_pending_lsn are ordered by generation
|
||||
Some(
|
||||
other
|
||||
.oldest_pending_lsn
|
||||
.cmp(&self.oldest_pending_lsn)
|
||||
.then_with(|| other.generation.cmp(&self.generation)),
|
||||
)
|
||||
}
|
||||
}
|
||||
impl PartialEq for OpenSegEntry {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.oldest_pending_lsn.eq(&other.oldest_pending_lsn)
|
||||
}
|
||||
}
|
||||
impl Eq for OpenSegEntry {}
|
||||
|
||||
impl LayerMap {
|
||||
///
|
||||
/// Look up a layer using the given segment tag and LSN. This differs from a
|
||||
/// plain key-value lookup in that if there is any layer that covers the
|
||||
/// given LSN, or precedes the given LSN, it is returned. In other words,
|
||||
/// you don't need to know the exact start LSN of the layer.
|
||||
///
|
||||
pub fn get(&self, tag: &SegmentTag, lsn: Lsn) -> Option<Arc<dyn Layer>> {
|
||||
let segentry = self.segs.get(tag)?;
|
||||
|
||||
if let Some(open) = &segentry.open {
|
||||
if open.get_start_lsn() <= lsn {
|
||||
let x: Arc<dyn Layer> = Arc::clone(open) as _;
|
||||
return Some(x);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some((_k, v)) = segentry
|
||||
.historic
|
||||
.range((Included(Lsn(0)), Included(lsn)))
|
||||
.next_back()
|
||||
{
|
||||
let x: Arc<dyn Layer> = Arc::clone(v) as _;
|
||||
Some(x)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Get the open layer for given segment for writing. Or None if no open
|
||||
/// layer exists.
|
||||
///
|
||||
pub fn get_open(&self, tag: &SegmentTag) -> Option<Arc<InMemoryLayer>> {
|
||||
let segentry = self.segs.get(tag)?;
|
||||
segentry.open.as_ref().map(Arc::clone)
|
||||
}
|
||||
|
||||
///
|
||||
/// Insert an open in-memory layer
|
||||
///
|
||||
pub fn insert_open(&mut self, layer: Arc<InMemoryLayer>) {
|
||||
let tag = layer.get_seg_tag();
|
||||
|
||||
if let Some(segentry) = self.segs.get_mut(&tag) {
|
||||
if let Some(_old) = &segentry.open {
|
||||
// FIXME: shouldn't exist, but check
|
||||
}
|
||||
segentry.open = Some(Arc::clone(&layer));
|
||||
} else {
|
||||
let segentry = SegEntry {
|
||||
open: Some(Arc::clone(&layer)),
|
||||
historic: BTreeMap::new(),
|
||||
};
|
||||
self.segs.insert(tag, segentry);
|
||||
}
|
||||
|
||||
let opensegentry = OpenSegEntry {
|
||||
oldest_pending_lsn: layer.get_oldest_pending_lsn(),
|
||||
layer,
|
||||
generation: self.current_generation,
|
||||
};
|
||||
self.open_segs.push(opensegentry);
|
||||
|
||||
NUM_INMEMORY_LAYERS.inc();
|
||||
}
|
||||
|
||||
/// Remove the oldest in-memory layer
|
||||
pub fn pop_oldest_open(&mut self) {
|
||||
let opensegentry = self.open_segs.pop().unwrap();
|
||||
let segtag = opensegentry.layer.get_seg_tag();
|
||||
|
||||
let mut segentry = self.segs.get_mut(&segtag).unwrap();
|
||||
segentry.open = None;
|
||||
NUM_INMEMORY_LAYERS.dec();
|
||||
}
|
||||
|
||||
///
|
||||
/// Insert an on-disk layer
|
||||
///
|
||||
pub fn insert_historic(&mut self, layer: Arc<dyn Layer>) {
|
||||
let tag = layer.get_seg_tag();
|
||||
let start_lsn = layer.get_start_lsn();
|
||||
|
||||
if let Some(segentry) = self.segs.get_mut(&tag) {
|
||||
segentry.historic.insert(start_lsn, layer);
|
||||
} else {
|
||||
let mut historic = BTreeMap::new();
|
||||
historic.insert(start_lsn, layer);
|
||||
|
||||
let segentry = SegEntry {
|
||||
open: None,
|
||||
historic,
|
||||
};
|
||||
self.segs.insert(tag, segentry);
|
||||
}
|
||||
NUM_ONDISK_LAYERS.inc();
|
||||
}
|
||||
|
||||
///
|
||||
/// Remove an on-disk layer from the map.
|
||||
///
|
||||
/// This should be called when the corresponding file on disk has been deleted.
|
||||
///
|
||||
pub fn remove_historic(&mut self, layer: &dyn Layer) {
|
||||
let tag = layer.get_seg_tag();
|
||||
let start_lsn = layer.get_start_lsn();
|
||||
|
||||
if let Some(segentry) = self.segs.get_mut(&tag) {
|
||||
segentry.historic.remove(&start_lsn);
|
||||
}
|
||||
NUM_ONDISK_LAYERS.dec();
|
||||
}
|
||||
|
||||
// List relations that exist at the lsn
|
||||
pub fn list_rels(&self, spcnode: u32, dbnode: u32, lsn: Lsn) -> Result<HashSet<RelTag>> {
|
||||
let mut rels: HashSet<RelTag> = HashSet::new();
|
||||
|
||||
for (seg, segentry) in self.segs.iter() {
|
||||
if let RelishTag::Relation(reltag) = seg.rel {
|
||||
if (spcnode == 0 || reltag.spcnode == spcnode)
|
||||
&& (dbnode == 0 || reltag.dbnode == dbnode)
|
||||
{
|
||||
// Add only if it exists at the requested LSN.
|
||||
if let Some(open) = &segentry.open {
|
||||
if open.get_end_lsn() > lsn {
|
||||
rels.insert(reltag);
|
||||
}
|
||||
} else if let Some((_k, _v)) = segentry
|
||||
.historic
|
||||
.range((Included(Lsn(0)), Included(lsn)))
|
||||
.next_back()
|
||||
{
|
||||
rels.insert(reltag);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(rels)
|
||||
}
|
||||
|
||||
// List non-relation relishes that exist at the lsn
|
||||
pub fn list_nonrels(&self, lsn: Lsn) -> Result<HashSet<RelishTag>> {
|
||||
let mut rels: HashSet<RelishTag> = HashSet::new();
|
||||
|
||||
// Scan the timeline directory to get all rels in this timeline.
|
||||
for (seg, segentry) in self.segs.iter() {
|
||||
if let RelishTag::Relation(_) = seg.rel {
|
||||
} else {
|
||||
// Add only if it exists at the requested LSN.
|
||||
if let Some(open) = &segentry.open {
|
||||
if open.get_end_lsn() > lsn {
|
||||
rels.insert(seg.rel);
|
||||
}
|
||||
} else if let Some((_k, _v)) = segentry
|
||||
.historic
|
||||
.range((Included(Lsn(0)), Included(lsn)))
|
||||
.next_back()
|
||||
{
|
||||
rels.insert(seg.rel);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(rels)
|
||||
}
|
||||
|
||||
/// Is there a newer image layer for given segment?
|
||||
///
|
||||
/// This is used for garbage collection, to determine if an old layer can
|
||||
/// be deleted.
|
||||
pub fn newer_image_layer_exists(&self, seg: SegmentTag, lsn: Lsn) -> bool {
|
||||
if let Some(segentry) = self.segs.get(&seg) {
|
||||
// We only check on-disk layers, because
|
||||
// in-memory layers are not durable
|
||||
for (newer_lsn, layer) in segentry
|
||||
.historic
|
||||
.range((Included(lsn), Included(Lsn(u64::MAX))))
|
||||
{
|
||||
// Ignore layers that depend on an older layer.
|
||||
if layer.is_incremental() {
|
||||
continue;
|
||||
}
|
||||
if layer.get_end_lsn() > lsn {
|
||||
trace!(
|
||||
"found later layer for {}, {} {}-{}",
|
||||
seg,
|
||||
lsn,
|
||||
newer_lsn,
|
||||
layer.get_end_lsn()
|
||||
);
|
||||
return true;
|
||||
} else {
|
||||
trace!("found singleton layer for {}, {} {}", seg, lsn, newer_lsn);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
trace!("no later layer found for {}, {}", seg, lsn);
|
||||
false
|
||||
}
|
||||
|
||||
/// Return the oldest in-memory layer, along with its generation number.
|
||||
pub fn peek_oldest_open(&self) -> Option<(Arc<InMemoryLayer>, u64)> {
|
||||
if let Some(opensegentry) = self.open_segs.peek() {
|
||||
Some((Arc::clone(&opensegentry.layer), opensegentry.generation))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment the generation number used to stamp open in-memory layers. Layers
|
||||
/// added with `insert_open` after this call will be associated with the new
|
||||
/// generation. Returns the new generation number.
|
||||
pub fn increment_generation(&mut self) -> u64 {
|
||||
self.current_generation += 1;
|
||||
self.current_generation
|
||||
}
|
||||
|
||||
pub fn iter_historic_layers(&self) -> HistoricLayerIter {
|
||||
HistoricLayerIter {
|
||||
segiter: self.segs.iter(),
|
||||
iter: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// debugging function to print out the contents of the layer map
|
||||
#[allow(unused)]
|
||||
pub fn dump(&self) -> Result<()> {
|
||||
println!("Begin dump LayerMap");
|
||||
for (seg, segentry) in self.segs.iter() {
|
||||
if let Some(open) = &segentry.open {
|
||||
open.dump()?;
|
||||
}
|
||||
|
||||
for (_, layer) in segentry.historic.iter() {
|
||||
layer.dump()?;
|
||||
}
|
||||
}
|
||||
println!("End dump LayerMap");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
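Illustrative only, built from the API above (increment_generation, peek_oldest_open, pop_oldest_open): one way a checkpoint could drain the open layers that predate it, stopping once it reaches entries stamped with the new generation.
fn drain_old_open_layers(map: &mut LayerMap) -> Vec<Arc<InMemoryLayer>> {
    let new_generation = map.increment_generation();
    let mut to_freeze = Vec::new();
    while let Some((layer, generation)) = map.peek_oldest_open() {
        if generation >= new_generation {
            // Inserted after this checkpoint started; leave it for the next round.
            break;
        }
        map.pop_oldest_open();
        to_freeze.push(layer);
    }
    to_freeze
}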
|
||||
impl Default for LayerMap {
|
||||
fn default() -> Self {
|
||||
LayerMap {
|
||||
segs: HashMap::new(),
|
||||
open_segs: BinaryHeap::new(),
|
||||
current_generation: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct HistoricLayerIter<'a> {
|
||||
segiter: std::collections::hash_map::Iter<'a, SegmentTag, SegEntry>,
|
||||
iter: Option<std::collections::btree_map::Iter<'a, Lsn, Arc<dyn Layer>>>,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for HistoricLayerIter<'a> {
|
||||
type Item = Arc<dyn Layer>;
|
||||
|
||||
fn next(&mut self) -> std::option::Option<<Self as std::iter::Iterator>::Item> {
|
||||
loop {
|
||||
if let Some(x) = &mut self.iter {
|
||||
if let Some(x) = x.next() {
|
||||
return Some(Arc::clone(&*x.1));
|
||||
}
|
||||
}
|
||||
if let Some(seg) = self.segiter.next() {
|
||||
self.iter = Some(seg.1.historic.iter());
|
||||
continue;
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
pageserver/src/layered_repository/storage_layer.rs (new file, 176 lines)
@@ -0,0 +1,176 @@
|
||||
//!
|
||||
//! Common traits and structs for layers
|
||||
//!
|
||||
|
||||
use crate::relish::RelishTag;
|
||||
use crate::repository::WALRecord;
|
||||
use crate::ZTimelineId;
|
||||
use anyhow::Result;
|
||||
use bytes::Bytes;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
// Size of one segment in pages (10 MB)
|
||||
pub const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192;
|
||||
|
||||
///
|
||||
/// Each relish stored in the repository is divided into fixed-sized "segments",
|
||||
/// with 10 MB of key-space, or 1280 8k pages each.
|
||||
///
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy)]
|
||||
pub struct SegmentTag {
|
||||
pub rel: RelishTag,
|
||||
pub segno: u32,
|
||||
}
|
||||
|
||||
impl fmt::Display for SegmentTag {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}.{}", self.rel, self.segno)
|
||||
}
|
||||
}
|
||||
|
||||
impl SegmentTag {
|
||||
pub const fn from_blknum(rel: RelishTag, blknum: u32) -> SegmentTag {
|
||||
SegmentTag {
|
||||
rel,
|
||||
segno: blknum / RELISH_SEG_SIZE,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn blknum_in_seg(&self, blknum: u32) -> bool {
|
||||
blknum / RELISH_SEG_SIZE == self.segno
|
||||
}
|
||||
}
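For orientation, the mapping implied by RELISH_SEG_SIZE and SegmentTag above, as a hypothetical helper:
fn locate(blknum: u32) -> (u32, u32) {
    const RELISH_SEG_SIZE: u32 = 10 * 1024 * 1024 / 8192; // 1280 blocks per segment
    (blknum / RELISH_SEG_SIZE, blknum % RELISH_SEG_SIZE)
}
// locate(3000) == (2, 440): block 3000 of the relish is block 440 of segment 2.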
|
||||
|
||||
///
/// Represents a version of a page at a specific LSN. The LSN is the key of the
/// entry in the 'page_versions' hash; it is not duplicated here.
///
/// A page version can be stored as a full page image, or as a WAL record that needs
/// to be applied over the previous page version to reconstruct this version.
///
/// It's also possible to have both a WAL record and a page image in the same
/// PageVersion. That happens if a page version is originally stored as a WAL record,
/// but is later reconstructed by a GetPage@LSN request by performing WAL
/// redo. The get_page_at_lsn() code will store the reconstructed page image next to
/// the WAL record in that case. TODO: That's pretty accidental, not the result
/// of any grand design. If we want to keep reconstructed page versions around, we
/// probably should have a separate buffer cache so that we could control the
/// replacement policy globally. Or if we keep a reconstructed page image, we
/// could throw away the WAL record.
///
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct PageVersion {
|
||||
/// an 8kb page image
|
||||
pub page_image: Option<Bytes>,
|
||||
/// WAL record to get from previous page version to this one.
|
||||
pub record: Option<WALRecord>,
|
||||
}
|
||||
|
||||
///
|
||||
/// Data needed to reconstruct a page version
|
||||
///
|
||||
/// 'page_img' is the old base image of the page to start the WAL replay with.
|
||||
/// It can be None, if the first WAL record initializes the page (will_init)
|
||||
/// 'records' contains the records to apply over the base image.
|
||||
///
|
||||
pub struct PageReconstructData {
|
||||
pub records: Vec<WALRecord>,
|
||||
pub page_img: Option<Bytes>,
|
||||
}
|
||||
|
||||
/// Return value from Layer::get_page_reconstruct_data
|
||||
pub enum PageReconstructResult {
|
||||
/// Got all the data needed to reconstruct the requested page
|
||||
Complete,
|
||||
/// This layer didn't contain all the required data, the caller should collect
|
||||
/// more data from the returned predecessor layer at the returned LSN.
|
||||
Continue(Lsn, Arc<dyn Layer>),
|
||||
/// This layer didn't contain data needed to reconstruct the page version at
|
||||
/// the returned LSN. This is usually considered an error, but might be OK
|
||||
/// in some circumstances.
|
||||
Missing(Lsn),
|
||||
}
|
||||
|
||||
///
|
||||
/// A Layer corresponds to one RELISH_SEG_SIZE slice of a relish in a range of LSNs.
|
||||
/// There are two kinds of layers, in-memory and on-disk layers. In-memory
|
||||
/// layers are used to ingest incoming WAL, and provide fast access
|
||||
/// to the recent page versions. On-disk layers are stored as files on disk, and
|
||||
/// are immutable. This trait presents the common functionality of
|
||||
/// in-memory and on-disk layers.
|
||||
///
|
||||
pub trait Layer: Send + Sync {
|
||||
/// Identify the timeline this relish belongs to
|
||||
fn get_timeline_id(&self) -> ZTimelineId;
|
||||
|
||||
/// Identify the relish segment
|
||||
fn get_seg_tag(&self) -> SegmentTag;
|
||||
|
||||
/// Inclusive start bound of the LSN range that this layer holds
|
||||
fn get_start_lsn(&self) -> Lsn;
|
||||
|
||||
/// 'end_lsn' meaning depends on the layer kind:
|
||||
/// - in-memory layer is either unbounded (end_lsn = MAX_LSN) or dropped (end_lsn = drop_lsn)
|
||||
/// - image layer represents snapshot at one LSN, so end_lsn = lsn
|
||||
/// - delta layer has end_lsn
|
||||
///
|
||||
/// TODO Is end_lsn always exclusive for all layer kinds?
|
||||
fn get_end_lsn(&self) -> Lsn;
|
||||
|
||||
/// Is the segment represented by this layer dropped by PostgreSQL?
|
||||
fn is_dropped(&self) -> bool;
|
||||
|
||||
/// Filename used to store this layer on disk. (Even in-memory layers
|
||||
/// implement this, to print a handy unique identifier for the layer for
|
||||
/// log messages, even though they're never stored on disk.)
|
||||
fn filename(&self) -> PathBuf;
|
||||
|
||||
///
|
||||
/// Return data needed to reconstruct given page at LSN.
|
||||
///
|
||||
/// It is up to the caller to collect more data from previous layer and
|
||||
/// perform WAL redo, if necessary.
|
||||
///
|
||||
/// Note that the 'blknum' is the offset of the page from the beginning
|
||||
/// of the *relish*, not the beginning of the segment. The requested
|
||||
/// 'blknum' must be covered by this segment.
|
||||
///
|
||||
/// See PageReconstructResult for possible return values. The collected data
|
||||
/// is appended to reconstruct_data; the caller should pass an empty struct
|
||||
/// on first call. If this returns PageReconstructResult::Continue, call
|
||||
/// again on the returned predecessor layer with the same 'reconstruct_data'
|
||||
/// to collect more data.
|
||||
fn get_page_reconstruct_data(
|
||||
&self,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
reconstruct_data: &mut PageReconstructData,
|
||||
) -> Result<PageReconstructResult>;
|
||||
|
||||
/// Return size of the segment at given LSN. (Only for blocky relations.)
|
||||
fn get_seg_size(&self, lsn: Lsn) -> Result<u32>;
|
||||
|
||||
/// Does the segment exist at the given LSN, or was it dropped before that?
|
||||
fn get_seg_exists(&self, lsn: Lsn) -> Result<bool>;
|
||||
|
||||
/// Does this layer only contain some data for the segment (incremental),
|
||||
/// or does it contain a version of every page? This is important to know
|
||||
/// for garbage collecting old layers: an incremental layer depends on
|
||||
/// the previous non-incremental layer.
|
||||
fn is_incremental(&self) -> bool;
|
||||
|
||||
/// Release memory used by this layer. There is no corresponding 'load'
|
||||
/// function; loading is done implicitly when you call one of the get-functions.
|
||||
fn unload(&self) -> Result<()>;
|
||||
|
||||
/// Permanently remove this layer from disk.
|
||||
fn delete(&self) -> Result<()>;
|
||||
|
||||
/// Dump summary of the contents of the layer to stdout
|
||||
fn dump(&self) -> Result<()>;
|
||||
}
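Putting the trait together, a sketch of the caller-side loop the get_page_reconstruct_data() contract implies (assumed shape, reusing the types and imports of this file):
fn collect_reconstruct_data(
    mut layer: Arc<dyn Layer>,
    blknum: u32,
    mut lsn: Lsn,
) -> Result<PageReconstructData> {
    let mut data = PageReconstructData {
        records: Vec::new(),
        page_img: None,
    };
    loop {
        match layer.get_page_reconstruct_data(blknum, lsn, &mut data)? {
            PageReconstructResult::Complete => return Ok(data),
            PageReconstructResult::Continue(cont_lsn, prev_layer) => {
                // Not enough data in this layer; continue from its predecessor.
                lsn = cont_lsn;
                layer = prev_layer;
            }
            PageReconstructResult::Missing(missing_lsn) => {
                anyhow::bail!("no page version for block {} at {}", blknum, missing_lsn)
            }
        }
    }
}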
|
||||
@@ -1,94 +1,113 @@
|
||||
use std::fmt;
|
||||
use std::net::SocketAddr;
|
||||
use zenith_utils::postgres_backend::AuthType;
|
||||
use zenith_utils::zid::{ZTenantId, ZTimelineId};
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use std::time::Duration;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use zenith_metrics::{register_int_gauge_vec, IntGaugeVec};
|
||||
|
||||
pub mod basebackup;
|
||||
pub mod page_cache;
|
||||
pub mod branches;
|
||||
pub mod http;
|
||||
pub mod layered_repository;
|
||||
pub mod logger;
|
||||
pub mod page_service;
|
||||
pub mod relish;
|
||||
pub mod repository;
|
||||
pub mod restore_local_repo;
|
||||
pub mod tui;
|
||||
pub mod tui_event;
|
||||
mod tui_logger;
|
||||
pub mod tenant_mgr;
|
||||
pub mod waldecoder;
|
||||
pub mod walreceiver;
|
||||
pub mod walredo;
|
||||
|
||||
lazy_static! {
|
||||
static ref LIVE_CONNECTIONS_COUNT: IntGaugeVec = register_int_gauge_vec!(
|
||||
"pageserver_live_connections_count",
|
||||
"Number of live network connections",
|
||||
&["pageserver_connection_kind"]
|
||||
)
|
||||
.expect("failed to define a metric");
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PageServerConf {
|
||||
pub daemonize: bool,
|
||||
pub interactive: bool,
|
||||
pub listen_addr: SocketAddr,
|
||||
pub listen_addr: String,
|
||||
pub http_endpoint_addr: String,
|
||||
pub gc_horizon: u64,
|
||||
pub gc_period: Duration,
|
||||
pub superuser: String,
|
||||
|
||||
// Repository directory, relative to current working directory.
|
||||
// Normally, the page server changes the current working directory
|
||||
// to the repository, and 'workdir' is always '.'. But we don't do
|
||||
// that during unit testing, because the current directory is global
|
||||
// to the process but different unit tests work on different
|
||||
// repositories.
|
||||
pub workdir: PathBuf,
|
||||
|
||||
pub pg_distrib_dir: PathBuf,
|
||||
|
||||
pub auth_type: AuthType,
|
||||
|
||||
pub auth_validation_public_key_path: Option<PathBuf>,
|
||||
}
|
||||
|
||||
/// Zenith Timeline ID is a 128-bit random ID.
|
||||
///
|
||||
/// Zenith timeline IDs are different from PostgreSQL timeline
|
||||
/// IDs. They serve a similar purpose though: they differentiate
|
||||
/// between different "histories" of the same cluster. However,
|
||||
/// PostgreSQL timeline IDs are a bit cumbersome, because they are only
|
||||
/// 32-bits wide, and they must be in ascending order in any given
|
||||
/// timeline history. Those limitations mean that we cannot generate a
|
||||
/// new PostgreSQL timeline ID by just generating a random number. And
|
||||
/// that in turn is problematic for the "pull/push" workflow, where you
|
||||
/// have a local copy of a zenith repository, and you periodically sync
|
||||
/// the local changes with a remote server. When you work "detached"
|
||||
/// from the remote server, you cannot create a PostgreSQL timeline ID
|
||||
/// that's guaranteed to be different from all existing timelines in
|
||||
/// the remote server. For example, if two people each have a clone of
/// the repository on their laptops, and they both create a new branch
/// with a different name. What timeline ID would they assign to their
|
||||
/// branches? If they pick the same one, and later try to push the
|
||||
/// branches to the same remote server, they will get mixed up.
|
||||
///
|
||||
/// To avoid those issues, Zenith has its own concept of timelines that
|
||||
/// is separate from PostgreSQL timelines, and doesn't have those
|
||||
/// limitations. A zenith timeline is identified by a 128-bit ID, which
|
||||
/// is usually printed out as a hex string.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub struct ZTimelineId([u8; 16]);
|
||||
impl FromStr for ZTimelineId {
    type Err = hex::FromHexError;

    fn from_str(s: &str) -> Result<ZTimelineId, Self::Err> {
        let timelineid = hex::decode(s)?;

        let mut buf: [u8; 16] = [0u8; 16];
        buf.copy_from_slice(timelineid.as_slice());
        Ok(ZTimelineId(buf))
    }
}

impl ZTimelineId {
    pub fn from(b: [u8; 16]) -> ZTimelineId {
        ZTimelineId(b)
    }

    pub fn get_from_buf(buf: &mut dyn bytes::Buf) -> ZTimelineId {
        let mut arr = [0u8; 16];
        buf.copy_to_slice(&mut arr);
        ZTimelineId::from(arr)
    }

    pub fn as_arr(&self) -> [u8; 16] {
        self.0
    }
}

impl fmt::Display for ZTimelineId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&hex::encode(self.0))
    }
}

pub fn zenith_repo_dir() -> PathBuf {
    // Find repository path
    match std::env::var_os("ZENITH_REPO_DIR") {
        Some(val) => PathBuf::from(val.to_str().unwrap()),
        None => ".zenith".into(),
    }
}

impl PageServerConf {
    //
    // Repository paths, relative to workdir.
    //

    fn tenants_path(&self) -> PathBuf {
        self.workdir.join("tenants")
    }

    fn tenant_path(&self, tenantid: &ZTenantId) -> PathBuf {
        self.tenants_path().join(tenantid.to_string())
    }

    fn tags_path(&self, tenantid: &ZTenantId) -> PathBuf {
        self.tenant_path(tenantid).join("refs").join("tags")
    }

    fn tag_path(&self, tag_name: &str, tenantid: &ZTenantId) -> PathBuf {
        self.tags_path(tenantid).join(tag_name)
    }

    fn branches_path(&self, tenantid: &ZTenantId) -> PathBuf {
        self.tenant_path(tenantid).join("refs").join("branches")
    }

    fn branch_path(&self, branch_name: &str, tenantid: &ZTenantId) -> PathBuf {
        self.branches_path(tenantid).join(branch_name)
    }

    fn timelines_path(&self, tenantid: &ZTenantId) -> PathBuf {
        self.tenant_path(tenantid).join("timelines")
    }

    fn timeline_path(&self, timelineid: &ZTimelineId, tenantid: &ZTenantId) -> PathBuf {
        self.timelines_path(tenantid).join(timelineid.to_string())
    }

    fn ancestor_path(&self, timelineid: &ZTimelineId, tenantid: &ZTenantId) -> PathBuf {
        self.timeline_path(timelineid, tenantid).join("ancestor")
    }

    fn wal_dir_path(&self, timelineid: &ZTimelineId, tenantid: &ZTenantId) -> PathBuf {
        self.timeline_path(timelineid, tenantid).join("wal")
    }

    //
    // Postgres distribution paths
    //

    pub fn pg_bin_dir(&self) -> PathBuf {
        self.pg_distrib_dir.join("bin")
    }

    pub fn pg_lib_dir(&self) -> PathBuf {
        self.pg_distrib_dir.join("lib")
    }
}
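// Illustrative usage sketch: round-trip a zenith timeline ID through its hex
// representation. The hex string is arbitrary example data.
#[cfg(test)]
mod ztimelineid_example {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn hex_roundtrip() {
        let tid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap();
        assert_eq!(tid.to_string(), "11223344556677881122334455667788");
        assert_eq!(tid.as_arr().len(), 16);
    }
}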
|
||||
|
||||
pageserver/src/logger.rs (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
use crate::PageServerConf;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use slog::{Drain, FnValue};
|
||||
use std::fs::{File, OpenOptions};
|
||||
|
||||
pub fn init_logging(
|
||||
_conf: &PageServerConf,
|
||||
log_filename: &str,
|
||||
) -> Result<(slog_scope::GlobalLoggerGuard, File)> {
|
||||
// Don't open the same file for output multiple times;
|
||||
// the different fds could overwrite each other's output.
|
||||
let log_file = OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(&log_filename)
|
||||
.with_context(|| format!("failed to open {:?}", &log_filename))?;
|
||||
|
||||
let logger_file = log_file.try_clone().unwrap();
|
||||
|
||||
let decorator = slog_term::PlainSyncDecorator::new(logger_file);
|
||||
let drain = slog_term::FullFormat::new(decorator).build();
|
||||
let drain = slog::Filter::new(drain, |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Info) {
|
||||
return true;
|
||||
}
|
||||
false
|
||||
});
|
||||
let drain = std::sync::Mutex::new(drain).fuse();
|
||||
let logger = slog::Logger::root(
|
||||
drain,
|
||||
slog::o!(
|
||||
"location" =>
|
||||
FnValue(move |record| {
|
||||
format!("{}, {}:{}",
|
||||
record.module(),
|
||||
record.file(),
|
||||
record.line()
|
||||
)
|
||||
}
|
||||
)
|
||||
),
|
||||
);
|
||||
Ok((slog_scope::set_global_logger(logger), log_file))
|
||||
}
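// Illustrative usage sketch of wiring up init_logging at startup. `conf` is
// assumed to be an already-built PageServerConf, and "pageserver.log" is an
// arbitrary example file name. The returned guard must be kept alive for as
// long as logging should stay routed to the file.
pub fn start_logging(conf: &PageServerConf) -> Result<(slog_scope::GlobalLoggerGuard, File)> {
    let (guard, log_file) = init_logging(conf, "pageserver.log")?;
    Ok((guard, log_file))
}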
|
||||
@@ -1,922 +0,0 @@
|
||||
//
|
||||
// Page Cache holds all the different page versions and WAL records
|
||||
//
|
||||
// Currently, the page cache uses RocksDB to store WAL records and
|
||||
// full page images, keyed by the RelFileNode, blocknumber, and the
|
||||
// LSN.
|
||||
|
||||
use crate::restore_local_repo::restore_timeline;
|
||||
use crate::waldecoder::Oid;
|
||||
use crate::walredo::WalRedoManager;
|
||||
use crate::ZTimelineId;
|
||||
use crate::{zenith_repo_dir, PageServerConf};
|
||||
use anyhow::{bail, Context};
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use std::cmp::min;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::AtomicU64;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::{convert::TryInto, ops::AddAssign};
|
||||
use zenith_utils::lsn::{AtomicLsn, Lsn};
|
||||
use zenith_utils::seqwait::SeqWait;
|
||||
|
||||
// Timeout when waiting for the WAL receiver to catch up to an LSN given in a GetPage@LSN call.
|
||||
static TIMEOUT: Duration = Duration::from_secs(60);
|
||||
|
||||
pub struct PageCache {
|
||||
// RocksDB handle
|
||||
db: rocksdb::DB,
|
||||
|
||||
// WAL redo manager
|
||||
walredo_mgr: WalRedoManager,
|
||||
|
||||
// What page versions do we hold in the cache? If we get GetPage with
|
||||
// LSN < first_valid_lsn, that's an error because we (no longer) hold that
|
||||
// page version. If we get a request > last_valid_lsn, we need to wait until
|
||||
// we receive all the WAL up to the request. The SeqWait provides functions
|
||||
// for that.
|
||||
//
|
||||
// last_record_lsn points to the end of last processed WAL record.
|
||||
// It can lag behind last_valid_lsn, if the WAL receiver has received some WAL
|
||||
// after the end of last record, but not the whole next record yet. In the
|
||||
// page cache, we care about last_valid_lsn, but if the WAL receiver needs to
|
||||
// restart the streaming, it needs to restart at the end of last record, so
|
||||
// we track them separately. last_record_lsn should perhaps be in
|
||||
// walreceiver.rs instead of here, but it seems convenient to keep all three
|
||||
// values together.
|
||||
//
|
||||
first_valid_lsn: AtomicLsn,
|
||||
last_valid_lsn: SeqWait<Lsn>,
|
||||
last_record_lsn: AtomicLsn,
|
||||
|
||||
// Counters, for metrics collection.
|
||||
pub num_entries: AtomicU64,
|
||||
pub num_page_images: AtomicU64,
|
||||
pub num_wal_records: AtomicU64,
|
||||
pub num_getpage_requests: AtomicU64,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct PageCacheStats {
|
||||
pub num_entries: u64,
|
||||
pub num_page_images: u64,
|
||||
pub num_wal_records: u64,
|
||||
pub num_getpage_requests: u64,
|
||||
}
|
||||
|
||||
impl AddAssign for PageCacheStats {
|
||||
fn add_assign(&mut self, other: Self) {
|
||||
self.num_entries += other.num_entries;
|
||||
self.num_page_images += other.num_page_images;
|
||||
self.num_wal_records += other.num_wal_records;
|
||||
self.num_getpage_requests += other.num_getpage_requests;
|
||||
}
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
pub static ref PAGECACHES: Mutex<HashMap<ZTimelineId, Arc<PageCache>>> =
|
||||
Mutex::new(HashMap::new());
|
||||
}
|
||||
|
||||
// Get Page Cache for given timeline. It is assumed to already exist.
|
||||
pub fn get_pagecache(_conf: &PageServerConf, timelineid: ZTimelineId) -> Option<Arc<PageCache>> {
|
||||
let pcaches = PAGECACHES.lock().unwrap();
|
||||
|
||||
match pcaches.get(&timelineid) {
|
||||
Some(pcache) => Some(pcache.clone()),
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_or_restore_pagecache(
|
||||
conf: &PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
) -> anyhow::Result<Arc<PageCache>> {
|
||||
let mut pcaches = PAGECACHES.lock().unwrap();
|
||||
match pcaches.get(&timelineid) {
|
||||
Some(pcache) => Ok(pcache.clone()),
|
||||
None => {
|
||||
let pcache = init_page_cache(conf, timelineid);
|
||||
|
||||
restore_timeline(conf, &pcache, timelineid)?;
|
||||
|
||||
let result = Arc::new(pcache);
|
||||
|
||||
pcaches.insert(timelineid, result.clone());
|
||||
|
||||
if conf.gc_horizon != 0 {
|
||||
let conf_copy = conf.clone();
|
||||
let _gc_thread = thread::Builder::new()
|
||||
.name("Garbage collection thread".into())
|
||||
.spawn(move || {
|
||||
gc_thread_main(&conf_copy, timelineid);
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn gc_thread_main(conf: &PageServerConf, timelineid: ZTimelineId) {
|
||||
info!("Garbage collection thread started {}", timelineid);
|
||||
let pcache = get_pagecache(conf, timelineid).unwrap();
|
||||
|
||||
pcache.do_gc(conf).unwrap();
|
||||
}
|
||||
|
||||
fn open_rocksdb(_conf: &PageServerConf, timelineid: ZTimelineId) -> rocksdb::DB {
|
||||
let path = zenith_repo_dir().join(timelineid.to_string());
|
||||
let mut opts = rocksdb::Options::default();
|
||||
opts.create_if_missing(true);
|
||||
opts.set_use_fsync(true);
|
||||
opts.set_compression_type(rocksdb::DBCompressionType::Lz4);
|
||||
opts.set_compaction_filter("ttl", move |_level: u32, _key: &[u8], val: &[u8]| {
|
||||
if (val[0] & UNUSED_VERSION_FLAG) != 0 {
|
||||
rocksdb::compaction_filter::Decision::Remove
|
||||
} else {
|
||||
rocksdb::compaction_filter::Decision::Keep
|
||||
}
|
||||
});
|
||||
rocksdb::DB::open(&opts, &path).unwrap()
|
||||
}
|
||||
|
||||
fn init_page_cache(conf: &PageServerConf, timelineid: ZTimelineId) -> PageCache {
|
||||
PageCache {
|
||||
db: open_rocksdb(&conf, timelineid),
|
||||
|
||||
walredo_mgr: WalRedoManager::new(conf, timelineid),
|
||||
|
||||
first_valid_lsn: AtomicLsn::new(0),
|
||||
last_valid_lsn: SeqWait::new(Lsn(0)),
|
||||
last_record_lsn: AtomicLsn::new(0),
|
||||
|
||||
num_entries: AtomicU64::new(0),
|
||||
num_page_images: AtomicU64::new(0),
|
||||
num_wal_records: AtomicU64::new(0),
|
||||
num_getpage_requests: AtomicU64::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// We store two kinds of entries in the page cache:
|
||||
//
|
||||
// 1. Ready-made images of the block
|
||||
// 2. WAL records, to be applied on top of the "previous" entry
|
||||
//
|
||||
// Some WAL records will initialize the page from scratch. For such records,
|
||||
// the 'will_init' flag is set. They don't need the previous page image before
|
||||
// applying. The 'will_init' flag is set for records containing a full-page image,
|
||||
// and for records with the BKPBLOCK_WILL_INIT flag. These differ from PageImages
|
||||
// stored directly in the cache entry in that you still need to run the WAL redo
|
||||
// routine to generate the page image.
|
||||
//
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
|
||||
pub struct CacheKey {
|
||||
pub tag: BufferTag,
|
||||
pub lsn: Lsn,
|
||||
}
|
||||
|
||||
impl CacheKey {
|
||||
pub fn pack(&self, buf: &mut BytesMut) {
|
||||
self.tag.pack(buf);
|
||||
buf.put_u64(self.lsn.0);
|
||||
}
|
||||
pub fn unpack(buf: &mut BytesMut) -> CacheKey {
|
||||
CacheKey {
|
||||
tag: BufferTag::unpack(buf),
|
||||
lsn: Lsn::from(buf.get_u64()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CacheEntryContent {
|
||||
pub page_image: Option<Bytes>,
|
||||
pub wal_record: Option<WALRecord>,
|
||||
}
|
||||
|
||||
const PAGE_IMAGE_FLAG: u8 = 1u8;
|
||||
const UNUSED_VERSION_FLAG: u8 = 2u8;
|
||||
const TRUNCATED_FLAG: u8 = 4u8;
|
||||
|
||||
impl CacheEntryContent {
|
||||
pub fn pack(&self, buf: &mut BytesMut) {
|
||||
if let Some(image) = &self.page_image {
|
||||
buf.put_u8(PAGE_IMAGE_FLAG);
|
||||
buf.put_u16(image.len() as u16);
|
||||
buf.put_slice(&image[..]);
|
||||
} else if let Some(rec) = &self.wal_record {
|
||||
if rec.truncate {
|
||||
buf.put_u8(TRUNCATED_FLAG);
|
||||
} else {
|
||||
buf.put_u8(0);
|
||||
}
|
||||
rec.pack(buf);
|
||||
}
|
||||
}
|
||||
pub fn unpack(buf: &mut BytesMut) -> CacheEntryContent {
|
||||
if (buf.get_u8() & PAGE_IMAGE_FLAG) != 0 {
|
||||
let mut dst = vec![0u8; buf.get_u16() as usize];
|
||||
buf.copy_to_slice(&mut dst);
|
||||
CacheEntryContent {
|
||||
page_image: Some(Bytes::from(dst)),
|
||||
wal_record: None,
|
||||
}
|
||||
} else {
|
||||
CacheEntryContent {
|
||||
page_image: None,
|
||||
wal_record: Some(WALRecord::unpack(buf)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy)]
|
||||
pub struct RelTag {
|
||||
pub spcnode: u32,
|
||||
pub dbnode: u32,
|
||||
pub relnode: u32,
|
||||
pub forknum: u8,
|
||||
}
|
||||
|
||||
impl RelTag {
|
||||
pub fn pack(&self, buf: &mut BytesMut) {
|
||||
buf.put_u32(self.spcnode);
|
||||
buf.put_u32(self.dbnode);
|
||||
buf.put_u32(self.relnode);
|
||||
buf.put_u32(self.forknum as u32); // encode forknum as u32 to provide compatibility with wal_redo_postgres
|
||||
}
|
||||
pub fn unpack(buf: &mut BytesMut) -> RelTag {
|
||||
RelTag {
|
||||
spcnode: buf.get_u32(),
|
||||
dbnode: buf.get_u32(),
|
||||
relnode: buf.get_u32(),
|
||||
forknum: buf.get_u32() as u8,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
|
||||
pub struct BufferTag {
|
||||
pub rel: RelTag,
|
||||
pub blknum: u32,
|
||||
}
|
||||
|
||||
impl BufferTag {
|
||||
pub fn pack(&self, buf: &mut BytesMut) {
|
||||
self.rel.pack(buf);
|
||||
buf.put_u32(self.blknum);
|
||||
}
|
||||
pub fn unpack(buf: &mut BytesMut) -> BufferTag {
|
||||
BufferTag {
|
||||
rel: RelTag::unpack(buf),
|
||||
blknum: buf.get_u32(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WALRecord {
|
||||
pub lsn: Lsn, // LSN at the *end* of the record
|
||||
pub will_init: bool,
|
||||
pub truncate: bool,
|
||||
pub rec: Bytes,
|
||||
// Remember the offset of main_data in rec,
|
||||
// so that we don't have to parse the record again.
|
||||
// If record has no main_data, this offset equals rec.len().
|
||||
pub main_data_offset: u32,
|
||||
}
|
||||
|
||||
impl WALRecord {
|
||||
pub fn pack(&self, buf: &mut BytesMut) {
|
||||
buf.put_u64(self.lsn.0);
|
||||
buf.put_u8(self.will_init as u8);
|
||||
buf.put_u8(self.truncate as u8);
|
||||
buf.put_u32(self.main_data_offset);
|
||||
buf.put_u32(self.rec.len() as u32);
|
||||
buf.put_slice(&self.rec[..]);
|
||||
}
|
||||
pub fn unpack(buf: &mut BytesMut) -> WALRecord {
|
||||
let lsn = Lsn::from(buf.get_u64());
|
||||
let will_init = buf.get_u8() != 0;
|
||||
let truncate = buf.get_u8() != 0;
|
||||
let main_data_offset = buf.get_u32();
|
||||
let mut dst = vec![0u8; buf.get_u32() as usize];
|
||||
buf.copy_to_slice(&mut dst);
|
||||
WALRecord {
|
||||
lsn,
|
||||
will_init,
|
||||
truncate,
|
||||
rec: Bytes::from(dst),
|
||||
main_data_offset,
|
||||
}
|
||||
}
|
||||
}
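// Illustrative sketch: round-trip a CacheKey through its packed on-disk form.
// The field values are arbitrary example data.
#[cfg(test)]
mod cache_key_example {
    use super::*;

    #[test]
    fn cache_key_roundtrip() {
        let key = CacheKey {
            tag: BufferTag {
                rel: RelTag {
                    spcnode: 1663,
                    dbnode: 111,
                    relnode: 1000,
                    forknum: 0,
                },
                blknum: 7,
            },
            lsn: Lsn(0x1234_5678),
        };
        // pack() appends the key; unpack() consumes the same bytes back.
        let mut buf = BytesMut::new();
        key.pack(&mut buf);
        assert_eq!(CacheKey::unpack(&mut buf), key);
    }
}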
|
||||
|
||||
impl PageCache {
|
||||
// Public GET interface functions
|
||||
|
||||
///
|
||||
/// GetPage@LSN
|
||||
///
|
||||
/// Returns an 8k page image
|
||||
///
|
||||
pub fn get_page_at_lsn(&self, tag: BufferTag, req_lsn: Lsn) -> anyhow::Result<Bytes> {
|
||||
self.num_getpage_requests.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let lsn = self.wait_lsn(req_lsn)?;
|
||||
|
||||
// Look up cache entry. If it's a page image, return that. If it's a WAL record,
|
||||
// ask the WAL redo service to reconstruct the page image from the WAL records.
|
||||
let key = CacheKey { tag, lsn };
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
key.pack(&mut buf);
|
||||
let mut iter = self.db.raw_iterator();
|
||||
iter.seek_for_prev(&buf[..]);
|
||||
|
||||
if iter.valid() {
|
||||
let k = iter.key().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&k);
|
||||
let key = CacheKey::unpack(&mut buf);
|
||||
if key.tag == tag {
|
||||
let v = iter.value().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&v);
|
||||
let content = CacheEntryContent::unpack(&mut buf);
|
||||
let page_img: Bytes;
|
||||
if let Some(img) = &content.page_image {
|
||||
page_img = img.clone();
|
||||
} else if content.wal_record.is_some() {
|
||||
// Request the WAL redo manager to apply the WAL records for us.
|
||||
let (base_img, records) = self.collect_records_for_apply(tag, lsn);
|
||||
page_img = self.walredo_mgr.request_redo(tag, lsn, base_img, records)?;
|
||||
|
||||
self.put_page_image(tag, lsn, page_img.clone());
|
||||
} else {
|
||||
// No base image, and no WAL record. Huh?
|
||||
bail!("no page image or WAL record for requested page");
|
||||
}
|
||||
// FIXME: assumes little-endian. Only used for the debugging log though
|
||||
let page_lsn_hi =
|
||||
u32::from_le_bytes(page_img.get(0..4).unwrap().try_into().unwrap());
|
||||
let page_lsn_lo =
|
||||
u32::from_le_bytes(page_img.get(4..8).unwrap().try_into().unwrap());
|
||||
debug!(
|
||||
"Returning page with LSN {:X}/{:X} for {}/{}/{}.{} blk {}",
|
||||
page_lsn_hi,
|
||||
page_lsn_lo,
|
||||
tag.rel.spcnode,
|
||||
tag.rel.dbnode,
|
||||
tag.rel.relnode,
|
||||
tag.rel.forknum,
|
||||
tag.blknum
|
||||
);
|
||||
return Ok(page_img);
|
||||
}
|
||||
}
|
||||
static ZERO_PAGE: [u8; 8192] = [0u8; 8192];
|
||||
debug!("Page {:?} at {}({}) not found", tag, req_lsn, lsn);
|
||||
Ok(Bytes::from_static(&ZERO_PAGE))
|
||||
/* return Err("could not find page image")?; */
|
||||
}
|
||||
|
||||
///
|
||||
/// Get size of relation at given LSN.
|
||||
///
|
||||
pub fn relsize_get(&self, rel: &RelTag, lsn: Lsn) -> anyhow::Result<u32> {
|
||||
self.wait_lsn(lsn)?;
|
||||
self.relsize_get_nowait(rel, lsn)
|
||||
}
|
||||
|
||||
///
|
||||
/// Does relation exist at given LSN?
|
||||
///
|
||||
pub fn relsize_exist(&self, rel: &RelTag, req_lsn: Lsn) -> anyhow::Result<bool> {
|
||||
let lsn = self.wait_lsn(req_lsn)?;
|
||||
|
||||
let key = CacheKey {
|
||||
tag: BufferTag {
|
||||
rel: *rel,
|
||||
blknum: u32::MAX,
|
||||
},
|
||||
lsn,
|
||||
};
|
||||
let mut buf = BytesMut::new();
|
||||
key.pack(&mut buf);
|
||||
let mut iter = self.db.raw_iterator();
|
||||
iter.seek_for_prev(&buf[..]);
|
||||
if iter.valid() {
|
||||
let k = iter.key().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&k);
|
||||
let tag = BufferTag::unpack(&mut buf);
|
||||
if tag.rel == *rel {
|
||||
debug!("Relation {:?} exists at {}", rel, lsn);
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
debug!("Relation {:?} doesn't exist at {}", rel, lsn);
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
// Other public functions, for updating the page cache.
|
||||
// These are used by the WAL receiver and WAL redo.
|
||||
|
||||
///
|
||||
/// Collect all the WAL records that are needed to reconstruct a page
|
||||
/// image for the given cache entry.
|
||||
///
|
||||
/// Returns an old page image (if any), and a vector of WAL records to apply
|
||||
/// over it.
|
||||
///
|
||||
pub fn collect_records_for_apply(
|
||||
&self,
|
||||
tag: BufferTag,
|
||||
lsn: Lsn,
|
||||
) -> (Option<Bytes>, Vec<WALRecord>) {
|
||||
let mut buf = BytesMut::new();
|
||||
let key = CacheKey { tag, lsn };
|
||||
key.pack(&mut buf);
|
||||
|
||||
let mut base_img: Option<Bytes> = None;
|
||||
let mut records: Vec<WALRecord> = Vec::new();
|
||||
|
||||
let mut iter = self.db.raw_iterator();
|
||||
iter.seek_for_prev(&buf[..]);
|
||||
|
||||
// Scan backwards, collecting the WAL records, until we hit an
|
||||
// old page image.
|
||||
while iter.valid() {
|
||||
let k = iter.key().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&k);
|
||||
let key = CacheKey::unpack(&mut buf);
|
||||
if key.tag != tag {
|
||||
break;
|
||||
}
|
||||
let v = iter.value().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&v);
|
||||
let content = CacheEntryContent::unpack(&mut buf);
|
||||
if let Some(img) = &content.page_image {
|
||||
// We have a base image. No need to dig deeper into the list of
|
||||
// records
|
||||
base_img = Some(img.clone());
|
||||
break;
|
||||
} else if let Some(rec) = &content.wal_record {
|
||||
records.push(rec.clone());
|
||||
// If this WAL record initializes the page, no need to dig deeper.
|
||||
if rec.will_init {
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
panic!("no base image and no WAL record on cache entry");
|
||||
}
|
||||
iter.prev();
|
||||
}
|
||||
records.reverse();
|
||||
(base_img, records)
|
||||
}
|
||||
|
||||
///
|
||||
/// Adds a WAL record to the page cache
|
||||
///
|
||||
pub fn put_wal_record(&self, tag: BufferTag, rec: WALRecord) {
|
||||
let lsn = rec.lsn;
|
||||
let key = CacheKey { tag, lsn };
|
||||
|
||||
let content = CacheEntryContent {
|
||||
page_image: None,
|
||||
wal_record: Some(rec),
|
||||
};
|
||||
|
||||
let mut key_buf = BytesMut::new();
|
||||
key.pack(&mut key_buf);
|
||||
let mut val_buf = BytesMut::new();
|
||||
content.pack(&mut val_buf);
|
||||
|
||||
let _res = self.db.put(&key_buf[..], &val_buf[..]);
|
||||
//trace!("put_wal_record lsn: {}", lsn);
|
||||
|
||||
self.num_entries.fetch_add(1, Ordering::Relaxed);
|
||||
self.num_wal_records.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
///
|
||||
/// Adds a relation-wide WAL record (like truncate) to the page cache,
|
||||
/// associating it with all pages starting at the specified block number
|
||||
///
|
||||
pub fn put_rel_wal_record(&self, tag: BufferTag, rec: WALRecord) -> anyhow::Result<()> {
|
||||
let mut key = CacheKey { tag, lsn: rec.lsn };
|
||||
|
||||
// What was the size of the relation before this record?
|
||||
let last_lsn = self.last_valid_lsn.load();
|
||||
let old_rel_size = self.relsize_get_nowait(&tag.rel, last_lsn)?;
|
||||
|
||||
let content = CacheEntryContent {
|
||||
page_image: None,
|
||||
wal_record: Some(rec),
|
||||
};
|
||||
// set new relation size
|
||||
trace!("Truncate relation {:?}", tag);
|
||||
let mut key_buf = BytesMut::new();
|
||||
let mut val_buf = BytesMut::new();
|
||||
content.pack(&mut val_buf);
|
||||
|
||||
for blknum in tag.blknum..old_rel_size {
|
||||
key_buf.clear();
|
||||
key.tag.blknum = blknum;
|
||||
key.pack(&mut key_buf);
|
||||
trace!("put_wal_record lsn: {}", key.lsn);
|
||||
let _res = self.db.put(&key_buf[..], &val_buf[..]);
|
||||
}
|
||||
let n = (old_rel_size - tag.blknum) as u64;
|
||||
self.num_entries.fetch_add(n, Ordering::Relaxed);
|
||||
self.num_wal_records.fetch_add(n, Ordering::Relaxed);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
///
|
||||
/// Memorize a full image of a page version
|
||||
///
|
||||
pub fn put_page_image(&self, tag: BufferTag, lsn: Lsn, img: Bytes) {
|
||||
let key = CacheKey { tag, lsn };
|
||||
let content = CacheEntryContent {
|
||||
page_image: Some(img),
|
||||
wal_record: None,
|
||||
};
|
||||
|
||||
let mut key_buf = BytesMut::new();
|
||||
key.pack(&mut key_buf);
|
||||
let mut val_buf = BytesMut::new();
|
||||
content.pack(&mut val_buf);
|
||||
|
||||
trace!("put_wal_record lsn: {}", key.lsn);
|
||||
let _res = self.db.put(&key_buf[..], &val_buf[..]);
|
||||
|
||||
//debug!("inserted page image for {}/{}/{}_{} blk {} at {}",
|
||||
// tag.spcnode, tag.dbnode, tag.relnode, tag.forknum, tag.blknum, lsn);
|
||||
self.num_page_images.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn create_database(
|
||||
&self,
|
||||
lsn: Lsn,
|
||||
db_id: Oid,
|
||||
tablespace_id: Oid,
|
||||
src_db_id: Oid,
|
||||
src_tablespace_id: Oid,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut buf = BytesMut::new();
|
||||
let key = CacheKey {
|
||||
tag: BufferTag {
|
||||
rel: RelTag {
|
||||
spcnode: src_tablespace_id,
|
||||
dbnode: src_db_id,
|
||||
relnode: 0,
|
||||
forknum: 0u8,
|
||||
},
|
||||
blknum: 0,
|
||||
},
|
||||
lsn: Lsn(0),
|
||||
};
|
||||
key.pack(&mut buf);
|
||||
let mut iter = self.db.raw_iterator();
|
||||
iter.seek(&buf[..]);
|
||||
let mut n = 0;
|
||||
while iter.valid() {
|
||||
let k = iter.key().unwrap();
|
||||
let v = iter.value().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&k);
|
||||
let mut key = CacheKey::unpack(&mut buf);
|
||||
if key.tag.rel.spcnode != src_tablespace_id || key.tag.rel.dbnode != src_db_id {
|
||||
break;
|
||||
}
|
||||
key.tag.rel.spcnode = tablespace_id;
|
||||
key.tag.rel.dbnode = db_id;
|
||||
key.lsn = lsn;
|
||||
buf.clear();
|
||||
key.pack(&mut buf);
|
||||
|
||||
self.db.put(&buf[..], v)?;
|
||||
n += 1;
|
||||
iter.next();
|
||||
}
|
||||
info!(
|
||||
"Create database {}/{}, copy {} entries",
|
||||
tablespace_id, db_id, n
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remember that WAL has been received and added to the page cache up to the given LSN
|
||||
pub fn advance_last_valid_lsn(&self, lsn: Lsn) {
|
||||
let old = self.last_valid_lsn.advance(lsn);
|
||||
|
||||
// Can't move backwards.
|
||||
if lsn < old {
|
||||
warn!(
|
||||
"attempted to move last valid LSN backwards (was {}, new {})",
|
||||
old, lsn
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Remember the (end of the) last valid WAL record stored in the page cache.
|
||||
///
|
||||
/// NOTE: this updates last_valid_lsn as well.
|
||||
///
|
||||
pub fn advance_last_record_lsn(&self, lsn: Lsn) {
|
||||
// Can't move backwards.
|
||||
let old = self.last_record_lsn.fetch_max(lsn);
|
||||
assert!(old <= lsn);
|
||||
|
||||
// Also advance last_valid_lsn
|
||||
let old = self.last_valid_lsn.advance(lsn);
|
||||
// Can't move backwards.
|
||||
if lsn < old {
|
||||
warn!(
|
||||
"attempted to move last record LSN backwards (was {}, new {})",
|
||||
old, lsn
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Remember the beginning of valid WAL.
|
||||
///
|
||||
/// TODO: This should be called by garbage collection, so that if an older
|
||||
/// page is requested, we will return an error to the requestor.
|
||||
pub fn _advance_first_valid_lsn(&self, lsn: Lsn) {
|
||||
// Can't overtake last_valid_lsn (except when we're
|
||||
// initializing the system and last_valid_lsn hasn't been set yet).
|
||||
let last_valid_lsn = self.last_valid_lsn.load();
|
||||
assert!(last_valid_lsn == Lsn(0) || lsn < last_valid_lsn);
|
||||
|
||||
let old = self.first_valid_lsn.fetch_max(lsn);
|
||||
// Can't move backwards.
|
||||
assert!(lsn >= old);
|
||||
}
|
||||
|
||||
pub fn init_valid_lsn(&self, lsn: Lsn) {
|
||||
let old = self.last_valid_lsn.advance(lsn);
|
||||
assert!(old == Lsn(0));
|
||||
let old = self.last_record_lsn.fetch_max(lsn);
|
||||
assert!(old == Lsn(0));
|
||||
let old = self.first_valid_lsn.fetch_max(lsn);
|
||||
assert!(old == Lsn(0));
|
||||
}
|
||||
|
||||
pub fn get_last_valid_lsn(&self) -> Lsn {
|
||||
self.last_valid_lsn.load()
|
||||
}
|
||||
|
||||
//
|
||||
// Get statistics to be displayed in the user interface.
|
||||
//
|
||||
pub fn get_stats(&self) -> PageCacheStats {
|
||||
PageCacheStats {
|
||||
num_entries: self.num_entries.load(Ordering::Relaxed),
|
||||
num_page_images: self.num_page_images.load(Ordering::Relaxed),
|
||||
num_wal_records: self.num_wal_records.load(Ordering::Relaxed),
|
||||
num_getpage_requests: self.num_getpage_requests.load(Ordering::Relaxed),
|
||||
}
|
||||
}
|
||||
|
||||
// Internal functions
|
||||
|
||||
//
|
||||
// Internal function to get relation size at given LSN.
|
||||
//
|
||||
// The caller must ensure that WAL has been received up to 'lsn'.
|
||||
//
|
||||
fn relsize_get_nowait(&self, rel: &RelTag, lsn: Lsn) -> anyhow::Result<u32> {
|
||||
assert!(lsn <= self.last_valid_lsn.load());
|
||||
|
||||
let mut key = CacheKey {
|
||||
tag: BufferTag {
|
||||
rel: *rel,
|
||||
blknum: u32::MAX,
|
||||
},
|
||||
lsn,
|
||||
};
|
||||
let mut buf = BytesMut::new();
|
||||
let mut iter = self.db.raw_iterator();
|
||||
|
||||
loop {
|
||||
buf.clear();
|
||||
key.pack(&mut buf);
|
||||
iter.seek_for_prev(&buf[..]);
|
||||
if iter.valid() {
|
||||
let k = iter.key().unwrap();
|
||||
let v = iter.value().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&k);
|
||||
let tag = BufferTag::unpack(&mut buf);
|
||||
if tag.rel == *rel {
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&v);
|
||||
let content = CacheEntryContent::unpack(&mut buf);
|
||||
if let Some(rec) = &content.wal_record {
|
||||
if rec.truncate {
|
||||
if tag.blknum > 0 {
|
||||
key.tag.blknum = tag.blknum - 1;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
let relsize = tag.blknum + 1;
|
||||
debug!("Size of relation {:?} at {} is {}", rel, lsn, relsize);
|
||||
return Ok(relsize);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
debug!("Size of relation {:?} at {} is zero", rel, lsn);
|
||||
Ok(0)
|
||||
}
|
||||
|
||||
fn do_gc(&self, conf: &PageServerConf) -> anyhow::Result<Bytes> {
|
||||
let mut buf = BytesMut::new();
|
||||
loop {
|
||||
thread::sleep(conf.gc_period);
|
||||
let last_lsn = self.get_last_valid_lsn();
|
||||
|
||||
// checked_sub() returns None on overflow.
|
||||
if let Some(horizon) = last_lsn.checked_sub(conf.gc_horizon) {
|
||||
let mut maxkey = CacheKey {
|
||||
tag: BufferTag {
|
||||
rel: RelTag {
|
||||
spcnode: u32::MAX,
|
||||
dbnode: u32::MAX,
|
||||
relnode: u32::MAX,
|
||||
forknum: u8::MAX,
|
||||
},
|
||||
blknum: u32::MAX,
|
||||
},
|
||||
lsn: Lsn::MAX,
|
||||
};
|
||||
let now = Instant::now();
|
||||
let mut reconstructed = 0u64;
|
||||
let mut truncated = 0u64;
|
||||
let mut inspected = 0u64;
|
||||
let mut deleted = 0u64;
|
||||
loop {
|
||||
buf.clear();
|
||||
maxkey.pack(&mut buf);
|
||||
let mut iter = self.db.raw_iterator();
|
||||
iter.seek_for_prev(&buf[..]);
|
||||
if iter.valid() {
|
||||
let k = iter.key().unwrap();
|
||||
let v = iter.value().unwrap();
|
||||
|
||||
inspected += 1;
|
||||
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&k);
|
||||
let key = CacheKey::unpack(&mut buf);
|
||||
|
||||
// Construct boundaries for old records cleanup
|
||||
maxkey.tag = key.tag;
|
||||
let last_lsn = key.lsn;
|
||||
maxkey.lsn = min(horizon, last_lsn); // do not remove last version
|
||||
|
||||
let mut minkey = maxkey.clone();
|
||||
minkey.lsn = Lsn(0); // first version
|
||||
|
||||
// reconstruct most recent page version
|
||||
if (v[0] & (TRUNCATED_FLAG | PAGE_IMAGE_FLAG)) == 0 {
|
||||
trace!("Reconstruct most recent page {:?}", key);
|
||||
// force reconstruction of most recent page version
|
||||
let (base_img, records) =
|
||||
self.collect_records_for_apply(key.tag, key.lsn);
|
||||
let new_img = self
|
||||
.walredo_mgr
|
||||
.request_redo(key.tag, key.lsn, base_img, records)?;
|
||||
|
||||
self.put_page_image(key.tag, key.lsn, new_img.clone());
|
||||
|
||||
reconstructed += 1;
|
||||
}
|
||||
|
||||
buf.clear();
|
||||
maxkey.pack(&mut buf);
|
||||
|
||||
iter.seek_for_prev(&buf[..]);
|
||||
if iter.valid() {
|
||||
// do not remove last version
|
||||
if last_lsn > horizon {
|
||||
// locate most recent record before horizon
|
||||
let k = iter.key().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&k);
|
||||
let key = CacheKey::unpack(&mut buf);
|
||||
if key.tag == maxkey.tag {
|
||||
let v = iter.value().unwrap();
|
||||
if (v[0] & (TRUNCATED_FLAG | PAGE_IMAGE_FLAG)) == 0 {
|
||||
trace!("Reconstruct horizon page {:?}", key);
|
||||
let (base_img, records) =
|
||||
self.collect_records_for_apply(key.tag, key.lsn);
|
||||
let new_img = self
|
||||
.walredo_mgr
|
||||
.request_redo(key.tag, key.lsn, base_img, records)?;
|
||||
self.put_page_image(key.tag, key.lsn, new_img.clone());
|
||||
|
||||
truncated += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
// remove records prior to horizon
|
||||
loop {
|
||||
iter.prev();
|
||||
if !iter.valid() {
|
||||
break;
|
||||
}
|
||||
let k = iter.key().unwrap();
|
||||
buf.clear();
|
||||
buf.extend_from_slice(&k);
|
||||
let key = CacheKey::unpack(&mut buf);
|
||||
if key.tag != maxkey.tag {
|
||||
break;
|
||||
}
|
||||
let v = iter.value().unwrap();
|
||||
if (v[0] & UNUSED_VERSION_FLAG) == 0 {
|
||||
let mut v = v.to_owned();
|
||||
v[0] |= UNUSED_VERSION_FLAG;
|
||||
self.db.put(k, &v[..])?;
|
||||
deleted += 1;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
maxkey = minkey;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
info!("Garbage collection completed in {:?}:\n{} version chains inspected, {} pages reconstructed, {} version histories truncated, {} versions deleted",
|
||||
now.elapsed(), inspected, reconstructed, truncated, deleted);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Wait until WAL has been received up to the given LSN.
|
||||
//
|
||||
fn wait_lsn(&self, mut lsn: Lsn) -> anyhow::Result<Lsn> {
|
||||
// When invalid LSN is requested, it means "don't wait, return latest version of the page"
|
||||
// This is necessary for bootstrap.
|
||||
if lsn == Lsn(0) {
|
||||
let last_valid_lsn = self.last_valid_lsn.load();
|
||||
trace!(
|
||||
"walreceiver doesn't work yet last_valid_lsn {}, requested {}",
|
||||
last_valid_lsn,
|
||||
lsn
|
||||
);
|
||||
lsn = last_valid_lsn;
|
||||
}
|
||||
|
||||
self.last_valid_lsn
|
||||
.wait_for_timeout(lsn, TIMEOUT)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Timed out while waiting for WAL record at LSN {} to arrive",
|
||||
lsn
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(lsn)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Get statistics to be displayed in the user interface.
|
||||
//
|
||||
// This combines the stats from all PageCache instances
|
||||
//
|
||||
pub fn get_stats() -> PageCacheStats {
|
||||
let pcaches = PAGECACHES.lock().unwrap();
|
||||
|
||||
let mut stats = PageCacheStats {
|
||||
num_entries: 0,
|
||||
num_page_images: 0,
|
||||
num_wal_records: 0,
|
||||
num_getpage_requests: 0,
|
||||
};
|
||||
|
||||
pcaches.iter().for_each(|(_sys_id, pcache)| {
|
||||
stats += pcache.get_stats();
|
||||
});
|
||||
stats
|
||||
}
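// Illustrative sketch: stats from several PageCache instances accumulate with
// `+=` via the AddAssign impl above. The counter values are arbitrary example data.
#[cfg(test)]
mod stats_example {
    use super::*;

    #[test]
    fn stats_accumulate() {
        let mut total = PageCacheStats {
            num_entries: 0,
            num_page_images: 0,
            num_wal_records: 0,
            num_getpage_requests: 0,
        };
        total += PageCacheStats {
            num_entries: 2,
            num_page_images: 1,
            num_wal_records: 1,
            num_getpage_requests: 5,
        };
        assert_eq!(total.num_entries, 2);
        assert_eq!(total.num_getpage_requests, 5);
    }
}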
|
||||
(File diff suppressed because it is too large)

pageserver/src/relish.rs (new file, 231 lines)
@@ -0,0 +1,231 @@
|
||||
//!
|
||||
//! Zenith stores PostgreSQL relations, and some other files, in the
|
||||
//! repository. The relations (i.e. tables and indexes) take up most
|
||||
//! of the space in a typical installation, while the other files are
|
||||
//! small. We call each relation and other file that is stored in the
|
||||
//! repository a "relish". It comes from "rel"-ish, as in "kind of a
|
||||
//! rel", because it covers relations as well as other things that are
|
||||
//! not relations, but are treated similarly for the purposes of the
|
||||
//! storage layer.
|
||||
//!
|
||||
//! This source file contains the definition of the RelishTag struct,
|
||||
//! which uniquely identifies a relish.
|
||||
//!
|
||||
//! Relishes come in two flavors: blocky and non-blocky. Relations and
|
||||
//! SLRUs are blocky, that is, they are divided into 8k blocks, and
|
||||
//! the repository tracks their size. Other relishes are non-blocky:
|
||||
//! the content of the whole relish is stored as one blob. Block
|
||||
//! number must be passed as 0 for all operations on a non-blocky
|
||||
//! relish. The one "block" that you store in a non-blocky relish can
|
||||
//! have arbitrary size, but it is expected to be small, or you
|
||||
//! will have performance issues.
|
||||
//!
|
||||
//! All relishes are versioned by LSN in the repository.
|
||||
//!
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
|
||||
use postgres_ffi::relfile_utils::forknumber_to_name;
|
||||
use postgres_ffi::{Oid, TransactionId};
|
||||
|
||||
///
|
||||
/// RelishTag identifies one relish.
|
||||
///
|
||||
#[derive(Debug, Clone, Copy, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum RelishTag {
|
||||
// Relations correspond to PostgreSQL relation forks. Each
|
||||
// PostgreSQL relation fork is considered a separate relish.
|
||||
Relation(RelTag),
|
||||
|
||||
// SLRUs include pg_clog, pg_multixact/members, and
|
||||
// pg_multixact/offsets. There are other SLRUs in PostgreSQL, but
|
||||
// they don't need to be stored permanently (e.g. pg_subtrans),
|
||||
// or we do not support them in zenith yet (pg_commit_ts).
|
||||
//
|
||||
// These are currently never requested directly by the compute
|
||||
// nodes, although in principle that would be possible. However,
|
||||
// when a new compute node is created, these are included in the
|
||||
// tarball that we send to the compute node to initialize the
|
||||
// PostgreSQL data directory.
|
||||
//
|
||||
// Each SLRU segment in PostgreSQL is considered a separate
|
||||
// relish. For example, pg_clog/0000, pg_clog/0001, and so forth.
|
||||
//
|
||||
// SLRU segments are divided into blocks, like relations.
|
||||
Slru { slru: SlruKind, segno: u32 },
|
||||
|
||||
// Miscellaneous other files that need to be included in the
|
||||
// tarball at compute node creation. These are non-blocky, and are
|
||||
// expected to be small.
|
||||
|
||||
//
|
||||
// FileNodeMap represents PostgreSQL's 'pg_filenode.map'
|
||||
// files. They are needed to map catalog table OIDs to filenode
|
||||
// numbers. Usually the mapping is done by looking up a relation's
|
||||
// 'relfilenode' field in the 'pg_class' system table, but that
|
||||
// doesn't work for 'pg_class' itself and a few other such system
|
||||
// relations. See PostgreSQL relmapper.c for details.
|
||||
//
|
||||
// Each database has a map file for its local mapped catalogs,
|
||||
// and there is a separate map file for shared catalogs.
|
||||
//
|
||||
// These files are always 512 bytes long (although we don't check
|
||||
// or care about that in the page server).
|
||||
//
|
||||
FileNodeMap { spcnode: Oid, dbnode: Oid },
|
||||
|
||||
//
|
||||
// State files for prepared transactions (e.g pg_twophase/1234)
|
||||
//
|
||||
TwoPhase { xid: TransactionId },
|
||||
|
||||
// The control file, stored in global/pg_control
|
||||
ControlFile,
|
||||
|
||||
// Special entry that represents PostgreSQL checkpoint. It doesn't
|
||||
// correspond to any physical file in PostgreSQL, but we use it
|
||||
// to track fields needed to restore the checkpoint data in the
|
||||
// control file, when a compute node is created.
|
||||
Checkpoint,
|
||||
}
|
||||
|
||||
impl RelishTag {
|
||||
pub const fn is_blocky(&self) -> bool {
|
||||
match self {
|
||||
// These relishes work with blocks
|
||||
RelishTag::Relation(_) | RelishTag::Slru { slru: _, segno: _ } => true,
|
||||
|
||||
// and these don't
|
||||
RelishTag::FileNodeMap {
|
||||
spcnode: _,
|
||||
dbnode: _,
|
||||
}
|
||||
| RelishTag::TwoPhase { xid: _ }
|
||||
| RelishTag::ControlFile
|
||||
| RelishTag::Checkpoint => false,
|
||||
}
|
||||
}
|
||||
|
||||
// Physical relishes represent files and use
|
||||
// RelationSizeEntry to track existing and dropped files.
|
||||
// They can be both blocky and non-blocky.
|
||||
pub const fn is_physical(&self) -> bool {
|
||||
match self {
|
||||
// These relishes represent physical files
|
||||
RelishTag::Relation(_)
|
||||
| RelishTag::Slru { .. }
|
||||
| RelishTag::FileNodeMap { .. }
|
||||
| RelishTag::TwoPhase { .. } => true,
|
||||
|
||||
// and these don't
|
||||
RelishTag::ControlFile | RelishTag::Checkpoint => false,
|
||||
}
|
||||
}
|
||||
|
||||
// convenience function to check if this relish is a normal relation.
|
||||
pub const fn is_relation(&self) -> bool {
|
||||
matches!(self, RelishTag::Relation(_))
|
||||
}
|
||||
}
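// Illustrative sketch of the blocky / physical / relation classification above.
// The RelTag values are arbitrary example data.
#[cfg(test)]
mod relish_tag_example {
    use super::*;

    #[test]
    fn classification() {
        let rel = RelishTag::Relation(RelTag {
            forknum: 0,
            spcnode: 1663,
            dbnode: 111,
            relnode: 1000,
        });
        // A relation is blocky, physical, and (trivially) a relation.
        assert!(rel.is_blocky());
        assert!(rel.is_physical());
        assert!(rel.is_relation());

        // The control file and the checkpoint pseudo-relish are neither.
        assert!(!RelishTag::ControlFile.is_blocky());
        assert!(!RelishTag::Checkpoint.is_physical());
    }
}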
|
||||
|
||||
///
|
||||
/// Relation data file segment id throughout the Postgres cluster.
|
||||
///
|
||||
/// Every data file in Postgres is uniquely identified by 4 numbers:
|
||||
/// - relation id / node (`relnode`)
|
||||
/// - database id (`dbnode`)
|
||||
/// - tablespace id (`spcnode`), in short this is a unique id of a separate
|
||||
/// directory to store data files.
|
||||
/// - forknumber (`forknum`) is used to split different kinds of data of the same relation
|
||||
/// across a set of files (`relnode`, `relnode_fsm`, `relnode_vm`).
|
||||
///
|
||||
/// In native Postgres code `RelFileNode` structure and individual `ForkNumber` value
|
||||
/// are used for the same purpose.
|
||||
/// [See more related comments here](https://github.com/postgres/postgres/blob/99c5852e20a0987eca1c38ba0c09329d4076b6a0/src/include/storage/relfilenode.h#L57).
|
||||
///
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Hash, Ord, Clone, Copy, Serialize, Deserialize)]
|
||||
pub struct RelTag {
|
||||
pub forknum: u8,
|
||||
pub spcnode: Oid,
|
||||
pub dbnode: Oid,
|
||||
pub relnode: Oid,
|
||||
}
|
||||
|
||||
/// Display RelTag in the same format that's used in most PostgreSQL debug messages:
|
||||
///
|
||||
/// <spcnode>/<dbnode>/<relnode>[_fsm|_vm|_init]
|
||||
///
|
||||
impl fmt::Display for RelTag {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if let Some(forkname) = forknumber_to_name(self.forknum) {
|
||||
write!(
|
||||
f,
|
||||
"{}/{}/{}_{}",
|
||||
self.spcnode, self.dbnode, self.relnode, forkname
|
||||
)
|
||||
} else {
|
||||
write!(f, "{}/{}/{}", self.spcnode, self.dbnode, self.relnode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Display RelTag in the same format that's used in most PostgreSQL debug messages:
|
||||
///
|
||||
/// <spcnode>/<dbnode>/<relnode>[_fsm|_vm|_init]
|
||||
///
|
||||
impl fmt::Display for RelishTag {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
RelishTag::Relation(rel) => rel.fmt(f),
|
||||
RelishTag::Slru { slru, segno } => {
|
||||
// e.g. pg_clog/0001
|
||||
write!(f, "{}/{:04X}", slru.to_str(), segno)
|
||||
}
|
||||
RelishTag::FileNodeMap { spcnode, dbnode } => {
|
||||
write!(f, "relmapper file for spc {} db {}", spcnode, dbnode)
|
||||
}
|
||||
RelishTag::TwoPhase { xid } => {
|
||||
write!(f, "pg_twophase/{:08X}", xid)
|
||||
}
|
||||
RelishTag::ControlFile => {
|
||||
write!(f, "control file")
|
||||
}
|
||||
RelishTag::Checkpoint => {
|
||||
write!(f, "checkpoint")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Non-relation transaction status files (clog (a.k.a. pg_xact) and
|
||||
/// pg_multixact) in Postgres are handled by SLRU (Simple LRU) buffer,
|
||||
/// hence the name.
|
||||
///
|
||||
/// These files are global for a postgres instance.
|
||||
///
|
||||
/// These files are divided into segments, which are divided into
|
||||
/// pages of the same BLCKSZ as used for relation files.
|
||||
///
|
||||
#[derive(Debug, Clone, Copy, Hash, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum SlruKind {
|
||||
Clog,
|
||||
MultiXactMembers,
|
||||
MultiXactOffsets,
|
||||
}
|
||||
|
||||
impl SlruKind {
|
||||
pub fn to_str(&self) -> &'static str {
|
||||
match self {
|
||||
Self::Clog => "pg_xact",
|
||||
Self::MultiXactMembers => "pg_multixact/members",
|
||||
Self::MultiXactOffsets => "pg_multixact/offsets",
|
||||
}
|
||||
}
|
||||
}
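// Illustrative sketch of how RelishTag values render with Display,
// following the write! formats above. The segment and xid values are
// arbitrary example data.
#[cfg(test)]
mod relish_display_example {
    use super::*;

    #[test]
    fn display_formats() {
        let clog_seg = RelishTag::Slru {
            slru: SlruKind::Clog,
            segno: 1,
        };
        assert_eq!(clog_seg.to_string(), "pg_xact/0001");

        let twophase = RelishTag::TwoPhase { xid: 0x1234 };
        assert_eq!(twophase.to_string(), "pg_twophase/00001234");
    }
}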
|
||||
|
||||
pub const FIRST_NONREL_RELISH_TAG: RelishTag = RelishTag::Slru {
|
||||
slru: SlruKind::Clog,
|
||||
segno: 0,
|
||||
};
|
||||
pageserver/src/repository.rs (new file, 533 lines)
@@ -0,0 +1,533 @@
|
||||
use crate::relish::*;
|
||||
use anyhow::Result;
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashSet;
|
||||
use std::ops::AddAssign;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use zenith_utils::lsn::{Lsn, RecordLsn};
|
||||
use zenith_utils::zid::ZTimelineId;
|
||||
|
||||
///
|
||||
/// A repository corresponds to one .zenith directory. One repository holds multiple
|
||||
/// timelines, forked off from the same initial call to 'initdb'.
|
||||
pub trait Repository: Send + Sync {
|
||||
/// Get Timeline handle for given zenith timeline ID.
|
||||
fn get_timeline(&self, timelineid: ZTimelineId) -> Result<Arc<dyn Timeline>>;
|
||||
|
||||
/// Create a new, empty timeline. The caller is responsible for loading data into it.
|
||||
fn create_empty_timeline(
|
||||
&self,
|
||||
timelineid: ZTimelineId,
|
||||
start_lsn: Lsn,
|
||||
) -> Result<Arc<dyn Timeline>>;
|
||||
|
||||
/// Branch a timeline
|
||||
fn branch_timeline(&self, src: ZTimelineId, dst: ZTimelineId, start_lsn: Lsn) -> Result<()>;
|
||||
|
||||
/// Perform one garbage collection iteration.
/// Garbage collection is periodically performed by the GC thread,
/// but it can also be explicitly requested through the page server API.
///
/// 'timelineid' specifies the timeline to GC, or None for all.
/// `horizon` specifies the delta from the last LSN within which all object versions are preserved (the PITR interval).
/// The `compact` parameter is used to force compaction of the storage.
/// Some storage implementations are based on an LSM tree and require periodic merging (compaction).
/// Usually the storage implementation decides itself when compaction should be performed,
/// but for GC tests it may be useful to force compaction right after a GC iteration completes,
/// to make sure that all detected garbage is removed.
/// So right now `compact` is set to true when GC is explicitly requested through the page server API,
/// and set to false in the GC threads, which repeat GC iterations in an infinite loop.
|
||||
fn gc_iteration(
|
||||
&self,
|
||||
timelineid: Option<ZTimelineId>,
|
||||
horizon: u64,
|
||||
compact: bool,
|
||||
) -> Result<GcResult>;
|
||||
|
||||
// TODO get timelines?
|
||||
//fn get_stats(&self) -> RepositoryStats;
|
||||
}
|
||||
|
||||
///
|
||||
/// Result of performing GC
|
||||
///
|
||||
#[derive(Default)]
|
||||
pub struct GcResult {
|
||||
pub ondisk_relfiles_total: u64,
|
||||
pub ondisk_relfiles_needed_by_cutoff: u64,
|
||||
pub ondisk_relfiles_needed_by_branches: u64,
|
||||
pub ondisk_relfiles_not_updated: u64,
|
||||
pub ondisk_relfiles_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files.
|
||||
pub ondisk_relfiles_dropped: u64, // # of layer files removed because the relation was dropped
|
||||
|
||||
pub ondisk_nonrelfiles_total: u64,
|
||||
pub ondisk_nonrelfiles_needed_by_cutoff: u64,
|
||||
pub ondisk_nonrelfiles_needed_by_branches: u64,
|
||||
pub ondisk_nonrelfiles_not_updated: u64,
|
||||
pub ondisk_nonrelfiles_removed: u64, // # of layer files removed because they have been made obsolete by newer ondisk files.
|
||||
pub ondisk_nonrelfiles_dropped: u64, // # of layer files removed because the relation was dropped
|
||||
|
||||
pub elapsed: Duration,
|
||||
}
|
||||
|
||||
impl AddAssign for GcResult {
|
||||
fn add_assign(&mut self, other: Self) {
|
||||
self.ondisk_relfiles_total += other.ondisk_relfiles_total;
|
||||
self.ondisk_relfiles_needed_by_cutoff += other.ondisk_relfiles_needed_by_cutoff;
|
||||
self.ondisk_relfiles_needed_by_branches += other.ondisk_relfiles_needed_by_branches;
|
||||
self.ondisk_relfiles_not_updated += other.ondisk_relfiles_not_updated;
|
||||
self.ondisk_relfiles_removed += other.ondisk_relfiles_removed;
|
||||
self.ondisk_relfiles_dropped += other.ondisk_relfiles_dropped;
|
||||
|
||||
self.ondisk_nonrelfiles_total += other.ondisk_nonrelfiles_total;
|
||||
self.ondisk_nonrelfiles_needed_by_cutoff += other.ondisk_nonrelfiles_needed_by_cutoff;
|
||||
self.ondisk_nonrelfiles_needed_by_branches += other.ondisk_nonrelfiles_needed_by_branches;
|
||||
self.ondisk_nonrelfiles_not_updated += other.ondisk_nonrelfiles_not_updated;
|
||||
self.ondisk_nonrelfiles_removed += other.ondisk_nonrelfiles_removed;
|
||||
self.ondisk_nonrelfiles_dropped += other.ondisk_nonrelfiles_dropped;
|
||||
|
||||
self.elapsed += other.elapsed;
|
||||
}
|
||||
}
|
||||
|
||||
pub trait Timeline: Send + Sync {
|
||||
//------------------------------------------------------------------------------
|
||||
// Public GET functions
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/// Look up given page in the cache.
|
||||
fn get_page_at_lsn(&self, tag: RelishTag, blknum: u32, lsn: Lsn) -> Result<Bytes>;
|
||||
|
||||
/// Look up given page in the cache.
|
||||
fn get_page_at_lsn_nowait(&self, tag: RelishTag, blknum: u32, lsn: Lsn) -> Result<Bytes>;
|
||||
|
||||
/// Get size of a relish
|
||||
fn get_relish_size(&self, tag: RelishTag, lsn: Lsn) -> Result<Option<u32>>;
|
||||
|
||||
/// Does relation exist?
|
||||
fn get_rel_exists(&self, tag: RelishTag, lsn: Lsn) -> Result<bool>;
|
||||
|
||||
/// Get a list of all distinct relations in given tablespace and database.
|
||||
fn list_rels(&self, spcnode: u32, dbnode: u32, lsn: Lsn) -> Result<HashSet<RelTag>>;
|
||||
|
||||
/// Get a list of non-relational objects
|
||||
fn list_nonrels(&self, lsn: Lsn) -> Result<HashSet<RelishTag>>;
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// Public PUT functions, to update the repository with new page versions.
|
||||
//
|
||||
// These are called by the WAL receiver to digest WAL records.
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
/// Put a new page version that can be constructed from a WAL record
|
||||
///
|
||||
/// This will implicitly extend the relation, if the page is beyond the
|
||||
/// current end-of-file.
|
||||
fn put_wal_record(&self, tag: RelishTag, blknum: u32, rec: WALRecord) -> Result<()>;
|
||||
|
||||
/// Like put_wal_record, but with ready-made image of the page.
|
||||
fn put_page_image(&self, tag: RelishTag, blknum: u32, lsn: Lsn, img: Bytes) -> Result<()>;
|
||||
|
||||
/// Truncate relation
|
||||
fn put_truncation(&self, rel: RelishTag, lsn: Lsn, nblocks: u32) -> Result<()>;
|
||||
|
||||
/// This method is used for marking dropped relations and truncated SLRU files
|
||||
fn drop_relish(&self, tag: RelishTag, lsn: Lsn) -> Result<()>;
|
||||
|
||||
/// Track end of the latest digested WAL record.
|
||||
///
|
||||
/// Advance requires an aligned LSN as an argument and wakes wait_lsn() callers.
|
||||
/// Previous last record LSN is stored alongside the latest and can be read.
|
||||
fn advance_last_record_lsn(&self, lsn: Lsn);
|
||||
/// Atomically get both last and prev.
|
||||
fn get_last_record_rlsn(&self) -> RecordLsn;
|
||||
/// Get last or prev record separately. Same as get_last_record_rlsn().last/prev.
|
||||
fn get_last_record_lsn(&self) -> Lsn;
|
||||
fn get_prev_record_lsn(&self) -> Lsn;
|
||||
fn get_start_lsn(&self) -> Lsn;
|
||||
|
||||
///
|
||||
/// Flush to disk all data that was written with the put_* functions
|
||||
///
|
||||
/// NOTE: This has nothing to do with checkpoint in PostgreSQL. We don't
|
||||
/// know anything about them here in the repository.
|
||||
fn checkpoint(&self) -> Result<()>;
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RepositoryStats {
|
||||
pub num_entries: Lsn,
|
||||
pub num_page_images: Lsn,
|
||||
pub num_wal_records: Lsn,
|
||||
pub num_getpage_requests: Lsn,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct WALRecord {
|
||||
pub lsn: Lsn, // LSN at the *end* of the record
|
||||
pub will_init: bool,
|
||||
pub rec: Bytes,
|
||||
// Remember the offset of main_data in rec,
|
||||
// so that we don't have to parse the record again.
|
||||
// If record has no main_data, this offset equals rec.len().
|
||||
pub main_data_offset: u32,
|
||||
}
|
||||
|
||||
impl WALRecord {
|
||||
pub fn pack(&self, buf: &mut BytesMut) {
|
||||
buf.put_u64(self.lsn.0);
|
||||
buf.put_u8(self.will_init as u8);
|
||||
buf.put_u32(self.main_data_offset);
|
||||
buf.put_u32(self.rec.len() as u32);
|
||||
buf.put_slice(&self.rec[..]);
|
||||
}
|
||||
pub fn unpack(buf: &mut Bytes) -> WALRecord {
|
||||
let lsn = Lsn::from(buf.get_u64());
|
||||
let will_init = buf.get_u8() != 0;
|
||||
let main_data_offset = buf.get_u32();
|
||||
let mut dst = vec![0u8; buf.get_u32() as usize];
|
||||
buf.copy_to_slice(&mut dst);
|
||||
WALRecord {
|
||||
lsn,
|
||||
will_init,
|
||||
rec: Bytes::from(dst),
|
||||
main_data_offset,
|
||||
}
|
||||
}
|
||||
}
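// Illustrative sketch: round-trip a WALRecord through pack()/unpack().
// The field values are arbitrary example data.
#[cfg(test)]
mod walrecord_example {
    use super::*;

    #[test]
    fn pack_unpack_roundtrip() {
        let rec = WALRecord {
            lsn: Lsn(0x0123_4567),
            will_init: true,
            rec: Bytes::from_static(b"example payload"),
            main_data_offset: 0,
        };
        // Pack into a buffer, then unpack the frozen bytes back into a record.
        let mut buf = BytesMut::new();
        rec.pack(&mut buf);
        let mut packed = buf.freeze();
        assert_eq!(WALRecord::unpack(&mut packed), rec);
    }
}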
|
||||
|
||||
///
|
||||
/// Tests that should work the same with any Repository/Timeline implementation.
|
||||
///
|
||||
#[allow(clippy::bool_assert_comparison)]
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::layered_repository::LayeredRepository;
|
||||
use crate::walredo::{WalRedoError, WalRedoManager};
|
||||
use crate::PageServerConf;
|
||||
use postgres_ffi::pg_constants;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use std::time::Duration;
|
||||
use zenith_utils::postgres_backend::AuthType;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
|
||||
/// Arbitrary relation tag, for testing.
|
||||
const TESTREL_A: RelishTag = RelishTag::Relation(RelTag {
|
||||
spcnode: 0,
|
||||
dbnode: 111,
|
||||
relnode: 1000,
|
||||
forknum: 0,
|
||||
});
|
||||
const TESTREL_B: RelishTag = RelishTag::Relation(RelTag {
|
||||
spcnode: 0,
|
||||
dbnode: 111,
|
||||
relnode: 1001,
|
||||
forknum: 0,
|
||||
});
|
||||
|
||||
/// Convenience function to create a page image with given string as the only content
|
||||
#[allow(non_snake_case)]
|
||||
fn TEST_IMG(s: &str) -> Bytes {
|
||||
let mut buf = BytesMut::new();
|
||||
buf.extend_from_slice(s.as_bytes());
|
||||
buf.resize(8192, 0);
|
||||
|
||||
buf.freeze()
|
||||
}
|
||||
|
||||
static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; 8192]);
|
||||
|
||||
fn get_test_repo(test_name: &str) -> Result<Box<dyn Repository>> {
|
||||
let repo_dir = PathBuf::from(format!("../tmp_check/test_{}", test_name));
|
||||
let _ = fs::remove_dir_all(&repo_dir);
|
||||
fs::create_dir_all(&repo_dir)?;
|
||||
fs::create_dir_all(&repo_dir.join("timelines"))?;
|
||||
|
||||
let conf = PageServerConf {
|
||||
daemonize: false,
|
||||
gc_horizon: 64 * 1024 * 1024,
|
||||
gc_period: Duration::from_secs(10),
|
||||
listen_addr: "127.0.0.1:5430".to_string(),
|
||||
http_endpoint_addr: "127.0.0.1:9898".to_string(),
|
||||
superuser: "zenith_admin".to_string(),
|
||||
workdir: repo_dir,
|
||||
pg_distrib_dir: "".into(),
|
||||
auth_type: AuthType::Trust,
|
||||
auth_validation_public_key_path: None,
|
||||
};
|
||||
// Make a static copy of the config. This can never be freed, but that's
|
||||
// OK in a test.
|
||||
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
|
||||
let tenantid = ZTenantId::generate();
|
||||
fs::create_dir_all(conf.tenant_path(&tenantid)).unwrap();
|
||||
|
||||
let walredo_mgr = TestRedoManager {};
|
||||
|
||||
let repo = Box::new(LayeredRepository::new(
|
||||
conf,
|
||||
Arc::new(walredo_mgr),
|
||||
tenantid,
|
||||
));
|
||||
|
||||
Ok(repo)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_relsize() -> Result<()> {
|
||||
let repo = get_test_repo("test_relsize")?;
|
||||
// get_timeline() with non-existent timeline id should fail
|
||||
//repo.get_timeline("11223344556677881122334455667788");
|
||||
|
||||
// Create timeline to work on
|
||||
let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap();
|
||||
let tline = repo.create_empty_timeline(timelineid, Lsn(0x00))?;
|
||||
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x30), TEST_IMG("foo blk 0 at 3"))?;
|
||||
tline.put_page_image(TESTREL_A, 1, Lsn(0x40), TEST_IMG("foo blk 1 at 4"))?;
|
||||
tline.put_page_image(TESTREL_A, 2, Lsn(0x50), TEST_IMG("foo blk 2 at 5"))?;
|
||||
|
||||
tline.advance_last_record_lsn(Lsn(0x50));
|
||||
|
||||
// The relation was created at LSN 2, not visible at LSN 1 yet.
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x10))?, false);
|
||||
assert!(tline.get_relish_size(TESTREL_A, Lsn(0x10))?.is_none());
|
||||
|
||||
assert_eq!(tline.get_rel_exists(TESTREL_A, Lsn(0x20))?, true);
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x20))?.unwrap(), 1);
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x50))?.unwrap(), 3);
|
||||
|
||||
// Check page contents at each LSN
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x20))?,
|
||||
TEST_IMG("foo blk 0 at 2")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x30))?,
|
||||
TEST_IMG("foo blk 0 at 3")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x40))?,
|
||||
TEST_IMG("foo blk 0 at 3")
|
||||
);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 1, Lsn(0x40))?,
|
||||
TEST_IMG("foo blk 1 at 4")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x50))?,
|
||||
TEST_IMG("foo blk 0 at 3")
|
||||
);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 1, Lsn(0x50))?,
|
||||
TEST_IMG("foo blk 1 at 4")
|
||||
);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 2, Lsn(0x50))?,
|
||||
TEST_IMG("foo blk 2 at 5")
|
||||
);
|
||||
|
||||
// Truncate last block
|
||||
tline.put_truncation(TESTREL_A, Lsn(0x60), 2)?;
|
||||
tline.advance_last_record_lsn(Lsn(0x60));
|
||||
|
||||
// Check reported size and contents after truncation
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x60))?.unwrap(), 2);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x60))?,
|
||||
TEST_IMG("foo blk 0 at 3")
|
||||
);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 1, Lsn(0x60))?,
|
||||
TEST_IMG("foo blk 1 at 4")
|
||||
);
|
||||
|
||||
// should still see the truncated block with older LSN
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x50))?.unwrap(), 3);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 2, Lsn(0x50))?,
|
||||
TEST_IMG("foo blk 2 at 5")
|
||||
);
|
||||
|
||||
// Truncate to zero length
|
||||
tline.put_truncation(TESTREL_A, Lsn(0x60), 0)?;
|
||||
tline.advance_last_record_lsn(Lsn(0x60));
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x60))?.unwrap(), 0);
|
||||
|
||||
// Extend from 0 to 2 blocks, leaving a gap
|
||||
tline.put_page_image(TESTREL_A, 1, Lsn(0x70), TEST_IMG("foo blk 1"))?;
|
||||
tline.advance_last_record_lsn(Lsn(0x70));
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x70))?.unwrap(), 2);
|
||||
assert_eq!(tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x70))?, ZERO_PAGE);
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 1, Lsn(0x70))?,
|
||||
TEST_IMG("foo blk 1")
|
||||
);
|
||||
|
||||
// Extend a lot more, leaving a big gap that spans across segments
|
||||
// FIXME: This is currently broken, see https://github.com/zenithdb/zenith/issues/500
|
||||
/*
|
||||
tline.put_page_image(TESTREL_A, 1500, Lsn(0x80), TEST_IMG("foo blk 1500"))?;
|
||||
tline.advance_last_record_lsn(Lsn(0x80));
|
||||
assert_eq!(tline.get_relish_size(TESTREL_A, Lsn(0x80))?.unwrap(), 1501);
|
||||
for blk in 2..1500 {
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, blk, Lsn(0x80))?,
|
||||
ZERO_PAGE);
|
||||
}
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 1500, Lsn(0x80))?,
|
||||
TEST_IMG("foo blk 1500"));
|
||||
*/
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Test get_relsize() and truncation with a file larger than 1 GB, so that it's
|
||||
/// split into multiple 1 GB segments in Postgres.
|
||||
#[test]
|
||||
fn test_large_rel() -> Result<()> {
|
||||
let repo = get_test_repo("test_large_rel")?;
|
||||
let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap();
|
||||
let tline = repo.create_empty_timeline(timelineid, Lsn(0x00))?;
|
||||
|
||||
let mut lsn = 0x10;
|
||||
for blknum in 0..pg_constants::RELSEG_SIZE + 1 {
|
||||
let img = TEST_IMG(&format!("foo blk {} at {}", blknum, Lsn(lsn)));
|
||||
lsn += 0x10;
|
||||
tline.put_page_image(TESTREL_A, blknum as u32, Lsn(lsn), img)?;
|
||||
}
|
||||
tline.advance_last_record_lsn(Lsn(lsn));
|
||||
|
||||
assert_eq!(
|
||||
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
|
||||
pg_constants::RELSEG_SIZE + 1
|
||||
);
|
||||
|
||||
// Truncate one block
|
||||
lsn += 0x10;
|
||||
tline.put_truncation(TESTREL_A, Lsn(lsn), pg_constants::RELSEG_SIZE)?;
|
||||
tline.advance_last_record_lsn(Lsn(lsn));
|
||||
assert_eq!(
|
||||
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
|
||||
pg_constants::RELSEG_SIZE
|
||||
);
|
||||
|
||||
// Truncate another block
|
||||
lsn += 0x10;
|
||||
tline.put_truncation(TESTREL_A, Lsn(lsn), pg_constants::RELSEG_SIZE - 1)?;
|
||||
tline.advance_last_record_lsn(Lsn(lsn));
|
||||
assert_eq!(
|
||||
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
|
||||
pg_constants::RELSEG_SIZE - 1
|
||||
);
|
||||
|
||||
// Truncate to 3000, and then truncate all the way down to 0, one block at a time
|
||||
// This tests the behavior at segment boundaries
|
||||
let mut size: i32 = 3000;
|
||||
while size >= 0 {
|
||||
lsn += 0x10;
|
||||
tline.put_truncation(TESTREL_A, Lsn(lsn), size as u32)?;
|
||||
tline.advance_last_record_lsn(Lsn(lsn));
|
||||
assert_eq!(
|
||||
tline.get_relish_size(TESTREL_A, Lsn(lsn))?.unwrap(),
|
||||
size as u32
|
||||
);
|
||||
|
||||
size -= 1;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
///
|
||||
/// Test branch creation
|
||||
///
|
||||
#[test]
|
||||
fn test_branch() -> Result<()> {
|
||||
let repo = get_test_repo("test_branch")?;
|
||||
let timelineid = ZTimelineId::from_str("11223344556677881122334455667788").unwrap();
|
||||
let tline = repo.create_empty_timeline(timelineid, Lsn(0x00))?;
|
||||
|
||||
// Import initial dummy checkpoint record, otherwise the get_timeline() call
|
||||
// after branching fails below
|
||||
tline.put_page_image(RelishTag::Checkpoint, 0, Lsn(0x10), ZERO_PAGE.clone())?;
|
||||
|
||||
// Create a relation on the timeline
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x20), TEST_IMG("foo blk 0 at 2"))?;
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x30), TEST_IMG("foo blk 0 at 3"))?;
|
||||
tline.put_page_image(TESTREL_A, 0, Lsn(0x40), TEST_IMG("foo blk 0 at 4"))?;
|
||||
|
||||
// Create another relation
|
||||
tline.put_page_image(TESTREL_B, 0, Lsn(0x20), TEST_IMG("foobar blk 0 at 2"))?;
|
||||
|
||||
tline.advance_last_record_lsn(Lsn(0x40));
|
||||
|
||||
// Branch the history, modify relation differently on the new timeline
|
||||
let newtimelineid = ZTimelineId::from_str("AA223344556677881122334455667788").unwrap();
|
||||
repo.branch_timeline(timelineid, newtimelineid, Lsn(0x30))?;
|
||||
let newtline = repo.get_timeline(newtimelineid)?;
|
||||
|
||||
newtline.put_page_image(TESTREL_A, 0, Lsn(0x40), TEST_IMG("bar blk 0 at 4"))?;
|
||||
newtline.advance_last_record_lsn(Lsn(0x40));
|
||||
|
||||
// Check page contents on both branches
|
||||
assert_eq!(
|
||||
tline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x40))?,
|
||||
TEST_IMG("foo blk 0 at 4")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
newtline.get_page_at_lsn(TESTREL_A, 0, Lsn(0x40))?,
|
||||
TEST_IMG("bar blk 0 at 4")
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
newtline.get_page_at_lsn(TESTREL_B, 0, Lsn(0x40))?,
|
||||
TEST_IMG("foobar blk 0 at 2")
|
||||
);
|
||||
|
||||
assert_eq!(newtline.get_relish_size(TESTREL_B, Lsn(0x40))?.unwrap(), 1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Mock WAL redo manager that doesn't do much
|
||||
struct TestRedoManager {}
|
||||
|
||||
impl WalRedoManager for TestRedoManager {
|
||||
fn request_redo(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
rel: RelishTag,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
base_img: Option<Bytes>,
|
||||
records: Vec<WALRecord>,
|
||||
) -> Result<Bytes, WalRedoError> {
|
||||
let s = format!(
|
||||
"redo for {} blk {} to get to {}, with {} and {} records",
|
||||
rel,
|
||||
blknum,
|
||||
lsn,
|
||||
if base_img.is_some() {
|
||||
"base image"
|
||||
} else {
|
||||
"no base image"
|
||||
},
|
||||
records.len()
|
||||
);
|
||||
println!("{}", s);
|
||||
Ok(TEST_IMG(&s))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,278 +0,0 @@
|
||||
//
|
||||
// Restore chunks from S3
|
||||
//
|
||||
// This runs once at Page Server startup. It loads all the "base images" from
|
||||
// S3 into the in-memory page cache. It also initializes the "last valid LSN"
|
||||
// in the page cache to the LSN of the base image, so that when the WAL receiver
|
||||
// is started, it starts streaming from that LSN.
|
||||
//
|
||||
|
||||
use bytes::{Buf, BytesMut};
|
||||
use log::*;
|
||||
use regex::Regex;
|
||||
use std::env;
|
||||
use std::fmt;
|
||||
|
||||
use s3::bucket::Bucket;
|
||||
use s3::creds::Credentials;
|
||||
use s3::region::Region;
|
||||
use s3::S3Error;
|
||||
|
||||
use tokio::runtime;
|
||||
|
||||
use futures::future;
|
||||
|
||||
use crate::{page_cache, PageServerConf};
|
||||
|
||||
struct Storage {
|
||||
region: Region,
|
||||
credentials: Credentials,
|
||||
bucket: String,
|
||||
}
|
||||
|
||||
pub fn restore_main(conf: &PageServerConf) {
|
||||
// Create a new thread pool
|
||||
let runtime = runtime::Runtime::new().unwrap();
|
||||
|
||||
runtime.block_on(async {
|
||||
let result = restore_chunk(conf).await;
|
||||
|
||||
match result {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
error!("S3 error: {}", err);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
//
|
||||
// Restores one chunk from S3.
|
||||
//
|
||||
// 1. Fetch the last base image >= given LSN
|
||||
// 2. Fetch all WAL
|
||||
//
|
||||
// Load it all into the page cache.
|
||||
//
|
||||
async fn restore_chunk(conf: &PageServerConf) -> Result<(), S3Error> {
|
||||
let backend = Storage {
|
||||
region: Region::Custom {
|
||||
region: env::var("S3_REGION").unwrap(),
|
||||
endpoint: env::var("S3_ENDPOINT").unwrap(),
|
||||
},
|
||||
credentials: Credentials::new(
|
||||
Some(&env::var("S3_ACCESSKEY").unwrap()),
|
||||
Some(&env::var("S3_SECRET").unwrap()),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.unwrap(),
|
||||
bucket: "zenith-testbucket".to_string(),
|
||||
};
|
||||
|
||||
info!("Restoring from S3...");
|
||||
|
||||
// Create Bucket in REGION for BUCKET
|
||||
let bucket = Bucket::new_with_path_style(&backend.bucket, backend.region, backend.credentials)?;
|
||||
|
||||
// List out contents of directory
|
||||
let results: Vec<s3::serde_types::ListBucketResult> = bucket
|
||||
.list("relationdata/".to_string(), Some("".to_string()))
|
||||
.await?;
|
||||
|
||||
// TODO: get that from backup
|
||||
let sys_id: u64 = 42;
|
||||
let mut oldest_lsn = 0;
|
||||
let mut slurp_futures: Vec<_> = Vec::new();
|
||||
|
||||
for result in results {
|
||||
for object in result.contents {
|
||||
// Download every relation file, slurping them into memory
|
||||
|
||||
let key = object.key;
|
||||
let relpath = key.strip_prefix("relationdata/").unwrap();
|
||||
|
||||
let parsed = parse_rel_file_path(&relpath);
|
||||
|
||||
match parsed {
|
||||
Ok(p) => {
|
||||
if oldest_lsn == 0 || p.lsn < oldest_lsn {
|
||||
oldest_lsn = p.lsn;
|
||||
}
|
||||
let b = bucket.clone();
|
||||
let f = slurp_base_file(conf, sys_id, b, key.to_string(), p);
|
||||
|
||||
slurp_futures.push(f);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("unrecognized file: {} ({})", relpath, e);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if oldest_lsn == 0 {
|
||||
panic!("no base backup found");
|
||||
}
|
||||
|
||||
let pcache = page_cache::get_pagecache(conf, sys_id);
|
||||
pcache.init_valid_lsn(oldest_lsn);
|
||||
|
||||
info!("{} files to restore...", slurp_futures.len());
|
||||
|
||||
future::join_all(slurp_futures).await;
|
||||
info!("restored!");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// From pg_tablespace_d.h
|
||||
//
|
||||
// FIXME: we'll probably need these elsewhere too, move to some common location
|
||||
const DEFAULTTABLESPACE_OID: u32 = 1663;
|
||||
const GLOBALTABLESPACE_OID: u32 = 1664;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ParsedBaseImageFileName {
|
||||
pub spcnode: u32,
|
||||
pub dbnode: u32,
|
||||
pub relnode: u32,
|
||||
pub forknum: u8,
|
||||
pub segno: u32,
|
||||
|
||||
pub lsn: u64,
|
||||
}
|
||||
|
||||
// formats:
|
||||
// <oid>
|
||||
// <oid>_<fork name>
|
||||
// <oid>.<segment number>
|
||||
// <oid>_<fork name>.<segment number>
|
||||
|
||||
fn parse_filename(fname: &str) -> Result<(u32, u8, u32, u64), FilePathError> {
|
||||
let re = Regex::new(r"^(?P<relnode>\d+)(_(?P<forkname>[a-z]+))?(\.(?P<segno>\d+))?_(?P<lsnhi>[[:xdigit:]]{8})(?P<lsnlo>[[:xdigit:]]{8})$").unwrap();
|
||||
|
||||
let caps = re
|
||||
.captures(fname)
|
||||
.ok_or_else(|| FilePathError::new("invalid relation data file name"))?;
|
||||
|
||||
let relnode_str = caps.name("relnode").unwrap().as_str();
|
||||
let relnode: u32 = relnode_str.parse()?;
|
||||
|
||||
let forkname = caps.name("forkname").map(|f| f.as_str());
|
||||
let forknum = forkname_to_forknum(forkname)?;
|
||||
|
||||
let segno_match = caps.name("segno");
|
||||
let segno = if segno_match.is_none() {
|
||||
0
|
||||
} else {
|
||||
segno_match.unwrap().as_str().parse::<u32>()?
|
||||
};
|
||||
|
||||
let lsn_hi: u64 = caps.name("lsnhi").unwrap().as_str().parse()?;
|
||||
let lsn_lo: u64 = caps.name("lsnlo").unwrap().as_str().parse()?;
|
||||
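// Reassemble the 64-bit LSN from the two 32-bit halves captured from the file name.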
let lsn = lsn_hi << 32 | lsn_lo;
|
||||
|
||||
Ok((relnode, forknum, segno, lsn))
|
||||
}
|
||||
|
||||
fn parse_rel_file_path(path: &str) -> Result<ParsedBaseImageFileName, FilePathError> {
|
||||
/*
|
||||
* Relation data files can be in one of the following directories:
|
||||
*
|
||||
* global/
|
||||
* shared relations
|
||||
*
|
||||
* base/<db oid>/
|
||||
* regular relations, default tablespace
|
||||
*
|
||||
* pg_tblspc/<tblspc oid>/<tblspc version>/
|
||||
* within a non-default tablespace (the name of the directory
|
||||
* depends on version)
|
||||
*
|
||||
* And the relation data files themselves have a filename like:
|
||||
*
|
||||
* <oid>.<segment number>
|
||||
*/
|
||||
if let Some(fname) = path.strip_prefix("global/") {
|
||||
let (relnode, forknum, segno, lsn) = parse_filename(fname)?;
|
||||
|
||||
Ok(ParsedBaseImageFileName {
|
||||
spcnode: GLOBALTABLESPACE_OID,
|
||||
dbnode: 0,
|
||||
relnode,
|
||||
forknum,
|
||||
segno,
|
||||
lsn,
|
||||
})
|
||||
} else if let Some(dbpath) = path.strip_prefix("base/") {
|
||||
let mut s = dbpath.split("/");
|
||||
let dbnode_str = s
|
||||
.next()
|
||||
.ok_or_else(|| FilePathError::new("invalid relation data file name"))?;
|
||||
let dbnode: u32 = dbnode_str.parse()?;
|
||||
let fname = s
|
||||
.next()
|
||||
.ok_or_else(|| FilePathError::new("invalid relation data file name"))?;
|
||||
if s.next().is_some() {
|
||||
return Err(FilePathError::new("invalid relation data file name"));
|
||||
};
|
||||
|
||||
let (relnode, forknum, segno, lsn) = parse_filename(fname)?;
|
||||
|
||||
Ok(ParsedBaseImageFileName {
|
||||
spcnode: DEFAULTTABLESPACE_OID,
|
||||
dbnode,
|
||||
relnode,
|
||||
forknum,
|
||||
segno,
|
||||
lsn,
|
||||
})
|
||||
} else if let Some(_) = path.strip_prefix("pg_tblspc/") {
|
||||
// TODO
|
||||
Err(FilePathError::new("tablespaces not supported"))
|
||||
} else {
|
||||
Err(FilePathError::new("invalid relation data file name"))
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Load a base file from S3, and insert it into the page cache
|
||||
//
|
||||
async fn slurp_base_file(
|
||||
conf: &PageServerConf,
|
||||
sys_id: u64,
|
||||
bucket: Bucket,
|
||||
s3path: String,
|
||||
parsed: ParsedBaseImageFileName,
|
||||
) {
|
||||
// FIXME: rust-s3 opens a new connection for each request. Should reuse
|
||||
// the reqwest::Client object. But that requires changes to rust-s3 itself.
|
||||
let (data, code) = bucket.get_object(s3path.clone()).await.unwrap();
|
||||
|
||||
trace!("got response: {} on {}", code, &s3path);
|
||||
assert_eq!(200, code);
|
||||
|
||||
let mut bytes = BytesMut::from(data.as_slice()).freeze();
|
||||
|
||||
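// Each 1 GB relation segment holds (1 GB / BLCKSZ) blocks, so this file's first block number is segno times that.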
let mut blknum: u32 = parsed.segno * (1024 * 1024 * 1024 / pg_constants::BLCKSZ as u32);
|
||||
|
||||
let pcache = page_cache::get_pagecache(conf, sys_id);
|
||||
|
||||
while bytes.remaining() >= 8192 {
|
||||
let tag = page_cache::BufferTag {
|
||||
rel: page_cache::RelTag {
|
||||
spcnode: parsed.spcnode,
|
||||
dbnode: parsed.dbnode,
|
||||
relnode: parsed.relnode,
|
||||
forknum: parsed.forknum,
|
||||
},
|
||||
blknum,
|
||||
};
|
||||
|
||||
pcache.put_page_image(tag, parsed.lsn, bytes.copy_to_bytes(8192));
|
||||
|
||||
blknum += 1;
|
||||
}
|
||||
}
|
||||
pageserver/src/tenant_mgr.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
//! This module acts as a switchboard to access different repositories managed by this
|
||||
//! page server.
|
||||
|
||||
use crate::branches;
|
||||
use crate::layered_repository::LayeredRepository;
|
||||
use crate::repository::Repository;
|
||||
use crate::walredo::PostgresRedoManager;
|
||||
use crate::PageServerConf;
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use lazy_static::lazy_static;
|
||||
use log::info;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::str::FromStr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref REPOSITORY: Mutex<HashMap<ZTenantId, Arc<dyn Repository>>> =
|
||||
Mutex::new(HashMap::new());
|
||||
}
|
||||
|
||||
pub fn init(conf: &'static PageServerConf) {
|
||||
let mut m = REPOSITORY.lock().unwrap();
|
||||
|
||||
for dir_entry in fs::read_dir(conf.tenants_path()).unwrap() {
|
||||
let tenantid =
|
||||
ZTenantId::from_str(dir_entry.unwrap().file_name().to_str().unwrap()).unwrap();
|
||||
|
||||
// Set up a WAL redo manager, for applying WAL records.
|
||||
let walredo_mgr = PostgresRedoManager::new(conf, tenantid);
|
||||
|
||||
// Set up an object repository, for actual data storage.
|
||||
let repo = Arc::new(LayeredRepository::new(
|
||||
conf,
|
||||
Arc::new(walredo_mgr),
|
||||
tenantid,
|
||||
));
|
||||
LayeredRepository::launch_checkpointer_thread(conf, repo.clone());
|
||||
|
||||
info!("initialized storage for tenant: {}", &tenantid);
|
||||
m.insert(tenantid, repo);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_repository_for_tenant(
|
||||
conf: &'static PageServerConf,
|
||||
tenantid: ZTenantId,
|
||||
) -> Result<()> {
|
||||
let mut m = REPOSITORY.lock().unwrap();
|
||||
|
||||
// First check that the tenant doesn't exist already
|
||||
if m.get(&tenantid).is_some() {
|
||||
bail!("tenant {} already exists", tenantid);
|
||||
}
|
||||
let wal_redo_manager = Arc::new(PostgresRedoManager::new(conf, tenantid));
|
||||
let repo = branches::create_repo(conf, tenantid, wal_redo_manager)?;
|
||||
|
||||
m.insert(tenantid, repo);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn insert_repository_for_tenant(tenantid: ZTenantId, repo: Arc<dyn Repository>) {
|
||||
let o = &mut REPOSITORY.lock().unwrap();
|
||||
o.insert(tenantid, repo);
|
||||
}
|
||||
|
||||
pub fn get_repository_for_tenant(tenantid: &ZTenantId) -> Result<Arc<dyn Repository>> {
|
||||
let o = &REPOSITORY.lock().unwrap();
|
||||
o.get(tenantid)
|
||||
.map(|repo| Arc::clone(repo))
|
||||
.ok_or_else(|| anyhow!("repository not found for tenant name {}", tenantid))
|
||||
}
|
||||
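A rough sketch of how callers are expected to use this switchboard (illustrative only, not part of the diff; it assumes ZTimelineId is exported from zenith_utils::zid alongside ZTenantId, and uses Repository::get_timeline as the tests above do):

use anyhow::Result;
use zenith_utils::zid::{ZTenantId, ZTimelineId};

fn open_timeline(tenantid: &ZTenantId, timelineid: ZTimelineId) -> Result<()> {
    // Resolve the per-tenant repository registered by init() or create_repository_for_tenant().
    let repo = crate::tenant_mgr::get_repository_for_tenant(tenantid)?;
    // Open one of that repository's timelines; page requests are then served from it.
    let _timeline = repo.get_timeline(timelineid)?;
    Ok(())
}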
@@ -1,300 +0,0 @@
|
||||
use crate::tui_event::{Event, Events};
|
||||
use crate::tui_logger::TuiLogger;
|
||||
use crate::tui_logger::TuiLoggerWidget;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use std::sync::Arc;
|
||||
use std::{error::Error, io};
|
||||
use termion::{event::Key, input::MouseTerminal, raw::IntoRawMode, screen::AlternateScreen};
|
||||
use tui::backend::TermionBackend;
|
||||
use tui::buffer::Buffer;
|
||||
use tui::layout::{Constraint, Direction, Layout, Rect};
|
||||
use tui::style::{Color, Modifier, Style};
|
||||
use tui::text::{Span, Spans, Text};
|
||||
use tui::widgets::{Block, BorderType, Borders, Paragraph, Widget};
|
||||
use tui::Terminal;
|
||||
|
||||
use slog::Drain;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref PAGESERVICE_DRAIN: Arc<TuiLogger> = Arc::new(TuiLogger::default());
|
||||
pub static ref WALRECEIVER_DRAIN: Arc<TuiLogger> = Arc::new(TuiLogger::default());
|
||||
pub static ref WALREDO_DRAIN: Arc<TuiLogger> = Arc::new(TuiLogger::default());
|
||||
pub static ref CATCHALL_DRAIN: Arc<TuiLogger> = Arc::new(TuiLogger::default());
|
||||
}
|
||||
|
||||
pub fn init_logging() -> slog_scope::GlobalLoggerGuard {
|
||||
let pageservice_drain =
|
||||
slog::Filter::new(PAGESERVICE_DRAIN.as_ref(), |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Debug)
|
||||
&& record.module().starts_with("pageserver::page_service")
|
||||
{
|
||||
return true;
|
||||
}
|
||||
false
|
||||
})
|
||||
.fuse();
|
||||
|
||||
let walredo_drain = slog::Filter::new(WALREDO_DRAIN.as_ref(), |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Debug)
|
||||
&& record.module().starts_with("pageserver::walredo")
|
||||
{
|
||||
return true;
|
||||
}
|
||||
false
|
||||
})
|
||||
.fuse();
|
||||
|
||||
let walreceiver_drain =
|
||||
slog::Filter::new(WALRECEIVER_DRAIN.as_ref(), |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Debug)
|
||||
&& record.module().starts_with("pageserver::walreceiver")
|
||||
{
|
||||
return true;
|
||||
}
|
||||
false
|
||||
})
|
||||
.fuse();
|
||||
|
||||
let catchall_drain = slog::Filter::new(CATCHALL_DRAIN.as_ref(), |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Info) {
|
||||
return true;
|
||||
}
|
||||
if record.level().is_at_least(slog::Level::Debug)
|
||||
&& record.module().starts_with("pageserver")
|
||||
{
|
||||
return true;
|
||||
}
|
||||
false
|
||||
})
|
||||
.fuse();
|
||||
|
||||
let drain = pageservice_drain;
|
||||
let drain = slog::Duplicate::new(drain, walreceiver_drain).fuse();
|
||||
let drain = slog::Duplicate::new(drain, walredo_drain).fuse();
|
||||
let drain = slog::Duplicate::new(drain, catchall_drain).fuse();
|
||||
let drain = slog_async::Async::new(drain).chan_size(1000).build().fuse();
|
||||
let drain = slog::Filter::new(drain, |record: &slog::Record| {
|
||||
if record.level().is_at_least(slog::Level::Info) {
|
||||
return true;
|
||||
}
|
||||
if record.level().is_at_least(slog::Level::Debug)
|
||||
&& record.module().starts_with("pageserver")
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
})
|
||||
.fuse();
|
||||
let logger = slog::Logger::root(drain, slog::o!());
|
||||
slog_scope::set_global_logger(logger)
|
||||
}
|
||||
|
||||
pub fn ui_main() -> Result<(), Box<dyn Error>> {
|
||||
// Terminal initialization
|
||||
let stdout = io::stdout().into_raw_mode()?;
|
||||
let stdout = MouseTerminal::from(stdout);
|
||||
let stdout = AlternateScreen::from(stdout);
|
||||
let backend = TermionBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend)?;
|
||||
|
||||
// Setup event handlers
|
||||
let events = Events::new();
|
||||
|
||||
loop {
|
||||
terminal.draw(|f| {
|
||||
let size = f.size();
|
||||
|
||||
// +----------------+----------------+
|
||||
// | | |
|
||||
// | top_top_left | top_top_right |
|
||||
// | | |
|
||||
// +----------------+----------------|
|
||||
// | | |
|
||||
// | top_bot_left   | top_bot_right  |
|
||||
// | | |
|
||||
// +----------------+----------------+
|
||||
// | |
|
||||
// | bottom |
|
||||
// | |
|
||||
// +---------------------------------+
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Percentage(70), Constraint::Percentage(30)].as_ref())
|
||||
.split(size);
|
||||
let top_chunk = chunks[0];
|
||||
let bottom_chunk = chunks[1];
|
||||
|
||||
let top_chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
|
||||
.split(top_chunk);
|
||||
let top_left_chunk = top_chunks[0];
|
||||
let top_right_chunk = top_chunks[1];
|
||||
|
||||
let c = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
|
||||
.split(top_left_chunk);
|
||||
let top_top_left_chunk = c[0];
|
||||
let top_bot_left_chunk = c[1];
|
||||
|
||||
let c = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
|
||||
.split(top_right_chunk);
|
||||
let top_top_right_chunk = c[0];
|
||||
let top_bot_right_chunk = c[1];
|
||||
|
||||
f.render_widget(
|
||||
LogWidget::new(PAGESERVICE_DRAIN.as_ref(), "Page Service"),
|
||||
top_top_left_chunk,
|
||||
);
|
||||
|
||||
f.render_widget(
|
||||
LogWidget::new(WALREDO_DRAIN.as_ref(), "WAL Redo"),
|
||||
top_bot_left_chunk,
|
||||
);
|
||||
|
||||
f.render_widget(
|
||||
LogWidget::new(WALRECEIVER_DRAIN.as_ref(), "WAL Receiver"),
|
||||
top_top_right_chunk,
|
||||
);
|
||||
|
||||
f.render_widget(MetricsWidget {}, top_bot_right_chunk);
|
||||
|
||||
f.render_widget(
|
||||
LogWidget::new(CATCHALL_DRAIN.as_ref(), "All Log").show_module(true),
|
||||
bottom_chunk,
|
||||
);
|
||||
})?;
|
||||
|
||||
// If the user presses 'q', quit.
|
||||
if let Event::Input(key) = events.next()? {
|
||||
match key {
|
||||
Key::Char('q') => {
|
||||
break;
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
terminal.show_cursor().unwrap();
|
||||
terminal.clear().unwrap();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
struct LogWidget<'a> {
|
||||
logger: &'a TuiLogger,
|
||||
title: &'a str,
|
||||
show_module: bool,
|
||||
}
|
||||
|
||||
impl<'a> LogWidget<'a> {
|
||||
fn new(logger: &'a TuiLogger, title: &'a str) -> LogWidget<'a> {
|
||||
LogWidget {
|
||||
logger,
|
||||
title,
|
||||
show_module: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn show_module(mut self, b: bool) -> LogWidget<'a> {
|
||||
self.show_module = b;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Widget for LogWidget<'a> {
|
||||
fn render(self, area: Rect, buf: &mut Buffer) {
|
||||
let w = TuiLoggerWidget::default(self.logger)
|
||||
.block(
|
||||
Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.title(self.title)
|
||||
.border_type(BorderType::Rounded),
|
||||
)
|
||||
.show_module(true)
|
||||
.style_error(Style::default().fg(Color::Red))
|
||||
.style_warn(Style::default().fg(Color::Yellow))
|
||||
.style_info(Style::default().fg(Color::Green));
|
||||
w.render(area, buf);
|
||||
}
|
||||
}
|
||||
|
||||
// Render a widget to show some metrics
|
||||
struct MetricsWidget {}
|
||||
|
||||
fn get_metric_u64(title: &str, value: u64) -> Spans {
|
||||
Spans::from(vec![
|
||||
Span::styled(format!("{:<20}", title), Style::default()),
|
||||
Span::raw(": "),
|
||||
Span::styled(
|
||||
value.to_string(),
|
||||
Style::default().add_modifier(Modifier::BOLD),
|
||||
),
|
||||
])
|
||||
}
|
||||
|
||||
// This is not used since LSNs were removed from page cache stats.
|
||||
// Maybe it will be used in the future?
|
||||
fn _get_metric_str<'a>(title: &str, value: &'a str) -> Spans<'a> {
|
||||
Spans::from(vec![
|
||||
Span::styled(format!("{:<20}", title), Style::default()),
|
||||
Span::raw(": "),
|
||||
Span::styled(value, Style::default().add_modifier(Modifier::BOLD)),
|
||||
])
|
||||
}
|
||||
|
||||
impl tui::widgets::Widget for MetricsWidget {
|
||||
fn render(self, area: Rect, buf: &mut Buffer) {
|
||||
let block = Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.title("Page Cache Metrics")
|
||||
.border_type(BorderType::Rounded);
|
||||
let inner_area = block.inner(area);
|
||||
|
||||
block.render(area, buf);
|
||||
|
||||
let mut lines: Vec<Spans> = Vec::new();
|
||||
|
||||
let page_cache_stats = crate::page_cache::get_stats();
|
||||
|
||||
// This is not used since LSNs were removed from page cache stats.
|
||||
// Maybe it will be used in the future?
|
||||
/*
|
||||
let lsnrange = format!(
|
||||
"{} - {}",
|
||||
page_cache_stats.first_valid_lsn, page_cache_stats.last_valid_lsn
|
||||
);
|
||||
let last_valid_recordlsn_str = page_cache_stats.last_record_lsn.to_string();
|
||||
lines.push(get_metric_str("Valid LSN range", &lsnrange));
|
||||
lines.push(get_metric_str("Last record LSN", &last_valid_recordlsn_str));
|
||||
*/
|
||||
|
||||
lines.push(get_metric_u64(
|
||||
"# of cache entries",
|
||||
page_cache_stats.num_entries,
|
||||
));
|
||||
lines.push(get_metric_u64(
|
||||
"# of page images",
|
||||
page_cache_stats.num_page_images,
|
||||
));
|
||||
lines.push(get_metric_u64(
|
||||
"# of WAL records",
|
||||
page_cache_stats.num_wal_records,
|
||||
));
|
||||
lines.push(get_metric_u64(
|
||||
"# of GetPage@LSN calls",
|
||||
page_cache_stats.num_getpage_requests,
|
||||
));
|
||||
|
||||
let text = Text::from(lines);
|
||||
|
||||
Paragraph::new(text).render(inner_area, buf);
|
||||
}
|
||||
}
|
||||
@@ -1,96 +0,0 @@
|
||||
use std::io;
|
||||
use std::sync::mpsc;
|
||||
use std::sync::{
|
||||
atomic::{AtomicBool, Ordering},
|
||||
Arc,
|
||||
};
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use termion::event::Key;
|
||||
use termion::input::TermRead;
|
||||
|
||||
pub enum Event<I> {
|
||||
Input(I),
|
||||
Tick,
|
||||
}
|
||||
|
||||
/// A small event handler that wraps termion input and tick events. Each event
|
||||
/// type is handled in its own thread and returned to a common `Receiver`
|
||||
#[allow(dead_code)]
|
||||
pub struct Events {
|
||||
rx: mpsc::Receiver<Event<Key>>,
|
||||
input_handle: thread::JoinHandle<()>,
|
||||
ignore_exit_key: Arc<AtomicBool>,
|
||||
tick_handle: thread::JoinHandle<()>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct Config {
|
||||
pub exit_key: Key,
|
||||
pub tick_rate: Duration,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
exit_key: Key::Char('q'),
|
||||
tick_rate: Duration::from_millis(250),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Events {
|
||||
pub fn new() -> Events {
|
||||
Events::with_config(Config::default())
|
||||
}
|
||||
|
||||
pub fn with_config(config: Config) -> Events {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let ignore_exit_key = Arc::new(AtomicBool::new(false));
|
||||
let input_handle = {
|
||||
let tx = tx.clone();
|
||||
let ignore_exit_key = ignore_exit_key.clone();
|
||||
thread::spawn(move || {
|
||||
let stdin = io::stdin();
|
||||
for evt in stdin.keys() {
|
||||
if let Ok(key) = evt {
|
||||
if let Err(err) = tx.send(Event::Input(key)) {
|
||||
eprintln!("{}", err);
|
||||
return;
|
||||
}
|
||||
if !ignore_exit_key.load(Ordering::Relaxed) && key == config.exit_key {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
};
|
||||
let tick_handle = {
|
||||
thread::spawn(move || loop {
|
||||
if tx.send(Event::Tick).is_err() {
|
||||
break;
|
||||
}
|
||||
thread::sleep(config.tick_rate);
|
||||
})
|
||||
};
|
||||
Events {
|
||||
rx,
|
||||
input_handle,
|
||||
ignore_exit_key,
|
||||
tick_handle,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn next(&self) -> Result<Event<Key>, mpsc::RecvError> {
|
||||
self.rx.recv()
|
||||
}
|
||||
|
||||
pub fn disable_exit_key(&mut self) {
|
||||
self.ignore_exit_key.store(true, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn enable_exit_key(&mut self) {
|
||||
self.ignore_exit_key.store(false, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
@@ -1,199 +0,0 @@
|
||||
//
|
||||
// A TUI Widget that displays log entries
|
||||
//
|
||||
// This is heavily inspired by gin66's tui_logger crate at https://github.com/gin66/tui-logger,
|
||||
// but I wrote this based on the 'slog' module, which simplified things a lot. tui-logger also
|
||||
// implemented the slog Drain trait, but it had a model of one global buffer for the records.
|
||||
// With this implementation, each TuiLogger is a separate ring buffer and separate slog Drain.
|
||||
// Also, I didn't do any of the "hot log" stuff that gin66's implementation had; you can use an
|
||||
// AsyncDrain to buffer and handle overflow if desired.
|
||||
//
|
||||
use chrono::offset::Local;
|
||||
use chrono::DateTime;
|
||||
use slog::{Drain, Level, OwnedKVList, Record};
|
||||
use slog_async::AsyncRecord;
|
||||
use std::collections::VecDeque;
|
||||
use std::sync::Mutex;
|
||||
use std::time::SystemTime;
|
||||
use tui::buffer::Buffer;
|
||||
use tui::layout::Rect;
|
||||
use tui::style::{Modifier, Style};
|
||||
use tui::text::{Span, Spans};
|
||||
use tui::widgets::{Block, Paragraph, Widget, Wrap};
|
||||
|
||||
// Size of the log ring buffer, in # of records
|
||||
static BUFFER_SIZE: usize = 1000;
|
||||
|
||||
pub struct TuiLogger {
|
||||
events: Mutex<VecDeque<(SystemTime, AsyncRecord)>>,
|
||||
}
|
||||
|
||||
impl<'a> Default for TuiLogger {
|
||||
fn default() -> TuiLogger {
|
||||
TuiLogger {
|
||||
events: Mutex::new(VecDeque::with_capacity(BUFFER_SIZE)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drain for TuiLogger {
|
||||
type Ok = ();
|
||||
type Err = slog::Error;
|
||||
|
||||
fn log(&self, record: &Record, values: &OwnedKVList) -> Result<Self::Ok, Self::Err> {
|
||||
let mut events = self.events.lock().unwrap();
|
||||
|
||||
let now = SystemTime::now();
|
||||
let asyncrec = AsyncRecord::from(record, values);
|
||||
events.push_front((now, asyncrec));
|
||||
|
||||
if events.len() > BUFFER_SIZE {
|
||||
events.pop_back();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
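A minimal sketch of using a TuiLogger as a root slog drain (illustrative only; the real wiring, with per-module filters, is in init_logging above):

use slog::Drain;

fn tui_root_logger(drain: &'static TuiLogger) -> slog::Logger {
    // fuse() maps the drain's error type to slog::Never, which Logger::root requires.
    slog::Logger::root(drain.fuse(), slog::o!())
}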
|
||||
// TuiLoggerWidget renders a TuiLogger ring buffer
|
||||
pub struct TuiLoggerWidget<'b> {
|
||||
block: Option<Block<'b>>,
|
||||
/// Base style of the widget
|
||||
style: Style,
|
||||
/// Level based style
|
||||
style_error: Option<Style>,
|
||||
style_warn: Option<Style>,
|
||||
style_debug: Option<Style>,
|
||||
style_trace: Option<Style>,
|
||||
style_info: Option<Style>,
|
||||
show_module: bool,
|
||||
logger: &'b TuiLogger,
|
||||
}
|
||||
impl<'b> TuiLoggerWidget<'b> {
|
||||
pub fn default(logger: &'b TuiLogger) -> TuiLoggerWidget<'b> {
|
||||
TuiLoggerWidget {
|
||||
block: None,
|
||||
style: Default::default(),
|
||||
style_error: None,
|
||||
style_warn: None,
|
||||
style_debug: None,
|
||||
style_trace: None,
|
||||
style_info: None,
|
||||
show_module: true,
|
||||
logger,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'b> TuiLoggerWidget<'b> {
|
||||
pub fn block(mut self, block: Block<'b>) -> TuiLoggerWidget<'b> {
|
||||
self.block = Some(block);
|
||||
self
|
||||
}
|
||||
#[allow(unused)]
|
||||
pub fn style(mut self, style: Style) -> TuiLoggerWidget<'b> {
|
||||
self.style = style;
|
||||
self
|
||||
}
|
||||
pub fn style_error(mut self, style: Style) -> TuiLoggerWidget<'b> {
|
||||
self.style_error = Some(style);
|
||||
self
|
||||
}
|
||||
pub fn style_warn(mut self, style: Style) -> TuiLoggerWidget<'b> {
|
||||
self.style_warn = Some(style);
|
||||
self
|
||||
}
|
||||
pub fn style_info(mut self, style: Style) -> TuiLoggerWidget<'b> {
|
||||
self.style_info = Some(style);
|
||||
self
|
||||
}
|
||||
#[allow(unused)]
|
||||
pub fn style_trace(mut self, style: Style) -> TuiLoggerWidget<'b> {
|
||||
self.style_trace = Some(style);
|
||||
self
|
||||
}
|
||||
#[allow(unused)]
|
||||
pub fn style_debug(mut self, style: Style) -> TuiLoggerWidget<'b> {
|
||||
self.style_debug = Some(style);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn show_module(mut self, b: bool) -> TuiLoggerWidget<'b> {
|
||||
self.show_module = b;
|
||||
self
|
||||
}
|
||||
}
|
||||
impl<'b> Widget for TuiLoggerWidget<'b> {
|
||||
fn render(mut self, area: Rect, buf: &mut Buffer) {
|
||||
buf.set_style(area, self.style);
|
||||
let list_area = match self.block.take() {
|
||||
Some(b) => {
|
||||
let inner_area = b.inner(area);
|
||||
b.render(area, buf);
|
||||
inner_area
|
||||
}
|
||||
None => area,
|
||||
};
|
||||
if list_area.width == 0 || list_area.height == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
let la_height = list_area.height as usize;
|
||||
|
||||
//
|
||||
// Iterate through the records in the buffer. The records are
|
||||
// pushed to the front, so the newest records come first.
|
||||
//
|
||||
let mut lines: Vec<Spans> = Vec::new();
|
||||
|
||||
let style_msg = Style::default().add_modifier(Modifier::BOLD);
|
||||
{
|
||||
let events = self.logger.events.lock().unwrap();
|
||||
|
||||
for evt in events.iter() {
|
||||
let (timestamp, rec) = evt;
|
||||
|
||||
rec.as_record_values(|rec, _kwlist| {
|
||||
let mut line: Vec<Span> = Vec::new();
|
||||
|
||||
let datetime: DateTime<Local> = timestamp.clone().into();
|
||||
let ts = format!("{}", datetime.format("%H:%M:%S%.3f "));
|
||||
line.push(Span::raw(ts));
|
||||
|
||||
let (lvl_style, txt, with_loc) = match rec.level() {
|
||||
Level::Critical => (self.style_error, "CRIT ", true),
|
||||
Level::Error => (self.style_error, "ERROR", true),
|
||||
Level::Warning => (self.style_warn, "WARN ", true),
|
||||
Level::Info => (self.style_info, "INFO ", false),
|
||||
Level::Debug => (self.style_debug, "DEBUG", true),
|
||||
Level::Trace => (self.style_trace, "TRACE", true),
|
||||
};
|
||||
line.push(Span::styled(txt, lvl_style.unwrap_or_default()));
|
||||
|
||||
if self.show_module {
|
||||
line.push(Span::raw(" "));
|
||||
line.push(Span::raw(rec.module()));
|
||||
}
|
||||
if with_loc {
|
||||
let loc = format!(" {}:{}", rec.file(), rec.line());
|
||||
line.push(Span::raw(loc));
|
||||
}
|
||||
let msg = format!(" {}", rec.msg());
|
||||
line.push(Span::styled(msg, style_msg));
|
||||
|
||||
lines.push(Spans::from(line));
|
||||
});
|
||||
if lines.len() == la_height {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lines.reverse();
|
||||
|
||||
let text = tui::text::Text::from(lines);
|
||||
|
||||
Paragraph::new(text)
|
||||
.wrap(Wrap { trim: true })
|
||||
.render(list_area, buf);
|
||||
}
|
||||
}
|
||||
@@ -1,44 +1,21 @@
|
||||
//!
|
||||
//! WAL decoder. For each WAL record, it decodes the record to figure out which data blocks
|
||||
//! the record affects, so that they can be stored in the repository.
|
||||
//!
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use crc32c::*;
|
||||
use log::*;
|
||||
use postgres_ffi::pg_constants;
|
||||
use postgres_ffi::xlog_utils::XLogRecord;
|
||||
use postgres_ffi::xlog_utils::*;
|
||||
use postgres_ffi::XLogLongPageHeaderData;
|
||||
use postgres_ffi::XLogPageHeaderData;
|
||||
use postgres_ffi::XLogRecord;
|
||||
use postgres_ffi::{BlockNumber, OffsetNumber};
|
||||
use postgres_ffi::{MultiXactId, MultiXactOffset, MultiXactStatus, Oid, TransactionId};
|
||||
use std::cmp::min;
|
||||
use std::str;
|
||||
use thiserror::Error;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
|
||||
// FIXME: this is configurable in PostgreSQL, 16 MB is the default
|
||||
const WAL_SEGMENT_SIZE: u64 = 16 * 1024 * 1024;
|
||||
|
||||
// From PostgreSQL headers
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XLogPageHeaderData {
|
||||
xlp_magic: u16, /* magic value for correctness checks */
|
||||
xlp_info: u16, /* flag bits, see below */
|
||||
xlp_tli: u32, /* TimeLineID of first record on page */
|
||||
xlp_pageaddr: u64, /* XLOG address of this page */
|
||||
xlp_rem_len: u32, /* total len of remaining data for record */
|
||||
}
|
||||
|
||||
// FIXME: this assumes MAXIMUM_ALIGNOF 8. There are 4 padding bytes at end
|
||||
#[allow(non_upper_case_globals)]
|
||||
const SizeOfXLogShortPHD: usize = 2 + 2 + 4 + 8 + 4 + 4;
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XLogLongPageHeaderData {
|
||||
std: XLogPageHeaderData, /* standard header fields */
|
||||
xlp_sysid: u64, /* system identifier from pg_control */
|
||||
xlp_seg_size: u32, /* just as a cross-check */
|
||||
xlp_xlog_blcksz: u32, /* just as a cross-check */
|
||||
}
|
||||
|
||||
// FIXME: this assumes MAXIMUM_ALIGNOF 8.
|
||||
#[allow(non_upper_case_globals)]
|
||||
const SizeOfXLogLongPHD: usize = (2 + 2 + 4 + 8 + 4) + 4 + 8 + 4 + 4;
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub struct WalStreamDecoder {
|
||||
lsn: Lsn,
|
||||
@@ -85,21 +62,22 @@ impl WalStreamDecoder {
|
||||
/// decoder so far.
|
||||
///
|
||||
/// Returns one of the following:
|
||||
/// Ok((u64, Bytes)): a tuple containing the LSN of next record, and the record itself
|
||||
/// Ok((Lsn, Bytes)): a tuple containing the LSN of next record, and the record itself
|
||||
/// Ok(None): there is not enough data in the input buffer. Feed more by calling the `feed_bytes` function
|
||||
/// Err(WalDecodeError): an error occurred while decoding, meaning the input was invalid.
|
||||
///
|
||||
pub fn poll_decode(&mut self) -> Result<Option<(Lsn, Bytes)>, WalDecodeError> {
|
||||
loop {
|
||||
// parse and verify page boundaries as we go
|
||||
if self.lsn.segment_offset(WAL_SEGMENT_SIZE) == 0 {
|
||||
if self.lsn.segment_offset(pg_constants::WAL_SEGMENT_SIZE) == 0 {
|
||||
// parse long header
|
||||
|
||||
if self.inputbuf.remaining() < SizeOfXLogLongPHD {
|
||||
if self.inputbuf.remaining() < XLOG_SIZE_OF_XLOG_LONG_PHD {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let hdr = self.decode_XLogLongPageHeaderData();
|
||||
let hdr = XLogLongPageHeaderData::from_bytes(&mut self.inputbuf);
|
||||
|
||||
if hdr.std.xlp_pageaddr != self.lsn.0 {
|
||||
return Err(WalDecodeError {
|
||||
msg: "invalid xlog segment header".into(),
|
||||
@@ -108,14 +86,15 @@ impl WalStreamDecoder {
|
||||
}
|
||||
// TODO: verify the remaining fields in the header
|
||||
|
||||
self.lsn += SizeOfXLogLongPHD as u64;
|
||||
self.lsn += XLOG_SIZE_OF_XLOG_LONG_PHD as u64;
|
||||
continue;
|
||||
} else if self.lsn.block_offset() == 0 {
|
||||
if self.inputbuf.remaining() < SizeOfXLogShortPHD {
|
||||
if self.inputbuf.remaining() < XLOG_SIZE_OF_XLOG_SHORT_PHD {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let hdr = self.decode_XLogPageHeaderData();
|
||||
let hdr = XLogPageHeaderData::from_bytes(&mut self.inputbuf);
|
||||
|
||||
if hdr.xlp_pageaddr != self.lsn.0 {
|
||||
return Err(WalDecodeError {
|
||||
msg: "invalid xlog page header".into(),
|
||||
@@ -124,7 +103,7 @@ impl WalStreamDecoder {
|
||||
}
|
||||
// TODO: verify the remaining fields in the header
|
||||
|
||||
self.lsn += SizeOfXLogShortPHD as u64;
|
||||
self.lsn += XLOG_SIZE_OF_XLOG_SHORT_PHD as u64;
|
||||
continue;
|
||||
} else if self.padlen > 0 {
|
||||
if self.inputbuf.remaining() < self.padlen as usize {
|
||||
@@ -145,7 +124,7 @@ impl WalStreamDecoder {
|
||||
// read xl_tot_len FIXME: assumes little-endian
|
||||
self.startlsn = self.lsn;
|
||||
let xl_tot_len = self.inputbuf.get_u32_le();
|
||||
if xl_tot_len < SizeOfXLogRecord {
|
||||
if (xl_tot_len as usize) < XLOG_SIZE_OF_XLOG_RECORD {
|
||||
return Err(WalDecodeError {
|
||||
msg: format!("invalid xl_tot_len {}", xl_tot_len),
|
||||
lsn: self.lsn,
|
||||
@@ -183,15 +162,27 @@ impl WalStreamDecoder {
|
||||
// XLOG_SWITCH records are special. If we see one, we need to skip
|
||||
// to the next WAL segment.
|
||||
let xlogrec = XLogRecord::from_bytes(&mut buf);
|
||||
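// The CRC covers the whole record except the xl_crc field itself: first the payload
// after the CRC field, then the header bytes that precede it.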
let mut crc = crc32c_append(0, &recordbuf[XLOG_RECORD_CRC_OFFS + 4..]);
|
||||
crc = crc32c_append(crc, &recordbuf[0..XLOG_RECORD_CRC_OFFS]);
|
||||
if crc != xlogrec.xl_crc {
|
||||
return Err(WalDecodeError {
|
||||
msg: "WAL record crc mismatch".into(),
|
||||
lsn: self.lsn,
|
||||
});
|
||||
}
|
||||
if xlogrec.is_xlog_switch_record() {
|
||||
trace!("saw xlog switch record at {}", self.lsn);
|
||||
self.padlen = self.lsn.calc_padding(WAL_SEGMENT_SIZE) as u32;
|
||||
self.padlen =
|
||||
self.lsn.calc_padding(pg_constants::WAL_SEGMENT_SIZE as u64) as u32;
|
||||
} else {
|
||||
// Pad to an 8-byte boundary
|
||||
self.padlen = self.lsn.calc_padding(8u32) as u32;
|
||||
}
|
||||
|
||||
let result = (self.lsn, recordbuf);
|
||||
// Always align resulting LSN on 0x8 boundary -- that is important for getPage()
|
||||
// and WalReceiver integration. Since this code is used both for WalReceiver and
|
||||
// initial WAL import, let's force alignment right here.
|
||||
let result = (self.lsn.align(), recordbuf);
|
||||
return Ok(Some(result));
|
||||
}
|
||||
continue;
|
||||
@@ -203,43 +194,10 @@ impl WalStreamDecoder {
|
||||
|
||||
// deal with xlog_switch records
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
fn decode_XLogPageHeaderData(&mut self) -> XLogPageHeaderData {
|
||||
let buf = &mut self.inputbuf;
|
||||
|
||||
// FIXME: Assume little-endian
|
||||
|
||||
let hdr: XLogPageHeaderData = XLogPageHeaderData {
|
||||
xlp_magic: buf.get_u16_le(),
|
||||
xlp_info: buf.get_u16_le(),
|
||||
xlp_tli: buf.get_u32_le(),
|
||||
xlp_pageaddr: buf.get_u64_le(),
|
||||
xlp_rem_len: buf.get_u32_le(),
|
||||
};
|
||||
// 4 bytes of padding, on 64-bit systems
|
||||
buf.advance(4);
|
||||
|
||||
// FIXME: check that hdr.xlp_rem_len matches self.contlen
|
||||
//println!("next xlog page (xlp_rem_len: {})", hdr.xlp_rem_len);
|
||||
|
||||
hdr
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
fn decode_XLogLongPageHeaderData(&mut self) -> XLogLongPageHeaderData {
|
||||
let hdr: XLogLongPageHeaderData = XLogLongPageHeaderData {
|
||||
std: self.decode_XLogPageHeaderData(),
|
||||
xlp_sysid: self.inputbuf.get_u64_le(),
|
||||
xlp_seg_size: self.inputbuf.get_u32_le(),
|
||||
xlp_xlog_blcksz: self.inputbuf.get_u32_le(),
|
||||
};
|
||||
|
||||
hdr
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Default)]
|
||||
pub struct DecodedBkpBlock {
|
||||
/* Is this block ref in use? */
|
||||
//in_use: bool,
|
||||
@@ -258,7 +216,7 @@ pub struct DecodedBkpBlock {
|
||||
/* Information on full-page image, if any */
|
||||
has_image: bool, /* has image, even for consistency checking */
|
||||
pub apply_image: bool, /* has image that should be restored */
|
||||
pub will_init: bool,
|
||||
pub will_init: bool, /* record doesn't need previous page version to apply */
|
||||
//char *bkp_image;
|
||||
hole_offset: u16,
|
||||
hole_length: u16,
|
||||
@@ -272,32 +230,12 @@ pub struct DecodedBkpBlock {
|
||||
|
||||
impl DecodedBkpBlock {
|
||||
pub fn new() -> DecodedBkpBlock {
|
||||
DecodedBkpBlock {
|
||||
rnode_spcnode: 0,
|
||||
rnode_dbnode: 0,
|
||||
rnode_relnode: 0,
|
||||
forknum: 0,
|
||||
blkno: 0,
|
||||
|
||||
flags: 0,
|
||||
has_image: false,
|
||||
apply_image: false,
|
||||
will_init: false,
|
||||
hole_offset: 0,
|
||||
hole_length: 0,
|
||||
bimg_len: 0,
|
||||
bimg_info: 0,
|
||||
|
||||
has_data: false,
|
||||
data_len: 0,
|
||||
}
|
||||
Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(non_upper_case_globals)]
|
||||
const SizeOfXLogRecord: u32 = 24;
|
||||
|
||||
pub struct DecodedWALRecord {
|
||||
pub xl_xid: TransactionId,
|
||||
pub xl_info: u8,
|
||||
pub xl_rmid: u8,
|
||||
pub record: Bytes, // raw XLogRecord
|
||||
@@ -306,11 +244,6 @@ pub struct DecodedWALRecord {
|
||||
pub main_data_offset: usize,
|
||||
}
|
||||
|
||||
pub type Oid = u32;
|
||||
pub type TransactionId = u32;
|
||||
pub type BlockNumber = u32;
|
||||
pub type OffsetNumber = u16;
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct RelFileNode {
|
||||
@@ -319,6 +252,24 @@ pub struct RelFileNode {
|
||||
pub relnode: Oid, /* relation */
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XlRelmapUpdate {
|
||||
pub dbid: Oid, /* database ID, or 0 for shared map */
|
||||
pub tsid: Oid, /* database's tablespace, or pg_global */
|
||||
pub nbytes: i32, /* size of relmap data */
|
||||
}
|
||||
|
||||
impl XlRelmapUpdate {
|
||||
pub fn decode(buf: &mut Bytes) -> XlRelmapUpdate {
|
||||
XlRelmapUpdate {
|
||||
dbid: buf.get_u32_le(),
|
||||
tsid: buf.get_u32_le(),
|
||||
nbytes: buf.get_i32_le(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XlSmgrTruncate {
|
||||
@@ -328,9 +279,7 @@ pub struct XlSmgrTruncate {
|
||||
}
|
||||
|
||||
impl XlSmgrTruncate {
|
||||
pub fn decode(decoded: &DecodedWALRecord) -> XlSmgrTruncate {
|
||||
let mut buf = decoded.record.clone();
|
||||
buf.advance((SizeOfXLogRecord + 2) as usize);
|
||||
pub fn decode(buf: &mut Bytes) -> XlSmgrTruncate {
|
||||
XlSmgrTruncate {
|
||||
blkno: buf.get_u32_le(),
|
||||
rnode: RelFileNode {
|
||||
@@ -353,9 +302,7 @@ pub struct XlCreateDatabase {
|
||||
}
|
||||
|
||||
impl XlCreateDatabase {
|
||||
pub fn decode(decoded: &DecodedWALRecord) -> XlCreateDatabase {
|
||||
let mut buf = decoded.record.clone();
|
||||
buf.advance((SizeOfXLogRecord + 2) as usize);
|
||||
pub fn decode(buf: &mut Bytes) -> XlCreateDatabase {
|
||||
XlCreateDatabase {
|
||||
db_id: buf.get_u32_le(),
|
||||
tablespace_id: buf.get_u32_le(),
|
||||
@@ -365,6 +312,31 @@ impl XlCreateDatabase {
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XlDropDatabase {
|
||||
pub db_id: Oid,
|
||||
pub n_tablespaces: Oid, /* number of tablespace IDs */
|
||||
pub tablespace_ids: Vec<Oid>,
|
||||
}
|
||||
|
||||
impl XlDropDatabase {
|
||||
pub fn decode(buf: &mut Bytes) -> XlDropDatabase {
|
||||
let mut rec = XlDropDatabase {
|
||||
db_id: buf.get_u32_le(),
|
||||
n_tablespaces: buf.get_u32_le(),
|
||||
tablespace_ids: Vec::<Oid>::new(),
|
||||
};
|
||||
|
||||
for _i in 0..rec.n_tablespaces {
|
||||
let id = buf.get_u32_le();
|
||||
rec.tablespace_ids.push(id);
|
||||
}
|
||||
|
||||
rec
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XlHeapInsert {
|
||||
@@ -385,6 +357,7 @@ impl XlHeapInsert {
|
||||
#[derive(Debug)]
|
||||
pub struct XlHeapMultiInsert {
|
||||
pub flags: u8,
|
||||
pub _padding: u8,
|
||||
pub ntuples: u16,
|
||||
}
|
||||
|
||||
@@ -392,6 +365,7 @@ impl XlHeapMultiInsert {
|
||||
pub fn decode(buf: &mut Bytes) -> XlHeapMultiInsert {
|
||||
XlHeapMultiInsert {
|
||||
flags: buf.get_u8(),
|
||||
_padding: buf.get_u8(),
|
||||
ntuples: buf.get_u16_le(),
|
||||
}
|
||||
}
|
||||
@@ -402,6 +376,8 @@ impl XlHeapMultiInsert {
|
||||
pub struct XlHeapDelete {
|
||||
pub xmax: TransactionId,
|
||||
pub offnum: OffsetNumber,
|
||||
pub _padding: u16,
|
||||
pub t_cid: u32,
|
||||
pub infobits_set: u8,
|
||||
pub flags: u8,
|
||||
}
|
||||
@@ -411,6 +387,8 @@ impl XlHeapDelete {
|
||||
XlHeapDelete {
|
||||
xmax: buf.get_u32_le(),
|
||||
offnum: buf.get_u16_le(),
|
||||
_padding: buf.get_u16_le(),
|
||||
t_cid: buf.get_u32_le(),
|
||||
infobits_set: buf.get_u8(),
|
||||
flags: buf.get_u8(),
|
||||
}
|
||||
@@ -424,6 +402,7 @@ pub struct XlHeapUpdate {
|
||||
pub old_offnum: OffsetNumber,
|
||||
pub old_infobits_set: u8,
|
||||
pub flags: u8,
|
||||
pub t_cid: u32,
|
||||
pub new_xmax: TransactionId,
|
||||
pub new_offnum: OffsetNumber,
|
||||
}
|
||||
@@ -435,14 +414,197 @@ impl XlHeapUpdate {
|
||||
old_offnum: buf.get_u16_le(),
|
||||
old_infobits_set: buf.get_u8(),
|
||||
flags: buf.get_u8(),
|
||||
t_cid: buf.get_u32(),
|
||||
new_xmax: buf.get_u32_le(),
|
||||
new_offnum: buf.get_u16_le(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Routines to decode a WAL record and figure out which blocks are modified
|
||||
///
|
||||
/// Note: parsing of some fields is omitted because they're not needed.
|
||||
///
|
||||
/// This is similar to the xl_xact_parsed_commit and
|
||||
/// xl_xact_parsed_abort structs in PostgreSQL, but we use the same
|
||||
/// struct for commits and aborts.
|
||||
///
|
||||
#[derive(Debug)]
|
||||
pub struct XlXactParsedRecord {
|
||||
pub xid: TransactionId,
|
||||
pub info: u8,
|
||||
pub xact_time: TimestampTz,
|
||||
pub xinfo: u32,
|
||||
|
||||
pub db_id: Oid, /* MyDatabaseId */
|
||||
pub ts_id: Oid, /* MyDatabaseTableSpace */
|
||||
|
||||
pub subxacts: Vec<TransactionId>,
|
||||
|
||||
pub xnodes: Vec<RelFileNode>,
|
||||
}
|
||||
|
||||
impl XlXactParsedRecord {
|
||||
/// Decode a XLOG_XACT_COMMIT/ABORT/COMMIT_PREPARED/ABORT_PREPARED
|
||||
/// record. This should agree with the ParseCommitRecord and ParseAbortRecord
|
||||
/// functions in PostgreSQL (in src/backend/access/rmgr/xactdesc.c)
|
||||
pub fn decode(buf: &mut Bytes, mut xid: TransactionId, xl_info: u8) -> XlXactParsedRecord {
|
||||
let info = xl_info & pg_constants::XLOG_XACT_OPMASK;
|
||||
// The record starts with time of commit/abort
|
||||
let xact_time = buf.get_i64_le();
|
||||
let xinfo;
|
||||
if xl_info & pg_constants::XLOG_XACT_HAS_INFO != 0 {
|
||||
xinfo = buf.get_u32_le();
|
||||
} else {
|
||||
xinfo = 0;
|
||||
}
|
||||
let db_id;
|
||||
let ts_id;
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_DBINFO != 0 {
|
||||
db_id = buf.get_u32_le();
|
||||
ts_id = buf.get_u32_le();
|
||||
} else {
|
||||
db_id = 0;
|
||||
ts_id = 0;
|
||||
}
|
||||
let mut subxacts = Vec::<TransactionId>::new();
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_SUBXACTS != 0 {
|
||||
let nsubxacts = buf.get_i32_le();
|
||||
for _i in 0..nsubxacts {
|
||||
let subxact = buf.get_u32_le();
|
||||
subxacts.push(subxact);
|
||||
}
|
||||
}
|
||||
let mut xnodes = Vec::<RelFileNode>::new();
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_RELFILENODES != 0 {
|
||||
let nrels = buf.get_i32_le();
|
||||
for _i in 0..nrels {
|
||||
let spcnode = buf.get_u32_le();
|
||||
let dbnode = buf.get_u32_le();
|
||||
let relnode = buf.get_u32_le();
|
||||
trace!(
|
||||
"XLOG_XACT_COMMIT relfilenode {}/{}/{}",
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode
|
||||
);
|
||||
xnodes.push(RelFileNode {
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode,
|
||||
});
|
||||
}
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_INVALS != 0 {
|
||||
let nmsgs = buf.get_i32_le();
|
||||
for _i in 0..nmsgs {
|
||||
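// NOTE: SizeOfSharedInvalidationMessage is hard-coded to zero below, so nothing is
// actually consumed from the buffer for these invalidation messages.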
let sizeof_shared_invalidation_message = 0;
|
||||
buf.advance(sizeof_shared_invalidation_message);
|
||||
}
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_TWOPHASE != 0 {
|
||||
xid = buf.get_u32_le();
|
||||
trace!("XLOG_XACT_COMMIT-XACT_XINFO_HAS_TWOPHASE");
|
||||
}
|
||||
XlXactParsedRecord {
|
||||
xid,
|
||||
info,
|
||||
xact_time,
|
||||
xinfo,
|
||||
db_id,
|
||||
ts_id,
|
||||
subxacts,
|
||||
xnodes,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XlClogTruncate {
|
||||
pub pageno: u32,
|
||||
pub oldest_xid: TransactionId,
|
||||
pub oldest_xid_db: Oid,
|
||||
}
|
||||
|
||||
impl XlClogTruncate {
|
||||
pub fn decode(buf: &mut Bytes) -> XlClogTruncate {
|
||||
XlClogTruncate {
|
||||
pageno: buf.get_u32_le(),
|
||||
oldest_xid: buf.get_u32_le(),
|
||||
oldest_xid_db: buf.get_u32_le(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct MultiXactMember {
|
||||
pub xid: TransactionId,
|
||||
pub status: MultiXactStatus,
|
||||
}
|
||||
|
||||
impl MultiXactMember {
|
||||
pub fn decode(buf: &mut Bytes) -> MultiXactMember {
|
||||
MultiXactMember {
|
||||
xid: buf.get_u32_le(),
|
||||
status: buf.get_u32_le(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XlMultiXactCreate {
|
||||
pub mid: MultiXactId, /* new MultiXact's ID */
|
||||
pub moff: MultiXactOffset, /* its starting offset in members file */
|
||||
pub nmembers: u32, /* number of member XIDs */
|
||||
pub members: Vec<MultiXactMember>,
|
||||
}
|
||||
|
||||
impl XlMultiXactCreate {
|
||||
pub fn decode(buf: &mut Bytes) -> XlMultiXactCreate {
|
||||
let mid = buf.get_u32_le();
|
||||
let moff = buf.get_u32_le();
|
||||
let nmembers = buf.get_u32_le();
|
||||
let mut members = Vec::new();
|
||||
for _ in 0..nmembers {
|
||||
members.push(MultiXactMember::decode(buf));
|
||||
}
|
||||
XlMultiXactCreate {
|
||||
mid,
|
||||
moff,
|
||||
nmembers,
|
||||
members,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct XlMultiXactTruncate {
|
||||
pub oldest_multi_db: Oid,
|
||||
/* to-be-truncated range of multixact offsets */
|
||||
pub start_trunc_off: MultiXactId, /* just for completeness' sake */
|
||||
pub end_trunc_off: MultiXactId,
|
||||
|
||||
/* to-be-truncated range of multixact members */
|
||||
pub start_trunc_memb: MultiXactOffset,
|
||||
pub end_trunc_memb: MultiXactOffset,
|
||||
}
|
||||
|
||||
impl XlMultiXactTruncate {
|
||||
pub fn decode(buf: &mut Bytes) -> XlMultiXactTruncate {
|
||||
XlMultiXactTruncate {
|
||||
oldest_multi_db: buf.get_u32_le(),
|
||||
start_trunc_off: buf.get_u32_le(),
|
||||
end_trunc_off: buf.get_u32_le(),
|
||||
start_trunc_memb: buf.get_u32_le(),
|
||||
end_trunc_memb: buf.get_u32_le(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Main routine to decode a WAL record and figure out which blocks are modified
|
||||
//
|
||||
// See xlogrecord.h for details
|
||||
// The overall layout of an XLOG record is:
|
||||
@@ -479,9 +641,9 @@ pub fn decode_wal_record(record: Bytes) -> DecodedWALRecord {
|
||||
xlogrec.xl_info
|
||||
);
|
||||
|
||||
let remaining = xlogrec.xl_tot_len - SizeOfXLogRecord;
|
||||
let remaining: usize = xlogrec.xl_tot_len as usize - XLOG_SIZE_OF_XLOG_RECORD;
|
||||
|
||||
if buf.remaining() != remaining as usize {
|
||||
if buf.remaining() != remaining {
|
||||
//TODO error
|
||||
}
|
||||
|
||||
@@ -689,179 +851,14 @@ pub fn decode_wal_record(record: Bytes) -> DecodedWALRecord {
|
||||
assert_eq!(buf.remaining(), main_data_len as usize);
|
||||
}
|
||||
|
||||
//5. Handle special CLOG and XACT records
|
||||
if xlogrec.xl_rmid == pg_constants::RM_CLOG_ID {
|
||||
let mut blk = DecodedBkpBlock::new();
|
||||
blk.forknum = pg_constants::PG_XACT_FORKNUM;
|
||||
blk.blkno = buf.get_i32_le() as u32;
|
||||
blk.will_init = true;
|
||||
trace!("RM_CLOG_ID updates block {}", blk.blkno);
|
||||
blocks.push(blk);
|
||||
} else if xlogrec.xl_rmid == pg_constants::RM_XACT_ID {
|
||||
let info = xlogrec.xl_info & pg_constants::XLOG_XACT_OPMASK;
|
||||
if info == pg_constants::XLOG_XACT_COMMIT {
|
||||
let mut blk = DecodedBkpBlock::new();
|
||||
blk.forknum = pg_constants::PG_XACT_FORKNUM;
|
||||
blk.blkno = xlogrec.xl_xid / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
trace!(
|
||||
"XLOG_XACT_COMMIT xl_info {} xl_prev {:X}/{:X} xid {} updates block {} main_data_len {}",
|
||||
xlogrec.xl_info, (xlogrec.xl_prev >> 32),
|
||||
xlogrec.xl_prev & 0xffffffff,
|
||||
xlogrec.xl_xid,
|
||||
blk.blkno,
|
||||
main_data_len
|
||||
);
|
||||
blocks.push(blk);
|
||||
|
||||
//parse commit record to extract subtrans entries
|
||||
// xl_xact_commit starts with time of commit
|
||||
let _xact_time = buf.get_i64_le();
|
||||
|
||||
let mut xinfo = 0;
|
||||
if xlogrec.xl_info & pg_constants::XLOG_XACT_HAS_INFO != 0 {
|
||||
xinfo = buf.get_u32_le();
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_DBINFO != 0 {
|
||||
let _dbid = buf.get_u32_le();
|
||||
let _tsid = buf.get_u32_le();
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_SUBXACTS != 0 {
|
||||
let nsubxacts = buf.get_i32_le();
|
||||
let mut prev_blkno = u32::MAX;
|
||||
for _i in 0..nsubxacts {
|
||||
let subxact = buf.get_u32_le();
|
||||
let blkno = subxact / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
if prev_blkno != blkno {
|
||||
prev_blkno = blkno;
|
||||
let mut blk = DecodedBkpBlock::new();
|
||||
blk.forknum = pg_constants::PG_XACT_FORKNUM;
|
||||
blk.blkno = blkno;
|
||||
blocks.push(blk);
|
||||
}
|
||||
}
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_RELFILENODES != 0 {
|
||||
let nrels = buf.get_i32_le();
|
||||
for _i in 0..nrels {
|
||||
let spcnode = buf.get_u32_le();
|
||||
let dbnode = buf.get_u32_le();
|
||||
let relnode = buf.get_u32_le();
|
||||
//TODO handle this too?
|
||||
trace!(
|
||||
"XLOG_XACT_COMMIT relfilenode {}/{}/{}",
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode
|
||||
);
|
||||
}
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_INVALS != 0 {
|
||||
let nmsgs = buf.get_i32_le();
|
||||
for _i in 0..nmsgs {
|
||||
let sizeof_shared_invalidation_message = 0;
|
||||
buf.advance(sizeof_shared_invalidation_message);
|
||||
}
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_TWOPHASE != 0 {
|
||||
let _xid = buf.get_u32_le();
|
||||
trace!("XLOG_XACT_COMMIT-XACT_XINFO_HAS_TWOPHASE");
|
||||
//TODO handle this to be able to restore pg_twophase on node start
|
||||
}
|
||||
} else if info == pg_constants::XLOG_XACT_ABORT {
|
||||
let mut blk = DecodedBkpBlock::new();
|
||||
blk.forknum = pg_constants::PG_XACT_FORKNUM;
|
||||
blk.blkno = xlogrec.xl_xid / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
trace!(
|
||||
"XLOG_XACT_ABORT xl_info {} xl_prev {:X}/{:X} xid {} updates block {} main_data_len {}",
|
||||
xlogrec.xl_info, (xlogrec.xl_prev >> 32),
|
||||
xlogrec.xl_prev & 0xffffffff,
|
||||
xlogrec.xl_xid,
|
||||
blk.blkno,
|
||||
main_data_len
|
||||
);
|
||||
blocks.push(blk);
|
||||
//parse abort record to extract subtrans entries
|
||||
// xl_xact_abort starts with time of commit
|
||||
let _xact_time = buf.get_i64_le();
|
||||
|
||||
let mut xinfo = 0;
|
||||
if xlogrec.xl_info & pg_constants::XLOG_XACT_HAS_INFO != 0 {
|
||||
xinfo = buf.get_u32_le();
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_DBINFO != 0 {
|
||||
let _dbid = buf.get_u32_le();
|
||||
let _tsid = buf.get_u32_le();
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_SUBXACTS != 0 {
|
||||
let nsubxacts = buf.get_i32_le();
|
||||
let mut prev_blkno = u32::MAX;
|
||||
for _i in 0..nsubxacts {
|
||||
let subxact = buf.get_u32_le();
|
||||
let blkno = subxact / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
if prev_blkno != blkno {
|
||||
prev_blkno = blkno;
|
||||
let mut blk = DecodedBkpBlock::new();
|
||||
blk.forknum = pg_constants::PG_XACT_FORKNUM;
|
||||
blk.blkno = blkno;
|
||||
blocks.push(blk);
|
||||
}
|
||||
}
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_RELFILENODES != 0 {
|
||||
let nrels = buf.get_i32_le();
|
||||
for _i in 0..nrels {
|
||||
let spcnode = buf.get_u32_le();
|
||||
let dbnode = buf.get_u32_le();
|
||||
let relnode = buf.get_u32_le();
|
||||
//TODO save these too
|
||||
trace!(
|
||||
"XLOG_XACT_ABORT relfilenode {}/{}/{}",
|
||||
spcnode,
|
||||
dbnode,
|
||||
relnode
|
||||
);
|
||||
}
|
||||
}
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_TWOPHASE != 0 {
|
||||
let _xid = buf.get_u32_le();
|
||||
trace!("XLOG_XACT_ABORT-XACT_XINFO_HAS_TWOPHASE");
|
||||
}
|
||||
}
|
||||
} else if xlogrec.xl_rmid == pg_constants::RM_DBASE_ID {
|
||||
let info = xlogrec.xl_info & !pg_constants::XLR_INFO_MASK;
|
||||
if info == pg_constants::XLOG_DBASE_CREATE {
|
||||
//buf points to main_data
|
||||
let db_id = buf.get_u32_le();
|
||||
let tablespace_id = buf.get_u32_le();
|
||||
let src_db_id = buf.get_u32_le();
|
||||
let src_tablespace_id = buf.get_u32_le();
|
||||
trace!(
|
||||
"XLOG_DBASE_CREATE tablespace_id/db_id {}/{} src_db_id {}/{}",
|
||||
tablespace_id,
|
||||
db_id,
|
||||
src_tablespace_id,
|
||||
src_db_id
|
||||
);
|
||||
// in postgres it is implemented as copydir
|
||||
// we need to copy all pages in page_cache
|
||||
} else {
|
||||
trace!("XLOG_DBASE_DROP is not handled yet");
|
||||
}
|
||||
} else if xlogrec.xl_rmid == pg_constants::RM_TBLSPC_ID {
|
||||
let info = xlogrec.xl_info & !pg_constants::XLR_INFO_MASK;
|
||||
if info == pg_constants::XLOG_TBLSPC_CREATE {
|
||||
//buf points to main_data
|
||||
let ts_id = buf.get_u32_le();
|
||||
let ts_path = str::from_utf8(&buf).unwrap();
|
||||
trace!("XLOG_TBLSPC_CREATE ts_id {} ts_path {}", ts_id, ts_path);
|
||||
} else {
|
||||
trace!("XLOG_TBLSPC_DROP is not handled yet");
|
||||
}
|
||||
} else if xlogrec.xl_rmid == pg_constants::RM_HEAP_ID {
|
||||
let info = xlogrec.xl_info & pg_constants::XLOG_XACT_OPMASK;
|
||||
// 5. Handle a few special record types that modify blocks without registering
|
||||
// them with the standard mechanism.
|
||||
if xlogrec.xl_rmid == pg_constants::RM_HEAP_ID {
|
||||
let info = xlogrec.xl_info & pg_constants::XLOG_HEAP_OPMASK;
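// One visibility map page covers HEAPBLOCKS_PER_PAGE heap blocks, so this is
// presumably the VM block that tracks blocks[0].blkno, used below when the
// all-visible/all-frozen bits need clearing.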
let blkno = blocks[0].blkno / pg_constants::HEAPBLOCKS_PER_PAGE as u32;
|
||||
if info == pg_constants::XLOG_HEAP_INSERT {
|
||||
let xlrec = XlHeapInsert::decode(&mut buf);
|
||||
assert_eq!(0, buf.remaining());
|
||||
if (xlrec.flags
|
||||
& (pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED
|
||||
| pg_constants::XLH_INSERT_ALL_FROZEN_SET))
|
||||
@@ -877,6 +874,7 @@ pub fn decode_wal_record(record: Bytes) -> DecodedWALRecord {
|
||||
}
|
||||
} else if info == pg_constants::XLOG_HEAP_DELETE {
|
||||
let xlrec = XlHeapDelete::decode(&mut buf);
|
||||
assert_eq!(0, buf.remaining());
|
||||
if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
|
||||
let mut blk = DecodedBkpBlock::new();
|
||||
blk.forknum = pg_constants::VISIBILITYMAP_FORKNUM;
|
||||
@@ -890,6 +888,9 @@ pub fn decode_wal_record(record: Bytes) -> DecodedWALRecord {
|
||||
|| info == pg_constants::XLOG_HEAP_HOT_UPDATE
|
||||
{
|
||||
let xlrec = XlHeapUpdate::decode(&mut buf);
|
||||
// the size of tuple data is inferred from the size of the record.
|
||||
// we can't validate the remaining number of bytes without parsing
|
||||
// the tuple data.
|
||||
if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
|
||||
let mut blk = DecodedBkpBlock::new();
|
||||
blk.forknum = pg_constants::VISIBILITYMAP_FORKNUM;
|
||||
@@ -912,9 +913,18 @@ pub fn decode_wal_record(record: Bytes) -> DecodedWALRecord {
|
||||
}
|
||||
}
|
||||
} else if xlogrec.xl_rmid == pg_constants::RM_HEAP2_ID {
|
||||
let info = xlogrec.xl_info & pg_constants::XLOG_XACT_OPMASK;
|
||||
let info = xlogrec.xl_info & pg_constants::XLOG_HEAP_OPMASK;
|
||||
if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
|
||||
let xlrec = XlHeapMultiInsert::decode(&mut buf);
|
||||
|
||||
let offset_array_len = if xlogrec.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
|
||||
// the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
|
||||
0
|
||||
} else {
|
||||
std::mem::size_of::<u16>() * xlrec.ntuples as usize
|
||||
};
|
||||
assert_eq!(offset_array_len, buf.remaining());
|
||||
|
||||
if (xlrec.flags
|
||||
& (pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED
|
||||
| pg_constants::XLH_INSERT_ALL_FROZEN_SET))
|
||||
@@ -933,6 +943,7 @@ pub fn decode_wal_record(record: Bytes) -> DecodedWALRecord {
|
||||
}
|
||||
|
||||
DecodedWALRecord {
|
||||
xl_xid: xlogrec.xl_xid,
|
||||
xl_info: xlogrec.xl_info,
|
||||
xl_rmid: xlogrec.xl_rmid,
|
||||
record,
|
||||
@@ -940,3 +951,71 @@ pub fn decode_wal_record(record: Bytes) -> DecodedWALRecord {
|
||||
main_data_offset,
|
||||
}
|
||||
}
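// Illustrative usage sketch (not part of the original file): decode a raw WAL
// record and list the blocks it touches. The record is assumed to be a complete
// record, including the XLogRecord header, held in a Bytes buffer.
#[allow(dead_code)]
fn example_list_modified_blocks(record: Bytes) {
    let decoded = decode_wal_record(record);
    for blk in decoded.blocks.iter() {
        trace!(
            "rel {}/{}/{} fork {} blk {}",
            blk.rnode_spcnode,
            blk.rnode_dbnode,
            blk.rnode_relnode,
            blk.forknum,
            blk.blkno
        );
    }
}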
|
||||
|
||||
///
|
||||
/// Build a human-readable string to describe a WAL record
|
||||
///
|
||||
/// For debugging purposes
|
||||
pub fn describe_wal_record(record: &Bytes) -> String {
|
||||
// TODO: It would be nice to use the PostgreSQL rmgrdesc infrastructure for this.
|
||||
// Maybe use the postgres wal redo process, the same used for replaying WAL records?
|
||||
// Or could we compile the rmgrdesc routines into the dump_layer_file() binary directly,
|
||||
// without worrying about security?
|
||||
//
|
||||
// But for now, we have a hand-written code for a few common WAL record types here.
|
||||
|
||||
let mut buf = record.clone();
|
||||
|
||||
// 1. Parse XLogRecord struct
|
||||
|
||||
// FIXME: assume little-endian here
|
||||
let xlogrec = XLogRecord::from_bytes(&mut buf);
|
||||
|
||||
let unknown_str: String;
|
||||
|
||||
let result: &str = match xlogrec.xl_rmid {
|
||||
pg_constants::RM_HEAP2_ID => {
|
||||
let info = xlogrec.xl_info & pg_constants::XLOG_HEAP_OPMASK;
|
||||
match info {
|
||||
pg_constants::XLOG_HEAP2_MULTI_INSERT => "HEAP2 MULTI_INSERT",
|
||||
pg_constants::XLOG_HEAP2_VISIBLE => "HEAP2 VISIBLE",
|
||||
_ => {
|
||||
unknown_str = format!("HEAP2 UNKNOWN_0x{:02x}", info);
|
||||
&unknown_str
|
||||
}
|
||||
}
|
||||
}
|
||||
pg_constants::RM_HEAP_ID => {
|
||||
let info = xlogrec.xl_info & pg_constants::XLOG_HEAP_OPMASK;
|
||||
match info {
|
||||
pg_constants::XLOG_HEAP_INSERT => "HEAP INSERT",
|
||||
pg_constants::XLOG_HEAP_DELETE => "HEAP DELETE",
|
||||
pg_constants::XLOG_HEAP_UPDATE => "HEAP UPDATE",
|
||||
pg_constants::XLOG_HEAP_HOT_UPDATE => "HEAP HOT_UPDATE",
|
||||
_ => {
|
||||
unknown_str = format!("HEAP2 UNKNOWN_0x{:02x}", info);
|
||||
&unknown_str
|
||||
}
|
||||
}
|
||||
}
|
||||
pg_constants::RM_XLOG_ID => {
|
||||
let info = xlogrec.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
|
||||
match info {
|
||||
pg_constants::XLOG_FPI => "XLOG FPI",
|
||||
pg_constants::XLOG_FPI_FOR_HINT => "XLOG FPI_FOR_HINT",
|
||||
_ => {
|
||||
unknown_str = format!("XLOG UNKNOWN_0x{:02x}", info);
|
||||
&unknown_str
|
||||
}
|
||||
}
|
||||
}
|
||||
rmid => {
|
||||
let info = xlogrec.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
|
||||
|
||||
unknown_str = format!("UNKNOWN_RM_{} INFO_0x{:02x}", rmid, info);
|
||||
&unknown_str
|
||||
}
|
||||
};
|
||||
|
||||
String::from(result)
|
||||
}
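// Illustrative usage sketch (not part of the original file): print a one-line
// description for each record in a hypothetical slice of raw WAL records.
#[allow(dead_code)]
fn example_describe_records(records: &[Bytes]) {
    for rec in records {
        println!("{}", describe_wal_record(rec));
    }
}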
|
||||
|
||||
@@ -1,38 +1,38 @@
|
||||
//!
|
||||
//! WAL receiver
|
||||
//!
|
||||
//! The WAL receiver connects to the WAL safekeeper service, and streams WAL.
|
||||
//! For each WAL record, it decodes the record to figure out which data blocks
|
||||
//! the record affects, and adds the records to the page cache.
|
||||
//! WAL receiver connects to the WAL safekeeper service, streams WAL,
|
||||
//! decodes records and saves them in the repository for the correct
|
||||
//! timeline.
|
||||
//!
|
||||
//! We keep one WAL receiver active per timeline.
|
||||
|
||||
use crate::page_cache;
|
||||
use crate::page_cache::{BufferTag, RelTag};
|
||||
use crate::relish::*;
|
||||
use crate::restore_local_repo;
|
||||
use crate::tenant_mgr;
|
||||
use crate::waldecoder::*;
|
||||
use crate::PageServerConf;
|
||||
use crate::ZTimelineId;
|
||||
use anyhow::Error;
|
||||
use anyhow::{Error, Result};
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use postgres_ffi::pg_constants;
|
||||
use postgres::fallible_iterator::FallibleIterator;
|
||||
use postgres::replication::ReplicationIter;
|
||||
use postgres::{Client, NoTls, SimpleQueryMessage, SimpleQueryRow};
|
||||
use postgres_ffi::xlog_utils::*;
|
||||
use postgres_ffi::*;
|
||||
use postgres_protocol::message::backend::ReplicationMessage;
|
||||
use postgres_types::PgLsn;
|
||||
use std::cmp::{max, min};
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::fs::{File, OpenOptions};
|
||||
use std::io::{Seek, SeekFrom, Write};
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Mutex;
|
||||
use std::thread;
|
||||
use std::thread::sleep;
|
||||
use std::time::Duration;
|
||||
use tokio::runtime::Runtime;
|
||||
use tokio_postgres::replication::{PgTimestamp, ReplicationStream};
|
||||
use tokio_postgres::{NoTls, SimpleQueryMessage, SimpleQueryRow};
|
||||
use tokio_stream::StreamExt;
|
||||
use std::time::{Duration, SystemTime};
|
||||
use zenith_utils::lsn::Lsn;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
use zenith_utils::zid::ZTimelineId;
|
||||
|
||||
//
|
||||
// We keep one WAL Receiver active per timeline.
|
||||
@@ -48,9 +48,10 @@ lazy_static! {
|
||||
|
||||
// Launch a new WAL receiver, or tell one that's running about change in connection string
|
||||
pub fn launch_wal_receiver(
|
||||
conf: &PageServerConf,
|
||||
conf: &'static PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
wal_producer_connstr: &str,
|
||||
tenantid: ZTenantId,
|
||||
) {
|
||||
let mut receivers = WAL_RECEIVERS.lock().unwrap();
|
||||
|
||||
@@ -65,11 +66,10 @@ pub fn launch_wal_receiver(
|
||||
receivers.insert(timelineid, receiver);
|
||||
|
||||
// Also launch a new thread to handle this connection
|
||||
let conf_copy = conf.clone();
|
||||
let _walreceiver_thread = thread::Builder::new()
|
||||
.name("WAL receiver thread".into())
|
||||
.spawn(move || {
|
||||
thread_main(&conf_copy, timelineid);
|
||||
thread_main(conf, timelineid, &tenantid);
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
@@ -90,22 +90,12 @@ fn get_wal_producer_connstr(timelineid: ZTimelineId) -> String {
|
||||
//
|
||||
// This is the entry point for the WAL receiver thread.
|
||||
//
|
||||
fn thread_main(conf: &PageServerConf, timelineid: ZTimelineId) {
|
||||
fn thread_main(conf: &'static PageServerConf, timelineid: ZTimelineId, tenantid: &ZTenantId) {
|
||||
info!(
|
||||
"WAL receiver thread started for timeline : '{}'",
|
||||
timelineid
|
||||
);
|
||||
|
||||
// We need a tokio runtime to call the rust-postgres copy_both function.
|
||||
// Most functions in the rust-postgres driver have a blocking wrapper,
|
||||
// but copy_both does not (TODO: the copy_both support is still work-in-progress
|
||||
// as of this writing. Check later if that has changed, or implement the
|
||||
// wrapper ourselves in rust-postgres)
|
||||
let runtime = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
//
|
||||
// Make a connection to the WAL safekeeper, or directly to the primary PostgreSQL server,
|
||||
// and start streaming WAL from it. If the connection is lost, keep retrying.
|
||||
@@ -114,7 +104,7 @@ fn thread_main(conf: &PageServerConf, timelineid: ZTimelineId) {
|
||||
// Look up the current WAL producer address
|
||||
let wal_producer_connstr = get_wal_producer_connstr(timelineid);
|
||||
|
||||
let res = walreceiver_main(&runtime, conf, timelineid, &wal_producer_connstr);
|
||||
let res = walreceiver_main(conf, timelineid, &wal_producer_connstr, tenantid);
|
||||
|
||||
if let Err(e) = res {
|
||||
info!(
|
||||
@@ -127,212 +117,237 @@ fn thread_main(conf: &PageServerConf, timelineid: ZTimelineId) {
|
||||
}
|
||||
|
||||
fn walreceiver_main(
|
||||
runtime: &Runtime,
|
||||
conf: &PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
wal_producer_connstr: &str,
|
||||
tenantid: &ZTenantId,
|
||||
) -> Result<(), Error> {
|
||||
// Connect to the database in replication mode.
|
||||
info!("connecting to {:?}", wal_producer_connstr);
|
||||
let connect_cfg = format!("{} replication=true", wal_producer_connstr);
|
||||
let connect_cfg = format!(
|
||||
"{} application_name=pageserver replication=true",
|
||||
wal_producer_connstr
|
||||
);
|
||||
|
||||
let (rclient, connection) = runtime.block_on(tokio_postgres::connect(&connect_cfg, NoTls))?;
|
||||
let mut rclient = Client::connect(&connect_cfg, NoTls)?;
|
||||
info!("connected!");
|
||||
|
||||
// The connection object performs the actual communication with the database,
|
||||
// so spawn it off to run on its own.
|
||||
runtime.spawn(async move {
|
||||
if let Err(e) = connection.await {
|
||||
error!("connection error: {}", e);
|
||||
}
|
||||
});
|
||||
// Immediately increment the gauge, then create a job to decrement it on thread exit.
|
||||
// One of the pros of `defer!` is that this will *most probably*
|
||||
// get called, even in the presence of panics.
|
||||
let gauge = crate::LIVE_CONNECTIONS_COUNT.with_label_values(&["wal_receiver"]);
|
||||
gauge.inc();
|
||||
scopeguard::defer! {
|
||||
gauge.dec();
|
||||
}
|
||||
|
||||
let identify = identify_system(runtime, &rclient)?;
|
||||
let identify = identify_system(&mut rclient)?;
|
||||
info!("{:?}", identify);
|
||||
let end_of_wal = Lsn::from(u64::from(identify.xlogpos));
|
||||
let mut caught_up = false;
|
||||
|
||||
let pcache = page_cache::get_pagecache(&conf, timelineid).unwrap();
|
||||
let repository = tenant_mgr::get_repository_for_tenant(tenantid)?;
|
||||
let timeline = repository.get_timeline(timelineid).unwrap();
|
||||
|
||||
//
|
||||
// Start streaming the WAL, from where we left off previously.
|
||||
//
|
||||
let mut startpoint = pcache.get_last_valid_lsn();
|
||||
let last_valid_lsn = pcache.get_last_valid_lsn();
|
||||
// If we had previously received WAL up to some point in the middle of a WAL record, we
|
||||
// better start from the end of last full WAL record, not in the middle of one.
|
||||
let mut last_rec_lsn = timeline.get_last_record_lsn();
|
||||
let mut startpoint = last_rec_lsn;
|
||||
|
||||
if startpoint == Lsn(0) {
|
||||
// If we start here with identify.xlogpos, we will have a race condition with
// postgres startup: an insert into postgres may request a page that was modified
// with an lsn smaller than identify.xlogpos.
//
// The current procedure for starting postgres will be changed anyway, to something
// like having an 'initdb' method on the pageserver (or importing some shared
// empty database snapshot), so for now I just use the start of the first segment,
// which seems to be a valid record.
|
||||
pcache.init_valid_lsn(Lsn(0x0100_0000));
|
||||
startpoint = Lsn(0x0100_0000);
|
||||
} else {
|
||||
// There might be some padding after the last full record, skip it.
|
||||
//
|
||||
// FIXME: It probably would be better to always start streaming from the beginning
|
||||
// of the page, or the segment, so that we could check the page/segment headers
|
||||
// too. Just for the sake of paranoia.
|
||||
startpoint += startpoint.calc_padding(8u32);
|
||||
error!("No previous WAL position");
|
||||
}
|
||||
debug!(
|
||||
"last_valid_lsn {} starting replication from {} for timeline {}, server is at {}...",
|
||||
last_valid_lsn, startpoint, timelineid, end_of_wal
|
||||
|
||||
// There might be some padding after the last full record, skip it.
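// WAL records are MAXALIGNed, i.e. 8-byte aligned, so the next record begins at
// the LSN rounded up to the next multiple of 8; calc_padding(8) is assumed to
// return exactly that round-up amount.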
startpoint += startpoint.calc_padding(8u32);
|
||||
|
||||
info!(
|
||||
"last_record_lsn {} starting replication from {} for timeline {}, server is at {}...",
|
||||
last_rec_lsn, startpoint, timelineid, end_of_wal
|
||||
);
|
||||
|
||||
let query = format!("START_REPLICATION PHYSICAL {}", startpoint);
|
||||
|
||||
let copy_stream = runtime.block_on(rclient.copy_both_simple::<bytes::Bytes>(&query))?;
|
||||
|
||||
let physical_stream = ReplicationStream::new(copy_stream);
|
||||
tokio::pin!(physical_stream);
|
||||
let copy_stream = rclient.copy_both_simple(&query)?;
|
||||
let mut physical_stream = ReplicationIter::new(copy_stream);
|
||||
|
||||
let mut waldecoder = WalStreamDecoder::new(startpoint);
|
||||
|
||||
while let Some(replication_message) = runtime.block_on(physical_stream.next()) {
|
||||
match replication_message? {
|
||||
let checkpoint_bytes = timeline.get_page_at_lsn_nowait(RelishTag::Checkpoint, 0, startpoint)?;
|
||||
let mut checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
|
||||
trace!("CheckPoint.nextXid = {}", checkpoint.nextXid.value);
|
||||
|
||||
while let Some(replication_message) = physical_stream.next()? {
|
||||
let status_update = match replication_message {
|
||||
ReplicationMessage::XLogData(xlog_data) => {
|
||||
// Pass the WAL data to the decoder, and see if we can decode
|
||||
// more records as a result.
|
||||
let data = xlog_data.data();
|
||||
let startlsn = Lsn::from(xlog_data.wal_start());
|
||||
let endlsn = startlsn + data.len() as u64;
|
||||
let prev_last_rec_lsn = last_rec_lsn;
|
||||
|
||||
write_wal_file(
|
||||
conf,
|
||||
startlsn,
|
||||
timelineid,
|
||||
16 * 1024 * 1024, // FIXME
|
||||
&timelineid,
|
||||
pg_constants::WAL_SEGMENT_SIZE,
|
||||
data,
|
||||
tenantid,
|
||||
)?;
|
||||
|
||||
trace!("received XLogData between {} and {}", startlsn, endlsn);
|
||||
|
||||
waldecoder.feed_bytes(data);
|
||||
|
||||
loop {
|
||||
if let Some((lsn, recdata)) = waldecoder.poll_decode()? {
|
||||
let decoded = decode_wal_record(recdata.clone());
|
||||
// Put the WAL record to the page cache. We make a separate copy of
|
||||
// it for every block it modifies. (The actual WAL record is kept in
|
||||
// a Bytes, which uses a reference counter for the underlying buffer,
|
||||
// so having multiple copies of it doesn't cost that much)
|
||||
for blk in decoded.blocks.iter() {
|
||||
let tag = BufferTag {
|
||||
rel: RelTag {
|
||||
spcnode: blk.rnode_spcnode,
|
||||
dbnode: blk.rnode_dbnode,
|
||||
relnode: blk.rnode_relnode,
|
||||
forknum: blk.forknum as u8,
|
||||
},
|
||||
blknum: blk.blkno,
|
||||
};
|
||||
while let Some((lsn, recdata)) = waldecoder.poll_decode()? {
|
||||
// Save old checkpoint value to compare with it after decoding WAL record
|
||||
let old_checkpoint_bytes = checkpoint.encode();
|
||||
let decoded = decode_wal_record(recdata.clone());
|
||||
|
||||
let rec = page_cache::WALRecord {
|
||||
lsn,
|
||||
will_init: blk.will_init || blk.apply_image,
|
||||
truncate: false,
|
||||
rec: recdata.clone(),
|
||||
main_data_offset: decoded.main_data_offset as u32,
|
||||
};
|
||||
// It is important to deal with aligned record LSNs here, because the lsn in a
// getPage@LSN request is aligned and can be several bytes bigger. Without this
// alignment we are at risk of hitting a deadlock.
|
||||
assert!(lsn.is_aligned());
|
||||
|
||||
pcache.put_wal_record(tag, rec);
|
||||
}
|
||||
// include truncate wal record in all pages
|
||||
if decoded.xl_rmid == pg_constants::RM_SMGR_ID
|
||||
&& (decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK)
|
||||
== pg_constants::XLOG_SMGR_TRUNCATE
|
||||
{
|
||||
let truncate = XlSmgrTruncate::decode(&decoded);
|
||||
if (truncate.flags & pg_constants::SMGR_TRUNCATE_HEAP) != 0 {
|
||||
let tag = BufferTag {
|
||||
rel: RelTag {
|
||||
spcnode: truncate.rnode.spcnode,
|
||||
dbnode: truncate.rnode.dbnode,
|
||||
relnode: truncate.rnode.relnode,
|
||||
forknum: pg_constants::MAIN_FORKNUM,
|
||||
},
|
||||
blknum: truncate.blkno,
|
||||
};
|
||||
let rec = page_cache::WALRecord {
|
||||
lsn,
|
||||
will_init: false,
|
||||
truncate: true,
|
||||
rec: recdata.clone(),
|
||||
main_data_offset: decoded.main_data_offset as u32,
|
||||
};
|
||||
pcache.put_rel_wal_record(tag, rec)?;
|
||||
}
|
||||
} else if decoded.xl_rmid == pg_constants::RM_DBASE_ID
|
||||
&& (decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK)
|
||||
== pg_constants::XLOG_DBASE_CREATE
|
||||
{
|
||||
let createdb = XlCreateDatabase::decode(&decoded);
|
||||
pcache.create_database(
|
||||
lsn,
|
||||
createdb.db_id,
|
||||
createdb.tablespace_id,
|
||||
createdb.src_db_id,
|
||||
createdb.src_tablespace_id,
|
||||
)?;
|
||||
}
|
||||
// Now that this record has been handled, let the page cache know that
|
||||
// it is up-to-date to this LSN
|
||||
pcache.advance_last_record_lsn(lsn);
|
||||
} else {
|
||||
break;
|
||||
restore_local_repo::save_decoded_record(
|
||||
&mut checkpoint,
|
||||
&*timeline,
|
||||
&decoded,
|
||||
recdata,
|
||||
lsn,
|
||||
)?;
|
||||
last_rec_lsn = lsn;
|
||||
|
||||
let new_checkpoint_bytes = checkpoint.encode();
|
||||
// Check if checkpoint data was updated by save_decoded_record
|
||||
if new_checkpoint_bytes != old_checkpoint_bytes {
|
||||
timeline.put_page_image(
|
||||
RelishTag::Checkpoint,
|
||||
0,
|
||||
lsn,
|
||||
new_checkpoint_bytes,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Update the last_valid LSN value in the page cache one more time. We updated
|
||||
// it in the loop above, between each WAL record, but we might have received
|
||||
// a partial record after the last completed record. Our page cache's value
|
||||
// better reflect that, because GetPage@LSN requests might also point in the
|
||||
// middle of a record, if the request LSN was taken from the server's current
|
||||
// flush ptr.
|
||||
pcache.advance_last_valid_lsn(endlsn);
|
||||
// Somewhat arbitrarily, if we have at least 10 complete wal segments (16 MB each),
|
||||
// "checkpoint" the repository to flush all the changes from WAL we've processed
|
||||
// so far to disk. After this, we don't need the original WAL anymore, and it
|
||||
// can be removed. This is probably too aggressive for production, but it's useful
|
||||
// to expose bugs now.
|
||||
//
|
||||
// TODO: We don't actually dare to remove the WAL. It's useful for debugging,
|
||||
// and we might need it for logical decoding or other things in the future. Although
|
||||
// we should also be able to fetch it back from the WAL safekeepers or S3 if
|
||||
// needed.
|
||||
if prev_last_rec_lsn.segment_number(pg_constants::WAL_SEGMENT_SIZE)
|
||||
!= last_rec_lsn.segment_number(pg_constants::WAL_SEGMENT_SIZE)
|
||||
{
|
||||
info!("switched segment {} to {}", prev_last_rec_lsn, last_rec_lsn);
|
||||
let (oldest_segno, newest_segno) = find_wal_file_range(
|
||||
conf,
|
||||
&timelineid,
|
||||
pg_constants::WAL_SEGMENT_SIZE,
|
||||
last_rec_lsn,
|
||||
tenantid,
|
||||
)?;
|
||||
|
||||
if newest_segno - oldest_segno >= 10 {
|
||||
// TODO: This is where we could remove WAL older than last_rec_lsn.
|
||||
//remove_wal_files(timelineid, pg_constants::WAL_SEGMENT_SIZE, last_rec_lsn)?;
|
||||
}
|
||||
}
|
||||
|
||||
if !caught_up && endlsn >= end_of_wal {
|
||||
info!("caught up at LSN {}", endlsn);
|
||||
caught_up = true;
|
||||
}
|
||||
|
||||
Some(endlsn)
|
||||
}
|
||||
|
||||
ReplicationMessage::PrimaryKeepAlive(keepalive) => {
|
||||
let wal_end = keepalive.wal_end();
|
||||
let timestamp = keepalive.timestamp();
|
||||
let reply_requested: bool = keepalive.reply() != 0;
|
||||
let reply_requested = keepalive.reply() != 0;
|
||||
|
||||
trace!(
|
||||
"received PrimaryKeepAlive(wal_end: {}, timestamp: {} reply: {})",
|
||||
"received PrimaryKeepAlive(wal_end: {}, timestamp: {:?} reply: {})",
|
||||
wal_end,
|
||||
timestamp,
|
||||
reply_requested,
|
||||
);
|
||||
if reply_requested {
|
||||
// TODO: More thought should go into what values are sent here.
|
||||
let last_lsn = PgLsn::from(u64::from(pcache.get_last_valid_lsn()));
|
||||
let write_lsn = last_lsn;
|
||||
let flush_lsn = last_lsn;
|
||||
let apply_lsn = PgLsn::INVALID;
|
||||
let ts = PgTimestamp::now()?;
|
||||
const NO_REPLY: u8 = 0u8;
|
||||
|
||||
runtime.block_on(
|
||||
physical_stream
|
||||
.as_mut()
|
||||
.standby_status_update(write_lsn, flush_lsn, apply_lsn, ts, NO_REPLY),
|
||||
)?;
|
||||
if reply_requested {
|
||||
Some(timeline.get_last_record_lsn())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
|
||||
_ => None,
|
||||
};
|
||||
|
||||
if let Some(last_lsn) = status_update {
|
||||
// TODO: More thought should go into what values are sent here.
|
||||
let last_lsn = PgLsn::from(u64::from(last_lsn));
|
||||
let write_lsn = last_lsn;
|
||||
let flush_lsn = last_lsn;
|
||||
let apply_lsn = PgLsn::from(0);
|
||||
let ts = SystemTime::now();
|
||||
const NO_REPLY: u8 = 0;
|
||||
|
||||
physical_stream.standby_status_update(write_lsn, flush_lsn, apply_lsn, ts, NO_REPLY)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn find_wal_file_range(
|
||||
conf: &PageServerConf,
|
||||
timeline: &ZTimelineId,
|
||||
wal_seg_size: usize,
|
||||
written_upto: Lsn,
|
||||
tenant: &ZTenantId,
|
||||
) -> Result<(u64, u64)> {
|
||||
let written_upto_segno = written_upto.segment_number(wal_seg_size);
|
||||
|
||||
let mut oldest_segno = written_upto_segno;
|
||||
let mut newest_segno = written_upto_segno;
|
||||
// Scan the wal directory, and count how many WAL files we could remove
|
||||
let wal_dir = conf.wal_dir_path(timeline, tenant);
|
||||
for entry in fs::read_dir(wal_dir)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
|
||||
if path.is_dir() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let filename = path.file_name().unwrap().to_str().unwrap();
|
||||
|
||||
if IsXLogFileName(filename) {
|
||||
let (segno, _tli) = XLogFromFileName(filename, wal_seg_size);
|
||||
|
||||
if segno > written_upto_segno {
|
||||
// that's strange.
|
||||
warn!("there is a WAL file from future at {}", path.display());
|
||||
continue;
|
||||
}
|
||||
|
||||
oldest_segno = min(oldest_segno, segno);
|
||||
newest_segno = max(newest_segno, segno);
|
||||
}
|
||||
}
|
||||
// FIXME: would be good to assert that there are no gaps in the WAL files
|
||||
|
||||
Ok((oldest_segno, newest_segno))
|
||||
}
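// Worked example (illustrative, not part of the original file) of how an LSN maps
// to a WAL segment number, assuming the standard 16 MB wal_seg_size. The real code
// uses Lsn::segment_number(), whose behaviour is assumed to match this arithmetic.
#[allow(dead_code)]
fn example_segment_number() {
    let wal_seg_size: u64 = 16 * 1024 * 1024; // 0x0100_0000
    let lsn: u64 = 0x0200_0123; // hypothetical LSN
    let segno = lsn / wal_seg_size;
    assert_eq!(segno, 2);
}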
|
||||
|
||||
/// Data returned from the postgres `IDENTIFY_SYSTEM` command
|
||||
///
|
||||
/// See the [postgres docs] for more details.
|
||||
@@ -353,12 +368,9 @@ pub struct IdentifySystem {
|
||||
pub struct IdentifyError;
|
||||
|
||||
/// Run the postgres `IDENTIFY_SYSTEM` command
|
||||
pub fn identify_system(
|
||||
runtime: &Runtime,
|
||||
client: &tokio_postgres::Client,
|
||||
) -> Result<IdentifySystem, Error> {
|
||||
pub fn identify_system(client: &mut Client) -> Result<IdentifySystem, Error> {
|
||||
let query_str = "IDENTIFY_SYSTEM";
|
||||
let response = runtime.block_on(client.simple_query(query_str))?;
|
||||
let response = client.simple_query(query_str)?;
|
||||
|
||||
// get(N) from row, then parse it as some destination type.
|
||||
fn get_parse<T>(row: &SimpleQueryRow, idx: usize) -> Result<T, IdentifyError>
|
||||
@@ -379,15 +391,17 @@ pub fn identify_system(
|
||||
dbname: get_parse(first_row, 3).ok(),
|
||||
})
|
||||
} else {
|
||||
Err(IdentifyError)?
|
||||
Err(IdentifyError.into())
|
||||
}
|
||||
}
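// Hedged usage sketch (not part of the original file): open a replication-mode
// connection and run IDENTIFY_SYSTEM. The connection string handling is illustrative.
#[allow(dead_code)]
fn example_identify(wal_producer_connstr: &str) -> Result<(), Error> {
    let connect_cfg = format!("{} replication=true", wal_producer_connstr);
    let mut client = Client::connect(&connect_cfg, NoTls)?;
    let identify = identify_system(&mut client)?;
    info!("{:?}", identify);
    Ok(())
}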
|
||||
|
||||
fn write_wal_file(
|
||||
conf: &PageServerConf,
|
||||
startpos: Lsn,
|
||||
timeline: ZTimelineId,
|
||||
timelineid: &ZTimelineId,
|
||||
wal_seg_size: usize,
|
||||
buf: &[u8],
|
||||
tenantid: &ZTenantId,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut bytes_left: usize = buf.len();
|
||||
let mut bytes_written: usize = 0;
|
||||
@@ -395,10 +409,10 @@ fn write_wal_file(
|
||||
let mut start_pos = startpos;
|
||||
const ZERO_BLOCK: &[u8] = &[0u8; XLOG_BLCKSZ];
|
||||
|
||||
let wal_dir = PathBuf::from(format!("timelines/{}/wal", timeline));
|
||||
let wal_dir = conf.wal_dir_path(timelineid, tenantid);
|
||||
|
||||
/* Extract WAL location for this block */
|
||||
let mut xlogoff = start_pos.segment_offset(wal_seg_size as u64) as usize;
|
||||
let mut xlogoff = start_pos.segment_offset(wal_seg_size);
|
||||
|
||||
while bytes_left != 0 {
|
||||
let bytes_to_write;
|
||||
@@ -414,7 +428,7 @@ fn write_wal_file(
|
||||
}
|
||||
|
||||
/* Open file */
|
||||
let segno = start_pos.segment_number(wal_seg_size as u64);
|
||||
let segno = start_pos.segment_number(wal_seg_size);
|
||||
let wal_file_name = XLogFileName(
|
||||
1, // FIXME: always use Postgres timeline 1
|
||||
segno,
|
||||
@@ -443,7 +457,7 @@ fn write_wal_file(
|
||||
{
|
||||
Ok(mut file) => {
|
||||
for _ in 0..(wal_seg_size / XLOG_BLCKSZ) {
|
||||
file.write_all(&ZERO_BLOCK)?;
|
||||
file.write_all(ZERO_BLOCK)?;
|
||||
}
|
||||
wal_file = file;
|
||||
}
|
||||
@@ -466,7 +480,7 @@ fn write_wal_file(
|
||||
xlogoff += bytes_to_write;
|
||||
|
||||
/* Did we reach the end of a WAL segment? */
|
||||
if start_pos.segment_offset(wal_seg_size as u64) == 0 {
|
||||
if start_pos.segment_offset(wal_seg_size) == 0 {
|
||||
xlogoff = 0;
|
||||
if partial {
|
||||
fs::rename(&wal_file_partial_path, &wal_file_path)?;
|
||||
|
||||
@@ -1,22 +1,28 @@
|
||||
//
|
||||
// WAL redo
|
||||
//
|
||||
// We rely on Postgres to perform WAL redo for us. We launch a
|
||||
// postgres process in special "wal redo" mode that's similar to
|
||||
// single-user mode. We then pass the previous page image, if any,
// and all the WAL records we want to apply, to the postgres
|
||||
// process. Then we get the page image back. Communication with the
|
||||
// postgres process happens via stdin/stdout
|
||||
//
|
||||
// See src/backend/tcop/zenith_wal_redo.c for the other side of
|
||||
// this communication.
|
||||
//
|
||||
// TODO: Even though the postgres code runs in a separate process,
|
||||
// it's not a secure sandbox.
|
||||
//
|
||||
//!
|
||||
//! WAL redo. This service runs PostgreSQL in a special wal_redo mode
|
||||
//! to apply given WAL records over an old page image and return new
|
||||
//! page image.
|
||||
//!
|
||||
//! We rely on Postgres to perform WAL redo for us. We launch a
|
||||
//! postgres process in special "wal redo" mode that's similar to
|
||||
//! single-user mode. We then pass the previous page image, if any,
|
||||
//! and all the WAL records we want to apply, to the postgres
|
||||
//! process. Then we get the page image back. Communication with the
|
||||
//! postgres process happens via stdin/stdout
|
||||
//!
|
||||
//! See src/backend/tcop/zenith_wal_redo.c for the other side of
|
||||
//! this communication.
|
||||
//!
|
||||
//! The Postgres process is assumed to be secure against malicious WAL
|
||||
//! records. It achieves this by dropping privileges before replaying
//! any WAL records, so that even if an attacker hijacks the Postgres
//! process, they cannot escape from it.
|
||||
//!
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
use bytes::{Buf, BufMut, Bytes, BytesMut};
|
||||
use lazy_static::lazy_static;
|
||||
use log::*;
|
||||
use std::assert;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::cell::RefCell;
|
||||
use std::fs;
|
||||
use std::fs::OpenOptions;
|
||||
@@ -24,7 +30,6 @@ use std::io::prelude::*;
|
||||
use std::io::Error;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Stdio;
|
||||
use std::sync::mpsc;
|
||||
use std::sync::Mutex;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
@@ -32,43 +37,123 @@ use tokio::io::AsyncBufReadExt;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::process::{ChildStdin, ChildStdout, Command};
|
||||
use tokio::time::timeout;
|
||||
use zenith_metrics::{register_histogram, register_int_counter, Histogram, IntCounter};
|
||||
use zenith_utils::bin_ser::BeSer;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
|
||||
use crate::page_cache::BufferTag;
|
||||
use crate::page_cache::WALRecord;
|
||||
use crate::relish::*;
|
||||
use crate::repository::{Timeline, WALRecord};
|
||||
use crate::waldecoder::XlMultiXactCreate;
|
||||
use crate::waldecoder::XlXactParsedRecord;
|
||||
use crate::PageServerConf;
|
||||
use crate::ZTimelineId;
|
||||
use postgres_ffi::nonrelfile_utils::mx_offset_to_flags_bitshift;
|
||||
use postgres_ffi::nonrelfile_utils::mx_offset_to_flags_offset;
|
||||
use postgres_ffi::nonrelfile_utils::mx_offset_to_member_offset;
|
||||
use postgres_ffi::nonrelfile_utils::transaction_id_set_status;
|
||||
use postgres_ffi::pg_constants;
|
||||
use postgres_ffi::xlog_utils::XLogRecord;
|
||||
use postgres_ffi::XLogRecord;
|
||||
|
||||
///
|
||||
/// `RelTag` + block number (`blknum`) gives us a unique id of the page in the cluster.
|
||||
///
|
||||
/// In Postgres `BufferTag` structure is used for exactly the same purpose.
|
||||
/// [See more related comments here](https://github.com/postgres/postgres/blob/99c5852e20a0987eca1c38ba0c09329d4076b6a0/src/include/storage/buf_internals.h#L91).
|
||||
///
|
||||
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Serialize, Deserialize)]
|
||||
pub struct BufferTag {
|
||||
pub rel: RelTag,
|
||||
pub blknum: u32,
|
||||
}
|
||||
|
||||
///
|
||||
/// WAL Redo Manager is responsible for replaying WAL records.
|
||||
///
|
||||
/// Callers use the WAL redo manager through this abstract interface,
|
||||
/// which makes it easy to mock it in tests.
|
||||
pub trait WalRedoManager: Send + Sync {
|
||||
/// Apply some WAL records.
|
||||
///
|
||||
/// The caller passes an old page image, and WAL records that should be
|
||||
/// applied over it. The return value is a new page image, after applying
|
||||
/// the records.
|
||||
fn request_redo(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
rel: RelishTag,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
base_img: Option<Bytes>,
|
||||
records: Vec<WALRecord>,
|
||||
) -> Result<Bytes, WalRedoError>;
|
||||
}
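// Hedged usage sketch (not part of the original file): ask a WalRedoManager to
// reconstruct a page image. All concrete values (relish, block number, LSN) are
// purely illustrative; a real caller passes the page version it actually needs.
#[allow(dead_code)]
fn example_request_redo(
    mgr: &dyn WalRedoManager,
    timeline: &dyn Timeline,
    base_img: Option<Bytes>,
    records: Vec<WALRecord>,
) -> Result<Bytes, WalRedoError> {
    let rel = RelishTag::Slru {
        slru: SlruKind::Clog,
        segno: 0,
    };
    mgr.request_redo(timeline, rel, 0, Lsn(0x0100_0000), base_img, records)
}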
|
||||
|
||||
///
|
||||
/// A dummy WAL Redo Manager implementation that doesn't allow replaying
|
||||
/// anything. Currently used during bootstrapping (zenith init), to create
|
||||
/// a Repository object without launching the real WAL redo process.
|
||||
///
|
||||
pub struct DummyRedoManager {}
|
||||
impl crate::walredo::WalRedoManager for DummyRedoManager {
|
||||
fn request_redo(
|
||||
&self,
|
||||
_timeline: &dyn Timeline,
|
||||
_rel: RelishTag,
|
||||
_blknum: u32,
|
||||
_lsn: Lsn,
|
||||
_base_img: Option<Bytes>,
|
||||
_records: Vec<WALRecord>,
|
||||
) -> Result<Bytes, WalRedoError> {
|
||||
Err(WalRedoError::InvalidState)
|
||||
}
|
||||
}
|
||||
|
||||
static TIMEOUT: Duration = Duration::from_secs(20);
|
||||
|
||||
///
|
||||
/// A WAL redo manager consists of two parts: WalRedoManager, and
|
||||
/// WalRedoManagerInternal. WalRedoManager is the public struct
|
||||
/// that can be used to send redo requests to the manager.
|
||||
/// WalRedoManagerInternal is used by the manager thread itself.
|
||||
///
|
||||
pub struct WalRedoManager {
|
||||
request_tx: Mutex<mpsc::Sender<WalRedoRequest>>,
|
||||
// Metrics collected on WAL redo operations
|
||||
//
|
||||
// We collect the time spent in actual WAL redo ('redo'), and time waiting
|
||||
// for access to the postgres process ('wait') since there is only one for
|
||||
// each tenant.
|
||||
lazy_static! {
|
||||
static ref WAL_REDO_TIME: Histogram =
|
||||
register_histogram!("pageserver_wal_redo_time", "Time spent on WAL redo")
|
||||
.expect("failed to define a metric");
|
||||
static ref WAL_REDO_WAIT_TIME: Histogram = register_histogram!(
|
||||
"pageserver_wal_redo_wait_time",
|
||||
"Time spent waiting for access to the WAL redo process"
|
||||
)
|
||||
.expect("failed to define a metric");
|
||||
static ref WAL_REDO_RECORD_COUNTER: IntCounter = register_int_counter!(
|
||||
"pageserver_wal_records_replayed",
|
||||
"Number of WAL records replayed"
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
struct WalRedoManagerInternal {
|
||||
_conf: PageServerConf,
|
||||
timelineid: ZTimelineId,
|
||||
///
|
||||
/// This is the real implementation that uses a Postgres process to
|
||||
/// perform WAL replay. Only one thread can use the process at a time;
/// that is controlled by the Mutex. In the future, we might want to
|
||||
/// launch a pool of processes to allow concurrent replay of multiple
|
||||
/// records.
|
||||
///
|
||||
pub struct PostgresRedoManager {
|
||||
tenantid: ZTenantId,
|
||||
conf: &'static PageServerConf,
|
||||
|
||||
request_rx: mpsc::Receiver<WalRedoRequest>,
|
||||
runtime: tokio::runtime::Runtime,
|
||||
process: Mutex<Option<PostgresRedoProcess>>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct WalRedoRequest {
|
||||
tag: BufferTag,
|
||||
rel: RelishTag,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
|
||||
base_img: Option<Bytes>,
|
||||
records: Vec<WALRecord>,
|
||||
|
||||
response_channel: mpsc::Sender<Result<Bytes, WalRedoError>>,
|
||||
}
|
||||
|
||||
/// An error happened in WAL redo
|
||||
@@ -76,90 +161,80 @@ struct WalRedoRequest {
|
||||
pub enum WalRedoError {
|
||||
#[error(transparent)]
|
||||
IoError(#[from] std::io::Error),
|
||||
|
||||
#[error("cannot perform WAL redo now")]
|
||||
InvalidState,
|
||||
}
|
||||
|
||||
///
|
||||
/// Public interface of WAL redo manager
|
||||
///
|
||||
impl WalRedoManager {
|
||||
impl WalRedoManager for PostgresRedoManager {
|
||||
///
|
||||
/// Create a new WalRedoManager.
|
||||
/// Request the WAL redo manager to apply some WAL records
|
||||
///
|
||||
/// This only initializes the struct. You need to call WalRedoManager::launch to
|
||||
/// start the thread that processes the requests.
|
||||
pub fn new(conf: &PageServerConf, timelineid: ZTimelineId) -> WalRedoManager {
|
||||
let (tx, rx) = mpsc::channel();
|
||||
|
||||
//
|
||||
// Launch the WAL redo thread
|
||||
//
|
||||
// Get mutable references to the values that we need to pass to the
|
||||
// thread.
|
||||
let request_rx = rx;
|
||||
let conf_copy = conf.clone();
|
||||
|
||||
// Currently, the join handle is not saved anywhere and we
|
||||
// won't try restart the thread if it dies.
|
||||
let _walredo_thread = std::thread::Builder::new()
|
||||
.name("WAL redo thread".into())
|
||||
.spawn(move || {
|
||||
let mut internal = WalRedoManagerInternal {
|
||||
_conf: conf_copy,
|
||||
timelineid,
|
||||
request_rx,
|
||||
};
|
||||
internal.wal_redo_main();
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
WalRedoManager {
|
||||
request_tx: Mutex::new(tx),
|
||||
}
|
||||
}
|
||||
|
||||
/// The WAL redo is handled by a separate thread, so this just sends a request
|
||||
/// to the thread and waits for response.
|
||||
///
|
||||
/// Request the WAL redo manager to apply WAL records, to reconstruct the page image
|
||||
/// of the given page version.
|
||||
///
|
||||
pub fn request_redo(
|
||||
fn request_redo(
|
||||
&self,
|
||||
tag: BufferTag,
|
||||
timeline: &dyn Timeline,
|
||||
rel: RelishTag,
|
||||
blknum: u32,
|
||||
lsn: Lsn,
|
||||
base_img: Option<Bytes>,
|
||||
records: Vec<WALRecord>,
|
||||
) -> Result<Bytes, WalRedoError> {
|
||||
// Create a channel where to receive the response
|
||||
let (tx, rx) = mpsc::channel::<Result<Bytes, WalRedoError>>();
|
||||
let start_time;
|
||||
let lock_time;
|
||||
let end_time;
|
||||
|
||||
let request = WalRedoRequest {
|
||||
tag,
|
||||
rel,
|
||||
blknum,
|
||||
lsn,
|
||||
base_img,
|
||||
records,
|
||||
response_channel: tx,
|
||||
};
|
||||
|
||||
self.request_tx
|
||||
.lock()
|
||||
.unwrap()
|
||||
.send(request)
|
||||
.expect("could not send WAL redo request");
|
||||
start_time = Instant::now();
|
||||
let result = {
|
||||
let mut process_guard = self.process.lock().unwrap();
|
||||
lock_time = Instant::now();
|
||||
|
||||
rx.recv()
|
||||
.expect("could not receive response to WAL redo request")
|
||||
// launch the WAL redo process on first use
|
||||
if process_guard.is_none() {
|
||||
let p = self
|
||||
.runtime
|
||||
.block_on(PostgresRedoProcess::launch(self.conf, &self.tenantid))?;
|
||||
*process_guard = Some(p);
|
||||
}
|
||||
let process = (*process_guard).as_ref().unwrap();
|
||||
|
||||
self.runtime
|
||||
.block_on(self.handle_apply_request(process, &request))
|
||||
};
|
||||
end_time = Instant::now();
|
||||
|
||||
WAL_REDO_WAIT_TIME.observe(lock_time.duration_since(start_time).as_secs_f64());
|
||||
WAL_REDO_TIME.observe(end_time.duration_since(lock_time).as_secs_f64());
|
||||
|
||||
if let Ok(page) = result {
|
||||
let mut buf = BytesMut::new();
|
||||
buf.extend_from_slice(&page);
|
||||
self.set_hint_bits(timeline, &mut buf, lsn, &request.records);
|
||||
return Ok(buf.freeze());
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// WAL redo thread
|
||||
///
|
||||
impl WalRedoManagerInternal {
|
||||
//
|
||||
// Main entry point for the WAL applicator thread.
|
||||
//
|
||||
fn wal_redo_main(&mut self) {
|
||||
info!("WAL redo thread started {}", self.timelineid);
|
||||
|
||||
impl PostgresRedoManager {
|
||||
///
|
||||
/// Create a new PostgresRedoManager.
|
||||
///
|
||||
pub fn new(conf: &'static PageServerConf, tenantid: ZTenantId) -> PostgresRedoManager {
|
||||
// We block on waiting for requests on the walredo request channel, but
|
||||
// use async I/O to communicate with the child process. Initialize the
|
||||
// runtime for the async part.
|
||||
@@ -168,44 +243,124 @@ impl WalRedoManagerInternal {
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let process: WalRedoProcess;
|
||||
let datadir = format!("wal-redo/{}", self.timelineid);
|
||||
|
||||
info!("launching WAL redo postgres process {}", self.timelineid);
|
||||
|
||||
process = runtime.block_on(WalRedoProcess::launch(&datadir)).unwrap();
|
||||
info!("WAL redo postgres started");
|
||||
|
||||
// Loop forever, handling requests as they come.
|
||||
loop {
|
||||
let request = self.request_rx.recv().unwrap();
|
||||
|
||||
let result = runtime.block_on(self.handle_apply_request(&process, &request));
|
||||
let result_ok = result.is_ok();
|
||||
|
||||
// Send the result to the requester
|
||||
let _ = request.response_channel.send(result);
|
||||
|
||||
if !result_ok {
|
||||
error!("wal-redo-postgres filed to apply request {:?}", request);
|
||||
}
|
||||
// The actual process is launched lazily, on first request.
|
||||
PostgresRedoManager {
|
||||
runtime,
|
||||
tenantid,
|
||||
conf,
|
||||
process: Mutex::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn transaction_id_set_status_bit(&self, xid: u32, status: u8, page: &mut BytesMut) {
|
||||
trace!(
|
||||
"handle_apply_request for RM_XACT_ID-{} (1-commit, 2-abort, 3-sub_commit)",
|
||||
status
|
||||
fn xid_status(&self, timeline: &dyn Timeline, xid: u32, lsn: Lsn) -> u8 {
|
||||
let pageno = xid / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
if let Ok(clog_page) = timeline.get_page_at_lsn_nowait(
|
||||
RelishTag::Slru {
|
||||
slru: SlruKind::Clog,
|
||||
segno,
|
||||
},
|
||||
rpageno,
|
||||
lsn,
|
||||
) {
|
||||
postgres_ffi::nonrelfile_utils::transaction_id_get_status(xid, &clog_page[..])
|
||||
} else {
|
||||
pg_constants::TRANSACTION_STATUS_IN_PROGRESS
|
||||
}
|
||||
}
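// Worked example (illustrative, not part of the original file) of the CLOG
// addressing above, assuming the usual Postgres values CLOG_XACTS_PER_PAGE = 32768
// and SLRU_PAGES_PER_SEGMENT = 32: xid 100_000 lands on CLOG page 3, which is
// relative page 3 of SLRU segment 0.
#[allow(dead_code)]
fn example_clog_position() {
    let xid: u32 = 100_000;
    let pageno = xid / 32768; // assumed value of CLOG_XACTS_PER_PAGE
    let segno = pageno / 32; // assumed value of SLRU_PAGES_PER_SEGMENT
    let rpageno = pageno % 32;
    assert_eq!((pageno, segno, rpageno), (3, 0, 3));
}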
|
||||
|
||||
fn set_hint_bits(
|
||||
&self,
|
||||
timeline: &dyn Timeline,
|
||||
page: &mut BytesMut,
|
||||
lsn: Lsn,
|
||||
records: &Vec<WALRecord>,
|
||||
) {
|
||||
let mut flags = LittleEndian::read_u16(
|
||||
&page[pg_constants::PD_FLAGS_OFFSET..pg_constants::PD_FLAGS_OFFSET + 2],
|
||||
);
|
||||
|
||||
let byteno: usize = ((xid as u32 % pg_constants::CLOG_XACTS_PER_PAGE as u32)
|
||||
/ pg_constants::CLOG_XACTS_PER_BYTE) as usize;
|
||||
|
||||
let bshift: u8 = ((xid % pg_constants::CLOG_XACTS_PER_BYTE)
|
||||
* pg_constants::CLOG_BITS_PER_XACT as u32) as u8;
|
||||
|
||||
page[byteno] =
|
||||
(page[byteno] & !(pg_constants::CLOG_XACT_BITMASK << bshift)) | (status << bshift);
|
||||
if (flags & (pg_constants::PD_HEAP_RELATION | pg_constants::PD_NONHEAP_RELATION)) == 0 {
|
||||
// If the type of relation has not been determined yet,
// then do it now
|
||||
for r in records {
|
||||
let xl_rmid = r.rec[pg_constants::XL_RMID_OFFS];
|
||||
if xl_rmid == pg_constants::RM_HEAP_ID || xl_rmid == pg_constants::RM_HEAP2_ID {
|
||||
flags |= pg_constants::PD_HEAP_RELATION;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (flags & pg_constants::PD_HEAP_RELATION) == 0 {
|
||||
flags |= pg_constants::PD_NONHEAP_RELATION;
|
||||
}
|
||||
LittleEndian::write_u16(
|
||||
&mut page[pg_constants::PD_FLAGS_OFFSET..pg_constants::PD_FLAGS_OFFSET + 2],
|
||||
flags,
|
||||
);
|
||||
}
|
||||
if (flags & pg_constants::PD_HEAP_RELATION) != 0 {
|
||||
// Set hint bits for heap relation page
|
||||
let pd_lower = LittleEndian::read_u16(
|
||||
&page[pg_constants::PD_LOWER_OFFSET..pg_constants::PD_LOWER_OFFSET + 2],
|
||||
) as usize;
|
||||
let mut tid_offs = pg_constants::SIZE_OF_PAGE_HEADER_DATA;
|
||||
while tid_offs < pd_lower {
|
||||
let tid = LittleEndian::read_u32(&page[tid_offs..tid_offs + 4]);
|
||||
let lp_off = (tid & 0x7FFF) as usize;
|
||||
if ((tid >> 15) & 3) == pg_constants::LP_NORMAL {
|
||||
// normal item pointer
|
||||
let t_xmin = LittleEndian::read_u32(
|
||||
&page[lp_off + pg_constants::T_XMIN_OFFS
|
||||
..lp_off + pg_constants::T_XMIN_OFFS + 4],
|
||||
);
|
||||
let t_xmax = LittleEndian::read_u32(
|
||||
&page[lp_off + pg_constants::T_XMAX_OFFS
|
||||
..lp_off + pg_constants::T_XMAX_OFFS + 4],
|
||||
);
|
||||
let mut t_infomask = LittleEndian::read_u16(
|
||||
&page[lp_off + pg_constants::T_INFOMASK_OFFS
|
||||
..lp_off + pg_constants::T_INFOMASK_OFFS + 2],
|
||||
);
|
||||
if (t_infomask
|
||||
& (pg_constants::HEAP_XMIN_COMMITTED | pg_constants::HEAP_XMIN_INVALID))
|
||||
== 0
|
||||
&& t_xmin != 0
|
||||
{
|
||||
let status = self.xid_status(timeline, t_xmin, lsn);
|
||||
if status == pg_constants::TRANSACTION_STATUS_COMMITTED {
|
||||
t_infomask |= pg_constants::HEAP_XMIN_COMMITTED;
|
||||
} else if status == pg_constants::TRANSACTION_STATUS_ABORTED {
|
||||
t_infomask |= pg_constants::HEAP_XMIN_INVALID;
|
||||
}
|
||||
LittleEndian::write_u16(
|
||||
&mut page[lp_off + pg_constants::T_INFOMASK_OFFS
|
||||
..lp_off + pg_constants::T_INFOMASK_OFFS + 2],
|
||||
t_infomask,
|
||||
);
|
||||
}
|
||||
if (t_infomask
|
||||
& (pg_constants::HEAP_XMAX_COMMITTED
|
||||
| pg_constants::HEAP_XMAX_INVALID
|
||||
| pg_constants::HEAP_XMAX_IS_MULTI))
|
||||
== 0
|
||||
&& t_xmax != 0
|
||||
{
|
||||
let status = self.xid_status(timeline, t_xmax, lsn);
|
||||
if status == pg_constants::TRANSACTION_STATUS_COMMITTED {
|
||||
t_infomask |= pg_constants::HEAP_XMAX_COMMITTED;
|
||||
} else if status == pg_constants::TRANSACTION_STATUS_ABORTED {
|
||||
t_infomask |= pg_constants::HEAP_XMAX_INVALID;
|
||||
}
|
||||
LittleEndian::write_u16(
|
||||
&mut page[lp_off + pg_constants::T_INFOMASK_OFFS
|
||||
..lp_off + pg_constants::T_INFOMASK_OFFS + 2],
|
||||
t_infomask,
|
||||
);
|
||||
}
|
||||
}
|
||||
tid_offs += 4;
|
||||
}
|
||||
}
|
||||
}
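// Illustrative check (not part of the original file) of the line pointer bit
// layout assumed above: an ItemIdData packs lp_off in the low 15 bits, lp_flags
// in the next 2 bits and lp_len in the high 15 bits, with LP_NORMAL assumed = 1.
#[allow(dead_code)]
fn example_line_pointer_layout() {
    let lp_off: u32 = 0x1f80;
    let lp_flags: u32 = 1; // assumed LP_NORMAL
    let lp_len: u32 = 121;
    let item_id = lp_off | (lp_flags << 15) | (lp_len << 17);
    assert_eq!(item_id & 0x7FFF, lp_off); // same mask as the code above
    assert_eq!((item_id >> 15) & 3, lp_flags);
}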
|
||||
|
||||
///
|
||||
@@ -213,10 +368,11 @@ impl WalRedoManagerInternal {
|
||||
///
|
||||
async fn handle_apply_request(
|
||||
&self,
|
||||
process: &WalRedoProcess,
|
||||
process: &PostgresRedoProcess,
|
||||
request: &WalRedoRequest,
|
||||
) -> Result<Bytes, WalRedoError> {
|
||||
let tag = request.tag;
|
||||
let rel = request.rel;
|
||||
let blknum = request.blknum;
|
||||
let lsn = request.lsn;
|
||||
let base_img = request.base_img.clone();
|
||||
let records = &request.records;
|
||||
@@ -226,17 +382,28 @@ impl WalRedoManagerInternal {
|
||||
let start = Instant::now();
|
||||
|
||||
let apply_result: Result<Bytes, Error>;
|
||||
if tag.rel.forknum == pg_constants::PG_XACT_FORKNUM {
|
||||
if let RelishTag::Relation(rel) = rel {
|
||||
// Relational WAL records are applied using wal-redo-postgres
|
||||
let buf_tag = BufferTag { rel, blknum };
|
||||
apply_result = process.apply_wal_records(buf_tag, base_img, records).await;
|
||||
} else {
|
||||
// Non-relational WAL records are handled here, with custom code that has the
|
||||
// same effects as the corresponding Postgres WAL redo function.
|
||||
const ZERO_PAGE: [u8; 8192] = [0u8; 8192];
|
||||
let mut page = BytesMut::new();
|
||||
if let Some(fpi) = base_img {
|
||||
// If full-page image is provided, then use it...
|
||||
page.extend_from_slice(&fpi[..]);
|
||||
} else {
|
||||
// otherwise initialize page with zeros
|
||||
page.extend_from_slice(&ZERO_PAGE);
|
||||
}
|
||||
// Apply all collected WAL records
|
||||
for record in records {
|
||||
let mut buf = record.rec.clone();
|
||||
|
||||
WAL_REDO_RECORD_COUNTER.inc();
|
||||
|
||||
// 1. Parse XLogRecord struct
|
||||
// FIXME: refactor to avoid code duplication.
|
||||
let xlogrec = XLogRecord::from_bytes(&mut buf);
|
||||
@@ -249,85 +416,125 @@ impl WalRedoManagerInternal {
|
||||
buf.advance(skip);
|
||||
}
|
||||
|
||||
if xlogrec.xl_rmid == pg_constants::RM_CLOG_ID {
|
||||
let info = xlogrec.xl_info & !pg_constants::XLR_INFO_MASK;
|
||||
if info == pg_constants::CLOG_ZEROPAGE {
|
||||
page.clone_from_slice(&ZERO_PAGE);
|
||||
if xlogrec.xl_rmid == pg_constants::RM_XACT_ID {
|
||||
// Transaction manager stuff
|
||||
let rec_segno = match rel {
|
||||
RelishTag::Slru { slru, segno } => {
|
||||
if slru != SlruKind::Clog {
|
||||
panic!("Not valid XACT relish tag {:?}", rel);
|
||||
}
|
||||
segno
|
||||
}
|
||||
_ => panic!("Not valid XACT relish tag {:?}", rel),
|
||||
};
|
||||
let parsed_xact =
|
||||
XlXactParsedRecord::decode(&mut buf, xlogrec.xl_xid, xlogrec.xl_info);
|
||||
if parsed_xact.info == pg_constants::XLOG_XACT_COMMIT
|
||||
|| parsed_xact.info == pg_constants::XLOG_XACT_COMMIT_PREPARED
|
||||
{
|
||||
transaction_id_set_status(
|
||||
parsed_xact.xid,
|
||||
pg_constants::TRANSACTION_STATUS_COMMITTED,
|
||||
&mut page,
|
||||
);
|
||||
for subxact in &parsed_xact.subxacts {
|
||||
let pageno = *subxact as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
// only update xids on the requested page
|
||||
if rec_segno == segno && blknum == rpageno {
|
||||
transaction_id_set_status(
|
||||
*subxact,
|
||||
pg_constants::TRANSACTION_STATUS_COMMITTED,
|
||||
&mut page,
|
||||
);
|
||||
}
|
||||
}
|
||||
} else if parsed_xact.info == pg_constants::XLOG_XACT_ABORT
|
||||
|| parsed_xact.info == pg_constants::XLOG_XACT_ABORT_PREPARED
|
||||
{
|
||||
transaction_id_set_status(
|
||||
parsed_xact.xid,
|
||||
pg_constants::TRANSACTION_STATUS_ABORTED,
|
||||
&mut page,
|
||||
);
|
||||
for subxact in &parsed_xact.subxacts {
|
||||
let pageno = *subxact as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
// only update xids on the requested page
|
||||
if rec_segno == segno && blknum == rpageno {
|
||||
transaction_id_set_status(
|
||||
*subxact,
|
||||
pg_constants::TRANSACTION_STATUS_ABORTED,
|
||||
&mut page,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if xlogrec.xl_rmid == pg_constants::RM_XACT_ID {
|
||||
let info = xlogrec.xl_info & pg_constants::XLOG_XACT_OPMASK;
|
||||
let mut status = 0;
|
||||
if info == pg_constants::XLOG_XACT_COMMIT {
|
||||
status = pg_constants::TRANSACTION_STATUS_COMMITTED;
|
||||
self.transaction_id_set_status_bit(xlogrec.xl_xid, status, &mut page);
|
||||
//handle subtrans
|
||||
let _xact_time = buf.get_i64_le();
|
||||
let mut xinfo = 0;
|
||||
if xlogrec.xl_info & pg_constants::XLOG_XACT_HAS_INFO != 0 {
|
||||
xinfo = buf.get_u32_le();
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_DBINFO != 0 {
|
||||
let _dbid = buf.get_u32_le();
|
||||
let _tsid = buf.get_u32_le();
|
||||
}
|
||||
}
|
||||
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_SUBXACTS != 0 {
|
||||
let nsubxacts = buf.get_i32_le();
|
||||
for _i in 0..nsubxacts {
|
||||
let subxact = buf.get_u32_le();
|
||||
let blkno = subxact as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
// only update xids on the requested page
|
||||
if tag.blknum == blkno {
|
||||
status = pg_constants::TRANSACTION_STATUS_SUB_COMMITTED;
|
||||
self.transaction_id_set_status_bit(subxact, status, &mut page);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if info == pg_constants::XLOG_XACT_ABORT {
|
||||
status = pg_constants::TRANSACTION_STATUS_ABORTED;
|
||||
self.transaction_id_set_status_bit(xlogrec.xl_xid, status, &mut page);
|
||||
//handle subtrans
|
||||
let _xact_time = buf.get_i64_le();
|
||||
let mut xinfo = 0;
|
||||
if xlogrec.xl_info & pg_constants::XLOG_XACT_HAS_INFO != 0 {
|
||||
xinfo = buf.get_u32_le();
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_DBINFO != 0 {
|
||||
let _dbid = buf.get_u32_le();
|
||||
let _tsid = buf.get_u32_le();
|
||||
}
|
||||
}
|
||||
|
||||
if xinfo & pg_constants::XACT_XINFO_HAS_SUBXACTS != 0 {
|
||||
let nsubxacts = buf.get_i32_le();
|
||||
for _i in 0..nsubxacts {
|
||||
let subxact = buf.get_u32_le();
|
||||
let blkno = subxact as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
// only update xids on the requested page
|
||||
if tag.blknum == blkno {
|
||||
status = pg_constants::TRANSACTION_STATUS_ABORTED;
|
||||
self.transaction_id_set_status_bit(subxact, status, &mut page);
|
||||
} else if xlogrec.xl_rmid == pg_constants::RM_MULTIXACT_ID {
|
||||
// Multixact operations
|
||||
let info = xlogrec.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
|
||||
if info == pg_constants::XLOG_MULTIXACT_CREATE_ID {
|
||||
let xlrec = XlMultiXactCreate::decode(&mut buf);
|
||||
if let RelishTag::Slru {
|
||||
slru,
|
||||
segno: rec_segno,
|
||||
} = rel
|
||||
{
|
||||
if slru == SlruKind::MultiXactMembers {
|
||||
for i in 0..xlrec.nmembers {
|
||||
let pageno =
|
||||
i / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
|
||||
let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
|
||||
if segno == rec_segno && rpageno == blknum {
|
||||
// update only target block
|
||||
let offset = xlrec.moff + i;
|
||||
let memberoff = mx_offset_to_member_offset(offset);
|
||||
let flagsoff = mx_offset_to_flags_offset(offset);
|
||||
let bshift = mx_offset_to_flags_bitshift(offset);
|
||||
let mut flagsval =
|
||||
LittleEndian::read_u32(&page[flagsoff..flagsoff + 4]);
|
||||
flagsval &= !(((1
|
||||
<< pg_constants::MXACT_MEMBER_BITS_PER_XACT)
|
||||
- 1)
|
||||
<< bshift);
|
||||
flagsval |= xlrec.members[i as usize].status << bshift;
|
||||
LittleEndian::write_u32(
|
||||
&mut page[flagsoff..flagsoff + 4],
|
||||
flagsval,
|
||||
);
|
||||
LittleEndian::write_u32(
|
||||
&mut page[memberoff..memberoff + 4],
|
||||
xlrec.members[i as usize].xid,
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Multixact offsets SLRU
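// Each multixact id occupies one 4-byte slot in the offsets SLRU page; the slot
// index within the page is (mid % MULTIXACT_OFFSETS_PER_PAGE), and multiplying by
// 4 turns that into a byte offset (note: % and * bind equally and associate left,
// so the expression below is ((mid % per_page) * 4)).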
let offs = (xlrec.mid
|
||||
% pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32
|
||||
* 4) as usize;
|
||||
LittleEndian::write_u32(&mut page[offs..offs + 4], xlrec.moff);
|
||||
}
|
||||
} else {
|
||||
panic!();
|
||||
}
|
||||
} else {
|
||||
trace!("handle_apply_request for RM_XACT_ID-{} NOT SUPPORTED YET. RETURN. lsn {} main_data_offset {}, rec.len {}",
|
||||
status,
|
||||
record.lsn,
|
||||
record.main_data_offset, record.rec.len());
|
||||
panic!();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
apply_result = Ok::<Bytes, Error>(page.freeze());
|
||||
} else {
|
||||
apply_result = process.apply_wal_records(tag, base_img, records).await;
|
||||
}
|
||||
|
||||
let duration = start.elapsed();
|
||||
|
||||
let result: Result<Bytes, WalRedoError>;
|
||||
|
||||
trace!(
|
||||
debug!(
|
||||
"applied {} WAL records in {} ms to reconstruct page image at LSN {}",
|
||||
nrecords,
|
||||
duration.as_millis(),
|
||||
@@ -348,24 +555,41 @@ impl WalRedoManagerInternal {
|
||||
}
|
||||
}
|
||||
|
||||
struct WalRedoProcess {
|
||||
///
|
||||
/// Handle to the Postgres WAL redo process
|
||||
///
|
||||
struct PostgresRedoProcess {
|
||||
stdin: RefCell<ChildStdin>,
|
||||
stdout: RefCell<ChildStdout>,
|
||||
}
|
||||
|
||||
impl WalRedoProcess {
|
||||
impl PostgresRedoProcess {
|
||||
//
|
||||
// Start postgres binary in special WAL redo mode.
|
||||
//
|
||||
// Tests that run the pageserver binary set PG_BIN_DIR and PG_LIB_DIR
// so that WalRedo starts the right postgres. We may later switch to
// setting the same things in the pageserver config file.
|
||||
async fn launch(datadir: &str) -> Result<WalRedoProcess, Error> {
|
||||
// Create empty data directory for wal-redo postgres deleting old one.
|
||||
fs::remove_dir_all(datadir).ok();
|
||||
let initdb = Command::new("initdb")
|
||||
.args(&["-D", datadir])
|
||||
async fn launch(
|
||||
conf: &PageServerConf,
|
||||
tenantid: &ZTenantId,
|
||||
) -> Result<PostgresRedoProcess, Error> {
|
||||
// FIXME: We need a dummy Postgres cluster to run the process in. Currently, we
|
||||
// just create one with constant name. That fails if you try to launch more than
|
||||
// one WAL redo manager concurrently.
|
||||
let datadir = conf.tenant_path(tenantid).join("wal-redo-datadir");
|
||||
|
||||
// Create empty data directory for wal-redo postgres, deleting old one first.
|
||||
if datadir.exists() {
|
||||
info!("directory {:?} exists, removing", &datadir);
|
||||
if let Err(e) = fs::remove_dir_all(&datadir) {
|
||||
error!("could not remove old wal-redo-datadir: {:?}", e);
|
||||
}
|
||||
}
|
||||
info!("running initdb in {:?}", datadir.display());
|
||||
let initdb = Command::new(conf.pg_bin_dir().join("initdb"))
|
||||
.args(&["-D", datadir.to_str().unwrap()])
|
||||
.arg("-N")
|
||||
.env_clear()
|
||||
.env("LD_LIBRARY_PATH", conf.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", conf.pg_lib_dir().to_str().unwrap())
|
||||
.output()
|
||||
.await
|
||||
.expect("failed to execute initdb");
|
||||
@@ -383,18 +607,26 @@ impl WalRedoProcess {
|
||||
.open(PathBuf::from(&datadir).join("postgresql.conf"))?;
|
||||
config.write_all(b"shared_buffers=128kB\n")?;
|
||||
config.write_all(b"fsync=off\n")?;
|
||||
config.write_all(b"shared_preload_libraries=zenith\n")?;
|
||||
config.write_all(b"zenith.wal_redo=on\n")?;
|
||||
}
|
||||
// Start postgres itself
|
||||
let mut child = Command::new("postgres")
|
||||
let mut child = Command::new(conf.pg_bin_dir().join("postgres"))
|
||||
.arg("--wal-redo")
|
||||
.stdin(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.stdout(Stdio::piped())
|
||||
.env("PGDATA", datadir)
|
||||
.env_clear()
|
||||
.env("LD_LIBRARY_PATH", conf.pg_lib_dir().to_str().unwrap())
|
||||
.env("DYLD_LIBRARY_PATH", conf.pg_lib_dir().to_str().unwrap())
|
||||
.env("PGDATA", &datadir)
|
||||
.spawn()
|
||||
.expect("postgres --wal-redo command failed to start");
|
||||
|
||||
info!("launched WAL redo postgres process on {}", datadir);
|
||||
info!(
|
||||
"launched WAL redo postgres process on {:?}",
|
||||
datadir.display()
|
||||
);
|
||||
|
||||
let stdin = child.stdin.take().expect("failed to open child's stdin");
|
||||
let stderr = child.stderr.take().expect("failed to open child's stderr");
|
||||
@@ -421,7 +653,7 @@ impl WalRedoProcess {
|
||||
};
|
||||
tokio::spawn(f_stderr);
|
||||
|
||||
Ok(WalRedoProcess {
|
||||
Ok(PostgresRedoProcess {
|
||||
stdin: RefCell::new(stdin),
|
||||
stdout: RefCell::new(stdout),
|
||||
})
|
||||
@@ -435,7 +667,7 @@ impl WalRedoProcess {
|
||||
&self,
|
||||
tag: BufferTag,
|
||||
base_img: Option<Bytes>,
|
||||
records: &Vec<WALRecord>,
|
||||
records: &[WALRecord],
|
||||
) -> Result<Bytes, std::io::Error> {
|
||||
let mut stdin = self.stdin.borrow_mut();
|
||||
let mut stdout = self.stdout.borrow_mut();
|
||||
@@ -467,6 +699,8 @@ impl WalRedoProcess {
|
||||
for rec in records.iter() {
|
||||
let r = rec.clone();
|
||||
|
||||
WAL_REDO_RECORD_COUNTER.inc();
|
||||
|
||||
stdin
|
||||
.write_all(&build_apply_record_msg(r.lsn, r.rec))
|
||||
.await?;
|
||||
@@ -493,7 +727,7 @@ impl WalRedoProcess {
|
||||
Ok::<[u8; 8192], Error>(buf)
|
||||
};
|
||||
|
||||
let res = futures::try_join!(f_stdout, f_stdin)?;
|
||||
let res = tokio::try_join!(f_stdout, f_stdin)?;
|
||||
|
||||
let buf = res.0;
|
||||
|
||||
@@ -506,14 +740,31 @@ impl WalRedoProcess {
|
||||
// explanation of the protocol.
|
||||
|
||||
fn build_begin_redo_for_block_msg(tag: BufferTag) -> Bytes {
|
||||
let len = 4 + 5 * 4;
|
||||
let len = 4 + 1 + 4 * 4;
|
||||
let mut buf = BytesMut::with_capacity(1 + len);
|
||||
|
||||
buf.put_u8(b'B');
|
||||
buf.put_u32(len as u32);
|
||||
tag.pack(&mut buf);
|
||||
|
||||
assert!(buf.len() == 1 + len);
|
||||
// FIXME: this is a temporary hack that should go away when we refactor
|
||||
// the postgres protocol serialization + handlers.
|
||||
//
|
||||
// BytesMut is a dynamic growable buffer, used a lot in tokio code but
|
||||
// not in the std library. To write to a BytesMut from a serde serializer,
|
||||
// we need to either:
|
||||
// - pre-allocate the required buffer space. This is annoying because we
|
||||
// shouldn't care what the exact serialized size is-- that's the
|
||||
// serializer's job.
|
||||
// - Or, we need to create a temporary "writer" (which implements the
|
||||
// `Write` trait). It's a bit awkward, because the writer consumes the
|
||||
// underlying BytesMut, and we need to extract it later with
|
||||
// `into_inner`.
|
||||
let mut writer = buf.writer();
|
||||
tag.ser_into(&mut writer)
|
||||
.expect("serialize BufferTag should always succeed");
|
||||
let buf = writer.into_inner();
|
||||
|
||||
debug_assert!(buf.len() == 1 + len);
|
||||
|
||||
buf.freeze()
|
||||
}
|
||||
@@ -521,15 +772,18 @@ fn build_begin_redo_for_block_msg(tag: BufferTag) -> Bytes {
|
||||
fn build_push_page_msg(tag: BufferTag, base_img: Bytes) -> Bytes {
|
||||
assert!(base_img.len() == 8192);
|
||||
|
||||
let len = 4 + 5 * 4 + base_img.len();
|
||||
let len = 4 + 1 + 4 * 4 + base_img.len();
|
||||
let mut buf = BytesMut::with_capacity(1 + len);
|
||||
|
||||
buf.put_u8(b'P');
|
||||
buf.put_u32(len as u32);
|
||||
tag.pack(&mut buf);
|
||||
let mut writer = buf.writer();
|
||||
tag.ser_into(&mut writer)
|
||||
.expect("serialize BufferTag should always succeed");
|
||||
let mut buf = writer.into_inner();
|
||||
buf.put(base_img);
|
||||
|
||||
assert!(buf.len() == 1 + len);
|
||||
debug_assert!(buf.len() == 1 + len);
|
||||
|
||||
buf.freeze()
|
||||
}
|
||||
@@ -543,20 +797,23 @@ fn build_apply_record_msg(endlsn: Lsn, rec: Bytes) -> Bytes {
|
||||
buf.put_u64(endlsn.0);
|
||||
buf.put(rec);
|
||||
|
||||
assert!(buf.len() == 1 + len);
|
||||
debug_assert!(buf.len() == 1 + len);
|
||||
|
||||
buf.freeze()
|
||||
}
|
||||
|
||||
fn build_get_page_msg(tag: BufferTag) -> Bytes {
|
||||
let len = 4 + 5 * 4;
|
||||
let len = 4 + 1 + 4 * 4;
|
||||
let mut buf = BytesMut::with_capacity(1 + len);
|
||||
|
||||
buf.put_u8(b'G');
|
||||
buf.put_u32(len as u32);
|
||||
tag.pack(&mut buf);
|
||||
let mut writer = buf.writer();
|
||||
tag.ser_into(&mut writer)
|
||||
.expect("serialize BufferTag should always succeed");
|
||||
let buf = writer.into_inner();
|
||||
|
||||
assert!(buf.len() == 1 + len);
|
||||
debug_assert!(buf.len() == 1 + len);
|
||||
|
||||
buf.freeze()
|
||||
}
|
||||
|
||||
@@ -9,12 +9,19 @@ edition = "2018"
[dependencies]
chrono = "0.4.19"
rand = "0.8.3"
regex = "1.4.5"
bytes = "1.0.1"
byteorder = "1.4.3"
anyhow = "1.0"
crc32c = "0.6.0"
hex = "0.4.3"
lazy_static = "1.4"
log = "0.4.14"
memoffset = "0.6.2"
thiserror = "1.0"
serde = { version = "1.0", features = ["derive"] }
workspace_hack = { path = "../workspace_hack" }
zenith_utils = { path = "../zenith_utils" }

[build-dependencies]
bindgen = "0.57"
bindgen = "0.59.1"
@@ -1,3 +1,20 @@
This module contains utility functions for interacting with PostgreSQL
file formats.
This module contains utilities for working with PostgreSQL file
formats. It's a collection of structs that are auto-generated from the
PostgreSQL header files using bindgen, and Rust functions to read and
manipulate them.

There are also a bunch of constants in `pg_constants.rs` that are copied
from various PostgreSQL headers, rather than auto-generated. They mostly
should be auto-generated too, but that's a TODO.

The PostgreSQL on-disk file format is not portable across different
CPU architectures and operating systems. It is also subject to change
in each major PostgreSQL version. Currently, this module is based on
PostgreSQL v14, but in the future we will probably need a separate
copy for each PostgreSQL version.

TODO: Currently, there is also some code that deals with WAL records
in pageserver/src/waldecoder.rs. That should be moved into this
module. The rest of the codebase should not have intimate knowledge of
PostgreSQL file formats or WAL layout, that knowledge should be
encapsulated in this module.
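As a quick illustration of the kind of helpers this module provides, here is a small usage sketch (not part of the diff) built on the `relfile_utils` functions added elsewhere in this change:

// Hypothetical usage sketch; parse_relfilename, forknumber_to_name and
// FilePathError come from the relfile_utils module added in this diff.
use postgres_ffi::relfile_utils::{forknumber_to_name, parse_relfilename, FilePathError};

fn main() -> Result<(), FilePathError> {
    // "16384_vm.3" is relfilenode 16384, visibility-map fork, segment 3.
    let (relnode, forknum, segno) = parse_relfilename("16384_vm.3")?;
    assert_eq!((relnode, forknum, segno), (16384, 2, 3));

    // Map the fork number back to the file-name suffix.
    assert_eq!(forknumber_to_name(forknum), Some("vm"));
    Ok(())
}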
@@ -3,6 +3,44 @@ extern crate bindgen;
|
||||
use std::env;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use bindgen::callbacks::ParseCallbacks;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct PostgresFfiCallbacks;
|
||||
|
||||
impl ParseCallbacks for PostgresFfiCallbacks {
|
||||
fn include_file(&self, filename: &str) {
|
||||
// This does the equivalent of passing bindgen::CargoCallbacks
|
||||
// to the builder .parse_callbacks() method.
|
||||
let cargo_callbacks = bindgen::CargoCallbacks;
|
||||
cargo_callbacks.include_file(filename)
|
||||
}
|
||||
|
||||
// Add any custom #[derive] attributes to the data structures that bindgen
|
||||
// creates.
|
||||
fn add_derives(&self, name: &str) -> Vec<String> {
|
||||
// This is the list of data structures that we want to serialize/deserialize.
|
||||
let serde_list = [
|
||||
"XLogRecord",
|
||||
"XLogPageHeaderData",
|
||||
"XLogLongPageHeaderData",
|
||||
"CheckPoint",
|
||||
"FullTransactionId",
|
||||
"ControlFileData",
|
||||
];
|
||||
|
||||
if serde_list.contains(&name) {
|
||||
vec![
|
||||
"Default".into(), // Default allows us to easily fill the padding fields with 0.
|
||||
"Serialize".into(),
|
||||
"Deserialize".into(),
|
||||
]
|
||||
} else {
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Tell cargo to invalidate the built crate whenever the wrapper changes
|
||||
println!("cargo:rerun-if-changed=pg_control_ffi.h");
|
||||
@@ -11,27 +49,50 @@ fn main() {
|
||||
// to bindgen, and lets you build up options for
|
||||
// the resulting bindings.
|
||||
let bindings = bindgen::Builder::default()
|
||||
// The input header we would like to generate
|
||||
// bindings for.
|
||||
//
|
||||
// All the needed PostgreSQL headers are included from 'pg_control_ffi.h'
|
||||
//
|
||||
.header("pg_control_ffi.h")
|
||||
//
|
||||
// Tell cargo to invalidate the built crate whenever any of the
|
||||
// included header files changed.
|
||||
.parse_callbacks(Box::new(bindgen::CargoCallbacks))
|
||||
.whitelist_type("ControlFileData")
|
||||
.whitelist_var("PG_CONTROL_FILE_SIZE")
|
||||
.whitelist_var("PG_CONTROLFILEDATA_OFFSETOF_CRC")
|
||||
.whitelist_type("DBState")
|
||||
//
|
||||
.parse_callbacks(Box::new(PostgresFfiCallbacks))
|
||||
//
|
||||
// These are the types and constants that we want to generate bindings for
|
||||
//
|
||||
.allowlist_type("BlockNumber")
|
||||
.allowlist_type("OffsetNumber")
|
||||
.allowlist_type("MultiXactId")
|
||||
.allowlist_type("MultiXactOffset")
|
||||
.allowlist_type("MultiXactStatus")
|
||||
.allowlist_type("ControlFileData")
|
||||
.allowlist_type("CheckPoint")
|
||||
.allowlist_type("FullTransactionId")
|
||||
.allowlist_type("XLogRecord")
|
||||
.allowlist_type("XLogPageHeaderData")
|
||||
.allowlist_type("XLogLongPageHeaderData")
|
||||
.allowlist_var("XLOG_PAGE_MAGIC")
|
||||
.allowlist_var("PG_CONTROL_FILE_SIZE")
|
||||
.allowlist_var("PG_CONTROLFILEDATA_OFFSETOF_CRC")
|
||||
.allowlist_type("DBState")
|
||||
// Because structs are used for serialization, tell bindgen to emit
|
||||
// explicit padding fields.
|
||||
.explicit_padding(true)
|
||||
//
|
||||
// Path to the server include dir. It is in tmp_install/include/server, if you did
|
||||
// "configure --prefix=<path to tmp_install>". But if you used "configure --prefix=/",
|
||||
// and used DESTDIR to move it into tmp_install, then it's in
|
||||
// tmp_install/include/postgresql/server
|
||||
// 'pg_config --includedir-server' would perhaps be the more proper way to find it,
|
||||
// but this will do for now.
|
||||
//
|
||||
.clang_arg("-I../tmp_install/include/server")
|
||||
.clang_arg("-I../tmp_install/include/postgresql/server")
|
||||
//
|
||||
// Finish the builder and generate the bindings.
|
||||
//
|
||||
.generate()
|
||||
// Unwrap the Result and panic on failure.
|
||||
.expect("Unable to generate bindings");
|
||||
|
||||
// Write the bindings to the $OUT_DIR/bindings.rs file.
|
||||
|
||||
@@ -1,4 +1,13 @@
/*
 * This header file is the input to bindgen. It includes all the
 * PostgreSQL headers that we need to auto-generate Rust structs
 * from. If you need to expose a new struct to Rust code, add the
 * header here, and whitelist the struct in the build.rs file.
 */
#include "c.h"
#include "catalog/pg_control.h"
#include "access/xlog_internal.h"

const uint32 PG_CONTROLFILEDATA_OFFSETOF_CRC = offsetof(ControlFileData, crc);
#include "storage/block.h"
#include "storage/off.h"
#include "access/multixact.h"
postgres_ffi/samples/pg_hba.conf (new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
# PostgreSQL Client Authentication Configuration File
|
||||
# ===================================================
|
||||
#
|
||||
# Refer to the "Client Authentication" section in the PostgreSQL
|
||||
# documentation for a complete description of this file. A short
|
||||
# synopsis follows.
|
||||
#
|
||||
# This file controls: which hosts are allowed to connect, how clients
|
||||
# are authenticated, which PostgreSQL user names they can use, which
|
||||
# databases they can access. Records take one of these forms:
|
||||
#
|
||||
# local DATABASE USER METHOD [OPTIONS]
|
||||
# host DATABASE USER ADDRESS METHOD [OPTIONS]
|
||||
# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
|
||||
# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
|
||||
# hostgssenc DATABASE USER ADDRESS METHOD [OPTIONS]
|
||||
# hostnogssenc DATABASE USER ADDRESS METHOD [OPTIONS]
|
||||
#
|
||||
# (The uppercase items must be replaced by actual values.)
|
||||
#
|
||||
# The first field is the connection type:
|
||||
# - "local" is a Unix-domain socket
|
||||
# - "host" is a TCP/IP socket (encrypted or not)
|
||||
# - "hostssl" is a TCP/IP socket that is SSL-encrypted
|
||||
# - "hostnossl" is a TCP/IP socket that is not SSL-encrypted
|
||||
# - "hostgssenc" is a TCP/IP socket that is GSSAPI-encrypted
|
||||
# - "hostnogssenc" is a TCP/IP socket that is not GSSAPI-encrypted
|
||||
#
|
||||
# DATABASE can be "all", "sameuser", "samerole", "replication", a
|
||||
# database name, or a comma-separated list thereof. The "all"
|
||||
# keyword does not match "replication". Access to replication
|
||||
# must be enabled in a separate record (see example below).
|
||||
#
|
||||
# USER can be "all", a user name, a group name prefixed with "+", or a
|
||||
# comma-separated list thereof. In both the DATABASE and USER fields
|
||||
# you can also write a file name prefixed with "@" to include names
|
||||
# from a separate file.
|
||||
#
|
||||
# ADDRESS specifies the set of hosts the record matches. It can be a
|
||||
# host name, or it is made up of an IP address and a CIDR mask that is
|
||||
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
|
||||
# specifies the number of significant bits in the mask. A host name
|
||||
# that starts with a dot (.) matches a suffix of the actual host name.
|
||||
# Alternatively, you can write an IP address and netmask in separate
|
||||
# columns to specify the set of hosts. Instead of a CIDR-address, you
|
||||
# can write "samehost" to match any of the server's own IP addresses,
|
||||
# or "samenet" to match any address in any subnet that the server is
|
||||
# directly connected to.
|
||||
#
|
||||
# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
|
||||
# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
|
||||
# Note that "password" sends passwords in clear text; "md5" or
|
||||
# "scram-sha-256" are preferred since they send encrypted passwords.
|
||||
#
|
||||
# OPTIONS are a set of options for the authentication in the format
|
||||
# NAME=VALUE. The available options depend on the different
|
||||
# authentication methods -- refer to the "Client Authentication"
|
||||
# section in the documentation for a list of which options are
|
||||
# available for which authentication methods.
|
||||
#
|
||||
# Database and user names containing spaces, commas, quotes and other
|
||||
# special characters must be quoted. Quoting one of the keywords
|
||||
# "all", "sameuser", "samerole" or "replication" makes the name lose
|
||||
# its special character, and just match a database or username with
|
||||
# that name.
|
||||
#
|
||||
# This file is read on server startup and when the server receives a
|
||||
# SIGHUP signal. If you edit the file on a running system, you have to
|
||||
# SIGHUP the server for the changes to take effect, run "pg_ctl reload",
|
||||
# or execute "SELECT pg_reload_conf()".
|
||||
#
|
||||
# Put your actual configuration here
|
||||
# ----------------------------------
|
||||
#
|
||||
# If you want to allow non-local connections, you need to add more
|
||||
# "host" records. In that case you will also need to make PostgreSQL
|
||||
# listen on a non-local interface via the listen_addresses
|
||||
# configuration parameter, or via the -i or -h command line switches.
|
||||
|
||||
# CAUTION: Configuring the system for local "trust" authentication
|
||||
# allows any local user to connect as any PostgreSQL user, including
|
||||
# the database superuser. If you do not trust all your local users,
|
||||
# use another authentication method.
|
||||
|
||||
|
||||
# TYPE DATABASE USER ADDRESS METHOD
|
||||
|
||||
# "local" is for Unix domain socket connections only
|
||||
local all all trust
|
||||
# IPv4 local connections:
|
||||
host all all 127.0.0.1/32 trust
|
||||
# IPv6 local connections:
|
||||
host all all ::1/128 trust
|
||||
# Allow replication connections from localhost, by a user with the
|
||||
# replication privilege.
|
||||
local replication all trust
|
||||
host replication all 127.0.0.1/32 trust
|
||||
host replication all ::1/128 trust
|
||||
postgres_ffi/src/controlfile_utils.rs (new file, 97 lines)
@@ -0,0 +1,97 @@
|
||||
//!
|
||||
//! Utilities for reading and writing the PostgreSQL control file.
|
||||
//!
|
||||
//! The PostgreSQL control file is one the first things that the PostgreSQL
|
||||
//! server reads when it starts up. It indicates whether the server was shut
|
||||
//! down cleanly, or if it crashed or was restored from online backup so that
|
||||
//! WAL recovery needs to be performed. It also contains a copy of the latest
|
||||
//! checkpoint record and its location in the WAL.
|
||||
//!
|
||||
//! The control file also contains fields for detecting whether the
|
||||
//! data directory is compatible with a postgres binary. That includes
|
||||
//! a version number, configuration options that can be set at
|
||||
//! compilation time like the block size, and the platform's alignment
|
||||
//! and endianness information. (The PostgreSQL on-disk file format is
|
||||
//! not portable across platforms.)
|
||||
//!
|
||||
//! The control file is stored in the PostgreSQL data directory, as
|
||||
//! `global/pg_control`. The data stored in it is designed to be smaller than
|
||||
//! 512 bytes, on the assumption that it can be updated atomically. The actual
|
||||
//! file is larger, 8192 bytes, but the rest of it is just filled with zeros.
|
||||
//!
|
||||
//! See src/include/catalog/pg_control.h in the PostgreSQL sources for more
|
||||
//! information. You can use PostgreSQL's pg_controldata utility to view its
|
||||
//! contents.
|
||||
//!
|
||||
use crate::{ControlFileData, PG_CONTROL_FILE_SIZE};
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use bytes::{Bytes, BytesMut};
|
||||
|
||||
/// Equivalent to sizeof(ControlFileData) in C
|
||||
const SIZEOF_CONTROLDATA: usize = std::mem::size_of::<ControlFileData>();
|
||||
|
||||
impl ControlFileData {
|
||||
/// Compute the offset of the `crc` field within the `ControlFileData` struct.
|
||||
/// Equivalent to offsetof(ControlFileData, crc) in C.
|
||||
// Someday this can be const when the right compiler features land.
|
||||
fn pg_control_crc_offset() -> usize {
|
||||
memoffset::offset_of!(ControlFileData, crc)
|
||||
}
|
||||
|
||||
///
|
||||
/// Interpret a slice of bytes as a Postgres control file.
|
||||
///
|
||||
pub fn decode(buf: &[u8]) -> Result<ControlFileData> {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
|
||||
// Check that the slice has the expected size. The control file is
|
||||
// padded with zeros up to a 512 byte sector size, so accept a
|
||||
// larger size too, so that the caller can just pass the whole file
|
||||
// contents without knowing the exact size of the struct.
|
||||
if buf.len() < SIZEOF_CONTROLDATA {
|
||||
bail!("control file is too short");
|
||||
}
|
||||
|
||||
// Compute the expected CRC of the content.
|
||||
let OFFSETOF_CRC = Self::pg_control_crc_offset();
|
||||
let expectedcrc = crc32c::crc32c(&buf[0..OFFSETOF_CRC]);
|
||||
|
||||
// Use serde to deserialize the input as a ControlFileData struct.
|
||||
let controlfile = ControlFileData::des(buf)?;
|
||||
|
||||
// Check the CRC
|
||||
if expectedcrc != controlfile.crc {
|
||||
bail!(
|
||||
"invalid CRC in control file: expected {:08X}, was {:08X}",
|
||||
expectedcrc,
|
||||
controlfile.crc
|
||||
);
|
||||
}
|
||||
|
||||
Ok(controlfile)
|
||||
}
|
||||
|
||||
///
|
||||
/// Convert a struct representing a Postgres control file into raw bytes.
|
||||
///
|
||||
/// The CRC is recomputed to match the contents of the fields.
|
||||
pub fn encode(&self) -> Bytes {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
|
||||
// Serialize into a new buffer.
|
||||
let b = self.ser().unwrap();
|
||||
|
||||
// Recompute the CRC
|
||||
let OFFSETOF_CRC = Self::pg_control_crc_offset();
|
||||
let newcrc = crc32c::crc32c(&b[0..OFFSETOF_CRC]);
|
||||
|
||||
let mut buf = BytesMut::with_capacity(PG_CONTROL_FILE_SIZE as usize);
|
||||
buf.extend_from_slice(&b[0..OFFSETOF_CRC]);
|
||||
buf.extend_from_slice(&newcrc.to_ne_bytes());
|
||||
// Fill the rest of the control file with zeros.
|
||||
buf.resize(PG_CONTROL_FILE_SIZE as usize, 0);
|
||||
|
||||
buf.into()
|
||||
}
|
||||
}
|
||||
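The doc comments above pin down the control file's size and CRC layout; the round-trip sketch below (not part of the diff, using the API this new module exposes) shows how the two helpers are meant to compose:

// Round-trip sketch for the controlfile_utils API added above.
use postgres_ffi::{ControlFileData, PG_CONTROL_FILE_SIZE};

fn roundtrip(raw: &[u8]) -> anyhow::Result<()> {
    // decode() checks the length and verifies that the stored CRC matches
    // crc32c() of the bytes that precede the crc field.
    let controlfile = ControlFileData::decode(raw)?;

    // encode() recomputes the CRC and pads the output back out to the full
    // 8192-byte PG_CONTROL_FILE_SIZE, so it can be written to disk as-is.
    let bytes = controlfile.encode();
    assert_eq!(bytes.len(), PG_CONTROL_FILE_SIZE as usize);

    // Decoding what we just encoded must succeed again.
    ControlFileData::decode(&bytes)?;
    Ok(())
}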
@@ -1,116 +1,36 @@
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
// suppress warnings on rust 1.53 due to bindgen unit tests.
|
||||
// https://github.com/rust-lang/rust-bindgen/issues/1651
|
||||
#![allow(deref_nullptr)]
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
|
||||
|
||||
pub mod controlfile_utils;
|
||||
pub mod nonrelfile_utils;
|
||||
pub mod pg_constants;
|
||||
pub mod relfile_utils;
|
||||
pub mod xlog_utils;
|
||||
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
|
||||
// sizeof(ControlFileData)
|
||||
const SIZEOF_CONTROLDATA: usize = std::mem::size_of::<ControlFileData>();
|
||||
const OFFSETOF_CRC: usize = PG_CONTROLFILEDATA_OFFSETOF_CRC as usize;
|
||||
|
||||
impl ControlFileData {
|
||||
// Initialize an all-zeros ControlFileData struct
|
||||
pub fn new() -> ControlFileData {
|
||||
let controlfile: ControlFileData;
|
||||
|
||||
let b = [0u8; SIZEOF_CONTROLDATA];
|
||||
controlfile =
|
||||
unsafe { std::mem::transmute::<[u8; SIZEOF_CONTROLDATA], ControlFileData>(b) };
|
||||
|
||||
controlfile
|
||||
}
|
||||
// See TransactionIdIsNormal in transam.h
|
||||
pub const fn transaction_id_is_normal(id: TransactionId) -> bool {
|
||||
id > pg_constants::FIRST_NORMAL_TRANSACTION_ID
|
||||
}
|
||||
|
||||
pub fn decode_pg_control(mut buf: Bytes) -> Result<ControlFileData, anyhow::Error> {
|
||||
let mut b: [u8; SIZEOF_CONTROLDATA] = [0u8; SIZEOF_CONTROLDATA];
|
||||
buf.copy_to_slice(&mut b);
|
||||
// See TransactionIdPrecedes in transam.c
|
||||
pub const fn transaction_id_precedes(id1: TransactionId, id2: TransactionId) -> bool {
|
||||
/*
|
||||
* If either ID is a permanent XID then we can just do unsigned
|
||||
* comparison. If both are normal, do a modulo-2^32 comparison.
|
||||
*/
|
||||
|
||||
let controlfile: ControlFileData;
|
||||
|
||||
// TODO: verify CRC
|
||||
let mut data_without_crc: [u8; OFFSETOF_CRC] = [0u8; OFFSETOF_CRC];
|
||||
data_without_crc.copy_from_slice(&b[0..OFFSETOF_CRC]);
|
||||
let expectedcrc = crc32c::crc32c(&data_without_crc);
|
||||
|
||||
controlfile = unsafe { std::mem::transmute::<[u8; SIZEOF_CONTROLDATA], ControlFileData>(b) };
|
||||
|
||||
if expectedcrc != controlfile.crc {
|
||||
anyhow::bail!(
|
||||
"invalid CRC in control file: expected {:08X}, was {:08X}",
|
||||
expectedcrc,
|
||||
controlfile.crc
|
||||
);
|
||||
if !(transaction_id_is_normal(id1)) || !transaction_id_is_normal(id2) {
|
||||
return id1 < id2;
|
||||
}
|
||||
|
||||
Ok(controlfile)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FilePathError {
|
||||
msg: String,
|
||||
}
|
||||
|
||||
impl Error for FilePathError {
|
||||
fn description(&self) -> &str {
|
||||
&self.msg
|
||||
}
|
||||
}
|
||||
|
||||
impl FilePathError {
|
||||
pub fn new(msg: &str) -> FilePathError {
|
||||
FilePathError {
|
||||
msg: msg.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<core::num::ParseIntError> for FilePathError {
|
||||
fn from(e: core::num::ParseIntError) -> Self {
|
||||
return FilePathError {
|
||||
msg: format!("invalid filename: {}", e),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for FilePathError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "invalid filename")
|
||||
}
|
||||
}
|
||||
|
||||
pub fn forkname_to_forknum(forkname: Option<&str>) -> Result<u8, FilePathError> {
|
||||
match forkname {
|
||||
// "main" is not in filenames, it's implicit if the fork name is not present
|
||||
None => Ok(pg_constants::MAIN_FORKNUM),
|
||||
Some("fsm") => Ok(pg_constants::FSM_FORKNUM),
|
||||
Some("vm") => Ok(pg_constants::VISIBILITYMAP_FORKNUM),
|
||||
Some("init") => Ok(pg_constants::INIT_FORKNUM),
|
||||
Some(_) => Err(FilePathError::new("invalid forkname")),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn encode_pg_control(controlfile: ControlFileData) -> Bytes {
|
||||
let b: [u8; SIZEOF_CONTROLDATA];
|
||||
|
||||
b = unsafe { std::mem::transmute::<ControlFileData, [u8; SIZEOF_CONTROLDATA]>(controlfile) };
|
||||
|
||||
// Recompute the CRC
|
||||
let mut data_without_crc: [u8; OFFSETOF_CRC] = [0u8; OFFSETOF_CRC];
|
||||
data_without_crc.copy_from_slice(&b[0..OFFSETOF_CRC]);
|
||||
let newcrc = crc32c::crc32c(&data_without_crc);
|
||||
|
||||
let mut buf = BytesMut::with_capacity(PG_CONTROL_FILE_SIZE as usize);
|
||||
|
||||
buf.extend_from_slice(&b[0..OFFSETOF_CRC]);
|
||||
buf.extend_from_slice(&newcrc.to_ne_bytes());
|
||||
// Fill the rest of the control file with zeros.
|
||||
buf.resize(PG_CONTROL_FILE_SIZE as usize, 0);
|
||||
|
||||
buf.into()
|
||||
let diff = id1.wrapping_sub(id2) as i32;
|
||||
return diff < 0;
|
||||
}
|
||||
|
||||
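The modulo-2^32 comparison above is the classic XID-wraparound trick; a tiny worked example (not part of the diff, assuming these lib.rs functions stay public) makes the behaviour concrete:

// Worked example of transaction_id_precedes() near wraparound.
use postgres_ffi::transaction_id_precedes;

fn main() {
    // Plain case: both XIDs are normal and no wraparound lies between them.
    assert!(transaction_id_precedes(100, 200));

    // Wraparound case: 4_000_000_000u32.wrapping_sub(5) reinterpreted as i32
    // is -294_967_301, so the numerically larger XID still sorts first.
    assert!(transaction_id_precedes(4_000_000_000, 5));

    // A non-normal (permanent) XID falls back to plain unsigned comparison.
    assert!(transaction_id_precedes(0, 3));
}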
postgres_ffi/src/nonrelfile_utils.rs (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
//!
|
||||
//! Common utilities for dealing with PostgreSQL non-relation files.
|
||||
//!
|
||||
use crate::{pg_constants, transaction_id_precedes};
|
||||
use bytes::BytesMut;
|
||||
use log::*;
|
||||
|
||||
use crate::MultiXactId;
|
||||
|
||||
pub fn transaction_id_set_status(xid: u32, status: u8, page: &mut BytesMut) {
|
||||
trace!(
|
||||
"handle_apply_request for RM_XACT_ID-{} (1-commit, 2-abort, 3-sub_commit)",
|
||||
status
|
||||
);
|
||||
|
||||
let byteno: usize = ((xid as u32 % pg_constants::CLOG_XACTS_PER_PAGE as u32)
|
||||
/ pg_constants::CLOG_XACTS_PER_BYTE) as usize;
|
||||
|
||||
let bshift: u8 =
|
||||
((xid % pg_constants::CLOG_XACTS_PER_BYTE) * pg_constants::CLOG_BITS_PER_XACT as u32) as u8;
|
||||
|
||||
page[byteno] =
|
||||
(page[byteno] & !(pg_constants::CLOG_XACT_BITMASK << bshift)) | (status << bshift);
|
||||
}
|
||||
|
||||
pub fn transaction_id_get_status(xid: u32, page: &[u8]) -> u8 {
|
||||
let byteno: usize = ((xid as u32 % pg_constants::CLOG_XACTS_PER_PAGE as u32)
|
||||
/ pg_constants::CLOG_XACTS_PER_BYTE) as usize;
|
||||
|
||||
let bshift: u8 =
|
||||
((xid % pg_constants::CLOG_XACTS_PER_BYTE) * pg_constants::CLOG_BITS_PER_XACT as u32) as u8;
|
||||
|
||||
((page[byteno] >> bshift) & pg_constants::CLOG_XACT_BITMASK) as u8
|
||||
}
|
||||
|
||||
// See CLOGPagePrecedes in clog.c
|
||||
pub const fn clogpage_precedes(page1: u32, page2: u32) -> bool {
|
||||
let mut xid1 = page1 * pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
xid1 += pg_constants::FIRST_NORMAL_TRANSACTION_ID + 1;
|
||||
let mut xid2 = page2 * pg_constants::CLOG_XACTS_PER_PAGE;
|
||||
xid2 += pg_constants::FIRST_NORMAL_TRANSACTION_ID + 1;
|
||||
|
||||
transaction_id_precedes(xid1, xid2)
|
||||
&& transaction_id_precedes(xid1, xid2 + pg_constants::CLOG_XACTS_PER_PAGE - 1)
|
||||
}
|
||||
|
||||
// See SlruMayDeleteSegment() in slru.c
|
||||
pub fn slru_may_delete_clogsegment(segpage: u32, cutoff_page: u32) -> bool {
|
||||
let seg_last_page = segpage + pg_constants::SLRU_PAGES_PER_SEGMENT - 1;
|
||||
|
||||
assert_eq!(segpage % pg_constants::SLRU_PAGES_PER_SEGMENT, 0);
|
||||
|
||||
clogpage_precedes(segpage, cutoff_page) && clogpage_precedes(seg_last_page, cutoff_page)
|
||||
}
|
||||
|
||||
// Multixact utils
|
||||
|
||||
pub fn mx_offset_to_flags_offset(xid: MultiXactId) -> usize {
|
||||
((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32) as u16
|
||||
% pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE
|
||||
* pg_constants::MULTIXACT_MEMBERGROUP_SIZE) as usize
|
||||
}
|
||||
|
||||
pub fn mx_offset_to_flags_bitshift(xid: MultiXactId) -> u16 {
|
||||
(xid as u16) % pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP
|
||||
* pg_constants::MXACT_MEMBER_BITS_PER_XACT
|
||||
}
|
||||
|
||||
/* Location (byte offset within page) of TransactionId of given member */
|
||||
pub fn mx_offset_to_member_offset(xid: MultiXactId) -> usize {
|
||||
mx_offset_to_flags_offset(xid)
|
||||
+ (pg_constants::MULTIXACT_FLAGBYTES_PER_GROUP
|
||||
+ (xid as u16 % pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP) * 4) as usize
|
||||
}
|
||||
|
||||
fn mx_offset_to_member_page(xid: u32) -> u32 {
|
||||
xid / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32
|
||||
}
|
||||
|
||||
pub fn mx_offset_to_member_segment(xid: u32) -> i32 {
|
||||
(mx_offset_to_member_page(xid) / pg_constants::SLRU_PAGES_PER_SEGMENT) as i32
|
||||
}
|
||||
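To make the CLOG byte/bit arithmetic above concrete, here is a small self-contained example (not part of the diff) that sets one transaction's status on a zeroed page and reads it back with the helpers from this file:

// CLOG packing example: 4 XIDs per byte, 2 status bits per XID.
use bytes::BytesMut;
use postgres_ffi::nonrelfile_utils::{transaction_id_get_status, transaction_id_set_status};
use postgres_ffi::pg_constants;

fn main() {
    // One zeroed BLCKSZ-sized CLOG page: every XID starts as IN_PROGRESS.
    let mut page = BytesMut::from(&[0u8; 8192][..]);

    // XID 7 lands in byte 7 / 4 = 1, at bit shift (7 % 4) * 2 = 6.
    transaction_id_set_status(7, pg_constants::TRANSACTION_STATUS_COMMITTED, &mut page);
    assert_eq!(
        transaction_id_get_status(7, &page),
        pg_constants::TRANSACTION_STATUS_COMMITTED
    );

    // The neighbouring XID sharing that byte is untouched.
    assert_eq!(
        transaction_id_get_status(6, &page),
        pg_constants::TRANSACTION_STATUS_IN_PROGRESS
    );
}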
@@ -1,24 +1,41 @@
|
||||
//!
|
||||
//! Misc constants, copied from PostgreSQL headers.
|
||||
//!
|
||||
//! TODO: These probably should be auto-generated using bindgen,
|
||||
//! rather than copied by hand. Although on the other hand, it's nice
|
||||
//! to have them all here in one place, and have the ability to add
|
||||
//! comments on them.
|
||||
//!
|
||||
|
||||
//
|
||||
// From pg_tablespace_d.h
|
||||
//
|
||||
pub const DEFAULTTABLESPACE_OID: u32 = 1663;
|
||||
pub const GLOBALTABLESPACE_OID: u32 = 1664;
|
||||
//TODO maybe use enum?
|
||||
|
||||
//
|
||||
// Fork numbers, from relpath.h
|
||||
//
|
||||
pub const MAIN_FORKNUM: u8 = 0;
|
||||
pub const FSM_FORKNUM: u8 = 1;
|
||||
pub const VISIBILITYMAP_FORKNUM: u8 = 2;
|
||||
pub const INIT_FORKNUM: u8 = 3;
|
||||
//Special values for non-rel files' tags
|
||||
pub const PG_CONTROLFILE_FORKNUM: u8 = 42;
|
||||
pub const PG_FILENODEMAP_FORKNUM: u8 = 43;
|
||||
pub const PG_XACT_FORKNUM: u8 = 44;
|
||||
pub const PG_MXACT_OFFSETS_FORKNUM: u8 = 45;
|
||||
pub const PG_MXACT_MEMBERS_FORKNUM: u8 = 46;
|
||||
|
||||
// From storage_xlog.h
|
||||
pub const SMGR_TRUNCATE_HEAP: u32 = 0x0001;
|
||||
pub const SMGR_TRUNCATE_VM: u32 = 0x0002;
|
||||
pub const SMGR_TRUNCATE_FSM: u32 = 0x0004;
|
||||
|
||||
// from pg_config.h. These can be changed with configure options --with-blocksize=BLOCKSIZE and
|
||||
// --with-segsize=SEGSIZE, but assume the defaults for now.
|
||||
pub const BLCKSZ: u16 = 8192;
|
||||
pub const RELSEG_SIZE: u32 = 1024 * 1024 * 1024 / (BLCKSZ as u32);
|
||||
|
||||
//
|
||||
// constants from clog.h
|
||||
//
|
||||
pub const CLOG_XACTS_PER_BYTE: u32 = 4;
|
||||
pub const CLOG_XACTS_PER_PAGE: u32 = 8192 * CLOG_XACTS_PER_BYTE;
|
||||
pub const CLOG_XACTS_PER_PAGE: u32 = BLCKSZ as u32 * CLOG_XACTS_PER_BYTE;
|
||||
pub const CLOG_BITS_PER_XACT: u8 = 2;
|
||||
pub const CLOG_XACT_BITMASK: u8 = (1 << CLOG_BITS_PER_XACT) - 1;
|
||||
|
||||
@@ -29,6 +46,7 @@ pub const SIZE_OF_PAGE_HEADER: u16 = 24;
|
||||
pub const BITS_PER_HEAPBLOCK: u16 = 2;
|
||||
pub const HEAPBLOCKS_PER_PAGE: u16 = (BLCKSZ - SIZE_OF_PAGE_HEADER) * 8 / BITS_PER_HEAPBLOCK;
|
||||
|
||||
pub const TRANSACTION_STATUS_IN_PROGRESS: u8 = 0x00;
|
||||
pub const TRANSACTION_STATUS_COMMITTED: u8 = 0x01;
|
||||
pub const TRANSACTION_STATUS_ABORTED: u8 = 0x02;
|
||||
pub const TRANSACTION_STATUS_SUB_COMMITTED: u8 = 0x03;
|
||||
@@ -38,13 +56,18 @@ pub const CLOG_TRUNCATE: u8 = 0x10;
|
||||
|
||||
// From xact.h
|
||||
pub const XLOG_XACT_COMMIT: u8 = 0x00;
|
||||
pub const XLOG_XACT_PREPARE: u8 = 0x10;
|
||||
pub const XLOG_XACT_ABORT: u8 = 0x20;
|
||||
pub const XLOG_XACT_COMMIT_PREPARED: u8 = 0x30;
|
||||
pub const XLOG_XACT_ABORT_PREPARED: u8 = 0x40;
|
||||
|
||||
// From slru.h
|
||||
pub const SLRU_PAGES_PER_SEGMENT: u32 = 32;
|
||||
pub const SLRU_SEG_SIZE: usize = BLCKSZ as usize * SLRU_PAGES_PER_SEGMENT as usize;
|
||||
|
||||
/* mask for filtering opcodes out of xl_info */
|
||||
pub const XLOG_XACT_OPMASK: u8 = 0x70;
|
||||
pub const XLOG_HEAP_OPMASK: u8 = 0x70;
|
||||
/* does this record have a 'xinfo' field or not */
|
||||
pub const XLOG_XACT_HAS_INFO: u8 = 0x80;
|
||||
|
||||
@@ -62,15 +85,42 @@ pub const XACT_XINFO_HAS_TWOPHASE: u32 = 1u32 << 4;
|
||||
// pub const XACT_XINFO_HAS_GID: u32 = 1u32 << 7;
|
||||
|
||||
// From pg_control.h and rmgrlist.h
|
||||
pub const XLOG_NEXTOID: u8 = 0x30;
|
||||
pub const XLOG_SWITCH: u8 = 0x40;
|
||||
pub const XLOG_SMGR_TRUNCATE: u8 = 0x20;
|
||||
pub const SMGR_TRUNCATE_HEAP: u32 = 0x0001;
|
||||
pub const XLOG_FPI_FOR_HINT: u8 = 0xA0;
|
||||
pub const XLOG_FPI: u8 = 0xB0;
|
||||
pub const DB_SHUTDOWNED: u32 = 1;
|
||||
|
||||
// From multixact.h
|
||||
pub const FIRST_MULTIXACT_ID: u32 = 1;
|
||||
pub const MAX_MULTIXACT_ID: u32 = 0xFFFFFFFF;
|
||||
pub const MAX_MULTIXACT_OFFSET: u32 = 0xFFFFFFFF;
|
||||
|
||||
pub const XLOG_MULTIXACT_ZERO_OFF_PAGE: u8 = 0x00;
|
||||
pub const XLOG_MULTIXACT_ZERO_MEM_PAGE: u8 = 0x10;
|
||||
pub const XLOG_MULTIXACT_CREATE_ID: u8 = 0x20;
|
||||
pub const XLOG_MULTIXACT_TRUNCATE_ID: u8 = 0x30;
|
||||
|
||||
pub const MULTIXACT_OFFSETS_PER_PAGE: u16 = BLCKSZ / 4;
|
||||
pub const MXACT_MEMBER_BITS_PER_XACT: u16 = 8;
|
||||
pub const MXACT_MEMBER_FLAGS_PER_BYTE: u16 = 1;
|
||||
pub const MULTIXACT_FLAGBYTES_PER_GROUP: u16 = 4;
|
||||
pub const MULTIXACT_MEMBERS_PER_MEMBERGROUP: u16 =
|
||||
MULTIXACT_FLAGBYTES_PER_GROUP * MXACT_MEMBER_FLAGS_PER_BYTE;
|
||||
/* size in bytes of a complete group */
|
||||
pub const MULTIXACT_MEMBERGROUP_SIZE: u16 =
|
||||
4 * MULTIXACT_MEMBERS_PER_MEMBERGROUP + MULTIXACT_FLAGBYTES_PER_GROUP;
|
||||
pub const MULTIXACT_MEMBERGROUPS_PER_PAGE: u16 = BLCKSZ / MULTIXACT_MEMBERGROUP_SIZE;
|
||||
pub const MULTIXACT_MEMBERS_PER_PAGE: u16 =
|
||||
MULTIXACT_MEMBERGROUPS_PER_PAGE * MULTIXACT_MEMBERS_PER_MEMBERGROUP;
|
||||
|
||||
// From heapam_xlog.h
|
||||
pub const XLOG_HEAP_INSERT: u8 = 0x00;
|
||||
pub const XLOG_HEAP_DELETE: u8 = 0x10;
|
||||
pub const XLOG_HEAP_UPDATE: u8 = 0x20;
|
||||
pub const XLOG_HEAP_HOT_UPDATE: u8 = 0x40;
|
||||
pub const XLOG_HEAP_INIT_PAGE: u8 = 0x80;
|
||||
pub const XLOG_HEAP2_VISIBLE: u8 = 0x40;
|
||||
pub const XLOG_HEAP2_MULTI_INSERT: u8 = 0x50;
|
||||
pub const XLH_INSERT_ALL_FROZEN_SET: u8 = (1 << 5) as u8;
|
||||
@@ -104,9 +154,6 @@ pub const XLOG_TBLSPC_DROP: u8 = 0x10;
|
||||
|
||||
pub const SIZEOF_XLOGRECORD: u32 = 24;
|
||||
|
||||
// FIXME:
|
||||
pub const BLCKSZ: u16 = 8192;
|
||||
|
||||
//
|
||||
// from xlogrecord.h
|
||||
//
|
||||
@@ -128,3 +175,80 @@ pub const BKPBLOCK_SAME_REL: u8 = 0x80; /* RelFileNode omitted, same as previous
|
||||
pub const BKPIMAGE_HAS_HOLE: u8 = 0x01; /* page image has "hole" */
|
||||
pub const BKPIMAGE_IS_COMPRESSED: u8 = 0x02; /* page image is compressed */
|
||||
pub const BKPIMAGE_APPLY: u8 = 0x04; /* page image should be restored during replay */
|
||||
|
||||
/* From transam.h */
|
||||
pub const FIRST_NORMAL_TRANSACTION_ID: u32 = 3;
|
||||
pub const INVALID_TRANSACTION_ID: u32 = 0;
|
||||
pub const FIRST_BOOTSTRAP_OBJECT_ID: u32 = 12000;
|
||||
pub const FIRST_NORMAL_OBJECT_ID: u32 = 16384;
|
||||
|
||||
/* FIXME: pageserver should request wal_seg_size from compute node */
|
||||
pub const WAL_SEGMENT_SIZE: usize = 16 * 1024 * 1024;
|
||||
|
||||
pub const XLOG_BLCKSZ: usize = 8192;
|
||||
pub const XLOG_CHECKPOINT_SHUTDOWN: u8 = 0x00;
|
||||
pub const XLOG_CHECKPOINT_ONLINE: u8 = 0x10;
|
||||
pub const XLP_LONG_HEADER: u16 = 0x0002;
|
||||
|
||||
pub const PG_MAJORVERSION: &str = "14";
|
||||
|
||||
// Zenith specific page flags used to distinguish heap and non-heap relations
|
||||
pub const PD_HEAP_RELATION: u16 = 0x10;
|
||||
pub const PD_NONHEAP_RELATION: u16 = 0x20;
|
||||
|
||||
// bufpage.h
|
||||
pub const PD_FLAGS_OFFSET: usize = 10; // PageHeaderData.pd_flags
|
||||
pub const PD_LOWER_OFFSET: usize = 12; // PageHeaderData.pd_lower
|
||||
|
||||
// itemid.h
|
||||
pub const LP_NORMAL: u32 = 1;
|
||||
|
||||
// htup_details.h
|
||||
pub const T_XMIN_OFFS: usize = 0;
|
||||
pub const T_XMAX_OFFS: usize = 4;
|
||||
pub const T_INFOMASK_OFFS: usize = 4 * 3 + 2 * 3 + 2;
|
||||
pub const HEAP_XMIN_COMMITTED: u16 = 0x0100; /* t_xmin committed */
|
||||
pub const HEAP_XMIN_INVALID: u16 = 0x0200; /* t_xmin invalid/aborted */
|
||||
pub const HEAP_XMAX_COMMITTED: u16 = 0x0400; /* t_xmax committed */
|
||||
pub const HEAP_XMAX_INVALID: u16 = 0x0800; /* t_xmax invalid/aborted */
|
||||
pub const HEAP_XMAX_IS_MULTI: u16 = 0x1000; /* t_xmax is a MultiXactId */
|
||||
pub const SIZE_OF_PAGE_HEADER_DATA: usize = 24;
|
||||
|
||||
// xlogrecord.h
|
||||
pub const XL_RMID_OFFS: usize = 17;
|
||||
|
||||
// List of subdirectories inside pgdata.
|
||||
// Copied from src/bin/initdb/initdb.c
|
||||
pub const PGDATA_SUBDIRS: [&str; 22] = [
|
||||
"global",
|
||||
"pg_wal/archive_status",
|
||||
"pg_commit_ts",
|
||||
"pg_dynshmem",
|
||||
"pg_notify",
|
||||
"pg_serial",
|
||||
"pg_snapshots",
|
||||
"pg_subtrans",
|
||||
"pg_twophase",
|
||||
"pg_multixact",
|
||||
"pg_multixact/members",
|
||||
"pg_multixact/offsets",
|
||||
"base",
|
||||
"base/1",
|
||||
"pg_replslot",
|
||||
"pg_tblspc",
|
||||
"pg_stat",
|
||||
"pg_stat_tmp",
|
||||
"pg_xact",
|
||||
"pg_logical",
|
||||
"pg_logical/snapshots",
|
||||
"pg_logical/mappings",
|
||||
];
|
||||
|
||||
// Don't include postgresql.conf as it is inconvenient on node start:
|
||||
// we need postgresql.conf before basebackup to synchronize safekeepers
|
||||
// so no point in overwriting it during backup restore. Rest of the files
|
||||
// here are not needed before backup so it is okay to edit them after.
|
||||
pub const PGDATA_SPECIAL_FILES: [&str; 3] =
|
||||
["pg_hba.conf", "pg_ident.conf", "postgresql.auto.conf"];
|
||||
|
||||
pub static PG_HBA: &str = include_str!("../samples/pg_hba.conf");
|
||||
|
||||
postgres_ffi/src/relfile_utils.rs (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
//!
|
||||
//! Common utilities for dealing with PostgreSQL relation files.
|
||||
//!
|
||||
use crate::pg_constants;
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
|
||||
#[derive(Debug, Clone, thiserror::Error, PartialEq)]
|
||||
pub enum FilePathError {
|
||||
#[error("invalid relation fork name")]
|
||||
InvalidForkName,
|
||||
#[error("invalid relation data file name")]
|
||||
InvalidFileName,
|
||||
}
|
||||
|
||||
impl From<core::num::ParseIntError> for FilePathError {
|
||||
fn from(_e: core::num::ParseIntError) -> Self {
|
||||
FilePathError::InvalidFileName
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert Postgres relation file's fork suffix to fork number.
|
||||
pub fn forkname_to_number(forkname: Option<&str>) -> Result<u8, FilePathError> {
|
||||
match forkname {
|
||||
// "main" is not in filenames, it's implicit if the fork name is not present
|
||||
None => Ok(pg_constants::MAIN_FORKNUM),
|
||||
Some("fsm") => Ok(pg_constants::FSM_FORKNUM),
|
||||
Some("vm") => Ok(pg_constants::VISIBILITYMAP_FORKNUM),
|
||||
Some("init") => Ok(pg_constants::INIT_FORKNUM),
|
||||
Some(_) => Err(FilePathError::InvalidForkName),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert Postgres fork number to the right suffix of the relation data file.
|
||||
pub fn forknumber_to_name(forknum: u8) -> Option<&'static str> {
|
||||
match forknum {
|
||||
pg_constants::MAIN_FORKNUM => None,
|
||||
pg_constants::FSM_FORKNUM => Some("fsm"),
|
||||
pg_constants::VISIBILITYMAP_FORKNUM => Some("vm"),
|
||||
pg_constants::INIT_FORKNUM => Some("init"),
|
||||
_ => Some("UNKNOWN FORKNUM"),
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Parse a filename of a relation file. Returns (relfilenode, forknum, segno) tuple.
|
||||
///
|
||||
/// Formats:
|
||||
/// <oid>
|
||||
/// <oid>_<fork name>
|
||||
/// <oid>.<segment number>
|
||||
/// <oid>_<fork name>.<segment number>
|
||||
///
|
||||
/// See functions relpath() and _mdfd_segpath() in PostgreSQL sources.
|
||||
///
|
||||
pub fn parse_relfilename(fname: &str) -> Result<(u32, u8, u32), FilePathError> {
|
||||
lazy_static! {
|
||||
static ref RELFILE_RE: Regex =
|
||||
Regex::new(r"^(?P<relnode>\d+)(_(?P<forkname>[a-z]+))?(\.(?P<segno>\d+))?$").unwrap();
|
||||
}
|
||||
let caps = RELFILE_RE
|
||||
.captures(fname)
|
||||
.ok_or(FilePathError::InvalidFileName)?;
|
||||
|
||||
let relnode_str = caps.name("relnode").unwrap().as_str();
|
||||
let relnode = relnode_str.parse::<u32>()?;
|
||||
|
||||
let forkname = caps.name("forkname").map(|f| f.as_str());
|
||||
let forknum = forkname_to_number(forkname)?;
|
||||
|
||||
let segno_match = caps.name("segno");
|
||||
let segno = if segno_match.is_none() {
|
||||
0
|
||||
} else {
|
||||
segno_match.unwrap().as_str().parse::<u32>()?
|
||||
};
|
||||
|
||||
Ok((relnode, forknum, segno))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_parse_valid_relfilenames() {
|
||||
assert_eq!(parse_relfilename("1234"), Ok((1234, 0, 0)));
|
||||
assert_eq!(parse_relfilename("1234_fsm"), Ok((1234, 1, 0)));
|
||||
assert_eq!(parse_relfilename("1234_vm"), Ok((1234, 2, 0)));
|
||||
assert_eq!(parse_relfilename("1234_init"), Ok((1234, 3, 0)));
|
||||
|
||||
assert_eq!(parse_relfilename("1234.12"), Ok((1234, 0, 12)));
|
||||
assert_eq!(parse_relfilename("1234_fsm.12"), Ok((1234, 1, 12)));
|
||||
assert_eq!(parse_relfilename("1234_vm.12"), Ok((1234, 2, 12)));
|
||||
assert_eq!(parse_relfilename("1234_init.12"), Ok((1234, 3, 12)));
|
||||
|
||||
// relfilenode is unsigned, so it can go up to 2^32-1
|
||||
assert_eq!(parse_relfilename("3147483648"), Ok((3147483648, 0, 0)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_invalid_relfilenames() {
|
||||
assert_eq!(
|
||||
parse_relfilename("foo"),
|
||||
Err(FilePathError::InvalidFileName)
|
||||
);
|
||||
assert_eq!(
|
||||
parse_relfilename("1.2.3"),
|
||||
Err(FilePathError::InvalidFileName)
|
||||
);
|
||||
assert_eq!(
|
||||
parse_relfilename("1234_invalid"),
|
||||
Err(FilePathError::InvalidForkName)
|
||||
);
|
||||
assert_eq!(
|
||||
parse_relfilename("1234_"),
|
||||
Err(FilePathError::InvalidFileName)
|
||||
);
|
||||
|
||||
// too large for u32
|
||||
assert_eq!(
|
||||
parse_relfilename("12345678901"),
|
||||
Err(FilePathError::InvalidFileName)
|
||||
);
|
||||
assert_eq!(
|
||||
parse_relfilename("-1234"),
|
||||
Err(FilePathError::InvalidFileName)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_weird_relfilenames() {
|
||||
// we accept 0 for the relfilenode, but PostgreSQL should never do that.
|
||||
assert_eq!(parse_relfilename("0"), Ok((0, 0, 0)));
|
||||
|
||||
// PostgreSQL has a limit of 2^32-2 blocks in a table. With 8k block size and
|
||||
// 1 GB segments, the max segment number is 32767. But we accept larger values
|
||||
// currently.
|
||||
assert_eq!(parse_relfilename("1.123456"), Ok((1, 0, 123456)));
|
||||
}
|
||||
}
Some files were not shown because too many files have changed in this diff.