Mirror of https://github.com/neondatabase/neon.git, synced 2026-01-18 10:52:55 +00:00

Compare commits: 410 commits (docker-mul... ps-thread-)
.circleci/ansible/.gitignore (vendored, new file): 4 lines added
@@ -0,0 +1,4 @@
zenith_install.tar.gz
.zenith_current_version
neon_install.tar.gz
.neon_current_version
.circleci/ansible/ansible.cfg (new file): 10 lines added
@@ -0,0 +1,10 @@
[defaults]

localhost_warning = False
host_key_checking = False
timeout = 30

[ssh_connection]
ssh_args = -F ./ansible.ssh.cfg
scp_if_ssh = True
pipelining = True
.circleci/ansible/ansible.ssh.cfg (new file): 11 lines added
@@ -0,0 +1,11 @@
Host tele.zenith.tech
    User admin
    Port 3023
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null

Host * !tele.zenith.tech
    User admin
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
    ProxyJump tele.zenith.tech
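Before running a playbook against this config, it can help to confirm that the Teleport jump host actually forwards a session. The sketch below is illustrative only; the target host name is a placeholder borrowed from the staging inventory further down and may not resolve from your machine.

```bash
# Connect to a storage host through tele.zenith.tech,
# using the same SSH config the ansible.cfg above points at.
ssh -F .circleci/ansible/ansible.ssh.cfg admin@zenith-us-stage-ps-2 hostname -f
```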
.circleci/ansible/deploy.yaml (new file): 176 lines added
@@ -0,0 +1,176 @@
- name: Upload Neon binaries
  hosts: storage
  gather_facts: False
  remote_user: admin

  tasks:

  - name: get latest version of Neon binaries
    register: current_version_file
    set_fact:
      current_version: "{{ lookup('file', '.neon_current_version') | trim }}"
    tags:
    - pageserver
    - safekeeper

  - name: inform about versions
    debug: msg="Version to deploy - {{ current_version }}"
    tags:
    - pageserver
    - safekeeper

  - name: upload and extract Neon binaries to /usr/local
    ansible.builtin.unarchive:
      owner: root
      group: root
      src: neon_install.tar.gz
      dest: /usr/local
    become: true
    tags:
    - pageserver
    - safekeeper
    - binaries
    - putbinaries

- name: Deploy pageserver
  hosts: pageservers
  gather_facts: False
  remote_user: admin

  tasks:

  - name: upload init script
    when: console_mgmt_base_url is defined
    ansible.builtin.template:
      src: scripts/init_pageserver.sh
      dest: /tmp/init_pageserver.sh
      owner: root
      group: root
      mode: '0755'
    become: true
    tags:
    - pageserver

  - name: init pageserver
    shell:
      cmd: /tmp/init_pageserver.sh
    args:
      creates: "/storage/pageserver/data/tenants"
    environment:
      ZENITH_REPO_DIR: "/storage/pageserver/data"
      LD_LIBRARY_PATH: "/usr/local/lib"
    become: true
    tags:
    - pageserver

  - name: update remote storage (s3) config
    lineinfile:
      path: /storage/pageserver/data/pageserver.toml
      line: "{{ item }}"
    loop:
    - "[remote_storage]"
    - "bucket_name = '{{ bucket_name }}'"
    - "bucket_region = '{{ bucket_region }}'"
    - "prefix_in_bucket = '{{ inventory_hostname }}'"
    become: true
    tags:
    - pageserver

  - name: upload systemd service definition
    ansible.builtin.template:
      src: systemd/pageserver.service
      dest: /etc/systemd/system/pageserver.service
      owner: root
      group: root
      mode: '0644'
    become: true
    tags:
    - pageserver

  - name: start systemd service
    ansible.builtin.systemd:
      daemon_reload: yes
      name: pageserver
      enabled: yes
      state: restarted
    become: true
    tags:
    - pageserver

  - name: post version to console
    when: console_mgmt_base_url is defined
    shell:
      cmd: |
        INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
        curl -sfS -d '{"version": {{ current_version }} }' -X PATCH {{ console_mgmt_base_url }}/api/v1/pageservers/$INSTANCE_ID
    tags:
    - pageserver

- name: Deploy safekeeper
  hosts: safekeepers
  gather_facts: False
  remote_user: admin

  tasks:

  - name: upload init script
    when: console_mgmt_base_url is defined
    ansible.builtin.template:
      src: scripts/init_safekeeper.sh
      dest: /tmp/init_safekeeper.sh
      owner: root
      group: root
      mode: '0755'
    become: true
    tags:
    - safekeeper

  - name: init safekeeper
    shell:
      cmd: /tmp/init_safekeeper.sh
    args:
      creates: "/storage/safekeeper/data/safekeeper.id"
    environment:
      ZENITH_REPO_DIR: "/storage/safekeeper/data"
      LD_LIBRARY_PATH: "/usr/local/lib"
    become: true
    tags:
    - safekeeper

  # in the future safekeepers should discover pageservers by themselves,
  # but currently we use the first pageserver that was discovered
  - name: set first pageserver var for safekeepers
    set_fact:
      first_pageserver: "{{ hostvars[groups['pageservers'][0]]['inventory_hostname'] }}"
    tags:
    - safekeeper

  - name: upload systemd service definition
    ansible.builtin.template:
      src: systemd/safekeeper.service
      dest: /etc/systemd/system/safekeeper.service
      owner: root
      group: root
      mode: '0644'
    become: true
    tags:
    - safekeeper

  - name: start systemd service
    ansible.builtin.systemd:
      daemon_reload: yes
      name: safekeeper
      enabled: yes
      state: restarted
    become: true
    tags:
    - safekeeper

  - name: post version to console
    when: console_mgmt_base_url is defined
    shell:
      cmd: |
        INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
        curl -sfS -d '{"version": {{ current_version }} }' -X PATCH {{ console_mgmt_base_url }}/api/v1/safekeepers/$INSTANCE_ID
    tags:
    - safekeeper
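A typical local run of this playbook mirrors what the deploy-staging and deploy-release CI jobs later in this diff do: fetch the binaries tarball, then run ansible-playbook against one of the inventories. The commands below are a sketch of that flow; the --tags filter is optional and relies on the pageserver/safekeeper tags defined above.

```bash
cd .circleci/ansible

# produce neon_install.tar.gz and .neon_current_version
./get_binaries.sh

# deploy only the pageserver plays to the staging inventory
ansible-playbook deploy.yaml -i staging.hosts --tags pageserver
```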
.circleci/ansible/get_binaries.sh (new executable file): 52 lines added
@@ -0,0 +1,52 @@
#!/bin/bash

set -e

RELEASE=${RELEASE:-false}

# look at docker hub for latest tag for neon docker image
if [ "${RELEASE}" = "true" ]; then
  echo "searching for latest release tag"
  VERSION=$(curl -s https://registry.hub.docker.com/v1/repositories/neondatabase/neon/tags |jq -r -S '.[].name' | grep release | sed 's/release-//g' | grep -E '^[0-9]+$' | sort -n | tail -1)
  if [ -z "${VERSION}" ]; then
    echo "no docker tags found, exiting..."
    exit 1
  else
    TAG="release-${VERSION}"
  fi
else
  echo "searching for latest dev tag"
  VERSION=$(curl -s https://registry.hub.docker.com/v1/repositories/neondatabase/neon/tags |jq -r -S '.[].name' | grep -E '^[0-9]+$' | sort -n | tail -1)
  if [ -z "${VERSION}" ]; then
    echo "no docker tags found, exiting..."
    exit 1
  else
    TAG="${VERSION}"
  fi
fi

echo "found ${VERSION}"

# do initial cleanup
rm -rf neon_install postgres_install.tar.gz neon_install.tar.gz .neon_current_version
mkdir neon_install

# retrieve binaries from docker image
echo "getting binaries from docker image"
docker pull --quiet neondatabase/neon:${TAG}
ID=$(docker create neondatabase/neon:${TAG})
docker cp ${ID}:/data/postgres_install.tar.gz .
tar -xzf postgres_install.tar.gz -C neon_install
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
docker cp ${ID}:/usr/local/bin/postgres neon_install/bin/
docker rm -vf ${ID}

# store version to file (for ansible playbooks) and create binaries tarball
echo ${VERSION} > neon_install/.neon_current_version
echo ${VERSION} > .neon_current_version
tar -czf neon_install.tar.gz -C neon_install .

# do final cleanup
rm -rf neon_install postgres_install.tar.gz
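The script is driven entirely by the RELEASE environment variable, which is how the deploy jobs further down in this diff invoke it. A minimal usage sketch:

```bash
# pick up the newest dev tag from Docker Hub
./get_binaries.sh

# pick up the newest release-<N> tag instead
RELEASE=true ./get_binaries.sh

# both variants leave the artifacts the playbook expects behind
ls neon_install.tar.gz .neon_current_version
```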
.circleci/ansible/production.hosts (new file): 17 lines added
@@ -0,0 +1,17 @@
[pageservers]
zenith-1-ps-1 console_region_id=1

[safekeepers]
zenith-1-sk-1 console_region_id=1
zenith-1-sk-2 console_region_id=1
zenith-1-sk-3 console_region_id=1

[storage:children]
pageservers
safekeepers

[storage:vars]
console_mgmt_base_url = http://console-release.local
bucket_name = zenith-storage-oregon
bucket_region = us-west-2
etcd_endpoints = etcd-release.local:2379
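Assuming Ansible is installed and the Teleport SSH config above is in place, the inventory can be sanity-checked with ordinary ad-hoc commands. These are illustrative and not part of the pipeline itself:

```bash
# list the hosts that make up the storage group
ansible -i production.hosts storage --list-hosts

# verify connectivity to every safekeeper
ansible -i production.hosts safekeepers -m ping
```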
.circleci/ansible/scripts/init_pageserver.sh (new file): 30 lines added
@@ -0,0 +1,30 @@
#!/bin/sh

# get instance id from meta-data service
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)

# store fqdn hostname in var
HOST=$(hostname -f)


cat <<EOF | tee /tmp/payload
{
  "version": 1,
  "host": "${HOST}",
  "port": 6400,
  "region_id": {{ console_region_id }},
  "instance_id": "${INSTANCE_ID}",
  "http_host": "${HOST}",
  "http_port": 9898
}
EOF

# check if pageserver already registered or not
if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/pageservers/${INSTANCE_ID} -o /dev/null; then

  # not registered, so register it now
  ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/pageservers -d@/tmp/payload | jq -r '.ID')

  # init pageserver
  sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
fi
.circleci/ansible/scripts/init_safekeeper.sh (new file): 30 lines added
@@ -0,0 +1,30 @@
#!/bin/sh

# get instance id from meta-data service
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)

# store fqdn hostname in var
HOST=$(hostname -f)


cat <<EOF | tee /tmp/payload
{
  "version": 1,
  "host": "${HOST}",
  "port": 6500,
  "region_id": {{ console_region_id }},
  "instance_id": "${INSTANCE_ID}",
  "http_host": "${HOST}",
  "http_port": 7676
}
EOF

# check if safekeeper already registered or not
if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/safekeepers/${INSTANCE_ID} -o /dev/null; then

  # not registered, so register it now
  ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/safekeepers -d@/tmp/payload | jq -r '.ID')

  # init safekeeper
  sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
fi
.circleci/ansible/staging.hosts (new file): 18 lines added
@@ -0,0 +1,18 @@
[pageservers]
#zenith-us-stage-ps-1 console_region_id=27
zenith-us-stage-ps-2 console_region_id=27

[safekeepers]
zenith-us-stage-sk-1 console_region_id=27
zenith-us-stage-sk-2 console_region_id=27
zenith-us-stage-sk-4 console_region_id=27

[storage:children]
pageservers
safekeepers

[storage:vars]
console_mgmt_base_url = http://console-staging.local
bucket_name = zenith-staging-storage-us-east-1
bucket_region = us-east-1
etcd_endpoints = etcd-staging.local:2379
.circleci/ansible/systemd/pageserver.service (new file): 18 lines added
@@ -0,0 +1,18 @@
[Unit]
Description=Zenith pageserver
After=network.target auditd.service

[Service]
Type=simple
User=pageserver
Environment=RUST_BACKTRACE=1 ZENITH_REPO_DIR=/storage/pageserver LD_LIBRARY_PATH=/usr/local/lib
ExecStart=/usr/local/bin/pageserver -c "pg_distrib_dir='/usr/local'" -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -D /storage/pageserver/data
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
Restart=on-failure
TimeoutSec=10
LimitNOFILE=30000000

[Install]
WantedBy=multi-user.target
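Once the playbook installs and restarts this unit, the usual systemd tooling is enough for a quick health check on a pageserver host. A sketch, assuming shell access to the host:

```bash
# confirm the unit is active and look at recent output
sudo systemctl status pageserver
sudo journalctl -u pageserver --since "10 min ago"

# the HTTP listener from ExecStart is bound on port 9898;
# the exact status endpoint path is an assumption, not taken from this diff
curl -s http://127.0.0.1:9898/v1/status
```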
.circleci/ansible/systemd/safekeeper.service (new file): 18 lines added
@@ -0,0 +1,18 @@
[Unit]
Description=Zenith safekeeper
After=network.target auditd.service

[Service]
Type=simple
User=safekeeper
Environment=RUST_BACKTRACE=1 ZENITH_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/lib
ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -p {{ first_pageserver }}:6400 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }}
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT
Restart=on-failure
TimeoutSec=10
LimitNOFILE=30000000

[Install]
WantedBy=multi-user.target
@@ -1,28 +1,28 @@
version: 2.1

executors:
zenith-build-executor:
neon-xlarge-executor:
resource_class: xlarge
docker:
- image: cimg/rust:1.56.1
zenith-python-executor:
# NB: when changed, do not forget to update rust image tag in all Dockerfiles
- image: zimg/rust:1.58
neon-executor:
docker:
- image: cimg/python:3.7.10 # Oldest available 3.7 with Ubuntu 20.04 (for GLIBC and Rust) at CircleCI
- image: zimg/rust:1.58

jobs:
check-codestyle-rust:
executor: zenith-build-executor
executor: neon-xlarge-executor
steps:
- checkout
- run:
name: rustfmt
when: always
command: |
cargo fmt --all -- --check
command: cargo fmt --all -- --check

# A job to build postgres
build-postgres:
executor: zenith-build-executor
executor: neon-xlarge-executor
parameters:
build_type:
type: enum
@@ -34,11 +34,13 @@ jobs:
- checkout

# Grab the postgres git revision to build a cache key.
# Append makefile as it could change the way postgres is built.
# Note this works even though the submodule hasn't been checked out yet.
- run:
name: Get postgres cache key
command: |
git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres
git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres
cat Makefile >> /tmp/cache-key-postgres

- restore_cache:
name: Restore postgres cache
@@ -46,15 +48,6 @@ jobs:
# Restore ONLY if the rev key matches exactly
- v04-postgres-cache-<< parameters.build_type >>-{{ checksum "/tmp/cache-key-postgres" }}

# FIXME We could cache our own docker container, instead of installing packages every time.
- run:
name: apt install dependencies
command: |
if [ ! -e tmp_install/bin/postgres ]; then
sudo apt update
sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev
fi

# Build postgres if the restore_cache didn't find a build.
# `make` can't figure out whether the cache is valid, since
# it only compares file timestamps.
@@ -64,7 +57,8 @@ jobs:
if [ ! -e tmp_install/bin/postgres ]; then
# "depth 1" saves some time by not cloning the whole repo
git submodule update --init --depth 1
make postgres -j8
# bail out on any warnings
COPT='-Werror' mold -run make postgres -j$(nproc)
fi

- save_cache:
@@ -73,9 +67,9 @@ jobs:
paths:
- tmp_install

# A job to build zenith rust code
build-zenith:
executor: zenith-build-executor
# A job to build Neon rust code
build-neon:
executor: neon-xlarge-executor
parameters:
build_type:
type: enum
@@ -83,21 +77,18 @@ jobs:
environment:
BUILD_TYPE: << parameters.build_type >>
steps:
- run:
name: apt install dependencies
command: |
sudo apt update
sudo apt install libssl-dev clang

# Checkout the git repo (without submodules)
- checkout

# Grab the postgres git revision to build a cache key.
# Append makefile as it could change the way postgres is built.
# Note this works even though the submodule hasn't been checked out yet.
- run:
name: Get postgres cache key
command: |
git rev-parse HEAD:vendor/postgres > /tmp/cache-key-postgres
cat Makefile >> /tmp/cache-key-postgres


- restore_cache:
name: Restore postgres cache
@@ -122,11 +113,16 @@ jobs:
CARGO_FLAGS=
elif [[ $BUILD_TYPE == "release" ]]; then
cov_prefix=()
CARGO_FLAGS=--release
CARGO_FLAGS="--release --features profiling"
fi

export CARGO_INCREMENTAL=0
"${cov_prefix[@]}" cargo build $CARGO_FLAGS --bins --tests
export CACHEPOT_BUCKET=zenith-rust-cachepot
export RUSTC_WRAPPER=cachepot
export AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}"
export AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}"
"${cov_prefix[@]}" mold -run cargo build $CARGO_FLAGS --features failpoints --bins --tests
cachepot -s

- save_cache:
name: Save rust cache
@@ -136,31 +132,19 @@ jobs:
- ~/.cargo/git
- target

# Run style checks
# has to run separately from cargo fmt section
# since needs to run with dependencies
- run:
name: cargo clippy
command: |
if [[ $BUILD_TYPE == "debug" ]]; then
cov_prefix=(scripts/coverage "--profraw-prefix=$CIRCLE_JOB" --dir=/tmp/zenith/coverage run)
elif [[ $BUILD_TYPE == "release" ]]; then
cov_prefix=()
fi

"${cov_prefix[@]}" ./run_clippy.sh

# Run rust unit tests
- run:
name: cargo test
command: |
if [[ $BUILD_TYPE == "debug" ]]; then
cov_prefix=(scripts/coverage "--profraw-prefix=$CIRCLE_JOB" --dir=/tmp/zenith/coverage run)
CARGO_FLAGS=
elif [[ $BUILD_TYPE == "release" ]]; then
cov_prefix=()
CARGO_FLAGS=--release
fi

"${cov_prefix[@]}" cargo test
"${cov_prefix[@]}" cargo test $CARGO_FLAGS

# Install the rust binaries, for use by test jobs
- run:
@@ -210,6 +194,14 @@ jobs:
command: |
cp -a tmp_install /tmp/zenith/pg_install

- run:
name: Merge coverage data
command: |
# This will speed up workspace uploads
if [[ $BUILD_TYPE == "debug" ]]; then
scripts/coverage "--profraw-prefix=$CIRCLE_JOB" --dir=/tmp/zenith/coverage merge
fi

# Save the rust binaries and coverage data for other jobs in this workflow.
- persist_to_workspace:
root: /tmp/zenith
@@ -217,23 +209,30 @@ jobs:
- "*"

check-codestyle-python:
executor: zenith-python-executor
executor: neon-executor
steps:
- checkout
- restore_cache:
keys:
- v2-python-deps-{{ checksum "poetry.lock" }}
- run:
name: Install deps
command: pipenv --python 3.7 install --dev
command: ./scripts/pysync
- save_cache:
key: v2-python-deps-{{ checksum "poetry.lock" }}
paths:
- /home/circleci/.cache/pypoetry/virtualenvs
- run:
name: Run yapf to ensure code format
when: always
command: pipenv run yapf --recursive --diff .
command: poetry run yapf --recursive --diff .
- run:
name: Run mypy to check types
when: always
command: pipenv run mypy .
command: poetry run mypy .

run-pytest:
executor: zenith-python-executor
executor: neon-executor
parameters:
# pytest args to specify the tests to run.
#
@@ -272,9 +271,16 @@ jobs:
condition: << parameters.needs_postgres_source >>
steps:
- run: git submodule update --init --depth 1
- restore_cache:
keys:
- v2-python-deps-{{ checksum "poetry.lock" }}
- run:
name: Install deps
command: pipenv --python 3.7 install
command: ./scripts/pysync
- save_cache:
key: v2-python-deps-{{ checksum "poetry.lock" }}
paths:
- /home/circleci/.cache/pypoetry/virtualenvs
- run:
name: Run pytest
# pytest doesn't output test logs in real time, so CI job may fail with
@@ -291,6 +297,7 @@ jobs:
- PLATFORM: zenith-local-ci
command: |
PERF_REPORT_DIR="$(realpath test_runner/perf-report-local)"
rm -rf $PERF_REPORT_DIR

TEST_SELECTION="test_runner/<< parameters.test_selection >>"
EXTRA_PARAMS="<< parameters.extra_params >>"
@@ -326,7 +333,7 @@ jobs:
# -n4 uses four processes to run tests via pytest-xdist
# -s is not used to prevent pytest from capturing output, because tests are running
# in parallel and logs are mixed between different tests
"${cov_prefix[@]}" pipenv run pytest \
"${cov_prefix[@]}" ./scripts/pytest \
--junitxml=$TEST_OUTPUT/junit.xml \
--tb=short \
--verbose \
@@ -335,7 +342,6 @@ jobs:

if << parameters.save_perf_report >>; then
if [[ $CIRCLE_BRANCH == "main" ]]; then
# TODO: reuse scripts/git-upload
export REPORT_FROM="$PERF_REPORT_DIR"
export REPORT_TO=local
scripts/generate_and_push_perf_report.sh
@@ -349,13 +355,20 @@ jobs:
when: always
command: |
du -sh /tmp/test_output/*
find /tmp/test_output -type f ! -name "pg.log" ! -name "pageserver.log" ! -name "safekeeper.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" -delete
find /tmp/test_output -type f ! -name "pg.log" ! -name "pageserver.log" ! -name "safekeeper.log" ! -name "regression.diffs" ! -name "junit.xml" ! -name "*.filediff" ! -name "*.stdout" ! -name "*.stderr" ! -name "flamegraph.svg" -delete
du -sh /tmp/test_output/*
- store_artifacts:
path: /tmp/test_output
# The store_test_results step tells CircleCI where to find the junit.xml file.
- store_test_results:
path: /tmp/test_output
- run:
name: Merge coverage data
command: |
# This will speed up workspace uploads
if [[ $BUILD_TYPE == "debug" ]]; then
scripts/coverage "--profraw-prefix=$CIRCLE_JOB" --dir=/tmp/zenith/coverage merge
fi
# Save coverage data (if any)
- persist_to_workspace:
root: /tmp/zenith
@@ -363,7 +376,7 @@ jobs:
- "*"

coverage-report:
executor: zenith-build-executor
executor: neon-xlarge-executor
steps:
- attach_workspace:
at: /tmp/zenith
@@ -375,16 +388,10 @@ jobs:
# there's no way to clean out old packages, so the cache grows every time something
# changes.
- v04-rust-cache-deps-debug-{{ checksum "Cargo.lock" }}
- run:
name: Install llvm-tools
command: |
# TODO: install a proper symbol demangler, e.g. rustfilt
# TODO: we should embed this into a docker image
rustup component add llvm-tools-preview
- run:
name: Build coverage report
command: |
COMMIT_URL=https://github.com/zenithdb/zenith/commit/$CIRCLE_SHA1
COMMIT_URL=https://github.com/neondatabase/neon/commit/$CIRCLE_SHA1

scripts/coverage \
--dir=/tmp/zenith/coverage report \
@@ -395,11 +402,11 @@ jobs:
name: Upload coverage report
command: |
LOCAL_REPO=$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
REPORT_URL=https://zenithdb.github.io/zenith-coverage-data/$CIRCLE_SHA1
COMMIT_URL=https://github.com/zenithdb/zenith/commit/$CIRCLE_SHA1
REPORT_URL=https://neondatabase.github.io/zenith-coverage-data/$CIRCLE_SHA1
COMMIT_URL=https://github.com/neondatabase/neon/commit/$CIRCLE_SHA1

scripts/git-upload \
--repo=https://$VIP_VAP_ACCESS_TOKEN@github.com/zenithdb/zenith-coverage-data.git \
--repo=https://$VIP_VAP_ACCESS_TOKEN@github.com/neondatabase/zenith-coverage-data.git \
--message="Add code coverage for $COMMIT_URL" \
copy /tmp/zenith/coverage/report $CIRCLE_SHA1 # COPY FROM TO_RELATIVE

@@ -416,7 +423,7 @@ jobs:
\"target_url\": \"$REPORT_URL\"
}"

# Build zenithdb/zenith:latest image and push it to Docker hub
# Build neondatabase/neon:latest image and push it to Docker hub
docker-image:
docker:
- image: cimg/base:2021.04
@@ -430,12 +437,18 @@ jobs:
- run:
name: Build and push Docker image
command: |
echo $DOCKER_PWD | docker login -u $DOCKER_LOGIN --password-stdin
echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
DOCKER_TAG=$(git log --oneline|wc -l)
docker build --build-arg GIT_VERSION=$CIRCLE_SHA1 -t zenithdb/zenith:latest . && docker push zenithdb/zenith:latest
docker tag zenithdb/zenith:latest zenithdb/zenith:${DOCKER_TAG} && docker push zenithdb/zenith:${DOCKER_TAG}
docker build \
--pull \
--build-arg GIT_VERSION=${CIRCLE_SHA1} \
--build-arg AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}" \
--build-arg AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}" \
--tag neondatabase/neon:${DOCKER_TAG} --tag neondatabase/neon:latest .
docker push neondatabase/neon:${DOCKER_TAG}
docker push neondatabase/neon:latest

# Build zenithdb/compute-node:latest image and push it to Docker hub
# Build neondatabase/compute-node:latest image and push it to Docker hub
docker-image-compute:
docker:
- image: cimg/base:2021.04
@@ -443,27 +456,86 @@ jobs:
- checkout
- setup_remote_docker:
docker_layer_caching: true
- run:
name: Login to docker hub
command: echo $DOCKER_PWD | docker login -u $DOCKER_LOGIN --password-stdin
- run:
name: Setup buildx
command: docker run -it --rm --privileged tonistiigi/binfmt --install all
# Build zenithdb/compute-tools:latest image and push it to Docker hub
# Build neondatabase/compute-tools:latest image and push it to Docker hub
# TODO: this should probably also use versioned tag, not just :latest.
# XXX: but should it? We build and use it only locally now.
- run:
name: Build and push compute-tools Docker image
command: docker buildx build --platform linux/amd64,linux/arm64 --push -t zenithdb/compute-tools:latest compute_tools
command: |
echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
docker build \
--build-arg AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}" \
--build-arg AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}" \
--tag neondatabase/compute-tools:latest -f Dockerfile.compute-tools .
docker push neondatabase/compute-tools:latest
- run:
name: Init postgres submodule
command: git submodule update --init --depth 1
- run:
name: Build and push compute-node Docker image
command: |
echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
DOCKER_TAG=$(git log --oneline|wc -l)
docker buildx build --platform linux/amd64,linux/arm64 --push -t zenithdb/compute-node:latest vendor/postgres
docker buildx build --platform linux/amd64,linux/arm64 --push -t zenithdb/compute-node:${DOCKER_TAG} vendor/postgres
docker build --tag neondatabase/compute-node:${DOCKER_TAG} --tag neondatabase/compute-node:latest vendor/postgres
docker push neondatabase/compute-node:${DOCKER_TAG}
docker push neondatabase/compute-node:latest

# Build production neondatabase/neon:release image and push it to Docker hub
docker-image-release:
docker:
- image: cimg/base:2021.04
steps:
- checkout
- setup_remote_docker:
docker_layer_caching: true
- run:
name: Init postgres submodule
command: git submodule update --init --depth 1
- run:
name: Build and push Docker image
command: |
echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
DOCKER_TAG="release-$(git log --oneline|wc -l)"
docker build \
--pull \
--build-arg GIT_VERSION=${CIRCLE_SHA1} \
--build-arg AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}" \
--build-arg AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}" \
--tag neondatabase/neon:${DOCKER_TAG} --tag neondatabase/neon:release .
docker push neondatabase/neon:${DOCKER_TAG}
docker push neondatabase/neon:release

# Build production neondatabase/compute-node:release image and push it to Docker hub
docker-image-compute-release:
docker:
- image: cimg/base:2021.04
steps:
- checkout
- setup_remote_docker:
docker_layer_caching: true
# Build neondatabase/compute-tools:release image and push it to Docker hub
# TODO: this should probably also use versioned tag, not just :latest.
# XXX: but should it? We build and use it only locally now.
- run:
name: Build and push compute-tools Docker image
command: |
echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
docker build \
--build-arg AWS_ACCESS_KEY_ID="${CACHEPOT_AWS_ACCESS_KEY_ID}" \
--build-arg AWS_SECRET_ACCESS_KEY="${CACHEPOT_AWS_SECRET_ACCESS_KEY}" \
--tag neondatabase/compute-tools:release -f Dockerfile.compute-tools .
docker push neondatabase/compute-tools:release
- run:
name: Init postgres submodule
command: git submodule update --init --depth 1
- run:
name: Build and push compute-node Docker image
command: |
echo $NEON_DOCKER_PWD | docker login -u $NEON_DOCKER_LOGIN --password-stdin
DOCKER_TAG="release-$(git log --oneline|wc -l)"
docker build --tag neondatabase/compute-node:${DOCKER_TAG} --tag neondatabase/compute-node:release vendor/postgres
docker push neondatabase/compute-node:${DOCKER_TAG}
docker push neondatabase/compute-node:release

deploy-staging:
docker:
@@ -471,40 +543,25 @@ jobs:
steps:
- checkout
- setup_remote_docker
- run:
name: Get Zenith binaries
command: |
rm -rf zenith_install postgres_install.tar.gz zenith_install.tar.gz
mkdir zenith_install
DOCKER_TAG=$(git log --oneline|wc -l)
docker pull --quiet zenithdb/zenith:${DOCKER_TAG}
ID=$(docker create zenithdb/zenith:${DOCKER_TAG})
docker cp $ID:/data/postgres_install.tar.gz .
tar -xzf postgres_install.tar.gz -C zenith_install && rm postgres_install.tar.gz
docker cp $ID:/usr/local/bin/pageserver zenith_install/bin/
docker cp $ID:/usr/local/bin/safekeeper zenith_install/bin/
docker cp $ID:/usr/local/bin/proxy zenith_install/bin/
docker cp $ID:/usr/local/bin/postgres zenith_install/bin/
docker rm -v $ID
echo ${DOCKER_TAG} | tee zenith_install/.zenith_current_version
tar -czf zenith_install.tar.gz -C zenith_install .
ls -la zenith_install.tar.gz
- run:
name: Setup ansible
command: |
pip install --progress-bar off --user ansible boto3
ansible-galaxy collection install amazon.aws
- run:
name: Apply re-deploy playbook
environment:
ANSIBLE_HOST_KEY_CHECKING: false
name: Redeploy
command: |
echo "${STAGING_SSH_KEY}" | base64 --decode | ssh-add -
export AWS_REGION=${STAGING_AWS_REGION}
export AWS_ACCESS_KEY_ID=${STAGING_AWS_ACCESS_KEY_ID}
export AWS_SECRET_ACCESS_KEY=${STAGING_AWS_SECRET_ACCESS_KEY}
ansible-playbook .circleci/storage-redeploy.playbook.yml
rm -f zenith_install.tar.gz
cd "$(pwd)/.circleci/ansible"

./get_binaries.sh

echo "${TELEPORT_SSH_KEY}" | tr -d '\n'| base64 --decode >ssh-key
echo "${TELEPORT_SSH_CERT}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
chmod 0600 ssh-key
ssh-add ssh-key
rm -f ssh-key ssh-key-cert.pub

ansible-playbook deploy.yaml -i staging.hosts
rm -f neon_install.tar.gz .neon_current_version

deploy-staging-proxy:
docker:
@@ -522,12 +579,62 @@ jobs:
name: Setup helm v3
command: |
curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
helm repo add zenithdb https://zenithdb.github.io/helm-charts
helm repo add neondatabase https://neondatabase.github.io/helm-charts
- run:
name: Re-deploy proxy
command: |
DOCKER_TAG=$(git log --oneline|wc -l)
helm upgrade zenith-proxy zenithdb/zenith-proxy --install -f .circleci/proxy.staging.yaml --set image.tag=${DOCKER_TAG} --wait
helm upgrade neon-proxy neondatabase/neon-proxy --install -f .circleci/helm-values/staging.proxy.yaml --set image.tag=${DOCKER_TAG} --wait
helm upgrade neon-proxy-scram neondatabase/neon-proxy --install -f .circleci/helm-values/staging.proxy-scram.yaml --set image.tag=${DOCKER_TAG} --wait

deploy-release:
docker:
- image: cimg/python:3.10
steps:
- checkout
- setup_remote_docker
- run:
name: Setup ansible
command: |
pip install --progress-bar off --user ansible boto3
- run:
name: Redeploy
command: |
cd "$(pwd)/.circleci/ansible"

RELEASE=true ./get_binaries.sh

echo "${TELEPORT_SSH_KEY}" | tr -d '\n'| base64 --decode >ssh-key
echo "${TELEPORT_SSH_CERT}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
chmod 0600 ssh-key
ssh-add ssh-key
rm -f ssh-key ssh-key-cert.pub

ansible-playbook deploy.yaml -i production.hosts
rm -f neon_install.tar.gz .neon_current_version

deploy-release-proxy:
docker:
- image: cimg/base:2021.04
environment:
KUBECONFIG: .kubeconfig
steps:
- checkout
- run:
name: Store kubeconfig file
command: |
echo "${PRODUCTION_KUBECONFIG_DATA}" | base64 --decode > ${KUBECONFIG}
chmod 0600 ${KUBECONFIG}
- run:
name: Setup helm v3
command: |
curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
helm repo add zenithdb https://neondatabase.github.io/helm-charts
- run:
name: Re-deploy proxy
command: |
DOCKER_TAG="release-$(git log --oneline|wc -l)"
helm upgrade zenith-proxy zenithdb/zenith-proxy --install -f .circleci/helm-values/production.proxy.yaml --set image.tag=${DOCKER_TAG} --wait

# Trigger a new remote CI job
remote-ci-trigger:
@@ -551,7 +658,7 @@ jobs:
--data \
"{
\"state\": \"pending\",
\"context\": \"zenith-remote-ci\",
\"context\": \"neon-cloud-e2e\",
\"description\": \"[$REMOTE_REPO] Remote CI job is about to start\"
}"
- run:
@@ -567,61 +674,12 @@ jobs:
"{
\"ref\": \"main\",
\"inputs\": {
\"ci_job_name\": \"zenith-remote-ci\",
\"ci_job_name\": \"neon-cloud-e2e\",
\"commit_hash\": \"$CIRCLE_SHA1\",
\"remote_repo\": \"$LOCAL_REPO\"
}
}"

#
#
# compute-tools jobs
# TODO: unify with main build_and_test pipeline
#
#
compute-tools-test:
executor: zenith-build-executor
working_directory: ~/repo/compute_tools
steps:
- checkout:
path: ~/repo

- restore_cache:
name: Restore rust cache
keys:
# Require an exact match. While an out of date cache might speed up the build,
# there's no way to clean out old packages, so the cache grows every time something
# changes.
- v03-rust-cache-deps-debug-{{ checksum "Cargo.lock" }}

# Build the rust code, including test binaries
- run:
name: Rust build
environment:
CARGO_INCREMENTAL: 0
command: cargo build --bins --tests

- save_cache:
name: Save rust cache
key: v03-rust-cache-deps-debug-{{ checksum "Cargo.lock" }}
paths:
- ~/.cargo/registry
- ~/.cargo/git
- target

# Run Rust formatting checks
- run:
name: cargo fmt check
command: cargo fmt --all -- --check

# Run Rust linter (clippy)
- run:
name: cargo clippy check
command: cargo clippy --all --all-targets -- -Dwarnings -Drust-2018-idioms

# Run Rust integration and unittests
- run: cargo test

workflows:
build_and_test:
jobs:
@@ -632,8 +690,8 @@ workflows:
matrix:
parameters:
build_type: ["debug", "release"]
- build-zenith:
name: build-zenith-<< matrix.build_type >>
- build-neon:
name: build-neon-<< matrix.build_type >>
matrix:
parameters:
build_type: ["debug", "release"]
@@ -641,13 +699,14 @@ workflows:
- build-postgres-<< matrix.build_type >>
- run-pytest:
name: pg_regress-tests-<< matrix.build_type >>
context: PERF_TEST_RESULT_CONNSTR
matrix:
parameters:
build_type: ["debug", "release"]
test_selection: batch_pg_regress
needs_postgres_source: true
requires:
- build-zenith-<< matrix.build_type >>
- build-neon-<< matrix.build_type >>
- run-pytest:
name: other-tests-<< matrix.build_type >>
matrix:
@@ -655,22 +714,22 @@ workflows:
build_type: ["debug", "release"]
test_selection: batch_others
requires:
- build-zenith-<< matrix.build_type >>
- build-neon-<< matrix.build_type >>
- run-pytest:
name: benchmarks
context: PERF_TEST_RESULT_CONNSTR
build_type: release
test_selection: performance
run_in_parallel: false
save_perf_report: true
requires:
- build-zenith-release
- build-neon-release
- coverage-report:
# Context passes credentials for gh api
context: CI_ACCESS_TOKEN
requires:
# TODO: consider adding more
- other-tests-debug
- compute-tools-test
- docker-image:
# Context gives an ability to login
context: Docker Hub
@@ -690,11 +749,9 @@ workflows:
branches:
only:
- main
- docker-multi-platform
# requires:
# - pg_regress-tests-release
# - other-tests-release
# - compute-tools-test
requires:
- pg_regress-tests-release
- other-tests-release
- deploy-staging:
# Context gives an ability to login
context: Docker Hub
@@ -713,14 +770,55 @@ workflows:
- main
requires:
- docker-image

- docker-image-release:
# Context gives an ability to login
context: Docker Hub
# Build image only for commits to main
filters:
branches:
only:
- release
requires:
- pg_regress-tests-release
- other-tests-release
- docker-image-compute-release:
# Context gives an ability to login
context: Docker Hub
# Build image only for commits to main
filters:
branches:
only:
- release
requires:
- pg_regress-tests-release
- other-tests-release
- deploy-release:
# Context gives an ability to login
context: Docker Hub
# deploy only for commits to main
filters:
branches:
only:
- release
requires:
- docker-image-release
- deploy-release-proxy:
# deploy only for commits to main
filters:
branches:
only:
- release
requires:
- docker-image-release
- remote-ci-trigger:
# Context passes credentials for gh api
context: CI_ACCESS_TOKEN
remote_repo: "zenithdb/console"
remote_repo: "neondatabase/cloud"
requires:
# XXX: Successful build doesn't mean everything is OK, but
# the job to be triggered takes so much time to complete (~22 min)
# that it's better not to wait for the commented-out steps
- build-zenith-debug
- build-neon-release
# - pg_regress-tests-release
# - other-tests-release

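When editing a pipeline config of this size, it is worth validating the YAML locally before pushing. A quick check with the CircleCI CLI, assuming it is installed, might look like this:

```bash
# validate .circleci/config.yml without triggering a build
circleci config validate

# expand parameters and matrices to see the fully processed config
circleci config process .circleci/config.yml > /tmp/processed.yml
```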
.circleci/helm-values/production.proxy.yaml (new file): 38 lines added
@@ -0,0 +1,38 @@
# Helm chart values for zenith-proxy.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
  uri: "https://console.neon.tech/psql_session/"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: production
  zenith_region: us-west-2
  zenith_region_slug: oregon

service:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internal
    external-dns.alpha.kubernetes.io/hostname: proxy-release.local
  type: LoadBalancer

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: start.zenith.tech,connect.neon.tech,pg.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
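These values are consumed by the helm upgrade command in the deploy-release-proxy job above. Run by hand against the production kubeconfig, the invocation would look roughly like the sketch below; the image tag here is illustrative, the release name and repo alias follow the job definition.

```bash
helm repo add zenithdb https://neondatabase.github.io/helm-charts
helm upgrade zenith-proxy zenithdb/zenith-proxy --install \
  -f .circleci/helm-values/production.proxy.yaml \
  --set image.tag=release-1234 --wait
```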
.circleci/helm-values/staging.proxy-scram.yaml (new file): 31 lines added
@@ -0,0 +1,31 @@
# Helm chart values for zenith-proxy.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authBackend: "console"
  authEndpoint: "http://console-staging.local/management/api/v2"
  domain: "*.cloud.stage.neon.tech"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy-scram
  zenith_env: staging
  zenith_region: us-east-1
  zenith_region_slug: virginia

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: cloud.stage.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
.circleci/helm-values/staging.proxy.yaml (new file): 30 lines added
@@ -0,0 +1,30 @@
# Helm chart values for zenith-proxy.
# This is a YAML-formatted file.

image:
  repository: neondatabase/neon

settings:
  authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
  uri: "https://console.stage.neon.tech/psql_session/"

# -- Additional labels for zenith-proxy pods
podLabels:
  zenith_service: proxy
  zenith_env: staging
  zenith_region: us-east-1
  zenith_region_slug: virginia

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: connect.stage.neon.tech

metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    selector:
      release: kube-prometheus-stack
@@ -1,13 +0,0 @@
# Helm chart values for zenith-proxy.
# This is a YAML-formatted file.

settings:
  authEndpoint: "https://console.stage.zenith.tech/authenticate_proxy_request/"
  uri: "https://console.stage.zenith.tech/psql_session/"

exposedService:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: external
    service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
    service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
    external-dns.alpha.kubernetes.io/hostname: start.stage.zenith.tech
@@ -1,138 +0,0 @@
- name: discover storage nodes
  hosts: localhost
  connection: local
  gather_facts: False

  tasks:

  - name: discover safekeepers
    no_log: true
    ec2_instance_info:
      filters:
        "tag:zenith_env": "staging"
        "tag:zenith_service": "safekeeper"
    register: ec2_safekeepers

  - name: discover pageservers
    no_log: true
    ec2_instance_info:
      filters:
        "tag:zenith_env": "staging"
        "tag:zenith_service": "pageserver"
    register: ec2_pageservers

  - name: add safekeepers to host group
    no_log: true
    add_host:
      name: safekeeper-{{ ansible_loop.index }}
      ansible_host: "{{ item.public_ip_address }}"
      groups:
        - storage
        - safekeepers
    with_items: "{{ ec2_safekeepers.instances }}"
    loop_control:
      extended: yes

  - name: add pageservers to host group
    no_log: true
    add_host:
      name: pageserver-{{ ansible_loop.index }}
      ansible_host: "{{ item.public_ip_address }}"
      groups:
        - storage
        - pageservers
    with_items: "{{ ec2_pageservers.instances }}"
    loop_control:
      extended: yes

- name: Retrieve versions
  hosts: storage
  gather_facts: False
  remote_user: admin

  tasks:

  - name: Get current version of binaries
    set_fact:
      current_version: "{{ lookup('file', '../zenith_install/.zenith_current_version') }}"

  - name: Check that file with version exists on host
    stat:
      path: /usr/local/.zenith_current_version
    register: version_file

  - name: Try to get current version from the host
    when: version_file.stat.exists
    ansible.builtin.fetch:
      src: /usr/local/.zenith_current_version
      dest: .remote_version.{{ inventory_hostname }}
      fail_on_missing: no
      flat: yes

  - name: Store remote version to variable
    when: version_file.stat.exists
    set_fact:
      remote_version: "{{ lookup('file', '.remote_version.{{ inventory_hostname }}') }}"

  - name: Store default value of remote version when the remote version file is not found
    when: not version_file.stat.exists
    set_fact:
      remote_version: "000"

- name: Extract Zenith binaries
  hosts: storage
  gather_facts: False
  remote_user: admin

  tasks:

  - name: Inform about version conflict
    when: current_version <= remote_version
    debug: msg="Current version {{ current_version }} LE than remote {{ remote_version }}"

  - name: Extract Zenith binaries to /usr/local
    when: current_version > remote_version
    ansible.builtin.unarchive:
      src: ../zenith_install.tar.gz
      dest: /usr/local
    become: true

- name: Restart safekeepers
  hosts: safekeepers
  gather_facts: False
  remote_user: admin

  tasks:

  - name: Inform about version conflict
    when: current_version <= remote_version
    debug: msg="Current version {{ current_version }} LE than remote {{ remote_version }}"

  - name: Restart systemd service
    when: current_version > remote_version
    ansible.builtin.systemd:
      daemon_reload: yes
      name: safekeeper
      enabled: yes
      state: restarted
    become: true

- name: Restart pageservers
  hosts: pageservers
  gather_facts: False
  remote_user: admin

  tasks:

  - name: Inform about version conflict
    when: current_version <= remote_version
    debug: msg="Current version {{ current_version }} LE than remote {{ remote_version }}"

  - name: Restart systemd service
    when: current_version > remote_version
    ansible.builtin.systemd:
      daemon_reload: yes
      name: pageserver
      enabled: yes
      state: restarted
    become: true
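The playbook above discovers hosts by EC2 tag, upgrades the binaries under /usr/local, and restarts the safekeeper and pageserver services only when the local version is newer; a minimal sketch of an invocation (the playbook file name and the AWS credential setup are assumptions, and boto3/AWS access must be available for ec2_instance_info to work):

    AWS_PROFILE=staging ansible-playbook -i localhost, deploy_storage.yml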
26
.config/hakari.toml
Normal file
@@ -0,0 +1,26 @@
# This file contains settings for `cargo hakari`.
# See https://docs.rs/cargo-hakari/latest/cargo_hakari/config for a full list of options.

hakari-package = "workspace_hack"

# Format for `workspace-hack = ...` lines in other Cargo.tomls. Requires cargo-hakari 0.9.8 or above.
dep-format-version = "2"

# Setting workspace.resolver = "2" in the root Cargo.toml is HIGHLY recommended.
# Hakari works much better with the new feature resolver.
# For more about the new feature resolver, see:
# https://blog.rust-lang.org/2021/03/25/Rust-1.51.0.html#cargos-new-feature-resolver
# We have to keep the resolver here since hakari requires this field,
# even though it is now the default for the 2021 edition and cargo.
resolver = "2"

# Add triples corresponding to platforms commonly used by developers here.
# https://doc.rust-lang.org/rustc/platform-support.html
platforms = [
    # "x86_64-unknown-linux-gnu",
    # "x86_64-apple-darwin",
    # "x86_64-pc-windows-msvc",
]

# Write out exact versions rather than a semver range. (Defaults to false.)
# exact-versions = true
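With this config in place, the workspace_hack crate named above is maintained with the stock cargo-hakari subcommands; a minimal sketch (it assumes cargo-hakari 0.9.8 or newer is installed, which the comments above only imply):

    cargo install cargo-hakari
    cargo hakari generate      # regenerate workspace_hack/Cargo.toml from the workspace
    cargo hakari manage-deps   # add workspace-hack dependency lines to member crates
    cargo hakari verify        # check that the checked-in crate is up to date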
37
.github/workflows/benchmarking.yml
vendored
@@ -3,7 +3,7 @@ name: benchmarking
on:
  # uncomment to run on push for debugging your PR
  # push:
  #  branches: [ mybranch ]
  #  branches: [ your branch ]
  schedule:
    # * is a special character in YAML so you have to quote this string
    # ┌───────────── minute (0 - 59)
@@ -26,7 +26,7 @@ jobs:
    runs-on: [self-hosted, zenith-benchmarker]

    env:
      PG_BIN: "/usr/pgsql-13/bin"
      POSTGRES_DISTRIB_DIR: "/usr/pgsql-13"

    steps:
    - name: Checkout zenith repo
@@ -36,22 +36,22 @@ jobs:
      # see https://github.com/actions/setup-python/issues/162
      # and probably https://github.com/actions/setup-python/issues/162#issuecomment-865387976 in particular
      # so the simplest solution to me is to use already installed system python and spin virtualenvs for job runs.
      # there is Python 3.7.10 already installed on the machine so use it to install pipenv and then use pipenv's virtualenvs
    - name: Install pipenv & deps
      # there is Python 3.7.10 already installed on the machine so use it to install poetry and then use poetry's virtualenvs
    - name: Install poetry & deps
      run: |
        python3 -m pip install --upgrade pipenv wheel
        # since pip/pipenv caches are reused there shouldn't be any troubles with install every time
        pipenv install
        python3 -m pip install --upgrade poetry wheel
        # since pip/poetry caches are reused there shouldn't be any troubles with install every time
        ./scripts/pysync

    - name: Show versions
      run: |
        echo Python
        python3 --version
        pipenv run python3 --version
        echo Pipenv
        pipenv --version
        poetry run python3 --version
        echo Poetry
        poetry --version
        echo Pgbench
        $PG_BIN/pgbench --version
        $POSTGRES_DISTRIB_DIR/bin/pgbench --version

    # FIXME cluster setup is skipped due to various changes in console API
    # for now pre created cluster is used. When API gain some stability
@@ -66,7 +66,7 @@ jobs:

        echo "Starting cluster"
        # wake up the cluster
        $PG_BIN/psql $BENCHMARK_CONNSTR -c "SELECT 1"
        $POSTGRES_DISTRIB_DIR/bin/psql $BENCHMARK_CONNSTR -c "SELECT 1"

    - name: Run benchmark
      # pgbench is installed system wide from official repo
@@ -83,17 +83,24 @@ jobs:
      # sudo yum install postgresql13-contrib
      # actual binaries are located in /usr/pgsql-13/bin/
      env:
        TEST_PG_BENCH_TRANSACTIONS_MATRIX: "5000,10000,20000"
        TEST_PG_BENCH_SCALES_MATRIX: "10,15"
        # The pgbench test runs two tests of given duration against each scale.
        # So the total runtime with these parameters is 2 * 2 * 300 = 1200, or 20 minutes.
        # Plus time needed to initialize the test databases.
        TEST_PG_BENCH_DURATIONS_MATRIX: "300"
        TEST_PG_BENCH_SCALES_MATRIX: "10,100"
        PLATFORM: "zenith-staging"
        BENCHMARK_CONNSTR: "${{ secrets.BENCHMARK_STAGING_CONNSTR }}"
        REMOTE_ENV: "1" # indicate to test harness that we do not have zenith binaries locally
      run: |
        # just to be sure that no data was cached on self hosted runner
        # since it might generate duplicates when calling ingest_perf_test_result.py
        rm -rf perf-report-staging
        mkdir -p perf-report-staging
        pipenv run pytest test_runner/performance/ -v -m "remote_cluster" --skip-interfering-proc-check --out-dir perf-report-staging
        ./scripts/pytest test_runner/performance/ -v -m "remote_cluster" --skip-interfering-proc-check --out-dir perf-report-staging

    - name: Submit result
      env:
        VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
        PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
      run: |
        REPORT_FROM=$(realpath perf-report-staging) REPORT_TO=staging scripts/generate_and_push_perf_report.sh
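The same performance suite can be pointed at a remote cluster from a workstation using only the variables and flags that appear in this job; a minimal sketch (the connection string is a placeholder):

    export BENCHMARK_CONNSTR="postgres://<user>:<password>@<host>/<dbname>"   # placeholder
    export REMOTE_ENV=1   # tell the test harness there are no local zenith binaries
    ./scripts/pytest test_runner/performance/ -v -m "remote_cluster" --skip-interfering-proc-check --out-dir perf-report-staging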
44
.github/workflows/docker-builder.yml
vendored
@@ -1,44 +0,0 @@
## Build docker image zenithdb/build:buster for linux/amd64 and linux/arm64 platforms

name: docker-builder

on:
  push:
    branches:
      - 'docker-multi-platform'
  schedule:
    # * is a special character in YAML so you have to quote this string
    # build daily at 5:30am
    - cron: '30 5 * * *'

jobs:
  docker-builder-buster:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          submodules: false
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Build and push zenithdb/build:buster
        uses: docker/build-push-action@v2
        with:
          push: true
          file: Dockerfile.build
          platforms: linux/amd64,linux/arm64
          cache-from: type=registry,ref=zenithdb/build:buster
          tags: zenithdb/build:buster
29
.github/workflows/testing.yml
vendored
@@ -1,10 +1,6 @@
name: Build and Test

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
on: push

jobs:
  regression-check:
@@ -13,7 +9,7 @@ jobs:
        # If we want to duplicate this job for different
        # Rust toolchains (e.g. nightly or 1.37.0), add them here.
        rust_toolchain: [stable]
        os: [ubuntu-latest]
        os: [ubuntu-latest, macos-latest]
    timeout-minutes: 30
    name: run regression test suite
    runs-on: ${{ matrix.os }}
@@ -32,11 +28,16 @@ jobs:
          toolchain: ${{ matrix.rust_toolchain }}
          override: true

      - name: Install postgres dependencies
      - name: Install Ubuntu postgres dependencies
        if: matrix.os == 'ubuntu-latest'
        run: |
          sudo apt update
          sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev

      - name: Install macOs postgres dependencies
        if: matrix.os == 'macos-latest'
        run: brew install flex bison

      - name: Set pg revision for caching
        id: pg_ver
        run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres)
@@ -51,8 +52,7 @@ jobs:

      - name: Build postgres
        if: steps.cache_pg.outputs.cache-hit != 'true'
        run: |
          make postgres
        run: make postgres

      - name: Cache cargo deps
        id: cache_cargo
@@ -62,13 +62,10 @@ jobs:
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          key: ${{ runner.os }}-cargo-${{ hashFiles('./Cargo.lock') }}

      # Use `env CARGO_INCREMENTAL=0` to mitigate https://github.com/rust-lang/rust/issues/91696 for rustc 1.57.0
      - name: Run cargo build
        run: |
          env CARGO_INCREMENTAL=0 cargo build --workspace --bins --examples --tests
      - name: Run cargo clippy
        run: ./run_clippy.sh

      - name: Run cargo test
        run: |
          env CARGO_INCREMENTAL=0 cargo test -- --nocapture --test-threads=1
        run: cargo test --all --all-targets
3
.gitignore
vendored
@@ -11,3 +11,6 @@ test_output/

# Coverage
*.profraw
*.profdata

*.key
*.crt
2556
Cargo.lock
generated
File diff suppressed because it is too large
14
Cargo.toml
@@ -1,17 +1,21 @@
[workspace]
members = [
    "compute_tools",
    "control_plane",
    "pageserver",
    "postgres_ffi",
    "proxy",
    "walkeeper",
    "safekeeper",
    "workspace_hack",
    "zenith",
    "zenith_metrics",
    "zenith_utils",
    "neon_local",
    "libs/*",
]

[profile.release]
# This is useful for profiling and, to some extent, debug.
# Besides, debug info should not affect the performance.
debug = true

# This is only needed for proxy's tests.
# TODO: we should probably fork `tokio-postgres-rustls` instead.
[patch.crates-io]
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
88
Dockerfile
@@ -1,62 +1,60 @@
#
# Docker image for console integration testing.
#
# Build Postgres
FROM zimg/rust:1.58 AS pg-build
WORKDIR /pg

USER root

COPY vendor/postgres vendor/postgres
COPY Makefile Makefile

#
# Build Postgres separately --- this layer will be rebuilt only if one of
# mentioned paths will get any changes.
#
FROM zenithdb/build:buster AS pg-build
WORKDIR /zenith
COPY ./vendor/postgres vendor/postgres
COPY ./Makefile Makefile
ENV BUILD_TYPE release
RUN make -j $(getconf _NPROCESSORS_ONLN) -s postgres
RUN rm -rf postgres_install/build
RUN set -e \
    && mold -run make -j $(nproc) -s postgres \
    && rm -rf tmp_install/build \
    && tar -C tmp_install -czf /postgres_install.tar.gz .

#
# Build zenith binaries
#
# TODO: build cargo deps as separate layer. We used cargo-chef before but that was
# net time waste in a lot of cases. Copying Cargo.lock with empty lib.rs should do the work.
#
FROM zenithdb/build:buster AS build
FROM zimg/rust:1.58 AS build
ARG GIT_VERSION=local

ARG GIT_VERSION
RUN if [ -z "$GIT_VERSION" ]; then echo "GIT_VERSION is required, use build_arg to pass it"; exit 1; fi

WORKDIR /zenith
COPY --from=pg-build /zenith/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
ARG CACHEPOT_BUCKET=zenith-rust-cachepot
ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY

COPY --from=pg-build /pg/tmp_install/include/postgresql/server tmp_install/include/postgresql/server
COPY . .
RUN GIT_VERSION=$GIT_VERSION cargo build --release

# Show build caching stats to check if it was used in the end.
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
RUN set -e \
    && sudo -E "PATH=$PATH" mold -run cargo build --release \
    && cachepot -s

# Build final image
#
# Copy binaries to resulting image.
#
FROM debian:buster-slim
FROM debian:bullseye-slim
WORKDIR /data

RUN apt-get update && apt-get -yq install libreadline-dev libseccomp-dev openssl ca-certificates && \
    mkdir zenith_install
RUN set -e \
    && apt-get update \
    && apt-get install -y \
        libreadline-dev \
        libseccomp-dev \
        openssl \
        ca-certificates \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
    && useradd -d /data zenith \
    && chown -R zenith:zenith /data

COPY --from=build --chown=zenith:zenith /home/circleci/project/target/release/pageserver /usr/local/bin
COPY --from=build --chown=zenith:zenith /home/circleci/project/target/release/safekeeper /usr/local/bin
COPY --from=build --chown=zenith:zenith /home/circleci/project/target/release/proxy /usr/local/bin

COPY --from=pg-build /pg/tmp_install/ /usr/local/
COPY --from=pg-build /postgres_install.tar.gz /data/

COPY --from=build /zenith/target/release/pageserver /usr/local/bin
COPY --from=build /zenith/target/release/safekeeper /usr/local/bin
COPY --from=build /zenith/target/release/proxy /usr/local/bin
COPY --from=pg-build /zenith/tmp_install postgres_install
COPY docker-entrypoint.sh /docker-entrypoint.sh

# Remove build artifacts (~ 500 MB)
RUN rm -rf postgres_install/build && \
    # 'Install' Postgres binaries locally
    cp -r postgres_install/* /usr/local/ && \
    # Prepare an archive of Postgres binaries (should be around 11 MB)
    # and keep it inside container for an ease of deploy pipeline.
    cd postgres_install && tar -czf /data/postgres_install.tar.gz . && cd .. && \
    rm -rf postgres_install

RUN useradd -d /data zenith && chown -R zenith:zenith /data

VOLUME ["/data"]
USER zenith
EXPOSE 6400
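For local experiments the rewritten image builds in the usual way; a minimal sketch (the tag is arbitrary, and whether the cachepot-related build args are needed depends on the zimg/rust base image, which is an assumption here rather than something this diff states):

    docker build -t zenithdb/zenith:local --build-arg GIT_VERSION=$(git rev-parse HEAD) .

The 15-line deleted hunk that follows appears to be the old zenithdb/build builder image definition (Dockerfile.build, referenced by the deleted docker-builder.yml workflow above), which is no longer needed once the build moves to the zimg/rust:1.58 base.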
@@ -1,15 +0,0 @@
#
# Image with all the required dependencies to build https://github.com/zenithdb/zenith
# and Postgres from https://github.com/zenithdb/postgres
# Also includes some rust development and build tools.
#
FROM rust:slim-buster
WORKDIR /zenith

# Install postgres and zenith build dependencies
# clang is for rocksdb
RUN apt-get update && apt-get -yq install automake libtool build-essential bison flex libreadline-dev zlib1g-dev libxml2-dev \
    libseccomp-dev pkg-config libssl-dev clang

# Install rust tools
RUN rustup component add clippy && cargo install cargo-audit
18
Dockerfile.compute-tools
Normal file
@@ -0,0 +1,18 @@
# First transient image to build compute_tools binaries
# NB: keep in sync with rust image version in .circle/config.yml
FROM zimg/rust:1.58 AS rust-build

ARG CACHEPOT_BUCKET=zenith-rust-cachepot
ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY

COPY . .

RUN set -e \
    && sudo -E "PATH=$PATH" mold -run cargo build -p compute_tools --release \
    && cachepot -s

# Final image that only has one binary
FROM debian:buster-slim

COPY --from=rust-build /home/circleci/project/target/release/zenith_ctl /usr/local/bin/zenith_ctl
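The compute_tools image is built from the repository root against its own Dockerfile; a minimal sketch (the tag is arbitrary):

    docker build -f Dockerfile.compute-tools -t zenithdb/compute-tools:local .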
5
Makefile
@@ -78,6 +78,11 @@ postgres: postgres-configure \
	$(MAKE) -C tmp_install/build/contrib/zenith install
	+@echo "Compiling contrib/zenith_test_utils"
	$(MAKE) -C tmp_install/build/contrib/zenith_test_utils install
	+@echo "Compiling pg_buffercache"
	$(MAKE) -C tmp_install/build/contrib/pg_buffercache install
	+@echo "Compiling pageinspect"
	$(MAKE) -C tmp_install/build/contrib/pageinspect install


.PHONY: postgres-clean
postgres-clean:
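Since pg_buffercache and pageinspect are now installed into tmp_install alongside the other contrib modules, they can be enabled in a running compute node in the usual way; a minimal sketch (connection details are taken from the local-development walkthrough in the README below and will differ per setup):

    psql -h 127.0.0.1 -p 55432 -U zenith_admin -d postgres -c "CREATE EXTENSION pg_buffercache;"
    psql -h 127.0.0.1 -p 55432 -U zenith_admin -d postgres -c "SELECT count(*) FROM pg_buffercache;"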
30
Pipfile
@@ -1,30 +0,0 @@
[[source]]
url = "https://pypi.python.org/simple"
verify_ssl = true
name = "pypi"

[packages]
pytest = ">=6.0.0"
typing-extensions = "*"
pyjwt = {extras = ["crypto"], version = "*"}
requests = "*"
pytest-xdist = "*"
asyncpg = "*"
cached-property = "*"
psycopg2-binary = "*"
jinja2 = "*"

[dev-packages]
# Behavior may change slightly between versions. These are run continuously,
# so we pin exact versions to avoid surprising breaks. Update if comfortable.
yapf = "==0.31.0"
mypy = "==0.910"
# Non-pinned packages follow.
pipenv = "*"
flake8 = "*"
types-requests = "*"
types-psycopg2 = "*"

[requires]
# we need at least 3.7, but pipenv doesn't allow to say this directly
python_version = "3"
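The deleted Pipfile/Pipfile.lock pair is replaced by Poetry elsewhere in this change; the local equivalent of `pipenv install` becomes the following, as the benchmarking workflow and README in this diff already show:

    python3 -m pip install --upgrade poetry wheel
    ./scripts/pysync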
652
Pipfile.lock
generated
@@ -1,652 +0,0 @@
|
||||
{
|
||||
"_meta": {
|
||||
"hash": {
|
||||
"sha256": "c309cb963a7b07ae3d30e9cbf08b495f77bdecc0e5356fc89d133c4fbcb65b2b"
|
||||
},
|
||||
"pipfile-spec": 6,
|
||||
"requires": {
|
||||
"python_version": "3"
|
||||
},
|
||||
"sources": [
|
||||
{
|
||||
"name": "pypi",
|
||||
"url": "https://pypi.python.org/simple",
|
||||
"verify_ssl": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"asyncpg": {
|
||||
"hashes": [
|
||||
"sha256:129d501f3d30616afd51eb8d3142ef51ba05374256bd5834cec3ef4956a9b317",
|
||||
"sha256:29ef6ae0a617fc13cc2ac5dc8e9b367bb83cba220614b437af9b67766f4b6b20",
|
||||
"sha256:41704c561d354bef01353835a7846e5606faabbeb846214dfcf666cf53319f18",
|
||||
"sha256:556b0e92e2b75dc028b3c4bc9bd5162ddf0053b856437cf1f04c97f9c6837d03",
|
||||
"sha256:8ff5073d4b654e34bd5eaadc01dc4d68b8a9609084d835acd364cd934190a08d",
|
||||
"sha256:a458fc69051fbb67d995fdda46d75a012b5d6200f91e17d23d4751482640ed4c",
|
||||
"sha256:a7095890c96ba36f9f668eb552bb020dddb44f8e73e932f8573efc613ee83843",
|
||||
"sha256:a738f4807c853623d3f93f0fea11f61be6b0e5ca16ea8aeb42c2c7ee742aa853",
|
||||
"sha256:c4fc0205fe4ddd5aeb3dfdc0f7bafd43411181e1f5650189608e5971cceacff1",
|
||||
"sha256:dd2fa063c3344823487d9ddccb40802f02622ddf8bf8a6cc53885ee7a2c1c0c6",
|
||||
"sha256:ddffcb85227bf39cd1bedd4603e0082b243cf3b14ced64dce506a15b05232b83",
|
||||
"sha256:e36c6806883786b19551bb70a4882561f31135dc8105a59662e0376cf5b2cbc5",
|
||||
"sha256:eed43abc6ccf1dc02e0d0efc06ce46a411362f3358847c6b0ec9a43426f91ece"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.24.0"
|
||||
},
|
||||
"attrs": {
|
||||
"hashes": [
|
||||
"sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1",
|
||||
"sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
|
||||
"version": "==21.2.0"
|
||||
},
|
||||
"cached-property": {
|
||||
"hashes": [
|
||||
"sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130",
|
||||
"sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.5.2"
|
||||
},
|
||||
"certifi": {
|
||||
"hashes": [
|
||||
"sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872",
|
||||
"sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"
|
||||
],
|
||||
"version": "==2021.10.8"
|
||||
},
|
||||
"cffi": {
|
||||
"hashes": [
|
||||
"sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3",
|
||||
"sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2",
|
||||
"sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636",
|
||||
"sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20",
|
||||
"sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728",
|
||||
"sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27",
|
||||
"sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66",
|
||||
"sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443",
|
||||
"sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0",
|
||||
"sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7",
|
||||
"sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39",
|
||||
"sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605",
|
||||
"sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a",
|
||||
"sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37",
|
||||
"sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029",
|
||||
"sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139",
|
||||
"sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc",
|
||||
"sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df",
|
||||
"sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14",
|
||||
"sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880",
|
||||
"sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2",
|
||||
"sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a",
|
||||
"sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e",
|
||||
"sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474",
|
||||
"sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024",
|
||||
"sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8",
|
||||
"sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0",
|
||||
"sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e",
|
||||
"sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a",
|
||||
"sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e",
|
||||
"sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032",
|
||||
"sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6",
|
||||
"sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e",
|
||||
"sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b",
|
||||
"sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e",
|
||||
"sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954",
|
||||
"sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962",
|
||||
"sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c",
|
||||
"sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4",
|
||||
"sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55",
|
||||
"sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962",
|
||||
"sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023",
|
||||
"sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c",
|
||||
"sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6",
|
||||
"sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8",
|
||||
"sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382",
|
||||
"sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7",
|
||||
"sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc",
|
||||
"sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997",
|
||||
"sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796"
|
||||
],
|
||||
"version": "==1.15.0"
|
||||
},
|
||||
"charset-normalizer": {
|
||||
"hashes": [
|
||||
"sha256:e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0",
|
||||
"sha256:f7af805c321bfa1ce6714c51f254e0d5bb5e5834039bc17db7ebe3a4cec9492b"
|
||||
],
|
||||
"markers": "python_version >= '3'",
|
||||
"version": "==2.0.7"
|
||||
},
|
||||
"cryptography": {
|
||||
"hashes": [
|
||||
"sha256:07bb7fbfb5de0980590ddfc7f13081520def06dc9ed214000ad4372fb4e3c7f6",
|
||||
"sha256:18d90f4711bf63e2fb21e8c8e51ed8189438e6b35a6d996201ebd98a26abbbe6",
|
||||
"sha256:1ed82abf16df40a60942a8c211251ae72858b25b7421ce2497c2eb7a1cee817c",
|
||||
"sha256:22a38e96118a4ce3b97509443feace1d1011d0571fae81fc3ad35f25ba3ea999",
|
||||
"sha256:2d69645f535f4b2c722cfb07a8eab916265545b3475fdb34e0be2f4ee8b0b15e",
|
||||
"sha256:4a2d0e0acc20ede0f06ef7aa58546eee96d2592c00f450c9acb89c5879b61992",
|
||||
"sha256:54b2605e5475944e2213258e0ab8696f4f357a31371e538ef21e8d61c843c28d",
|
||||
"sha256:7075b304cd567694dc692ffc9747f3e9cb393cc4aa4fb7b9f3abd6f5c4e43588",
|
||||
"sha256:7b7ceeff114c31f285528ba8b390d3e9cfa2da17b56f11d366769a807f17cbaa",
|
||||
"sha256:7eba2cebca600a7806b893cb1d541a6e910afa87e97acf2021a22b32da1df52d",
|
||||
"sha256:928185a6d1ccdb816e883f56ebe92e975a262d31cc536429041921f8cb5a62fd",
|
||||
"sha256:9933f28f70d0517686bd7de36166dda42094eac49415459d9bdf5e7df3e0086d",
|
||||
"sha256:a688ebcd08250eab5bb5bca318cc05a8c66de5e4171a65ca51db6bd753ff8953",
|
||||
"sha256:abb5a361d2585bb95012a19ed9b2c8f412c5d723a9836418fab7aaa0243e67d2",
|
||||
"sha256:c10c797ac89c746e488d2ee92bd4abd593615694ee17b2500578b63cad6b93a8",
|
||||
"sha256:ced40344e811d6abba00295ced98c01aecf0c2de39481792d87af4fa58b7b4d6",
|
||||
"sha256:d57e0cdc1b44b6cdf8af1d01807db06886f10177469312fbde8f44ccbb284bc9",
|
||||
"sha256:d99915d6ab265c22873f1b4d6ea5ef462ef797b4140be4c9d8b179915e0985c6",
|
||||
"sha256:eb80e8a1f91e4b7ef8b33041591e6d89b2b8e122d787e87eeb2b08da71bb16ad",
|
||||
"sha256:ebeddd119f526bcf323a89f853afb12e225902a24d29b55fe18dd6fcb2838a76"
|
||||
],
|
||||
"version": "==35.0.0"
|
||||
},
|
||||
"execnet": {
|
||||
"hashes": [
|
||||
"sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5",
|
||||
"sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
|
||||
"version": "==1.9.0"
|
||||
},
|
||||
"idna": {
|
||||
"hashes": [
|
||||
"sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff",
|
||||
"sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"
|
||||
],
|
||||
"markers": "python_version >= '3'",
|
||||
"version": "==3.3"
|
||||
},
|
||||
"importlib-metadata": {
|
||||
"hashes": [
|
||||
"sha256:b618b6d2d5ffa2f16add5697cf57a46c76a56229b0ed1c438322e4e95645bd15",
|
||||
"sha256:f284b3e11256ad1e5d03ab86bb2ccd6f5339688ff17a4d797a0fe7df326f23b1"
|
||||
],
|
||||
"markers": "python_version < '3.8'",
|
||||
"version": "==4.8.1"
|
||||
},
|
||||
"iniconfig": {
|
||||
"hashes": [
|
||||
"sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3",
|
||||
"sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"
|
||||
],
|
||||
"version": "==1.1.1"
|
||||
},
|
||||
"jinja2": {
|
||||
"hashes": [
|
||||
"sha256:827a0e32839ab1600d4eb1c4c33ec5a8edfbc5cb42dafa13b81f182f97784b45",
|
||||
"sha256:8569982d3f0889eed11dd620c706d39b60c36d6d25843961f33f77fb6bc6b20c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.0.2"
|
||||
},
|
||||
"markupsafe": {
|
||||
"hashes": [
|
||||
"sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298",
|
||||
"sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64",
|
||||
"sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b",
|
||||
"sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194",
|
||||
"sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567",
|
||||
"sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff",
|
||||
"sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724",
|
||||
"sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74",
|
||||
"sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646",
|
||||
"sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35",
|
||||
"sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6",
|
||||
"sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a",
|
||||
"sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6",
|
||||
"sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad",
|
||||
"sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26",
|
||||
"sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38",
|
||||
"sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac",
|
||||
"sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7",
|
||||
"sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6",
|
||||
"sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047",
|
||||
"sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75",
|
||||
"sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f",
|
||||
"sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b",
|
||||
"sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135",
|
||||
"sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8",
|
||||
"sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a",
|
||||
"sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a",
|
||||
"sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1",
|
||||
"sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9",
|
||||
"sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864",
|
||||
"sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914",
|
||||
"sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee",
|
||||
"sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f",
|
||||
"sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18",
|
||||
"sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8",
|
||||
"sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2",
|
||||
"sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d",
|
||||
"sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b",
|
||||
"sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b",
|
||||
"sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86",
|
||||
"sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6",
|
||||
"sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f",
|
||||
"sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb",
|
||||
"sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833",
|
||||
"sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28",
|
||||
"sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e",
|
||||
"sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415",
|
||||
"sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902",
|
||||
"sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f",
|
||||
"sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d",
|
||||
"sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9",
|
||||
"sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d",
|
||||
"sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145",
|
||||
"sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066",
|
||||
"sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c",
|
||||
"sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1",
|
||||
"sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a",
|
||||
"sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207",
|
||||
"sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f",
|
||||
"sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53",
|
||||
"sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd",
|
||||
"sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134",
|
||||
"sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85",
|
||||
"sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9",
|
||||
"sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5",
|
||||
"sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94",
|
||||
"sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509",
|
||||
"sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51",
|
||||
"sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"
|
||||
],
|
||||
"markers": "python_version >= '3.6'",
|
||||
"version": "==2.0.1"
|
||||
},
|
||||
"packaging": {
|
||||
"hashes": [
|
||||
"sha256:096d689d78ca690e4cd8a89568ba06d07ca097e3306a4381635073ca91479966",
|
||||
"sha256:14317396d1e8cdb122989b916fa2c7e9ca8e2be9e8060a6eff75b6b7b4d8a7e0"
|
||||
],
|
||||
"markers": "python_version >= '3.6'",
|
||||
"version": "==21.2"
|
||||
},
|
||||
"pluggy": {
|
||||
"hashes": [
|
||||
"sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159",
|
||||
"sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"
|
||||
],
|
||||
"markers": "python_version >= '3.6'",
|
||||
"version": "==1.0.0"
|
||||
},
|
||||
"psycopg2-binary": {
|
||||
"hashes": [
|
||||
"sha256:0b7dae87f0b729922e06f85f667de7bf16455d411971b2043bbd9577af9d1975",
|
||||
"sha256:0f2e04bd2a2ab54fa44ee67fe2d002bb90cee1c0f1cc0ebc3148af7b02034cbd",
|
||||
"sha256:123c3fb684e9abfc47218d3784c7b4c47c8587951ea4dd5bc38b6636ac57f616",
|
||||
"sha256:1473c0215b0613dd938db54a653f68251a45a78b05f6fc21af4326f40e8360a2",
|
||||
"sha256:14db1752acdd2187d99cb2ca0a1a6dfe57fc65c3281e0f20e597aac8d2a5bd90",
|
||||
"sha256:1e3a362790edc0a365385b1ac4cc0acc429a0c0d662d829a50b6ce743ae61b5a",
|
||||
"sha256:1e85b74cbbb3056e3656f1cc4781294df03383127a8114cbc6531e8b8367bf1e",
|
||||
"sha256:20f1ab44d8c352074e2d7ca67dc00843067788791be373e67a0911998787ce7d",
|
||||
"sha256:24b0b6688b9f31a911f2361fe818492650795c9e5d3a1bc647acbd7440142a4f",
|
||||
"sha256:2f62c207d1740b0bde5c4e949f857b044818f734a3d57f1d0d0edc65050532ed",
|
||||
"sha256:3242b9619de955ab44581a03a64bdd7d5e470cc4183e8fcadd85ab9d3756ce7a",
|
||||
"sha256:35c4310f8febe41f442d3c65066ca93cccefd75013df3d8c736c5b93ec288140",
|
||||
"sha256:4235f9d5ddcab0b8dbd723dca56ea2922b485ea00e1dafacf33b0c7e840b3d32",
|
||||
"sha256:542875f62bc56e91c6eac05a0deadeae20e1730be4c6334d8f04c944fcd99759",
|
||||
"sha256:5ced67f1e34e1a450cdb48eb53ca73b60aa0af21c46b9b35ac3e581cf9f00e31",
|
||||
"sha256:661509f51531ec125e52357a489ea3806640d0ca37d9dada461ffc69ee1e7b6e",
|
||||
"sha256:7360647ea04db2e7dff1648d1da825c8cf68dc5fbd80b8fb5b3ee9f068dcd21a",
|
||||
"sha256:736b8797b58febabb85494142c627bd182b50d2a7ec65322983e71065ad3034c",
|
||||
"sha256:8c13d72ed6af7fd2c8acbd95661cf9477f94e381fce0792c04981a8283b52917",
|
||||
"sha256:988b47ac70d204aed01589ed342303da7c4d84b56c2f4c4b8b00deda123372bf",
|
||||
"sha256:995fc41ebda5a7a663a254a1dcac52638c3e847f48307b5416ee373da15075d7",
|
||||
"sha256:a36c7eb6152ba5467fb264d73844877be8b0847874d4822b7cf2d3c0cb8cdcb0",
|
||||
"sha256:aed4a9a7e3221b3e252c39d0bf794c438dc5453bc2963e8befe9d4cd324dff72",
|
||||
"sha256:aef9aee84ec78af51107181d02fe8773b100b01c5dfde351184ad9223eab3698",
|
||||
"sha256:b0221ca5a9837e040ebf61f48899926b5783668b7807419e4adae8175a31f773",
|
||||
"sha256:b4d7679a08fea64573c969f6994a2631908bb2c0e69a7235648642f3d2e39a68",
|
||||
"sha256:c250a7ec489b652c892e4f0a5d122cc14c3780f9f643e1a326754aedf82d9a76",
|
||||
"sha256:ca86db5b561b894f9e5f115d6a159fff2a2570a652e07889d8a383b5fae66eb4",
|
||||
"sha256:cfc523edecddaef56f6740d7de1ce24a2fdf94fd5e704091856a201872e37f9f",
|
||||
"sha256:d92272c7c16e105788efe2cfa5d680f07e34e0c29b03c1908f8636f55d5f915a",
|
||||
"sha256:da113b70f6ec40e7d81b43d1b139b9db6a05727ab8be1ee559f3a69854a69d34",
|
||||
"sha256:f6fac64a38f6768e7bc7b035b9e10d8a538a9fadce06b983fb3e6fa55ac5f5ce",
|
||||
"sha256:f8559617b1fcf59a9aedba2c9838b5b6aa211ffedecabca412b92a1ff75aac1a",
|
||||
"sha256:fbb42a541b1093385a2d8c7eec94d26d30437d0e77c1d25dae1dcc46741a385e"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.9.1"
|
||||
},
|
||||
"py": {
|
||||
"hashes": [
|
||||
"sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3",
|
||||
"sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==1.10.0"
|
||||
},
|
||||
"pycparser": {
|
||||
"hashes": [
|
||||
"sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0",
|
||||
"sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==2.20"
|
||||
},
|
||||
"pyjwt": {
|
||||
"extras": [
|
||||
"crypto"
|
||||
],
|
||||
"hashes": [
|
||||
"sha256:b888b4d56f06f6dcd777210c334e69c737be74755d3e5e9ee3fe67dc18a0ee41",
|
||||
"sha256:e0c4bb8d9f0af0c7f5b1ec4c5036309617d03d56932877f2f7a0beeb5318322f"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.3.0"
|
||||
},
|
||||
"pyparsing": {
|
||||
"hashes": [
|
||||
"sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1",
|
||||
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"
|
||||
],
|
||||
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==2.4.7"
|
||||
},
|
||||
"pytest": {
|
||||
"hashes": [
|
||||
"sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89",
|
||||
"sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==6.2.5"
|
||||
},
|
||||
"pytest-forked": {
|
||||
"hashes": [
|
||||
"sha256:6aa9ac7e00ad1a539c41bec6d21011332de671e938c7637378ec9710204e37ca",
|
||||
"sha256:dc4147784048e70ef5d437951728825a131b81714b398d5d52f17c7c144d8815"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
|
||||
"version": "==1.3.0"
|
||||
},
|
||||
"pytest-xdist": {
|
||||
"hashes": [
|
||||
"sha256:7b61ebb46997a0820a263553179d6d1e25a8c50d8a8620cd1aa1e20e3be99168",
|
||||
"sha256:89b330316f7fc475f999c81b577c2b926c9569f3d397ae432c0c2e2496d61ff9"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.4.0"
|
||||
},
|
||||
"requests": {
|
||||
"hashes": [
|
||||
"sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24",
|
||||
"sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.26.0"
|
||||
},
|
||||
"toml": {
|
||||
"hashes": [
|
||||
"sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
|
||||
"sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
|
||||
],
|
||||
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==0.10.2"
|
||||
},
|
||||
"typing-extensions": {
|
||||
"hashes": [
|
||||
"sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e",
|
||||
"sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7",
|
||||
"sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.10.0.2"
|
||||
},
|
||||
"urllib3": {
|
||||
"hashes": [
|
||||
"sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece",
|
||||
"sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'",
|
||||
"version": "==1.26.7"
|
||||
},
|
||||
"zipp": {
|
||||
"hashes": [
|
||||
"sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832",
|
||||
"sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"
|
||||
],
|
||||
"markers": "python_version >= '3.6'",
|
||||
"version": "==3.6.0"
|
||||
}
|
||||
},
|
||||
"develop": {
|
||||
"backports.entry-points-selectable": {
|
||||
"hashes": [
|
||||
"sha256:988468260ec1c196dab6ae1149260e2f5472c9110334e5d51adcb77867361f6a",
|
||||
"sha256:a6d9a871cde5e15b4c4a53e3d43ba890cc6861ec1332c9c2428c92f977192acc"
|
||||
],
|
||||
"markers": "python_version >= '2.7'",
|
||||
"version": "==1.1.0"
|
||||
},
|
||||
"certifi": {
|
||||
"hashes": [
|
||||
"sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872",
|
||||
"sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"
|
||||
],
|
||||
"version": "==2021.10.8"
|
||||
},
|
||||
"distlib": {
|
||||
"hashes": [
|
||||
"sha256:c8b54e8454e5bf6237cc84c20e8264c3e991e824ef27e8f1e81049867d861e31",
|
||||
"sha256:d982d0751ff6eaaab5e2ec8e691d949ee80eddf01a62eaa96ddb11531fe16b05"
|
||||
],
|
||||
"version": "==0.3.3"
|
||||
},
|
||||
"filelock": {
|
||||
"hashes": [
|
||||
"sha256:7afc856f74fa7006a289fd10fa840e1eebd8bbff6bffb69c26c54a0512ea8cf8",
|
||||
"sha256:bb2a1c717df74c48a2d00ed625e5a66f8572a3a30baacb7657add1d7bac4097b"
|
||||
],
|
||||
"markers": "python_version >= '3.6'",
|
||||
"version": "==3.3.2"
|
||||
},
|
||||
"flake8": {
|
||||
"hashes": [
|
||||
"sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d",
|
||||
"sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==4.0.1"
|
||||
},
|
||||
"importlib-metadata": {
|
||||
"hashes": [
|
||||
"sha256:b618b6d2d5ffa2f16add5697cf57a46c76a56229b0ed1c438322e4e95645bd15",
|
||||
"sha256:f284b3e11256ad1e5d03ab86bb2ccd6f5339688ff17a4d797a0fe7df326f23b1"
|
||||
],
|
||||
"markers": "python_version < '3.8'",
|
||||
"version": "==4.8.1"
|
||||
},
|
||||
"mccabe": {
|
||||
"hashes": [
|
||||
"sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42",
|
||||
"sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"
|
||||
],
|
||||
"version": "==0.6.1"
|
||||
},
|
||||
"mypy": {
|
||||
"hashes": [
|
||||
"sha256:088cd9c7904b4ad80bec811053272986611b84221835e079be5bcad029e79dd9",
|
||||
"sha256:0aadfb2d3935988ec3815952e44058a3100499f5be5b28c34ac9d79f002a4a9a",
|
||||
"sha256:119bed3832d961f3a880787bf621634ba042cb8dc850a7429f643508eeac97b9",
|
||||
"sha256:1a85e280d4d217150ce8cb1a6dddffd14e753a4e0c3cf90baabb32cefa41b59e",
|
||||
"sha256:3c4b8ca36877fc75339253721f69603a9c7fdb5d4d5a95a1a1b899d8b86a4de2",
|
||||
"sha256:3e382b29f8e0ccf19a2df2b29a167591245df90c0b5a2542249873b5c1d78212",
|
||||
"sha256:42c266ced41b65ed40a282c575705325fa7991af370036d3f134518336636f5b",
|
||||
"sha256:53fd2eb27a8ee2892614370896956af2ff61254c275aaee4c230ae771cadd885",
|
||||
"sha256:704098302473cb31a218f1775a873b376b30b4c18229421e9e9dc8916fd16150",
|
||||
"sha256:7df1ead20c81371ccd6091fa3e2878559b5c4d4caadaf1a484cf88d93ca06703",
|
||||
"sha256:866c41f28cee548475f146aa4d39a51cf3b6a84246969f3759cb3e9c742fc072",
|
||||
"sha256:a155d80ea6cee511a3694b108c4494a39f42de11ee4e61e72bc424c490e46457",
|
||||
"sha256:adaeee09bfde366d2c13fe6093a7df5df83c9a2ba98638c7d76b010694db760e",
|
||||
"sha256:b6fb13123aeef4a3abbcfd7e71773ff3ff1526a7d3dc538f3929a49b42be03f0",
|
||||
"sha256:b94e4b785e304a04ea0828759172a15add27088520dc7e49ceade7834275bedb",
|
||||
"sha256:c0df2d30ed496a08de5daed2a9ea807d07c21ae0ab23acf541ab88c24b26ab97",
|
||||
"sha256:c6c2602dffb74867498f86e6129fd52a2770c48b7cd3ece77ada4fa38f94eba8",
|
||||
"sha256:ceb6e0a6e27fb364fb3853389607cf7eb3a126ad335790fa1e14ed02fba50811",
|
||||
"sha256:d9dd839eb0dc1bbe866a288ba3c1afc33a202015d2ad83b31e875b5905a079b6",
|
||||
"sha256:e4dab234478e3bd3ce83bac4193b2ecd9cf94e720ddd95ce69840273bf44f6de",
|
||||
"sha256:ec4e0cd079db280b6bdabdc807047ff3e199f334050db5cbb91ba3e959a67504",
|
||||
"sha256:ecd2c3fe726758037234c93df7e98deb257fd15c24c9180dacf1ef829da5f921",
|
||||
"sha256:ef565033fa5a958e62796867b1df10c40263ea9ded87164d67572834e57a174d"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.910"
|
||||
},
|
||||
"mypy-extensions": {
|
||||
"hashes": [
|
||||
"sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d",
|
||||
"sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"
|
||||
],
|
||||
"version": "==0.4.3"
|
||||
},
|
||||
"pipenv": {
|
||||
"hashes": [
|
||||
"sha256:05958fadcd70b2de6a27542fcd2bd72dd5c59c6d35307fdac3e06361fb06e30e",
|
||||
"sha256:d180f5be4775c552fd5e69ae18a9d6099d9dafb462efe54f11c72cb5f4d5e977"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2021.5.29"
|
||||
},
|
||||
"platformdirs": {
|
||||
"hashes": [
|
||||
"sha256:367a5e80b3d04d2428ffa76d33f124cf11e8fff2acdaa9b43d545f5c7d661ef2",
|
||||
"sha256:8868bbe3c3c80d42f20156f22e7131d2fb321f5bc86a2a345375c6481a67021d"
|
||||
],
|
||||
"markers": "python_version >= '3.6'",
|
||||
"version": "==2.4.0"
|
||||
},
|
||||
"pycodestyle": {
|
||||
"hashes": [
|
||||
"sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20",
|
||||
"sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
|
||||
"version": "==2.8.0"
|
||||
},
|
||||
"pyflakes": {
|
||||
"hashes": [
|
||||
"sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c",
|
||||
"sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==2.4.0"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
|
||||
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==1.16.0"
|
||||
},
|
||||
"toml": {
|
||||
"hashes": [
|
||||
"sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b",
|
||||
"sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"
|
||||
],
|
||||
"markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==0.10.2"
|
||||
},
|
||||
"typed-ast": {
|
||||
"hashes": [
|
||||
"sha256:01ae5f73431d21eead5015997ab41afa53aa1fbe252f9da060be5dad2c730ace",
|
||||
"sha256:067a74454df670dcaa4e59349a2e5c81e567d8d65458d480a5b3dfecec08c5ff",
|
||||
"sha256:0fb71b8c643187d7492c1f8352f2c15b4c4af3f6338f21681d3681b3dc31a266",
|
||||
"sha256:1b3ead4a96c9101bef08f9f7d1217c096f31667617b58de957f690c92378b528",
|
||||
"sha256:2068531575a125b87a41802130fa7e29f26c09a2833fea68d9a40cf33902eba6",
|
||||
"sha256:209596a4ec71d990d71d5e0d312ac935d86930e6eecff6ccc7007fe54d703808",
|
||||
"sha256:2c726c276d09fc5c414693a2de063f521052d9ea7c240ce553316f70656c84d4",
|
||||
"sha256:398e44cd480f4d2b7ee8d98385ca104e35c81525dd98c519acff1b79bdaac363",
|
||||
"sha256:52b1eb8c83f178ab787f3a4283f68258525f8d70f778a2f6dd54d3b5e5fb4341",
|
||||
"sha256:5feca99c17af94057417d744607b82dd0a664fd5e4ca98061480fd8b14b18d04",
|
||||
"sha256:7538e495704e2ccda9b234b82423a4038f324f3a10c43bc088a1636180f11a41",
|
||||
"sha256:760ad187b1041a154f0e4d0f6aae3e40fdb51d6de16e5c99aedadd9246450e9e",
|
||||
"sha256:777a26c84bea6cd934422ac2e3b78863a37017618b6e5c08f92ef69853e765d3",
|
||||
"sha256:95431a26309a21874005845c21118c83991c63ea800dd44843e42a916aec5899",
|
||||
"sha256:9ad2c92ec681e02baf81fdfa056fe0d818645efa9af1f1cd5fd6f1bd2bdfd805",
|
||||
"sha256:9c6d1a54552b5330bc657b7ef0eae25d00ba7ffe85d9ea8ae6540d2197a3788c",
|
||||
"sha256:aee0c1256be6c07bd3e1263ff920c325b59849dc95392a05f258bb9b259cf39c",
|
||||
"sha256:af3d4a73793725138d6b334d9d247ce7e5f084d96284ed23f22ee626a7b88e39",
|
||||
"sha256:b36b4f3920103a25e1d5d024d155c504080959582b928e91cb608a65c3a49e1a",
|
||||
"sha256:b9574c6f03f685070d859e75c7f9eeca02d6933273b5e69572e5ff9d5e3931c3",
|
||||
"sha256:bff6ad71c81b3bba8fa35f0f1921fb24ff4476235a6e94a26ada2e54370e6da7",
|
||||
"sha256:c190f0899e9f9f8b6b7863debfb739abcb21a5c054f911ca3596d12b8a4c4c7f",
|
||||
"sha256:c907f561b1e83e93fad565bac5ba9c22d96a54e7ea0267c708bffe863cbe4075",
|
||||
"sha256:cae53c389825d3b46fb37538441f75d6aecc4174f615d048321b716df2757fb0",
|
||||
"sha256:dd4a21253f42b8d2b48410cb31fe501d32f8b9fbeb1f55063ad102fe9c425e40",
|
||||
"sha256:dde816ca9dac1d9c01dd504ea5967821606f02e510438120091b84e852367428",
|
||||
"sha256:f2362f3cb0f3172c42938946dbc5b7843c2a28aec307c49100c8b38764eb6927",
|
||||
"sha256:f328adcfebed9f11301eaedfa48e15bdece9b519fb27e6a8c01aa52a17ec31b3",
|
||||
"sha256:f8afcf15cc511ada719a88e013cec87c11aff7b91f019295eb4530f96fe5ef2f",
|
||||
"sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"
|
||||
],
|
||||
"markers": "python_version < '3.8'",
|
||||
"version": "==1.4.3"
|
||||
},
|
||||
"types-psycopg2": {
|
||||
"hashes": [
|
||||
"sha256:77ed80f2668582654623e04fb3d741ecce93effcc39c929d7e02f4a917a538ce",
|
||||
"sha256:98a6e0e9580cd7eb4bd4d20f7c7063d154b2589a2b90c0ce4e3ca6085cde77c6"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.9.1"
|
||||
},
|
||||
"types-requests": {
|
||||
"hashes": [
|
||||
"sha256:b279284e51f668e38ee12d9665e4d789089f532dc2a0be4a1508ca0efd98ba9e",
|
||||
"sha256:ba1d108d512e294b6080c37f6ae7cb2a2abf527560e2b671d1786c1fc46b541a"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.25.11"
|
||||
},
|
||||
"typing-extensions": {
|
||||
"hashes": [
|
||||
"sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e",
|
||||
"sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7",
|
||||
"sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.10.0.2"
|
||||
},
|
||||
"virtualenv": {
|
||||
"hashes": [
|
||||
"sha256:4b02e52a624336eece99c96e3ab7111f469c24ba226a53ec474e8e787b365814",
|
||||
"sha256:576d05b46eace16a9c348085f7d0dc8ef28713a2cabaa1cf0aea41e8f12c9218"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
|
||||
"version": "==20.10.0"
|
||||
},
|
||||
"virtualenv-clone": {
|
||||
"hashes": [
|
||||
"sha256:418ee935c36152f8f153c79824bb93eaf6f0f7984bae31d3f48f350b9183501a",
|
||||
"sha256:44d5263bceed0bac3e1424d64f798095233b64def1c5689afa43dc3223caf5b0"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==0.5.7"
|
||||
},
|
||||
"yapf": {
|
||||
"hashes": [
|
||||
"sha256:408fb9a2b254c302f49db83c59f9aa0b4b0fd0ec25be3a5c51181327922ff63d",
|
||||
"sha256:e3a234ba8455fe201eaa649cdac872d590089a18b661e39bbac7020978dd9c2e"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.31.0"
|
||||
},
|
||||
"zipp": {
|
||||
"hashes": [
|
||||
"sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832",
|
||||
"sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"
|
||||
],
|
||||
"markers": "python_version >= '3.6'",
|
||||
"version": "==3.6.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
99
README.md
99
README.md
@@ -1,19 +1,22 @@
|
||||
# Zenith
|
||||
# Neon
|
||||
|
||||
Zenith is a serverless open source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes PostgreSQL storage layer by redistributing data across a cluster of nodes.
|
||||
Neon is a serverless open source alternative to AWS Aurora Postgres. It separates storage and compute and substitutes PostgreSQL storage layer by redistributing data across a cluster of nodes.
|
||||
|
||||
The project used to be called "Zenith". Many of the commands and code comments
|
||||
still refer to "zenith", but we are in the process of renaming things.
|
||||
|
||||
## Architecture overview
|
||||
|
||||
A Zenith installation consists of compute nodes and Zenith storage engine.
|
||||
A Neon installation consists of compute nodes and Neon storage engine.
|
||||
|
||||
Compute nodes are stateless PostgreSQL nodes, backed by Zenith storage engine.
|
||||
Compute nodes are stateless PostgreSQL nodes, backed by Neon storage engine.
|
||||
|
||||
Zenith storage engine consists of two major components:
|
||||
Neon storage engine consists of two major components:
|
||||
- Pageserver. Scalable storage backend for compute nodes.
|
||||
- WAL service. The service that receives WAL from compute node and ensures that it is stored durably.
|
||||
|
||||
Pageserver consists of:
|
||||
- Repository - Zenith storage implementation.
|
||||
- Repository - Neon storage implementation.
|
||||
- WAL receiver - service that receives WAL from WAL service and stores it in the repository.
|
||||
- Page service - service that communicates with compute nodes and responds with pages from the repository.
|
||||
- WAL redo - service that builds pages from base images and WAL records on Page service request.
|
||||
@@ -28,17 +31,17 @@ apt install build-essential libtool libreadline-dev zlib1g-dev flex bison libsec
|
||||
libssl-dev clang pkg-config libpq-dev
|
||||
```
|
||||
|
||||
[Rust] 1.55 or later is also required.
|
||||
[Rust] 1.58 or later is also required.
|
||||
|
||||
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `tmp_install/bin` and `tmp_install/lib`, respectively.
|
||||
|
||||
To run the integration tests or Python scripts (not required to use the code), install
|
||||
Python (3.7 or higher), and install python3 packages using `pipenv install` in the project directory.
|
||||
Python (3.7 or higher), and install python3 packages using `./scripts/pysync` (requires poetry) in the project directory.
|
||||
|
||||
2. Build zenith and patched postgres
|
||||
2. Build neon and patched postgres
|
||||
```sh
|
||||
git clone --recursive https://github.com/zenithdb/zenith.git
|
||||
cd zenith
|
||||
git clone --recursive https://github.com/neondatabase/neon.git
|
||||
cd neon
|
||||
make -j5
|
||||
```
|
||||
|
||||
@@ -46,32 +49,30 @@ make -j5
|
||||
```sh
|
||||
# Create repository in .zenith with proper paths to binaries and data
|
||||
# Later that would be responsibility of a package install script
|
||||
> ./target/debug/zenith init
|
||||
initializing tenantid c03ba6b7ad4c5e9cf556f059ade44229
|
||||
created initial timeline 5b014a9e41b4b63ce1a1febc04503636 timeline.lsn 0/169C3C8
|
||||
created main branch
|
||||
> ./target/debug/neon_local init
|
||||
initializing tenantid 9ef87a5bf0d92544f6fafeeb3239695c
|
||||
created initial timeline de200bd42b49cc1814412c7e592dd6e9 timeline.lsn 0/16B5A50
|
||||
initial timeline de200bd42b49cc1814412c7e592dd6e9 created
|
||||
pageserver init succeeded
|
||||
|
||||
# start pageserver and safekeeper
|
||||
> ./target/debug/zenith start
|
||||
Starting pageserver at 'localhost:64000' in '.zenith'
|
||||
> ./target/debug/neon_local start
|
||||
Starting pageserver at '127.0.0.1:64000' in '.zenith'
|
||||
Pageserver started
|
||||
initializing for single for 7676
|
||||
Starting safekeeper at 'localhost:5454' in '.zenith/safekeepers/single'
|
||||
initializing for sk 1 for 7676
|
||||
Starting safekeeper at '127.0.0.1:5454' in '.zenith/safekeepers/sk1'
|
||||
Safekeeper started
|
||||
|
||||
# start postgres compute node
|
||||
> ./target/debug/zenith pg start main
|
||||
Starting new postgres main on main...
|
||||
Extracting base backup to create postgres instance: path=.zenith/pgdatadirs/tenants/c03ba6b7ad4c5e9cf556f059ade44229/main port=55432
|
||||
> ./target/debug/neon_local pg start main
|
||||
Starting new postgres main on timeline de200bd42b49cc1814412c7e592dd6e9 ...
|
||||
Extracting base backup to create postgres instance: path=.zenith/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/main port=55432
|
||||
Starting postgres node at 'host=127.0.0.1 port=55432 user=zenith_admin dbname=postgres'
|
||||
waiting for server to start.... done
|
||||
server started
|
||||
|
||||
# check list of running postgres instances
|
||||
> ./target/debug/zenith pg list
|
||||
BRANCH ADDRESS LSN STATUS
|
||||
main 127.0.0.1:55432 0/1609610 running
|
||||
> ./target/debug/neon_local pg list
|
||||
NODE ADDRESS TIMELINE BRANCH NAME LSN STATUS
|
||||
main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16B5BA8 running
|
||||
```
|
||||
|
||||
4. Now it is possible to connect to postgres and run some queries:
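The diff collapses the actual example here, so the session below is only an illustrative sketch. It is consistent with the table `t` that the later branching steps query, but the column types are assumed.

```sh
> psql -p55432 -h 127.0.0.1 -U zenith_admin postgres
postgres=# create table t(key int primary key, value int);
CREATE TABLE
postgres=# insert into t values(1, 1);
INSERT 0 1
postgres=# select * from t;
 key | value
-----+-------
   1 |     1
(1 row)
```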
@@ -91,18 +92,25 @@ postgres=# select * from t;
|
||||
5. And create branches and run postgres on them:
|
||||
```sh
|
||||
# create branch named migration_check
|
||||
> ./target/debug/zenith branch migration_check main
|
||||
Created branch 'migration_check' at 0/1609610
|
||||
> ./target/debug/neon_local timeline branch --branch-name migration_check
|
||||
Created timeline 'b3b863fa45fa9e57e615f9f2d944e601' at Lsn 0/16F9A00 for tenant: 9ef87a5bf0d92544f6fafeeb3239695c. Ancestor timeline: 'main'
|
||||
|
||||
# check branches tree
|
||||
> ./target/debug/zenith branch
|
||||
main
|
||||
┗━ @0/1609610: migration_check
|
||||
> ./target/debug/neon_local timeline list
|
||||
(L) main [de200bd42b49cc1814412c7e592dd6e9]
|
||||
(L) ┗━ @0/16F9A00: migration_check [b3b863fa45fa9e57e615f9f2d944e601]
|
||||
|
||||
# start postgres on that branch
|
||||
> ./target/debug/zenith pg start migration_check
|
||||
Starting postgres node at 'host=127.0.0.1 port=55433 user=stas'
|
||||
waiting for server to start.... done
|
||||
> ./target/debug/neon_local pg start migration_check --branch-name migration_check
|
||||
Starting new postgres migration_check on timeline b3b863fa45fa9e57e615f9f2d944e601 ...
|
||||
Extracting base backup to create postgres instance: path=.zenith/pgdatadirs/tenants/9ef87a5bf0d92544f6fafeeb3239695c/migration_check port=55433
|
||||
Starting postgres node at 'host=127.0.0.1 port=55433 user=zenith_admin dbname=postgres'
|
||||
|
||||
# check the new list of running postgres instances
|
||||
> ./target/debug/neon_local pg list
|
||||
NODE ADDRESS TIMELINE BRANCH NAME LSN STATUS
|
||||
main 127.0.0.1:55432 de200bd42b49cc1814412c7e592dd6e9 main 0/16F9A38 running
|
||||
migration_check 127.0.0.1:55433 b3b863fa45fa9e57e615f9f2d944e601 migration_check 0/16F9A70 running
|
||||
|
||||
# this new postgres instance will have all the data from 'main' postgres,
|
||||
# but all modifications would not affect data in original postgres
|
||||
@@ -115,21 +123,28 @@ postgres=# select * from t;
|
||||
|
||||
postgres=# insert into t values(2,2);
|
||||
INSERT 0 1
|
||||
|
||||
# check that the new change doesn't affect the 'main' postgres
|
||||
> psql -p55432 -h 127.0.0.1 -U zenith_admin postgres
|
||||
postgres=# select * from t;
|
||||
key | value
|
||||
-----+-------
|
||||
1 | 1
|
||||
(1 row)
|
||||
```
|
||||
|
||||
6. If you want to run tests afterwards (see below), you have to stop all the running pageserver, safekeeper and postgres instances
you have just started. You can stop them all with one command:
|
||||
```sh
|
||||
> ./target/debug/zenith stop
|
||||
> ./target/debug/neon_local stop
|
||||
```
|
||||
|
||||
## Running tests
|
||||
|
||||
```sh
|
||||
git clone --recursive https://github.com/zenithdb/zenith.git
|
||||
git clone --recursive https://github.com/neondatabase/neon.git
|
||||
make # builds also postgres and installs it to ./tmp_install
|
||||
cd test_runner
|
||||
pipenv run pytest
|
||||
./scripts/pytest
|
||||
```
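To run just part of the suite, standard pytest selection can be used. Whether `./scripts/pytest` forwards extra arguments to pytest is an assumption here and not verified by this diff; if it does not, invoke pytest directly from `test_runner` in your poetry environment.

```sh
# Assumption: the wrapper passes extra flags through to pytest.
# "some_test_name" is a placeholder for a pytest -k expression.
./scripts/pytest -k some_test_name
```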
## Documentation
|
||||
@@ -142,14 +157,14 @@ To view your `rustdoc` documentation in a browser, try running `cargo doc --no-d
|
||||
|
||||
### Postgres-specific terms
|
||||
|
||||
Due to Zenith's very close relation with PostgreSQL internals, there are numerous specific terms used.
|
||||
Due to Neon's very close relation with PostgreSQL internals, there are numerous specific terms used.
|
||||
The same applies to certain spelling: e.g. we use MB to denote 1024 * 1024 bytes; while MiB would be technically more correct, it would be inconsistent with what PostgreSQL code and its documentation use.
|
||||
|
||||
To get more familiar with this aspect, refer to:
|
||||
|
||||
- [Zenith glossary](/docs/glossary.md)
|
||||
- [Neon glossary](/docs/glossary.md)
|
||||
- [PostgreSQL glossary](https://www.postgresql.org/docs/13/glossary.html)
|
||||
- Other PostgreSQL documentation and sources (Zenith fork sources can be found [here](https://github.com/zenithdb/postgres))
|
||||
- Other PostgreSQL documentation and sources (Neon fork sources can be found [here](https://github.com/neondatabase/postgres))
|
||||
|
||||
## Join the development
compute_tools/Cargo.lock (generated, 1161 changed lines): file diff suppressed because it is too large
@@ -1,28 +1,21 @@
|
||||
[package]
|
||||
name = "compute_tools"
|
||||
version = "0.1.0"
|
||||
authors = ["Alexey Kondratov <kondratov.aleksey@gmail.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[workspace]
|
||||
# TODO: make it a part of the global zenith workspace
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
libc = "0.2"
|
||||
anyhow = "1.0"
|
||||
chrono = "0.4"
|
||||
clap = "2.33"
|
||||
env_logger = "0.8"
|
||||
clap = "3.0"
|
||||
env_logger = "0.9"
|
||||
hyper = { version = "0.14", features = ["full"] }
|
||||
log = { version = "0.4", features = ["std", "serde"] }
|
||||
postgres = "0.19"
|
||||
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
||||
regex = "1"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
serde_json = "1"
|
||||
tar = "0.4"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
|
||||
[profile.release]
|
||||
debug = true
|
||||
tokio = { version = "1.17", features = ["macros", "rt", "rt-multi-thread"] }
|
||||
tokio-postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
||||
workspace_hack = { version = "0.1", path = "../workspace_hack" }
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
# First transient image to build compute_tools binaries
|
||||
FROM rust:slim-buster AS rust-build
|
||||
|
||||
RUN mkdir /compute_tools
|
||||
WORKDIR /compute_tools
|
||||
|
||||
COPY . /compute_tools/
|
||||
|
||||
RUN cargo build --release
|
||||
|
||||
# Final image that only has one binary
|
||||
FROM debian:buster-slim
|
||||
|
||||
COPY --from=rust-build /compute_tools/target/release/zenith_ctl /usr/local/bin/zenith_ctl
|
||||
@@ -27,17 +27,18 @@
|
||||
//! ```
|
||||
//!
|
||||
use std::fs::File;
|
||||
use std::panic;
|
||||
use std::path::Path;
|
||||
use std::process::{exit, Command, ExitStatus};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::{env, panic};
|
||||
|
||||
use anyhow::Result;
|
||||
use anyhow::{Context, Result};
|
||||
use chrono::Utc;
|
||||
use libc::{prctl, PR_SET_PDEATHSIG, SIGINT};
|
||||
use clap::Arg;
|
||||
use log::info;
|
||||
use postgres::{Client, NoTls};
|
||||
|
||||
use compute_tools::checker::create_writablity_check_data;
|
||||
use compute_tools::config;
|
||||
use compute_tools::http_api::launch_http_server;
|
||||
use compute_tools::logger::*;
|
||||
@@ -70,7 +71,7 @@ fn prepare_pgdata(state: &Arc<RwLock<ComputeState>>) -> Result<()> {
|
||||
.expect("tenant id should be provided");
|
||||
|
||||
info!(
|
||||
"applying spec for cluster #{}, operation #{}",
|
||||
"starting cluster #{}, operation #{}",
|
||||
spec.cluster.cluster_id,
|
||||
spec.operation_uuid.as_ref().unwrap()
|
||||
);
|
||||
@@ -80,10 +81,23 @@ fn prepare_pgdata(state: &Arc<RwLock<ComputeState>>) -> Result<()> {
|
||||
config::write_postgres_conf(&pgdata_path.join("postgresql.conf"), spec)?;
|
||||
|
||||
info!("starting safekeepers syncing");
|
||||
let lsn = sync_safekeepers(&state.pgdata, &state.pgbin)?;
|
||||
let lsn = sync_safekeepers(&state.pgdata, &state.pgbin)
|
||||
.with_context(|| "failed to sync safekeepers")?;
|
||||
info!("safekeepers synced at LSN {}", lsn);
|
||||
|
||||
get_basebackup(&state.pgdata, &pageserver_connstr, &tenant, &timeline, &lsn)?;
|
||||
info!(
|
||||
"getting basebackup@{} from pageserver {}",
|
||||
lsn, pageserver_connstr
|
||||
);
|
||||
get_basebackup(&state.pgdata, &pageserver_connstr, &tenant, &timeline, &lsn).with_context(
|
||||
|| {
|
||||
format!(
|
||||
"failed to get basebackup@{} from pageserver {}",
|
||||
lsn, pageserver_connstr
|
||||
)
|
||||
},
|
||||
)?;
|
||||
|
||||
// Update pg_hba.conf received with basebackup.
|
||||
update_pg_hba(pgdata_path)?;
|
||||
|
||||
@@ -115,6 +129,8 @@ fn run_compute(state: &Arc<RwLock<ComputeState>>) -> Result<ExitStatus> {
|
||||
|
||||
handle_roles(&read_state.spec, &mut client)?;
|
||||
handle_databases(&read_state.spec, &mut client)?;
|
||||
handle_grants(&read_state.spec, &mut client)?;
|
||||
create_writablity_check_data(&mut client)?;
|
||||
|
||||
// 'Close' connection
|
||||
drop(client);
|
||||
@@ -142,51 +158,42 @@ fn run_compute(state: &Arc<RwLock<ComputeState>>) -> Result<ExitStatus> {
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
// During configuration we are starting Postgres as a child process. If we
|
||||
// fail we do not want to leave it running. PR_SET_PDEATHSIG sets the signal
|
||||
// that will be sent to the child process when the parent dies. NB: this is
|
||||
// cleared for the child of a fork(). SIGINT means fast shutdown for Postgres.
|
||||
// This does not matter much for Docker, where `zenith_ctl` is an entrypoint,
|
||||
// so the whole container will exit if it exits. But could be useful when
|
||||
// `zenith_ctl` is used in e.g. systemd.
|
||||
unsafe {
|
||||
prctl(PR_SET_PDEATHSIG, SIGINT);
|
||||
}
|
||||
|
||||
// TODO: re-use `zenith_utils::logging` later
|
||||
// TODO: re-use `utils::logging` later
|
||||
init_logger(DEFAULT_LOG_LEVEL)?;
|
||||
|
||||
// Env variable is set by `cargo`
|
||||
let version: Option<&str> = option_env!("CARGO_PKG_VERSION");
|
||||
let matches = clap::App::new("zenith_ctl")
|
||||
.version("0.1.0")
|
||||
.version(version.unwrap_or("unknown"))
|
||||
.arg(
|
||||
clap::Arg::with_name("connstr")
|
||||
.short("C")
|
||||
Arg::new("connstr")
|
||||
.short('C')
|
||||
.long("connstr")
|
||||
.value_name("DATABASE_URL")
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
clap::Arg::with_name("pgdata")
|
||||
.short("D")
|
||||
Arg::new("pgdata")
|
||||
.short('D')
|
||||
.long("pgdata")
|
||||
.value_name("DATADIR")
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
clap::Arg::with_name("pgbin")
|
||||
.short("b")
|
||||
Arg::new("pgbin")
|
||||
.short('b')
|
||||
.long("pgbin")
|
||||
.value_name("POSTGRES_PATH"),
|
||||
)
|
||||
.arg(
|
||||
clap::Arg::with_name("spec")
|
||||
.short("s")
|
||||
Arg::new("spec")
|
||||
.short('s')
|
||||
.long("spec")
|
||||
.value_name("SPEC_JSON"),
|
||||
)
|
||||
.arg(
|
||||
clap::Arg::with_name("spec-path")
|
||||
.short("S")
|
||||
Arg::new("spec-path")
|
||||
.short('S')
|
||||
.long("spec-path")
|
||||
.value_name("SPEC_PATH"),
|
||||
)
|
||||
@@ -212,13 +219,7 @@ fn main() -> Result<()> {
|
||||
let file = File::open(path)?;
|
||||
serde_json::from_reader(file)?
|
||||
} else {
|
||||
// Finally, try to fetch it from the env
|
||||
// XXX: not tested well and kept as a backup option for k8s, Docker, etc.
|
||||
// TODO: remove later
|
||||
match env::var("CLUSTER_SPEC") {
|
||||
Ok(json) => serde_json::from_str(&json)?,
|
||||
Err(_) => panic!("cluster spec should be provided via --spec, --spec-path or env variable CLUSTER_SPEC")
|
||||
}
|
||||
panic!("cluster spec should be provided via --spec or --spec-path argument");
|
||||
}
|
||||
}
|
||||
};
compute_tools/src/checker.rs (new normal file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use log::error;
|
||||
use postgres::Client;
|
||||
use tokio_postgres::NoTls;
|
||||
|
||||
use crate::zenith::ComputeState;
|
||||
|
||||
pub fn create_writablity_check_data(client: &mut Client) -> Result<()> {
|
||||
let query = "
|
||||
CREATE TABLE IF NOT EXISTS health_check (
|
||||
id serial primary key,
|
||||
updated_at timestamptz default now()
|
||||
);
|
||||
INSERT INTO health_check VALUES (1, now())
|
||||
ON CONFLICT (id) DO UPDATE
|
||||
SET updated_at = now();";
|
||||
let result = client.simple_query(query)?;
|
||||
if result.len() < 2 {
|
||||
return Err(anyhow::format_err!("executed {} queries", result.len()));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn check_writability(state: &Arc<RwLock<ComputeState>>) -> Result<()> {
|
||||
let connstr = state.read().unwrap().connstr.clone();
|
||||
let (client, connection) = tokio_postgres::connect(&connstr, NoTls).await?;
|
||||
if client.is_closed() {
|
||||
return Err(anyhow!("connection to postgres closed"));
|
||||
}
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = connection.await {
|
||||
error!("connection error: {}", e);
|
||||
}
|
||||
});
|
||||
|
||||
let result = client
|
||||
.simple_query("UPDATE health_check SET updated_at = now() WHERE id = 1;")
|
||||
.await?;
|
||||
|
||||
if result.len() != 1 {
|
||||
return Err(anyhow!("statement can't be executed"));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -11,7 +11,7 @@ use log::{error, info};
|
||||
use crate::zenith::*;
|
||||
|
||||
// Service function to handle all available routes.
|
||||
fn routes(req: Request<Body>, state: Arc<RwLock<ComputeState>>) -> Response<Body> {
|
||||
async fn routes(req: Request<Body>, state: Arc<RwLock<ComputeState>>) -> Response<Body> {
|
||||
match (req.method(), req.uri().path()) {
|
||||
// Timestamp of the last Postgres activity in the plain text.
|
||||
(&Method::GET, "/last_activity") => {
|
||||
@@ -29,6 +29,15 @@ fn routes(req: Request<Body>, state: Arc<RwLock<ComputeState>>) -> Response<Body
|
||||
Response::new(Body::from(format!("{}", state.ready)))
|
||||
}
|
||||
|
||||
(&Method::GET, "/check_writability") => {
|
||||
info!("serving /check_writability GET request");
|
||||
let res = crate::checker::check_writability(&state).await;
|
||||
match res {
|
||||
Ok(_) => Response::new(Body::from("true")),
|
||||
Err(e) => Response::new(Body::from(e.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
// Return the `404 Not Found` for any other routes.
|
||||
_ => {
|
||||
let mut not_found = Response::new(Body::from("404 Not Found"));
|
||||
@@ -48,7 +57,7 @@ async fn serve(state: Arc<RwLock<ComputeState>>) {
|
||||
async move {
|
||||
Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
|
||||
let state = state.clone();
|
||||
async move { Ok::<_, Infallible>(routes(req, state)) }
|
||||
async move { Ok::<_, Infallible>(routes(req, state).await) }
|
||||
}))
|
||||
}
|
||||
});
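With `routes` now async, the new `/check_writability` endpoint is served by the same embedded HTTP server as `/ready` and `/last_activity`. A manual probe could look like the sketch below; the host and port are placeholders, since the compute node's HTTP listen address is not shown in this diff.

```sh
# <compute-host> and <http-port> are placeholders for the compute node's HTTP listener.
curl http://<compute-host>:<http-port>/check_writability   # prints "true" on success, the error text otherwise
curl http://<compute-host>:<http-port>/ready
```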
@@ -2,6 +2,7 @@
|
||||
//! Various tools and helpers to handle cluster / compute node (Postgres)
|
||||
//! configuration.
|
||||
//!
|
||||
pub mod checker;
|
||||
pub mod config;
|
||||
pub mod http_api;
|
||||
#[macro_use]
|
||||
|
||||
@@ -5,7 +5,7 @@ use std::process::Command;
|
||||
use std::str::FromStr;
|
||||
use std::{fs, thread, time};
|
||||
|
||||
use anyhow::{anyhow, Result};
|
||||
use anyhow::{bail, Result};
|
||||
use postgres::{Client, Transaction};
|
||||
use serde::Deserialize;
|
||||
|
||||
@@ -132,7 +132,14 @@ impl Role {
|
||||
let mut params: String = "LOGIN".to_string();
|
||||
|
||||
if let Some(pass) = &self.encrypted_password {
|
||||
params.push_str(&format!(" PASSWORD 'md5{}'", pass));
|
||||
// Some time ago we supported only md5 and treated all encrypted_password as md5.
|
||||
// Now we also support SCRAM-SHA-256 and to preserve compatibility
|
||||
// we treat all encrypted_password values as md5 unless they start with SCRAM-SHA-256.
|
||||
if pass.starts_with("SCRAM-SHA-256") {
|
||||
params.push_str(&format!(" PASSWORD '{}'", pass));
|
||||
} else {
|
||||
params.push_str(&format!(" PASSWORD 'md5{}'", pass));
|
||||
}
|
||||
} else {
|
||||
params.push_str(" PASSWORD NULL");
|
||||
}
|
||||
@@ -171,7 +178,7 @@ impl PgQuote for PgIdent {
|
||||
/// always quotes provided string with `""` and escapes every `"`. Not idempotent,
|
||||
/// i.e. if string is already escaped it will be escaped again.
|
||||
fn quote(&self) -> String {
|
||||
let result = format!("\"{}\"", self.replace("\"", "\"\""));
|
||||
let result = format!("\"{}\"", self.replace('"', "\"\""));
|
||||
result
|
||||
}
|
||||
}
|
||||
@@ -226,7 +233,7 @@ pub fn wait_for_postgres(port: &str, pgdata: &Path) -> Result<()> {
|
||||
// but postgres starts listening almost immediately, even if it is not really
|
||||
// ready to accept connections).
|
||||
if slept >= POSTGRES_WAIT_TIMEOUT {
|
||||
return Err(anyhow!("timed out while waiting for Postgres to start"));
|
||||
bail!("timed out while waiting for Postgres to start");
|
||||
}
|
||||
|
||||
if pid_path.exists() {
|
||||
|
||||
@@ -215,7 +215,7 @@ pub fn handle_databases(spec: &ClusterSpec, client: &mut Client) -> Result<()> {
|
||||
if let Some(r) = pg_db {
|
||||
// XXX: db owner name is returned as quoted string from Postgres,
|
||||
// when quoting is needed.
|
||||
let new_owner = if r.owner.starts_with('\"') {
|
||||
let new_owner = if r.owner.starts_with('"') {
|
||||
db.owner.quote()
|
||||
} else {
|
||||
db.owner.clone()
|
||||
@@ -244,3 +244,24 @@ pub fn handle_databases(spec: &ClusterSpec, client: &mut Client) -> Result<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Grant CREATE ON DATABASE to the database owner
|
||||
// to allow clients to create trusted extensions.
|
||||
pub fn handle_grants(spec: &ClusterSpec, client: &mut Client) -> Result<()> {
|
||||
info!("cluster spec grants:");
|
||||
|
||||
for db in &spec.cluster.databases {
|
||||
let dbname = &db.name;
|
||||
|
||||
let query: String = format!(
|
||||
"GRANT CREATE ON DATABASE {} TO {}",
|
||||
dbname.quote(),
|
||||
db.owner.quote()
|
||||
);
|
||||
info!("grant query {}", &query);
|
||||
|
||||
client.execute(query.as_str(), &[])?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -87,17 +87,19 @@ pub fn sync_safekeepers(pgdata: &str, pgbin: &str) -> Result<String> {
|
||||
.args(&["--sync-safekeepers"])
|
||||
.env("PGDATA", &pgdata) // we cannot use -D in this mode
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.spawn()
|
||||
.expect("postgres --sync-safekeepers failed to start");
|
||||
|
||||
// `postgres --sync-safekeepers` will print all log output to stderr and
|
||||
// final LSN to stdout. So we pipe only stdout, while stderr will be automatically
|
||||
// redirected to the caller output.
|
||||
let sync_output = sync_handle
|
||||
.wait_with_output()
|
||||
.expect("postgres --sync-safekeepers failed");
|
||||
if !sync_output.status.success() {
|
||||
anyhow::bail!(
|
||||
"postgres --sync-safekeepers failed: '{}'",
|
||||
String::from_utf8_lossy(&sync_output.stderr)
|
||||
"postgres --sync-safekeepers exited with non-zero status: {}",
|
||||
sync_output.status,
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,15 +1,13 @@
|
||||
[package]
|
||||
name = "control_plane"
|
||||
version = "0.1.0"
|
||||
authors = ["Stas Kelvich <stas@zenith.tech>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
tar = "0.4.33"
|
||||
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="9eb0dbfbeb6a6c1b79099b9f7ae4a8c021877858" }
|
||||
postgres = { git = "https://github.com/zenithdb/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_with = "1.12.0"
|
||||
toml = "0.5"
|
||||
lazy_static = "1.4"
|
||||
regex = "1"
|
||||
@@ -20,5 +18,6 @@ url = "2.2.2"
|
||||
reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }
|
||||
|
||||
pageserver = { path = "../pageserver" }
|
||||
zenith_utils = { path = "../zenith_utils" }
|
||||
workspace_hack = { path = "../workspace_hack" }
|
||||
safekeeper = { path = "../safekeeper" }
|
||||
utils = { path = "../libs/utils" }
|
||||
workspace_hack = { version = "0.1", path = "../workspace_hack" }
|
||||
|
||||
@@ -1,20 +1,20 @@
|
||||
# Page server and three safekeepers.
|
||||
[pageserver]
|
||||
listen_pg_addr = 'localhost:64000'
|
||||
listen_http_addr = 'localhost:9898'
|
||||
listen_pg_addr = '127.0.0.1:64000'
|
||||
listen_http_addr = '127.0.0.1:9898'
|
||||
auth_type = 'Trust'
|
||||
|
||||
[[safekeepers]]
|
||||
name = 'sk1'
|
||||
id = 1
|
||||
pg_port = 5454
|
||||
http_port = 7676
|
||||
|
||||
[[safekeepers]]
|
||||
name = 'sk2'
|
||||
id = 2
|
||||
pg_port = 5455
|
||||
http_port = 7677
|
||||
|
||||
[[safekeepers]]
|
||||
name = 'sk3'
|
||||
id = 3
|
||||
pg_port = 5456
|
||||
http_port = 7678
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# Minimal zenith environment with one safekeeper. This is equivalent to the built-in
|
||||
# defaults that you get with no --config
|
||||
[pageserver]
|
||||
listen_pg_addr = 'localhost:64000'
|
||||
listen_http_addr = 'localhost:9898'
|
||||
listen_pg_addr = '127.0.0.1:64000'
|
||||
listen_http_addr = '127.0.0.1:9898'
|
||||
auth_type = 'Trust'
|
||||
|
||||
[[safekeepers]]
|
||||
name = 'single'
|
||||
id = 1
|
||||
pg_port = 5454
|
||||
http_port = 7676
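As the comment above notes, this single-safekeeper file matches the built-in defaults, so passing a config is mainly useful for the three-safekeeper variant shown before it. Per the `local_env.rs` comments later in this diff, such a file is handed to the init command once and then persisted into `.zenith/config`. A sketch of that invocation (the `--config` flag is taken from those comments; the path is a placeholder):

```sh
./target/debug/neon_local init --config=path/to/your_config.toml
./target/debug/neon_local start
```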
@@ -11,11 +11,12 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use zenith_utils::connstring::connection_host_port;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
use zenith_utils::postgres_backend::AuthType;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
use zenith_utils::zid::ZTimelineId;
|
||||
use utils::{
|
||||
connstring::connection_host_port,
|
||||
lsn::Lsn,
|
||||
postgres_backend::AuthType,
|
||||
zid::{ZTenantId, ZTimelineId},
|
||||
};
|
||||
|
||||
use crate::local_env::LocalEnv;
|
||||
use crate::postgresql_conf::PostgresConf;
|
||||
@@ -37,7 +38,7 @@ impl ComputeControlPlane {
|
||||
// pgdatadirs
|
||||
// |- tenants
|
||||
// | |- <tenant_id>
|
||||
// | | |- <branch name>
|
||||
// | | |- <node name>
|
||||
pub fn load(env: LocalEnv) -> Result<ComputeControlPlane> {
|
||||
let pageserver = Arc::new(PageServerNode::from_env(&env));
|
||||
|
||||
@@ -52,7 +53,7 @@ impl ComputeControlPlane {
|
||||
.with_context(|| format!("failed to list {}", tenant_dir.path().display()))?
|
||||
{
|
||||
let node = PostgresNode::from_dir_entry(timeline_dir?, &env, &pageserver)?;
|
||||
nodes.insert((node.tenantid, node.name.clone()), Arc::new(node));
|
||||
nodes.insert((node.tenant_id, node.name.clone()), Arc::new(node));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,44 +74,14 @@ impl ComputeControlPlane {
|
||||
.unwrap_or(self.base_port)
|
||||
}
|
||||
|
||||
// FIXME: see also parse_point_in_time in branches.rs.
|
||||
fn parse_point_in_time(
|
||||
&self,
|
||||
tenantid: ZTenantId,
|
||||
s: &str,
|
||||
) -> Result<(ZTimelineId, Option<Lsn>)> {
|
||||
let mut strings = s.split('@');
|
||||
let name = strings.next().unwrap();
|
||||
|
||||
let lsn: Option<Lsn>;
|
||||
if let Some(lsnstr) = strings.next() {
|
||||
lsn = Some(
|
||||
Lsn::from_str(lsnstr)
|
||||
.with_context(|| "invalid LSN in point-in-time specification")?,
|
||||
);
|
||||
} else {
|
||||
lsn = None
|
||||
}
|
||||
|
||||
// Resolve the timeline ID, given the human-readable branch name
|
||||
let timeline_id = self
|
||||
.pageserver
|
||||
.branch_get_by_name(&tenantid, name)?
|
||||
.timeline_id;
|
||||
|
||||
Ok((timeline_id, lsn))
|
||||
}
|
||||
|
||||
pub fn new_node(
|
||||
&mut self,
|
||||
tenantid: ZTenantId,
|
||||
tenant_id: ZTenantId,
|
||||
name: &str,
|
||||
timeline_spec: &str,
|
||||
timeline_id: ZTimelineId,
|
||||
lsn: Option<Lsn>,
|
||||
port: Option<u16>,
|
||||
) -> Result<Arc<PostgresNode>> {
|
||||
// Resolve the human-readable timeline spec into timeline ID and LSN
|
||||
let (timelineid, lsn) = self.parse_point_in_time(tenantid, timeline_spec)?;
|
||||
|
||||
let port = port.unwrap_or_else(|| self.get_port());
|
||||
let node = Arc::new(PostgresNode {
|
||||
name: name.to_owned(),
|
||||
@@ -118,9 +89,9 @@ impl ComputeControlPlane {
|
||||
env: self.env.clone(),
|
||||
pageserver: Arc::clone(&self.pageserver),
|
||||
is_test: false,
|
||||
timelineid,
|
||||
timeline_id,
|
||||
lsn,
|
||||
tenantid,
|
||||
tenant_id,
|
||||
uses_wal_proposer: false,
|
||||
});
|
||||
|
||||
@@ -128,7 +99,7 @@ impl ComputeControlPlane {
|
||||
node.setup_pg_conf(self.env.pageserver.auth_type)?;
|
||||
|
||||
self.nodes
|
||||
.insert((tenantid, node.name.clone()), Arc::clone(&node));
|
||||
.insert((tenant_id, node.name.clone()), Arc::clone(&node));
|
||||
|
||||
Ok(node)
|
||||
}
|
||||
@@ -143,9 +114,9 @@ pub struct PostgresNode {
|
||||
pub env: LocalEnv,
|
||||
pageserver: Arc<PageServerNode>,
|
||||
is_test: bool,
|
||||
pub timelineid: ZTimelineId,
|
||||
pub timeline_id: ZTimelineId,
|
||||
pub lsn: Option<Lsn>, // if it's a read-only node. None for primary
|
||||
pub tenantid: ZTenantId,
|
||||
pub tenant_id: ZTenantId,
|
||||
uses_wal_proposer: bool,
|
||||
}
|
||||
|
||||
@@ -177,8 +148,8 @@ impl PostgresNode {
|
||||
// Read a few options from the config file
|
||||
let context = format!("in config file {}", cfg_path_str);
|
||||
let port: u16 = conf.parse_field("port", &context)?;
|
||||
let timelineid: ZTimelineId = conf.parse_field("zenith.zenith_timeline", &context)?;
|
||||
let tenantid: ZTenantId = conf.parse_field("zenith.zenith_tenant", &context)?;
|
||||
let timeline_id: ZTimelineId = conf.parse_field("zenith.zenith_timeline", &context)?;
|
||||
let tenant_id: ZTenantId = conf.parse_field("zenith.zenith_tenant", &context)?;
|
||||
let uses_wal_proposer = conf.get("wal_acceptors").is_some();
|
||||
|
||||
// parse recovery_target_lsn, if any
|
||||
@@ -192,9 +163,9 @@ impl PostgresNode {
|
||||
env: env.clone(),
|
||||
pageserver: Arc::clone(pageserver),
|
||||
is_test: false,
|
||||
timelineid,
|
||||
timeline_id,
|
||||
lsn: recovery_target_lsn,
|
||||
tenantid,
|
||||
tenant_id,
|
||||
uses_wal_proposer,
|
||||
})
|
||||
}
|
||||
@@ -245,24 +216,24 @@ impl PostgresNode {
|
||||
);
|
||||
|
||||
let sql = if let Some(lsn) = lsn {
|
||||
format!("basebackup {} {} {}", self.tenantid, self.timelineid, lsn)
|
||||
format!("basebackup {} {} {}", self.tenant_id, self.timeline_id, lsn)
|
||||
} else {
|
||||
format!("basebackup {} {}", self.tenantid, self.timelineid)
|
||||
format!("basebackup {} {}", self.tenant_id, self.timeline_id)
|
||||
};
|
||||
|
||||
let mut client = self
|
||||
.pageserver
|
||||
.page_server_psql_client()
|
||||
.with_context(|| "connecting to page server failed")?;
|
||||
.context("connecting to page server failed")?;
|
||||
|
||||
let copyreader = client
|
||||
.copy_out(sql.as_str())
|
||||
.with_context(|| "page server 'basebackup' command failed")?;
|
||||
.context("page server 'basebackup' command failed")?;
|
||||
|
||||
// Read the archive directly from the `CopyOutReader`
|
||||
tar::Archive::new(copyreader)
|
||||
.unpack(&self.pgdata())
|
||||
.with_context(|| "extracting base backup failed")?;
|
||||
.context("extracting base backup failed")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -302,12 +273,7 @@ impl PostgresNode {
|
||||
conf.append("wal_sender_timeout", "5s");
|
||||
conf.append("listen_addresses", &self.address.ip().to_string());
|
||||
conf.append("port", &self.address.port().to_string());
|
||||
|
||||
// Never clean up old WAL. TODO: We should use a replication
|
||||
// slot or something proper, to prevent the compute node
|
||||
// from removing WAL that hasn't been streamed to the safekeeper or
|
||||
// page server yet. (gh issue #349)
|
||||
conf.append("wal_keep_size", "10TB");
|
||||
conf.append("wal_keep_size", "0");
|
||||
|
||||
// Configure the node to fetch pages from pageserver
|
||||
let pageserver_connstr = {
|
||||
@@ -333,36 +299,43 @@ impl PostgresNode {
|
||||
conf.append("shared_preload_libraries", "zenith");
|
||||
conf.append_line("");
|
||||
conf.append("zenith.page_server_connstring", &pageserver_connstr);
|
||||
conf.append("zenith.zenith_tenant", &self.tenantid.to_string());
|
||||
conf.append("zenith.zenith_timeline", &self.timelineid.to_string());
|
||||
conf.append("zenith.zenith_tenant", &self.tenant_id.to_string());
|
||||
conf.append("zenith.zenith_timeline", &self.timeline_id.to_string());
|
||||
if let Some(lsn) = self.lsn {
|
||||
conf.append("recovery_target_lsn", &lsn.to_string());
|
||||
}
|
||||
|
||||
conf.append_line("");
|
||||
// Configure backpressure
|
||||
// - Replication write lag depends on how fast the walreceiver can process incoming WAL.
|
||||
// This lag determines latency of get_page_at_lsn. Speed of applying WAL is about 10MB/sec,
|
||||
// so to avoid expiration of 1 minute timeout, this lag should not be larger than 600MB.
|
||||
// Actually latency should be much smaller (better if < 1sec). But we assume that recently
// updated pages are not requested from the pageserver.
|
||||
// - Replication flush lag depends on speed of persisting data by checkpointer (creation of
|
||||
// delta/image layers) and advancing disk_consistent_lsn. Safekeepers are able to
|
||||
// remove/archive WAL only beyond disk_consistent_lsn. Too large a lag can cause long
|
||||
// recovery time (in case of pageserver crash) and disk space overflow at safekeepers.
|
||||
// - Replication apply lag depends on speed of uploading changes to S3 by uploader thread.
|
||||
// To be able to restore database in case of pageserver node crash, safekeeper should not
|
||||
// remove WAL beyond this point. Too large lag can cause space exhaustion in safekeepers
|
||||
// (if they are not able to upload WAL to S3).
|
||||
conf.append("max_replication_write_lag", "500MB");
|
||||
conf.append("max_replication_flush_lag", "10GB");
|
||||
|
||||
if !self.env.safekeepers.is_empty() {
|
||||
// Configure backpressure
|
||||
// In setup with safekeepers apply_lag depends on
|
||||
// speed of data checkpointing on pageserver (see disk_consistent_lsn).
|
||||
conf.append("max_replication_apply_lag", "1500MB");
|
||||
|
||||
// Configure the node to connect to the safekeepers
|
||||
conf.append("synchronous_standby_names", "walproposer");
|
||||
|
||||
let wal_acceptors = self
|
||||
let safekeepers = self
|
||||
.env
|
||||
.safekeepers
|
||||
.iter()
|
||||
.map(|sk| format!("localhost:{}", sk.pg_port))
|
||||
.collect::<Vec<String>>()
|
||||
.join(",");
|
||||
conf.append("wal_acceptors", &wal_acceptors);
|
||||
conf.append("wal_acceptors", &safekeepers);
|
||||
} else {
|
||||
// Configure backpressure
|
||||
// In setup without safekeepers, flush_lag depends on
|
||||
// speed of data checkpointing on pageserver (see disk_consistent_lsn)
|
||||
conf.append("max_replication_flush_lag", "1500MB");
|
||||
|
||||
// We only use setup without safekeepers for tests,
|
||||
// and don't care about data durability on pageserver,
|
||||
// so set more relaxed synchronous_commit.
|
||||
@@ -405,7 +378,7 @@ impl PostgresNode {
|
||||
}
|
||||
|
||||
pub fn pgdata(&self) -> PathBuf {
|
||||
self.env.pg_data_dir(&self.tenantid, &self.name)
|
||||
self.env.pg_data_dir(&self.tenant_id, &self.name)
|
||||
}
|
||||
|
||||
pub fn status(&self) -> &str {
|
||||
@@ -443,10 +416,15 @@ impl PostgresNode {
|
||||
if let Some(token) = auth_token {
|
||||
cmd.env("ZENITH_AUTH_TOKEN", token);
|
||||
}
|
||||
let pg_ctl = cmd.status().with_context(|| "pg_ctl failed")?;
|
||||
|
||||
if !pg_ctl.success() {
|
||||
anyhow::bail!("pg_ctl failed");
|
||||
let pg_ctl = cmd.output().context("pg_ctl failed")?;
|
||||
if !pg_ctl.status.success() {
|
||||
anyhow::bail!(
|
||||
"pg_ctl failed, exit code: {}, stdout: {}, stderr: {}",
|
||||
pg_ctl.status,
|
||||
String::from_utf8_lossy(&pg_ctl.stdout),
|
||||
String::from_utf8_lossy(&pg_ctl.stderr),
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
|
||||
pub mod compute;
|
||||
pub mod local_env;
|
||||
@@ -31,3 +32,19 @@ pub fn read_pidfile(pidfile: &Path) -> Result<i32> {
|
||||
}
|
||||
Ok(pid)
|
||||
}
|
||||
|
||||
fn fill_rust_env_vars(cmd: &mut Command) -> &mut Command {
|
||||
let cmd = cmd.env_clear().env("RUST_BACKTRACE", "1");
|
||||
|
||||
let var = "LLVM_PROFILE_FILE";
|
||||
if let Some(val) = std::env::var_os(var) {
|
||||
cmd.env(var, val);
|
||||
}
|
||||
|
||||
const RUST_LOG_KEY: &str = "RUST_LOG";
|
||||
if let Ok(rust_log_value) = std::env::var(RUST_LOG_KEY) {
|
||||
cmd.env(RUST_LOG_KEY, rust_log_value)
|
||||
} else {
|
||||
cmd
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,16 +3,21 @@
|
||||
//! Now it also provides init method which acts like a stub for proper installation
|
||||
//! script which will use local paths.
|
||||
|
||||
use anyhow::{bail, Context};
|
||||
use anyhow::{bail, ensure, Context};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{serde_as, DisplayFromStr};
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
use std::fmt::Write;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::{Command, Stdio};
|
||||
use zenith_utils::auth::{encode_from_key_file, Claims, Scope};
|
||||
use zenith_utils::postgres_backend::AuthType;
|
||||
use zenith_utils::zid::{opt_display_serde, ZTenantId};
|
||||
use utils::{
|
||||
auth::{encode_from_key_file, Claims, Scope},
|
||||
postgres_backend::AuthType,
|
||||
zid::{ZNodeId, ZTenantId, ZTenantTimelineId, ZTimelineId},
|
||||
};
|
||||
|
||||
use crate::safekeeper::SafekeeperNode;
|
||||
|
||||
//
|
||||
// This data structure represents the zenith CLI config
|
||||
@@ -21,7 +26,8 @@ use zenith_utils::zid::{opt_display_serde, ZTenantId};
|
||||
// to 'zenith init --config=<path>' option. See control_plane/simple.conf for
|
||||
// an example.
|
||||
//
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[serde_as]
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||
pub struct LocalEnv {
|
||||
// Base directory for all the nodes (the pageserver, safekeepers and
|
||||
// compute nodes).
|
||||
@@ -45,23 +51,41 @@ pub struct LocalEnv {
|
||||
|
||||
// Default tenant ID to use with the 'zenith' command line utility, when
|
||||
// --tenantid is not explicitly specified.
|
||||
#[serde(with = "opt_display_serde")]
|
||||
#[serde(default)]
|
||||
pub default_tenantid: Option<ZTenantId>,
|
||||
#[serde_as(as = "Option<DisplayFromStr>")]
|
||||
pub default_tenant_id: Option<ZTenantId>,
|
||||
|
||||
// used to issue tokens during e.g pg start
|
||||
#[serde(default)]
|
||||
pub private_key_path: PathBuf,
|
||||
|
||||
// A comma separated broker (etcd) endpoints for storage nodes coordination, e.g. 'http://127.0.0.1:2379'.
|
||||
#[serde(default)]
|
||||
pub broker_endpoints: Option<String>,
|
||||
|
||||
/// A prefix to add to any key when pushing/polling etcd from a node.
|
||||
#[serde(default)]
|
||||
pub broker_etcd_prefix: Option<String>,
|
||||
|
||||
pub pageserver: PageServerConf,
|
||||
|
||||
#[serde(default)]
|
||||
pub safekeepers: Vec<SafekeeperConf>,
|
||||
|
||||
/// Keep human-readable aliases in memory (and persist them to config), to hide ZId hex strings from the user.
|
||||
#[serde(default)]
|
||||
// A `HashMap<String, HashMap<ZTenantId, ZTimelineId>>` would be more appropriate here,
|
||||
// but deserialization into a generic toml object as `toml::Value::try_from` fails with an error.
|
||||
// https://toml.io/en/v1.0.0 does not contain a concept of "a table inside another table".
|
||||
#[serde_as(as = "HashMap<_, Vec<(DisplayFromStr, DisplayFromStr)>>")]
|
||||
branch_name_mappings: HashMap<String, Vec<(ZTenantId, ZTimelineId)>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||
#[serde(default)]
|
||||
pub struct PageServerConf {
|
||||
// node id
|
||||
pub id: ZNodeId,
|
||||
// Pageserver connection settings
|
||||
pub listen_pg_addr: String,
|
||||
pub listen_http_addr: String,
|
||||
@@ -76,6 +100,7 @@ pub struct PageServerConf {
|
||||
impl Default for PageServerConf {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
id: ZNodeId(0),
|
||||
listen_pg_addr: String::new(),
|
||||
listen_http_addr: String::new(),
|
||||
auth_type: AuthType::Trust,
|
||||
@@ -84,10 +109,10 @@ impl Default for PageServerConf {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
|
||||
#[serde(default)]
|
||||
pub struct SafekeeperConf {
|
||||
pub name: String,
|
||||
pub id: ZNodeId,
|
||||
pub pg_port: u16,
|
||||
pub http_port: u16,
|
||||
pub sync: bool,
|
||||
@@ -96,7 +121,7 @@ pub struct SafekeeperConf {
|
||||
impl Default for SafekeeperConf {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
name: String::new(),
|
||||
id: ZNodeId(0),
|
||||
pg_port: 0,
|
||||
http_port: 0,
|
||||
sync: true,
|
||||
@@ -136,8 +161,64 @@ impl LocalEnv {
|
||||
self.base_data_dir.clone()
|
||||
}
|
||||
|
||||
pub fn safekeeper_data_dir(&self, node_name: &str) -> PathBuf {
|
||||
self.base_data_dir.join("safekeepers").join(node_name)
|
||||
pub fn safekeeper_data_dir(&self, data_dir_name: &str) -> PathBuf {
|
||||
self.base_data_dir.join("safekeepers").join(data_dir_name)
|
||||
}
|
||||
|
||||
pub fn register_branch_mapping(
|
||||
&mut self,
|
||||
branch_name: String,
|
||||
tenant_id: ZTenantId,
|
||||
timeline_id: ZTimelineId,
|
||||
) -> anyhow::Result<()> {
|
||||
let existing_values = self
|
||||
.branch_name_mappings
|
||||
.entry(branch_name.clone())
|
||||
.or_default();
|
||||
|
||||
let existing_ids = existing_values
|
||||
.iter()
|
||||
.find(|(existing_tenant_id, _)| existing_tenant_id == &tenant_id);
|
||||
|
||||
if let Some((_, old_timeline_id)) = existing_ids {
|
||||
if old_timeline_id == &timeline_id {
|
||||
Ok(())
|
||||
} else {
|
||||
bail!(
|
||||
"branch '{}' is already mapped to timeline {}, cannot map to another timeline {}",
|
||||
branch_name,
|
||||
old_timeline_id,
|
||||
timeline_id
|
||||
);
|
||||
}
|
||||
} else {
|
||||
existing_values.push((tenant_id, timeline_id));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_branch_timeline_id(
|
||||
&self,
|
||||
branch_name: &str,
|
||||
tenant_id: ZTenantId,
|
||||
) -> Option<ZTimelineId> {
|
||||
self.branch_name_mappings
|
||||
.get(branch_name)?
|
||||
.iter()
|
||||
.find(|(mapped_tenant_id, _)| mapped_tenant_id == &tenant_id)
|
||||
.map(|&(_, timeline_id)| timeline_id)
|
||||
.map(ZTimelineId::from)
|
||||
}
|
||||
|
||||
pub fn timeline_name_mappings(&self) -> HashMap<ZTenantTimelineId, String> {
|
||||
self.branch_name_mappings
|
||||
.iter()
|
||||
.flat_map(|(name, tenant_timelines)| {
|
||||
tenant_timelines.iter().map(|&(tenant_id, timeline_id)| {
|
||||
(ZTenantTimelineId::new(tenant_id, timeline_id), name.clone())
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Create a LocalEnv from a config file.
|
||||
@@ -179,8 +260,8 @@ impl LocalEnv {
|
||||
}
|
||||
|
||||
// If no initial tenant ID was given, generate it.
|
||||
if env.default_tenantid.is_none() {
|
||||
env.default_tenantid = Some(ZTenantId::generate());
|
||||
if env.default_tenant_id.is_none() {
|
||||
env.default_tenant_id = Some(ZTenantId::generate());
|
||||
}
|
||||
|
||||
env.base_data_dir = base_path();
|
||||
@@ -210,6 +291,39 @@ impl LocalEnv {
|
||||
Ok(env)
|
||||
}
|
||||
|
||||
pub fn persist_config(&self, base_path: &Path) -> anyhow::Result<()> {
|
||||
// Currently, the user first passes a config file with 'zenith init --config=<path>'
|
||||
// We read that in, in `create_config`, and fill any missing defaults. Then it's saved
|
||||
// to .zenith/config. TODO: We lose any formatting and comments along the way, which is
|
||||
// a bit sad.
|
||||
let mut conf_content = r#"# This file describes a local deployment of the page server
# and safekeeper node. It is read by the 'zenith' command-line
|
||||
# utility.
|
||||
"#
|
||||
.to_string();
|
||||
|
||||
// Convert the LocalEnv to a toml file.
|
||||
//
|
||||
// This could be as simple as this:
|
||||
//
|
||||
// conf_content += &toml::to_string_pretty(env)?;
|
||||
//
|
||||
// But it results in a "values must be emitted before tables". I'm not sure
|
||||
// why, AFAICS the table, i.e. 'safekeepers: Vec<SafekeeperConf>' is last.
|
||||
// Maybe rust reorders the fields to avoid padding or something?
|
||||
// In any case, converting to toml::Value first, and serializing that, works.
|
||||
// See https://github.com/alexcrichton/toml-rs/issues/142
|
||||
conf_content += &toml::to_string_pretty(&toml::Value::try_from(self)?)?;
|
||||
|
||||
let target_config_path = base_path.join("config");
|
||||
fs::write(&target_config_path, conf_content).with_context(|| {
|
||||
format!(
|
||||
"Failed to write config file into path '{}'",
|
||||
target_config_path.display()
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// this function is used only for testing purposes in the CLI, e.g. to generate tokens during init
|
||||
pub fn generate_auth_token(&self, claims: &Claims) -> anyhow::Result<String> {
|
||||
let private_key_path = if self.private_key_path.is_absolute() {
|
||||
@@ -228,15 +342,15 @@ impl LocalEnv {
|
||||
pub fn init(&mut self) -> anyhow::Result<()> {
|
||||
// check if config already exists
|
||||
let base_path = &self.base_data_dir;
|
||||
if base_path == Path::new("") {
|
||||
bail!("repository base path is missing");
|
||||
}
|
||||
if base_path.exists() {
|
||||
bail!(
|
||||
"directory '{}' already exists. Perhaps already initialized?",
|
||||
base_path.to_str().unwrap()
|
||||
);
|
||||
}
|
||||
ensure!(
|
||||
base_path != Path::new(""),
|
||||
"repository base path is missing"
|
||||
);
|
||||
ensure!(
|
||||
!base_path.exists(),
|
||||
"directory '{}' already exists. Perhaps already initialized?",
|
||||
base_path.display()
|
||||
);
|
||||
|
||||
fs::create_dir(&base_path)?;
|
||||
|
||||
@@ -251,7 +365,7 @@ impl LocalEnv {
|
||||
.arg("2048")
|
||||
.stdout(Stdio::null())
|
||||
.output()
|
||||
.with_context(|| "failed to generate auth private key")?;
|
||||
.context("failed to generate auth private key")?;
|
||||
if !keygen_output.status.success() {
|
||||
bail!(
|
||||
"openssl failed: '{}'",
|
||||
@@ -270,7 +384,7 @@ impl LocalEnv {
|
||||
.args(&["-out", public_key_path.to_str().unwrap()])
|
||||
.stdout(Stdio::null())
|
||||
.output()
|
||||
.with_context(|| "failed to generate auth private key")?;
|
||||
.context("failed to generate auth private key")?;
|
||||
if !keygen_output.status.success() {
|
||||
bail!(
|
||||
"openssl failed: '{}'",
|
||||
@@ -285,39 +399,10 @@ impl LocalEnv {
|
||||
fs::create_dir_all(self.pg_data_dirs_path())?;
|
||||
|
||||
for safekeeper in &self.safekeepers {
|
||||
fs::create_dir_all(self.safekeeper_data_dir(&safekeeper.name))?;
|
||||
fs::create_dir_all(SafekeeperNode::datadir_path_by_id(self, safekeeper.id))?;
|
||||
}
|
||||
|
||||
let mut conf_content = String::new();
|
||||
|
||||
// Currently, the user first passes a config file with 'zenith init --config=<path>'
|
||||
// We read that in, in `create_config`, and fill any missing defaults. Then it's saved
|
||||
// to .zenith/config. TODO: We lose any formatting and comments along the way, which is
|
||||
// a bit sad.
|
||||
write!(
|
||||
&mut conf_content,
|
||||
r#"# This file describes a locale deployment of the page server
|
||||
# and safekeeeper node. It is read by the 'zenith' command-line
|
||||
# utility.
|
||||
"#
|
||||
)?;
|
||||
|
||||
// Convert the LocalEnv to a toml file.
|
||||
//
|
||||
// This could be as simple as this:
|
||||
//
|
||||
// conf_content += &toml::to_string_pretty(env)?;
|
||||
//
|
||||
// But it results in a "values must be emitted before tables". I'm not sure
|
||||
// why, AFAICS the table, i.e. 'safekeepers: Vec<SafekeeperConf>' is last.
|
||||
// Maybe rust reorders the fields to squeeze avoid padding or something?
|
||||
// In any case, converting to toml::Value first, and serializing that, works.
|
||||
// See https://github.com/alexcrichton/toml-rs/issues/142
|
||||
conf_content += &toml::to_string_pretty(&toml::Value::try_from(&self)?)?;
|
||||
|
||||
fs::write(base_path.join("config"), conf_content)?;
|
||||
|
||||
Ok(())
|
||||
self.persist_config(base_path)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
/// NOTE: This doesn't implement the full, correct postgresql.conf syntax. Just
|
||||
/// enough to extract a few settings we need in Zenith, assuming you don't do
|
||||
/// funny stuff like include-directives or funny escaping.
|
||||
use anyhow::{anyhow, bail, Context, Result};
|
||||
use anyhow::{bail, Context, Result};
|
||||
use lazy_static::lazy_static;
|
||||
use regex::Regex;
|
||||
use std::collections::HashMap;
|
||||
@@ -78,7 +78,7 @@ impl PostgresConf {
|
||||
<T as FromStr>::Err: std::error::Error + Send + Sync + 'static,
|
||||
{
|
||||
self.get(field_name)
|
||||
.ok_or_else(|| anyhow!("could not find '{}' option {}", field_name, context))?
|
||||
.with_context(|| format!("could not find '{}' option {}", field_name, context))?
|
||||
.parse::<T>()
|
||||
.with_context(|| format!("could not parse '{}' option {}", field_name, context))
|
||||
}
|
||||
|
||||
@@ -13,13 +13,17 @@ use nix::unistd::Pid;
|
||||
use postgres::Config;
|
||||
use reqwest::blocking::{Client, RequestBuilder, Response};
|
||||
use reqwest::{IntoUrl, Method};
|
||||
use safekeeper::http::models::TimelineCreateRequest;
|
||||
use thiserror::Error;
|
||||
use zenith_utils::http::error::HttpErrorBody;
|
||||
use utils::{
|
||||
connstring::connection_address,
|
||||
http::error::HttpErrorBody,
|
||||
zid::{ZNodeId, ZTenantId, ZTimelineId},
|
||||
};
|
||||
|
||||
use crate::local_env::{LocalEnv, SafekeeperConf};
|
||||
use crate::read_pidfile;
|
||||
use crate::storage::PageServerNode;
|
||||
use zenith_utils::connstring::connection_address;
|
||||
use crate::{fill_rust_env_vars, read_pidfile};
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum SafekeeperHttpError {
|
||||
@@ -61,7 +65,7 @@ impl ResponseErrorMessageExt for Response {
|
||||
//
|
||||
#[derive(Debug)]
|
||||
pub struct SafekeeperNode {
|
||||
pub name: String,
|
||||
pub id: ZNodeId,
|
||||
|
||||
pub conf: SafekeeperConf,
|
||||
|
||||
@@ -71,35 +75,44 @@ pub struct SafekeeperNode {
|
||||
pub http_base_url: String,
|
||||
|
||||
pub pageserver: Arc<PageServerNode>,
|
||||
|
||||
broker_endpoints: Option<String>,
|
||||
broker_etcd_prefix: Option<String>,
|
||||
}
|
||||
|
||||
impl SafekeeperNode {
|
||||
pub fn from_env(env: &LocalEnv, conf: &SafekeeperConf) -> SafekeeperNode {
|
||||
let pageserver = Arc::new(PageServerNode::from_env(env));
|
||||
|
||||
println!("initializing for {} for {}", conf.name, conf.http_port);
|
||||
println!("initializing for sk {} for {}", conf.id, conf.http_port);
|
||||
|
||||
SafekeeperNode {
|
||||
name: conf.name.clone(),
|
||||
id: conf.id,
|
||||
conf: conf.clone(),
|
||||
pg_connection_config: Self::safekeeper_connection_config(conf.pg_port),
|
||||
env: env.clone(),
|
||||
http_client: Client::new(),
|
||||
http_base_url: format!("http://localhost:{}/v1", conf.http_port),
|
||||
http_base_url: format!("http://127.0.0.1:{}/v1", conf.http_port),
|
||||
pageserver,
|
||||
broker_endpoints: env.broker_endpoints.clone(),
|
||||
broker_etcd_prefix: env.broker_etcd_prefix.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct libpq connection string for connecting to this safekeeper.
|
||||
fn safekeeper_connection_config(port: u16) -> Config {
|
||||
// TODO safekeeper authentication not implemented yet
|
||||
format!("postgresql://no_user@localhost:{}/no_db", port)
|
||||
format!("postgresql://no_user@127.0.0.1:{}/no_db", port)
|
||||
.parse()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn datadir_path_by_id(env: &LocalEnv, sk_id: ZNodeId) -> PathBuf {
|
||||
env.safekeeper_data_dir(format!("sk{}", sk_id).as_ref())
|
||||
}
|
||||
|
||||
pub fn datadir_path(&self) -> PathBuf {
|
||||
self.env.safekeeper_data_dir(&self.name)
|
||||
SafekeeperNode::datadir_path_by_id(&self.env, self.id)
|
||||
}
|
||||
|
||||
pub fn pid_file(&self) -> PathBuf {
|
||||
@@ -114,24 +127,26 @@ impl SafekeeperNode {
|
||||
);
|
||||
io::stdout().flush().unwrap();
|
||||
|
||||
let listen_pg = format!("localhost:{}", self.conf.pg_port);
|
||||
let listen_http = format!("localhost:{}", self.conf.http_port);
|
||||
let listen_pg = format!("127.0.0.1:{}", self.conf.pg_port);
|
||||
let listen_http = format!("127.0.0.1:{}", self.conf.http_port);
|
||||
|
||||
let mut cmd = Command::new(self.env.safekeeper_bin()?);
|
||||
cmd.args(&["-D", self.datadir_path().to_str().unwrap()])
|
||||
.args(&["--listen-pg", &listen_pg])
|
||||
.args(&["--listen-http", &listen_http])
|
||||
.args(&["--recall", "1 second"])
|
||||
.arg("--daemonize")
|
||||
.env_clear()
|
||||
.env("RUST_BACKTRACE", "1");
|
||||
fill_rust_env_vars(
|
||||
cmd.args(&["-D", self.datadir_path().to_str().unwrap()])
|
||||
.args(&["--id", self.id.to_string().as_ref()])
|
||||
.args(&["--listen-pg", &listen_pg])
|
||||
.args(&["--listen-http", &listen_http])
|
||||
.args(&["--recall", "1 second"])
|
||||
.arg("--daemonize"),
|
||||
);
|
||||
if !self.conf.sync {
|
||||
cmd.arg("--no-sync");
|
||||
}
|
||||
|
||||
let var = "LLVM_PROFILE_FILE";
|
||||
if let Some(val) = std::env::var_os(var) {
|
||||
cmd.env(var, val);
|
||||
if let Some(ref ep) = self.broker_endpoints {
|
||||
cmd.args(&["--broker-endpoints", ep]);
|
||||
}
|
||||
if let Some(prefix) = self.broker_etcd_prefix.as_deref() {
|
||||
cmd.args(&["--broker-etcd-prefix", prefix]);
|
||||
}
|
||||
|
||||
if !cmd.status()?.success() {
|
||||
@@ -188,7 +203,7 @@ impl SafekeeperNode {
|
||||
pub fn stop(&self, immediate: bool) -> anyhow::Result<()> {
|
||||
let pid_file = self.pid_file();
|
||||
if !pid_file.exists() {
|
||||
println!("Safekeeper {} is already stopped", self.name);
|
||||
println!("Safekeeper {} is already stopped", self.id);
|
||||
return Ok(());
|
||||
}
|
||||
let pid = read_pidfile(&pid_file)?;
|
||||
@@ -260,4 +275,25 @@ impl SafekeeperNode {
|
||||
.error_from_body()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn timeline_create(
|
||||
&self,
|
||||
tenant_id: ZTenantId,
|
||||
timeline_id: ZTimelineId,
|
||||
peer_ids: Vec<ZNodeId>,
|
||||
) -> Result<()> {
|
||||
Ok(self
|
||||
.http_request(
|
||||
Method::POST,
|
||||
format!("{}/{}", self.http_base_url, "timeline"),
|
||||
)
|
||||
.json(&TimelineCreateRequest {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
peer_ids,
|
||||
})
|
||||
.send()?
|
||||
.error_from_body()?
|
||||
.json()?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::collections::HashMap;
|
||||
use std::io::Write;
|
||||
use std::net::TcpStream;
|
||||
use std::path::PathBuf;
|
||||
@@ -5,24 +6,27 @@ use std::process::Command;
|
||||
use std::time::Duration;
|
||||
use std::{io, result, thread};
|
||||
|
||||
use anyhow::bail;
|
||||
use anyhow::{bail, Context};
|
||||
use nix::errno::Errno;
|
||||
use nix::sys::signal::{kill, Signal};
|
||||
use nix::unistd::Pid;
|
||||
use pageserver::http::models::{BranchCreateRequest, TenantCreateRequest};
|
||||
use pageserver::http::models::{TenantConfigRequest, TenantCreateRequest, TimelineCreateRequest};
|
||||
use pageserver::timelines::TimelineInfo;
|
||||
use postgres::{Config, NoTls};
|
||||
use reqwest::blocking::{Client, RequestBuilder, Response};
|
||||
use reqwest::{IntoUrl, Method};
|
||||
use thiserror::Error;
|
||||
use zenith_utils::http::error::HttpErrorBody;
|
||||
use zenith_utils::postgres_backend::AuthType;
|
||||
use zenith_utils::zid::ZTenantId;
|
||||
use utils::{
|
||||
connstring::connection_address,
|
||||
http::error::HttpErrorBody,
|
||||
lsn::Lsn,
|
||||
postgres_backend::AuthType,
|
||||
zid::{ZTenantId, ZTimelineId},
|
||||
};
|
||||
|
||||
use crate::local_env::LocalEnv;
|
||||
use crate::read_pidfile;
|
||||
use pageserver::branches::BranchInfo;
|
||||
use crate::{fill_rust_env_vars, read_pidfile};
|
||||
use pageserver::tenant_mgr::TenantInfo;
|
||||
use zenith_utils::connstring::connection_address;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum PageserverHttpError {
|
||||
@@ -96,54 +100,77 @@ impl PageServerNode {
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn init(&self, create_tenant: Option<&str>) -> anyhow::Result<()> {
|
||||
pub fn init(
|
||||
&self,
|
||||
create_tenant: Option<ZTenantId>,
|
||||
initial_timeline_id: Option<ZTimelineId>,
|
||||
config_overrides: &[&str],
|
||||
) -> anyhow::Result<ZTimelineId> {
|
||||
let mut cmd = Command::new(self.env.pageserver_bin()?);
|
||||
let var = "LLVM_PROFILE_FILE";
|
||||
if let Some(val) = std::env::var_os(var) {
|
||||
cmd.env(var, val);
|
||||
}
|
||||
|
||||
let id = format!("id={}", self.env.pageserver.id);
|
||||
|
||||
// FIXME: the paths should be shell-escaped to handle paths with spaces, quotes etc.
|
||||
let mut args = vec![
|
||||
"--init".to_string(),
|
||||
"-D".to_string(),
|
||||
self.env.base_data_dir.display().to_string(),
|
||||
"-c".to_string(),
|
||||
format!("pg_distrib_dir='{}'", self.env.pg_distrib_dir.display()),
|
||||
"-c".to_string(),
|
||||
format!("auth_type='{}'", self.env.pageserver.auth_type),
|
||||
"-c".to_string(),
|
||||
format!(
|
||||
"listen_http_addr='{}'",
|
||||
self.env.pageserver.listen_http_addr
|
||||
),
|
||||
"-c".to_string(),
|
||||
format!("listen_pg_addr='{}'", self.env.pageserver.listen_pg_addr),
|
||||
];
|
||||
let base_data_dir_param = self.env.base_data_dir.display().to_string();
|
||||
let pg_distrib_dir_param =
|
||||
format!("pg_distrib_dir='{}'", self.env.pg_distrib_dir.display());
|
||||
let authg_type_param = format!("auth_type='{}'", self.env.pageserver.auth_type);
|
||||
let listen_http_addr_param = format!(
|
||||
"listen_http_addr='{}'",
|
||||
self.env.pageserver.listen_http_addr
|
||||
);
|
||||
let listen_pg_addr_param =
|
||||
format!("listen_pg_addr='{}'", self.env.pageserver.listen_pg_addr);
|
||||
let mut args = Vec::with_capacity(20);
|
||||
|
||||
args.push("--init");
|
||||
args.extend(["-D", &base_data_dir_param]);
|
||||
args.extend(["-c", &pg_distrib_dir_param]);
|
||||
args.extend(["-c", &authg_type_param]);
|
||||
args.extend(["-c", &listen_http_addr_param]);
|
||||
args.extend(["-c", &listen_pg_addr_param]);
|
||||
args.extend(["-c", &id]);
|
||||
|
||||
for config_override in config_overrides {
|
||||
args.extend(["-c", config_override]);
|
||||
}
|
||||
|
||||
if self.env.pageserver.auth_type != AuthType::Trust {
|
||||
args.extend([
|
||||
"-c".to_string(),
|
||||
"auth_validation_public_key_path='auth_public_key.pem'".to_string(),
|
||||
"-c",
|
||||
"auth_validation_public_key_path='auth_public_key.pem'",
|
||||
]);
|
||||
}
|
||||
|
||||
if let Some(tenantid) = create_tenant {
|
||||
args.extend(["--create-tenant".to_string(), tenantid.to_string()])
|
||||
let create_tenant = create_tenant.map(|id| id.to_string());
|
||||
if let Some(tenant_id) = create_tenant.as_deref() {
|
||||
args.extend(["--create-tenant", tenant_id])
|
||||
}
|
||||
|
||||
let status = cmd
|
||||
.args(args)
|
||||
.env_clear()
|
||||
.env("RUST_BACKTRACE", "1")
|
||||
.status()
|
||||
.expect("pageserver init failed");
|
||||
let initial_timeline_id = initial_timeline_id.unwrap_or_else(ZTimelineId::generate);
|
||||
let initial_timeline_id_string = initial_timeline_id.to_string();
|
||||
args.extend(["--initial-timeline-id", &initial_timeline_id_string]);
|
||||
|
||||
if !status.success() {
|
||||
bail!("pageserver init failed");
|
||||
let cmd_with_args = cmd.args(args);
|
||||
let init_output = fill_rust_env_vars(cmd_with_args)
|
||||
.output()
|
||||
.with_context(|| {
|
||||
format!("failed to init pageserver with command {:?}", cmd_with_args)
|
||||
})?;
|
||||
|
||||
if !init_output.status.success() {
|
||||
bail!(
|
||||
"init invocation failed, {}\nStdout: {}\nStderr: {}",
|
||||
init_output.status,
|
||||
String::from_utf8_lossy(&init_output.stdout),
|
||||
String::from_utf8_lossy(&init_output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
// echo the captured output of the init command
|
||||
println!("{}", String::from_utf8_lossy(&init_output.stdout));
|
||||
|
||||
Ok(initial_timeline_id)
|
||||
}
|
||||
|
||||
pub fn repo_path(&self) -> PathBuf {
|
||||
@@ -154,7 +181,7 @@ impl PageServerNode {
|
||||
self.repo_path().join("pageserver.pid")
|
||||
}
|
||||
|
||||
pub fn start(&self) -> anyhow::Result<()> {
|
||||
pub fn start(&self, config_overrides: &[&str]) -> anyhow::Result<()> {
|
||||
print!(
|
||||
"Starting pageserver at '{}' in '{}'",
|
||||
connection_address(&self.pg_connection_config),
|
||||
@@ -162,18 +189,18 @@ impl PageServerNode {
|
||||
);
|
||||
io::stdout().flush().unwrap();
|
||||
|
||||
let mut cmd = Command::new(self.env.pageserver_bin()?);
|
||||
cmd.args(&["-D", self.repo_path().to_str().unwrap()])
|
||||
.arg("--daemonize")
|
||||
.env_clear()
|
||||
.env("RUST_BACKTRACE", "1");
|
||||
let repo_path = self.repo_path();
|
||||
let mut args = vec!["-D", repo_path.to_str().unwrap()];
|
||||
|
||||
let var = "LLVM_PROFILE_FILE";
|
||||
if let Some(val) = std::env::var_os(var) {
|
||||
cmd.env(var, val);
|
||||
for config_override in config_overrides {
|
||||
args.extend(["-c", config_override]);
|
||||
}
|
||||
|
||||
if !cmd.status()?.success() {
|
||||
let mut cmd = Command::new(self.env.pageserver_bin()?);
|
||||
let mut filled_cmd = fill_rust_env_vars(cmd.args(&args).arg("--daemonize"));
|
||||
filled_cmd = fill_aws_secrets_vars(filled_cmd);
|
||||
|
||||
if !filled_cmd.status()?.success() {
|
||||
bail!(
|
||||
"Pageserver failed to start. See '{}' for details.",
|
||||
self.repo_path().join("pageserver.log").display()
|
||||
@@ -304,7 +331,7 @@ impl PageServerNode {
|
||||
}
|
||||
|
||||
pub fn check_status(&self) -> Result<()> {
|
||||
self.http_request(Method::GET, format!("{}/{}", self.http_base_url, "status"))
|
||||
self.http_request(Method::GET, format!("{}/status", self.http_base_url))
|
||||
.send()?
|
||||
.error_from_body()?;
|
||||
Ok(())
|
||||
@@ -312,64 +339,133 @@ impl PageServerNode {
|
||||
|
||||
pub fn tenant_list(&self) -> Result<Vec<TenantInfo>> {
|
||||
Ok(self
|
||||
.http_request(Method::GET, format!("{}/{}", self.http_base_url, "tenant"))
|
||||
.http_request(Method::GET, format!("{}/tenant", self.http_base_url))
|
||||
.send()?
|
||||
.error_from_body()?
|
||||
.json()?)
|
||||
}
|
||||
|
||||
pub fn tenant_create(&self, tenantid: ZTenantId) -> Result<()> {
|
||||
Ok(self
|
||||
.http_request(Method::POST, format!("{}/{}", self.http_base_url, "tenant"))
|
||||
pub fn tenant_create(
|
||||
&self,
|
||||
new_tenant_id: Option<ZTenantId>,
|
||||
settings: HashMap<&str, &str>,
|
||||
) -> anyhow::Result<Option<ZTenantId>> {
|
||||
let tenant_id_string = self
|
||||
.http_request(Method::POST, format!("{}/tenant", self.http_base_url))
|
||||
.json(&TenantCreateRequest {
|
||||
tenant_id: tenantid,
|
||||
new_tenant_id,
|
||||
checkpoint_distance: settings
|
||||
.get("checkpoint_distance")
|
||||
.map(|x| x.parse::<u64>())
|
||||
.transpose()?,
|
||||
compaction_target_size: settings
|
||||
.get("compaction_target_size")
|
||||
.map(|x| x.parse::<u64>())
|
||||
.transpose()?,
|
||||
compaction_period: settings.get("compaction_period").map(|x| x.to_string()),
|
||||
compaction_threshold: settings
|
||||
.get("compaction_threshold")
|
||||
.map(|x| x.parse::<usize>())
|
||||
.transpose()?,
|
||||
gc_horizon: settings
|
||||
.get("gc_horizon")
|
||||
.map(|x| x.parse::<u64>())
|
||||
.transpose()?,
|
||||
gc_period: settings.get("gc_period").map(|x| x.to_string()),
|
||||
image_creation_threshold: settings
|
||||
.get("image_creation_threshold")
|
||||
.map(|x| x.parse::<usize>())
|
||||
.transpose()?,
|
||||
pitr_interval: settings.get("pitr_interval").map(|x| x.to_string()),
|
||||
})
|
||||
.send()?
|
||||
.error_from_body()?
|
||||
.json()?)
|
||||
.json::<Option<String>>()?;
|
||||
|
||||
tenant_id_string
|
||||
.map(|id| {
|
||||
id.parse().with_context(|| {
|
||||
format!(
|
||||
"Failed to parse tennat creation response as tenant id: {}",
|
||||
id
|
||||
)
|
||||
})
|
||||
})
|
||||
.transpose()
|
||||
}
|
||||
|
||||
pub fn branch_list(&self, tenantid: &ZTenantId) -> Result<Vec<BranchInfo>> {
|
||||
Ok(self
|
||||
pub fn tenant_config(&self, tenant_id: ZTenantId, settings: HashMap<&str, &str>) -> Result<()> {
|
||||
self.http_request(Method::PUT, format!("{}/tenant/config", self.http_base_url))
|
||||
.json(&TenantConfigRequest {
|
||||
tenant_id,
|
||||
checkpoint_distance: settings
|
||||
.get("checkpoint_distance")
|
||||
.map(|x| x.parse::<u64>().unwrap()),
|
||||
compaction_target_size: settings
|
||||
.get("compaction_target_size")
|
||||
.map(|x| x.parse::<u64>().unwrap()),
|
||||
compaction_period: settings.get("compaction_period").map(|x| x.to_string()),
|
||||
compaction_threshold: settings
|
||||
.get("compaction_threshold")
|
||||
.map(|x| x.parse::<usize>().unwrap()),
|
||||
gc_horizon: settings
|
||||
.get("gc_horizon")
|
||||
.map(|x| x.parse::<u64>().unwrap()),
|
||||
gc_period: settings.get("gc_period").map(|x| x.to_string()),
|
||||
image_creation_threshold: settings
|
||||
.get("image_creation_threshold")
|
||||
.map(|x| x.parse::<usize>().unwrap()),
|
||||
pitr_interval: settings.get("pitr_interval").map(|x| x.to_string()),
|
||||
})
|
||||
.send()?
|
||||
.error_from_body()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn timeline_list(&self, tenant_id: &ZTenantId) -> anyhow::Result<Vec<TimelineInfo>> {
|
||||
let timeline_infos: Vec<TimelineInfo> = self
|
||||
.http_request(
|
||||
Method::GET,
|
||||
format!("{}/branch/{}", self.http_base_url, tenantid),
|
||||
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
|
||||
)
|
||||
.send()?
|
||||
.error_from_body()?
|
||||
.json()?)
|
||||
.json()?;
|
||||
|
||||
Ok(timeline_infos)
|
||||
}
|
||||
|
||||
pub fn branch_create(
|
||||
pub fn timeline_create(
|
||||
&self,
|
||||
branch_name: &str,
|
||||
startpoint: &str,
|
||||
tenantid: &ZTenantId,
|
||||
) -> Result<BranchInfo> {
|
||||
Ok(self
|
||||
.http_request(Method::POST, format!("{}/branch", self.http_base_url))
|
||||
.json(&BranchCreateRequest {
|
||||
tenant_id: tenantid.to_owned(),
|
||||
name: branch_name.to_owned(),
|
||||
start_point: startpoint.to_owned(),
|
||||
tenant_id: ZTenantId,
|
||||
new_timeline_id: Option<ZTimelineId>,
|
||||
ancestor_start_lsn: Option<Lsn>,
|
||||
ancestor_timeline_id: Option<ZTimelineId>,
|
||||
) -> anyhow::Result<Option<TimelineInfo>> {
|
||||
let timeline_info_response = self
|
||||
.http_request(
|
||||
Method::POST,
|
||||
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
|
||||
)
|
||||
.json(&TimelineCreateRequest {
|
||||
new_timeline_id,
|
||||
ancestor_start_lsn,
|
||||
ancestor_timeline_id,
|
||||
})
|
||||
.send()?
|
||||
.error_from_body()?
|
||||
.json()?)
|
||||
}
|
||||
.json::<Option<TimelineInfo>>()?;
|
||||
|
||||
pub fn branch_get_by_name(
|
||||
&self,
|
||||
tenantid: &ZTenantId,
|
||||
branch_name: &str,
|
||||
) -> Result<BranchInfo> {
|
||||
Ok(self
|
||||
.http_request(
|
||||
Method::GET,
|
||||
format!("{}/branch/{}/{}", self.http_base_url, tenantid, branch_name),
|
||||
)
|
||||
.send()?
|
||||
.error_for_status()?
|
||||
.json()?)
|
||||
Ok(timeline_info_response)
|
||||
}
|
||||
}
|
||||
|
||||
fn fill_aws_secrets_vars(mut cmd: &mut Command) -> &mut Command {
|
||||
for env_key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] {
|
||||
if let Ok(value) = std::env::var(env_key) {
|
||||
cmd = cmd.env(env_key, value);
|
||||
}
|
||||
}
|
||||
cmd
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ set -eux
|
||||
if [ "$1" = 'pageserver' ]; then
|
||||
if [ ! -d "/data/tenants" ]; then
|
||||
echo "Initializing pageserver data directory"
|
||||
pageserver --init -D /data -c "pg_distrib_dir='/usr/local'"
|
||||
pageserver --init -D /data -c "pg_distrib_dir='/usr/local'" -c "id=10"
|
||||
fi
|
||||
echo "Staring pageserver at 0.0.0.0:6400"
|
||||
pageserver -c "listen_pg_addr='0.0.0.0:6400'" -c "listen_http_addr='0.0.0.0:9898'" -D /data
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
- [glossary.md](glossary.md) — Glossary of all the terms used in codebase.
|
||||
- [multitenancy.md](multitenancy.md) — how multitenancy is organized in the pageserver and Zenith CLI.
|
||||
- [sourcetree.md](sourcetree.md) — Overview of the source tree layout.
|
||||
- [pageserver/README](/pageserver/README) — pageserver overview.
|
||||
- [postgres_ffi/README](/postgres_ffi/README) — Postgres FFI overview.
|
||||
- [pageserver/README.md](/pageserver/README.md) — pageserver overview.
|
||||
- [postgres_ffi/README.md](/libs/postgres_ffi/README.md) — Postgres FFI overview.
|
||||
- [test_runner/README.md](/test_runner/README.md) — tests infrastructure overview.
|
||||
- [walkeeper/README](/walkeeper/README) — WAL service overview.
|
||||
- [safekeeper/README.md](/safekeeper/README.md) — WAL service overview.
|
||||
- [core_changes.md](core_changes.md) - Description of Zenith changes in Postgres core
|
||||
|
||||
@@ -27,4 +27,4 @@ management_token = jwt.encode({"scope": "pageserverapi"}, auth_keys.priv, algori
|
||||
tenant_token = jwt.encode({"scope": "tenant", "tenant_id": ps.initial_tenant}, auth_keys.priv, algorithm="RS256")
|
||||
```
|
||||
|
||||
Utility functions to work with jwts in rust are located in zenith_utils/src/auth.rs
|
||||
Utility functions to work with jwts in rust are located in libs/utils/src/auth.rs
|
||||
|
||||
@@ -7,32 +7,14 @@ Currently we build two main images:
|
||||
- [zenithdb/zenith](https://hub.docker.com/repository/docker/zenithdb/zenith) — image with pre-built `pageserver`, `safekeeper` and `proxy` binaries and all the required runtime dependencies. Built from [/Dockerfile](/Dockerfile).
|
||||
- [zenithdb/compute-node](https://hub.docker.com/repository/docker/zenithdb/compute-node) — compute node image with pre-built Postgres binaries from [zenithdb/postgres](https://github.com/zenithdb/postgres).
|
||||
|
||||
And two intermediate images used either to reduce build time or to deliver some additional binary tools from other repos:
|
||||
And additional intermediate images:
|
||||
|
||||
- [zenithdb/build](https://hub.docker.com/repository/docker/zenithdb/build) — image with all the dependencies required to build Zenith and compute node images. This image is based on `rust:slim-buster`, so it also has a proper `rust` environment. Built from [/Dockerfile.build](/Dockerfile.build).
|
||||
- [zenithdb/compute-tools](https://hub.docker.com/repository/docker/zenithdb/compute-tools) — compute node configuration management tools.
|
||||
|
||||
## Building pipeline
|
||||
|
||||
1. Image `zenithdb/compute-tools` is re-built automatically.
|
||||
|
||||
2. Image `zenithdb/build` is built manually. If you want to introduce any new compile time dependencies to Zenith or compute node you have to update this image as well, build it and push to Docker Hub.
|
||||
2. Image `zenithdb/compute-node` is built independently in the [zenithdb/postgres](https://github.com/zenithdb/postgres) repo.
|
||||
|
||||
Build:
|
||||
```sh
|
||||
docker build -t zenithdb/build:buster -f Dockerfile.build .
|
||||
```
|
||||
|
||||
Login:
|
||||
```sh
|
||||
docker login
|
||||
```
|
||||
|
||||
Push to Docker Hub:
|
||||
```sh
|
||||
docker push zenithdb/build:buster
|
||||
```
|
||||
|
||||
3. Image `zenithdb/compute-node` is built independently in the [zenithdb/postgres](https://github.com/zenithdb/postgres) repo.
|
||||
|
||||
4. Image `zenithdb/zenith` is built in this repo after a successful `release` tests run and pushed to Docker Hub automatically.
|
||||
3. Image `zenithdb/zenith` is built in this repo after a successful `release` tests run and pushed to Docker Hub automatically.
|
||||
|
||||
@@ -2,6 +2,16 @@
|
||||
|
||||
### Authentication
|
||||
|
||||
### Backpressure
|
||||
|
||||
Backpressure is used to limit the lag between pageserver and compute node or WAL service.
|
||||
|
||||
If the compute node or WAL service runs far ahead of the pageserver,
the time of serving page requests increases. This may lead to timeout errors.
|
||||
|
||||
To tune the backpressure limits, use the `max_replication_write_lag`, `max_replication_flush_lag` and `max_replication_apply_lag` settings.
When the lag between the current LSN (`pg_current_wal_flush_lsn()` on the compute node) and the minimal write/flush/apply position of the replica exceeds the limit,
backends performing writes are blocked until the replica catches up.
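A minimal sketch of that check, assuming illustrative names and plain `u64` LSN byte positions (this is not the actual compute/pageserver code):

```rust
// Illustrative only: decide whether writing backends should be throttled,
// given the compute's current flush LSN and the replica's reported positions.
struct ReplicaFeedback {
    write_lsn: u64,
    flush_lsn: u64,
    apply_lsn: u64,
}

fn should_throttle_writes(
    current_flush_lsn: u64,
    feedback: &ReplicaFeedback,
    max_replication_write_lag: u64,
    max_replication_flush_lag: u64,
    max_replication_apply_lag: u64,
) -> bool {
    current_flush_lsn.saturating_sub(feedback.write_lsn) > max_replication_write_lag
        || current_flush_lsn.saturating_sub(feedback.flush_lsn) > max_replication_flush_lag
        || current_flush_lsn.saturating_sub(feedback.apply_lsn) > max_replication_apply_lag
}
```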
|
||||
### Base image (page image)
|
||||
|
||||
### Basebackup
|
||||
@@ -11,7 +21,7 @@ NOTE:It has nothing to do with PostgreSQL pg_basebackup.
|
||||
|
||||
### Branch
|
||||
|
||||
We can create branch at certain LSN using `zenith branch` command.
|
||||
We can create a branch at a certain LSN using the `zenith timeline branch` command.
|
||||
Each Branch lives in a corresponding timeline[] and has an ancestor[].
|
||||
|
||||
|
||||
@@ -19,24 +29,32 @@ Each Branch lives in a corresponding timeline[] and has an ancestor[].
|
||||
|
||||
NOTE: This is an overloaded term.
|
||||
|
||||
A checkpoint record in the WAL marks a point in the WAL sequence at which it is guaranteed that all data files have been updated with all information from shared memory modified before that checkpoint;
|
||||
A checkpoint record in the WAL marks a point in the WAL sequence at which it is guaranteed that all data files have been updated with all information from shared memory modified before that checkpoint;
|
||||
|
||||
### Checkpoint (Layered repository)
|
||||
|
||||
NOTE: This is an overloaded term.
|
||||
|
||||
Whenever enough WAL has been accumulated in memory, the page server []
|
||||
writes out the changes from in-memory layers into new layer files[]. This process
|
||||
is called "checkpointing". The page server only creates layer files for
|
||||
relations that have been modified since the last checkpoint.
|
||||
writes out the changes from the in-memory layer into a new delta layer file. This process
|
||||
is called "checkpointing".
|
||||
|
||||
Configuration parameter `checkpoint_distance` defines how far the current LSN
may advance past the last checkpoint before in-memory layers are checkpointed.
|
||||
Default is `DEFAULT_CHECKPOINT_DISTANCE`.
|
||||
Set this parameter to `0` to force checkpoint of every layer.
|
||||
|
||||
Configuration parameter `checkpoint_period` defines the interval between checkpoint iterations.
|
||||
Default is `DEFAULT_CHECKPOINT_PERIOD`.
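A minimal sketch of the distance-based trigger described above (illustrative only, not the pageserver's actual code; plain `u64`s stand in for the `Lsn` type):

```rust
// Checkpoint the in-memory layer once the WAL written since the last
// checkpoint reaches `checkpoint_distance`; a distance of 0 forces a
// checkpoint for every layer.
fn should_checkpoint(last_record_lsn: u64, last_checkpoint_lsn: u64, checkpoint_distance: u64) -> bool {
    last_record_lsn.saturating_sub(last_checkpoint_lsn) >= checkpoint_distance
}
```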
|
||||
### Compaction
|
||||
|
||||
A background operation on layer files. Compaction takes a number of L0
|
||||
layer files, each of which covers the whole key space and a range of
|
||||
LSN, and reshuffles the data in them into L1 files so that each file
|
||||
covers the whole LSN range, but only part of the key space.
|
||||
|
||||
Compaction should also opportunistically leave out obsolete page versions
|
||||
from the L1 files, and materialize other page versions for faster
|
||||
access. That hasn't been implemented as of this writing, though.
|
||||
|
||||
|
||||
### Compute node
|
||||
|
||||
Stateless Postgres node that stores data in pageserver.
|
||||
@@ -44,10 +62,10 @@ Stateless Postgres node that stores data in pageserver.
|
||||
### Garbage collection
|
||||
|
||||
The process of removing old on-disk layers that are not needed by any timeline anymore.
|
||||
|
||||
### Fork
|
||||
|
||||
Each of the separate segmented file sets in which a relation is stored. The main fork is where the actual data resides. There also exist two secondary forks for metadata: the free space map and the visibility map.
|
||||
Each PostgreSQL fork is considered a separate relish.
|
||||
|
||||
### Layer
|
||||
|
||||
@@ -62,21 +80,51 @@ are immutable. See pageserver/src/layered_repository/README.md for more.
|
||||
### Layer file (on-disk layer)
|
||||
|
||||
Layered repository on-disk format is based on immutable files. The
|
||||
files are called "layer files". Each file corresponds to one RELISH_SEG_SIZE
|
||||
segment of a PostgreSQL relation fork. There are two kinds of layer
|
||||
files: image files and delta files. An image file contains a
|
||||
"snapshot" of the segment at a particular LSN, and a delta file
|
||||
contains WAL records applicable to the segment, in a range of LSNs.
|
||||
files are called "layer files". There are two kinds of layer files:
|
||||
image files and delta files. An image file contains a "snapshot" of a
|
||||
range of keys at a particular LSN, and a delta file contains WAL
|
||||
records applicable to a range of keys, in a range of LSNs.
|
||||
|
||||
### Layer map
|
||||
|
||||
The layer map tracks what layers exist for all the relishes in a timeline.
|
||||
The layer map tracks what layers exist in a timeline.
|
||||
|
||||
### Layered repository
|
||||
|
||||
Zenith repository implementation that keeps data in layers.
|
||||
### LSN
|
||||
|
||||
The Log Sequence Number (LSN) is a unique identifier of the WAL record[] in the WAL log.
|
||||
The insert position is a byte offset into the logs, increasing monotonically with each new record.
|
||||
Internally, an LSN is a 64-bit integer, representing a byte position in the write-ahead log stream.
|
||||
It is printed as two hexadecimal numbers of up to 8 digits each, separated by a slash.
|
||||
Check also [PostgreSQL doc about pg_lsn type](https://www.postgresql.org/docs/devel/datatype-pg-lsn.html)
|
||||
Values can be compared to calculate the volume of WAL data that separates them, so they are used to measure the progress of replication and recovery.
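For illustration, a small helper (not the repo's `Lsn` type) showing how a 64-bit LSN maps to the `X/Y` hex form mentioned above:

```rust
// Print the high and low 32 bits of the 64-bit LSN as two hex numbers
// separated by a slash, the same textual form PostgreSQL uses.
fn format_lsn(lsn: u64) -> String {
    format!("{:X}/{:X}", lsn >> 32, lsn & 0xFFFF_FFFF)
}

// Example: format_lsn(0x16B374D848) == "16/B374D848"
```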
|
||||
|
||||
In postgres and Zenith lsns are used to describe certain points in WAL handling.
|
||||
|
||||
PostgreSQL LSNs and functions to monitor them:
|
||||
* `pg_current_wal_insert_lsn()` - Returns the current write-ahead log insert location.
|
||||
* `pg_current_wal_lsn()` - Returns the current write-ahead log write location.
|
||||
* `pg_current_wal_flush_lsn()` - Returns the current write-ahead log flush location.
|
||||
* `pg_last_wal_receive_lsn()` - Returns the last write-ahead log location that has been received and synced to disk by streaming replication. While streaming replication is in progress this will increase monotonically.
|
||||
* `pg_last_wal_replay_lsn ()` - Returns the last write-ahead log location that has been replayed during recovery. If recovery is still in progress this will increase monotonically.
|
||||
[source PostgreSQL documentation](https://www.postgresql.org/docs/devel/functions-admin.html):
|
||||
|
||||
Zenith safekeeper LSNs. For more check [safekeeper/README_PROTO.md](/safekeeper/README_PROTO.md)
|
||||
* `CommitLSN`: position in WAL confirmed by quorum safekeepers.
|
||||
* `RestartLSN`: position in WAL confirmed by all safekeepers.
|
||||
* `FlushLSN`: part of WAL persisted to the disk by safekeeper.
|
||||
* `VCL`: the largest LSN for which we can guarantee availability of all prior records.
|
||||
|
||||
Zenith pageserver LSNs:
|
||||
* `last_record_lsn` - the end of last processed WAL record.
|
||||
* `disk_consistent_lsn` - data is known to be fully flushed and fsync'd to local disk on pageserver up to this LSN.
|
||||
* `remote_consistent_lsn` - The last LSN that is synced to remote storage and is guaranteed to survive pageserver crash.
|
||||
TODO: use this name consistently in remote storage code. Now `disk_consistent_lsn` is used and meaning depends on the context.
|
||||
* `ancestor_lsn` - LSN of the branch point (the LSN at which this branch was created)
|
||||
|
||||
TODO: add table that describes mapping between PostgreSQL (compute), safekeeper and pageserver LSNs.
|
||||
### Page (block)
|
||||
|
||||
The basic structure used to store relation data. All pages are of the same size.
|
||||
@@ -109,14 +157,6 @@ and create new databases and accounts (control plane API in our case).
|
||||
|
||||
The generic term in PostgreSQL for all objects in a database that have a name and a list of attributes defined in a specific order.
|
||||
|
||||
### Relish
|
||||
|
||||
We call each relation and other file that is stored in the
|
||||
repository a "relish". It comes from "rel"-ish, as in "kind of a
|
||||
rel", because it covers relations as well as other things that are
|
||||
not relations, but are treated similarly for the purposes of the
|
||||
storage layer.
|
||||
|
||||
### Replication slot
|
||||
|
||||
|
||||
@@ -133,33 +173,24 @@ One repository corresponds to one Tenant.
|
||||
|
||||
How much history do we need to keep around for PITR and read-only nodes?
|
||||
|
||||
### Segment (PostgreSQL)
|
||||
|
||||
NOTE: This is an overloaded term.
|
||||
### Segment
|
||||
|
||||
A physical file that stores data for a given relation. File segments are
|
||||
limited in size by a compile-time setting (1 gigabyte by default), so if a
|
||||
relation exceeds that size, it is split into multiple segments.
|
||||
|
||||
### Segment (Layered Repository)
|
||||
|
||||
NOTE: This is an overloaded term.
|
||||
|
||||
Segment is a RELISH_SEG_SIZE slice of relish (identified by a SegmentTag).
|
||||
|
||||
### SLRU
|
||||
|
||||
SLRUs include pg_clog, pg_multixact/members, and
|
||||
pg_multixact/offsets. There are other SLRUs in PostgreSQL, but
|
||||
they don't need to be stored permanently (e.g. pg_subtrans),
|
||||
or we do not support them in zenith yet (pg_commit_ts).
|
||||
Each SLRU segment is considered a separate relish[].
|
||||
|
||||
### Tenant (Multitenancy)
|
||||
Tenant represents a single customer, interacting with Zenith.
|
||||
Wal redo[] activity, timelines[], layers[] are managed for each tenant independently.
|
||||
One pageserver[] can serve multiple tenants at once.
|
||||
One safekeeper
|
||||
One safekeeper
|
||||
|
||||
See `docs/multitenancy.md` for more.
|
||||
|
||||
|
||||
docs/pageserver-tenant-migration.md (new file)
@@ -0,0 +1,22 @@
|
||||
## Pageserver tenant migration
|
||||
|
||||
### Overview
|
||||
|
||||
This feature allows migrating a timeline from one pageserver to another by utilizing the remote storage capability.
|
||||
|
||||
### Migration process
|
||||
|
||||
Pageserver implements two new http handlers: timeline attach and timeline detach.
|
||||
Timeline migration is performed in the following way:
|
||||
1. Timeline attach is called on the target pageserver. This asks the pageserver to download the latest checkpoint uploaded to S3.
|
||||
2. For now it is necessary to manually initialize the replication stream via a callmemaybe call so the target pageserver initializes replication from the safekeeper (it is desirable to avoid this and initialize replication directly in the attach handler, but this requires some refactoring (probably [#997](https://github.com/zenithdb/zenith/issues/997)/[#1049](https://github.com/zenithdb/zenith/issues/1049))
|
||||
3. Replication state can be tracked via the timeline detail pageserver call.
|
||||
4. The compute node should be restarted with the new pageserver connection string. The issue of multiple compute nodes for one timeline is handled at the safekeeper consensus level, so this is not a problem here. Currently the responsibility for rescheduling the compute with the updated config lies with the external coordinator (console).
|
||||
5. The timeline is detached from the old pageserver. Its on-disk data is removed.
|
||||
|
||||
|
||||
### Implementation details
|
||||
|
||||
Now the safekeeper needs to track which pageserver it is replicating to. This introduces complications into the replication code:
|
||||
* We need to distinguish different pageservers (currently this is done by connection string, which is imperfect and is covered here: https://github.com/zenithdb/zenith/issues/1105). Callmemaybe subscription management also needs to track that (this is already implemented).
|
||||
* We need to track which pageserver is the primary. This is needed to avoid reconnecting to non-primary pageservers when they decide to stop their walreceiver. E.g. this can happen when there is load on the compute and we are trying to detach the timeline from the old pageserver. In this case callmemaybe will try to reconnect to it because the replication termination condition is not met (a pageserver with an active compute can never catch up to the latest LSN, so there is always some WAL tail).
|
||||
docs/rfcs/002-storage.md (new file)
@@ -0,0 +1,186 @@
|
||||
# Zenith storage node — alternative
|
||||
|
||||
## **Design considerations**
|
||||
|
||||
Simplify storage operations for people => Gain adoption/installs on laptops and small private installation => Attract customers to DBaaS by seamless integration between our tooling and cloud.
|
||||
|
||||
Proposed architecture addresses:
|
||||
|
||||
- High availability -- tolerates n/2 - 1 failures
|
||||
- Multi-tenancy -- one storage for all databases
|
||||
- Elasticity -- increase storage size on the go by adding nodes
|
||||
- Snapshots / backups / PITR with S3 offload
|
||||
- Compression
|
||||
|
||||
Minuses are:
|
||||
|
||||
- Quite a lot of work
|
||||
- Single page access may touch few disk pages
|
||||
- Some bloat in data — may slowdown sequential scans
|
||||
|
||||
## **Summary**
|
||||
|
||||
Storage cluster is a sharded key-value store with ordered keys. Key (**page_key**) is a tuple of `(pg_id, db_id, timeline_id, rel_id, forkno, segno, pageno, lsn)`. Value is either a page or a page diff/wal. Each chunk (chunk == shard) stores approx 50-100GB ~~and automatically splits in half when it grows bigger than a soft 100GB limit~~ by having a fixed range of pageno's it is responsible for. Chunk placement on storage nodes is stored in a separate metadata service, so a chunk can be freely moved around the cluster if needed. A chunk itself is a filesystem directory with the following sub-directories:
|
||||
|
||||
```
|
||||
|
||||
|-chunk_42/
|
||||
|-store/ -- contains lsm with pages/pagediffs ranging from
|
||||
| page_key_lo to page_key_hi
|
||||
|-wal/
|
||||
| |- db_1234/ db-specific wal files with pages from page_key_lo
|
||||
| to page_key_hi
|
||||
|
|
||||
|-chunk.meta -- small file with snapshot references
|
||||
(page_key_prefix+lsn+name)
|
||||
and PITR regions (page_key_start, page_key_end)
|
||||
```
|
||||
|
||||
## **Chunk**
|
||||
|
||||
A chunk is responsible for storing pages potentially from different databases and relations. Each page is addressed by a lexicographically ordered tuple (**page_key**) with the following fields (see the sketch after this list):
|
||||
|
||||
- `pg_id` -- unique id of given postgres instance (or postgres cluster as it is called in postgres docs)
|
||||
- `db_id` -- database that was created by 'CREATE DATABASE' in a given postgres instance
|
||||
- `db_timeline` -- used to create Copy-on-Write instances from snapshots, described later
|
||||
- `rel_id` -- tuple of (relation_id, 0) for tables and (indexed_relation_id, rel_id) for indices. Done this way so table indices were closer to table itself on our global key space.
|
||||
- `(forkno, segno, pageno)` -- page coordinates in postgres data files
|
||||
- `lsn_timeline` -- postgres feature, increments when PITR was done.
|
||||
- `lsn` -- lsn of current page version.
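A rough Rust sketch of this tuple; the field widths are assumptions, and deriving `Ord` gives the lexicographic ordering the chunk relies on:

```rust
// Hypothetical shape of the page_key described above; fields follow the
// text, integer widths are illustrative guesses.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
struct PageKey {
    pg_id: u64,
    db_id: u32,
    db_timeline: u32,
    // (relation_id, 0) for tables, (indexed_relation_id, rel_id) for indices,
    // so that a table's indices sort next to the table itself.
    rel_id: (u32, u32),
    forkno: u8,
    segno: u32,
    pageno: u32,
    lsn_timeline: u32,
    lsn: u64,
}
```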
|
||||
|
||||
A chunk stores pages and page diffs ranging from page_key_lo to page_key_hi. The processing node looks at the page in a wal record and sends the record to the chunk responsible for this page range. When a wal record arrives at a chunk it is initially stored in `chunk_id/wal/db_id/wal_segno.wal`. Then a background process moves records from those wal files to the lsm tree in `chunk_id/store`. Or, more precisely, wal records would be materialized into the lsm memtable, and when that memtable is flushed to an SSTable on disk we may trim the wal. That way some pages that are not durably (in the distributed sense) committed may enter the tree -- here we rely on processing node behavior: a page request from the processing node should contain proper lsn horizons so that the storage node may respond with the proper page version.
|
||||
|
||||
LSM here is a usual LSM for variable-length values: at first data is stored in memory (we keep incoming wal records to be able to regenerate it after restart) in some balanced tree. When this tree grows big enough we dump it into a disk file (SSTable), sorting records by key. Then SSTables are merge-sorted in the background into different files. All file operations are sequential and do not require WAL for durability.
|
||||
|
||||
Content of SSTable can be following:
|
||||
|
||||
```jsx
|
||||
(pg_id, db_id, ... , pageno=42, lsn=100) (full 8k page data)
|
||||
(pg_id, db_id, ... , pageno=42, lsn=150) (per-page diff)
|
||||
(pg_id, db_id, ... , pageno=42, lsn=180) (per-page diff)
|
||||
(pg_id, db_id, ... , pageno=42, lsn=200) (per-page diff)
|
||||
(pg_id, db_id, ... , pageno=42, lsn=220) (full 8k page data)
|
||||
(pg_id, db_id, ... , pageno=42, lsn=250) (per-page diff)
|
||||
(pg_id, db_id, ... , pageno=42, lsn=270) (per-page diff)
|
||||
(pg_id, db_id, ... , pageno=5000, lsn=100) (full 8k page data)
|
||||
```
|
||||
|
||||
So a query for `pageno=42 up to lsn=260` would need to find the closest entry less than this key, iterate back to the latest full page and iterate forward to apply diffs. How often a page is materialized in the lsn-version sequence is up to us -- let's say every 5th version should be a full page.
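A minimal sketch of that read path, assuming an ordered map keyed by `(pageno, lsn)`; the real chunk would use the LSM tree, and `apply_diff` stands in for WAL redo:

```rust
use std::collections::BTreeMap;
use std::ops::Bound::Included;

enum Version {
    Full(Vec<u8>),
    Diff(Vec<u8>),
}

fn get_page(store: &BTreeMap<(u32, u64), Version>, pageno: u32, lsn: u64) -> Option<Vec<u8>> {
    // All versions of this page up to the requested LSN, oldest first.
    let versions: Vec<_> = store
        .range((Included((pageno, 0)), Included((pageno, lsn))))
        .collect();
    // Walk back to the latest full image, then replay diffs forward.
    let base_idx = versions.iter().rposition(|(_, v)| matches!(v, Version::Full(_)))?;
    let mut page = match versions[base_idx] {
        (_, Version::Full(img)) => img.clone(),
        _ => unreachable!(),
    };
    for (_, v) in &versions[base_idx + 1..] {
        if let Version::Diff(d) = v {
            apply_diff(&mut page, d); // placeholder for WAL redo
        }
    }
    Some(page)
}

fn apply_diff(_page: &mut [u8], _diff: &[u8]) { /* WAL redo would go here */ }
```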
|
||||
|
||||
### **Page deletion**
|
||||
|
||||
To delete old pages we insert a blind deletion marker `(pg_id, db_id, #trim_lsn < 150)` into the lsm tree. During merges such a marker indicates that all pages with a smaller lsn should be discarded. The delete marker will travel down the tree level hierarchy until it reaches the last level. In a non-PITR scenario where old page versions are not needed at all, such a deletion marker would (on average) prevent old page versions from propagating down the tree -- so all bloat would concentrate at the higher tree layers without affecting the bigger bottom layers.
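A hypothetical shape of that merge-time filter (illustrative only, not a concrete design):

```rust
// The blind deletion marker carries the key prefix it applies to and the
// trim LSN; during an SSTable merge, versions older than trim_lsn are dropped.
struct TrimMarker {
    pg_id: u64,
    db_id: u32,
    trim_lsn: u64,
}

fn keep_during_merge(version_lsn: u64, marker: Option<&TrimMarker>) -> bool {
    match marker {
        Some(m) => version_lsn >= m.trim_lsn, // discard anything older than the marker
        None => true,
    }
}
```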
|
||||
|
||||
### **Recovery**
|
||||
|
||||
Upon storage node restart recent WAL files are applied to appropriate pages and resulting pages stored in lsm memtable. So this should be fast since we are not writing anything to disk.
|
||||
|
||||
### **Checkpointing**
|
||||
|
||||
No such mechanism is needed. Or we may look at the storage node as a kind of continuous checkpointer.
|
||||
|
||||
### **Full page writes (torn page protection)**
|
||||
|
||||
The storage node never updates individual pages, only merges SSTables, so torn pages are not an issue.
|
||||
|
||||
### **Snapshot**
|
||||
|
||||
That is the part that I like about this design -- snapshot creation is instant and cheap operation that can have flexible granularity level: whole instance, database, table. Snapshot creation inserts a record in `chunk.meta` file with lsn of this snapshot and key prefix `(pg_id, db_id, db_timeline, rel_id, *)` that prohibits pages deletion within this range. Storage node may not know anything about page internals, but by changing number of fields in our prefix we may change snapshot granularity.
|
||||
|
||||
It is again useful to remap `rel_id` to `(indexed_relation_id, rel_id)` so that a snapshot of a relation would include its indices. Also a table snapshot would trickily interact with the catalog. Probably all table snapshots should also hold a catalog snapshot. And when a node is started with such a snapshot it should check that only tables from the snapshot are queried. I assume here that for snapshot reading one needs to start a new postgres instance.
|
||||
|
||||
Storage consumed by a snapshot is proportional to the amount of data changed. We may have some heuristic (calculated based on the cost of different storages) about when to offload an old snapshot to s3. For example, if the current database has more than 40% of changed pages with respect to the previous snapshot then we may offload that snapshot to s3, and release this space.
|
||||
|
||||
**Starting db from snapshot**
|
||||
|
||||
When we are starting a database from a snapshot it can be done in two ways. First, we may create a new db_id, move all the data from the snapshot to a new db and start a database. The second option is to create a Copy-on-Write (CoW) instance out of the snapshot, read old pages from the old snapshot and store new pages separately. That is why there is a `db_timeline` key field near `db_id` -- a CoW (🐮) database should create a new `db_timeline` and remember the old `db_timeline`. Such a database can have a hashmap of the pages that it has changed to query pages from the proper snapshot on the first try. `db_timeline` is located near `db_id` so that new page versions generated by the new instance would not bloat data of the initial snapshot. It is not clear whether it is possible to effectively support "stacked" CoW snapshots, so we may disallow them. (Well, one way to support them is to move `db_timeline` close to `lsn` -- so we may scan neighboring pages and find the right one. But again that way we bloat the snapshot with unrelated data and may slow down full scans happening in a different database).
|
||||
|
||||
**Snapshot export/import**
|
||||
|
||||
Once we may start CoW instances it is easy to run auxiliary postgres instance on this snapshot and run `COPY FROM (...) TO stdout` or `pg_dump` and export data from the snapshot to some portable formats. Also we may start postgres on a new empty database and run `COPY FROM stdin`. This way we can initialize new non-CoW databases and transfer snapshots via network.
|
||||
|
||||
### **PITR area**
|
||||
|
||||
In the described scheme PITR is just a prohibition on deleting any versions within some key prefix, be it a database or a table key prefix. So PITR may have different settings for different tables, databases, etc.
|
||||
|
||||
PITR is quite bloaty, so we may aggressively offload it to s3 -- we may push same (or bigger) SSTables to s3 and maintain lsm structure there.
|
||||
|
||||
### **Compression**
|
||||
|
||||
Since we are storing page diffs of variable sizes there is no structural dependency on a page size and we may compress it. Again that could be enabled only on pages with some key prefixes, so we may have this with db/table granularity.
|
||||
|
||||
### **Chunk metadata**
|
||||
|
||||
Chunk metadata is a file that lies in the chunk directory and stores info about current snapshots and PITR regions. The chunk should always consult this data when merging SSTables and applying delete markers.
|
||||
|
||||
### **Chunk splitting**
|
||||
|
||||
*(NB: following paragraph is about how to avoid page splitting)*
|
||||
|
||||
When a chunk hits some soft storage limit (let's say 100GB) it should be split in half and the global metadata about chunk boundaries should be updated. Here I assume that chunk split is a local operation happening on a single node. The process of chunk splitting should look like the following (a sketch of picking the separation key follows this list):
|
||||
|
||||
1. Find separation key and spawn two new chunks with [lo, mid) [mid, hi) boundaries.
|
||||
|
||||
2. Prohibit WAL deletion and old SSTables deletion on original chunk.
|
||||
|
||||
3. On each lsm layer we would need to split only one SSTable, all other would fit within left or right range. Symlink/split that files to new chunks.
|
||||
|
||||
4. Start WAL replay on new chunks.
|
||||
|
||||
5. Update global metadata about new chunk boundaries.
|
||||
|
||||
6. Eventually (metadata update should be pushed to processing node by metadata service) storage node will start sending WAL and page requests to the new nodes.
|
||||
|
||||
7. New chunk may start serving read queries when following conditions are met:
|
||||
|
||||
a) it receives at least one WAL record from the processing node
|
||||
|
||||
b) it replayed all WAL up to the new received one
|
||||
|
||||
c) checked by downlinks that there were no WAL gaps.
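A hypothetical helper for step 1, assuming we can iterate the chunk's keys in order together with their on-disk sizes (a sketch, not part of any existing implementation):

```rust
// Pick a separation key that splits the chunk into two halves of roughly
// equal on-disk size.
fn find_separation_key<K: Ord + Clone>(keys_with_sizes: &[(K, u64)]) -> Option<K> {
    let total: u64 = keys_with_sizes.iter().map(|(_, size)| *size).sum();
    let mut accumulated = 0u64;
    for (key, size) in keys_with_sizes {
        accumulated += *size;
        if accumulated * 2 >= total {
            // Keys below `key` go to [lo, mid), the rest to [mid, hi).
            return Some(key.clone());
        }
    }
    None
}
```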
|
||||
|
||||
Chunk split as described here is quite a fast operation when it happens on the local disk -- the vast majority of files will just be moved without copying anything. I suggest always keeping the split local and not mixing it with moving chunks around the cluster. So if we want to split some chunk but there is a small amount of free space left on the device, we should first move some chunks away from the node and then proceed with splitting.
|
||||
|
||||
### Fixed chunks
|
||||
|
||||
An alternative strategy is not to split at all and have pageno-fixed chunk boundaries. When a table is created we first materialize this chunk by storing only the first new pages, and the chunk is small. Then the chunk grows while the table is filled, but it can't grow substantially bigger than the allowed pageno range, so at max it would be 1GB or whatever limit we want + some bloat due to snapshots and old page versions.
|
||||
|
||||
### **Chunk lsm internals**
|
||||
|
||||
So how to implement chunk's lsm?
|
||||
|
||||
- Write from scratch and use RocksDB to prototype/benchmark, then switch to own lsm implementation. RocksDB can provide some sanity check for performance of home-brewed implementation and it would be easier to prototype.
|
||||
- Use postgres as a lego constructor. We may model the memtable with a postgres B-tree referencing some in-memory log of incoming records. SSTable merging may reuse postgres' external merging algorithm, etc. One thing that would definitely not fit (or I didn't come up with an idea how to fit it) -- is multi-tenancy. If we are storing pages from different databases we can't use the postgres buffer pool, since there is no db_id in the page header. We can add a new field there but IMO it would be a no-go for committing that to vanilla.
|
||||
|
||||
Another possibility is not to try to fit several databases in one storage node. But that way it is a no-go for a multi-tenant cloud installation: we would need to run a lot of storage node instances on one physical storage node, each with its own local page cache. So that would be much closer to ordinary managed RDS.
|
||||
|
||||
Multi-tenant storage makes sense even on a laptop, when you work with different databases, run tests with a temp database, etc. And when the installation grows bigger it starts to make more and more sense, so it seems important.
|
||||
|
||||
# **Storage fleet**
|
||||
|
||||
- When a database is smaller than the chunk size we can naturally store it in one chunk (since its page_key range would fit in some chunk's [lo, hi) range).
|
||||
|
||||
<img width="937" alt="Screenshot_2021-02-22_at_16 49 17" src="https://user-images.githubusercontent.com/284219/108729836-ffcbd200-753b-11eb-9412-db802ec30021.png">
|
||||
|
||||
Few databases are stored in one chunk, replicated three times
|
||||
|
||||
- When a database can't fit into one storage node it can occupy lots of chunks that were split while the database was growing. Chunk placement on nodes is controlled by us with some automation, but we can always manually move chunks around the cluster.
|
||||
|
||||
<img width="940" alt="Screenshot_2021-02-22_at_16 49 10" src="https://user-images.githubusercontent.com/284219/108729815-fb071e00-753b-11eb-86e0-be6703e47d82.png">
|
||||
|
||||
Here one big database occupies two sets of nodes. Also some chunks were moved around to restore the replication factor after a disk failure. In this case we also have "sharded" storage for a big database and issue WAL writes to different chunks in parallel.
|
||||
|
||||
## **Chunk placement strategies**
|
||||
|
||||
There are a few scenarios where we may want to move chunks around the cluster:
|
||||
|
||||
- disk usage on some node is big
|
||||
- some disk experienced a failure
|
||||
- some node experienced a failure or need maintenance
|
||||
|
||||
## **Chunk replication**
|
||||
|
||||
Chunk replication may be done by cloning page ranges with respect to some lsn from peer nodes, updating global metadata, waiting for WAL to come, replaying previous WAL and becoming online -- more or less like during chunk split.
|
||||
|
||||
docs/rfcs/003-laptop-cli.md (new file)
@@ -0,0 +1,267 @@
|
||||
# Command line interface (end-user)
|
||||
|
||||
Zenith CLI as it is described here mostly resides on the same conceptual level as pg_ctl/initdb/pg_recvxlog/etc and replaces some of them in an opinionated way. I would also suggest bundling our patched postgres inside zenith distribution at least at the start.
|
||||
|
||||
This proposal is focused on managing local installations. For cluster operations, different tooling would be needed. The point of integration between the two is storage URL: no matter how complex cluster setup is it may provide an endpoint where the user may push snapshots.
|
||||
|
||||
The most important concept here is a snapshot, which can be created/pushed/pulled/exported. Also, we may start temporary read-only postgres instance over any local snapshot. A more complex scenario would consist of several basic operations over snapshots.
|
||||
|
||||
# Possible usage scenarios
|
||||
|
||||
## Install zenith, run a postgres
|
||||
|
||||
```
|
||||
> brew install pg-zenith
|
||||
> zenith pg create # creates pgdata with default pattern pgdata$i
|
||||
> zenith pg list
|
||||
ID PGDATA USED STORAGE ENDPOINT
|
||||
primary1 pgdata1 0G zenith-local localhost:5432
|
||||
```
|
||||
|
||||
## Import standalone postgres to zenith
|
||||
|
||||
```
|
||||
> zenith snapshot import --from=basebackup://replication@localhost:5432/ oldpg
|
||||
[====================------------] 60% | 20MB/s
|
||||
> zenith snapshot list
|
||||
ID SIZE PARENT
|
||||
oldpg 5G -
|
||||
|
||||
> zenith pg create --snapshot oldpg
|
||||
Started postgres on localhost:5432
|
||||
|
||||
> zenith pg list
|
||||
ID PGDATA USED STORAGE ENDPOINT
|
||||
primary1 pgdata1 5G zenith-local localhost:5432
|
||||
|
||||
> zenith snapshot destroy oldpg
|
||||
Ok
|
||||
```
|
||||
|
||||
Also, we may start snapshot import implicitly by looking at snapshot schema
|
||||
|
||||
```
|
||||
> zenith pg create --snapshot basebackup://replication@localhost:5432/
|
||||
Downloading snapshot... Done.
|
||||
Started postgres on localhost:5432
|
||||
Destroying snapshot... Done.
|
||||
```
|
||||
|
||||
## Pull snapshot with some publicly shared database
|
||||
|
||||
Since we may export the whole snapshot as one big file (tar of basebackup, maybe with some manifest) it may be shared over conventional means: http, ssh, [git+lfs](https://docs.github.com/en/github/managing-large-files/about-git-large-file-storage).
|
||||
|
||||
```
|
||||
> zenith pg create --snapshot http://learn-postgres.com/movies_db.zenith movies
|
||||
```
|
||||
|
||||
## Create snapshot and push it to the cloud
|
||||
|
||||
```
|
||||
> zenith snapshot create pgdata1@snap1
|
||||
> zenith snapshot push --to ssh://stas@zenith.tech pgdata1@snap1
|
||||
```
|
||||
|
||||
## Rollback database to the snapshot
|
||||
|
||||
One way to roll back the database is just to init a new database from the snapshot and destroy the old one. But creating a new database from a snapshot would require a copy of that snapshot, which is a time-consuming operation. Another option that would be cool to support is the ability to create a copy-on-write database from the snapshot without copying data, and store updated pages in a separate location; however, that would have performance implications. So to properly roll back the database to an older state we have `zenith pg checkout`.
|
||||
|
||||
```
|
||||
> zenith pg list
|
||||
ID PGDATA USED STORAGE ENDPOINT
|
||||
primary1 pgdata1 5G zenith-local localhost:5432
|
||||
|
||||
> zenith snapshot create pgdata1@snap1
|
||||
|
||||
> zenith snapshot list
|
||||
ID SIZE PARENT
|
||||
oldpg 5G -
|
||||
pgdata1@snap1 6G -
|
||||
pgdata1@CURRENT 6G -
|
||||
|
||||
> zenith pg checkout pgdata1@snap1
|
||||
Stopping postgres on pgdata1.
|
||||
Rolling back pgdata1@CURRENT to pgdata1@snap1.
|
||||
Starting postgres on pgdata1.
|
||||
|
||||
> zenith snapshot list
|
||||
ID SIZE PARENT
|
||||
oldpg 5G -
|
||||
pgdata1@snap1 6G -
|
||||
pgdata1@HEAD{0} 6G -
|
||||
pgdata1@CURRENT 6G -
|
||||
```
|
||||
|
||||
Some notes: pgdata1@CURRENT -- implicit snapshot representing the current state of the database in the data directory. When we are checking out some snapshot CURRENT will be set to this snapshot and the old CURRENT state will be named HEAD{0} (0 is the number of postgres timeline, it would be incremented after each such checkout).
|
||||
|
||||
## Configure PITR area (Point In Time Recovery).
|
||||
|
||||
PITR area acts like a continuous snapshot where you can reset the database to any point in time within this area (by area I mean some TTL period or some size limit, both possibly infinite).
|
||||
|
||||
```
|
||||
> zenith pitr create --storage s3tank --ttl 30d --name pitr_last_month
|
||||
```
|
||||
|
||||
Resetting the database to some state in the past would require creating a snapshot at some lsn / time in this pitr area.
|
||||
|
||||
# Manual
|
||||
|
||||
## storage
|
||||
|
||||
Storage is either zenith pagestore or s3. Users may create a database in a pagestore and create/move *snapshots* and *pitr regions* in both pagestore and s3. Storage is a concept similar to `git remote`. After installation, I imagine one local storage is available by default.
|
||||
|
||||
**zenith storage attach** -t [native|s3] -c key=value -n name
|
||||
|
||||
Attaches/initializes storage. For --type=s3, user credentials and path should be provided. For --type=native we may support --path=/local/path and --url=zenith.tech/stas/mystore. Other possible term for native is 'zstore'.
|
||||
|
||||
|
||||
**zenith storage list**
|
||||
|
||||
Show currently attached storages. For example:
|
||||
|
||||
```
|
||||
> zenith storage list
|
||||
NAME USED TYPE OPTIONS PATH
|
||||
local 5.1G zenith-local /opt/zenith/store/local
|
||||
local.compr 20.4G zenith-local comression=on /opt/zenith/store/local.compr
|
||||
zcloud 60G zenith-remote zenith.tech/stas/mystore
|
||||
s3tank 80G S3
|
||||
```
|
||||
|
||||
**zenith storage detach**
|
||||
|
||||
**zenith storage show**
|
||||
|
||||
|
||||
|
||||
## pg
|
||||
|
||||
Manages postgres data directories and can start postgreses with proper configuration. An experienced user may avoid using that (except pg create) and configure/run postgres by themselves.
|
||||
|
||||
Pg is a term for a single postgres running on some data. I'm trying to avoid separating datadir management and postgres instance management here -- both concepts are bundled together.
|
||||
|
||||
**zenith pg create** [--no-start --snapshot --cow] -s storage-name -n pgdata
|
||||
|
||||
Creates (initializes) new data directory in given storage and starts postgres. I imagine that storage for this operation may be only local and data movement to remote location happens through snapshots/pitr.
|
||||
|
||||
--no-start: just init the datadir without starting postgres
|
||||
|
||||
--snapshot snap: init from the snapshot. Snap is a name or URL (zenith.tech/stas/mystore/snap1)
|
||||
|
||||
--cow: initialize Copy-on-Write data directory on top of some snapshot (makes sense if it is a snapshot of currently running a database)
|
||||
|
||||
**zenith pg destroy**
|
||||
|
||||
**zenith pg start** [--replica] pgdata
|
||||
|
||||
Start postgres with proper extensions preloaded/installed.
|
||||
|
||||
**zenith pg checkout**
|
||||
|
||||
Rollback data directory to some previous snapshot.
|
||||
|
||||
**zenith pg stop** pg_id
|
||||
|
||||
**zenith pg list**
|
||||
|
||||
```
|
||||
ROLE PGDATA USED STORAGE ENDPOINT
|
||||
primary my_pg 5.1G local localhost:5432
|
||||
replica-1 localhost:5433
|
||||
replica-2 localhost:5434
|
||||
primary my_pg2 3.2G local.compr localhost:5435
|
||||
- my_pg3 9.2G local.compr -
|
||||
```
|
||||
|
||||
**zenith pg show**
|
||||
|
||||
```
|
||||
my_pg:
|
||||
storage: local
|
||||
space used on local: 5.1G
|
||||
space used on all storages: 15.1G
|
||||
snapshots:
|
||||
on local:
|
||||
snap1: 1G
|
||||
snap2: 1G
|
||||
on zcloud:
|
||||
snap2: 1G
|
||||
on s3tank:
|
||||
snap5: 2G
|
||||
pitr:
|
||||
on s3tank:
|
||||
pitr_one_month: 45G
|
||||
|
||||
```
|
||||
|
||||
**zenith pg start-rest/graphql** pgdata
|
||||
|
||||
Starts REST/GraphQL proxy on top of postgres master. Not sure we should do that, just an idea.
|
||||
|
||||
|
||||
## snapshot
|
||||
|
||||
Snapshot creation is cheap -- no actual data is copied, we just start retaining old pages. Snapshot size means the amount of retained data, not all data. Snapshot name looks like pgdata_name@tag_name. tag_name is set by the user during snapshot creation. There are some reserved tag names: CURRENT represents the current state of the data directory; HEAD{i} represents the data directory state that resided in the database before i-th checkout.
|
||||
|
||||
**zenith snapshot create** pgdata_name@snap_name
|
||||
|
||||
Creates a new snapshot in the same storage where pgdata_name exists.
|
||||
|
||||
**zenith snapshot push** --to url pgdata_name@snap_name
|
||||
|
||||
Produces a binary stream of a given snapshot. Under the hood it starts a temp read-only postgres over this snapshot and sends a basebackup stream. The receiving side should start `zenith snapshot recv` before the push happens. If the url has some special schema like zenith:// the receiving side may require auth and start `zenith snapshot recv` on the go.
|
||||
|
||||
**zenith snapshot recv**
|
||||
|
||||
Starts a port listening for a basebackup stream, prints connection info to stdout (so that user may use that in push command), and expects data on that socket.
|
||||
|
||||
**zenith snapshot pull** --from url or path
|
||||
|
||||
Connects to a remote zenith/s3/file and pulls snapshot. The remote site should be zenith service or files in our format.
|
||||
|
||||
**zenith snapshot import** --from basebackup://<...> or path
|
||||
|
||||
Creates a new snapshot out of running postgres via basebackup protocol or basebackup files.
|
||||
|
||||
**zenith snapshot export**
|
||||
|
||||
Starts read-only postgres over this snapshot and exports data in some format (pg_dump, or COPY TO on some/all tables). One of the options may be zenith own format which is handy for us (but I think just tar of basebackup would be okay).
|
||||
|
||||
**zenith snapshot diff** snap1 snap2
|
||||
|
||||
Shows size of data changed between two snapshots. We also may provide options to diff schema/data in tables. To do that start temp read-only postgreses.
|
||||
|
||||
**zenith snapshot destroy**
|
||||
|
||||
## pitr
|
||||
|
||||
Pitr represents wal stream and ttl policy for that stream
|
||||
|
||||
XXX: any suggestions on a better name?
|
||||
|
||||
**zenith pitr create** name
|
||||
|
||||
--ttl = inf | period
|
||||
|
||||
--size-limit = inf | limit
|
||||
|
||||
--storage = storage_name
|
||||
|
||||
**zenith pitr extract-snapshot** pitr_name --lsn xxx
|
||||
|
||||
Creates a snapshot out of some lsn in PITR area. The obtained snapshot may be managed with snapshot routines (move/send/export)
|
||||
|
||||
**zenith pitr gc** pitr_name
|
||||
|
||||
Force garbage collection on some PITR area.
|
||||
|
||||
**zenith pitr list**
|
||||
|
||||
**zenith pitr destroy**
|
||||
|
||||
|
||||
## console
|
||||
|
||||
**zenith console**
|
||||
|
||||
Opens browser targeted at web console with the more or less same functionality as described here.
|
||||
docs/rfcs/004-durability.md (new file)
@@ -0,0 +1,218 @@
|
||||
Durability & Consensus
|
||||
======================
|
||||
|
||||
When a transaction commits, a commit record is generated in the WAL.
|
||||
When do we consider the WAL record as durable, so that we can
|
||||
acknowledge the commit to the client and be reasonably certain that we
|
||||
will not lose the transaction?
|
||||
|
||||
Zenith uses a group of WAL safekeeper nodes to hold the generated WAL.
|
||||
A WAL record is considered durable, when it has been written to a
|
||||
majority of WAL safekeeper nodes. In this document, I use 5
|
||||
safekeepers, because I have five fingers. A WAL record is durable,
|
||||
when at least 3 safekeepers have written it to disk.
|
||||
|
||||
First, assume that only one primary node can be running at a
|
||||
time. This can be achieved by Kubernetes or etcd or some
|
||||
cloud-provider specific facility, or we can implement it
|
||||
ourselves. These options are discussed in later chapters. For now,
|
||||
assume that there is a Magic STONITH Fairy that ensures that.
|
||||
|
||||
In addition to the WAL safekeeper nodes, the WAL is archived in
|
||||
S3. WAL that has been archived to S3 can be removed from the
|
||||
safekeepers, so the safekeepers don't need a lot of disk space.
|
||||
|
||||
|
||||
+----------------+
|
||||
+-----> | WAL safekeeper |
|
||||
| +----------------+
|
||||
| +----------------+
|
||||
+-----> | WAL safekeeper |
|
||||
+------------+ | +----------------+
|
||||
| Primary | | +----------------+
|
||||
| Processing | ---------+-----> | WAL safekeeper |
|
||||
| Node | | +----------------+
|
||||
+------------+ | +----------------+
|
||||
\ +-----> | WAL safekeeper |
|
||||
\ | +----------------+
|
||||
\ | +----------------+
|
||||
\ +-----> | WAL safekeeper |
|
||||
\ +----------------+
|
||||
\
|
||||
\
|
||||
\
|
||||
\
|
||||
\ +--------+
|
||||
\ | |
|
||||
+--> | S3 |
|
||||
| |
|
||||
+--------+
|
||||
|
||||
|
||||
Every WAL safekeeper holds a section of WAL, and a VCL value.
|
||||
The WAL can be divided into three portions:
|
||||
|
||||
|
||||
VCL LSN
|
||||
| |
|
||||
V V
|
||||
.................ccccccccccccccccccccXXXXXXXXXXXXXXXXXXXXXXX
|
||||
Archived WAL Completed WAL In-flight WAL
|
||||
|
||||
|
||||
Note that all this WAL kept in a safekeeper is a contiguous section.
|
||||
This is different from Aurora: In Aurora, there can be holes in the
|
||||
WAL, and there is a Gossip protocol to fill the holes. That could be
|
||||
implemented in the future, but let's keep it simple for now. WAL needs
|
||||
to be written to a safekeeper in order. However, during crash
|
||||
recovery, In-flight WAL that has already been stored in a safekeeper
|
||||
can be truncated or overwritten.
|
||||
|
||||
The Archived WAL has already been stored in S3, and can be removed from
|
||||
the safekeeper.
|
||||
|
||||
The Completed WAL has been written to at least three safekeepers. The
|
||||
algorithm ensures that it is not lost, when at most two nodes fail at
|
||||
the same time.
|
||||
|
||||
The In-flight WAL has been persisted in the safekeeper, but if a crash
|
||||
happens, it may still be overwritten or truncated.
|
||||
|
||||
|
||||
The VCL point is determined in the Primary. It is not strictly
|
||||
necessary to store it in the safekeepers, but it allows some
|
||||
optimizations and sanity checks and is probably generally useful for
|
||||
the system as a whole. The VCL values stored in the safekeepers can lag
|
||||
behind the VCL computed by the primary.
|
||||
|
||||
|
||||
Primary node Normal operation
|
||||
-----------------------------
|
||||
|
||||
1. Generate some WAL.
|
||||
|
||||
2. Send the WAL to all the safekeepers that you can reach.
|
||||
|
||||
3. As soon as a quorum of safekeepers have acknowledged that they have
|
||||
received and durably stored the WAL up to that LSN, update local VCL
|
||||
value in memory, and acknowledge commits to the clients (see the sketch after this list).
|
||||
|
||||
4. Send the new VCL to all the safekeepers that were part of the quorum.
|
||||
(Optional)
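For illustration, the quorum rule from step 3 can be computed like this (a sketch under the five-safekeeper example above, not the actual safekeeper protocol code):

```rust
// The highest LSN durably stored by a quorum is the quorum-th largest
// acknowledged flush LSN among the safekeepers we heard from.
fn quorum_durable_lsn(mut acked_flush_lsns: Vec<u64>, quorum: usize) -> Option<u64> {
    if acked_flush_lsns.len() < quorum {
        return None; // cannot acknowledge anything yet
    }
    acked_flush_lsns.sort_unstable_by(|a, b| b.cmp(a)); // descending
    Some(acked_flush_lsns[quorum - 1])
}

// Example with 5 safekeepers and a quorum of 3:
// quorum_durable_lsn(vec![100, 250, 180, 90, 300], 3) == Some(180)
```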
|
||||
|
||||
|
||||
Primary Crash recovery
|
||||
----------------------
|
||||
|
||||
When a new Primary node starts up, before it can generate any new WAL
|
||||
it needs to contact a majority of the WAL safekeepers to compute the
|
||||
VCL. Remember that there is a Magic STONITH fairy that ensures that
|
||||
only one node process can be doing this at a time.
|
||||
|
||||
1. Contact all WAL safekeepers. Find the Max((Epoch, LSN)) tuple among the ones you
|
||||
can reach. This is the Winner safekeeper, and its LSN becomes the new VCL.
|
||||
|
||||
2. Update the other safekeepers you can reach, by copying all the WAL
|
||||
from the Winner, starting from each safekeeper's old VCL point. Any old
|
||||
In-Flight WAL from previous Epoch is truncated away.
|
||||
|
||||
3. Increment Epoch, and send the new Epoch to the quorum of
|
||||
safekeepers. (This ensures that if any of the safekeepers that we
|
||||
could not reach later come back online, they will be considered as
|
||||
older than this in any future recovery)
|
||||
|
||||
You can now start generating new WAL, starting from the newly-computed
|
||||
VCL.
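
For illustration, the winner selection in step 1 might look like this
(a sketch with hypothetical types; tuples compare lexicographically,
so the Epoch dominates and the LSN breaks ties):

```rust
/// State reported by one reachable safekeeper (hypothetical layout).
#[derive(Clone, Copy, Debug)]
struct SafekeeperStatus {
    id: u32,
    epoch: u64,
    last_lsn: u64,
}

/// Pick the Winner: the safekeeper with the highest (Epoch, LSN)
/// tuple. Its `last_lsn` becomes the new VCL.
fn find_winner(reachable: &[SafekeeperStatus]) -> Option<SafekeeperStatus> {
    reachable.iter().copied().max_by_key(|s| (s.epoch, s.last_lsn))
}
```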

Optimizations
-------------

As described, the Primary node sends all the WAL to all the WAL safekeepers. That
can be a lot of network traffic. Instead of sending the WAL directly from the Primary,
some safekeepers can be daisy-chained off other safekeepers, or there can be a
broadcast mechanism among them. There should still be a direct connection from
each safekeeper to the Primary for the acknowledgments, though.

Similarly, the responsibility for archiving WAL to S3 can be delegated to one of
the safekeepers, to reduce the load on the primary.


Magic STONITH fairy
-------------------

Now that we have a system that works as long as only one primary node is running at a time, how
do we ensure that?

1. Use etcd to grant a lease on a key. The primary node is only allowed to operate as primary
   when it's holding a valid lease. If the primary node dies, the lease expires after a timeout
   period, and a new node is allowed to become the primary.

2. Use S3 to store the lease. S3's consistency guarantees are more lenient, so in theory you
   cannot do this safely. In practice, it would probably be OK if you make the lease times and
   timeouts long enough. This has the advantage that we don't need to introduce a new
   component to the architecture.

3. Use Raft or Paxos, with the WAL safekeepers acting as the Acceptors to form the quorum. The
   next chapter describes this option.


Built-in Paxos
--------------

The WAL safekeepers act as Paxos Acceptors, and the Processing nodes
act as both Proposers and Learners.

Each WAL safekeeper holds an Epoch value in addition to the VCL and
the WAL. Each request by the primary to safekeep WAL is accompanied by
an Epoch value. If a safekeeper receives a request with an Epoch that
doesn't match its current Accepted Epoch, it must ignore (NACK) it.
(In different Paxos papers, Epochs are called "terms" or "round
numbers".)

When a node wants to become the primary, it generates a new Epoch
value that is higher than any previously observed Epoch value, and
globally unique.


Accepted Epoch: 555
               VCL                                       LSN
                |                                         |
                V                                         V
.................ccccccccccccccccccccXXXXXXXXXXXXXXXXXXXXXXX
  Archived WAL       Completed WAL         In-flight WAL


Primary node startup:

1. Contact all WAL safekeepers that you can reach (if you cannot
   connect to a quorum of them, you can give up immediately). Find the
   latest Epoch among them.

2. Generate a new globally unique Epoch, greater than the latest Epoch
   found in the previous step.

3. Send the new Epoch in a Prepare message to a quorum of
   safekeepers. (Paxos Prepare message)

4. Each safekeeper responds with a Promise. If a safekeeper has
   already made a promise with a higher Epoch, it doesn't respond (or
   responds with a NACK). After making a promise, the safekeeper stops
   responding to any write requests with an earlier Epoch.

5. Once you have received a majority of promises, you know that the
   VCL cannot advance on the old Epoch anymore. This effectively kills
   any old primary server.

6. Find the highest written LSN among the quorum of safekeepers (these
   can be included in the Promise messages already). This is the new
   VCL. If a new node starts the election process after this point,
   it will compute the same or a higher VCL.

7. Copy the WAL from the safekeeper with the highest LSN to the other
   safekeepers in the quorum, using the new Epoch. (Paxos Accept
   phase)

8. You can now start generating new WAL starting from the VCL. If
   another process starts the election process after this point and
   gains control of a majority of the safekeepers, we will no longer
   be able to advance the VCL.
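
A sketch of the Acceptor-side rules in steps 3-4 (hypothetical message
and state types, not the actual safekeeper code):

```rust
/// Durable per-safekeeper Paxos state (hypothetical representation).
struct AcceptorState {
    promised_epoch: u64, // highest Epoch this safekeeper has promised
    last_lsn: u64,       // highest LSN durably written here
}

enum PrepareReply {
    /// Promise not to accept writes with an older Epoch; report the
    /// last written LSN so the proposer can compute the new VCL (step 6).
    Promise { last_lsn: u64 },
    Nack,
}

fn handle_prepare(state: &mut AcceptorState, epoch: u64) -> PrepareReply {
    if epoch > state.promised_epoch {
        state.promised_epoch = epoch; // must be persisted before replying
        PrepareReply::Promise { last_lsn: state.last_lsn }
    } else {
        PrepareReply::Nack
    }
}

/// After a promise, write requests carrying an older Epoch are ignored.
fn accepts_write(state: &AcceptorState, epoch: u64) -> bool {
    epoch >= state.promised_epoch
}
```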


docs/rfcs/005-zenith_local.md (new file, 103 lines)
@@ -0,0 +1,103 @@

# Zenith local

Here I list some objectives to keep in mind when discussing zenith-local design and a proposal that brings all components together. Your comments on both parts are very welcome.

#### Why do we need it?
- For distribution - this easy-to-use binary will help us to build adoption among developers.
- For internal use - to test all components together.

In my understanding, we consider it to be just a mock-up version of zenith-cloud.
> Question: How much should we care about durability and security issues for a local setup?

#### Why is it better than a simple local postgres?

- Easy one-line setup. As simple as `cargo install zenith && zenith start`

- Quick and cheap creation of compute nodes over the same storage.
> Question: How can we describe a use-case for this feature?

- Zenith-local can work with S3 directly.

- Push and pull images (snapshots) to remote S3 to exchange data with other users.

- Quick and cheap snapshot checkouts to switch back and forth in the database history.
> Question: Do we want it in the very first release? This feature seems quite complicated.

#### Distribution:

Ideally, just one binary that incorporates all elements we need.
> Question: Let's discuss pros and cons of having a separate package with modified PostgreSQL.

#### Components:

- **zenith-CLI** - interface for end-users. Turns commands into REST requests and handles responses to show them in a user-friendly way.
CLI proposal is here https://github.com/libzenith/rfcs/blob/003-laptop-cli.md/003-laptop-cli.md
WIP code is here: https://github.com/libzenith/postgres/tree/main/pageserver/src/bin/cli

- **zenith-console** - web UI with the same functionality as the CLI.
> Note: not for the first release.

- **zenith-local** - entrypoint. Service that starts all other components and handles REST API requests. See the REST API proposal below.
> Idea: spawn all other components as child processes, so that we can shut everything down by stopping zenith-local.

- **zenith-pageserver** - consists of a storage and a WAL-replaying service (modified PG in the current implementation).
> Question: Probably, for a local setup we should be able to bypass the page store and interact directly with S3, to avoid double caching in shared buffers and the page server?

WIP code is here: https://github.com/libzenith/postgres/tree/main/pageserver/src

- **zenith-S3** - stores base images of the database and WAL in S3 object storage. Imports and exports images from/to zenith.
> Question: How should it operate in a local setup? Will we manage it ourselves or ask the user to provide credentials for an existing S3 object storage (i.e. minio)?
> Question: Do we use it together with the local page store, or are they interchangeable?

WIP code is ???

- **zenith-safekeeper** - receives WAL from postgres, stores it durably, and answers to Postgres that the "sync" succeeded.
> Question: How should it operate in a local setup? In my understanding it should push WAL directly to S3 (if we use it) or store all data locally (if we use local page storage). The latter option seems meaningless (extra overhead and no gain), but it is still good for testing the system.

WIP code is here: https://github.com/libzenith/postgres/tree/main/src/bin/safekeeper

- **zenith-computenode** - bottomless PostgreSQL, ideally upstream, but for a start - our modified version. Users can quickly create and destroy them and work with them as with a regular postgres database.

WIP code is in the main branch and here: https://github.com/libzenith/postgres/commits/compute_node

#### REST API:

Service endpoint: `http://localhost:3000`

Resources:
- /storages - where data lives: zenith-pageserver or zenith-s3
- /pgs - Postgres - zenith-computenode
- /snapshots - snapshots **TODO**

> Question: Do we want to extend this API to manage zenith components? I.e. start the page server, manage safekeepers and so on? Or will they be hardcoded to just start once and for all?

Methods and their mapping to CLI:

- /storages - zenith-pageserver or zenith-s3

CLI | REST API
------------- | -------------
storage attach -n name --type [native\s3] --path=[datadir\URL] | PUT -d { "name": "name", "type": "native", "path": "/tmp" } /storages
storage detach -n name | DELETE /storages/:storage_name
storage list | GET /storages
storage show -n name | GET /storages/:storage_name

- /pgs - zenith-computenode

CLI | REST API
------------- | -------------
pg create -n name --s storage_name | PUT -d { "name": "name", "storage_name": "storage_name" } /pgs
pg destroy -n name | DELETE /pgs/:pg_name
pg start -n name --replica | POST -d {"action": "start", "is_replica":"replica"} /pgs/:pg_name/actions
pg stop -n name | POST -d {"action": "stop"} /pgs/:pg_name/actions
pg promote -n name | POST -d {"action": "promote"} /pgs/:pg_name/actions
pg list | GET /pgs
pg show -n name | GET /pgs/:pg_name

- /snapshots **TODO**

CLI | REST API
------------- | -------------

docs/rfcs/006-laptop-cli-v2-CLI.md (new file, 64 lines)
@@ -0,0 +1,64 @@

Zenith CLI allows you to operate database clusters (catalog clusters) and their commit history locally and in the cloud. Since ANSI calls them catalog clusters, and "cluster" is a loaded term in modern infrastructure, we will call it a "catalog".

# CLI v2 (after chatting with Carl)

Zenith introduces the notion of a repository.

```bash
zenith init
zenith clone zenith://zenith.tech/piedpiper/northwind -- clones a repo to the northwind directory
```

Once you have a cluster catalog you can explore it

```bash
zenith log -- returns a list of commits
zenith status -- returns whether there are changes in the catalog that can be committed
zenith commit -- commits the changes and generates a new commit hash
zenith branch experimental <hash> -- creates a branch called experimental based on a given commit hash
```

To make changes in the catalog you need to run compute nodes

```bash
-- here is how you start a compute node
zenith start /home/pipedpiper/northwind:main -- starts a compute instance
zenith start zenith://zenith.tech/northwind:main -- starts a compute instance in the cloud
-- you can start a compute node against any hash or branch
zenith start /home/pipedpiper/northwind:experimental --port 8008 -- starts another compute instance (on a different port)
zenith start /home/pipedpiper/northwind:<hash> --port 8009 -- starts another compute instance (on a different port)

-- After running some DML you can run
-- zenith status and see how there are two WAL streams, one on top of
-- the main branch
zenith status
-- and another on top of the experimental branch
zenith status -b experimental

-- you can commit each branch separately
zenith commit main
-- or
zenith commit -c /home/pipedpiper/northwind:experimental
```

Starting compute instances against cloud environments

```bash
-- you can start a compute instance against the cloud environment
-- in this case all of the changes will be streamed into the cloud
zenith start https://zenith.tech/pipedpiper/northwind:main
zenith status -c https://zenith.tech/pipedpiper/northwind:main
zenith commit -c https://zenith.tech/pipedpiper/northwind:main
zenith branch -c https://zenith.tech/pipedpiper/northwind:<hash> experimental
```

Pushing data into the cloud

```bash
-- pull all the commits from the cloud
zenith pull
-- push all the commits to the cloud
zenith push
```

docs/rfcs/006-laptop-cli-v2-repository-structure.md (new file, 140 lines)
@@ -0,0 +1,140 @@

# Repository format

A Zenith repository is similar to a traditional PostgreSQL backup
archive, like a WAL-G bucket or a pgbarman backup catalogue. It holds
multiple versions of a PostgreSQL database cluster.

The distinguishing feature is that you can launch a Zenith Postgres
server directly against a branch in the repository, without having to
"restore" it first. Also, Zenith manages the storage automatically:
there is no separation between full and incremental backups, nor a WAL
archive. Zenith relies heavily on the WAL, and uses concepts similar
to incremental backups and WAL archiving internally, but that is hidden
from the user.

## Directory structure, version 1

This first version is pretty straightforward but not very
efficient. Just something to get us started.

The repository directory looks like this:

    .zenith/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/wal/
    .zenith/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/snapshots/<lsn>/
    .zenith/timelines/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c/history

    .zenith/refs/branches/mybranch
    .zenith/refs/tags/foo
    .zenith/refs/tags/bar

    .zenith/datadirs/<timeline uuid>

### Timelines

A timeline is similar to PostgreSQL's timeline, but is identified by a
UUID instead of a 32-bit timeline ID. For user convenience, it can be
given a name that refers to the UUID (called a branch).

All WAL is generated on a timeline. You can launch a read-only node
against a tag or an arbitrary LSN on a timeline, but in order to write,
you need to create a timeline.

Each timeline is stored in a directory under .zenith/timelines. It
consists of a WAL archive, containing all the WAL in the standard
PostgreSQL format, under the wal/ subdirectory.

The 'snapshots/' subdirectory contains "base backups" of the data
directory at different LSNs. Each snapshot is simply a copy of the
Postgres data directory.

When a new timeline is forked from a previous timeline, the ancestor
timeline's UUID is stored in the 'history' file.

### Refs

There are two kinds of named objects in the repository: branches and
tags. A branch is a human-friendly name for a timeline UUID, and a
tag is a human-friendly name for a specific LSN on a timeline
(timeline UUID + LSN). Like in git, these are just for user
convenience; you can also use timeline UUIDs and LSNs directly.

Refs do have one additional purpose though: naming a timeline or LSN
prevents it from being automatically garbage collected.

The refs directory contains a small text file for each tag/branch. It
contains the UUID of the timeline (and the LSN, for tags).

### Datadirs

.zenith/datadirs contains PostgreSQL data directories. You can launch
a Postgres instance on one of them with:

```
postgres -D .zenith/datadirs/4543be3daeab2ed4e58a285cbb8dd1fce6970f8c
```

All the actual data is kept in the timeline directories, under
.zenith/timelines. The data directories are only needed for active
PostgreSQL instances. After an instance is stopped, the data directory
can be safely removed. "zenith start" will recreate it quickly from
the data in .zenith/timelines, if it's missing.

## Version 2

The format described above isn't very different from a traditional
daily base backup + WAL archive configuration. The main difference is
the nicer naming of branches and tags.

That's not very efficient. For performance, we need something like
incremental backups that don't require making a full copy of all the
data. So only store modified files or pages. And instead of having to
replay all the WAL from the last snapshot, "slice" the WAL into
per-relation WAL files and only recover what's needed when a table is
accessed.

In version 2, the file format in the "snapshots" subdirectory gets
more advanced. The exact format is TODO. But it should support:
- storing WAL records of individual relations/pages
- storing a delta from an older snapshot
- compression


## Operations

### Garbage collection

When you run "zenith gc", old timelines that are no longer needed are
removed. That involves collecting the list of "unreachable" objects,
starting from the named branches and tags.

Also, if enough WAL has been generated on a timeline since the last
snapshot, a new snapshot or delta is created.

### zenith push/pull

Compare the tags and branches on both servers, and copy the missing ones.
For each branch, compare the timeline it points to in both servers. If
one is behind the other, copy the missing parts.

FIXME: how do you prevent confusion if you have two clones of the same
repository, launch an instance on the same branch in both clones, and
later try to push/pull between them? Perhaps create a new timeline
every time you start up an instance? Then you would detect that the
timelines have diverged. That would match the "epoch" concept
that we have in the WAL safekeeper.

### zenith checkout/commit

In this format, there is no concept of a "working tree", and hence no
concept of checking out or committing. All modifications are done on
a branch or a timeline. As soon as you launch a server, the changes are
appended to the timeline.

You can easily fork off a temporary timeline to emulate a "working tree".
You can later remove it and have it garbage collected, or, to "commit",
re-point the branch to the new timeline.

If we want to have a worktree and a "zenith checkout/commit" concept, we can
emulate that with a temporary timeline. Create the temporary timeline at
"zenith checkout", and have "zenith commit" modify the branch to point to
the new timeline.

docs/rfcs/007-serverless-on-laptop.md (new file, 93 lines)
@@ -0,0 +1,93 @@

How it works now
----------------

1. Create repository, start page server on it

```
$ zenith init
...
created main branch
new zenith repository was created in .zenith

$ zenith pageserver start
Starting pageserver at '127.0.0.1:64000' in .zenith
Page server started
```

2. Create a branch, and start a Postgres instance on it

```
$ zenith branch heikki main
branching at end of WAL: 0/15ECF68

$ zenith pg create heikki
Initializing Postgres on timeline 76cf9279915be7797095241638e64644...
Extracting base backup to create postgres instance: path=.zenith/pgdatadirs/pg1 port=55432

$ zenith pg start pg1
Starting postgres node at 'host=127.0.0.1 port=55432 user=heikki'
waiting for server to start.... done
server started
```

3. Connect to it and run queries

```
$ psql "dbname=postgres port=55432"
psql (14devel)
Type "help" for help.

postgres=#
```


Proposal: Serverless on your Laptop
-----------------------------------

We've been talking about doing the "pg create" step automatically at
"pg start", to eliminate that step. What if we go further, go
serverless on your laptop, so that the workflow becomes just:

1. Create repository, start page server on it (same as before)

```
$ zenith init
...
created main branch
new zenith repository was created in .zenith

$ zenith pageserver start
Starting pageserver at '127.0.0.1:64000' in .zenith
Page server started
```

2. Create branch

```
$ zenith branch heikki main
branching at end of WAL: 0/15ECF68
```

3. Connect to it:

```
$ psql "dbname=postgres port=5432 branch=heikki"
psql (14devel)
Type "help" for help.

postgres=#
```


The trick behind the scenes is that when you launch the page server,
it starts to listen on port 5432. When you connect to it with psql, it
looks at the 'branch' parameter that you passed in the connection
string. It automatically performs the "pg create" and "pg start" steps
for that branch, and then forwards the connection to the Postgres
instance that it launched. After you disconnect, if there are no more
active connections to the server running on the branch, it can
automatically shut it down again.
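
The get-or-create logic behind that trick might look roughly like this
(a minimal sketch with hypothetical types; the real page server would
additionally parse the Postgres startup packet to find the `branch`
parameter and proxy the actual network traffic):

```rust
use std::collections::HashMap;

/// A running compute node for one branch (hypothetical representation).
struct ComputeNode {
    port: u16,
    active_connections: usize,
}

struct PageServerProxy {
    nodes: HashMap<String, ComputeNode>,
    next_port: u16,
}

impl PageServerProxy {
    /// The first connection to a branch performs the equivalent of
    /// "pg create" + "pg start"; later connections reuse the running node.
    fn connect(&mut self, branch: &str) -> u16 {
        let next_port = &mut self.next_port;
        let node = self.nodes.entry(branch.to_string()).or_insert_with(|| {
            let port = *next_port;
            *next_port += 1;
            println!("starting compute node for branch '{branch}' on port {port}");
            ComputeNode { port, active_connections: 0 }
        });
        node.active_connections += 1;
        node.port // the proxy would now forward traffic to this port
    }

    /// When the last connection closes, the node can be shut down again.
    fn disconnect(&mut self, branch: &str) {
        if let Some(node) = self.nodes.get_mut(branch) {
            node.active_connections -= 1;
            if node.active_connections == 0 {
                println!("no connections left on '{branch}', stopping its node");
                self.nodes.remove(branch);
            }
        }
    }
}
```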

This is how serverless would work in the cloud. We can do it on your
laptop, too.

docs/rfcs/008-push-pull.md (new file, 66 lines)
@@ -0,0 +1,66 @@

# Push and pull between pageservers

Here is a proposal for implementing push/pull mechanics between pageservers. We also want to be able to push/pull to S3, but that would depend on the exact storage format, so we don't touch it in this proposal.

## Origin management

The origin represents connection info for some remote pageserver. Let's use the same commands here that git uses, except with an explicit list subcommand (git uses `origin -v` for that).

```
zenith origin add <name> <connection_uri>
zenith origin list
zenith origin remove <name>
```

The connection URI is a string of the form `postgresql://user:pass@hostname:port` (https://www.postgresql.org/docs/13/libpq-connect.html#id-1.7.3.8.3.6). We can start with libpq password auth and later add support for client certs, require ssh as transport, or invent some other kind of transport.

Behind the scenes, these commands may update a toml file inside the .zenith directory.

## Push

### Pushing branch

```
zenith push mybranch cloudserver # push to eponymous branch in cloudserver
zenith push mybranch cloudserver:otherbranch # push to a different branch in cloudserver
```

The exact mechanics would be slightly different in the following situations:

1) Destination branch does not exist.

That is the simplest scenario. We can just create an empty branch (or timeline, in internal terminology) and transfer all the pages/records that we have in our timeline. Right now each timeline is quite independent of other timelines, so I suggest skipping any checks that there is a common ancestor and just filling it with data. Later, when CoW timelines land in the pageserver, we may add that check and decide whether this timeline belongs to this pageserver repository or not [*].

The exact mechanics may be the following:

* The CLI asks the local pageserver to perform a push and hands over the connection uri: `perform_push <branch_name> <uri>`.
* The local pageserver connects to the remote pageserver and runs `branch_push <branch_name> <timeline_id>`.
The handler for `branch_push` would create the destination timeline and switch the connection to copyboth mode.
* The sending pageserver may start an iterator on that timeline and send all the records as copy messages.

2) Destination branch exists and its latest_valid_lsn is less than ours.

In this case, we need to send the missing records. To do that we need to find all pages that were changed since that remote LSN. Right now we don't have any tracking mechanism for that, so let's just iterate over all records and send the ones that are newer than the remote LSN. Later we should probably add a sparse bitmap that tracks changed pages, to avoid a full scan.

3) Destination branch exists and its latest_valid_lsn is bigger than ours.

In this case, we can't push to that branch. We can only pull.
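
The three cases boil down to a small decision, sketched here with
hypothetical types (`Lsn` standing in for the real LSN type):

```rust
type Lsn = u64;

/// What `zenith push` should do, per the three cases above.
enum PushPlan {
    CreateBranchAndCopyAll,    // case 1: no destination branch yet
    SendRecordsNewerThan(Lsn), // case 2: remote is behind us
    RefuseOnlyPullPossible,    // case 3: remote is ahead of us
}

fn plan_push(local_lsn: Lsn, remote_lsn: Option<Lsn>) -> PushPlan {
    match remote_lsn {
        None => PushPlan::CreateBranchAndCopyAll,
        Some(r) if r < local_lsn => PushPlan::SendRecordsNewerThan(r),
        Some(_) => PushPlan::RefuseOnlyPullPossible, // ahead (or equal: nothing to push)
    }
}
```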

### Pulling branch

Here we need to handle the same three cases, but also keep in mind that the local pageserver can be behind NAT, so we can't trivially reuse pushing by asking the remote to `perform_push` to our address. So we would need a new set of commands:

* The CLI calls `perform_pull <branch_name> <uri>` on the local pageserver.
* The local pageserver calls `branch_pull <branch_name> <timeline_id>` on the remote pageserver.
* The remote pageserver sends records in our direction.

But despite the different set of commands, the code that iterates over the records, and the receiving code that inserts those records, can be the same for both pull and push.


[*] It looks to me that there are two different possible approaches to handling unrelated timelines:

1) Allow storing unrelated timelines in one repo. Some timelines may have parents and some may not.
2) Transparently create and manage several repositories in one pageserver.

But that is the topic for a separate RFC/discussion.

docs/rfcs/009-snapshot-first-storage-cli.md (new file, 56 lines)
@@ -0,0 +1,56 @@

While working on the export/import commands, I understood that they fit really well into the "snapshot-first design".

We may think about backups as snapshots in a different format (i.e. plain pgdata format, basebackup tar format, WAL-G format (if they want to support it) and so on). They use the same storage API; the only difference is the code that packs/unpacks files.

Even if zenith aims to maintain durability using its own snapshots, backups will be useful for uploading data from postgres to zenith.

So here is an attempt to design a consistent CLI for different usage scenarios:

#### 1. Start an empty pageserver.
That is what we have now.
Init an empty pageserver using `initdb` in a temporary directory.

The `--storage_dest=FILE_PREFIX | S3_PREFIX |...` option defines the object storage type; all other parameters are passed via env variables. Inspired by WAL-G style naming: https://wal-g.readthedocs.io/STORAGES/.

Save `storage_dest` and other parameters in the config.
Push snapshots to `storage_dest` in the background.

```
zenith init --storage_dest=S3_PREFIX
zenith start
```

#### 2. Restart the pageserver (manually or crash-recovery).
Take `storage_dest` from the pageserver config, start the pageserver from the latest snapshot in `storage_dest`.
Push snapshots to `storage_dest` in the background.

```
zenith start
```

#### 3. Import.
Start the pageserver from an existing snapshot.
The path to the snapshot is provided via `--snapshot_path=FILE_PREFIX | S3_PREFIX | ...`
Do not save `snapshot_path` and `snapshot_format` in the config, as it is a one-time operation.
Save `storage_dest` parameters in the config.
Push snapshots to `storage_dest` in the background.
```
//I.e. we want to start zenith on top of existing $PGDATA and use s3 as a persistent storage.
zenith init --snapshot_path=FILE_PREFIX --snapshot_format=pgdata --storage_dest=S3_PREFIX
zenith start
```
How do we pass the credentials needed for `snapshot_path`?

#### 4. Export.
Manually push a snapshot to `snapshot_path`, which differs from `storage_dest`.
Optionally set `snapshot_format`, which can be plain pgdata format or zenith format.
```
zenith export --snapshot_path=FILE_PREFIX --snapshot_format=pgdata
```

#### Notes and questions
- safekeeper s3_offload should use the same (or similar) syntax for storage. How do we set it in the UI?
- Why do we need `zenith init` as a separate command? Can't we init everything at first start?
- We can think of better names for all options.
- Export to plain postgres format will be useless if we are not 100% compatible on the page level.
I can recall at least one such difference - the PD_WAL_LOGGED flag in pages.

docs/rfcs/009-snapshot-first-storage-pitr.md (new file, 227 lines)
@@ -0,0 +1,227 @@

# Preface

GetPage@LSN can be called with older LSNs, and the page server needs
to be able to reconstruct older page versions. That's needed for
having read-only replicas that lag behind the primary, or that are
"anchored" at an older LSN, and internally in the page server when you
branch at an older point in time. How do you do that?

For now, I'm not considering incremental snapshots at all. I don't
think that changes things. So whenever you create a snapshot or a
snapshot file, it contains an image of all the pages; there is no need
to look at an older snapshot file.

Also, I'm imagining that this works on a per-relation basis, so that
each snapshot file contains data for one relation. A "relation" is a
fuzzy concept - it could actually be one 1 GB relation segment. Or it
could include all the different "forks" of a relation, or you could
treat each fork as a separate relation for storage purposes. And once
the "non-relational" work is finished, a "relation" could
actually mean some other versioned object kept in the PostgreSQL data
directory. Let's ignore that for now.

# Eric's RFC:

Every now and then, you create a "snapshot". It means that you create
a new snapshot file for each relation that was modified after the last
snapshot, and write out the contents of the relation as it is/was at
the snapshot LSN. The write-ahead log is stored separately in S3 by
the WAL safekeeping service, in the original PostgreSQL WAL file
format.

SNAPSHOT @100     WAL
   .               |
   .               |
   .               |
   .               |
SNAPSHOT @200      |
   .               |
   .               |
   .               |
   .               |
SNAPSHOT @300      |
   .               |
   .               V
IN-MEMORY @400

If a GetPage@LSN request comes from the primary, you return the latest
page from the in-memory layer. If there is no trace of the page in
memory, it means that it hasn't been modified since the last snapshot,
so you return the page from the latest snapshot, at LSN 300 in the
above example.

PITR is implemented using the original WAL files:

If a GetPage@LSN request comes from a read replica with LSN 250, you
read the image of the page from the snapshot at LSN 200, and you also
scan the WAL between 200 and 250, and apply all WAL records for the
requested page, to reconstruct it at LSN 250.

Scanning the WAL naively for every GetPage@LSN request would be
expensive, so in practice you'd construct an in-memory data structure
of all the WAL between 200 and 250 once, which allows quickly looking
up the records for a given page.
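
A sketch of that lookup structure and the reconstruction path, with
hypothetical types (`Lsn`, `PageNo`, the record format and the redo
function are all stand-ins):

```rust
use std::collections::BTreeMap;
use std::ops::Bound::{Excluded, Included};

type Lsn = u64;
type PageNo = u32;

#[derive(Clone)]
struct PageImage(Vec<u8>);

struct WalRecord {
    payload: Vec<u8>,
}

/// Stand-in for real WAL redo, which would interpret the record.
fn apply_record(page: &mut PageImage, rec: &WalRecord) {
    page.0.extend_from_slice(&rec.payload);
}

/// GetPage@LSN for a lagging replica: take the page image from the
/// newest snapshot at or below the requested LSN, then replay that
/// page's WAL records up to the requested LSN.
fn get_page_at_lsn(
    snapshots: &BTreeMap<Lsn, BTreeMap<PageNo, PageImage>>,
    wal_by_page: &BTreeMap<(PageNo, Lsn), WalRecord>,
    page: PageNo,
    lsn: Lsn,
) -> Option<PageImage> {
    let (snap_lsn, snap) = snapshots.range(..=lsn).next_back()?;
    let mut img = snap.get(&page)?.clone();
    for (_, rec) in wal_by_page.range((Excluded((page, *snap_lsn)), Included((page, lsn)))) {
        apply_record(&mut img, rec);
    }
    Some(img)
}
```

In the example above, `snapshots.range(..=250).next_back()` finds the
snapshot at LSN 200, and the range scan replays that page's records
between 200 and 250.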

## Problems/questions

I think you'll need to store the list of snapshot LSNs on each
timeline somewhere.

If the latest snapshot of a relation is at LSN 100, and you request a
page at LSN 1000000, how do you know if there are some modifications
to it between 100 and 1000000 that you need to replay? You can scan
all the WAL between 100 and 1000000, but that would be expensive.

You can skip that if you know that a snapshot was taken e.g. at LSN
999900. Then you know that the fact that there is no snapshot file at
999900 means that the relation hasn't been modified between
100-999900. Then you only need to scan the WAL between 999900 and
1000000. However, there is no trace of a snapshot happening at LSN
999900 in the snapshot file for this relation, so you need to get
that information from somewhere else.

Where do you get that information from? Perhaps you can scan all the
other relations, and if you see a snapshot file for *any* relation at
LSN 999900, you know that if there were modifications to this
relation, there would be a newer snapshot file for it, too. In other
words, the list of snapshots that have been taken can be constructed
by scanning all relations and computing the union of all snapshot LSNs
that you see for any relation. But that's expensive, so at least you
should keep that in memory after computing it once. Also, if you rely
on that, it's not possible to have snapshots at different intervals
for different files. That seems limiting.

Another option is to explicitly store a list of snapshot LSNs in a
separate metadata file.


# Current implementation in the 'layered_repo' branch:

We store snapshot files like in the RFC, but each snapshot file also
contains all the WAL in the range of LSNs, so that you don't need to
fetch the WAL separately from S3. So you have "layers" like this:

SNAPSHOT+WAL 100-200
         |
         |
         |
         |
SNAPSHOT+WAL 200-300
         |
         |
         |
         |
IN-MEMORY 300-

Each "snapshot+WAL" is a file that contains a snapshot - i.e. a full
copy of each page in the relation, at the *start* LSN. In addition to
that, it contains all the WAL applicable to the relation from the
start LSN to the end LSN. With that, you can reconstruct any page
version in the range that the file covers.


## Problems/questions

I can see one potential performance issue here, compared to the RFC.
Let's focus on a single relation for now. Imagine that you start from
an empty relation, and you receive WAL from 100 to 200, containing
a bunch of inserts and updates to the relation. You now have all that
WAL in memory:

    memory: WAL from 100-200

We decide that it's time to materialize that to a snapshot file on
disk. We materialize a full image of the relation as it was at LSN 100
to the snapshot file, and include all of the WAL. Since the relation
was initially empty, the "image" at the beginning of the range is
empty too.

So now you have one file on disk:

    SNAPSHOT+WAL 100-200

It contains a full image of the relation at LSN 100 and all WAL
between 100-200. (It's actually stored as a serialized BTreeMap of
page versions, with the page images and WAL records all stored
together in the same BTreeMap. But for this story, that's not
important.)

We now receive more WAL updating the relation, up to LSN 300. We
decide it's time to materialize a new snapshot file, and we now have
two files:

    SNAPSHOT+WAL 100-200
    SNAPSHOT+WAL 200-300

Note that the latest "full snapshot" that we store on disk always lags
behind by one snapshot cycle. The first file contains a full image of
the relation at LSN 100, the second at LSN 200. When we have received
WAL up to LSN 300, we write a materialized image at LSN 200. That
seems a bit silly. In the design per your RFC, you would write
snapshots at LSNs 200 and 300 instead. That seems better.


# Third option (not implemented yet)

Store snapshot files like in the RFC, but also store per-relation
WAL files that contain the WAL in a range of LSNs for that relation.

SNAPSHOT @100     WAL 100-200
   .               |
   .               |
   .               |
   .               |
SNAPSHOT @200     WAL 200-300
   .               |
   .               |
   .               |
   .               |
SNAPSHOT @300
   .
   .
IN-MEMORY 300-


This could be the best of both worlds. The snapshot files would be
independent of the PostgreSQL WAL format. When it's time to write
snapshot file @300, you write a full image of the relation at LSN 300,
and you write the WAL that you had accumulated between 200 and 300 to
a separate file. That way, you don't "lag behind" for one snapshot
cycle like in the current implementation. But you still have the WAL
for a particular relation readily available alongside the snapshot
files, and you don't need to separately track what snapshot LSNs
exist.

(If we wanted to minimize the number of files, we could include the
snapshot @300 and the WAL between 200 and 300 in the same file, but I
feel it's probably better to keep them separate.)


# Further thoughts

There's no fundamental reason why the LSNs of the snapshot files and
the ranges of the WAL files would need to line up. So this would be
possible too:

SNAPSHOT @100     WAL 100-150
   .               |
   .               |
   .              WAL 150-250
   .               |
SNAPSHOT @200      |
   .               |
   .              WAL 250-400
   .               |
   .               |
SNAPSHOT @300      |
   .               |
   .               |
IN-MEMORY 300-

I'm not sure what the benefit of this would be. You could materialize
additional snapshot files in the middle of a range covered by a WAL
file, maybe? That might be useful to speed up access when you create a
new branch in the middle of an LSN range, or if there's some other
reason to believe that a particular LSN is "interesting" and there
will be a lot of requests using it.

docs/rfcs/009-snapshot-first-storage.md (new file, 148 lines)
@@ -0,0 +1,148 @@

# Snapshot-first storage architecture

Goals:
- Long-term storage of database pages.
- Easy snapshots; simple snapshot and branch management.
- Allow cloud-based snapshot/branch management.
- Allow cloud-centric branching; decouple branch state from the running pageserver.
- Allow customer ownership of data via s3 permissions.
- Provide the same or better performance for typical workloads, vs plain postgres.

Non-goals:
- Service database reads from s3 (reads should be serviced from the pageserver cache).
- Keep every version of every page / implement point-in-time recovery (possibly a future paid feature, based on WAL replay from an existing snapshot).

## Principle of operation

The database “lives in s3”. This means that all of the long-term page storage is in s3, and the “live database” -- the version that lives in the pageserver -- is a set of “dirty pages” that haven’t yet been written back to s3.

In practice, this is mostly similar to storing frequent snapshots to s3 of a database that lives primarily elsewhere.

The main difference is that s3 is authoritative about which branches exist; pageservers consume branches, snapshots, and related metadata by reading them from s3. This allows cloud-based management of branches and snapshots, regardless of whether a pageserver is running or not.

It’s expected that a pageserver should keep a copy of all pages, to shield users from s3 latency. A cheap/slow pageserver that falls back to s3 for some reads would be possible, but doesn’t seem very useful right now.

Because s3 keeps all history, and the safekeeper(s) preserve any WAL records needed to reconstruct the most recent changes, the pageserver can store dirty pages in RAM or using non-durable local storage; this should allow very good write performance, since there is no need for fsync or journaling.

Objects in s3 are immutable snapshots, never to be modified once written (only deleted).

Objects in s3 are files, each containing a set of pages for some branch/relation/segment as of a specific time (LSN). A snapshot could be complete (meaning it has a copy of every page), or it could be incremental (containing only the pages that were modified since the previous snapshot). It’s expected that most snapshots are incremental, to keep storage costs low.

It’s expected that the pageserver would upload new snapshot objects frequently, e.g. somewhere between every 30 seconds and every 15 minutes, depending on the cost/performance balance.

No-longer-needed snapshots can be “squashed” -- meaning snapshot N and snapshot N+1 can be read by some cloud agent software, which writes out a new object containing the combined set of pages (keeping only the newest version of each page) and then deletes the original snapshots.

A pageserver only needs to store the set of pages needed to satisfy operations in flight: if a snapshot is still being written, the pageserver needs to hold historical pages so that the snapshot captures a consistent moment in time (similar to what is needed to satisfy a slow replica).

WAL records can be discarded once a snapshot has been stored to s3. (Unless we want to keep them longer as part of a point-in-time recovery feature.)

## Pageserver operation

To start a pageserver from a stored snapshot, the pageserver downloads a set of snapshots sufficient to start handling requests. We assume this includes the latest copy of every page, though it might be possible to start handling requests early, and retrieve pages for the first time only when needed.

To halt a pageserver, one final snapshot should be written containing all pending WAL updates; then the pageserver and safekeepers can shut down.

It’s assumed there is some cloud management service that ensures only one pageserver is active and servicing writes to a given branch.

The pageserver needs to be able to track whether a given page has been modified since the last snapshot, and should be able to produce the set of dirty pages efficiently to create a new snapshot.

The pageserver need only store pages that are “reachable” from a particular LSN. For example, a page may be written four times, at LSNs 100, 200, 300, and 400. If no snapshot is being created when LSN 200 is written, the page at LSN 100 can be discarded. If a snapshot is triggered when the pageserver is at LSN 299, the pageserver must preserve the page from LSN 200 until that snapshot is complete. As before, the page at LSN 300 can be discarded when the LSN 400 page is written (regardless of whether the LSN 200 snapshot has completed).
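
The discard rule from that example can be stated compactly (a sketch
with hypothetical types; `open_snapshots` holds the LSNs at which
still-in-progress snapshots were triggered):

```rust
type Lsn = u64;

/// A page version at `version_lsn`, superseded by a newer version at
/// `next_version_lsn`, may be discarded unless some in-progress
/// snapshot was triggered at an LSN that still needs this version.
fn can_discard(version_lsn: Lsn, next_version_lsn: Lsn, open_snapshots: &[Lsn]) -> bool {
    !open_snapshots
        .iter()
        .any(|&snap| version_lsn <= snap && snap < next_version_lsn)
}
```

Plugging in the numbers above: with a snapshot open at LSN 299, the
page version at LSN 200 must be kept (200 <= 299 < 300), while the
versions at LSN 100 and LSN 300 can be discarded as soon as they are
superseded.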

If the pageserver is servicing multiple branches, those branches may contain common history. While it would be possible to serve branches with zero knowledge of their common history, a pageserver could save a lot of space by using an awareness of branch history to share the common set of pages. Computing the “liveness” of a historical page may be tricky in the face of multiple branches.

The pageserver may store dirty pages in memory or in local block storage; any local block storage format is only temporary “overflow” storage, and is not expected to be readable by future software versions.

The pageserver may store clean pages (those that are captured in a snapshot) any way it likes: in memory, in a local filesystem (possibly keeping a local copy of the snapshot file), or using some custom storage format. Reading pages from s3 would be functional, but is expected to be prohibitively slow.

The mechanism for recovery after a pageserver failure is WAL redo. If we find that too slow in some situations (e.g. a write-heavy workload causes long startup), we can write more frequent snapshots to keep the number of outstanding WAL records low. If that’s still not good enough, we could look at other options (e.g. a redundant pageserver or an EBS page journal).

A read-only pageserver is possible; such a pageserver could be a read-only cache of a specific snapshot, or could auto-update to the latest snapshot on some branch. Either way, no safekeeper is required. Multiple read-only pageservers could exist for a single branch or snapshot.

## Cloud snapshot manager operation

Cloud software may wish to do the following operations (commanded by a user, or based on some pre-programmed policy or other cloud agent):
- Create/delete/clone/rename a database
- Create a new branch (possibly from a historical snapshot)
- Start/stop the pageserver/safekeeper on a branch
- List databases/branches/snapshots that are visible to this user account

Some metadata operations (e.g. list branches/snapshots of a particular db) could be performed by scanning the contents of a bucket and inspecting the file headers of each snapshot object. This might not be fast enough; it might be necessary to build a metadata service that can respond more quickly to some queries.

This is especially true if there are public databases: there may be many thousands of buckets that are public, and scanning all of them is not a practical strategy for answering metadata queries.

## Snapshot names, deletion and concurrency

There may be race conditions between operations -- in particular, a “squash” operation may replace two snapshot objects (A, B) with some combined object (C). Since C is logically equivalent to B, anything that attempts to access B should be able to seamlessly switch over to C. It’s assumed that a concurrent delete won’t disrupt a read in flight, but it may be possible for some process to read B’s header, and then discover on the next operation that B is gone.

For this reason, any read should attempt a fallback procedure (list objects; search the list for an equivalent object) if the attempted read fails. This requires a predictable naming scheme, e.g. `XXXX_YYYY_ZZZZ_DDDD`, where `XXXX` is the branch unique id, and `YYYY` and `ZZZZ` are the starting/ending LSN values. `DDDD` is a timestamp indicating when the object was created; this is used to disambiguate a series of empty snapshots, or to help a snapshot policy engine understand which snapshots should be kept or discarded.
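
A sketch of that fallback, with a hypothetical parser for the
`XXXX_YYYY_ZZZZ_DDDD` scheme (the field encodings here, hex LSNs and a
decimal timestamp, are assumptions):

```rust
/// Parsed object name: branch id, start LSN, end LSN, creation time.
struct SnapshotName<'a> {
    branch: &'a str,
    start_lsn: u64,
    end_lsn: u64,
    created: u64,
}

fn parse_name(name: &str) -> Option<SnapshotName<'_>> {
    let mut parts = name.split('_');
    Some(SnapshotName {
        branch: parts.next()?,
        start_lsn: u64::from_str_radix(parts.next()?, 16).ok()?,
        end_lsn: u64::from_str_radix(parts.next()?, 16).ok()?,
        created: parts.next()?.parse().ok()?,
    })
}

/// If reading `missing` failed, list the bucket and look for an object
/// on the same branch whose LSN range covers it (e.g. B squashed into C).
fn find_equivalent<'a>(listing: &'a [String], missing: &SnapshotName) -> Option<&'a String> {
    listing.iter().find(|name| {
        parse_name(name.as_str()).map_or(false, |c| {
            c.branch == missing.branch
                && c.start_lsn <= missing.start_lsn
                && c.end_lsn >= missing.end_lsn
        })
    })
}
```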

## Branching

A user may request a new branch from the cloud user interface. There is a sequence of things that needs to happen:
- If the branch is supposed to be based on the latest contents, the pageserver should perform an immediate snapshot. This is the parent snapshot for the new branch.
- Cloud software should create the new branch, by generating a new (random) unique branch identifier, and creating a placeholder snapshot object.
- The placeholder object is an empty snapshot containing only metadata (which anchors it to the right parent history) and no pages.
- The placeholder can be discarded when the first snapshot (containing data) is completed. Discarding is equivalent to squashing, when the snapshot contains no data.
- If the branch needs to be started immediately, a pageserver should be notified that it needs to start servicing the branch. This may not be the same pageserver that services the parent branch, though the common history may make it the best choice.

Some of these steps could be combined into the pageserver, but that process would not be possible in all cases (e.g. if no pageserver is currently running, or if the branch is based on an older snapshot, or if a different pageserver will be serving the new branch). Regardless of which software drives the process, the result should look the same.

## Long-term file format

Snapshot files (and any other object stored in s3) must be readable by future software versions.

It should be possible to build multiple tools (in addition to the pageserver) that can read and write this file format -- for example, to allow cloud snapshot management.

Files should contain the following metadata, in addition to the set of pages:
- The version of the file format.
- A unique identifier for this branch (should be worldwide-unique and unchanging).
- Optionally, any human-readable names assigned to this branch (for management UI/debugging/logging).
- For incremental snapshots, the identifier of the predecessor snapshot. For new branches, this will be the parent snapshot (the point at which history diverges).
- The location of the predecessor branch snapshot, if different from this branch’s location.
- The LSN range `(parent, latest]` for this snapshot. For complete snapshots, the parent LSN can be 0.
- The UTC timestamp of the snapshot creation (which may be different from the time of its highest LSN, if the database is idle).
- A SHA2 checksum over the entire file (excluding the checksum itself), to preserve file integrity.
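
As a sketch, the header might map onto a struct like this (field names
and types are hypothetical; the on-disk encoding is not specified
here):

```rust
/// Metadata carried by every snapshot file, per the list above.
struct SnapshotFileHeader {
    format_version: u32,
    branch_id: [u8; 16],                     // worldwide-unique, unchanging
    human_names: Vec<String>,                // optional; UI/debugging only
    predecessor_snapshot: Option<[u8; 16]>,  // parent for incrementals/new branches
    predecessor_location: Option<String>,    // if on a different branch/bucket
    lsn_range: (u64, u64),                   // (parent, latest]; parent 0 if complete
    created_at_utc: u64,                     // may differ from the highest LSN's time
    sha2_checksum: [u8; 32],                 // over the file, excluding this field
}
```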

A file may contain no pages and an empty LSN range (probably `(latest, latest]`?), which serves as a placeholder for either a newly-created branch, or a snapshot of an idle database.

Any human-readable names stored in the file may fall out of date if database/branch renames are allowed; there may need to be a cloud metadata service to query (current name -> unique identifier). We may choose instead not to store human-readable names in the database, or to treat them as debugging information only.

## S3 semantics, and other kinds of storage

For development and testing, it may be easier to use other kinds of storage in place of s3. For example, a directory full of files can substitute for an s3 bucket with multiple objects. This mode is expected to match the s3 semantics (e.g. don’t edit existing files or use symlinks). Unit tests may omit files entirely and use an in-memory mock bucket.

Some users may want to use a local or network filesystem in place of s3. This isn’t prohibited, but it’s not a priority, either.

Alternate implementations of s3 should be supported, including Google Cloud Storage.

Azure Blob Storage should be supported. We assume (without evidence) that it’s semantically equivalent to s3 for this purpose.

The properties of s3 that we depend on are:
- list objects
- streaming read of an entire object
- read a byte range from an object
- streaming write of a new object (may use multipart upload for better reliability)
- delete an object (which should not disrupt an already-started read)

Uploaded files, restored backups, or s3 buckets controlled by users could contain malicious content. We should always validate that objects contain the content they’re supposed to. Incorrect, corrupt, or malicious-looking contents should cause the software (cloud tools, pageserver) to fail gracefully.

## Notes

Possible simplifications, for a first draft implementation:
- Assume that dirty pages fit in pageserver RAM. Can use kernel virtual memory to page out to disk if needed. Can improve this later.
- Don’t worry about the details of the squashing process yet.
- Don’t implement a cloud metadata service; try to make everything work using basic s3 list-objects and reads.
- Don’t implement rename or delete at first.
- Don’t implement public/private, just use s3 permissions.
- Don’t worry about sharing history yet -- each user has their own bucket and a full copy of all data.
- Don’t worry about history that spans multiple buckets.
- Don’t worry about s3 regions.
- Don’t support user-writeable s3 buckets; users get only read-only access at most.

Open questions:
- How important is point-in-time recovery? When should we add this? How should it work?
- Should snapshot files use compression?
- Should we use snapshots for async replication? A spare pageserver could stay mostly warmed up by consuming snapshots as they’re created.
- Should manual snapshots, or snapshots triggered by branch creation, be named differently from snapshots that are triggered by a snapshot policy?
- When a new branch is created, should it always be served by the same pageserver that owns its parent branch? When should we start a new pageserver?
- How can pageserver software upgrades be done with minimal downtime?

docs/rfcs/010-storage_details.md (new file, 144 lines)
@@ -0,0 +1,144 @@

# Storage details

Here I have tried to describe the current state of thinking about our storage subsystem as I understand it. Feel free to correct me. Also, I tried to address items from Heikki's TODO and be specific on some of the details.

## Overview



### MemStore

MemStore holds the data between `latest_snapshot_lsn` and `latest_lsn`. It consists of the PageIndex, which holds references to WAL records or pages; the PageStore, which stores recently materialized pages; and the WalStore, which stores recently received WAL.

### PageIndex

PageIndex is an ordered collection that maps `(BufferTag, LSN)` to one of the following references (by reference I mean whatever information is needed to access that data, e.g. a file_id and offset):

* PageStoreRef -- page offset in the PageStore
* LocalStoreRef -- snapshot_id and page offset inside of that snapshot
* WalStoreRef -- offset (and optionally size) of a WalRecord in WalStore

PageIndex holds information about all the pages in all incremental snapshots and in the latest full snapshot. If we aren't using page compression inside snapshots, we can actually avoid storing references to the full snapshot and calculate page offsets based on the relation-size metadata in the full snapshot (assuming that the full snapshot stores pages sorted by page number). However, I would suggest embracing page compression from the beginning and treating all pages as variable-sized.

We assume that the PageIndex is a few orders of magnitude smaller than the addressed data, hence it should fit in memory. We also don't care about crash tolerance, as we can rebuild it from snapshot metadata and the WAL records from WalStore and/or the Safekeeper.
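
For concreteness, a minimal sketch of that mapping (the types here are
hypothetical placeholders, not the actual pageserver definitions):

```rust
use std::collections::BTreeMap;

type Lsn = u64;

/// Identifies one page of one relation fork (hypothetical layout).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct BufferTag {
    rel: u32,
    fork: u8,
    blkno: u32,
}

/// Where the data for a (BufferTag, LSN) pair lives.
enum PageRef {
    PageStore { offset: u64 },                    // recently materialized page
    LocalStore { snapshot_id: u64, offset: u64 }, // page inside a snapshot file
    WalStore { offset: u64, len: Option<u32> },   // WAL record to replay
}

/// Ordered, so a range scan can collect all versions of one page, or
/// the newest version at or below a requested LSN.
type PageIndex = BTreeMap<(BufferTag, Lsn), PageRef>;
```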
|
||||
|
||||
### WalStore
|
||||
|
||||
WalStore is a queue of recent WalRecords. I imagine that we can store recent WAL the same way as Postgres does -- as 16MB files on disk. On top of that, we can add some fixed-size cache that would keep some amount of segments in memory.
|
||||
|
||||
For now, we may rely on the Safekeeper to safely store that recent WAL. But generally, I think we can pack all S3 operations into the page server so that it would be also responsible for the recent WAL pushdown to S3 (and Safekeeper may just delete WAL that was confirmed as S3-durable by the page server).
|
||||
|
||||
### PageStore
|
||||
|
||||
PageStore is storage for recently materialized pages (in other words, a cache of getPage results). It can also be implemented as a file-based queue with some memory cache on top of it.
|
||||
|
||||
There are a few possible options for PageStore:
|
||||
|
||||
a) we just add all recently materialized pages there (so several versions of the same page can be stored there) -- that is more or less how it happens now with the current RocksDB implementation.
|
||||
|
||||
b) overwrite older pages with the newer pages -- if there is no replica we probably don't need older pages. During page overwrite, we would also need to change PageStoreRef back to WalStoreRef in PageIndex.
|
||||
|
||||
I imagine that newly created pages would just be added to the back of PageStore (again in a queue-like fashion), so there wouldn't be any meaningful ordering inside that queue. When we are forming a new incremental snapshot we may prohibit any updates to the current set of pages in PageStore (giving up on the single page version rule) and cut off that whole set when snapshot creation is complete.
|
||||
|
||||
With option b) we can also treat PageStore as an in-progress incremental snapshot.
|
||||
|
||||
### LocalStore
|
||||
|
||||
LocalStore keeps the latest full snapshot and set of incremental snapshots on top of it. We add new snapshots when the number of changed pages grows bigger than a certain threshold.
|
||||
|
||||
## Granularity
|
||||
|
||||
By granularity, I mean the set of pages that goes into a certain full snapshot. The following things should be taken into account:
|
||||
|
||||
* can we shard big databases between page servers?
|
||||
* how much time will we spend applying WAL to access certain pages with older LSN's?
|
||||
* how many files do we create for a single database?
|
||||
|
||||
I can think of the following options here:
|
||||
|
||||
1. whole database goes to one full snapshot.
|
||||
* +: we never create a lot of files for one database
|
||||
* +: the approach is quite straightforward, moving data around is simple
|
||||
* -: can not be sharded
|
||||
* -: long recovery -- we always need to recover the whole database
|
||||
2. table segment is the unit of snapshotting
|
||||
* +: straightforward for sharding
|
||||
* +: individual segment can be quickly recovered with sliced WAL
|
||||
* -: a full snapshot can be really small (e.g. when the corresponding segment consists of a single page) and we can blow up the number of files. Then we would spend an eternity in directory scans, and the amount of metadata for sharding can also be quite big.
|
||||
3. range-partitioned snapshots -- snapshot includes all pages between [BuffTagLo, BuffTagHi] mixing different relations, databases, and potentially clusters (albeit from one tenant only). When full snapshot outgrows a certain limit (could be also a few gigabytes) we split the snapshot in two during the next full snapshot write. That approach would also require pages sorted by BuffTag inside our snapshots.
|
||||
* +: addresses all mentioned issues
|
||||
* -: harder to implement
|
||||
|
||||
I think it is okay to start with table-segment granularity, check how we perform in cases with lots of small tables, and see whether there is any way besides option 3 to deal with it.
|
||||
|
||||
Both PageStore and WalStore should be "sharded" by this granularity level.
|
||||
|
||||
## Security
|
||||
|
||||
We can generate different IAM keys for each tenant and potentially share them with users (in read-only mode?) or even allow users to provide their S3 buckets credentials.
|
||||
|
||||
Also, S3 backups are usually encrypted with per-tenant private keys. I'm not sure in what threat model such encryption would improve anything (taking into account per-tenant IAM keys), but it seems that everybody is doing that (both AMZN and YNDX). Most likely that comes as a requirement about "cold backups" from some certification procedure.
|
||||
|
||||
## Dynamics
|
||||
|
||||
### WAL stream handling
|
||||
|
||||
When a new WAL record is received, we need to parse the BufferTags in that record and insert them into the PageIndex with a WalStoreRef as the value.
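A rough sketch of that step, building on the PageIndex sketch above; `decode_buffer_tags` is a hypothetical helper standing in for the real per-resource-manager WAL decoding.

```rust
// Hypothetical decoder: extract the BufferTags touched by one WAL record.
fn decode_buffer_tags(_record: &[u8]) -> Vec<BufferTag> {
    unimplemented!()
}

// Register a newly received WAL record in the PageIndex: one WalStoreRef entry
// per page that the record touches.
fn ingest_wal_record(index: &mut PageIndex, record: &[u8], record_lsn: Lsn, wal_store_offset: u64) {
    for tag in decode_buffer_tags(record) {
        index.insert(
            (tag, record_lsn),
            PageRef::WalStore { offset: wal_store_offset, len: record.len() as u32 },
        );
    }
}
```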
|
||||
|
||||
### getPage queries
|
||||
|
||||
Look up the page in the PageIndex. If the value is a page reference, just respond with that page. If the referenced value is a WAL record, find the most recent page image with the same BuffTag (that is why we need ordering in PageIndex), recover the page by applying the WAL records, save it in PageStore, and respond with it.
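A sketch of that lookup, again building on the PageIndex sketch above; `read_page`, `read_wal`, `apply_wal` and `put_into_page_store` are hypothetical helpers standing in for the real storage access and WAL-redo machinery.

```rust
// Hypothetical helpers for PageStore/LocalStore/WalStore access and WAL redo.
fn read_page(_r: &PageRef) -> Vec<u8> { unimplemented!() }
fn read_wal(_offset: u64, _len: u32) -> Vec<u8> { unimplemented!() }
fn apply_wal(page: Vec<u8>, _record: &[u8]) -> Vec<u8> { page }
fn put_into_page_store(_page: &[u8]) -> u64 { unimplemented!() }

// Reconstruct the page `tag` as of `lsn`.
fn get_page(index: &mut PageIndex, tag: BufferTag, lsn: Lsn) -> Vec<u8> {
    let mut wal_to_apply = Vec::new();
    let mut base_page = None;

    // Walk the versions of this page from `lsn` downwards; the BTreeMap ordering
    // keeps all versions of one page adjacent.
    for ((t, _lsn), entry) in index.range(..=(tag, lsn)).rev() {
        if *t != tag {
            break; // no more versions of this page
        }
        match entry {
            PageRef::WalStore { offset, len } => wal_to_apply.push(read_wal(*offset, *len)),
            _ => {
                base_page = Some(read_page(entry)); // materialized page or snapshot page
                break;
            }
        }
    }

    // Replay the collected WAL on top of the base image, oldest record first.
    let mut page = base_page.expect("no base image found");
    for record in wal_to_apply.into_iter().rev() {
        page = apply_wal(page, &record);
    }

    // Cache the materialized page in PageStore and remember it in the index.
    let offset = put_into_page_store(&page);
    index.insert((tag, lsn), PageRef::PageStore { offset });
    page
}
```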
|
||||
|
||||
### Starting page server without local data
|
||||
|
||||
* build set of latest full snapshots and incremental snapshots on top of them
|
||||
* load all their metadata into PageIndex
|
||||
* Safekeeper should connect soon and we can ask for a WAL stream starting from the latest incremental snapshot
|
||||
* for databases that are connected to us through the Safekeeper we can start loading the set of the latest snapshots, or we can do that lazily based on getPage requests (I'd rather avoid doing that lazily for now, without access stats from the previous run, and just transfer all data for active databases from S3 to LocalStore).
|
||||
|
||||
### Starting page server with local data (aka restart or reboot)
|
||||
|
||||
* check that local snapshot files are consistent with S3
|
||||
|
||||
### Snapshot creation
|
||||
|
||||
Track the size of future snapshots based on info in MemStore, and when it exceeds some threshold (taking into account our granularity level), create a new incremental snapshot. Always emit incremental snapshots from MemStore.
|
||||
|
||||
To create a new snapshot we need to walk through WalStore to get the list of all changed pages, sort it, and get the latest versions of those pages from PageStore or by WAL replay. It makes sense to maintain that set in memory while we are receiving the WAL stream, to avoid parsing WAL during snapshot creation.
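A sketch of how that could look, assuming we maintain the set of changed pages while ingesting WAL and reuse the `get_page` sketch above to materialize the latest versions.

```rust
use std::collections::BTreeSet;

// Cut a new incremental snapshot: materialize the latest version of every changed
// page, already sorted by BufferTag (a BTreeSet iterates in key order). The
// `changed_pages` set is maintained while ingesting WAL, so WalStore does not
// have to be re-parsed here.
fn create_incremental_snapshot(
    index: &mut PageIndex,
    changed_pages: &BTreeSet<BufferTag>,
    snapshot_lsn: Lsn,
) -> Vec<(BufferTag, Vec<u8>)> {
    let mut pages = Vec::with_capacity(changed_pages.len());
    for &tag in changed_pages {
        let image = get_page(index, tag, snapshot_lsn); // replays WAL if needed
        pages.push((tag, image));
    }
    pages // to be serialized into the snapshot file format discussed below
}
```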
|
||||
|
||||
Full snapshot creation can be done by GC (or we can call that entity differently -- e.g. merger?) by merging the previous full snapshot with several incremental snapshots.
|
||||
|
||||
### S3 pushdown
|
||||
|
||||
When we have several full snapshots GC can push the old one with its increments to S3.
|
||||
|
||||
### Branch creation
|
||||
|
||||
Create a new timeline and replay sliced WAL up to the requested point. When a page is not in the PageIndex, ask the parent timeline for it. Relation sizes are tricky.
|
||||
|
||||
## File formats
|
||||
|
||||
As far as I understand, Bookfile/Aversion addresses the versioning and serialization parts.
|
||||
|
||||
As for the exact data that should go into snapshots, I think it is the following for each snapshot:
|
||||
|
||||
* format version number
|
||||
* set of key/values to interpret the content (e.g. is page compression enabled, is this a full or incremental snapshot, previous snapshot id, is there WAL at the end of the file, etc.) -- it is up to the reader to decide what to do if some keys are missing or some unknown keys are present. If we add something backward compatible to the file, we can keep the version number.
|
||||
* array of [BuffTag, corresponding offset in file] for pages -- IIUC that is analogous to ToC in Bookfile
|
||||
* array of [(BuffTag, LSN), corresponding offset in file] for the WAL records
|
||||
* pages, one by one
|
||||
* WAL records, one by one
|
||||
|
||||
It is also important to be able to load metadata quickly, since it would be one of the main factors impacting page server startup time. E.g. if we were to store/cache about 10TB of data per page server, the size of the uncompressed page references would be about 30GB (10TB / (8192 bytes page size / (~18 bytes per ObjectTag + 8 bytes offset in the file))).
|
||||
|
||||
1) Since our ToC/array of entries can be sorted by ObjectTag, we can store the whole BufferTag only when relation_id changes and store only delta-encoded offsets for a given relation (see the sketch after this list). That would reduce the average per-page metadata size to something less than 4 bytes instead of 26 (assuming that pages follow the same order and offset deltas are small).
|
||||
2) It makes sense to keep the ToC at the beginning of the file to avoid extra seeks to locate it. This doesn't matter too much with local files but matters on S3 -- if we are accessing a lot of ~1GB files with ~1MB of metadata each, then the time to transfer this metadata would be comparable with the access latency itself (which is about half a second). So by slurping the metadata with one read of the file header instead of N reads, we can improve the speed of page server start by this factor of N.
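A sketch of the delta encoding from item 1, with made-up entry types and assuming the ToC entries are sorted by BufferTag with increasing offsets; varint encoding of the deltas (not shown) is what would bring the average entry below 4 bytes.

```rust
// Made-up ToC entry layout for illustration.
enum TocEntry {
    // Emitted when the relation changes: full tag plus an absolute offset.
    NewRelation { rel_id: u32, block_no: u32, offset: u64 },
    // Emitted for further pages of the same relation: only small deltas,
    // which stay tiny when pages are laid out in BufferTag order.
    SameRelation { block_delta: u32, offset_delta: u32 },
}

fn encode_toc(entries: &[(BufferTag, u64)]) -> Vec<TocEntry> {
    let mut out = Vec::with_capacity(entries.len());
    let mut prev: Option<(BufferTag, u64)> = None;
    for &(tag, offset) in entries {
        match prev {
            Some((p, prev_offset)) if p.rel_id == tag.rel_id => out.push(TocEntry::SameRelation {
                block_delta: tag.block_no - p.block_no,
                offset_delta: (offset - prev_offset) as u32,
            }),
            _ => out.push(TocEntry::NewRelation {
                rel_id: tag.rel_id,
                block_no: tag.block_no,
                offset,
            }),
        }
        prev = Some((tag, offset));
    }
    out
}
```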
|
||||
|
||||
I think both of these optimizations can be done later, but that is something to keep in mind when we are designing our storage serialization routines.
|
||||
|
||||
Also, there were some discussions about how to embed WAL in incremental snapshots. So far the following ideas have been mentioned:
|
||||
1. snapshot lsn=200, includes WAL in range 200-300
|
||||
2. snapshot lsn=200, includes WAL in range 100-200
|
||||
3. data snapshots are separated from WAL snapshots
|
||||
|
||||
Both options 2 and 3 look good. I'm inclined towards option 3 as it would allow us to apply different S3 pushdown strategies for data and WAL files (e.g. we may keep data snapshots until the next full snapshot, but push WAL snapshots to S3 as soon as they appear if there are no replicas).
|
||||
91
docs/rfcs/011-retention-policy.md
Normal file
@@ -0,0 +1,91 @@
|
||||
# User-visible timeline history
|
||||
|
||||
The user can specify a retention policy. The retention policy is
|
||||
presented to the user as a PITR period and snapshots. The PITR period
|
||||
is the amount of recent history that needs to be retained, as minutes,
|
||||
hours, or days. Within that period, you can create a branch or
|
||||
snapshot at any point in time, open a compute node, and start running
|
||||
queries. Internally, a PITR period is represented as a range of LSNs.
|
||||
|
||||
The user can also create snapshots. A snapshot is a point in time,
|
||||
internally represented by an LSN. The user gives the snapshot a name.
|
||||
|
||||
The user can also specify an interval, at which the system creates
|
||||
snapshots automatically. For example, create a snapshot every night at
|
||||
2 AM. After some user-specified time, old automatically created
|
||||
snapshots are removed.
|
||||
|
||||
Snapshot Snapshot
|
||||
PITR "Monday" "Tuesday" PITR
|
||||
----######----------+-------------+-------------######>
|
||||
|
||||
If there are multiple branches, you can specify different policies for
|
||||
different branches.
|
||||
|
||||
The PITR period and user-visible snapshots together define the
|
||||
retention policy.
|
||||
|
||||
NOTE: As presented here, this is probably overly flexible. In reality,
|
||||
we want to keep the user interface simple. Only allow a PITR period at
|
||||
the tip of a branch, for example. But that doesn't make much
|
||||
difference to the internals.
|
||||
|
||||
|
||||
# Retention policy behind the scenes
|
||||
|
||||
The retention policy consists of points (for snapshots) and ranges
|
||||
(for PITR periods).
|
||||
|
||||
The system must be able to reconstruct any page within the retention
|
||||
policy. Other page versions can be garbage collected away. We have a
|
||||
lot of flexibility on when to perform the garbage collection and how
|
||||
aggressive it is.
|
||||
|
||||
|
||||
# Base images and WAL slices
|
||||
|
||||
The page versions are stored in two kinds of files: base images and
|
||||
WAL slices. A base image contains a dump of all the pages of one
|
||||
relation at a specific LSN. A WAL slice contains all the WAL in an LSN
|
||||
range.
|
||||
|
||||
|
||||
|
|
||||
|
|
||||
|
|
||||
| --Base img @100 +
|
||||
| |
|
||||
| | WAL slice
|
||||
| | 100-200
|
||||
| |
|
||||
| --Base img @200 +
|
||||
| |
|
||||
| | WAL slice
|
||||
| | 200-300
|
||||
| |
|
||||
| +
|
||||
|
|
||||
V
|
||||
|
||||
|
||||
To recover a page e.g. at LSN 150, you need the base image at LSN 100,
|
||||
and the WAL slice 100-200.
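A small illustration of that lookup with made-up types: pick the newest base image at or below the requested LSN, then every WAL slice that overlaps the gap up to the target.

```rust
type Lsn = u64;

struct BaseImage { lsn: Lsn /* ... plus a handle to the image file */ }
struct WalSlice { start: Lsn, end: Lsn /* ... plus a handle to the slice file */ }

// For one relation (or segment), find what is needed to reconstruct a page at
// `target`: the newest base image not newer than `target`, plus every WAL slice
// that overlaps the range (base.lsn, target].
fn plan_recovery<'a>(
    images: &'a [BaseImage],
    slices: &'a [WalSlice],
    target: Lsn,
) -> Option<(&'a BaseImage, Vec<&'a WalSlice>)> {
    let base = images.iter().filter(|b| b.lsn <= target).max_by_key(|b| b.lsn)?;
    let needed: Vec<&WalSlice> = slices
        .iter()
        .filter(|s| s.end > base.lsn && s.start < target)
        .collect();
    Some((base, needed))
}
```

For the example above (target LSN 150, base images at 100 and 200, WAL slices 100-200 and 200-300), this picks the base image at 100 and only the 100-200 slice.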
|
||||
|
||||
All of this works at a per-relation or per-relation-segment basis. If
|
||||
a relation is updated very frequently, we create base images and WAL
|
||||
slices for it more quickly. For a relation that's updated
|
||||
infrequently, we hold the recent WAL for that relation longer, and
|
||||
only write it out when we need to release the disk space occupied by
|
||||
the original WAL. (We need a backstop like that, because until all the
|
||||
WAL/base images have been durably copied to S3, we must keep the
|
||||
original WAL for that period somewhere, in the WAL service or in S3.)
|
||||
|
||||
|
||||
# Branching
|
||||
|
||||
Internally, branch points are also "retention points", in addition to
|
||||
the user-visible snapshots. If a branch has been forked off at LSN
|
||||
100, we need to be able to reconstruct any page on the parent branch
|
||||
at that LSN, because it is needed by the child branch. If a page is
|
||||
modified in the child, we don't need to keep that in the parent
|
||||
anymore, though.
|
||||
38
docs/rfcs/012-background-tasks.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# Eviction
|
||||
|
||||
Write out in-memory layer to disk, into a delta layer.
|
||||
|
||||
- To release memory
|
||||
- To make it possible to advance disk_consistent_lsn and allow the WAL
|
||||
service to release some WAL.
|
||||
|
||||
- Triggered if we are short on memory
|
||||
- Or if the oldest in-memory layer is so old that it's holding back
|
||||
the WAL service from removing old WAL
|
||||
|
||||
# Materialization
|
||||
|
||||
Create a new image layer of a segment, by performing WAL redo
|
||||
|
||||
- To reduce the amount of WAL that needs to be replayed on a GetPage request.
|
||||
- To allow garbage collection of old layers
|
||||
|
||||
- Triggered by distance to last full image of a page
|
||||
|
||||
# Coalescing
|
||||
|
||||
Replace N consecutive layers of a segment with one larger layer.
|
||||
|
||||
- To reduce the number of small files that need to be uploaded to S3
|
||||
|
||||
|
||||
# Bundling
|
||||
|
||||
Zip together multiple small files belonging to different segments.
|
||||
|
||||
- To reduce the number of small files that need to be uploaded to S3
|
||||
|
||||
|
||||
# Garbage collection
|
||||
|
||||
Remove a layer that's older than the GC horizon, and isn't needed anymore.
|
||||
147
docs/rfcs/013-term-history.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# What
|
||||
|
||||
Currently, apart from WAL, the safekeeper persistently stores only two logical clock
|
||||
counter (aka term) values, sourced from the same sequence. The first is bumped
|
||||
whenever the safekeeper gives its vote to a proposer (or acknowledges an already elected one)
|
||||
and e.g. prevents electing two proposers with the same term -- it is actually
|
||||
called `term` in the code. The second, called `epoch`, reflects the progress of log
|
||||
receipt and might lag behind `term`; the safekeeper switches to epoch `n` when
|
||||
it has received all committed log records from all `< n` terms. This roughly
|
||||
corresponds to what was proposed in
|
||||
|
||||
https://github.com/zenithdb/rfcs/pull/3/files
|
||||
|
||||
|
||||
This is our biggest difference from Raft. In Raft, every log record is
|
||||
stamped with the term in which it was generated, while we essentially store in
|
||||
`epoch` only the term of the highest record on this safekeeper -- when we know
|
||||
it -- because during recovery generally we don't, and `epoch` is bumped directly
|
||||
to the term of the proposer who performs the recovery when it is finished. It is
|
||||
not immediately obvious that this simplification is safe. I thought and I still
|
||||
think it is; model checking confirmed that. However, some details now make me
|
||||
believe it is better to keep full term switching history (which is equivalent to
|
||||
knowing term of each record).
|
||||
|
||||
# Why
|
||||
|
||||
Without knowing full history (list of <term, LSN> pairs) of terms it is hard to
|
||||
determine the exact divergence point, and if we don't perform truncation at that
|
||||
point safety becomes questionable. Consider the following history, with
|
||||
safekeepers A, B, C, D, E. n_m means record created by proposer in term n with
|
||||
LSN m; (t=x, e=y) means safekeeper currently has term x and epoch y.
|
||||
|
||||
1) P1 in term 1 writes 1.1 everywhere, which is committed, and some more only
|
||||
on A.
|
||||
|
||||
<pre>
|
||||
A(t=1, e=1) 1.1 1.2 1.3 1.4
|
||||
B(t=1, e=1) 1.1
|
||||
C(t=1, e=1) 1.1
|
||||
D(t=1, e=1) 1.1
|
||||
E(t=1, e=1) 1.1
|
||||
</pre>
|
||||
|
||||
2) P2 is elected by CDE in term 2, epochStartLsn is 2, and writes 2.2, 2.3 on CD:
|
||||
|
||||
<pre>
|
||||
A(t=1, e=1) 1.1 1.2 1.3 1.4
|
||||
B(t=1, e=1) 1.1
|
||||
C(t=2, e=2) 1.1 2.2 2.3
|
||||
D(t=2, e=2) 1.1 2.2 2.3
|
||||
E(t=2, e=1) 1.1
|
||||
</pre>
|
||||
|
||||
|
||||
3) P3 is elected by CDE in term 3, epochStartLsn is 4, and writes 3.4 on D:
|
||||
|
||||
<pre>
|
||||
A(t=1, e=1) 1.1 1.2 1.3 1.4
|
||||
B(t=1, e=1) 1.1
|
||||
C(t=3, e=2) 1.1 2.2 2.3
|
||||
D(t=3, e=3) 1.1 2.2 2.3 3.4
|
||||
E(t=3, e=1) 1.1
|
||||
</pre>
|
||||
|
||||
|
||||
Now, A comes back and P3 starts recovering it. How should it proceed? There are
|
||||
two options.
|
||||
|
||||
## Don't try to find divergence point at all
|
||||
|
||||
...start sending WAL conservatively since the horizon (1.1), and truncate
|
||||
obsolete part of WAL only when recovery is finished, i.e. epochStartLsn (4) is
|
||||
reached, i.e. 2.3 transferred -- that's what https://github.com/zenithdb/zenith/pull/505 proposes.
|
||||
|
||||
Then the following is possible:
|
||||
|
||||
4) P3 moves one record 2.2 to A.
|
||||
|
||||
<pre>
|
||||
A(t=1, e=1) 1.1 <b>2.2</b> 1.3 1.4
|
||||
B(t=1, e=1) 1.1 1.2
|
||||
C(t=3, e=2) 1.1 2.2 2.3
|
||||
D(t=3, e=3) 1.1 2.2 2.3 3.4
|
||||
E(t=3, e=1) 1.1
|
||||
</pre>
|
||||
|
||||
Now log of A is basically corrupted. Moreover, since ABE are all in epoch 1 and
|
||||
A's log is the longest one, they can elect P4 who will commit such log.
|
||||
|
||||
Note that this particular history couldn't happen if we forbade *creating* new
|
||||
records in term n until a majority of safekeepers switch to it. It would force CDE
|
||||
to switch to 2 before 2.2 is created, and A could never become the donor while its
|
||||
log is corrupted. Generally with this additional barrier I believe the algorithm
|
||||
becomes safe, but
|
||||
- I don't like this kind of artificial barrier;
|
||||
- I also feel somewhat uncomfortable about even temporarily having intentionally
|
||||
corrupted WAL;
|
||||
- I'd still model check the idea.
|
||||
|
||||
## Find divergence point and truncate at it
|
||||
|
||||
Then step 4 would delete 1.3 1.4 on A, and we are ok. The question is, how do we
|
||||
do that? Without term switching history we have to resort to sending again since
|
||||
the horizon and memcmp'ing records, which is inefficient and ugly. Or we can
|
||||
maintain full history and determine truncation point by comparing 'wrong' and
|
||||
'right' histories -- much like pg_rewind does -- and perform truncation + start
|
||||
streaming right there.
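A sketch of that comparison, with simplified types: walk both term histories in parallel and truncate at the first point where they disagree.

```rust
type Term = u64;
type Lsn = u64;

/// One entry of the term history: records of `term` start at `lsn`.
#[derive(Clone, Copy, PartialEq, Eq)]
struct TermSwitch { term: Term, lsn: Lsn }

/// The LSN at which the local WAL diverges from the donor's and must be truncated,
/// or None if the local history is a prefix of the donor's and nothing needs to be
/// cut. (Truncation is a no-op if the local WAL ends before the returned LSN.)
fn divergence_point(local: &[TermSwitch], donor: &[TermSwitch]) -> Option<Lsn> {
    let mut i = 0;
    loop {
        match (local.get(i), donor.get(i)) {
            // Histories still agree, keep scanning.
            (Some(a), Some(b)) if a == b => i += 1,
            // First mismatch: WAL is only guaranteed identical below the earlier switch point.
            (Some(a), Some(b)) => return Some(a.lsn.min(b.lsn)),
            // Local history ended early: anything past the donor's next switch is obsolete.
            (None, Some(b)) => return Some(b.lsn),
            // Local history is a prefix of (or equal to) the donor's.
            _ => return None,
        }
    }
}
```

Applied to the example above: A's history is roughly [(1, 1)] while the donor's is [(1, 1), (2, 2), (3, 4)], so the divergence point is LSN 2, and whatever A still has at LSN >= 2 from the old term is removed before streaming starts.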
|
||||
|
||||
# Proposal
|
||||
|
||||
- Add term history as array of <term, LSN> pairs to safekeeper controlfile.
|
||||
- Return it to proposer with VoteResponse so 1) proposer can tell it to other
|
||||
nodes and 2) determine personal streaming starting point. However, since we
|
||||
don't append WAL and update controlfile atomically, let's first always update
|
||||
controlfile but send only the history of what we really have (up to highest
|
||||
term in history where begin_lsn >= end of wal; this highest term replaces
|
||||
current `epoch`). We also send end of wal as we do now to determine the donor.
|
||||
- Create ProposerAnnouncement message which proposer sends before starting
|
||||
streaming. It announces proposer as elected and
|
||||
1) Truncates wrong part of WAL on safekeeper
|
||||
(divergence point is already calculated at proposer, but can be
|
||||
cross-verified here).
|
||||
2) Communicates the 'right' history of its term (taken from donor). Seems
|
||||
better to immediately put the history in the controlfile,
|
||||
though safekeeper might not have full WAL for previous terms in it --
|
||||
this way is simpler, and we can't update WAL and controlfile atomically anyway.
|
||||
|
||||
This also constitutes analogue of current epoch bump for those safekeepers
|
||||
which don't need recovery, which is important for sync-safekeepers (bump
|
||||
epoch without waiting records from new term).
|
||||
- After ProposerAnnouncement proposer streams WAL since calculated starting
|
||||
point -- only what is missing.
|
||||
|
||||
|
||||
pros/cons:
|
||||
+ (more) clear safety of WAL truncation -- we get very close to Raft
|
||||
+ no unnecessary data sending (faster recovery for not-oldest-safekeepers, matters
|
||||
only for 5+ nodes)
|
||||
+ adds some observability at safekeepers
|
||||
|
||||
- complexity, but not that much
|
||||
|
||||
|
||||
# Misc
|
||||
|
||||
- During model checking I did truncation on the first locally non-existent or
|
||||
different record -- analogue of 'memcmp' variant described above.
|
||||
69
docs/rfcs/014-safekeepers-gossip.md
Normal file
@@ -0,0 +1,69 @@
|
||||
# Safekeeper gossip
|
||||
|
||||
Extracted from this [PR](https://github.com/zenithdb/rfcs/pull/13)
|
||||
|
||||
## Motivation
|
||||
|
||||
In some situations, a safekeeper (SK) needs coordination with other SKs that serve the same tenant:
|
||||
|
||||
1. WAL deletion. SK needs to know what WAL was already safely replicated to delete it. Now we keep WAL indefinitely.
|
||||
2. Deciding on who is sending WAL to the pageserver. Currently, a crash of the sending SK may lead to a livelock where nobody sends WAL to the pageserver.
|
||||
3. To enable SK to SK direct recovery without involving the compute
|
||||
|
||||
## Summary
|
||||
|
||||
The compute node has connection strings for each safekeeper. During each compute->safekeeper connection establishment, the compute node should pass all those connection strings down to each safekeeper. With that info, safekeepers may establish Postgres connections to each other and periodically send ping messages with an LSN payload.
|
||||
|
||||
## Components
|
||||
|
||||
safekeeper, compute, compute<->safekeeper protocol, possibly console (group SK addresses)
|
||||
|
||||
## Proposed implementation
|
||||
|
||||
Each safekeeper can periodically ping all its peers and share connectivity and liveness info. If a ping was not received for, let's say, four ping periods, we may consider the sending safekeeper dead. That would mean one of the alive safekeepers should connect to the pageserver. One way to decide which one exactly: `make_connection = my_node_id == min(alive_nodes)`
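A small sketch of that rule with made-up types; in reality the liveness view would be fed by the gossip pings themselves.

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

type NodeId = u64;

// Liveness view built from gossip pings: a peer is considered dead if we have
// not heard from it for four ping periods.
struct PeerView {
    last_ping: HashMap<NodeId, Instant>,
    ping_period: Duration,
}

impl PeerView {
    fn alive_nodes(&self, now: Instant) -> impl Iterator<Item = NodeId> + '_ {
        let deadline = self.ping_period * 4;
        self.last_ping
            .iter()
            .filter(move |(_, &t)| now.duration_since(t) < deadline)
            .map(|(&id, _)| id)
    }

    // `make_connection = my_node_id == min(alive_nodes)`: the alive safekeeper
    // with the minimal id is the one that connects to the pageserver.
    fn should_connect(&self, my_node_id: NodeId, now: Instant) -> bool {
        self.alive_nodes(now)
            .chain(std::iter::once(my_node_id)) // we count ourselves as alive
            .min()
            == Some(my_node_id)
    }
}
```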
|
||||
|
||||
Since safekeepers are multi-tenant, we may establish either per-tenant physical connections or per-safekeeper ones. So it makes sense to group "logical" connections between corresponding tenants on different nodes into a single physical connection. That means that we should implement an interconnect thread that maintains physical connections and periodically broadcasts info about all tenants.
|
||||
|
||||
Right now the console may assign any 3 SK addresses to a given compute node. That may lead to a high number of gossip connections between SKs. Instead, we can assign safekeeper triples to compute nodes. But if we want to "break"/"change" a group by an ad-hoc action, we can still do it.
|
||||
|
||||
### Corner cases
|
||||
|
||||
- Current safekeeper may be alive but may not have connectivity to the pageserver
|
||||
|
||||
To address that, we need to gossip visibility info. Based on that info, we may define SK as alive only when it can connect to the pageserver.
|
||||
|
||||
- Current safekeeper may be alive but may not have connectivity with the compute node.
|
||||
|
||||
We may broadcast last_received_lsn and presence of compute connection and decide who is alive based on that.
|
||||
|
||||
- It is tricky to decide when to shut down gossip connections, because we need to be sure that the pageserver got all the committed records (committed in the distributed sense, so local SK info is not enough) and may never lose them. It is not a strict requirement, since the `--sync-safekeepers` run that happens before compute start will allow the pageserver to consume the missing WAL, but it is better to do that in the background. So the condition may look like this: `majority_max(flush_lsn) == pageserver_s3_lsn` (a sketch of `majority_max` follows below). Here we rely on two facts:
|
||||
- that `--sync-safekeepers` happened after the compute shutdown and advanced the local commit_lsns, allowing the pageserver to consume that WAL.
|
||||
|
||||
- we wait for the `pageserver_s3_lsn` advancement to avoid pageserver's last_received_lsn/disk_consistent_lsn going backward due to the disk/hardware failure and subsequent S3 recovery
|
||||
|
||||
If those conditions are not met, we will have some gossip activity (but that may be okay).
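A sketch of `majority_max`, under the assumption that it means "the highest LSN known to be flushed on a majority of safekeepers":

```rust
type Lsn = u64;

/// Highest LSN flushed on a majority of safekeepers: sort the reported
/// flush_lsns in descending order and take the quorum-th one. With
/// flush_lsns = [100, 150, 200] and a quorum of 2 this returns 150.
fn majority_max(mut flush_lsns: Vec<Lsn>) -> Option<Lsn> {
    let quorum = flush_lsns.len() / 2 + 1;
    flush_lsns.sort_unstable_by(|a, b| b.cmp(a)); // descending
    flush_lsns.get(quorum - 1).copied()
}
```

The gossip connections for a timeline could then be shut down once this value equals `pageserver_s3_lsn`.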
|
||||
|
||||
## Pros/cons
|
||||
|
||||
Pros:
|
||||
|
||||
- distributed, does not introduce new services (like etcd), does not add console as a storage dependency
|
||||
- lays the foundation for gossip-based recovery
|
||||
|
||||
Cons:
|
||||
|
||||
- Only the compute knows the set of safekeepers, but they should communicate even without a compute node. In case of a safekeeper restart, we will lose that info and can't gossip anymore. Hence we can't trim some WAL tail until the compute node starts. Also, it is ugly.
|
||||
|
||||
- If the console assigns a random set of safekeepers to each Postgres, we may end up in a situation where each safekeeper needs to have a connection with all other safekeepers. We can group safekeepers into isolated triples in the console to avoid that. Then "mixing" would happen only if we do rebalancing.
|
||||
|
||||
## Alternative implementation
|
||||
|
||||
We can have a selected node (e.g., console) with everybody reporting to it.
|
||||
|
||||
## Security implications
|
||||
|
||||
We don't increase the attack surface here. Communication can happen in a private network that is not exposed to users.
|
||||
|
||||
## Scalability implications
|
||||
|
||||
The only thing that may grow as we grow the number of computes is the number of gossip connections. But if we group safekeepers and assign a compute node to the random SK triple, the number of connections would be constant.
|
||||
145
docs/rfcs/014-storage-lsm.md
Normal file
@@ -0,0 +1,145 @@
|
||||
# Why LSM trees?
|
||||
|
||||
In general, an LSM tree has the nice property that random updates are
|
||||
fast, but the disk writes are sequential. When a new file is created,
|
||||
it is immutable. New files are created and old ones are deleted, but
|
||||
existing files are never modified. That fits well with storing the
|
||||
files on S3.
|
||||
|
||||
Currently, we create a lot of small files. That is mostly a problem
|
||||
with S3, because each GET/PUT operation is expensive, and LIST
|
||||
operation only returns 1000 objects at a time, and isn't free
|
||||
either. Currently, the files are "archived" together into larger
|
||||
checkpoint files before they're uploaded to S3 to alleviate that
|
||||
problem, but garbage collecting data from the archive files would be
|
||||
difficult and we have not implemented it. This proposal addresses that
|
||||
problem.
|
||||
|
||||
|
||||
# Overview
|
||||
|
||||
|
||||
```
|
||||
^ LSN
|
||||
|
|
||||
| Memtable: +-----------------------------+
|
||||
| | |
|
||||
| +-----------------------------+
|
||||
|
|
||||
|
|
||||
| L0: +-----------------------------+
|
||||
| | |
|
||||
| +-----------------------------+
|
||||
|
|
||||
| +-----------------------------+
|
||||
| | |
|
||||
| +-----------------------------+
|
||||
|
|
||||
| +-----------------------------+
|
||||
| | |
|
||||
| +-----------------------------+
|
||||
|
|
||||
| +-----------------------------+
|
||||
| | |
|
||||
| +-----------------------------+
|
||||
|
|
||||
|
|
||||
| L1: +-------+ +-----+ +--+ +-+
|
||||
| | | | | | | | |
|
||||
| | | | | | | | |
|
||||
| +-------+ +-----+ +--+ +-+
|
||||
|
|
||||
| +----+ +-----+ +--+ +----+
|
||||
| | | | | | | | |
|
||||
| | | | | | | | |
|
||||
| +----+ +-----+ +--+ +----+
|
||||
|
|
||||
+--------------------------------------------------------------> Page ID
|
||||
|
||||
|
||||
+---+
|
||||
| | Layer file
|
||||
+---+
|
||||
```
|
||||
|
||||
|
||||
# Memtable
|
||||
|
||||
When new WAL arrives, it is first put into the Memtable. Despite the
|
||||
name, the Memtable is not a purely in-memory data structure. It can
|
||||
spill to a temporary file on disk if the system is low on memory, and
|
||||
is accessed through a buffer cache.
|
||||
|
||||
If the page server crashes, the Memtable is lost. It is rebuilt by
|
||||
processing again the WAL that's newer than the latest layer in L0.
|
||||
|
||||
The size of the Memtable is configured by the "checkpoint distance"
|
||||
setting. Because anything that hasn't been flushed to disk and
|
||||
uploaded to S3 yet needs to be kept in the safekeeper, the "checkpoint
|
||||
distance" also determines the amount of WAL that needs to kept in the
|
||||
safekeeper.
|
||||
|
||||
# L0
|
||||
|
||||
When the Memtable fills up, it is written out to a new file in L0. The
|
||||
files are immutable; when a file is created, it is never
|
||||
modified. Each file in L0 is roughly 1 GB in size (*). Like the
|
||||
Memtable, each file in L0 covers the whole key range.
|
||||
|
||||
When enough files have been accumulated in L0, compaction
|
||||
starts. Compaction processes all the files in L0 and reshuffles the
|
||||
data to create a new set of files in L1.
|
||||
|
||||
|
||||
(*) except in corner cases like if we want to shut down the page
|
||||
server and want to flush out the memtable to disk even though it's not
|
||||
full yet.
|
||||
|
||||
|
||||
# L1
|
||||
|
||||
L1 consists of ~ 1 GB files like L0. But each file covers only part of
|
||||
the overall key space, and a larger range of LSNs. This speeds up
|
||||
searches. When you're looking for a given page, you need to check all
|
||||
the files in L0, to see if they contain a page version for the requested
|
||||
page. But in L1, you only need to check the files whose key range covers
|
||||
the requested page. This is particularly important at cold start, when
|
||||
checking a file means downloading it from S3.
|
||||
|
||||
Partitioning by key range also helps with garbage collection. If only a
|
||||
part of the database is updated, we will accumulate more files for
|
||||
the hot part in L1, and old files can be removed without affecting the
|
||||
cold part.
|
||||
|
||||
|
||||
# Image layers
|
||||
|
||||
So far, we've only talked about delta layers. In addition to the delta
|
||||
layers, we create image layers, when "enough" WAL has been accumulated
|
||||
for some part of the database. Each image layer covers a 1 GB range of
|
||||
key space. It contains images of the pages at a single LSN, a snapshot
|
||||
if you will.
|
||||
|
||||
The exact heuristic for what "enough" means is not clear yet. Maybe
|
||||
create a new image layer when 10 GB of WAL has been accumulated for a
|
||||
1 GB segment.
|
||||
|
||||
The image layers limit the number of layers that a search needs to
|
||||
check. That puts a cap on read latency, and it also allows garbage
|
||||
collecting layers that are older than the GC horizon.
|
||||
|
||||
|
||||
# Partitioning scheme
|
||||
|
||||
When compaction happens and creates a new set of files in L1, how do
|
||||
we partition the data into the files?
|
||||
|
||||
- Goal is that each file is ~ 1 GB in size
|
||||
- Try to match partition boundaries at relation boundaries. (See [1]
|
||||
for how PebblesDB does this, and for why that's important)
|
||||
- Greedy algorithm
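One possible greedy pass, with made-up types and thresholds; it only illustrates the idea of cutting files near the target size while preferring relation boundaries, not the actual algorithm.

```rust
const TARGET_FILE_SIZE: u64 = 1 << 30; // ~1 GB

// Hypothetical input: one entry per page, in key order.
struct Entry { rel_id: u32, size: u64 }

// Greedily split the ordered entries into files of roughly TARGET_FILE_SIZE,
// cutting early at a relation boundary when the current file is already
// reasonably large, so partition boundaries tend to match relation boundaries.
fn partition(entries: &[Entry]) -> Vec<Vec<&Entry>> {
    let mut files = Vec::new();
    let mut current: Vec<&Entry> = Vec::new();
    let mut current_size = 0u64;

    for (i, e) in entries.iter().enumerate() {
        let rel_changes = i > 0 && entries[i - 1].rel_id != e.rel_id;
        let should_cut = current_size >= TARGET_FILE_SIZE
            || (rel_changes && current_size >= TARGET_FILE_SIZE / 2);
        if should_cut && !current.is_empty() {
            files.push(std::mem::take(&mut current));
            current_size = 0;
        }
        current.push(e);
        current_size += e.size;
    }
    if !current.is_empty() {
        files.push(current);
    }
    files
}
```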
|
||||
|
||||
# Additional Reading
|
||||
|
||||
[1] Paper on PebblesDB and how it does partitioning.
|
||||
https://www.cs.utexas.edu/~rak/papers/sosp17-pebblesdb.pdf
|
||||
295
docs/rfcs/015-storage-messaging.md
Normal file
@@ -0,0 +1,295 @@
|
||||
# Storage messaging
|
||||
|
||||
Created on 19.01.22
|
||||
|
||||
Initially created [here](https://github.com/zenithdb/rfcs/pull/16) by @kelvich.
|
||||
|
||||
This is an alternative to [014-safekeepers-gossip](014-safekeepers-gossip.md).
|
||||
|
||||
## Motivation
|
||||
|
||||
As in 014-safekeeper-gossip we need to solve the following problems:
|
||||
|
||||
* Trim WAL on safekeepers
|
||||
* Decide on which SK should push WAL to the S3
|
||||
* Decide on which SK should forward WAL to the pageserver
|
||||
* Decide on when to shut down SK<->pageserver connection
|
||||
|
||||
This RFC suggests a more generic and hopefully more manageable way to address those problems. However, unlike 014-safekeeper-gossip, it does not bring us any closer to safekeeper-to-safekeeper recovery but rather unties two sets of different issues we previously wanted to solve with gossip.
|
||||
|
||||
Also, with this approach, we would not need "call me maybe" anymore, and the pageserver will have all the data required to understand that it needs to reconnect to another safekeeper.
|
||||
|
||||
## Summary
|
||||
|
||||
Instead of p2p gossip, let's have a centralized broker where all the storage nodes report per-timeline state. Each storage node should have a `--broker-url=1.2.3.4` CLI param.
|
||||
|
||||
Here I propose two ways to do that. After a lot of arguing with myself, I'm leaning towards the etcd approach. My arguments for it are in the pros/cons section. Both options require adding a Grpc client in our codebase either directly or as an etcd dependency.
|
||||
|
||||
## Non-goals
|
||||
|
||||
This RFC does *not* suggest moving the compute-to-pageserver and compute-to-safekeeper mappings out of the console. The console is still the only place in the cluster responsible for the persistence of that info. So I'm implying that each pageserver and safekeeper knows exactly which timelines it serves, as it currently does. We need some mechanism for a new pageserver to discover the mapping info, but that is out of the scope of this RFC.
|
||||
|
||||
## Impacted components
|
||||
|
||||
pageserver, safekeeper
|
||||
adds either etcd or console as a storage dependency
|
||||
|
||||
## Possible implementation: custom message broker in the console
|
||||
|
||||
We've decided to go with an etcd approach instead of the message broker.
|
||||
|
||||
<details closed>
|
||||
<summary>Original suggestion</summary>
|
||||
<br>
|
||||
We can add a Grpc service in the console that acts as a message broker since the console knows the addresses of all the components. The broker can ignore the payload and only redirect messages. So, for example, each safekeeper may send a message to the peering safekeepers or to the pageserver responsible for a given timeline.
|
||||
|
||||
Message format could be `{sender, destination, payload}`.
|
||||
|
||||
The destination is either:
|
||||
1. `sk_#{tenant}_#{timeline}` -- to be broadcasted on all safekeepers, responsible for that timeline, or
|
||||
2. `pserver_#{tenant}_#{timeline}` -- to be broadcasted on all pageservers, responsible for that timeline
|
||||
|
||||
Sender is either:
|
||||
1. `sk_#{sk_id}`, or
|
||||
2. `pserver_#{pserver_id}`
|
||||
|
||||
I can think of the following behavior to address our original problems:
|
||||
|
||||
* WAL trimming
|
||||
Each safekeeper periodically broadcasts `(write_lsn, commit_lsn)` to all peering (peering == responsible for that timeline) safekeepers
|
||||
|
||||
* Decide on which SK should push WAL to the S3
|
||||
|
||||
Each safekeeper periodically broadcasts an `i_am_alive_#{current_timestamp}` message to all peering safekeepers. That way, safekeepers may maintain a vector of alive peers (a loose one, with false negatives). The alive safekeeper with the minimal id pushes data to S3.
|
||||
|
||||
* Decide on which SK should forward WAL to the pageserver
|
||||
|
||||
Each safekeeper periodically sends (write_lsn, commit_lsn, compute_connected) to the relevant pageservers. With that info, the pageserver can maintain a view of the safekeepers' state, connect to a random one, and detect the moments (e.g., one of the safekeepers is down or not making progress) when it needs to reconnect to another safekeeper. The pageserver should resolve exact IP addresses through the console, e.g., exchange `sk_#{sk_id}` for `4.5.6.7:6400`.
|
||||
|
||||
The pageserver's connection to the safekeeper is triggered by the state change `compute_connected: false -> true`. With that, we don't need "call me maybe" anymore.
|
||||
|
||||
Also, we don't have a "peer address amnesia" problem as in the gossip approach (with gossip, after a simultaneous reboot, safekeepers wouldn't know each other addresses until the next compute connection).
|
||||
|
||||
* Decide on when to shutdown sk<->pageserver connection
|
||||
|
||||
Again, pageserver would have all the info to understand when to shut down the safekeeper connection.
|
||||
|
||||
### Scalability
|
||||
|
||||
One node is enough (c) No, seriously, it is enough.
|
||||
|
||||
### High Availability
|
||||
|
||||
Broker lives in the console, so we can rely on k8s maintaining the console app alive.
|
||||
|
||||
If the console is down, we won't trim WAL or reconnect the pageserver to another safekeeper. But, at the same time, if the console is down, we already can't accept new compute connections or start stopped computes, so we are making things a bit worse, but not dramatically.
|
||||
|
||||
### Interactions
|
||||
|
||||
```
|
||||
.________________.
|
||||
sk_1 <-> | | <-> pserver_1
|
||||
... | Console broker | ...
|
||||
sk_n <-> |________________| <-> pserver_m
|
||||
```
|
||||
</details>
|
||||
|
||||
|
||||
## Implementation: etcd state store
|
||||
|
||||
Alternatively, we can set up `etcd` and maintain the following data structure in it:
|
||||
|
||||
```ruby
|
||||
"compute_#{tenant}_#{timeline}" => {
|
||||
safekeepers => {
|
||||
"sk_#{sk_id}" => {
|
||||
write_lsn: "0/AEDF130",
|
||||
commit_lsn: "0/AEDF100",
|
||||
compute_connected: true,
|
||||
last_updated: 1642621138,
|
||||
},
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
As etcd doesn't support field updates in nested objects, that translates to the following set of keys:
|
||||
|
||||
```ruby
|
||||
"compute_#{tenant}_#{timeline}/safekeepers/sk_#{sk_id}/write_lsn",
|
||||
"compute_#{tenant}_#{timeline}/safekeepers/sk_#{sk_id}/commit_lsn",
|
||||
...
|
||||
```
|
||||
|
||||
Each storage node can subscribe to the relevant sets of keys and maintain a local view of that structure. So in terms of the data flow, everything is the same as in the previous approach. Still, we can avoid implementing the message broker and avoid a runtime storage dependency on the console.
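A sketch of that data flow; the `Broker` trait below is a hypothetical, simplified stand-in for the real etcd client API, used only to show who writes and who watches which keys.

```rust
// Hypothetical, simplified broker interface (the real etcd client API differs).
trait Broker {
    fn put(&self, key: &str, value: &str);
    fn watch_prefix(&self, prefix: &str, on_change: Box<dyn Fn(&str, &str)>);
}

// A safekeeper publishes its per-timeline fields under its own sub-prefix.
fn safekeeper_publish(
    broker: &dyn Broker,
    tenant: &str,
    timeline: &str,
    sk_id: u64,
    write_lsn: &str,
    commit_lsn: &str,
) {
    let prefix = format!("compute_{tenant}_{timeline}/safekeepers/sk_{sk_id}");
    broker.put(&format!("{prefix}/write_lsn"), write_lsn);
    broker.put(&format!("{prefix}/commit_lsn"), commit_lsn);
}

// The pageserver (or a peer safekeeper) watches the whole timeline prefix and
// keeps a local view of every safekeeper's state.
fn pageserver_subscribe(broker: &dyn Broker, tenant: &str, timeline: &str) {
    broker.watch_prefix(
        &format!("compute_{tenant}_{timeline}"),
        Box::new(|key, value| println!("{key} = {value}")),
    );
}
```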
|
||||
|
||||
### Safekeeper address discovery
|
||||
|
||||
During startup the safekeeper should publish the address it is listening on as part of `{"sk_#{sk_id}" => ip_address}`. Then the pageserver can resolve `sk_#{sk_id}` to the actual address. This way it would work both locally and in the cloud setup. The safekeeper should have an `--advertised-address` CLI option so that we can listen on e.g. 0.0.0.0 but advertise something more useful.
|
||||
|
||||
### Safekeeper behavior
|
||||
|
||||
For each timeline, the safekeeper periodically broadcasts the `compute_#{tenant}_#{timeline}/safekeepers/sk_#{sk_id}/*` fields. It subscribes to changes of `compute_#{tenant}_#{timeline}` -- that way the safekeeper will have information about its peering safekeepers.
|
||||
That amount of information is enough to properly trim WAL. To decide who pushes the data to S3, the safekeeper may use etcd leases or broadcast a timestamp and thus track who is alive.
|
||||
|
||||
### Pageserver behavior
|
||||
|
||||
The pageserver subscribes to `compute_#{tenant}_#{timeline}` for each tenant it owns. With that info, the pageserver can maintain a view of the safekeepers' state, connect to a random one, and detect the moments (e.g., one of the safekeepers is down or not making progress) when it needs to reconnect to another safekeeper. The pageserver should resolve exact IP addresses through the console, e.g., exchange `sk_#{sk_id}` for `4.5.6.7:6400`.
|
||||
|
||||
Pageserver connection to the safekeeper can be triggered by the state change `compute_connected: false -> true`. With that, we don't need "call me maybe" anymore.
|
||||
|
||||
As an alternative to compute_connected, we can track the timestamp of the latest message that arrived at the safekeeper from the compute. Usually the compute broadcasts a KeepAlive to all safekeepers every second, so the timestamp will be updated every second when the connection is ok. Then the connection can be considered down when this timestamp isn't updated for several seconds.
|
||||
|
||||
This will help to detect issues with a safekeeper faster (and switch to another one) in the following cases:
|
||||
|
||||
1. when the compute failed but the TCP connection stays alive until a timeout (usually about a minute)
|
||||
2. when the safekeeper failed and didn't set compute_connected to false
|
||||
|
||||
Another way to deal with case 2 is to treat (write_lsn, commit_lsn, compute_connected) as a KeepAlive on the pageserver side and detect issues when a given sk_id doesn't send anything for some time. This way is fully compliant with this RFC.
|
||||
|
||||
Also, we don't have a "peer address amnesia" problem as in the gossip approach (with gossip, after a simultaneous reboot, safekeepers wouldn't know each other addresses until the next compute connection).
|
||||
|
||||
### Interactions
|
||||
|
||||
```
|
||||
.________________.
|
||||
sk_1 <-> | | <-> pserver_1
|
||||
... | etcd | ...
|
||||
sk_n <-> |________________| <-> pserver_m
|
||||
```
|
||||
|
||||
### Sequence diagrams for different workflows
|
||||
|
||||
#### Cluster startup
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
autonumber
|
||||
participant C as Compute
|
||||
participant SK1
|
||||
participant SK2
|
||||
participant SK3
|
||||
participant PS1
|
||||
participant PS2
|
||||
participant O as Orchestrator
|
||||
participant M as Metadata Service
|
||||
|
||||
PS1->>M: subscribe to updates to state of timeline N
|
||||
C->>+SK1: WAL push
|
||||
loop constantly update current lsns
|
||||
SK1->>-M: I'm at lsn A
|
||||
end
|
||||
C->>+SK2: WAL push
|
||||
loop constantly update current lsns
|
||||
SK2->>-M: I'm at lsn B
|
||||
end
|
||||
C->>+SK3: WAL push
|
||||
loop constantly update current lsns
|
||||
SK3->>-M: I'm at lsn C
|
||||
end
|
||||
loop request pages
|
||||
C->>+PS1: get_page@lsn
|
||||
PS1->>-C: page image
|
||||
end
|
||||
M->>PS1: New compute appeared for timeline N. SK1 at A, SK2 at B, SK3 at C
|
||||
note over PS1: Say SK1 at A=200, SK2 at B=150 SK3 at C=100 <br> so connect to SK1 because it is the most up to date one
|
||||
PS1->>SK1: start replication
|
||||
```
|
||||
|
||||
#### Behaviour of services during typical operations
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
autonumber
|
||||
participant C as Compute
|
||||
participant SK1
|
||||
participant SK2
|
||||
participant SK3
|
||||
participant PS1
|
||||
participant PS2
|
||||
participant O as Orchestrator
|
||||
participant M as Metadata Service
|
||||
|
||||
note over C,M: Scenario 1: Pageserver checkpoint
|
||||
note over PS1: Upload data to S3
|
||||
PS1->>M: Update remote consistent lsn
|
||||
M->>SK1: propagate remote consistent lsn update
|
||||
note over SK1: truncate WAL up to remote consistent lsn
|
||||
M->>SK2: propagate remote consistent lsn update
|
||||
note over SK2: truncate WAL up to remote consistent lsn
|
||||
M->>SK3: propagate remote consistent lsn update
|
||||
note over SK3: truncate WAL up to remote consistent lsn
|
||||
note over C,M: Scenario 2: SK1 finds itself lagging behind MAX(150 (SK2), 200 (SK2)) - 100 (SK1) > THRESHOLD
|
||||
SK1->>SK2: Fetch WAL delta between 100 (SK1) and 200 (SK2)
|
||||
note over C,M: Scenario 3: PS1 detects that SK1 is lagging behind: Connection from SK1 is broken or there is no messages from it in 30 seconds.
|
||||
note over PS1: e.g. SK2 is at 150, SK3 is at 100, chose SK2 as a new replication source
|
||||
PS1->>SK2: start replication
|
||||
```
|
||||
|
||||
#### Behaviour during timeline relocation
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
autonumber
|
||||
participant C as Compute
|
||||
participant SK1
|
||||
participant SK2
|
||||
participant SK3
|
||||
participant PS1
|
||||
participant PS2
|
||||
participant O as Orchestrator
|
||||
participant M as Metadata Service
|
||||
|
||||
note over C,M: Timeline is being relocated from PS1 to PS2
|
||||
O->>+PS2: Attach timeline
|
||||
PS2->>-O: 202 Accepted if timeline exists in S3
|
||||
note over PS2: Download timeline from S3
|
||||
note over O: Poll for timeline download (or subscribe to metadata service)
|
||||
loop wait for attach to complete
|
||||
O->>PS2: timeline detail should answer that timeline is ready
|
||||
end
|
||||
PS2->>M: Register downloaded timeline
|
||||
PS2->>M: Get safekeepers for timeline, subscribe to changes
|
||||
PS2->>SK1: Start replication to catch up
|
||||
note over O: PS2 caught up, time to switch compute
|
||||
O->>C: Restart compute with new pageserver url in config
|
||||
note over C: Wal push is restarted
|
||||
loop request pages
|
||||
C->>+PS2: get_page@lsn
|
||||
PS2->>-C: page image
|
||||
end
|
||||
O->>PS1: detach timeline
|
||||
note over C,M: Scenario 1: Attach call failed
|
||||
O--xPS2: Attach timeline
|
||||
note over O: The operation can be safely retried, <br> if we hit some threshold we can try another pageserver
|
||||
note over C,M: Scenario 2: Attach succeeded but pageserver failed to download the data or start replication
|
||||
loop wait for attach to complete
|
||||
O--xPS2: timeline detail should answer that timeline is ready
|
||||
end
|
||||
note over O: Can wait for a timeout, and then try another pageserver <br> there should be a limit on number of different pageservers to try
|
||||
note over C,M: Scenario 3: Detach fails
|
||||
O--xPS1: Detach timeline
|
||||
note over O: can be retried, if continues to fail might lead to data duplication in s3
|
||||
```
|
||||
|
||||
# Pros/cons
|
||||
|
||||
## Console broker/etcd vs gossip:
|
||||
|
||||
Gossip pros:
|
||||
* gossip allows running storage without the console or etcd
|
||||
|
||||
Console broker/etcd pros:
|
||||
* simpler
|
||||
* solves "call me maybe" as well
|
||||
* avoid possible N-to-N connection issues with gossip without grouping safekeepers in pre-defined triples
|
||||
|
||||
## Console broker vs. etcd:
|
||||
|
||||
Initially, I wanted to avoid etcd as a dependency, mostly because I've seen how painful the ZooKeeper dependency was for ClickHouse: in each chat, at each conference, people were complaining about configuration and maintenance barriers with ZooKeeper. It was so bad that ClickHouse re-implemented ZooKeeper to embed it: https://clickhouse.com/docs/en/operations/clickhouse-keeper/.
|
||||
|
||||
But with etcd we are in a bit of a different situation:
|
||||
|
||||
1. We don't need persistency and strong consistency guarantees for the data we store in the etcd
|
||||
2. etcd uses Grpc as a protocol, and messages are pretty simple
|
||||
|
||||
So it looks like implementing an in-memory store with an etcd interface is a straightforward thing _if we ever want that in the future_. At the same time, we can avoid implementing it right now, and we will be able to run a local zenith installation with etcd running somewhere in the background (as opposed to building and running the console, which in turn requires Postgres).
|
||||
151
docs/rfcs/016-connection-routing.md
Normal file
@@ -0,0 +1,151 @@
|
||||
# Dispatching a connection
|
||||
|
||||
For each client connection, Neon service needs to authenticate the
|
||||
connection, and route it to the right PostgreSQL instance.
|
||||
|
||||
## Authentication
|
||||
|
||||
There are three different ways to authenticate:
|
||||
|
||||
- anonymous; no authentication needed
|
||||
- PostgreSQL authentication
|
||||
- github single sign-on using browser
|
||||
|
||||
In anonymous access, the user doesn't need to perform any
|
||||
authentication at all. This can be used e.g. in interactive PostgreSQL
|
||||
documentation, allowing you to run the examples very quickly. Similar
|
||||
to sqlfiddle.com.
|
||||
|
||||
PostgreSQL authentication works the same as always. All the different
|
||||
PostgreSQL authentication options like SCRAM, kerberos, etc. are
|
||||
available. [1]
|
||||
|
||||
The third option is to authenticate with github single sign-on. When
|
||||
you open the connection in psql, you get a link that you open with
|
||||
your browser. Opening the link redirects you to github authentication,
|
||||
and lets the connection proceed. This is also known as "Link auth" [2].
|
||||
|
||||
|
||||
## Routing the connection
|
||||
|
||||
When a client starts a connection, it needs to be routed to the
|
||||
correct PostgreSQL instance. Routing can be done by the proxy, acting
|
||||
as a man-in-the-middle, or the connection can be routed at the network
|
||||
level based on the hostname or IP address.
|
||||
|
||||
Either way, Neon needs to identify which PostgreSQL instance the
|
||||
connection should be routed to. If the instance is not already
|
||||
running, it needs to be started. Some connections always require a new
|
||||
PostgreSQL instance to be created, e.g. if you want to run a one-off
|
||||
query against a particular point-in-time.
|
||||
|
||||
The PostgreSQL instance is identified by:
|
||||
- Neon account (possibly anonymous)
|
||||
- cluster (known as tenant in the storage?)
|
||||
- branch or snapshot name
|
||||
- timestamp (PITR)
|
||||
- primary or read-replica
|
||||
- one-off read replica
|
||||
- one-off writeable branch
|
||||
|
||||
When you are using regular PostgreSQL authentication or anonymous
|
||||
access, the connection URL needs to contain all the information needed
|
||||
for the routing. With github single sign-on, the browser is involved
|
||||
and some details - the Neon account in particular - can be deduced
|
||||
from the authentication exchange.
|
||||
|
||||
There are three methods for identifying the PostgreSQL instance:
|
||||
|
||||
- Browser interaction (link auth)
|
||||
- Options in the connection URL and the domain name
|
||||
- A pre-defined endpoint, identified by domain name or IP address
|
||||
|
||||
### Link Auth
|
||||
|
||||
postgres://<username>@start.neon.tech/<dbname>
|
||||
|
||||
This gives you a link that you open in browser. Clicking the link
|
||||
performs github authentication, and the Neon account name is
|
||||
provided to the proxy behind the scenes. The proxy routes the
|
||||
connection to the primary PostgreSQL instance in cluster called
|
||||
"main", branch "main".
|
||||
|
||||
Further ideas:
|
||||
- You could pre-define a different target for link auth
|
||||
connections in the UI.
|
||||
- You could have a drop-down in the browser, allowing you to connect
|
||||
to any cluster you want. Link Auth can be like Teleport.
|
||||
|
||||
### Connection URL
|
||||
|
||||
The connection URL looks like this:
|
||||
|
||||
postgres://<username>@<cluster-id>.db.neon.tech/<dbname>
|
||||
|
||||
By default, this connects you to the primary PostgreSQL instance
|
||||
running on the "main" branch in the named cluster [3]. However, you can
|
||||
change that by specifying options in the connection URL. The following
|
||||
options are supported:
|
||||
|
||||
| option name | Description | Examples |
|
||||
| --- | --- | --- |
|
||||
| cluster | Cluster name | cluster:myproject |
|
||||
| branch | Branch name | branch:main |
|
||||
| timestamp | Connect to an instance at given point-in-time. | timestamp:2022-04-08 timestamp:2022-04-08T11:42:16Z |
|
||||
| lsn | Connect to an instance at given LSN | lsn:0/12FF0420 |
|
||||
| read-replica | Connect to a read-replica. If the parameter is 'new', a new instance is created for this session. | read-replica read-replica:new |
|
||||
|
||||
For example, to read branch 'testing' as it was on Mar 31, 2022, you could
|
||||
specify a timestamp in the connection URL [4]:
|
||||
|
||||
postgres://alice@cluster-1234.db.neon.tech/postgres?options=branch:testing,timestamp:2022-03-31
|
||||
|
||||
Connecting with cluster name and options can be disabled in the UI. If
|
||||
disabled, you can only connect using a pre-defined endpoint.
|
||||
|
||||
### Pre-defined Endpoint
|
||||
|
||||
Instead of providing the cluster name, branch, and all those options
|
||||
in the connection URL, you can define a named endpoint with the same
|
||||
options.
|
||||
|
||||
In the UI, click "create endpoint". Fill in the details:
|
||||
|
||||
- Cluster name
|
||||
- Branch
|
||||
- timestamp or LSN
|
||||
- is this for the primary or for a read replica
|
||||
- etc.
|
||||
|
||||
When you click Finish, a named endpoint is created. You can now use the endpoint ID to connect:
|
||||
|
||||
postgres://<username>@<endpoint-id>.endpoint.neon.tech/<dbname>
|
||||
|
||||
|
||||
An endpoint can be assigned a static or dynamic IP address, so that
|
||||
you can connect to it with clients that don't support TLS SNI. Maybe
|
||||
bypass the proxy altogether, but that ought to be invisible to the
|
||||
user.
|
||||
|
||||
You can limit the range of source IP addresses that are allowed to
|
||||
connect to an endpoint. An endpoint can also be exposed in an Amazon
|
||||
VPC, allowing direct connections from applications.
|
||||
|
||||
|
||||
# Footnotes
|
||||
|
||||
[1] I'm not sure how feasible it is to set up and configure something like Kerberos
|
||||
or LDAP in a cloud environment. But in principle I think we should
|
||||
allow customers to have the full power of PostgreSQL, including all
|
||||
authentication options. However, it's up to the customer to configure
|
||||
it correctly.
|
||||
|
||||
[2] Link is a way to both authenticate and to route the connection
|
||||
|
||||
[3] This assumes that cluster-ids are globally unique, across all
|
||||
Neon accounts.
|
||||
|
||||
[4] The syntax accepted in the connection URL is limited by libpq. The
|
||||
only way to pass arbitrary options to the server (or our proxy) is
|
||||
with the "options" keyword, and the options must be percent-encoded. I
|
||||
think the above would work, but I haven't tested it.
|
||||
95
docs/rfcs/README.md
Normal file
@@ -0,0 +1,95 @@
This directory contains Request for Comments documents, or RFCs, for
features or concepts that have been proposed. Alternative names:
technical design doc, ERD, one-pager.

To make a new proposal, create a new text file in this directory and
open a Pull Request with it. That gives others a chance and a forum
to comment and discuss the design.

When a feature is implemented and the code changes are committed, also
include the corresponding RFC in this directory.

Some of the RFCs in this directory have been implemented in some form
or another, others are on the roadmap, and still others are obsolete
and forgotten about. So read them with a grain of salt, but hopefully
even the ones that don't reflect reality give useful context
information.

## What

We use Tech Design RFCs to summarize what we are planning to
implement in our system. These RFCs should be created for large or
non-obvious technical tasks, e.g. changes to the architecture, bigger
tasks that could take over a week, or changes that touch multiple
components or their interaction. RFCs should fit into a couple of
pages, but could be longer on occasion.

## Why

We're using RFCs to enable early review and collaboration, reduce
uncertainty and risk, and save time during the implementation phase
that follows the Tech Design RFC.

Tech Design RFCs also aim to reduce the bus factor and are an
additional measure to keep more peers up to date & familiar with our
design and architecture.

This is crucial for ensuring collaboration across timezones and for
setting up for success a distributed team that works on complex
topics.

## Prior art

- Rust: [https://github.com/rust-lang/rfcs/blob/master/0000-template.md](https://github.com/rust-lang/rfcs/blob/master/0000-template.md)
- React.js: [https://github.com/reactjs/rfcs/blob/main/0000-template.md](https://github.com/reactjs/rfcs/blob/main/0000-template.md)
- Google fuchsia: [https://fuchsia.dev/fuchsia-src/contribute/governance/rfcs/TEMPLATE](https://fuchsia.dev/fuchsia-src/contribute/governance/rfcs/TEMPLATE)
- Apache: [https://cwiki.apache.org/confluence/display/GEODE/RFC+Template](https://cwiki.apache.org/confluence/display/GEODE/RFC+Template) / [https://cwiki.apache.org/confluence/display/GEODE/Lightweight+RFC+Process](https://cwiki.apache.org/confluence/display/GEODE/Lightweight+RFC+Process)

## How

RFC lifecycle:

- An RFC should be submitted in a pull request, with the full RFC text in a committed markdown file and a copy of the Summary and Motivation sections also included in the PR body.
- An RFC should be published for review before most of the actual code is written. This isn't a strict rule; don't hesitate to experiment and build a POC in parallel with writing an RFC.
- Add labels to the PR in the same manner as you do for Issues. Example TBD
- Request review from your peers. Reviewing RFCs from your peers is a priority, the same as reviewing the actual code.
- The Tech Design RFC should evolve based on the feedback received, and further during the development phase if problems are discovered with the chosen approach.
- RFCs stop evolving once consensus is reached or the proposal is implemented and merged.
- RFCs are not intended as documentation that is kept up to date **after** the implementation is finished. Do not update the Tech Design RFC when merged functionality evolves later on. In such a situation a new RFC may be appropriate.

### RFC template

Note that a lot of the sections are marked as 'if relevant'. They are included in the template as a reminder and to help with inspiration.

```
# Name
Created on ..
Implemented on ..

## Summary

## Motivation

## Non Goals (if relevant)

## Impacted components (e.g. pageserver, safekeeper, console, etc)

## Proposed implementation

### Reliability, failure modes and corner cases (if relevant)

### Interaction/Sequence diagram (if relevant)

### Scalability (if relevant)

### Security implications (if relevant)

### Unresolved questions (if relevant)

## Alternative implementation (if relevant)

## Pros/cons of proposed approaches (if relevant)

## Definition of Done (if relevant)

```
docs/rfcs/cluster-size-limits.md (new file, 79 lines)
@@ -0,0 +1,79 @@
Cluster size limits
===================

## Summary

One of the resource consumption limits for free-tier users is a cluster size limit.

To enforce it, we need to calculate the timeline size and check if the limit is reached before relation create/extend operations.
If the limit is reached, the query must fail with some meaningful error/warning.
We may want to exempt some operations from the quota so that users can free up space and fit back under the limit.

The stateless compute node that performs the validation is separate from the storage that calculates the usage, so we need to exchange cluster size information between those components.

## Motivation

Limit the maximum size of a PostgreSQL instance to limit free tier users (and other tiers in the future).
First of all, this is needed to control our free tier production costs.
Another reason to limit resources is risk management: we haven't (fully) tested and optimized zenith for big clusters,
so we don't want to give users access to functionality that we don't think is ready.

## Components

* pageserver - calculate the size consumed by a timeline and add it to the feedback message.
* safekeeper - pass the feedback message from the pageserver to compute.
* compute - receive the feedback message, enforce the size limit based on the GUC `zenith.max_cluster_size`.
* console - set and update the `zenith.max_cluster_size` setting.

## Proposed implementation

First of all, it's necessary to define timeline size.

The current approach is to count all data, including SLRUs (but not WAL).
Here we think of it as a physical disk underneath the Postgres cluster.
This is how the `LOGICAL_TIMELINE_SIZE` metric is implemented in the pageserver.

Alternatively, we could count only relation data, as in pg_database_size().
This approach is somewhat more user-friendly because it is the data that is really affected by the user.
On the other hand, it puts us in a weaker position than other services, e.g., RDS.
We would need to refactor the timeline_size counter or add another counter to implement it.

Timeline size is updated during WAL digestion. It is not versioned and is valid at the last_received_lsn moment.
This size should then be reported to the compute node.

The `current_timeline_size` value is included in the walreceiver's custom feedback message: `ZenithFeedback`.

(PR about the protocol changes: https://github.com/zenithdb/zenith/pull/1037).

This message is received by the safekeeper and propagated to the compute node as a part of `AppendResponse`.

Finally, when the compute node receives the `current_timeline_size` from the safekeeper (or from the pageserver directly), it updates the global variable.

Every zenith_extend() operation then checks if the limit is reached `(current_timeline_size > zenith.max_cluster_size)` and throws an `ERRCODE_DISK_FULL` error if so.
(See the Postgres error codes appendix: [https://www.postgresql.org/docs/devel/errcodes-appendix.html](https://www.postgresql.org/docs/devel/errcodes-appendix.html))
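A minimal sketch of that decision logic, assuming the size counter and the configured limit are available as plain integers; the real check lives in the compute node's extension code, so the function and parameter names here are illustrative only:

```rust
/// Illustrative sketch only: mirrors the check described above.
/// `current_timeline_size` stands in for the global value updated from feedback messages,
/// `max_cluster_size` for the `zenith.max_cluster_size` GUC (None means no limit).
fn extend_allowed(current_timeline_size: u64, max_cluster_size: Option<u64>) -> Result<(), String> {
    match max_cluster_size {
        // Limit configured and already exceeded: reject, analogous to ERRCODE_DISK_FULL.
        Some(limit) if current_timeline_size > limit => Err(format!(
            "cluster size limit exceeded: {current_timeline_size} > {limit} bytes"
        )),
        // No limit configured, or still under the limit: allow the extend.
        _ => Ok(()),
    }
}
```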
TODO:
We can allow autovacuum processes to bypass this check by simply checking `IsAutoVacuumWorkerProcess()`.
It would be nice to allow manual VACUUM and VACUUM FULL to bypass the check as well, but it's not easy to distinguish these operations at the low level.
See issues https://github.com/neondatabase/neon/issues/1245
and https://github.com/zenithdb/zenith/issues/1445

TODO:
We should warn users when the limit is about to be reached.

### **Reliability, failure modes and corner cases**

1. `current_timeline_size` is valid at the last LSN received and digested by the pageserver.

If the pageserver lags behind the compute node, `current_timeline_size` will lag too. This lag can be tuned using backpressure, but it is not expected to be 0 all the time.

So transactions that happen in this LSN range may cause the limit to be exceeded, especially operations that generate (e.g., CREATE DATABASE) or free (e.g., TRUNCATE) a lot of data pages while generating a small amount of WAL. Are there other operations like this?

Currently, CREATE DATABASE operations are restricted in the console, so this is not an issue.


### **Security implications**

We treat compute as an untrusted component. That's why we try to isolate it with a secure container runtime or a VM.
Malicious users may change `zenith.max_cluster_size`, so we need an extra size limit check.
To cover this case, we also monitor the compute node size in the console.
docs/rfcs/images/storage.jpeg (new binary file, 421 KiB; not shown)
@@ -6,7 +6,6 @@ If there's no such file during `init` phase of the server, it creates the file i
|
||||
There's a possibility to pass an arbitrary config value to the pageserver binary as an argument: such values override
|
||||
the values in the config file, if any are specified for the same key and get into the final config during init phase.
|
||||
|
||||
|
||||
### Config example
|
||||
|
||||
```toml
|
||||
@@ -35,9 +34,9 @@ Yet, it validates the config values it can (e.g. postgres install dir) and error
|
||||
|
||||
Note the `[remote_storage]` section: it's a [table](https://toml.io/en/v1.0.0#table) in TOML specification and
|
||||
|
||||
* either has to be placed in the config after the table-less values such as `initial_superuser_name = 'zenith_admin'`
|
||||
- either has to be placed in the config after the table-less values such as `initial_superuser_name = 'zenith_admin'`
|
||||
|
||||
* or can be placed anywhere if rewritten in identical form as [inline table](https://toml.io/en/v1.0.0#inline-table): `remote_storage = {foo = 2}`
|
||||
- or can be placed anywhere if rewritten in identical form as [inline table](https://toml.io/en/v1.0.0#inline-table): `remote_storage = {foo = 2}`
|
||||
|
||||
### Config values
|
||||
|
||||
@@ -57,7 +56,7 @@ but it will trigger a checkpoint operation to get it back below the
|
||||
limit.
|
||||
|
||||
`checkpoint_distance` also determines how much WAL needs to be kept
|
||||
durable in the safekeeper. The safekeeper must have capacity to hold
|
||||
durable in the safekeeper. The safekeeper must have capacity to hold
|
||||
this much WAL, with some headroom, otherwise you can get stuck in a
|
||||
situation where the safekeeper is full and stops accepting new WAL,
|
||||
but the pageserver is not flushing out and releasing the space in the
|
||||
@@ -68,11 +67,15 @@ S3.
|
||||
|
||||
The unit is # of bytes.
|
||||
|
||||
#### checkpoint_period
|
||||
#### compaction_period
|
||||
|
||||
The pageserver checks whether `checkpoint_distance` has been reached
|
||||
every `checkpoint_period` seconds. Default is 1 s, which should be
|
||||
fine.
|
||||
Every `compaction_period` seconds, the page server checks if
|
||||
maintenance operations, like compaction, are needed on the layer
|
||||
files. Default is 1 s, which should be fine.
|
||||
|
||||
#### compaction_target_size
|
||||
|
||||
File sizes for L0 delta and L1 image layers. Default is 128MB.
|
||||
|
||||
#### gc_horizon
|
||||
|
||||
@@ -85,6 +88,14 @@ away.
|
||||
|
||||
Interval at which garbage collection is triggered. Default is 100 s.
|
||||
|
||||
#### image_creation_threshold

L0 delta layer threshold for L1 image layer creation. Default is 3.

#### pitr_interval

WAL retention duration for PITR branching. Default is 30 days.

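For orientation, the settings discussed in this section might be combined in a pageserver config file roughly as follows; the values and especially the duration syntax are illustrative, not verified defaults:

```toml
# Illustrative excerpt only; values and duration syntax are examples.
checkpoint_distance = 268435456     # bytes of WAL before a layer flush is triggered
compaction_period = '1 s'           # how often to check whether compaction work is needed
compaction_target_size = 134217728  # ~128 MB target size for L0 delta / L1 image layer files
image_creation_threshold = 3        # L0 delta layers before an L1 image layer is created
pitr_interval = '30 days'           # WAL retention duration for PITR branching
```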
#### initial_superuser_name
|
||||
|
||||
Name of the initial superuser role, passed to initdb when a new tenant
|
||||
@@ -147,13 +158,16 @@ bucket_name = 'some-sample-bucket'
|
||||
# Name of the region where the bucket is located at
|
||||
bucket_region = 'eu-north-1'
|
||||
|
||||
# Access key to connect to the bucket ("login" part of the credentials)
|
||||
access_key_id = 'SOMEKEYAAAAASADSAH*#'
|
||||
# A "subfolder" in the bucket, to use the same bucket separately by multiple pageservers at once.
|
||||
# Optional, pageserver uses entire bucket if the prefix is not specified.
|
||||
prefix_in_bucket = '/some/prefix/'
|
||||
|
||||
# Secret access key to connect to the bucket ("password" part of the credentials)
|
||||
secret_access_key = 'SOMEsEcReTsd292v'
|
||||
# S3 API query limit to avoid getting errors/throttling from AWS.
|
||||
concurrency_limit = 100
|
||||
```
|
||||
|
||||
If IAM-based bucket access is not used, set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables to provide the access credentials.
|
||||
|
||||
###### General remote storage configuration
|
||||
|
||||
The pageserver allows only one remote storage to be configured at a time and errors if parameters from multiple different remote configurations are used.
|
||||
@@ -163,14 +177,13 @@ Besides, there are parameters common for all types of remote storage that can be
|
||||
|
||||
```toml
|
||||
[remote_storage]
|
||||
# Max number of concurrent connections to open for uploading to or downloading from the remote storage.
|
||||
max_concurrent_sync = 100
|
||||
# Max number of timelines synchronized (layers uploaded or downloaded) with the remote storage at the same time.
|
||||
max_concurrent_syncs = 50
|
||||
|
||||
# Max number of errors a single task can have before it's considered failed and not attempted to run anymore.
|
||||
max_sync_errors = 10
|
||||
```
|
||||
|
||||
|
||||
## safekeeper
|
||||
|
||||
TODO
|
||||
|
||||
@@ -28,12 +28,7 @@ The pageserver has a few different duties:
|
||||
- Receive WAL from the WAL service and decode it.
|
||||
- Replay WAL that's applicable to the chunks that the Page Server maintains
|
||||
|
||||
For more detailed info, see `/pageserver/README`
|
||||
|
||||
`/postgres_ffi`:
|
||||
|
||||
Utility functions for interacting with PostgreSQL file formats.
|
||||
Misc constants, copied from PostgreSQL headers.
|
||||
For more detailed info, see [/pageserver/README](/pageserver/README.md)
|
||||
|
||||
`/proxy`:
|
||||
|
||||
@@ -57,29 +52,38 @@ PostgreSQL extension that implements storage manager API and network communicati
|
||||
|
||||
PostgreSQL extension that contains functions needed for testing and debugging.
|
||||
|
||||
`/walkeeper`:
|
||||
`/safekeeper`:
|
||||
|
||||
The zenith WAL service that receives WAL from a primary compute nodes and streams it to the pageserver.
|
||||
It acts as a holding area and redistribution center for recently generated WAL.
|
||||
|
||||
For more detailed info, see `/walkeeper/README`
|
||||
For more detailed info, see [/safekeeper/README](/safekeeper/README.md)
|
||||
|
||||
`/workspace_hack`:
|
||||
The workspace_hack crate exists only to pin down some dependencies.
|
||||
|
||||
We use [cargo-hakari](https://crates.io/crates/cargo-hakari) for automation.
|
||||
|
||||
`/zenith`
|
||||
|
||||
Main entry point for the 'zenith' CLI utility.
|
||||
TODO: Doesn't it belong to control_plane?
|
||||
|
||||
`/zenith_metrics`:
|
||||
`/libs`:
|
||||
Unites granular neon helper crates under the hood.
|
||||
|
||||
`/libs/postgres_ffi`:
|
||||
|
||||
Utility functions for interacting with PostgreSQL file formats.
|
||||
Misc constants, copied from PostgreSQL headers.
|
||||
|
||||
`/libs/utils`:
|
||||
Generic helpers that are shared between other crates in this repository.
|
||||
A subject for future modularization.
|
||||
|
||||
`/libs/metrics`:
|
||||
Helpers for exposing Prometheus metrics from the server.
|
||||
|
||||
`/zenith_utils`:
|
||||
|
||||
Helpers that are shared between other crates in this repository.
|
||||
|
||||
## Using Python
|
||||
Note that Debian/Ubuntu Python packages are stale, as it commonly happens,
|
||||
so manual installation of dependencies is not recommended.
|
||||
@@ -87,31 +91,29 @@ so manual installation of dependencies is not recommended.
|
||||
A single virtual environment with all dependencies is described in the single `Pipfile`.
|
||||
|
||||
### Prerequisites
|
||||
- Install Python 3.7 (the minimal supported version)
|
||||
- Later version (e.g. 3.8) is ok if you don't write Python code
|
||||
- You can install Python 3.7 separately, e.g.:
|
||||
- Install Python 3.7 (the minimal supported version) or greater.
- Our setup with poetry should work with newer Python versions too, so feel free to open an issue with the `c/test-runner` label if something doesn't work as expected.
- If you have trouble with another version, you can resolve it by installing Python 3.7 separately, via pyenv or via a system package manager, e.g.:
|
||||
```bash
|
||||
# In Ubuntu
|
||||
sudo add-apt-repository ppa:deadsnakes/ppa
|
||||
sudo apt update
|
||||
sudo apt install python3.7
|
||||
```
|
||||
- Install `pipenv`
|
||||
- Exact version of `pipenv` is not important, you can use Debian/Ubuntu package `pipenv`.
|
||||
- Install dependencies via either
|
||||
* `pipenv --python 3.7 install --dev` if you will write Python code, or
|
||||
* `pipenv install` if you only want to run Python scripts and don't have Python 3.7.
|
||||
- Install `poetry`
|
||||
- The exact version of `poetry` is not important; see the installation instructions on poetry's [website](https://python-poetry.org/docs/#installation).
- Install dependencies via `./scripts/pysync`. Note that CI uses Python 3.7, so if you have a different version, some linting tools can yield different results locally vs. in CI.
|
||||
|
||||
Run `pipenv shell` to activate the virtual environment.
|
||||
Alternatively, use `pipenv run` to run a single command in the venv, e.g. `pipenv run pytest`.
|
||||
Run `poetry shell` to activate the virtual environment.
|
||||
Alternatively, use `poetry run` to run a single command in the venv, e.g. `poetry run pytest`.
|
||||
|
||||
### Obligatory checks
|
||||
We force code formatting via `yapf` and type hints via `mypy`.
|
||||
Run the following commands in the repository's root (next to `setup.cfg`):
|
||||
|
||||
```bash
|
||||
pipenv run yapf -ri . # All code is reformatted
|
||||
pipenv run mypy . # Ensure there are no typing errors
|
||||
poetry run yapf -ri . # All code is reformatted
|
||||
poetry run mypy . # Ensure there are no typing errors
|
||||
```
|
||||
|
||||
**WARNING**: do not run `mypy` from a directory other than the root of the repository.
|
||||
@@ -123,17 +125,6 @@ Also consider:
|
||||
* Adding more type hints to your code to avoid `Any`.
|
||||
|
||||
### Changing dependencies
|
||||
You have to update `Pipfile.lock` if you have changed `Pipfile`:
|
||||
To add a new package or change an existing one, you can use `poetry add` or `poetry update`, or edit `pyproject.toml` manually. Do not forget to run `poetry lock` in the latter case.
|
||||
|
||||
```bash
|
||||
pipenv --python 3.7 install --dev # Re-create venv for Python 3.7 and install recent pipenv inside
|
||||
pipenv run pipenv --version # Should be at least 2021.5.29
|
||||
pipenv run pipenv lock # Regenerate Pipfile.lock
|
||||
```
|
||||
|
||||
As the minimal supported version is Python 3.7 and we use it in CI,
|
||||
you have to use a Python 3.7 environment when updating `Pipfile.lock`.
|
||||
Otherwise some back-compatibility packages will be missing.
|
||||
|
||||
It is also important to run recent `pipenv`.
|
||||
Older versions remove markers from `Pipfile.lock`.
|
||||
More details are available in poetry's [documentation](https://python-poetry.org/docs/).
|
||||
|
||||
libs/etcd_broker/Cargo.toml (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
[package]
|
||||
name = "etcd_broker"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
etcd-client = "0.9.0"
|
||||
regex = "1.4.5"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
serde_with = "1.12.0"
|
||||
|
||||
utils = { path = "../utils" }
|
||||
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
||||
tokio = "1"
|
||||
tracing = "0.1"
|
||||
thiserror = "1"
|
||||
libs/etcd_broker/src/lib.rs (new file, 335 lines)
@@ -0,0 +1,335 @@
|
||||
//! A set of primitives to access shared data/updates, propagated via an etcd broker (not persistent).
|
||||
//! Intended to connect services to each other, not to store their data.
|
||||
use std::{
|
||||
collections::{hash_map, HashMap},
|
||||
fmt::Display,
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
use regex::{Captures, Regex};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{serde_as, DisplayFromStr};
|
||||
|
||||
pub use etcd_client::*;
|
||||
|
||||
use tokio::{sync::mpsc, task::JoinHandle};
|
||||
use tracing::*;
|
||||
use utils::{
|
||||
lsn::Lsn,
|
||||
zid::{ZNodeId, ZTenantId, ZTenantTimelineId},
|
||||
};
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
struct SafekeeperTimeline {
|
||||
safekeeper_id: ZNodeId,
|
||||
info: SkTimelineInfo,
|
||||
}
|
||||
|
||||
/// Published data about safekeeper's timeline. Fields made optional for easy migrations.
|
||||
#[serde_as]
|
||||
#[derive(Debug, Deserialize, Serialize)]
|
||||
pub struct SkTimelineInfo {
|
||||
/// Term of the last entry.
|
||||
pub last_log_term: Option<u64>,
|
||||
/// LSN of the last record.
|
||||
#[serde_as(as = "Option<DisplayFromStr>")]
|
||||
#[serde(default)]
|
||||
pub flush_lsn: Option<Lsn>,
|
||||
/// Up to which LSN safekeeper regards its WAL as committed.
|
||||
#[serde_as(as = "Option<DisplayFromStr>")]
|
||||
#[serde(default)]
|
||||
pub commit_lsn: Option<Lsn>,
|
||||
/// LSN up to which safekeeper offloaded WAL to s3.
|
||||
#[serde_as(as = "Option<DisplayFromStr>")]
|
||||
#[serde(default)]
|
||||
pub s3_wal_lsn: Option<Lsn>,
|
||||
/// LSN of last checkpoint uploaded by pageserver.
|
||||
#[serde_as(as = "Option<DisplayFromStr>")]
|
||||
#[serde(default)]
|
||||
pub remote_consistent_lsn: Option<Lsn>,
|
||||
#[serde_as(as = "Option<DisplayFromStr>")]
|
||||
#[serde(default)]
|
||||
pub peer_horizon_lsn: Option<Lsn>,
|
||||
#[serde(default)]
|
||||
pub wal_stream_connection_string: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum BrokerError {
|
||||
#[error("Etcd client error: {0}. Context: {1}")]
|
||||
EtcdClient(etcd_client::Error, String),
|
||||
#[error("Error during parsing etcd data: {0}")]
|
||||
ParsingError(String),
|
||||
#[error("Internal error: {0}")]
|
||||
InternalError(String),
|
||||
}
|
||||
|
||||
/// A way to control the data retrieval from a certain subscription.
|
||||
pub struct SkTimelineSubscription {
|
||||
safekeeper_timeline_updates:
|
||||
mpsc::UnboundedReceiver<HashMap<ZTenantTimelineId, HashMap<ZNodeId, SkTimelineInfo>>>,
|
||||
kind: SkTimelineSubscriptionKind,
|
||||
watcher_handle: JoinHandle<Result<(), BrokerError>>,
|
||||
watcher: Watcher,
|
||||
}
|
||||
|
||||
impl SkTimelineSubscription {
|
||||
/// Asynchronously polls for more data from the subscription, suspending the current future if there's no data sent yet.
|
||||
pub async fn fetch_data(
|
||||
&mut self,
|
||||
) -> Option<HashMap<ZTenantTimelineId, HashMap<ZNodeId, SkTimelineInfo>>> {
|
||||
self.safekeeper_timeline_updates.recv().await
|
||||
}
|
||||
|
||||
/// Cancels the subscription, stopping the data poller and waiting for it to shut down.
|
||||
pub async fn cancel(mut self) -> Result<(), BrokerError> {
|
||||
self.watcher.cancel().await.map_err(|e| {
|
||||
BrokerError::EtcdClient(
|
||||
e,
|
||||
format!(
|
||||
"Failed to cancel timeline subscription, kind: {:?}",
|
||||
self.kind
|
||||
),
|
||||
)
|
||||
})?;
|
||||
self.watcher_handle.await.map_err(|e| {
|
||||
BrokerError::InternalError(format!(
|
||||
"Failed to join the timeline updates task, kind: {:?}, error: {e}",
|
||||
self.kind
|
||||
))
|
||||
})?
|
||||
}
|
||||
}
|
||||
|
||||
/// The subscription kind to the timeline updates from safekeeper.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct SkTimelineSubscriptionKind {
|
||||
broker_prefix: String,
|
||||
kind: SubscriptionKind,
|
||||
}
|
||||
|
||||
impl SkTimelineSubscriptionKind {
|
||||
pub fn all(broker_prefix: String) -> Self {
|
||||
Self {
|
||||
broker_prefix,
|
||||
kind: SubscriptionKind::All,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn tenant(broker_prefix: String, tenant: ZTenantId) -> Self {
|
||||
Self {
|
||||
broker_prefix,
|
||||
kind: SubscriptionKind::Tenant(tenant),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn timeline(broker_prefix: String, timeline: ZTenantTimelineId) -> Self {
|
||||
Self {
|
||||
broker_prefix,
|
||||
kind: SubscriptionKind::Timeline(timeline),
|
||||
}
|
||||
}
|
||||
|
||||
fn watch_regex(&self) -> Regex {
|
||||
match self.kind {
|
||||
SubscriptionKind::All => Regex::new(&format!(
|
||||
r"^{}/([[:xdigit:]]+)/([[:xdigit:]]+)/safekeeper/([[:digit:]])$",
|
||||
self.broker_prefix
|
||||
))
|
||||
.expect("wrong regex for 'everything' subscription"),
|
||||
SubscriptionKind::Tenant(tenant_id) => Regex::new(&format!(
|
||||
r"^{}/{tenant_id}/([[:xdigit:]]+)/safekeeper/([[:digit:]])$",
|
||||
self.broker_prefix
|
||||
))
|
||||
.expect("wrong regex for 'tenant' subscription"),
|
||||
SubscriptionKind::Timeline(ZTenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
}) => Regex::new(&format!(
|
||||
r"^{}/{tenant_id}/{timeline_id}/safekeeper/([[:digit:]])$",
|
||||
self.broker_prefix
|
||||
))
|
||||
.expect("wrong regex for 'timeline' subscription"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Etcd key to use for watching a certain timeline updates from safekeepers.
|
||||
pub fn watch_key(&self) -> String {
|
||||
match self.kind {
|
||||
SubscriptionKind::All => self.broker_prefix.to_string(),
|
||||
SubscriptionKind::Tenant(tenant_id) => {
|
||||
format!("{}/{tenant_id}/safekeeper", self.broker_prefix)
|
||||
}
|
||||
SubscriptionKind::Timeline(ZTenantTimelineId {
|
||||
tenant_id,
|
||||
timeline_id,
|
||||
}) => format!(
|
||||
"{}/{tenant_id}/{timeline_id}/safekeeper",
|
||||
self.broker_prefix
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
enum SubscriptionKind {
|
||||
/// Get every timeline update.
|
||||
All,
|
||||
/// Get certain tenant timelines' updates.
|
||||
Tenant(ZTenantId),
|
||||
/// Get certain timeline updates.
|
||||
Timeline(ZTenantTimelineId),
|
||||
}
|
||||
|
||||
/// Creates a background task to poll etcd for timeline updates from safekeepers.
|
||||
/// Stops and returns `Err` on any error during etcd communication.
|
||||
/// Watches the key changes until either the watcher is cancelled via etcd or the subscription cancellation handle,
|
||||
/// exiting normally in such cases.
|
||||
pub async fn subscribe_to_safekeeper_timeline_updates(
|
||||
client: &mut Client,
|
||||
subscription: SkTimelineSubscriptionKind,
|
||||
) -> Result<SkTimelineSubscription, BrokerError> {
|
||||
info!("Subscribing to timeline updates, subscription kind: {subscription:?}");
|
||||
|
||||
let (watcher, mut stream) = client
|
||||
.watch(
|
||||
subscription.watch_key(),
|
||||
Some(WatchOptions::new().with_prefix()),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
BrokerError::EtcdClient(
|
||||
e,
|
||||
format!("Failed to init the watch for subscription {subscription:?}"),
|
||||
)
|
||||
})?;
|
||||
|
||||
let (timeline_updates_sender, safekeeper_timeline_updates) = mpsc::unbounded_channel();
|
||||
|
||||
let subscription_kind = subscription.kind;
|
||||
let regex = subscription.watch_regex();
|
||||
let watcher_handle = tokio::spawn(async move {
|
||||
while let Some(resp) = stream.message().await.map_err(|e| BrokerError::InternalError(format!(
|
||||
"Failed to get messages from the subscription stream, kind: {subscription_kind:?}, error: {e}"
|
||||
)))? {
|
||||
if resp.canceled() {
|
||||
info!("Watch for timeline updates subscription was canceled, exiting");
|
||||
break;
|
||||
}
|
||||
|
||||
let mut timeline_updates: HashMap<ZTenantTimelineId, HashMap<ZNodeId, SkTimelineInfo>> =
|
||||
HashMap::new();
|
||||
|
||||
let events = resp.events();
|
||||
debug!("Processing {} events", events.len());
|
||||
|
||||
for event in events {
|
||||
if EventType::Put == event.event_type() {
|
||||
if let Some(kv) = event.kv() {
|
||||
match parse_etcd_key_value(subscription_kind, ®ex, kv) {
|
||||
Ok(Some((zttid, timeline))) => {
|
||||
match timeline_updates
|
||||
.entry(zttid)
|
||||
.or_default()
|
||||
.entry(timeline.safekeeper_id)
|
||||
{
|
||||
hash_map::Entry::Occupied(mut o) => {
|
||||
if o.get().flush_lsn < timeline.info.flush_lsn {
|
||||
o.insert(timeline.info);
|
||||
}
|
||||
}
|
||||
hash_map::Entry::Vacant(v) => {
|
||||
v.insert(timeline.info);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(None) => {}
|
||||
Err(e) => error!("Failed to parse timeline update: {e}"),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = timeline_updates_sender.send(timeline_updates) {
|
||||
info!("Timeline updates sender got dropped, exiting: {e}");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
Ok(SkTimelineSubscription {
|
||||
kind: subscription,
|
||||
safekeeper_timeline_updates,
|
||||
watcher_handle,
|
||||
watcher,
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_etcd_key_value(
|
||||
subscription_kind: SubscriptionKind,
|
||||
regex: &Regex,
|
||||
kv: &KeyValue,
|
||||
) -> Result<Option<(ZTenantTimelineId, SafekeeperTimeline)>, BrokerError> {
|
||||
let caps = if let Some(caps) = regex.captures(kv.key_str().map_err(|e| {
|
||||
BrokerError::EtcdClient(e, format!("Failed to represent kv {kv:?} as key str"))
|
||||
})?) {
|
||||
caps
|
||||
} else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
let (zttid, safekeeper_id) = match subscription_kind {
|
||||
SubscriptionKind::All => (
|
||||
ZTenantTimelineId::new(
|
||||
parse_capture(&caps, 1).map_err(BrokerError::ParsingError)?,
|
||||
parse_capture(&caps, 2).map_err(BrokerError::ParsingError)?,
|
||||
),
|
||||
ZNodeId(parse_capture(&caps, 3).map_err(BrokerError::ParsingError)?),
|
||||
),
|
||||
SubscriptionKind::Tenant(tenant_id) => (
|
||||
ZTenantTimelineId::new(
|
||||
tenant_id,
|
||||
parse_capture(&caps, 1).map_err(BrokerError::ParsingError)?,
|
||||
),
|
||||
ZNodeId(parse_capture(&caps, 2).map_err(BrokerError::ParsingError)?),
|
||||
),
|
||||
SubscriptionKind::Timeline(zttid) => (
|
||||
zttid,
|
||||
ZNodeId(parse_capture(&caps, 1).map_err(BrokerError::ParsingError)?),
|
||||
),
|
||||
};
|
||||
|
||||
let info_str = kv.value_str().map_err(|e| {
|
||||
BrokerError::EtcdClient(e, format!("Failed to represent kv {kv:?} as value str"))
|
||||
})?;
|
||||
Ok(Some((
|
||||
zttid,
|
||||
SafekeeperTimeline {
|
||||
safekeeper_id,
|
||||
info: serde_json::from_str(info_str).map_err(|e| {
|
||||
BrokerError::ParsingError(format!(
|
||||
"Failed to parse '{info_str}' as safekeeper timeline info: {e}"
|
||||
))
|
||||
})?,
|
||||
},
|
||||
)))
|
||||
}
|
||||
|
||||
fn parse_capture<T>(caps: &Captures, index: usize) -> Result<T, String>
|
||||
where
|
||||
T: FromStr,
|
||||
<T as FromStr>::Err: Display,
|
||||
{
|
||||
let capture_match = caps
|
||||
.get(index)
|
||||
.ok_or_else(|| format!("Failed to get capture match at index {index}"))?
|
||||
.as_str();
|
||||
capture_match.parse().map_err(|e| {
|
||||
format!(
|
||||
"Failed to parse {} from {capture_match}: {e}",
|
||||
std::any::type_name::<T>()
|
||||
)
|
||||
})
|
||||
}
|
||||
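To show how the API above is meant to be driven, here is a hypothetical usage sketch that is not part of this diff; the broker prefix value is made up, and the etcd client is assumed to be connected already:

```rust
// Hypothetical consumer of the etcd_broker API above; not part of the diff.
async fn watch_all_safekeeper_updates(
    client: &mut etcd_broker::Client,
) -> Result<(), etcd_broker::BrokerError> {
    // Subscribe to updates for every timeline under a made-up broker prefix.
    let kind = etcd_broker::SkTimelineSubscriptionKind::all("neon".to_string());
    let mut subscription =
        etcd_broker::subscribe_to_safekeeper_timeline_updates(client, kind).await?;

    // Poll until the updates channel closes (e.g. the watcher task exits).
    while let Some(updates) = subscription.fetch_data().await {
        println!("received updates for {} timelines", updates.len());
    }

    // Cancel the etcd watch and wait for the poller task to finish.
    subscription.cancel().await
}
```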
libs/metrics/Cargo.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
[package]
|
||||
name = "metrics"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
prometheus = {version = "0.13", default_features=false} # removes protobuf dependency
|
||||
libc = "0.2"
|
||||
lazy_static = "1.4"
|
||||
once_cell = "1.8.0"
|
||||
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
||||
@@ -8,8 +8,8 @@ use std::io::{Read, Result, Write};
|
||||
///
|
||||
/// ```
|
||||
/// # use std::io::{Result, Read};
|
||||
/// # use zenith_metrics::{register_int_counter, IntCounter};
|
||||
/// # use zenith_metrics::CountedReader;
|
||||
/// # use metrics::{register_int_counter, IntCounter};
|
||||
/// # use metrics::CountedReader;
|
||||
/// #
|
||||
/// # lazy_static::lazy_static! {
|
||||
/// # static ref INT_COUNTER: IntCounter = register_int_counter!(
|
||||
@@ -83,8 +83,8 @@ impl<T: Read> Read for CountedReader<'_, T> {
|
||||
///
|
||||
/// ```
|
||||
/// # use std::io::{Result, Write};
|
||||
/// # use zenith_metrics::{register_int_counter, IntCounter};
|
||||
/// # use zenith_metrics::CountedWriter;
|
||||
/// # use metrics::{register_int_counter, IntCounter};
|
||||
/// # use metrics::CountedWriter;
|
||||
/// #
|
||||
/// # lazy_static::lazy_static! {
|
||||
/// # static ref INT_COUNTER: IntCounter = register_int_counter!(
|
||||
@@ -1,10 +1,7 @@
|
||||
[package]
|
||||
name = "postgres_ffi"
|
||||
version = "0.1.0"
|
||||
authors = ["Heikki Linnakangas <heikki@zenith.tech>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
chrono = "0.4.19"
|
||||
@@ -20,8 +17,8 @@ log = "0.4.14"
|
||||
memoffset = "0.6.2"
|
||||
thiserror = "1.0"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
workspace_hack = { path = "../workspace_hack" }
|
||||
zenith_utils = { path = "../zenith_utils" }
|
||||
utils = { path = "../utils" }
|
||||
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
||||
|
||||
[build-dependencies]
|
||||
bindgen = "0.59.1"
|
||||
@@ -88,8 +88,8 @@ fn main() {
|
||||
// 'pg_config --includedir-server' would perhaps be the more proper way to find it,
|
||||
// but this will do for now.
|
||||
//
|
||||
.clang_arg("-I../tmp_install/include/server")
|
||||
.clang_arg("-I../tmp_install/include/postgresql/server")
|
||||
.clang_arg("-I../../tmp_install/include/server")
|
||||
.clang_arg("-I../../tmp_install/include/postgresql/server")
|
||||
//
|
||||
// Finish the builder and generate the bindings.
|
||||
//
|
||||
@@ -43,7 +43,7 @@ impl ControlFileData {
|
||||
/// Interpret a slice of bytes as a Postgres control file.
|
||||
///
|
||||
pub fn decode(buf: &[u8]) -> Result<ControlFileData> {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
use utils::bin_ser::LeSer;
|
||||
|
||||
// Check that the slice has the expected size. The control file is
|
||||
// padded with zeros up to a 512 byte sector size, so accept a
|
||||
@@ -77,7 +77,7 @@ impl ControlFileData {
|
||||
///
|
||||
/// The CRC is recomputed to match the contents of the fields.
|
||||
pub fn encode(&self) -> Bytes {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
use utils::bin_ser::LeSer;
|
||||
|
||||
// Serialize into a new buffer.
|
||||
let b = self.ser().unwrap();
|
||||
@@ -24,6 +24,9 @@ pub const VISIBILITYMAP_FORKNUM: u8 = 2;
|
||||
pub const INIT_FORKNUM: u8 = 3;
|
||||
|
||||
// From storage_xlog.h
|
||||
pub const XLOG_SMGR_CREATE: u8 = 0x10;
|
||||
pub const XLOG_SMGR_TRUNCATE: u8 = 0x20;
|
||||
|
||||
pub const SMGR_TRUNCATE_HEAP: u32 = 0x0001;
|
||||
pub const SMGR_TRUNCATE_VM: u32 = 0x0002;
|
||||
pub const SMGR_TRUNCATE_FSM: u32 = 0x0004;
|
||||
@@ -113,7 +116,6 @@ pub const XACT_XINFO_HAS_TWOPHASE: u32 = 1u32 << 4;
|
||||
// From pg_control.h and rmgrlist.h
|
||||
pub const XLOG_NEXTOID: u8 = 0x30;
|
||||
pub const XLOG_SWITCH: u8 = 0x40;
|
||||
pub const XLOG_SMGR_TRUNCATE: u8 = 0x20;
|
||||
pub const XLOG_FPI_FOR_HINT: u8 = 0xA0;
|
||||
pub const XLOG_FPI: u8 = 0xB0;
|
||||
pub const DB_SHUTDOWNED: u32 = 1;
|
||||
@@ -4,7 +4,7 @@
|
||||
//! This understands the WAL page and record format, enough to figure out where the WAL record
|
||||
//! boundaries are, and to reassemble WAL records that cross page boundaries.
|
||||
//!
|
||||
//! This functionality is needed by both the pageserver and the walkeepers. The pageserver needs
|
||||
//! This functionality is needed by both the pageserver and the safekeepers. The pageserver needs
|
||||
//! to look deeper into the WAL records to also understand which blocks they modify, the code
|
||||
//! for that is in pageserver/src/walrecord.rs
|
||||
//!
|
||||
@@ -18,7 +18,7 @@ use crc32c::*;
|
||||
use log::*;
|
||||
use std::cmp::min;
|
||||
use thiserror::Error;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
pub struct WalStreamDecoder {
|
||||
lsn: Lsn,
|
||||
@@ -89,7 +89,12 @@ impl WalStreamDecoder {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let hdr = XLogLongPageHeaderData::from_bytes(&mut self.inputbuf);
|
||||
let hdr = XLogLongPageHeaderData::from_bytes(&mut self.inputbuf).map_err(|e| {
|
||||
WalDecodeError {
|
||||
msg: format!("long header deserialization failed {}", e),
|
||||
lsn: self.lsn,
|
||||
}
|
||||
})?;
|
||||
|
||||
if hdr.std.xlp_pageaddr != self.lsn.0 {
|
||||
return Err(WalDecodeError {
|
||||
@@ -106,7 +111,12 @@ impl WalStreamDecoder {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let hdr = XLogPageHeaderData::from_bytes(&mut self.inputbuf);
|
||||
let hdr = XLogPageHeaderData::from_bytes(&mut self.inputbuf).map_err(|e| {
|
||||
WalDecodeError {
|
||||
msg: format!("header deserialization failed {}", e),
|
||||
lsn: self.lsn,
|
||||
}
|
||||
})?;
|
||||
|
||||
if hdr.xlp_pageaddr != self.lsn.0 {
|
||||
return Err(WalDecodeError {
|
||||
@@ -188,7 +198,13 @@ impl WalStreamDecoder {
|
||||
}
|
||||
|
||||
// We now have a record in the 'recordbuf' local variable.
|
||||
let xlogrec = XLogRecord::from_slice(&recordbuf[0..XLOG_SIZE_OF_XLOG_RECORD]);
|
||||
let xlogrec =
|
||||
XLogRecord::from_slice(&recordbuf[0..XLOG_SIZE_OF_XLOG_RECORD]).map_err(|e| {
|
||||
WalDecodeError {
|
||||
msg: format!("xlog record deserialization failed {}", e),
|
||||
lsn: self.lsn,
|
||||
}
|
||||
})?;
|
||||
|
||||
let mut crc = 0;
|
||||
crc = crc32c_append(crc, &recordbuf[XLOG_RECORD_CRC_OFFS + 4..]);
|
||||
@@ -15,7 +15,7 @@ use crate::XLogPageHeaderData;
|
||||
use crate::XLogRecord;
|
||||
use crate::XLOG_PAGE_MAGIC;
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use anyhow::bail;
|
||||
use byteorder::{ByteOrder, LittleEndian};
|
||||
use bytes::BytesMut;
|
||||
use bytes::{Buf, Bytes};
|
||||
@@ -28,7 +28,9 @@ use std::io::prelude::*;
|
||||
use std::io::SeekFrom;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::SystemTime;
|
||||
use zenith_utils::lsn::Lsn;
|
||||
use utils::bin_ser::DeserializeError;
|
||||
use utils::bin_ser::SerializeError;
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
pub const XLOG_FNAME_LEN: usize = 24;
|
||||
pub const XLOG_BLCKSZ: usize = 8192;
|
||||
@@ -51,6 +53,13 @@ pub type TimeLineID = u32;
|
||||
pub type TimestampTz = i64;
|
||||
pub type XLogSegNo = u64;
|
||||
|
||||
/// Interval of checkpointing the metadata file. We should store the metadata file to enforce
/// the invariant that checkpoint.nextXid is larger than any XID in WAL.
/// But flushing the checkpoint file for each transaction seems too expensive,
/// so XID_CHECKPOINT_INTERVAL is used to forward-align nextXid and thus perform
/// a metadata checkpoint only once per XID_CHECKPOINT_INTERVAL transactions.
/// XID_CHECKPOINT_INTERVAL should not be larger than BLCKSZ*CLOG_XACTS_PER_BYTE
/// in order to let the CLOG_TRUNCATE mechanism correctly extend the CLOG.
|
||||
const XID_CHECKPOINT_INTERVAL: u32 = 1024;
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
@@ -111,11 +120,15 @@ pub fn normalize_lsn(lsn: Lsn, seg_sz: usize) -> Lsn {
|
||||
}
|
||||
|
||||
pub fn get_current_timestamp() -> TimestampTz {
|
||||
to_pg_timestamp(SystemTime::now())
|
||||
}
|
||||
|
||||
pub fn to_pg_timestamp(time: SystemTime) -> TimestampTz {
|
||||
const UNIX_EPOCH_JDATE: u64 = 2440588; /* == date2j(1970, 1, 1) */
|
||||
const POSTGRES_EPOCH_JDATE: u64 = 2451545; /* == date2j(2000, 1, 1) */
|
||||
const SECS_PER_DAY: u64 = 86400;
|
||||
const USECS_PER_SEC: u64 = 1000000;
|
||||
match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
|
||||
match time.duration_since(SystemTime::UNIX_EPOCH) {
|
||||
Ok(n) => {
|
||||
((n.as_secs() - ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY))
|
||||
* USECS_PER_SEC
|
||||
@@ -125,13 +138,15 @@ pub fn get_current_timestamp() -> TimestampTz {
|
||||
}
|
||||
}
|
||||
|
||||
/// Return offset of the last valid record in the segment segno, starting
|
||||
/// looking at start_offset. Returns start_offset if no records found.
|
||||
fn find_end_of_wal_segment(
|
||||
data_dir: &Path,
|
||||
segno: XLogSegNo,
|
||||
tli: TimeLineID,
|
||||
wal_seg_size: usize,
|
||||
start_offset: usize, // start reading at this point
|
||||
) -> Result<u32> {
|
||||
) -> anyhow::Result<u32> {
|
||||
// step back to the beginning of the page to read it in...
|
||||
let mut offs: usize = start_offset - start_offset % XLOG_BLCKSZ;
|
||||
let mut contlen: usize = 0;
|
||||
@@ -140,7 +155,7 @@ fn find_end_of_wal_segment(
|
||||
let mut rec_offs: usize = 0;
|
||||
let mut buf = [0u8; XLOG_BLCKSZ];
|
||||
let file_name = XLogFileName(tli, segno, wal_seg_size);
|
||||
let mut last_valid_rec_pos: usize = 0;
|
||||
let mut last_valid_rec_pos: usize = start_offset; // assume at given start_offset begins new record
|
||||
let mut file = File::open(data_dir.join(file_name.clone() + ".partial")).unwrap();
|
||||
file.seek(SeekFrom::Start(offs as u64))?;
|
||||
let mut rec_hdr = [0u8; XLOG_RECORD_CRC_OFFS];
|
||||
@@ -259,7 +274,7 @@ pub fn find_end_of_wal(
|
||||
wal_seg_size: usize,
|
||||
precise: bool,
|
||||
start_lsn: Lsn, // start reading WAL at this point or later
|
||||
) -> Result<(XLogRecPtr, TimeLineID)> {
|
||||
) -> anyhow::Result<(XLogRecPtr, TimeLineID)> {
|
||||
let mut high_segno: XLogSegNo = 0;
|
||||
let mut high_tli: TimeLineID = 0;
|
||||
let mut high_ispartial = false;
|
||||
@@ -341,19 +356,19 @@ pub fn main() {
|
||||
}
|
||||
|
||||
impl XLogRecord {
|
||||
pub fn from_slice(buf: &[u8]) -> XLogRecord {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
XLogRecord::des(buf).unwrap()
|
||||
pub fn from_slice(buf: &[u8]) -> Result<XLogRecord, DeserializeError> {
|
||||
use utils::bin_ser::LeSer;
|
||||
XLogRecord::des(buf)
|
||||
}
|
||||
|
||||
pub fn from_bytes<B: Buf>(buf: &mut B) -> XLogRecord {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
XLogRecord::des_from(&mut buf.reader()).unwrap()
|
||||
pub fn from_bytes<B: Buf>(buf: &mut B) -> Result<XLogRecord, DeserializeError> {
|
||||
use utils::bin_ser::LeSer;
|
||||
XLogRecord::des_from(&mut buf.reader())
|
||||
}
|
||||
|
||||
pub fn encode(&self) -> Bytes {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
self.ser().unwrap().into()
|
||||
pub fn encode(&self) -> Result<Bytes, SerializeError> {
|
||||
use utils::bin_ser::LeSer;
|
||||
Ok(self.ser()?.into())
|
||||
}
|
||||
|
||||
// Is this record an XLOG_SWITCH record? They need some special processing,
|
||||
@@ -363,35 +378,35 @@ impl XLogRecord {
|
||||
}
|
||||
|
||||
impl XLogPageHeaderData {
|
||||
pub fn from_bytes<B: Buf>(buf: &mut B) -> XLogPageHeaderData {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
XLogPageHeaderData::des_from(&mut buf.reader()).unwrap()
|
||||
pub fn from_bytes<B: Buf>(buf: &mut B) -> Result<XLogPageHeaderData, DeserializeError> {
|
||||
use utils::bin_ser::LeSer;
|
||||
XLogPageHeaderData::des_from(&mut buf.reader())
|
||||
}
|
||||
}
|
||||
|
||||
impl XLogLongPageHeaderData {
|
||||
pub fn from_bytes<B: Buf>(buf: &mut B) -> XLogLongPageHeaderData {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
XLogLongPageHeaderData::des_from(&mut buf.reader()).unwrap()
|
||||
pub fn from_bytes<B: Buf>(buf: &mut B) -> Result<XLogLongPageHeaderData, DeserializeError> {
|
||||
use utils::bin_ser::LeSer;
|
||||
XLogLongPageHeaderData::des_from(&mut buf.reader())
|
||||
}
|
||||
|
||||
pub fn encode(&self) -> Bytes {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
self.ser().unwrap().into()
|
||||
pub fn encode(&self) -> Result<Bytes, SerializeError> {
|
||||
use utils::bin_ser::LeSer;
|
||||
self.ser().map(|b| b.into())
|
||||
}
|
||||
}
|
||||
|
||||
pub const SIZEOF_CHECKPOINT: usize = std::mem::size_of::<CheckPoint>();
|
||||
|
||||
impl CheckPoint {
|
||||
pub fn encode(&self) -> Bytes {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
self.ser().unwrap().into()
|
||||
pub fn encode(&self) -> Result<Bytes, SerializeError> {
|
||||
use utils::bin_ser::LeSer;
|
||||
Ok(self.ser()?.into())
|
||||
}
|
||||
|
||||
pub fn decode(buf: &[u8]) -> Result<CheckPoint, anyhow::Error> {
|
||||
use zenith_utils::bin_ser::LeSer;
|
||||
Ok(CheckPoint::des(buf)?)
|
||||
pub fn decode(buf: &[u8]) -> Result<CheckPoint, DeserializeError> {
|
||||
use utils::bin_ser::LeSer;
|
||||
CheckPoint::des(buf)
|
||||
}
|
||||
|
||||
/// Update next XID based on provided new_xid and stored epoch.
|
||||
@@ -400,9 +415,13 @@ impl CheckPoint {
|
||||
///
|
||||
/// Returns 'true' if the XID was updated.
|
||||
pub fn update_next_xid(&mut self, xid: u32) -> bool {
|
||||
let xid = xid.wrapping_add(XID_CHECKPOINT_INTERVAL - 1) & !(XID_CHECKPOINT_INTERVAL - 1);
|
||||
// nextXid should now be greater than any XID in WAL, so increment the provided XID and check for wraparound.
|
||||
let mut new_xid = std::cmp::max(xid + 1, pg_constants::FIRST_NORMAL_TRANSACTION_ID);
|
||||
// To reduce the number of metadata checkpoints, we forward-align the XID on XID_CHECKPOINT_INTERVAL.
|
||||
// XID_CHECKPOINT_INTERVAL should not be larger than BLCKSZ*CLOG_XACTS_PER_BYTE
|
||||
new_xid =
|
||||
new_xid.wrapping_add(XID_CHECKPOINT_INTERVAL - 1) & !(XID_CHECKPOINT_INTERVAL - 1);
|
||||
let full_xid = self.nextXid.value;
|
||||
let new_xid = std::cmp::max(xid + 1, pg_constants::FIRST_NORMAL_TRANSACTION_ID);
|
||||
let old_xid = full_xid as u32;
|
||||
if new_xid.wrapping_sub(old_xid) as i32 > 0 {
|
||||
let mut epoch = full_xid >> 32;
|
||||
@@ -425,7 +444,7 @@ impl CheckPoint {
|
||||
// Generate new, empty WAL segment.
|
||||
// We need this segment to start compute node.
|
||||
//
|
||||
pub fn generate_wal_segment(segno: u64, system_id: u64) -> Bytes {
|
||||
pub fn generate_wal_segment(segno: u64, system_id: u64) -> Result<Bytes, SerializeError> {
|
||||
let mut seg_buf = BytesMut::with_capacity(pg_constants::WAL_SEGMENT_SIZE as usize);
|
||||
|
||||
let pageaddr = XLogSegNoOffsetToRecPtr(segno, 0, pg_constants::WAL_SEGMENT_SIZE);
|
||||
@@ -445,12 +464,12 @@ pub fn generate_wal_segment(segno: u64, system_id: u64) -> Bytes {
|
||||
xlp_xlog_blcksz: XLOG_BLCKSZ as u32,
|
||||
};
|
||||
|
||||
let hdr_bytes = hdr.encode();
|
||||
let hdr_bytes = hdr.encode()?;
|
||||
seg_buf.extend_from_slice(&hdr_bytes);
|
||||
|
||||
//zero out the rest of the file
|
||||
seg_buf.resize(pg_constants::WAL_SEGMENT_SIZE, 0);
|
||||
seg_buf.freeze()
|
||||
Ok(seg_buf.freeze())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -464,7 +483,9 @@ mod tests {
|
||||
#[test]
|
||||
pub fn test_find_end_of_wal() {
|
||||
// 1. Run initdb to generate some WAL
|
||||
let top_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("..");
|
||||
let top_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
|
||||
.join("..")
|
||||
.join("..");
|
||||
let data_dir = top_path.join("test_output/test_find_end_of_wal");
|
||||
let initdb_path = top_path.join("tmp_install/bin/initdb");
|
||||
let lib_path = top_path.join("tmp_install/lib");
|
||||
@@ -482,7 +503,13 @@ mod tests {
|
||||
.env("DYLD_LIBRARY_PATH", &lib_path)
|
||||
.output()
|
||||
.unwrap();
|
||||
assert!(initdb_output.status.success());
|
||||
assert!(
|
||||
initdb_output.status.success(),
|
||||
"initdb failed. Status: '{}', stdout: '{}', stderr: '{}'",
|
||||
initdb_output.status,
|
||||
String::from_utf8_lossy(&initdb_output.stdout),
|
||||
String::from_utf8_lossy(&initdb_output.stderr),
|
||||
);
|
||||
|
||||
// 2. Pick WAL generated by initdb
|
||||
let wal_dir = data_dir.join("pg_wal");
|
||||
@@ -520,4 +547,34 @@ mod tests {
|
||||
println!("wal_end={}, tli={}", wal_end, tli);
|
||||
assert_eq!(wal_end, waldump_wal_end);
|
||||
}
|
||||
|
||||
/// Check the math in update_next_xid
|
||||
///
|
||||
/// NOTE: These checks are sensitive to the value of XID_CHECKPOINT_INTERVAL,
|
||||
/// currently 1024.
|
||||
#[test]
|
||||
pub fn test_update_next_xid() {
|
||||
let checkpoint_buf = [0u8; std::mem::size_of::<CheckPoint>()];
|
||||
let mut checkpoint = CheckPoint::decode(&checkpoint_buf).unwrap();
|
||||
|
||||
checkpoint.nextXid = FullTransactionId { value: 10 };
|
||||
assert_eq!(checkpoint.nextXid.value, 10);
|
||||
|
||||
// The input XID gets rounded up to the next XID_CHECKPOINT_INTERVAL
|
||||
// boundary
|
||||
checkpoint.update_next_xid(100);
|
||||
assert_eq!(checkpoint.nextXid.value, 1024);
|
||||
|
||||
// No change
|
||||
checkpoint.update_next_xid(500);
|
||||
assert_eq!(checkpoint.nextXid.value, 1024);
|
||||
checkpoint.update_next_xid(1023);
|
||||
assert_eq!(checkpoint.nextXid.value, 1024);
|
||||
|
||||
// The function returns the *next* XID, given the highest XID seen so
|
||||
// far. So when we pass 1024, the nextXid gets bumped up to the next
|
||||
// XID_CHECKPOINT_INTERVAL boundary.
|
||||
checkpoint.update_next_xid(1024);
|
||||
assert_eq!(checkpoint.nextXid.value, 2048);
|
||||
}
|
||||
}
|
||||
libs/remote_storage/Cargo.toml (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
[package]
|
||||
name = "remote_storage"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
anyhow = { version = "1.0", features = ["backtrace"] }
|
||||
tokio = { version = "1.17", features = ["sync", "macros", "fs", "io-util"] }
|
||||
tokio-util = { version = "0.7", features = ["io"] }
|
||||
tracing = "0.1.27"
|
||||
rusoto_core = "0.48"
|
||||
rusoto_s3 = "0.48"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
async-trait = "0.1"
|
||||
|
||||
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.2"
|
||||
libs/remote_storage/src/lib.rs (new file, 232 lines)
@@ -0,0 +1,232 @@
|
||||
//! A set of generic storage abstractions for the page server to use when backing up and restoring its state from the external storage.
|
||||
//! No other modules from this tree are supposed to be used directly by the external code.
|
||||
//!
|
||||
//! The [`RemoteStorage`] trait is a CRUD-like generic abstraction for adapting external storages, with a few implementations:
|
||||
//! * [`local_fs`] allows to use local file system as an external storage
|
||||
//! * [`s3_bucket`] uses AWS S3 bucket as an external storage
|
||||
//!
|
||||
mod local_fs;
|
||||
mod s3_bucket;
|
||||
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
collections::HashMap,
|
||||
ffi::OsStr,
|
||||
num::{NonZeroU32, NonZeroUsize},
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use tokio::io;
|
||||
use tracing::info;
|
||||
|
||||
pub use self::{
|
||||
local_fs::LocalFs,
|
||||
s3_bucket::{S3Bucket, S3ObjectKey},
|
||||
};
|
||||
|
||||
/// How many different timelines can be processed simultaneously when synchronizing layers with the remote storage.
|
||||
/// During regular work, pageserver produces one layer file per timeline checkpoint, with bursts of concurrency
|
||||
/// during start (where local and remote timelines are compared and initial sync tasks are scheduled) and timeline attach.
|
||||
/// Both cases may trigger a timeline download, which might download a lot of layers. This concurrency is limited by the clients internally, if needed.
|
||||
pub const DEFAULT_REMOTE_STORAGE_MAX_CONCURRENT_SYNCS: usize = 50;
|
||||
pub const DEFAULT_REMOTE_STORAGE_MAX_SYNC_ERRORS: u32 = 10;
|
||||
/// Currently, sync happens with AWS S3, that has two limits on requests per second:
|
||||
/// ~200 RPS for IAM services
|
||||
/// https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html
|
||||
/// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
|
||||
/// https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/
|
||||
pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
|
||||
|
||||
/// Storage (potentially remote) API to manage its state.
|
||||
/// This storage tries to be unaware of any layered repository context,
|
||||
/// providing basic CRUD operations for storage files.
|
||||
#[async_trait::async_trait]
|
||||
pub trait RemoteStorage: Send + Sync {
|
||||
/// A way to uniquely reference a file in the remote storage.
|
||||
type RemoteObjectId;
|
||||
|
||||
/// Attempts to derive the storage path out of the local path, if the latter is correct.
|
||||
fn remote_object_id(&self, local_path: &Path) -> anyhow::Result<Self::RemoteObjectId>;
|
||||
|
||||
/// Gets the download path of the given storage file.
|
||||
fn local_path(&self, remote_object_id: &Self::RemoteObjectId) -> anyhow::Result<PathBuf>;
|
||||
|
||||
/// Lists all items the storage has right now.
|
||||
async fn list(&self) -> anyhow::Result<Vec<Self::RemoteObjectId>>;
|
||||
|
||||
/// Streams the local file contents into the remote storage entry.
|
||||
async fn upload(
|
||||
&self,
|
||||
from: impl io::AsyncRead + Unpin + Send + Sync + 'static,
|
||||
// S3 PUT request requires the content length to be specified,
|
||||
// otherwise it starts to fail with the concurrent connection count increasing.
|
||||
from_size_bytes: usize,
|
||||
to: &Self::RemoteObjectId,
|
||||
metadata: Option<StorageMetadata>,
|
||||
) -> anyhow::Result<()>;
|
||||
|
||||
/// Streams the remote storage entry contents into the buffered writer given, returns the filled writer.
|
||||
/// Returns the metadata, if any was stored with the file previously.
|
||||
async fn download(
|
||||
&self,
|
||||
from: &Self::RemoteObjectId,
|
||||
to: &mut (impl io::AsyncWrite + Unpin + Send + Sync),
|
||||
) -> anyhow::Result<Option<StorageMetadata>>;
|
||||
|
||||
/// Streams a given byte range of the remote storage entry contents into the buffered writer given, returns the filled writer.
|
||||
/// Returns the metadata, if any was stored with the file previously.
|
||||
async fn download_byte_range(
|
||||
&self,
|
||||
from: &Self::RemoteObjectId,
|
||||
start_inclusive: u64,
|
||||
end_exclusive: Option<u64>,
|
||||
to: &mut (impl io::AsyncWrite + Unpin + Send + Sync),
|
||||
) -> anyhow::Result<Option<StorageMetadata>>;
|
||||
|
||||
async fn delete(&self, path: &Self::RemoteObjectId) -> anyhow::Result<()>;
|
||||
}
|
||||
|
||||
/// TODO kb
|
||||
pub enum GenericRemoteStorage {
|
||||
Local(LocalFs),
|
||||
S3(S3Bucket),
|
||||
}
|
||||
|
||||
impl GenericRemoteStorage {
|
||||
pub fn new(
|
||||
working_directory: PathBuf,
|
||||
storage_config: &RemoteStorageConfig,
|
||||
) -> anyhow::Result<Self> {
|
||||
match &storage_config.storage {
|
||||
RemoteStorageKind::LocalFs(root) => {
|
||||
info!("Using fs root '{}' as a remote storage", root.display());
|
||||
LocalFs::new(root.clone(), working_directory).map(GenericRemoteStorage::Local)
|
||||
}
|
||||
RemoteStorageKind::AwsS3(s3_config) => {
|
||||
info!("Using s3 bucket '{}' in region '{}' as a remote storage, prefix in bucket: '{:?}', bucket endpoint: '{:?}'",
|
||||
s3_config.bucket_name, s3_config.bucket_region, s3_config.prefix_in_bucket, s3_config.endpoint);
|
||||
S3Bucket::new(s3_config, working_directory).map(GenericRemoteStorage::S3)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Extra set of key-value pairs that contain arbitrary metadata about the storage entry.
|
||||
/// Immutable, cannot be changed once the file is created.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct StorageMetadata(HashMap<String, String>);
|
||||
|
||||
fn strip_path_prefix<'a>(prefix: &'a Path, path: &'a Path) -> anyhow::Result<&'a Path> {
|
||||
if prefix == path {
|
||||
anyhow::bail!(
|
||||
"Prefix and the path are equal, cannot strip: '{}'",
|
||||
prefix.display()
|
||||
)
|
||||
} else {
|
||||
path.strip_prefix(prefix).with_context(|| {
|
||||
format!(
|
||||
"Path '{}' is not prefixed with '{}'",
|
||||
path.display(),
|
||||
prefix.display(),
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// External backup storage configuration, enough for creating a client for that storage.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RemoteStorageConfig {
    /// Max allowed number of concurrent sync operations between the API user and the remote storage.
    pub max_concurrent_syncs: NonZeroUsize,
    /// Max allowed errors before the sync task is considered failed and evicted.
    pub max_sync_errors: NonZeroU32,
    /// The storage connection configuration.
    pub storage: RemoteStorageKind,
}

/// A kind of a remote storage to connect to, with its connection configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum RemoteStorageKind {
    /// Storage based on local file system.
    /// Specify a root folder to place all stored files into.
    LocalFs(PathBuf),
    /// AWS S3 based storage, storing all files in the S3 bucket
    /// specified by the config
    AwsS3(S3Config),
}

/// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write).
#[derive(Clone, PartialEq, Eq)]
pub struct S3Config {
    /// Name of the bucket to connect to.
    pub bucket_name: String,
    /// The region where the bucket is located.
    pub bucket_region: String,
    /// A "subfolder" in the bucket, to use the same bucket separately by multiple remote storage users at once.
    pub prefix_in_bucket: Option<String>,
    /// A base URL to send S3 requests to.
    /// By default, the endpoint is derived from the region name, assuming it is
    /// an AWS S3 region name, and erroring on an unknown region name.
    /// The endpoint provides a way to support other S3 flavors and their regions.
    ///
    /// Example: `http://127.0.0.1:5000`
    pub endpoint: Option<String>,
    /// AWS S3 has various limits on its API calls that we must not exceed.
    /// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details.
    pub concurrency_limit: NonZeroUsize,
}
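As an illustration of the endpoint override described above, a hypothetical `S3Config` pointing at a non-AWS, S3-compatible endpoint might look like this; only the field names and types come from the struct, all concrete values are invented.

use std::num::NonZeroUsize;

fn example_s3_config() -> S3Config {
    S3Config {
        bucket_name: "my-backup-bucket".to_string(),
        bucket_region: "eu-central-1".to_string(),
        // Lets several users share one bucket by keeping their objects under distinct prefixes.
        prefix_in_bucket: Some("pageserver-1/".to_string()),
        // Point at an S3-compatible endpoint (e.g. a local mock) instead of
        // deriving the endpoint from the region name.
        endpoint: Some("http://127.0.0.1:5000".to_string()),
        concurrency_limit: NonZeroUsize::new(100).unwrap(),
    }
}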
impl std::fmt::Debug for S3Config {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("S3Config")
            .field("bucket_name", &self.bucket_name)
            .field("bucket_region", &self.bucket_region)
            .field("prefix_in_bucket", &self.prefix_in_bucket)
            .field("concurrency_limit", &self.concurrency_limit)
            .finish()
    }
}

pub fn path_with_suffix_extension(original_path: impl AsRef<Path>, suffix: &str) -> PathBuf {
    let new_extension = match original_path
        .as_ref()
        .extension()
        .map(OsStr::to_string_lossy)
    {
        Some(extension) => Cow::Owned(format!("{extension}.{suffix}")),
        None => Cow::Borrowed(suffix),
    };
    original_path
        .as_ref()
        .with_extension(new_extension.as_ref())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_path_with_suffix_extension() {
        let p = PathBuf::from("/foo/bar");
        assert_eq!(
            &path_with_suffix_extension(&p, "temp").to_string_lossy(),
            "/foo/bar.temp"
        );
        let p = PathBuf::from("/foo/bar");
        assert_eq!(
            &path_with_suffix_extension(&p, "temp.temp").to_string_lossy(),
            "/foo/bar.temp.temp"
        );
        let p = PathBuf::from("/foo/bar.baz");
        assert_eq!(
            &path_with_suffix_extension(&p, "temp.temp").to_string_lossy(),
            "/foo/bar.baz.temp.temp"
        );
        let p = PathBuf::from("/foo/bar.baz");
        assert_eq!(
            &path_with_suffix_extension(&p, ".temp").to_string_lossy(),
            "/foo/bar.baz..temp"
        );
    }
}
@@ -1,7 +1,7 @@
//! Local filesystem acting as a remote storage.
//! Multiple pageservers can use the same "storage" of this kind by using different storage roots.
//! Multiple API users can use the same "storage" of this kind by using different storage roots.
//!
//! This storage is used in pageserver tests, but can also be used in cases when a certain persistent
//! This storage is used in tests, but can also be used in cases when a certain persistent
//! volume is mounted to the local FS.

use std::{
@@ -17,16 +17,18 @@ use tokio::{
};
use tracing::*;

use super::{strip_path_prefix, RemoteStorage};
use crate::path_with_suffix_extension;

use super::{strip_path_prefix, RemoteStorage, StorageMetadata};

pub struct LocalFs {
    pageserver_workdir: &'static Path,
    root: PathBuf,
    working_directory: PathBuf,
    storage_root: PathBuf,
}

impl LocalFs {
    /// Attempts to create local FS storage, along with its root directory.
    pub fn new(root: PathBuf, pageserver_workdir: &'static Path) -> anyhow::Result<Self> {
    pub fn new(root: PathBuf, working_directory: PathBuf) -> anyhow::Result<Self> {
        if !root.exists() {
            std::fs::create_dir_all(&root).with_context(|| {
                format!(
@@ -36,15 +38,15 @@ impl LocalFs {
            })?;
        }
        Ok(Self {
            pageserver_workdir,
            root,
            working_directory,
            storage_root: root,
        })
    }

    fn resolve_in_storage(&self, path: &Path) -> anyhow::Result<PathBuf> {
        if path.is_relative() {
            Ok(self.root.join(path))
        } else if path.starts_with(&self.root) {
            Ok(self.storage_root.join(path))
        } else if path.starts_with(&self.storage_root) {
            Ok(path.to_path_buf())
        } else {
            bail!(
@@ -53,41 +55,73 @@ impl LocalFs {
            )
        }
    }

    async fn read_storage_metadata(
        &self,
        file_path: &Path,
    ) -> anyhow::Result<Option<StorageMetadata>> {
        let metadata_path = storage_metadata_path(file_path);
        if metadata_path.exists() && metadata_path.is_file() {
            let metadata_string = fs::read_to_string(&metadata_path).await.with_context(|| {
                format!(
                    "Failed to read metadata from the local storage at '{}'",
                    metadata_path.display()
                )
            })?;

            serde_json::from_str(&metadata_string)
                .with_context(|| {
                    format!(
                        "Failed to deserialize metadata from the local storage at '{}'",
                        metadata_path.display()
                    )
                })
                .map(|metadata| Some(StorageMetadata(metadata)))
        } else {
            Ok(None)
        }
    }
}

#[async_trait::async_trait]
impl RemoteStorage for LocalFs {
    type StoragePath = PathBuf;
    type RemoteObjectId = PathBuf;

    fn storage_path(&self, local_path: &Path) -> anyhow::Result<Self::StoragePath> {
        Ok(self.root.join(
            strip_path_prefix(self.pageserver_workdir, local_path)
    fn remote_object_id(&self, local_path: &Path) -> anyhow::Result<Self::RemoteObjectId> {
        Ok(self.storage_root.join(
            strip_path_prefix(&self.working_directory, local_path)
                .context("local path does not belong to this storage")?,
        ))
    }

    fn local_path(&self, storage_path: &Self::StoragePath) -> anyhow::Result<PathBuf> {
        let relative_path = strip_path_prefix(&self.root, storage_path)
    fn local_path(&self, storage_path: &Self::RemoteObjectId) -> anyhow::Result<PathBuf> {
        let relative_path = strip_path_prefix(&self.storage_root, storage_path)
            .context("local path does not belong to this storage")?;
        Ok(self.pageserver_workdir.join(relative_path))
        Ok(self.working_directory.join(relative_path))
    }

    async fn list(&self) -> anyhow::Result<Vec<Self::StoragePath>> {
        Ok(get_all_files(&self.root).await?.into_iter().collect())
    async fn list(&self) -> anyhow::Result<Vec<Self::RemoteObjectId>> {
        get_all_files(&self.storage_root).await
    }

    async fn upload(
        &self,
        mut from: impl io::AsyncRead + Unpin + Send + Sync + 'static,
        to: &Self::StoragePath,
        from: impl io::AsyncRead + Unpin + Send + Sync + 'static,
        from_size_bytes: usize,
        to: &Self::RemoteObjectId,
        metadata: Option<StorageMetadata>,
    ) -> anyhow::Result<()> {
        let target_file_path = self.resolve_in_storage(to)?;
        create_target_directory(&target_file_path).await?;
        // We need this dance with a sort-of-durable rename (without fsyncs)
        // to prevent partial uploads. This was really hit when pageserver shutdown
        // cancelled the upload and a partial file was left on the fs
        let temp_file_path = path_with_suffix_extension(&target_file_path, "temp");
        let mut destination = io::BufWriter::new(
            fs::OpenOptions::new()
                .write(true)
                .create(true)
                .open(&target_file_path)
                .open(&temp_file_path)
                .await
                .with_context(|| {
                    format!(
@@ -97,28 +131,72 @@ impl RemoteStorage for LocalFs {
                })?,
        );

        io::copy(&mut from, &mut destination)
        let from_size_bytes = from_size_bytes as u64;
        // Read 1 byte more than expected, to check later that the stream and its declared size match.
        let mut buffer_to_read = from.take(from_size_bytes + 1);

        let bytes_read = io::copy(&mut buffer_to_read, &mut destination)
            .await
            .with_context(|| {
                format!(
                    "Failed to upload file to the local storage at '{}'",
                    "Failed to upload file (write temp) to the local storage at '{}'",
                    temp_file_path.display()
                )
            })?;

        ensure!(
            bytes_read == from_size_bytes,
            "Provided stream has actual size {} that is smaller than the given stream size {}",
            bytes_read,
            from_size_bytes
        );

        ensure!(
            buffer_to_read.read(&mut [0]).await? == 0,
            "Provided stream has bigger size than the given stream size {}",
            from_size_bytes
        );

        destination.flush().await.with_context(|| {
            format!(
                "Failed to upload (flush temp) file to the local storage at '{}'",
                temp_file_path.display()
            )
        })?;

        fs::rename(temp_file_path, &target_file_path)
            .await
            .with_context(|| {
                format!(
                    "Failed to upload (rename) file to the local storage at '{}'",
                    target_file_path.display()
                )
            })?;
        destination.flush().await.with_context(|| {
            format!(
                "Failed to upload file to the local storage at '{}'",
                target_file_path.display()

        if let Some(storage_metadata) = metadata {
            let storage_metadata_path = storage_metadata_path(&target_file_path);
            fs::write(
                &storage_metadata_path,
                serde_json::to_string(&storage_metadata.0)
                    .context("Failed to serialize storage metadata as json")?,
            )
            })?;
            .await
            .with_context(|| {
                format!(
                    "Failed to write metadata to the local storage at '{}'",
                    storage_metadata_path.display()
                )
            })?;
        }

        Ok(())
    }

    async fn download(
        &self,
        from: &Self::StoragePath,
        from: &Self::RemoteObjectId,
        to: &mut (impl io::AsyncWrite + Unpin + Send + Sync),
    ) -> anyhow::Result<()> {
    ) -> anyhow::Result<Option<StorageMetadata>> {
        let file_path = self.resolve_in_storage(from)?;

        if file_path.exists() && file_path.is_file() {
@@ -141,7 +219,8 @@ impl RemoteStorage for LocalFs {
                )
            })?;
            source.flush().await?;
            Ok(())

            self.read_storage_metadata(&file_path).await
        } else {
            bail!(
                "File '{}' either does not exist or is not a file",
@@ -150,13 +229,13 @@ impl RemoteStorage for LocalFs {
        }
    }

    async fn download_range(
    async fn download_byte_range(
        &self,
        from: &Self::StoragePath,
        from: &Self::RemoteObjectId,
        start_inclusive: u64,
        end_exclusive: Option<u64>,
        to: &mut (impl io::AsyncWrite + Unpin + Send + Sync),
    ) -> anyhow::Result<()> {
    ) -> anyhow::Result<Option<StorageMetadata>> {
        if let Some(end_exclusive) = end_exclusive {
            ensure!(
                end_exclusive > start_inclusive,
@@ -165,7 +244,7 @@ impl RemoteStorage for LocalFs {
                end_exclusive
            );
            if start_inclusive == end_exclusive.saturating_sub(1) {
                return Ok(());
                return Ok(None);
            }
        }
        let file_path = self.resolve_in_storage(from)?;
@@ -199,7 +278,8 @@ impl RemoteStorage for LocalFs {
                    file_path.display()
                )
            })?;
            Ok(())

            self.read_storage_metadata(&file_path).await
        } else {
            bail!(
                "File '{}' either does not exist or is not a file",
@@ -208,7 +288,7 @@ impl RemoteStorage for LocalFs {
        }
    }

    async fn delete(&self, path: &Self::StoragePath) -> anyhow::Result<()> {
    async fn delete(&self, path: &Self::RemoteObjectId) -> anyhow::Result<()> {
        let file_path = self.resolve_in_storage(path)?;
        if file_path.exists() && file_path.is_file() {
            Ok(fs::remove_file(file_path).await?)
@@ -221,6 +301,10 @@ impl RemoteStorage for LocalFs {
    }
}

fn storage_metadata_path(original_path: &Path) -> PathBuf {
    path_with_suffix_extension(original_path, "metadata")
}

fn get_all_files<'a, P>(
    directory_path: P,
) -> Pin<Box<dyn Future<Output = anyhow::Result<Vec<PathBuf>>> + Send + Sync + 'a>>
@@ -270,29 +354,30 @@ async fn create_target_directory(target_file_path: &Path) -> anyhow::Result<()>

#[cfg(test)]
mod pure_tests {
    use crate::{
        layered_repository::metadata::METADATA_FILE_NAME,
        repository::repo_harness::{RepoHarness, TIMELINE_ID},
    };
    use tempfile::tempdir;

    use super::*;

    #[test]
    fn storage_path_positive() -> anyhow::Result<()> {
        let repo_harness = RepoHarness::create("storage_path_positive")?;
        let workdir = tempdir()?.path().to_owned();

        let storage_root = PathBuf::from("somewhere").join("else");
        let storage = LocalFs {
            pageserver_workdir: &repo_harness.conf.workdir,
            root: storage_root.clone(),
            working_directory: workdir.clone(),
            storage_root: storage_root.clone(),
        };

        let local_path = repo_harness.timeline_path(&TIMELINE_ID).join("file_name");
        let expected_path = storage_root.join(local_path.strip_prefix(&repo_harness.conf.workdir)?);
        let local_path = workdir
            .join("timelines")
            .join("some_timeline")
            .join("file_name");
        let expected_path = storage_root.join(local_path.strip_prefix(&workdir)?);

        assert_eq!(
            expected_path,
            storage.storage_path(&local_path).expect("Matching path should map to storage path normally"),
            "File paths from pageserver workdir should be stored in local fs storage with the same path they have relative to the workdir"
            storage.remote_object_id(&local_path).expect("Matching path should map to storage path normally"),
            "File paths from workdir should be stored in local fs storage with the same path they have relative to the workdir"
        );

        Ok(())
@@ -302,7 +387,7 @@ mod pure_tests {
    fn storage_path_negatives() -> anyhow::Result<()> {
        #[track_caller]
        fn storage_path_error(storage: &LocalFs, mismatching_path: &Path) -> String {
            match storage.storage_path(mismatching_path) {
            match storage.remote_object_id(mismatching_path) {
                Ok(wrong_path) => panic!(
                    "Expected path '{}' to error, but got storage path: {:?}",
                    mismatching_path.display(),
@@ -312,16 +397,16 @@ mod pure_tests {
            }
        }

        let repo_harness = RepoHarness::create("storage_path_negatives")?;
        let workdir = tempdir()?.path().to_owned();
        let storage_root = PathBuf::from("somewhere").join("else");
        let storage = LocalFs {
            pageserver_workdir: &repo_harness.conf.workdir,
            root: storage_root,
            working_directory: workdir.clone(),
            storage_root,
        };

        let error_string = storage_path_error(&storage, &repo_harness.conf.workdir);
        let error_string = storage_path_error(&storage, &workdir);
        assert!(error_string.contains("does not belong to this storage"));
        assert!(error_string.contains(repo_harness.conf.workdir.to_str().unwrap()));
        assert!(error_string.contains(workdir.to_str().unwrap()));

        let mismatching_path_str = "/something/else";
        let error_message = storage_path_error(&storage, Path::new(mismatching_path_str));
@@ -330,7 +415,7 @@ mod pure_tests {
            "Error should mention wrong path"
        );
        assert!(
            error_message.contains(repo_harness.conf.workdir.to_str().unwrap()),
            error_message.contains(workdir.to_str().unwrap()),
            "Error should mention server workdir"
        );
        assert!(error_message.contains("does not belong to this storage"));
@@ -340,29 +425,28 @@ mod pure_tests {

    #[test]
    fn local_path_positive() -> anyhow::Result<()> {
        let repo_harness = RepoHarness::create("local_path_positive")?;
        let workdir = tempdir()?.path().to_owned();
        let storage_root = PathBuf::from("somewhere").join("else");
        let storage = LocalFs {
            pageserver_workdir: &repo_harness.conf.workdir,
            root: storage_root.clone(),
            working_directory: workdir.clone(),
            storage_root: storage_root.clone(),
        };

        let name = "not a metadata";
        let local_path = repo_harness.timeline_path(&TIMELINE_ID).join(name);
        let local_path = workdir.join("timelines").join("some_timeline").join(name);
        assert_eq!(
            local_path,
            storage
                .local_path(
                    &storage_root.join(local_path.strip_prefix(&repo_harness.conf.workdir)?)
                )
                .local_path(&storage_root.join(local_path.strip_prefix(&workdir)?))
                .expect("For a valid input, valid local path should be parsed"),
            "Should be able to parse metadata out of the correctly named remote delta file"
        );

        let local_metadata_path = repo_harness
            .timeline_path(&TIMELINE_ID)
            .join(METADATA_FILE_NAME);
        let remote_metadata_path = storage.storage_path(&local_metadata_path)?;
        let local_metadata_path = workdir
            .join("timelines")
            .join("some_timeline")
            .join("metadata");
        let remote_metadata_path = storage.remote_object_id(&local_metadata_path)?;
        assert_eq!(
            local_metadata_path,
            storage
@@ -388,11 +472,10 @@ mod pure_tests {
            }
        }

        let repo_harness = RepoHarness::create("local_path_negatives")?;
        let storage_root = PathBuf::from("somewhere").join("else");
        let storage = LocalFs {
            pageserver_workdir: &repo_harness.conf.workdir,
            root: storage_root,
            working_directory: tempdir()?.path().to_owned(),
            storage_root,
        };

        let totally_wrong_path = "wrong_wrong_wrong";
@@ -404,16 +487,19 @@ mod pure_tests {

    #[test]
    fn download_destination_matches_original_path() -> anyhow::Result<()> {
        let repo_harness = RepoHarness::create("download_destination_matches_original_path")?;
        let original_path = repo_harness.timeline_path(&TIMELINE_ID).join("some name");
        let workdir = tempdir()?.path().to_owned();
        let original_path = workdir
            .join("timelines")
            .join("some_timeline")
            .join("some name");

        let storage_root = PathBuf::from("somewhere").join("else");
        let dummy_storage = LocalFs {
            pageserver_workdir: &repo_harness.conf.workdir,
            root: storage_root,
            working_directory: workdir,
            storage_root,
        };

        let storage_path = dummy_storage.storage_path(&original_path)?;
        let storage_path = dummy_storage.remote_object_id(&original_path)?;
        let download_destination = dummy_storage.local_path(&storage_path)?;

        assert_eq!(
@@ -428,23 +514,22 @@ mod pure_tests {
#[cfg(test)]
mod fs_tests {
    use super::*;
    use crate::repository::repo_harness::{RepoHarness, TIMELINE_ID};

    use std::io::Write;
    use std::{collections::HashMap, io::Write};
    use tempfile::tempdir;

    #[tokio::test]
    async fn upload_file() -> anyhow::Result<()> {
        let repo_harness = RepoHarness::create("upload_file")?;
        let workdir = tempdir()?.path().to_owned();
        let storage = create_storage()?;

        let source = create_file_for_upload(
            &storage.pageserver_workdir.join("whatever"),
        let (file, size) = create_file_for_upload(
            &storage.working_directory.join("whatever"),
            "whatever_contents",
        )
        .await?;
        let target_path = PathBuf::from("/").join("somewhere").join("else");
        match storage.upload(source, &target_path).await {
        match storage.upload(file, size, &target_path, None).await {
            Ok(()) => panic!("Should not allow storing files with wrong target path"),
            Err(e) => {
                let message = format!("{:?}", e);
@@ -454,14 +539,14 @@ mod fs_tests {
        }
        assert!(storage.list().await?.is_empty());

        let target_path_1 = upload_dummy_file(&repo_harness, &storage, "upload_1").await?;
        let target_path_1 = upload_dummy_file(&workdir, &storage, "upload_1", None).await?;
        assert_eq!(
            storage.list().await?,
            vec![target_path_1.clone()],
            "Should list a single file after first upload"
        );

        let target_path_2 = upload_dummy_file(&repo_harness, &storage, "upload_2").await?;
        let target_path_2 = upload_dummy_file(&workdir, &storage, "upload_2", None).await?;
        assert_eq!(
            list_files_sorted(&storage).await?,
            vec![target_path_1.clone(), target_path_2.clone()],
@@ -472,22 +557,25 @@ mod fs_tests {
    }

    fn create_storage() -> anyhow::Result<LocalFs> {
        let pageserver_workdir = Box::leak(Box::new(tempdir()?.path().to_owned()));
        let storage = LocalFs::new(tempdir()?.path().to_owned(), pageserver_workdir)?;
        Ok(storage)
        LocalFs::new(tempdir()?.path().to_owned(), tempdir()?.path().to_owned())
    }

    #[tokio::test]
    async fn download_file() -> anyhow::Result<()> {
        let repo_harness = RepoHarness::create("download_file")?;
        let workdir = tempdir()?.path().to_owned();

        let storage = create_storage()?;
        let upload_name = "upload_1";
        let upload_target = upload_dummy_file(&repo_harness, &storage, upload_name).await?;
        let upload_target = upload_dummy_file(&workdir, &storage, upload_name, None).await?;

        let mut content_bytes = io::BufWriter::new(std::io::Cursor::new(Vec::new()));
        storage.download(&upload_target, &mut content_bytes).await?;
        content_bytes.flush().await?;
        let metadata = storage.download(&upload_target, &mut content_bytes).await?;
        assert!(
            metadata.is_none(),
            "No metadata should be returned for no metadata upload"
        );

        content_bytes.flush().await?;
        let contents = String::from_utf8(content_bytes.into_inner().into_inner())?;
        assert_eq!(
            dummy_contents(upload_name),
@@ -509,15 +597,20 @@ mod fs_tests {

    #[tokio::test]
    async fn download_file_range_positive() -> anyhow::Result<()> {
        let repo_harness = RepoHarness::create("download_file_range_positive")?;
        let workdir = tempdir()?.path().to_owned();

        let storage = create_storage()?;
        let upload_name = "upload_1";
        let upload_target = upload_dummy_file(&repo_harness, &storage, upload_name).await?;
        let upload_target = upload_dummy_file(&workdir, &storage, upload_name, None).await?;

        let mut full_range_bytes = io::BufWriter::new(std::io::Cursor::new(Vec::new()));
        storage
            .download_range(&upload_target, 0, None, &mut full_range_bytes)
        let metadata = storage
            .download_byte_range(&upload_target, 0, None, &mut full_range_bytes)
            .await?;
        assert!(
            metadata.is_none(),
            "No metadata should be returned for no metadata upload"
        );
        full_range_bytes.flush().await?;
        assert_eq!(
            dummy_contents(upload_name),
@@ -527,14 +620,18 @@ mod fs_tests {

        let mut zero_range_bytes = io::BufWriter::new(std::io::Cursor::new(Vec::new()));
        let same_byte = 1_000_000_000;
        storage
            .download_range(
        let metadata = storage
            .download_byte_range(
                &upload_target,
                same_byte,
                Some(same_byte + 1), // exclusive end
                &mut zero_range_bytes,
            )
            .await?;
        assert!(
            metadata.is_none(),
            "No metadata should be returned for no metadata upload"
        );
        zero_range_bytes.flush().await?;
        assert!(
            zero_range_bytes.into_inner().into_inner().is_empty(),
@@ -545,14 +642,19 @@ mod fs_tests {
        let (first_part_local, second_part_local) = uploaded_bytes.split_at(3);

        let mut first_part_remote = io::BufWriter::new(std::io::Cursor::new(Vec::new()));
        storage
            .download_range(
        let metadata = storage
            .download_byte_range(
                &upload_target,
                0,
                Some(first_part_local.len() as u64),
                &mut first_part_remote,
            )
            .await?;
        assert!(
            metadata.is_none(),
            "No metadata should be returned for no metadata upload"
        );

        first_part_remote.flush().await?;
        let first_part_remote = first_part_remote.into_inner().into_inner();
        assert_eq!(
@@ -562,14 +664,19 @@ mod fs_tests {
        );

        let mut second_part_remote = io::BufWriter::new(std::io::Cursor::new(Vec::new()));
        storage
            .download_range(
        let metadata = storage
            .download_byte_range(
                &upload_target,
                first_part_local.len() as u64,
                Some((first_part_local.len() + second_part_local.len()) as u64),
                &mut second_part_remote,
            )
            .await?;
        assert!(
            metadata.is_none(),
            "No metadata should be returned for no metadata upload"
        );

        second_part_remote.flush().await?;
        let second_part_remote = second_part_remote.into_inner().into_inner();
        assert_eq!(
@@ -583,16 +690,17 @@ mod fs_tests {

    #[tokio::test]
    async fn download_file_range_negative() -> anyhow::Result<()> {
        let repo_harness = RepoHarness::create("download_file_range_negative")?;
        let workdir = tempdir()?.path().to_owned();

        let storage = create_storage()?;
        let upload_name = "upload_1";
        let upload_target = upload_dummy_file(&repo_harness, &storage, upload_name).await?;
        let upload_target = upload_dummy_file(&workdir, &storage, upload_name, None).await?;

        let start = 10000;
        let end = 234;
        assert!(start > end, "Should test an incorrect range");
        match storage
            .download_range(&upload_target, start, Some(end), &mut io::sink())
            .download_byte_range(&upload_target, start, Some(end), &mut io::sink())
            .await
        {
            Ok(_) => panic!("Should not allow downloading wrong ranges"),
@@ -606,7 +714,7 @@ mod fs_tests {

        let non_existing_path = PathBuf::from("somewhere").join("else");
        match storage
            .download_range(&non_existing_path, 1, Some(3), &mut io::sink())
            .download_byte_range(&non_existing_path, 1, Some(3), &mut io::sink())
            .await
        {
            Ok(_) => panic!("Should not allow downloading non-existing storage file ranges"),
@@ -621,10 +729,11 @@ mod fs_tests {

    #[tokio::test]
    async fn delete_file() -> anyhow::Result<()> {
        let repo_harness = RepoHarness::create("delete_file")?;
        let workdir = tempdir()?.path().to_owned();

        let storage = create_storage()?;
        let upload_name = "upload_1";
        let upload_target = upload_dummy_file(&repo_harness, &storage, upload_name).await?;
        let upload_target = upload_dummy_file(&workdir, &storage, upload_name, None).await?;

        storage.delete(&upload_target).await?;
        assert!(storage.list().await?.is_empty());
@@ -640,31 +749,85 @@ mod fs_tests {
        Ok(())
    }

    async fn upload_dummy_file(
        harness: &RepoHarness,
        storage: &LocalFs,
        name: &str,
    ) -> anyhow::Result<PathBuf> {
        let timeline_path = harness.timeline_path(&TIMELINE_ID);
        let relative_timeline_path = timeline_path.strip_prefix(&harness.conf.workdir)?;
        let storage_path = storage.root.join(relative_timeline_path).join(name);
        storage
            .upload(
                create_file_for_upload(
                    &storage.pageserver_workdir.join(name),
                    &dummy_contents(name),
                )
                .await?,
                &storage_path,
    #[tokio::test]
    async fn file_with_metadata() -> anyhow::Result<()> {
        let workdir = tempdir()?.path().to_owned();

        let storage = create_storage()?;
        let upload_name = "upload_1";
        let metadata = StorageMetadata(HashMap::from([
            ("one".to_string(), "1".to_string()),
            ("two".to_string(), "2".to_string()),
        ]));
        let upload_target =
            upload_dummy_file(&workdir, &storage, upload_name, Some(metadata.clone())).await?;

        let mut content_bytes = io::BufWriter::new(std::io::Cursor::new(Vec::new()));
        let full_download_metadata = storage.download(&upload_target, &mut content_bytes).await?;

        content_bytes.flush().await?;
        let contents = String::from_utf8(content_bytes.into_inner().into_inner())?;
        assert_eq!(
            dummy_contents(upload_name),
            contents,
            "We should upload and download the same contents"
        );

        assert_eq!(
            full_download_metadata.as_ref(),
            Some(&metadata),
            "We should get the same metadata back for full download"
        );

        let uploaded_bytes = dummy_contents(upload_name).into_bytes();
        let (first_part_local, _) = uploaded_bytes.split_at(3);

        let mut first_part_remote = io::BufWriter::new(std::io::Cursor::new(Vec::new()));
        let partial_download_metadata = storage
            .download_byte_range(
                &upload_target,
                0,
                Some(first_part_local.len() as u64),
                &mut first_part_remote,
            )
            .await?;
        first_part_remote.flush().await?;
        let first_part_remote = first_part_remote.into_inner().into_inner();
        assert_eq!(
            first_part_local,
            first_part_remote.as_slice(),
            "First part bytes should be returned when requested"
        );

        assert_eq!(
            partial_download_metadata.as_ref(),
            Some(&metadata),
            "We should get the same metadata back for partial download"
        );

        Ok(())
    }

    async fn upload_dummy_file(
        workdir: &Path,
        storage: &LocalFs,
        name: &str,
        metadata: Option<StorageMetadata>,
    ) -> anyhow::Result<PathBuf> {
        let timeline_path = workdir.join("timelines").join("some_timeline");
        let relative_timeline_path = timeline_path.strip_prefix(&workdir)?;
        let storage_path = storage.storage_root.join(relative_timeline_path).join(name);

        let from_path = storage.working_directory.join(name);
        let (file, size) = create_file_for_upload(&from_path, &dummy_contents(name)).await?;
        storage.upload(file, size, &storage_path, metadata).await?;
        Ok(storage_path)
    }

    async fn create_file_for_upload(
        path: &Path,
        contents: &str,
    ) -> anyhow::Result<io::BufReader<fs::File>> {
    ) -> anyhow::Result<(io::BufReader<fs::File>, usize)> {
        std::fs::create_dir_all(path.parent().unwrap())?;
        let mut file_for_writing = std::fs::OpenOptions::new()
            .write(true)
@@ -672,8 +835,10 @@ mod fs_tests {
            .open(path)?;
        write!(file_for_writing, "{}", contents)?;
        drop(file_for_writing);
        Ok(io::BufReader::new(
            fs::OpenOptions::new().read(true).open(&path).await?,
        let file_size = path.metadata()?.len() as usize;
        Ok((
            io::BufReader::new(fs::OpenOptions::new().read(true).open(&path).await?),
            file_size,
        ))
    }

Some files were not shown because too many files have changed in this diff.
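The `upload` implementation in the diff above avoids partial and mis-sized uploads by writing into a `*.temp` sibling file, copying at most one byte more than the declared size so that both short and long streams are detected, and only renaming the temp file into place once everything checks out. Below is a standalone sketch of that pattern under the same tokio APIs; the function name, the simplified `.temp` suffix handling, and the error messages are invented for illustration.

use anyhow::{ensure, Context};
use std::path::Path;
use tokio::{
    fs,
    io::{self, AsyncReadExt, AsyncWriteExt},
};

// Hypothetical helper restating the "write temp, verify size, rename" dance.
// The real code derives the temp name via path_with_suffix_extension; plain
// with_extension is used here only to keep the sketch short.
async fn write_sized_then_rename(
    from: impl io::AsyncRead + Unpin,
    expected_size: u64,
    target: &Path,
) -> anyhow::Result<()> {
    let temp_path = target.with_extension("temp");
    let mut destination = io::BufWriter::new(
        fs::OpenOptions::new()
            .write(true)
            .create(true)
            .open(&temp_path)
            .await
            .with_context(|| format!("Failed to open temp file '{}'", temp_path.display()))?,
    );

    // Copy at most expected_size + 1 bytes so an oversized stream is detectable.
    let mut limited = from.take(expected_size + 1);
    let bytes_read = io::copy(&mut limited, &mut destination).await?;
    ensure!(
        bytes_read == expected_size,
        "stream produced {bytes_read} bytes, expected {expected_size}"
    );
    // Double-check that nothing is left in the stream beyond the declared size.
    ensure!(
        limited.read(&mut [0]).await? == 0,
        "stream is longer than the declared {expected_size} bytes"
    );

    destination.flush().await?;
    // Rename last, so the target path never holds a partially written file.
    fs::rename(&temp_path, target).await?;
    Ok(())
}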