Mirror of https://github.com/neondatabase/neon.git, synced 2026-01-20 20:02:56 +00:00.

Compare commits: `layer_map_`...`arthur/sim`, 226 commits.
The commit list for this compare view contains 226 entries (from 6a00ad3aab through 9fbef1159f); only the SHA1 column survived in this capture, so per-commit author and date details are omitted here.
@@ -14,3 +14,6 @@ opt-level = 1

[alias]
build_testing = ["build", "--features", "testing"]

[build]
rustflags = ["-C", "default-linker-libraries"]
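The hunk adds a `build_testing` cargo alias and default linker flags. A minimal sketch of how the alias is used from a checkout, assuming the snippet lives in the repository's Cargo config:

```bash
# `cargo build_testing` is shorthand defined by the [alias] table above...
cargo build_testing
# ...and expands to:
cargo build --features testing
```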
@@ -15,8 +15,10 @@
!proxy/
!safekeeper/
!storage_broker/
!trace/
!vendor/postgres-v14/
!vendor/postgres-v15/
!workspace_hack/
!neon_local/
!scripts/ninstall.sh
!vm-cgconfig.conf
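This ignore file whitelists paths (everything else stays out of the Docker build context), and the hunk adds `scripts/ninstall.sh` and `vm-cgconfig.conf` to that whitelist. A hedged local check that the context now carries those paths, reusing a Dockerfile name that appears later in this diff:

```bash
# Paths not whitelisted in .dockerignore are never sent to the daemon,
# so they cannot be COPY'd; a local build surfaces any missing path quickly.
docker build -f Dockerfile.compute-tools -t compute-tools:local .
```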
@@ -123,8 +123,8 @@ runs:
exit 1
fi
if [[ "${{ inputs.run_in_parallel }}" == "true" ]]; then
# -n4 uses four processes to run tests via pytest-xdist
EXTRA_PARAMS="-n4 $EXTRA_PARAMS"
# -n16 uses sixteen processes to run tests via pytest-xdist
EXTRA_PARAMS="-n16 $EXTRA_PARAMS"

# --dist=loadgroup points tests marked with @pytest.mark.xdist_group
# to the same worker to make @pytest.mark.order work with xdist
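The change bumps the pytest-xdist worker count from 4 to 16. Roughly, the invocation these flags feed into looks like the sketch below; the test path and the remaining contents of `EXTRA_PARAMS` come from the action and are assumed here:

```bash
# -n16: sixteen pytest-xdist workers; --dist=loadgroup keeps tests that share an
# @pytest.mark.xdist_group label on one worker so @pytest.mark.order still applies.
pytest -n16 --dist=loadgroup $EXTRA_PARAMS test_runner/
```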
@@ -2,11 +2,11 @@ storage:
vars:
bucket_name: neon-prod-storage-ap-southeast-1
bucket_region: ap-southeast-1
console_mgmt_base_url: http://console-release.local
console_mgmt_base_url: http://neon-internal-api.aws.neon.tech
broker_endpoint: http://storage-broker-lb.epsilon.ap-southeast-1.internal.aws.neon.tech:50051
pageserver_config_stub:
pg_distrib_dir: /usr/local
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
metric_collection_endpoint: http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events
metric_collection_interval: 10min
remote_storage:
bucket_name: "{{ bucket_name }}"

@@ -32,7 +32,7 @@ storage:
hosts:
safekeeper-0.ap-southeast-1.aws.neon.tech:
ansible_host: i-0d6f1dc5161eef894
safekeeper-1.ap-southeast-1.aws.neon.tech:
ansible_host: i-0e338adda8eb2d19f
safekeeper-2.ap-southeast-1.aws.neon.tech:
ansible_host: i-04fb63634e4679eb9
safekeeper-3.ap-southeast-1.aws.neon.tech:
ansible_host: i-05481f3bc88cfc2d4
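These inventories are consumed by the deploy jobs shown further down in `build_and_test.yml`. A hedged sketch of the corresponding invocation, mirroring that job; the `ssm_config` file and variable names are taken from it, and the token and tag values are placeholders:

```bash
export DOCKER_TAG=<build-tag>
ansible-galaxy collection install sivel.toiletwater
ansible-playbook deploy.yaml -i prod.ap-southeast-1.hosts.yaml -e @ssm_config \
  -e CONSOLE_API_TOKEN=<NEON_PRODUCTION_API_KEY>
```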
.github/ansible/prod.eu-central-1.hosts.yaml (vendored, 4 changed lines)
@@ -2,11 +2,11 @@ storage:
vars:
bucket_name: neon-prod-storage-eu-central-1
bucket_region: eu-central-1
console_mgmt_base_url: http://console-release.local
console_mgmt_base_url: http://neon-internal-api.aws.neon.tech
broker_endpoint: http://storage-broker-lb.gamma.eu-central-1.internal.aws.neon.tech:50051
pageserver_config_stub:
pg_distrib_dir: /usr/local
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
metric_collection_endpoint: http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events
metric_collection_interval: 10min
remote_storage:
bucket_name: "{{ bucket_name }}"
.github/ansible/prod.us-east-2.hosts.yaml (vendored, 6 changed lines)
@@ -2,11 +2,11 @@ storage:
vars:
bucket_name: neon-prod-storage-us-east-2
bucket_region: us-east-2
console_mgmt_base_url: http://console-release.local
console_mgmt_base_url: http://neon-internal-api.aws.neon.tech
broker_endpoint: http://storage-broker-lb.delta.us-east-2.internal.aws.neon.tech:50051
pageserver_config_stub:
pg_distrib_dir: /usr/local
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
metric_collection_endpoint: http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events
metric_collection_interval: 10min
remote_storage:
bucket_name: "{{ bucket_name }}"

@@ -27,6 +27,8 @@ storage:
ansible_host: i-062227ba7f119eb8c
pageserver-1.us-east-2.aws.neon.tech:
ansible_host: i-0b3ec0afab5968938
pageserver-2.us-east-2.aws.neon.tech:
ansible_host: i-0d7a1c4325e71421d

safekeepers:
hosts:
.github/ansible/prod.us-west-2.hosts.yaml (vendored, 6 changed lines)
@@ -2,11 +2,11 @@ storage:
vars:
bucket_name: neon-prod-storage-us-west-2
bucket_region: us-west-2
console_mgmt_base_url: http://console-release.local
console_mgmt_base_url: http://neon-internal-api.aws.neon.tech
broker_endpoint: http://storage-broker-lb.eta.us-west-2.internal.aws.neon.tech:50051
pageserver_config_stub:
pg_distrib_dir: /usr/local
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
metric_collection_endpoint: http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events
metric_collection_interval: 10min
remote_storage:
bucket_name: "{{ bucket_name }}"

@@ -29,6 +29,8 @@ storage:
ansible_host: i-0c834be1dddba8b3f
pageserver-2.us-west-2.aws.neon.tech:
ansible_host: i-051642d372c0a4f32
pageserver-3.us-west-2.aws.neon.tech:
ansible_host: i-00c3844beb9ad1c6b

safekeepers:
hosts:
.github/ansible/production.hosts.yaml (vendored, 40 changed lines)
@@ -1,40 +0,0 @@
---
storage:
vars:
console_mgmt_base_url: http://console-release.local
bucket_name: zenith-storage-oregon
bucket_region: us-west-2
broker_endpoint: http://storage-broker.prod.local:50051
pageserver_config_stub:
pg_distrib_dir: /usr/local
metric_collection_endpoint: http://console-release.local/billing/api/v1/usage_events
metric_collection_interval: 10min
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "{{ inventory_hostname }}"
safekeeper_s3_prefix: prod-1/wal
hostname_suffix: ".local"
remote_user: admin
sentry_environment: production

children:
pageservers:
hosts:
zenith-1-ps-2:
console_region_id: aws-us-west-2
zenith-1-ps-3:
console_region_id: aws-us-west-2
zenith-1-ps-4:
console_region_id: aws-us-west-2
zenith-1-ps-5:
console_region_id: aws-us-west-2

safekeepers:
hosts:
zenith-1-sk-1:
console_region_id: aws-us-west-2
zenith-1-sk-2:
console_region_id: aws-us-west-2
zenith-1-sk-4:
console_region_id: aws-us-west-2
.github/ansible/staging.eu-west-1.hosts.yaml (vendored, 9 changed lines)
@@ -2,12 +2,17 @@ storage:
vars:
bucket_name: neon-dev-storage-eu-west-1
bucket_region: eu-west-1
console_mgmt_base_url: http://console-staging.local
console_mgmt_base_url: http://neon-internal-api.aws.neon.build
broker_endpoint: http://storage-broker-lb.zeta.eu-west-1.internal.aws.neon.build:50051
pageserver_config_stub:
pg_distrib_dir: /usr/local
metric_collection_endpoint: http://console-staging.local/billing/api/v1/usage_events
metric_collection_endpoint: http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events
metric_collection_interval: 10min
tenant_config:
eviction_policy:
kind: "LayerAccessThreshold"
period: "20m"
threshold: "20m"
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
.github/ansible/staging.us-east-2.hosts.yaml (vendored, 13 changed lines)
@@ -2,12 +2,17 @@ storage:
vars:
bucket_name: neon-staging-storage-us-east-2
bucket_region: us-east-2
console_mgmt_base_url: http://console-staging.local
console_mgmt_base_url: http://neon-internal-api.aws.neon.build
broker_endpoint: http://storage-broker-lb.beta.us-east-2.internal.aws.neon.build:50051
pageserver_config_stub:
pg_distrib_dir: /usr/local
metric_collection_endpoint: http://console-staging.local/billing/api/v1/usage_events
metric_collection_endpoint: http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events
metric_collection_interval: 10min
tenant_config:
eviction_policy:
kind: "LayerAccessThreshold"
period: "20m"
threshold: "20m"
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"

@@ -31,6 +36,8 @@ storage:
ansible_host: i-01e31cdf7e970586a
pageserver-3.us-east-2.aws.neon.build:
ansible_host: i-0602a0291365ef7cc
pageserver-99.us-east-2.aws.neon.build:
ansible_host: i-0c39491109bb88824

safekeepers:
hosts:

@@ -40,3 +47,5 @@ storage:
ansible_host: i-0171efc3604a7b907
safekeeper-2.us-east-2.aws.neon.build:
ansible_host: i-0de0b03a51676a6ce
safekeeper-99.us-east-2.aws.neon.build:
ansible_host: i-0d61b6a2ea32028d5
@@ -1,16 +1,31 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

deploymentStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 100%
maxUnavailable: 50%

# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
# The pod(s) will stay in Terminating, keeps the existing connections
# but doesn't receive new ones
containerLifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 604800"]
terminationGracePeriodSeconds: 604800

image:
repository: neondatabase/neon

settings:
authBackend: "console"
authEndpoint: "http://console-staging.local/management/api/v2"
authEndpoint: "http://neon-internal-api.aws.neon.build/management/api/v2"
domain: "*.eu-west-1.aws.neon.build"
sentryEnvironment: "staging"
wssPort: 8443
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events"
metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy pods
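The new `deploymentStrategy` and `preStop` hook aim to surge in new proxy pods quickly while letting old ones drain for up to 7 days. A hedged way to observe that behaviour during an upgrade (the namespace name is taken from the deploy jobs later in this diff):

```bash
# After `helm upgrade`, old pods run the preStop `sleep 604800` and sit in
# Terminating (bounded by terminationGracePeriodSeconds) while still serving the
# connections they already hold; new connections only reach the fresh pods.
kubectl -n neon-proxy get pods -w
```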
@@ -8,8 +8,9 @@ settings:
authBackend: "link"
authEndpoint: "https://console.stage.neon.tech/authenticate_proxy_request/"
uri: "https://console.stage.neon.tech/psql_session/"
domain: "pg.neon.build"
sentryEnvironment: "staging"
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events"
metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy-link pods
@@ -6,11 +6,11 @@ image:

settings:
authBackend: "console"
authEndpoint: "http://console-staging.local/management/api/v2"
authEndpoint: "http://neon-internal-api.aws.neon.build/management/api/v2"
domain: "*.cloud.stage.neon.tech"
sentryEnvironment: "staging"
wssPort: 8443
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events"
metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy pods
@@ -1,16 +1,31 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

deploymentStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 100%
maxUnavailable: 50%

# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
# The pod(s) will stay in Terminating, keeps the existing connections
# but doesn't receive new ones
containerLifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 604800"]
terminationGracePeriodSeconds: 604800

image:
repository: neondatabase/neon

settings:
authBackend: "console"
authEndpoint: "http://console-staging.local/management/api/v2"
authEndpoint: "http://neon-internal-api.aws.neon.build/management/api/v2"
domain: "*.us-east-2.aws.neon.build"
sentryEnvironment: "staging"
wssPort: 8443
metricCollectionEndpoint: "http://console-staging.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.build/billing/api/v1/usage_events"
metricCollectionInterval: "1min"

# -- Additional labels for neon-proxy pods
@@ -1,16 +1,32 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

deploymentStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 100%
maxUnavailable: 50%

# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
# The pod(s) will stay in Terminating, keeps the existing connections
# but doesn't receive new ones
containerLifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 604800"]
terminationGracePeriodSeconds: 604800

image:
repository: neondatabase/neon

settings:
authBackend: "console"
authEndpoint: "http://console-release.local/management/api/v2"
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
domain: "*.ap-southeast-1.aws.neon.tech"
sentryEnvironment: "production"
wssPort: 8443
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
@@ -1,16 +1,32 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

deploymentStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 100%
maxUnavailable: 50%

# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
# The pod(s) will stay in Terminating, keeps the existing connections
# but doesn't receive new ones
containerLifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 604800"]
terminationGracePeriodSeconds: 604800

image:
repository: neondatabase/neon

settings:
authBackend: "console"
authEndpoint: "http://console-release.local/management/api/v2"
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
domain: "*.eu-central-1.aws.neon.tech"
sentryEnvironment: "production"
wssPort: 8443
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
@@ -1,37 +1,37 @@
# Helm chart values for neon-proxy-link.
# This is a YAML-formatted file.

image:
repository: neondatabase/neon

settings:
authBackend: "link"
authEndpoint: "https://console.neon.tech/authenticate_proxy_request/"
uri: "https://console.neon.tech/psql_session/"
domain: "pg.neon.tech"
sentryEnvironment: "production"

# -- Additional labels for zenith-proxy pods
podLabels:
zenith_service: proxy
zenith_env: production
zenith_region: us-west-2
zenith_region_slug: oregon
zenith_region: us-east-2
zenith_region_slug: us-east-2

service:
type: LoadBalancer
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: external
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
service.beta.kubernetes.io/aws-load-balancer-scheme: internal
external-dns.alpha.kubernetes.io/hostname: proxy-release.local
type: LoadBalancer
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link-mgmt.delta.us-east-2.aws.neon.tech

exposedService:
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: external
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: connect.neon.tech,pg.neon.tech

metrics:
enabled: true
serviceMonitor:
enabled: true
selector:
release: kube-prometheus-stack
external-dns.alpha.kubernetes.io/hostname: neon-proxy-link.delta.us-east-2.aws.neon.tech

extraManifests:
- apiVersion: operator.victoriametrics.com/v1beta1
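This values change moves the link proxy's load-balancer hostnames onto the `delta.us-east-2` cluster while keeping the public `connect.neon.tech` and `pg.neon.tech` names on the internet-facing NLB. A hedged way to verify the external-dns records after the rollout:

```bash
# Both public names should resolve to the new internet-facing NLB once external-dns
# has reconciled the annotations; the mgmt hostname stays on the internal LB.
dig +short connect.neon.tech
dig +short pg.neon.tech
dig +short neon-proxy-link-mgmt.delta.us-east-2.aws.neon.tech
```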
@@ -1,16 +1,32 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

deploymentStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 100%
maxUnavailable: 50%

# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
# The pod(s) will stay in Terminating, keeps the existing connections
# but doesn't receive new ones
containerLifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 604800"]
terminationGracePeriodSeconds: 604800

image:
repository: neondatabase/neon

settings:
authBackend: "console"
authEndpoint: "http://console-release.local/management/api/v2"
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
domain: "*.us-east-2.aws.neon.tech"
sentryEnvironment: "production"
wssPort: 8443
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
@@ -1,32 +1,39 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

image:
repository: neondatabase/neon

settings:
authBackend: "console"
authEndpoint: "http://console-release.local/management/api/v2"
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
domain: "*.cloud.neon.tech"
sentryEnvironment: "production"
wssPort: 8443
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
podLabels:
zenith_service: proxy-scram
zenith_env: production
zenith_env: prod
zenith_region: us-west-2
zenith_region_slug: oregon
zenith_region_slug: us-west-2

exposedService:
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: external
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
external-dns.alpha.kubernetes.io/hostname: '*.cloud.neon.tech'
external-dns.alpha.kubernetes.io/hostname: neon-proxy-scram-legacy.eta.us-west-2.aws.neon.tech
httpsPort: 443

metrics:
enabled: true
serviceMonitor:
enabled: true
selector:
release: kube-prometheus-stack
#metrics:
# enabled: true
# serviceMonitor:
# enabled: true
# selector:
# release: kube-prometheus-stack

extraManifests:
- apiVersion: operator.victoriametrics.com/v1beta1
@@ -1,16 +1,32 @@
# Helm chart values for neon-proxy-scram.
# This is a YAML-formatted file.

deploymentStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 100%
maxUnavailable: 50%

# Delay the kill signal by 7 days (7 * 24 * 60 * 60)
# The pod(s) will stay in Terminating, keeps the existing connections
# but doesn't receive new ones
containerLifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 604800"]
terminationGracePeriodSeconds: 604800

image:
repository: neondatabase/neon

settings:
authBackend: "console"
authEndpoint: "http://console-release.local/management/api/v2"
authEndpoint: "http://neon-internal-api.aws.neon.tech/management/api/v2"
domain: "*.us-west-2.aws.neon.tech"
sentryEnvironment: "production"
wssPort: 8443
metricCollectionEndpoint: "http://console-release.local/billing/api/v1/usage_events"
metricCollectionEndpoint: "http://neon-internal-api.aws.neon.tech/billing/api/v1/usage_events"
metricCollectionInterval: "10min"

# -- Additional labels for neon-proxy pods
@@ -1,56 +0,0 @@
# Helm chart values for neon-storage-broker
podLabels:
neon_env: production
neon_service: storage-broker

# Use L4 LB
service:
# service.annotations -- Annotations to add to the service
annotations:
service.beta.kubernetes.io/aws-load-balancer-type: external # use newer AWS Load Balancer Controller
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
service.beta.kubernetes.io/aws-load-balancer-scheme: internal # deploy LB to private subnet
# assign service to this name at external-dns
external-dns.alpha.kubernetes.io/hostname: storage-broker.prod.local
# service.type -- Service type
type: LoadBalancer
# service.port -- broker listen port
port: 50051

ingress:
enabled: false

metrics:
enabled: true
serviceMonitor:
enabled: true
selector:
release: kube-prometheus-stack

extraManifests:
- apiVersion: operator.victoriametrics.com/v1beta1
kind: VMServiceScrape
metadata:
name: "{{ include \"neon-storage-broker.fullname\" . }}"
labels:
helm.sh/chart: neon-storage-broker-{{ .Chart.Version }}
app.kubernetes.io/name: neon-storage-broker
app.kubernetes.io/instance: neon-storage-broker
app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
app.kubernetes.io/managed-by: Helm
namespace: "{{ .Release.Namespace }}"
spec:
selector:
matchLabels:
app.kubernetes.io/name: "neon-storage-broker"
endpoints:
- port: broker
path: /metrics
interval: 10s
scrapeTimeout: 10s
namespaceSelector:
matchNames:
- "{{ .Release.Namespace }}"

settings:
sentryEnvironment: "production"
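These deleted values belonged to the old production storage-broker deployment; its `VMServiceScrape` pulled Prometheus metrics from the `broker` port every 10 seconds. For reference, a manual spot-check of that endpoint would have looked roughly like the following; the in-cluster DNS name is an assumption based on the chart's fullname and release-namespace templates:

```bash
# Hit the broker's /metrics path the same way the VictoriaMetrics scrape did.
curl -s http://neon-storage-broker.<release-namespace>.svc.cluster.local:50051/metrics | head
```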
.github/workflows/build_and_test.yml (vendored, 599 changed lines)
@@ -1,4 +1,4 @@
|
||||
name: Test and Deploy
|
||||
name: Build and Test
|
||||
|
||||
on:
|
||||
push:
|
||||
@@ -19,10 +19,12 @@ concurrency:
|
||||
env:
|
||||
RUST_BACKTRACE: 1
|
||||
COPT: '-Werror'
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||
|
||||
jobs:
|
||||
tag:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
||||
outputs:
|
||||
build-tag: ${{steps.build-tag.outputs.tag}}
|
||||
@@ -50,9 +52,9 @@ jobs:
|
||||
id: build-tag
|
||||
|
||||
check-codestyle-python:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cloud:pinned
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||
options: --init
|
||||
|
||||
steps:
|
||||
@@ -85,7 +87,7 @@ jobs:
|
||||
run: poetry run mypy .
|
||||
|
||||
check-codestyle-rust:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, large ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||
options: --init
|
||||
@@ -97,16 +99,16 @@ jobs:
|
||||
submodules: true
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Restore cargo deps cache
|
||||
id: cache_cargo
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry/
|
||||
!~/.cargo/registry/src
|
||||
~/.cargo/git/
|
||||
target/
|
||||
key: v1-${{ runner.os }}-cargo-clippy-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
|
||||
# Disabled for now
|
||||
# - name: Restore cargo deps cache
|
||||
# id: cache_cargo
|
||||
# uses: actions/cache@v3
|
||||
# with:
|
||||
# path: |
|
||||
# !~/.cargo/registry/src
|
||||
# ~/.cargo/git/
|
||||
# target/
|
||||
# key: v1-${{ runner.os }}-cargo-clippy-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
|
||||
|
||||
# Some of our rust modules use FFI and need those to be checked
|
||||
- name: Get postgres headers
|
||||
@@ -133,7 +135,7 @@ jobs:
|
||||
run: cargo deny check
|
||||
|
||||
build-neon:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, large ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||
options: --init
|
||||
@@ -141,7 +143,6 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
build_type: [ debug, release ]
|
||||
|
||||
env:
|
||||
BUILD_TYPE: ${{ matrix.build_type }}
|
||||
GIT_VERSION: ${{ github.sha }}
|
||||
@@ -194,24 +195,26 @@ jobs:
|
||||
echo "cov_prefix=${cov_prefix}" >> $GITHUB_ENV
|
||||
echo "CARGO_FEATURES=${CARGO_FEATURES}" >> $GITHUB_ENV
|
||||
echo "CARGO_FLAGS=${CARGO_FLAGS}" >> $GITHUB_ENV
|
||||
echo "CARGO_HOME=${GITHUB_WORKSPACE}/.cargo" >> $GITHUB_ENV
|
||||
|
||||
# Disabled for now
|
||||
# Don't include the ~/.cargo/registry/src directory. It contains just
|
||||
# uncompressed versions of the crates in ~/.cargo/registry/cache
|
||||
# directory, and it's faster to let 'cargo' to rebuild it from the
|
||||
# compressed crates.
|
||||
- name: Cache cargo deps
|
||||
id: cache_cargo
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry/
|
||||
!~/.cargo/registry/src
|
||||
~/.cargo/git/
|
||||
target/
|
||||
# Fall back to older versions of the key, if no cache for current Cargo.lock was found
|
||||
key: |
|
||||
v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
|
||||
v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-
|
||||
# - name: Cache cargo deps
|
||||
# id: cache_cargo
|
||||
# uses: actions/cache@v3
|
||||
# with:
|
||||
# path: |
|
||||
# ~/.cargo/registry/
|
||||
# !~/.cargo/registry/src
|
||||
# ~/.cargo/git/
|
||||
# target/
|
||||
# # Fall back to older versions of the key, if no cache for current Cargo.lock was found
|
||||
# key: |
|
||||
# v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
|
||||
# v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-
|
||||
|
||||
- name: Cache postgres v14 build
|
||||
id: cache_pg_14
|
||||
@@ -301,7 +304,7 @@ jobs:
|
||||
uses: ./.github/actions/save-coverage-data
|
||||
|
||||
regress-tests:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, large ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||
options: --init
|
||||
@@ -334,7 +337,7 @@ jobs:
|
||||
uses: ./.github/actions/save-coverage-data
|
||||
|
||||
benchmarks:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||
options: --init
|
||||
@@ -365,7 +368,7 @@ jobs:
|
||||
# while coverage is currently collected for the debug ones
|
||||
|
||||
merge-allure-report:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||
options: --init
|
||||
@@ -402,7 +405,7 @@ jobs:
|
||||
DATABASE_URL="$TEST_RESULT_CONNSTR" poetry run python3 scripts/ingest_regress_test_result.py --revision ${SHA} --reference ${GITHUB_REF} --build-type ${BUILD_TYPE} --ingest suites.json
|
||||
|
||||
coverage-report:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||
options: --init
|
||||
@@ -418,16 +421,17 @@ jobs:
|
||||
submodules: true
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Restore cargo deps cache
|
||||
id: cache_cargo
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry/
|
||||
!~/.cargo/registry/src
|
||||
~/.cargo/git/
|
||||
target/
|
||||
key: v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
|
||||
# Disabled for now
|
||||
# - name: Restore cargo deps cache
|
||||
# id: cache_cargo
|
||||
# uses: actions/cache@v3
|
||||
# with:
|
||||
# path: |
|
||||
# ~/.cargo/registry/
|
||||
# !~/.cargo/registry/src
|
||||
# ~/.cargo/git/
|
||||
# target/
|
||||
# key: v1-${{ runner.os }}-${{ matrix.build_type }}-cargo-${{ hashFiles('rust-toolchain.toml') }}-${{ hashFiles('Cargo.lock') }}
|
||||
|
||||
- name: Get Neon artifact
|
||||
uses: ./.github/actions/download
|
||||
@@ -477,7 +481,7 @@ jobs:
|
||||
}"
|
||||
|
||||
trigger-e2e-tests:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
||||
options: --init
|
||||
@@ -522,9 +526,10 @@ jobs:
|
||||
}"
|
||||
|
||||
neon-image:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, large ]
|
||||
needs: [ tag ]
|
||||
container: gcr.io/kaniko-project/executor:v1.9.0-debug
|
||||
# https://github.com/GoogleContainerTools/kaniko/issues/2005
|
||||
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
||||
defaults:
|
||||
run:
|
||||
shell: sh -eu {0}
|
||||
@@ -540,12 +545,16 @@ jobs:
|
||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
||||
|
||||
- name: Kaniko build neon
|
||||
run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --build-arg GIT_VERSION=${{ github.sha }} --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
|
||||
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}}
|
||||
|
||||
# Cleanup script fails otherwise - rm: cannot remove '/nvme/actions-runner/_work/_temp/_github_home/.ecr': Permission denied
|
||||
- name: Cleanup ECR folder
|
||||
run: rm -rf ~/.ecr
|
||||
|
||||
compute-tools-image:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, large ]
|
||||
needs: [ tag ]
|
||||
container: gcr.io/kaniko-project/executor:v1.9.0-debug
|
||||
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
||||
defaults:
|
||||
run:
|
||||
shell: sh -eu {0}
|
||||
@@ -558,11 +567,14 @@ jobs:
|
||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
||||
|
||||
- name: Kaniko build compute tools
|
||||
run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}}
|
||||
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}}
|
||||
|
||||
- name: Cleanup ECR folder
|
||||
run: rm -rf ~/.ecr
|
||||
|
||||
compute-node-image:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
container: gcr.io/kaniko-project/executor:v1.9.0-debug
|
||||
runs-on: [ self-hosted, gen3, large ]
|
||||
container: gcr.io/kaniko-project/executor:v1.7.0-debug
|
||||
needs: [ tag ]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -583,10 +595,13 @@ jobs:
|
||||
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
|
||||
|
||||
- name: Kaniko build compute node with extensions
|
||||
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-node-${{ matrix.version }} --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||
run: /kaniko/executor --reproducible --snapshotMode=redo --skip-unused-stages --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --build-arg PG_VERSION=${{ matrix.version }} --dockerfile Dockerfile.compute-node --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||
|
||||
- name: Cleanup ECR folder
|
||||
run: rm -rf ~/.ecr
|
||||
|
||||
vm-compute-node-image:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, large ]
|
||||
needs: [ tag, compute-node-image ]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -596,34 +611,31 @@ jobs:
|
||||
run:
|
||||
shell: sh -eu {0}
|
||||
env:
|
||||
VM_INFORMANT_VERSION: 0.1.1
|
||||
VM_BUILDER_VERSION: v0.4.6
|
||||
|
||||
steps:
|
||||
- name: Downloading latest vm-builder
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v1
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Downloading vm-builder
|
||||
run: |
|
||||
curl -L https://github.com/neondatabase/neonvm/releases/latest/download/vm-builder -o vm-builder
|
||||
curl -L https://github.com/neondatabase/neonvm/releases/download/$VM_BUILDER_VERSION/vm-builder -o vm-builder
|
||||
chmod +x vm-builder
|
||||
|
||||
- name: Pulling compute-node image
|
||||
run: |
|
||||
docker pull 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||
|
||||
- name: Downloading VM informant version ${{ env.VM_INFORMANT_VERSION }}
|
||||
- name: Building VM compute-node rootfs
|
||||
run: |
|
||||
curl -fL https://github.com/neondatabase/autoscaling/releases/download/${{ env.VM_INFORMANT_VERSION }}/vm-informant -o vm-informant
|
||||
chmod +x vm-informant
|
||||
|
||||
- name: Adding VM informant to compute-node image
|
||||
run: |
|
||||
ID=$(docker create 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}})
|
||||
docker cp vm-informant $ID:/bin/vm-informant
|
||||
docker commit $ID temp-vm-compute-node
|
||||
docker rm -f $ID
|
||||
docker build -t temp-vm-compute-node --build-arg SRC_IMAGE=369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}} -f Dockerfile.vm-compute-node .
|
||||
|
||||
- name: Build vm image
|
||||
run: |
|
||||
# note: as of 2023-01-12, vm-builder requires a trailing ":latest" for local images
|
||||
./vm-builder -src=temp-vm-compute-node:latest -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||
./vm-builder -use-inittab -src=temp-vm-compute-node:latest -dst=369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-${{ matrix.version }}:${{needs.tag.outputs.build-tag}}
|
||||
|
||||
- name: Pushing vm-compute-node image
|
||||
run: |
|
||||
@@ -631,7 +643,7 @@ jobs:
|
||||
|
||||
test-images:
|
||||
needs: [ tag, neon-image, compute-node-image, compute-tools-image ]
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
@@ -673,20 +685,39 @@ jobs:
|
||||
docker compose -f ./docker-compose/docker-compose.yml down
|
||||
|
||||
promote-images:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
needs: [ tag, test-images, vm-compute-node-image ]
|
||||
container: golang:1.19-bullseye
|
||||
if: github.event_name != 'workflow_dispatch'
|
||||
container: amazon/aws-cli
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
name: [ neon, compute-node-v14, vm-compute-node-v14, compute-node-v15, vm-compute-node-v15, compute-tools]
|
||||
|
||||
steps:
|
||||
- name: Promote image to latest
|
||||
- name: Install Crane & ECR helper
|
||||
if: |
|
||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
run: |
|
||||
export MANIFEST=$(aws ecr batch-get-image --repository-name ${{ matrix.name }} --image-ids imageTag=${{needs.tag.outputs.build-tag}} --query 'images[].imageManifest' --output text)
|
||||
aws ecr put-image --repository-name ${{ matrix.name }} --image-tag latest --image-manifest "$MANIFEST"
|
||||
go install github.com/google/go-containerregistry/cmd/crane@31786c6cbb82d6ec4fb8eb79cd9387905130534e # v0.11.0
|
||||
go install github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login@69c85dc22db6511932bbf119e1a0cc5c90c69a7f # v0.6.0
|
||||
|
||||
- name: Configure ECR login
|
||||
run: |
|
||||
mkdir /github/home/.docker/
|
||||
echo "{\"credsStore\":\"ecr-login\"}" > /github/home/.docker/config.json
|
||||
|
||||
- name: Add latest tag to images
|
||||
if: |
|
||||
(github.ref_name == 'main' || github.ref_name == 'release') &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
run: |
|
||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:${{needs.tag.outputs.build-tag}} latest
|
||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:${{needs.tag.outputs.build-tag}} latest
|
||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v14:${{needs.tag.outputs.build-tag}} latest
|
||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||
crane tag 369495373322.dkr.ecr.eu-central-1.amazonaws.com/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
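Compared with the old `aws ecr batch-get-image`/`put-image` dance, `crane tag` re-points the `latest` tag at the existing manifest without pulling the image. A quick hedged sanity check after the job runs (requires the same ECR credential-helper login; the build tag is a placeholder):

```bash
# Both tags should resolve to the same image digest once the promotion succeeds.
crane digest 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:latest
crane digest 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:<build-tag>
```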
|
||||
|
||||
- name: Cleanup ECR folder
|
||||
run: rm -rf ~/.ecr
|
||||
|
||||
push-docker-hub:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
@@ -776,114 +807,11 @@ jobs:
|
||||
crane tag neondatabase/compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||
crane tag neondatabase/vm-compute-node-v15:${{needs.tag.outputs.build-tag}} latest
|
||||
|
||||
calculate-deploy-targets:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
if: |
|
||||
github.ref_name == 'release' &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
outputs:
|
||||
matrix-include: ${{ steps.set-matrix.outputs.include }}
|
||||
steps:
|
||||
- id: set-matrix
|
||||
run: |
|
||||
if [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
||||
PRODUCTION='{"env_name": "production", "proxy_job": "neon-proxy", "proxy_config": "production.proxy", "storage_broker_ns": "neon-storage-broker", "storage_broker_config": "production.neon-storage-broker", "kubeconfig_secret": "PRODUCTION_KUBECONFIG_DATA", "console_api_key_secret": "NEON_PRODUCTION_API_KEY"}'
|
||||
echo "include=[$PRODUCTION]" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to 'release'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
deploy:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||
# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
|
||||
# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
|
||||
needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
|
||||
if: |
|
||||
github.ref_name == 'release' &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include: ${{fromJSON(needs.calculate-deploy-targets.outputs.matrix-include)}}
|
||||
environment:
|
||||
name: prod-old
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Redeploy
|
||||
run: |
|
||||
export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||
cd "$(pwd)/.github/ansible"
|
||||
|
||||
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
|
||||
./get_binaries.sh
|
||||
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
||||
RELEASE=true ./get_binaries.sh
|
||||
else
|
||||
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
eval $(ssh-agent)
|
||||
echo "${{ secrets.TELEPORT_SSH_KEY }}" | tr -d '\n'| base64 --decode >ssh-key
|
||||
echo "${{ secrets.TELEPORT_SSH_CERT }}" | tr -d '\n'| base64 --decode >ssh-key-cert.pub
|
||||
chmod 0600 ssh-key
|
||||
ssh-add ssh-key
|
||||
rm -f ssh-key ssh-key-cert.pub
|
||||
ANSIBLE_CONFIG=./ansible.cfg ansible-galaxy collection install sivel.toiletwater
|
||||
ANSIBLE_CONFIG=./ansible.cfg ansible-playbook deploy.yaml -i ${{ matrix.env_name }}.hosts.yaml -e CONSOLE_API_TOKEN=${{ secrets[matrix.console_api_key_secret] }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||
rm -f neon_install.tar.gz .neon_current_version
|
||||
|
||||
deploy-new:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||
# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
|
||||
# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
|
||||
needs: [ push-docker-hub, tag, regress-tests ]
|
||||
if: |
|
||||
(github.ref_name == 'main') &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
target_region: [ eu-west-1, us-east-2 ]
|
||||
environment:
|
||||
name: dev-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Redeploy
|
||||
run: |
|
||||
export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||
cd "$(pwd)/.github/ansible"
|
||||
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
|
||||
./get_binaries.sh
|
||||
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
||||
RELEASE=true ./get_binaries.sh
|
||||
else
|
||||
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
||||
exit 1
|
||||
fi
|
||||
ansible-galaxy collection install sivel.toiletwater
|
||||
ansible-playbook deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_STAGING_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||
rm -f neon_install.tar.gz .neon_current_version
|
||||
- name: Cleanup ECR folder
|
||||
run: rm -rf ~/.ecr
|
||||
|
||||
deploy-pr-test-new:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||
# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
|
||||
# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
|
||||
@@ -915,311 +843,40 @@ jobs:
|
||||
ansible-playbook deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_STAGING_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||
rm -f neon_install.tar.gz .neon_current_version
|
||||
|
||||
deploy-prod-new:
|
||||
runs-on: prod
|
||||
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||
# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
|
||||
# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
|
||||
- name: Cleanup ansible folder
|
||||
run: rm -rf ~/.ansible
|
||||
|
||||
deploy:
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||
needs: [ push-docker-hub, tag, regress-tests ]
|
||||
if: |
|
||||
(github.ref_name == 'release') &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
target_region: [ us-east-2, us-west-2, eu-central-1, ap-southeast-1 ]
|
||||
environment:
|
||||
name: prod-${{ matrix.target_region }}
|
||||
if: ( github.ref_name == 'main' || github.ref_name == 'release' ) && github.event_name != 'workflow_dispatch'
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
submodules: false
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Redeploy
|
||||
- name: Trigger deploy workflow
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||
cd "$(pwd)/.github/ansible"
|
||||
|
||||
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
|
||||
./get_binaries.sh
|
||||
gh workflow run deploy-dev.yml --ref main -f branch=${{ github.sha }} -f dockerTag=${{needs.tag.outputs.build-tag}}
|
||||
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
|
||||
RELEASE=true ./get_binaries.sh
|
||||
gh workflow run deploy-prod.yml --ref release -f branch=${{ github.sha }} -f dockerTag=${{needs.tag.outputs.build-tag}} -f disclamerAcknowledged=true
|
||||
else
|
||||
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
|
||||
exit 1
|
||||
fi
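After this branch, the job only dispatches `deploy-dev.yml` or `deploy-prod.yml`; the actual rollout happens in those workflows. A hedged way to confirm the dispatch landed, run from a checkout with `gh` authenticated:

```bash
# List the most recent run of the dispatched workflow; substitute deploy-dev.yml on main.
gh run list --workflow=deploy-prod.yml --limit 1
```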
|
||||
|
||||
ansible-galaxy collection install sivel.toiletwater
|
||||
ansible-playbook deploy.yaml -i prod.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_PRODUCTION_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||
rm -f neon_install.tar.gz .neon_current_version
|
||||
|
||||
deploy-proxy:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
||||
# Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
|
||||
needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
|
||||
if: |
|
||||
github.ref_name == 'release' &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include: ${{fromJSON(needs.calculate-deploy-targets.outputs.matrix-include)}}
|
||||
environment:
|
||||
name: prod-old
|
||||
env:
|
||||
KUBECONFIG: .kubeconfig
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Add curl
|
||||
run: apt update && apt install curl -y
|
||||
|
||||
- name: Store kubeconfig file
|
||||
run: |
|
||||
echo "${{ secrets[matrix.kubeconfig_secret] }}" | base64 --decode > ${KUBECONFIG}
|
||||
chmod 0600 ${KUBECONFIG}
|
||||
|
||||
- name: Setup helm v3
|
||||
run: |
|
||||
curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
|
||||
- name: Re-deploy proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||
helm upgrade ${{ matrix.proxy_job }} neondatabase/neon-proxy --namespace neon-proxy --install --atomic -f .github/helm-values/${{ matrix.proxy_config }}.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
helm upgrade ${{ matrix.proxy_job }}-scram neondatabase/neon-proxy --namespace neon-proxy --install --atomic -f .github/helm-values/${{ matrix.proxy_config }}-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
deploy-storage-broker:
|
||||
name: deploy storage broker on old staging and old prod
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/base:pinned
|
||||
# Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
|
||||
needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
|
||||
if: |
|
||||
github.ref_name == 'release' &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include: ${{fromJSON(needs.calculate-deploy-targets.outputs.matrix-include)}}
|
||||
environment:
|
||||
name: prod-old
|
||||
env:
|
||||
KUBECONFIG: .kubeconfig
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Add curl
|
||||
run: apt update && apt install curl -y
|
||||
|
||||
- name: Store kubeconfig file
|
||||
run: |
|
||||
echo "${{ secrets[matrix.kubeconfig_secret] }}" | base64 --decode > ${KUBECONFIG}
|
||||
chmod 0600 ${KUBECONFIG}
|
||||
|
||||
- name: Setup helm v3
|
||||
run: |
|
||||
curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
|
||||
- name: Deploy storage-broker
|
||||
run:
|
||||
helm upgrade neon-storage-broker neondatabase/neon-storage-broker --namespace ${{ matrix.storage_broker_ns }} --create-namespace --install --atomic -f .github/helm-values/${{ matrix.storage_broker_config }}.yaml --set image.tag=${{ needs.tag.outputs.build-tag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s
|
||||
|
||||
deploy-proxy-new:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||
# Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
|
||||
needs: [ push-docker-hub, tag, regress-tests ]
|
||||
if: |
|
||||
(github.ref_name == 'main') &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target_region: us-east-2
|
||||
target_cluster: dev-us-east-2-beta
|
||||
deploy_link_proxy: true
|
||||
deploy_legacy_scram_proxy: true
|
||||
- target_region: eu-west-1
|
||||
target_cluster: dev-eu-west-1-zeta
|
||||
deploy_link_proxy: false
|
||||
deploy_legacy_scram_proxy: false
|
||||
environment:
|
||||
name: dev-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure environment
|
||||
run: |
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||
|
||||
- name: Re-deploy scram proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||
helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
- name: Re-deploy link proxy
|
||||
if: matrix.deploy_link_proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||
helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
- name: Re-deploy legacy scram proxy
|
||||
if: matrix.deploy_legacy_scram_proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||
helm upgrade neon-proxy-scram-legacy neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram-legacy.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
deploy-storage-broker-dev-new:
|
||||
runs-on: [ self-hosted, dev, x64 ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||
# Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
|
||||
needs: [ push-docker-hub, tag, regress-tests ]
|
||||
if: |
|
||||
(github.ref_name == 'main') &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target_region: us-east-2
|
||||
target_cluster: dev-us-east-2-beta
|
||||
- target_region: eu-west-1
|
||||
target_cluster: dev-eu-west-1-zeta
|
||||
environment:
|
||||
name: dev-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure environment
|
||||
run: |
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||
|
||||
- name: Deploy storage-broker
|
||||
run:
|
||||
helm upgrade neon-storage-broker-lb neondatabase/neon-storage-broker --namespace neon-storage-broker-lb --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-storage-broker.yaml --set image.tag=${{ needs.tag.outputs.build-tag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s
|
||||
|
||||
deploy-proxy-prod-new:
|
||||
runs-on: prod
|
||||
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||
# Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
|
||||
needs: [ push-docker-hub, tag, regress-tests ]
|
||||
if: |
|
||||
(github.ref_name == 'release') &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target_region: us-east-2
|
||||
target_cluster: prod-us-east-2-delta
|
||||
- target_region: us-west-2
|
||||
target_cluster: prod-us-west-2-eta
|
||||
- target_region: eu-central-1
|
||||
target_cluster: prod-eu-central-1-gamma
|
||||
- target_region: ap-southeast-1
|
||||
target_cluster: prod-ap-southeast-1-epsilon
|
||||
environment:
|
||||
name: prod-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure environment
|
||||
run: |
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||
|
||||
- name: Re-deploy proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{needs.tag.outputs.build-tag}}
|
||||
helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
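# Illustrative sketch only, not part of the original workflow: `helm upgrade --atomic --wait`
# already rolls back a failed release, so an extra check is optional, but the deployed image
# tag could be confirmed with a follow-up step along these lines (this assumes kubectl is
# available in the runner image and that the chart creates a Deployment named neon-proxy-scram,
# which is an assumption rather than something stated in this diff):
#   - name: Verify deployed proxy image (hypothetical)
#     run: |
#       kubectl --namespace neon-proxy get deployment neon-proxy-scram \
#         -o jsonpath='{.spec.template.spec.containers[0].image}'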
|
||||
|
||||
deploy-storage-broker-prod-new:
|
||||
runs-on: prod
|
||||
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||
# Compute image isn't strictly required for proxy deploy, but let's still wait for it to run all deploy jobs consistently.
|
||||
needs: [ push-docker-hub, tag, regress-tests ]
|
||||
if: |
|
||||
(github.ref_name == 'release') &&
|
||||
github.event_name != 'workflow_dispatch'
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target_region: us-east-2
|
||||
target_cluster: prod-us-east-2-delta
|
||||
- target_region: us-west-2
|
||||
target_cluster: prod-us-west-2-eta
|
||||
- target_region: eu-central-1
|
||||
target_cluster: prod-eu-central-1-gamma
|
||||
- target_region: ap-southeast-1
|
||||
target_cluster: prod-ap-southeast-1-epsilon
|
||||
environment:
|
||||
name: prod-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure environment
|
||||
run: |
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||
|
||||
- name: Deploy storage-broker
|
||||
run:
|
||||
helm upgrade neon-storage-broker-lb neondatabase/neon-storage-broker --namespace neon-storage-broker-lb --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-storage-broker.yaml --set image.tag=${{ needs.tag.outputs.build-tag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s
|
||||
|
||||
promote-compatibility-data:
runs-on: [ self-hosted, dev, x64 ]
runs-on: [ self-hosted, gen3, small ]
container:
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
options: --init
needs: [ deploy, deploy-proxy ]
needs: [ push-docker-hub, tag, regress-tests ]
if: github.ref_name == 'release' && github.event_name != 'workflow_dispatch'
steps:
- name: Promote compatibility snapshot for the release

.github/workflows/deploy-dev.yml (vendored, new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
name: Neon Deploy dev
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
dockerTag:
|
||||
description: 'Docker tag to deploy'
|
||||
required: true
|
||||
type: string
|
||||
branch:
|
||||
description: 'Branch or commit used for deploy scripts and configs'
|
||||
required: true
|
||||
type: string
|
||||
default: 'main'
|
||||
deployStorage:
|
||||
description: 'Deploy storage'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
deployProxy:
|
||||
description: 'Deploy proxy'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
deployStorageBroker:
|
||||
description: 'Deploy storage-broker'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
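# Usage sketch, not part of the file: a workflow_dispatch run with these inputs can be started
# from the GitHub CLI, for example (the tag value is a placeholder; booleans are passed as strings):
#   gh workflow run deploy-dev.yml --ref main \
#     -f dockerTag=<some-build-tag> -f branch=main \
#     -f deployStorage=true -f deployProxy=true -f deployStorageBroker=false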
|
||||
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||
|
||||
concurrency:
|
||||
group: deploy-dev
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
deploy-storage-new:
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||
options: --user root --privileged
|
||||
if: inputs.deployStorage
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
target_region: [ eu-west-1, us-east-2 ]
|
||||
environment:
|
||||
name: dev-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
ref: ${{ inputs.branch }}
|
||||
|
||||
- name: Redeploy
|
||||
run: |
|
||||
export DOCKER_TAG=${{ inputs.dockerTag }}
|
||||
cd "$(pwd)/.github/ansible"
|
||||
|
||||
./get_binaries.sh
|
||||
|
||||
ansible-galaxy collection install sivel.toiletwater
|
||||
ansible-playbook -v deploy.yaml -i staging.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_STAGING_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||
rm -f neon_install.tar.gz .neon_current_version
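# Illustrative note, not part of the original workflow: when iterating on deploy.yaml locally,
# the same playbook can be exercised without changing any hosts by adding Ansible's built-in
# dry-run flags, e.g. (the inventory shown is just one of the matrix values, and the extra
# vars from the step above still have to be provided):
#   ansible-playbook -v deploy.yaml -i staging.us-east-2.hosts.yaml -e @ssm_config --check --diff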
|
||||
|
||||
- name: Cleanup ansible folder
|
||||
run: rm -rf ~/.ansible
|
||||
|
||||
deploy-proxy-new:
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||
if: inputs.deployProxy
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target_region: us-east-2
|
||||
target_cluster: dev-us-east-2-beta
|
||||
deploy_link_proxy: true
|
||||
deploy_legacy_scram_proxy: true
|
||||
- target_region: eu-west-1
|
||||
target_cluster: dev-eu-west-1-zeta
|
||||
deploy_link_proxy: false
|
||||
deploy_legacy_scram_proxy: false
|
||||
environment:
|
||||
name: dev-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
ref: ${{ inputs.branch }}
|
||||
|
||||
- name: Configure AWS Credentials
|
||||
uses: aws-actions/configure-aws-credentials@v1-node16
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::369495373322:role/github-runner
|
||||
aws-region: eu-central-1
|
||||
role-skip-session-tagging: true
|
||||
role-duration-seconds: 1800
|
||||
|
||||
- name: Configure environment
|
||||
run: |
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||
|
||||
- name: Re-deploy scram proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||
helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
- name: Re-deploy link proxy
|
||||
if: matrix.deploy_link_proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||
helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
- name: Re-deploy legacy scram proxy
|
||||
if: matrix.deploy_legacy_scram_proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||
helm upgrade neon-proxy-scram-legacy neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram-legacy.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
- name: Cleanup helm folder
|
||||
run: rm -rf ~/.cache
|
||||
|
||||
deploy-storage-broker-new:
|
||||
runs-on: [ self-hosted, gen3, small ]
|
||||
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
|
||||
if: inputs.deployStorageBroker
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target_region: us-east-2
|
||||
target_cluster: dev-us-east-2-beta
|
||||
- target_region: eu-west-1
|
||||
target_cluster: dev-eu-west-1-zeta
|
||||
environment:
|
||||
name: dev-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
ref: ${{ inputs.branch }}
|
||||
|
||||
- name: Configure AWS Credentials
|
||||
uses: aws-actions/configure-aws-credentials@v1-node16
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::369495373322:role/github-runner
|
||||
aws-region: eu-central-1
|
||||
role-skip-session-tagging: true
|
||||
role-duration-seconds: 1800
|
||||
|
||||
- name: Configure environment
|
||||
run: |
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||
|
||||
- name: Deploy storage-broker
|
||||
run:
|
||||
helm upgrade neon-storage-broker-lb neondatabase/neon-storage-broker --namespace neon-storage-broker-lb --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-storage-broker.yaml --set image.tag=${{ inputs.dockerTag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s
|
||||
|
||||
- name: Cleanup helm folder
|
||||
run: rm -rf ~/.cache
|
||||
.github/workflows/deploy-prod.yml (vendored, new file, 167 lines)
@@ -0,0 +1,167 @@
|
||||
name: Neon Deploy prod
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
dockerTag:
|
||||
description: 'Docker tag to deploy'
|
||||
required: true
|
||||
type: string
|
||||
branch:
|
||||
description: 'Branch or commit used for deploy scripts and configs'
|
||||
required: true
|
||||
type: string
|
||||
default: 'release'
|
||||
deployStorage:
|
||||
description: 'Deploy storage'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
deployProxy:
|
||||
description: 'Deploy proxy'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
deployStorageBroker:
|
||||
description: 'Deploy storage-broker'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
disclamerAcknowledged:
|
||||
description: 'I confirm that there is an emergency and I can not use regular release workflow'
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
concurrency:
|
||||
group: deploy-prod
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
deploy-prod-new:
|
||||
runs-on: prod
|
||||
container:
|
||||
image: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||
options: --user root --privileged
|
||||
if: inputs.deployStorage && inputs.disclamerAcknowledged
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
target_region: [ us-east-2, us-west-2, eu-central-1, ap-southeast-1 ]
|
||||
environment:
|
||||
name: prod-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
ref: ${{ inputs.branch }}
|
||||
|
||||
- name: Redeploy
|
||||
run: |
|
||||
export DOCKER_TAG=${{ inputs.dockerTag }}
|
||||
cd "$(pwd)/.github/ansible"
|
||||
|
||||
./get_binaries.sh
|
||||
|
||||
ansible-galaxy collection install sivel.toiletwater
|
||||
ansible-playbook -v deploy.yaml -i prod.${{ matrix.target_region }}.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{ secrets.NEON_PRODUCTION_API_KEY }} -e SENTRY_URL_PAGESERVER=${{ secrets.SENTRY_URL_PAGESERVER }} -e SENTRY_URL_SAFEKEEPER=${{ secrets.SENTRY_URL_SAFEKEEPER }}
|
||||
rm -f neon_install.tar.gz .neon_current_version
|
||||
|
||||
deploy-proxy-prod-new:
|
||||
runs-on: prod
|
||||
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||
if: inputs.deployProxy && inputs.disclamerAcknowledged
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target_region: us-east-2
|
||||
target_cluster: prod-us-east-2-delta
|
||||
deploy_link_proxy: true
|
||||
deploy_legacy_scram_proxy: false
|
||||
- target_region: us-west-2
|
||||
target_cluster: prod-us-west-2-eta
|
||||
deploy_link_proxy: false
|
||||
deploy_legacy_scram_proxy: true
|
||||
- target_region: eu-central-1
|
||||
target_cluster: prod-eu-central-1-gamma
|
||||
deploy_link_proxy: false
|
||||
deploy_legacy_scram_proxy: false
|
||||
- target_region: ap-southeast-1
|
||||
target_cluster: prod-ap-southeast-1-epsilon
|
||||
deploy_link_proxy: false
|
||||
deploy_legacy_scram_proxy: false
|
||||
environment:
|
||||
name: prod-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
ref: ${{ inputs.branch }}
|
||||
|
||||
- name: Configure environment
|
||||
run: |
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||
|
||||
- name: Re-deploy scram proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||
helm upgrade neon-proxy-scram neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
- name: Re-deploy link proxy
|
||||
if: matrix.deploy_link_proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||
helm upgrade neon-proxy-link neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-link.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
- name: Re-deploy legacy scram proxy
|
||||
if: matrix.deploy_legacy_scram_proxy
|
||||
run: |
|
||||
DOCKER_TAG=${{ inputs.dockerTag }}
|
||||
helm upgrade neon-proxy-scram-legacy neondatabase/neon-proxy --namespace neon-proxy --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-proxy-scram-legacy.yaml --set image.tag=${DOCKER_TAG} --set settings.sentryUrl=${{ secrets.SENTRY_URL_PROXY }} --wait --timeout 15m0s
|
||||
|
||||
deploy-storage-broker-prod-new:
|
||||
runs-on: prod
|
||||
container: 093970136003.dkr.ecr.eu-central-1.amazonaws.com/ansible:latest
|
||||
if: inputs.deployStorageBroker && inputs.disclamerAcknowledged
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target_region: us-east-2
|
||||
target_cluster: prod-us-east-2-delta
|
||||
- target_region: us-west-2
|
||||
target_cluster: prod-us-west-2-eta
|
||||
- target_region: eu-central-1
|
||||
target_cluster: prod-eu-central-1-gamma
|
||||
- target_region: ap-southeast-1
|
||||
target_cluster: prod-ap-southeast-1-epsilon
|
||||
environment:
|
||||
name: prod-${{ matrix.target_region }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
submodules: true
|
||||
fetch-depth: 0
|
||||
ref: ${{ inputs.branch }}
|
||||
|
||||
- name: Configure environment
|
||||
run: |
|
||||
helm repo add neondatabase https://neondatabase.github.io/helm-charts
|
||||
aws --region ${{ matrix.target_region }} eks update-kubeconfig --name ${{ matrix.target_cluster }}
|
||||
|
||||
- name: Deploy storage-broker
|
||||
run:
|
||||
helm upgrade neon-storage-broker-lb neondatabase/neon-storage-broker --namespace neon-storage-broker-lb --create-namespace --install --atomic -f .github/helm-values/${{ matrix.target_cluster }}.neon-storage-broker.yaml --set image.tag=${{ inputs.dockerTag }} --set settings.sentryUrl=${{ secrets.SENTRY_URL_BROKER }} --wait --timeout 5m0s
|
||||
.github/workflows/neon_extra_builds.yml (vendored, 46 changed lines)
@@ -4,6 +4,7 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
|
||||
defaults:
|
||||
run:
|
||||
@@ -20,6 +21,7 @@ env:
|
||||
|
||||
jobs:
|
||||
check-macos-build:
|
||||
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-extra-build-macos')
|
||||
timeout-minutes: 90
|
||||
runs-on: macos-latest
|
||||
|
||||
@@ -93,11 +95,16 @@ jobs:
|
||||
run: ./run_clippy.sh
|
||||
|
||||
gather-rust-build-stats:
|
||||
timeout-minutes: 90
|
||||
runs-on: ubuntu-latest
|
||||
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-extra-build-stats')
|
||||
runs-on: [ self-hosted, gen3, large ]
|
||||
container:
|
||||
image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
|
||||
options: --init
|
||||
|
||||
env:
|
||||
BUILD_TYPE: release
|
||||
# remove the cachepot wrapper and build without crate caches
|
||||
RUSTC_WRAPPER: ""
|
||||
# builds with incremental compilation produce partial results,
# so do not attempt to cache this build; also disable incremental compilation
|
||||
CARGO_INCREMENTAL: 0
|
||||
@@ -109,11 +116,6 @@ jobs:
|
||||
submodules: true
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Install Ubuntu postgres dependencies
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install build-essential libreadline-dev zlib1g-dev flex bison libseccomp-dev libssl-dev protobuf-compiler
|
||||
|
||||
# Some of our rust modules use FFI and need those to be checked
|
||||
- name: Get postgres headers
|
||||
run: make postgres-headers -j$(nproc)
|
||||
@@ -122,7 +124,31 @@ jobs:
|
||||
run: cargo build --all --release --timings
|
||||
|
||||
- name: Upload the build stats
|
||||
uses: actions/upload-artifact@v3
|
||||
id: upload-stats
|
||||
env:
|
||||
BUCKET: neon-github-public-dev
|
||||
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
|
||||
run: |
|
||||
REPORT_URL=https://${BUCKET}.s3.amazonaws.com/build-stats/${SHA}/${GITHUB_RUN_ID}/cargo-timing.html
|
||||
aws s3 cp --only-show-errors ./target/cargo-timings/cargo-timing.html "s3://${BUCKET}/build-stats/${SHA}/${GITHUB_RUN_ID}/"
|
||||
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Publish build stats report
|
||||
uses: actions/github-script@v6
|
||||
env:
|
||||
REPORT_URL: ${{ steps.upload-stats.outputs.report-url }}
|
||||
SHA: ${{ github.event.pull_request.head.sha || github.sha }}
|
||||
with:
|
||||
name: neon-${{ runner.os }}-release-build-stats
|
||||
path: ./target/cargo-timings/
|
||||
script: |
|
||||
const { REPORT_URL, SHA } = process.env
|
||||
|
||||
await github.rest.repos.createCommitStatus({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
sha: `${SHA}`,
|
||||
state: 'success',
|
||||
target_url: `${REPORT_URL}`,
|
||||
context: `Build stats (release)`,
|
||||
})
|
||||
|
||||
.github/workflows/release.yml (vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
name: Create Release Branch

on:
schedule:
- cron: '0 10 * * 2'

jobs:
create_release_branch:
runs-on: [ubuntu-latest]

steps:
- name: Check out code
uses: actions/checkout@v3
with:
ref: main

- name: Get current date
id: date
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

- name: Create release branch
run: git checkout -b releases/${{ steps.date.outputs.date }}

- name: Push new branch
run: git push origin releases/${{ steps.date.outputs.date }}

- name: Create pull request into release
uses: thomaseizinger/create-pull-request@e3972219c86a56550fb70708d96800d8e24ba862 # 1.3.0
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
head: releases/${{ steps.date.outputs.date }}
base: release
title: Release ${{ steps.date.outputs.date }}
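# Manual equivalent, shown only as an illustrative sketch; the branch naming and base branch
# follow the steps above, while the `gh` usage is an assumption rather than part of this workflow:
#   git checkout main && git pull
#   git checkout -b releases/$(date +'%Y-%m-%d')
#   git push origin releases/$(date +'%Y-%m-%d')
#   gh pr create --head releases/$(date +'%Y-%m-%d') --base release --title "Release $(date +'%Y-%m-%d')"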
.gitignore (vendored, 2 changed lines)
@@ -18,3 +18,5 @@ test_output/
|
||||
*.o
*.so
*.Po

tmp

Cargo.lock (generated, 758 changed lines): file diff suppressed because it is too large
Cargo.toml (22 changed lines)
@@ -7,6 +7,7 @@ members = [
|
||||
"safekeeper",
|
||||
"storage_broker",
|
||||
"workspace_hack",
|
||||
"trace",
|
||||
"libs/*",
|
||||
]
|
||||
|
||||
@@ -31,12 +32,15 @@ bstr = "1.0"
|
||||
byteorder = "1.4"
|
||||
bytes = "1.0"
|
||||
chrono = { version = "0.4", default-features = false, features = ["clock"] }
|
||||
clap = "4.0"
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
close_fds = "0.3.2"
|
||||
comfy-table = "6.1"
|
||||
const_format = "0.2"
|
||||
crc32c = "0.6"
|
||||
crossbeam-utils = "0.8.5"
|
||||
either = "1.8"
|
||||
enum-map = "2.4.2"
|
||||
enumset = "1.0.12"
|
||||
fail = "0.5.0"
|
||||
fs2 = "0.4.3"
|
||||
futures = "0.3"
|
||||
@@ -44,6 +48,7 @@ futures-core = "0.3"
|
||||
futures-util = "0.3"
|
||||
git-version = "0.3"
|
||||
hashbrown = "0.13"
|
||||
hashlink = "0.8.1"
|
||||
hex = "0.4"
|
||||
hex-literal = "0.3"
|
||||
hmac = "0.12.1"
|
||||
@@ -61,6 +66,9 @@ nix = "0.26"
|
||||
notify = "5.0.0"
|
||||
num-traits = "0.2.15"
|
||||
once_cell = "1.13"
|
||||
opentelemetry = "0.18.0"
|
||||
opentelemetry-otlp = { version = "0.11.0", default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
|
||||
opentelemetry-semantic-conventions = "0.10.0"
|
||||
parking_lot = "0.12"
|
||||
pin-project-lite = "0.2"
|
||||
prometheus = {version = "0.13", default_features=false, features = ["process"]} # removes protobuf dependency
|
||||
@@ -68,6 +76,8 @@ prost = "0.11"
|
||||
rand = "0.8"
|
||||
regex = "1.4"
|
||||
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls"] }
|
||||
reqwest-tracing = { version = "0.4.0", features = ["opentelemetry_0_18"] }
|
||||
reqwest-middleware = "0.2.0"
|
||||
routerify = "3"
|
||||
rpds = "0.12.0"
|
||||
rustls = "0.20"
|
||||
@@ -84,6 +94,7 @@ socket2 = "0.4.4"
|
||||
strum = "0.24"
|
||||
strum_macros = "0.24"
|
||||
svg_fmt = "0.4.1"
|
||||
sync_wrapper = "0.1.2"
|
||||
tar = "0.4"
|
||||
thiserror = "1.0"
|
||||
tls-listener = { version = "0.6", features = ["rustls", "hyper-h1"] }
|
||||
@@ -96,6 +107,7 @@ toml = "0.5"
|
||||
toml_edit = { version = "0.17", features = ["easy"] }
|
||||
tonic = {version = "0.8", features = ["tls", "tls-roots"]}
|
||||
tracing = "0.1"
|
||||
tracing-opentelemetry = "0.18.0"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
url = "2.2"
|
||||
uuid = { version = "1.2", features = ["v4", "serde"] }
|
||||
@@ -114,6 +126,9 @@ postgres-types = { git = "https://github.com/neondatabase/rust-postgres.git", re
|
||||
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="43e6db254a97fdecbce33d8bc0890accfd74495e" }
|
||||
tokio-tar = { git = "https://github.com/neondatabase/tokio-tar.git", rev="404df61437de0feef49ba2ccdbdd94eb8ad6e142" }
|
||||
|
||||
## Other git libraries
|
||||
heapless = { default-features=false, features=[], git = "https://github.com/japaric/heapless.git", rev = "644653bf3b831c6bb4963be2de24804acf5e5001" } # upstream release pending
|
||||
|
||||
## Local libraries
|
||||
consumption_metrics = { version = "0.1", path = "./libs/consumption_metrics/" }
|
||||
metrics = { version = "0.1", path = "./libs/metrics/" }
|
||||
@@ -123,9 +138,12 @@ postgres_ffi = { version = "0.1", path = "./libs/postgres_ffi/" }
|
||||
pq_proto = { version = "0.1", path = "./libs/pq_proto/" }
|
||||
remote_storage = { version = "0.1", path = "./libs/remote_storage/" }
|
||||
safekeeper_api = { version = "0.1", path = "./libs/safekeeper_api" }
|
||||
safekeeper = { path = "./safekeeper/" }
|
||||
storage_broker = { version = "0.1", path = "./storage_broker/" } # Note: main broker code is inside the binary crate, so linking with the library shouldn't be heavy.
|
||||
tenant_size_model = { version = "0.1", path = "./libs/tenant_size_model/" }
|
||||
tracing-utils = { version = "0.1", path = "./libs/tracing-utils/" }
|
||||
utils = { version = "0.1", path = "./libs/utils/" }
|
||||
walproposer = { version = "0.1", path = "./libs/walproposer/" }
|
||||
|
||||
## Common library dependency
|
||||
workspace_hack = { version = "0.1", path = "./workspace_hack/" }
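# Illustrative sketch, not part of this diff: assuming the version pins above sit under
# [workspace.dependencies], an individual member crate opts into them from its own Cargo.toml
# with the `workspace = true` shorthand, e.g.:
#   [dependencies]
#   clap = { workspace = true }
#   utils = { workspace = true }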
|
||||
@@ -134,7 +152,7 @@ workspace_hack = { version = "0.1", path = "./workspace_hack/" }
|
||||
criterion = "0.4"
|
||||
rcgen = "0.10"
|
||||
rstest = "0.16"
|
||||
tempfile = "3.2"
|
||||
tempfile = "3.4"
|
||||
tonic-build = "0.8"
|
||||
|
||||
# This is only needed for proxy's tests.
|
||||
|
||||
Dockerfile.compute-node (new file, 421 lines)
@@ -0,0 +1,421 @@
|
||||
ARG PG_VERSION
|
||||
ARG REPOSITORY=369495373322.dkr.ecr.eu-central-1.amazonaws.com
|
||||
ARG IMAGE=rust
|
||||
ARG TAG=pinned
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "build-deps"
|
||||
#
|
||||
#########################################################################################
|
||||
FROM debian:bullseye-slim AS build-deps
|
||||
RUN apt update && \
|
||||
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
|
||||
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev \
|
||||
libicu-dev libxslt1-dev
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "pg-build"
|
||||
# Build Postgres from the neon postgres repository.
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS pg-build
|
||||
ARG PG_VERSION
|
||||
COPY vendor/postgres-${PG_VERSION} postgres
|
||||
RUN cd postgres && \
|
||||
./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp --with-icu \
|
||||
--with-libxml --with-libxslt && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
|
||||
# Install headers
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \
|
||||
# Enable some of contrib extensions
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/autoinc.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/insert_username.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/moddatetime.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/refint.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/xml2.control
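# Context for the `trusted = true` lines above (comment only, no effect on the build): marking a
# control file as trusted allows a database owner without superuser rights to run, for example,
#   CREATE EXTENSION bloom;
# presumably so that ordinary compute users can install these contrib extensions themselves.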
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "postgis-build"
|
||||
# Build PostGIS from the upstream PostGIS mirror.
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS postgis-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
RUN apt update && \
|
||||
apt install -y cmake gdal-bin libboost-dev libboost-thread-dev libboost-filesystem-dev \
|
||||
libboost-system-dev libboost-iostreams-dev libboost-program-options-dev libboost-timer-dev \
|
||||
libcgal-dev libgdal-dev libgmp-dev libmpfr-dev libopenscenegraph-dev libprotobuf-c-dev \
|
||||
protobuf-c-compiler xsltproc
|
||||
|
||||
# SFCGAL > 1.3 requires CGAL > 5.2, Bullseye's libcgal-dev is 5.2
|
||||
RUN wget https://gitlab.com/Oslandia/SFCGAL/-/archive/v1.3.10/SFCGAL-v1.3.10.tar.gz -O SFCGAL.tar.gz && \
|
||||
mkdir sfcgal-src && cd sfcgal-src && tar xvzf ../SFCGAL.tar.gz --strip-components=1 -C . && \
|
||||
cmake . && make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
DESTDIR=/sfcgal make install -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
make clean && cp -R /sfcgal/* /
|
||||
|
||||
ENV PATH "/usr/local/pgsql/bin:$PATH"
|
||||
|
||||
RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.2.tar.gz -O postgis.tar.gz && \
|
||||
mkdir postgis-src && cd postgis-src && tar xvzf ../postgis.tar.gz --strip-components=1 -C . && \
|
||||
./autogen.sh && \
|
||||
./configure --with-sfcgal=/usr/local/bin/sfcgal-config && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
cd extensions/postgis && \
|
||||
make clean && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_sfcgal.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control
|
||||
|
||||
RUN wget https://github.com/pgRouting/pgrouting/archive/v3.4.2.tar.gz -O pgrouting.tar.gz && \
|
||||
mkdir pgrouting-src && cd pgrouting-src && tar xvzf ../pgrouting.tar.gz --strip-components=1 -C . && \
|
||||
mkdir build && \
|
||||
cd build && \
|
||||
cmake .. && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrouting.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "plv8-build"
|
||||
# Build plv8
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS plv8-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
RUN apt update && \
|
||||
apt install -y ninja-build python3-dev libncurses5 binutils clang
|
||||
|
||||
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.5.tar.gz -O plv8.tar.gz && \
|
||||
mkdir plv8-src && cd plv8-src && tar xvzf ../plv8.tar.gz --strip-components=1 -C . && \
|
||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
rm -rf /plv8-* && \
|
||||
find /usr/local/pgsql/ -name "plv8-*.so" | xargs strip && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plcoffee.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plls.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "h3-pg-build"
|
||||
# Build h3_pg
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS h3-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
# packaged cmake is too old
|
||||
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
|
||||
-q -O /tmp/cmake-install.sh \
|
||||
&& chmod u+x /tmp/cmake-install.sh \
|
||||
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
|
||||
&& rm /tmp/cmake-install.sh
|
||||
|
||||
RUN wget https://github.com/uber/h3/archive/refs/tags/v4.1.0.tar.gz -O h3.tar.gz && \
|
||||
mkdir h3-src && cd h3-src && tar xvzf ../h3.tar.gz --strip-components=1 -C . && \
|
||||
mkdir build && cd build && \
|
||||
cmake .. -DCMAKE_BUILD_TYPE=Release && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
DESTDIR=/h3 make install && \
|
||||
cp -R /h3/usr / && \
|
||||
rm -rf build
|
||||
|
||||
RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.1.2.tar.gz -O h3-pg.tar.gz && \
|
||||
mkdir h3-pg-src && cd h3-pg-src && tar xvzf ../h3-pg.tar.gz --strip-components=1 -C . && \
|
||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "unit-pg-build"
|
||||
# compile unit extension
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS unit-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
RUN wget https://github.com/df7cb/postgresql-unit/archive/refs/tags/7.7.tar.gz -O postgresql-unit.tar.gz && \
|
||||
mkdir postgresql-unit-src && cd postgresql-unit-src && tar xvzf ../postgresql-unit.tar.gz --strip-components=1 -C . && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
# unit extension's "create extension" script relies on absolute install path to fill some reference tables.
|
||||
# We move the extension from '/usr/local/pgsql/' to '/usr/local/' after it is built, so we need to adjust the path.
|
||||
# This one-liner removes pgsql/ part of the path.
|
||||
# NOTE: Other extensions that rely on MODULEDIR variable after building phase will need the same fix.
|
||||
find /usr/local/pgsql/share/extension/ -name "unit*.sql" -print0 | xargs -0 sed -i "s|pgsql/||g" && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/unit.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "vector-pg-build"
|
||||
# compile pgvector extension
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS vector-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
RUN wget https://github.com/pgvector/pgvector/archive/refs/tags/v0.4.0.tar.gz -O pgvector.tar.gz && \
|
||||
mkdir pgvector-src && cd pgvector-src && tar xvzf ../pgvector.tar.gz --strip-components=1 -C . && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/vector.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "pgjwt-pg-build"
|
||||
# compile pgjwt extension
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS pgjwt-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
# 9742dab1b2f297ad3811120db7b21451bca2d3c9 made on 13/11/2021
|
||||
RUN wget https://github.com/michelp/pgjwt/archive/9742dab1b2f297ad3811120db7b21451bca2d3c9.tar.gz -O pgjwt.tar.gz && \
|
||||
mkdir pgjwt-src && cd pgjwt-src && tar xvzf ../pgjwt.tar.gz --strip-components=1 -C . && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgjwt.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "hypopg-pg-build"
|
||||
# compile hypopg extension
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS hypopg-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
RUN wget https://github.com/HypoPG/hypopg/archive/refs/tags/1.3.1.tar.gz -O hypopg.tar.gz && \
|
||||
mkdir hypopg-src && cd hypopg-src && tar xvzf ../hypopg.tar.gz --strip-components=1 -C . && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/hypopg.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "pg-hashids-pg-build"
|
||||
# compile pg_hashids extension
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS pg-hashids-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
RUN wget https://github.com/iCyberon/pg_hashids/archive/refs/tags/v1.2.1.tar.gz -O pg_hashids.tar.gz && \
|
||||
mkdir pg_hashids-src && cd pg_hashids-src && tar xvzf ../pg_hashids.tar.gz --strip-components=1 -C . && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pg_hashids.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "rum-pg-build"
|
||||
# compile rum extension
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS rum-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
RUN wget https://github.com/postgrespro/rum/archive/refs/tags/1.3.13.tar.gz -O rum.tar.gz && \
|
||||
mkdir rum-src && cd rum-src && tar xvzf ../rum.tar.gz --strip-components=1 -C . && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config USE_PGXS=1 && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/rum.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "pgtap-pg-build"
|
||||
# compile pgTAP extension
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS pgtap-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
RUN wget https://github.com/theory/pgtap/archive/refs/tags/v1.2.0.tar.gz -O pgtap.tar.gz && \
|
||||
mkdir pgtap-src && cd pgtap-src && tar xvzf ../pgtap.tar.gz --strip-components=1 -C . && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install PG_CONFIG=/usr/local/pgsql/bin/pg_config && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgtap.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "rust extensions"
|
||||
# This layer is used to build `pgx` deps
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS rust-extensions-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl libclang-dev cmake && \
|
||||
useradd -ms /bin/bash nonroot -b /home
|
||||
|
||||
ENV HOME=/home/nonroot
|
||||
ENV PATH="/home/nonroot/.cargo/bin:/usr/local/pgsql/bin/:$PATH"
|
||||
USER nonroot
|
||||
WORKDIR /home/nonroot
|
||||
ARG PG_VERSION
|
||||
|
||||
RUN curl -sSO https://static.rust-lang.org/rustup/dist/$(uname -m)-unknown-linux-gnu/rustup-init && \
|
||||
chmod +x rustup-init && \
|
||||
./rustup-init -y --no-modify-path --profile minimal --default-toolchain stable && \
|
||||
rm rustup-init && \
|
||||
cargo install --git https://github.com/vadim2404/pgx --branch neon_abi_v0.6.1 --locked cargo-pgx && \
|
||||
/bin/bash -c 'cargo pgx init --pg${PG_VERSION:1}=/usr/local/pgsql/bin/pg_config'
|
||||
|
||||
USER root
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "pg-jsonschema-pg-build"
|
||||
# Compile "pg_jsonschema" extension
|
||||
#
|
||||
#########################################################################################
|
||||
|
||||
FROM rust-extensions-build AS pg-jsonschema-pg-build
|
||||
|
||||
RUN git clone --depth=1 --single-branch --branch neon_abi_v0.1.4 https://github.com/vadim2404/pg_jsonschema/ && \
|
||||
cd pg_jsonschema && \
|
||||
cargo pgx install --release && \
|
||||
# this is needed to enable the extension because it uses the untrusted C language
|
||||
sed -i 's/superuser = false/superuser = true/g' /usr/local/pgsql/share/extension/pg_jsonschema.control && \
|
||||
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_jsonschema.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "pg-graphql-pg-build"
|
||||
# Compile "pg_graphql" extension
|
||||
#
|
||||
#########################################################################################
|
||||
|
||||
FROM rust-extensions-build AS pg-graphql-pg-build
|
||||
|
||||
RUN git clone --depth=1 --single-branch --branch neon_abi_v1.1.0 https://github.com/vadim2404/pg_graphql && \
|
||||
cd pg_graphql && \
|
||||
cargo pgx install --release && \
|
||||
# this is needed to enable the extension because it uses the untrusted C language
|
||||
sed -i 's/superuser = false/superuser = true/g' /usr/local/pgsql/share/extension/pg_graphql.control && \
|
||||
echo "trusted = true" >> /usr/local/pgsql/share/extension/pg_graphql.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "neon-pg-ext-build"
|
||||
# compile neon extensions
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS neon-pg-ext-build
|
||||
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=postgis-build /sfcgal/* /
|
||||
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=h3-pg-build /h3/usr /
|
||||
COPY --from=unit-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=vector-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=pgjwt-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=pg-jsonschema-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=pg-graphql-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=hypopg-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=pg-hashids-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=rum-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=pgtap-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY pgxn/ pgxn/
|
||||
|
||||
RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
||||
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
|
||||
-C pgxn/neon \
|
||||
-s install
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Compile and run the Neon-specific `compute_ctl` binary
|
||||
#
|
||||
#########################################################################################
|
||||
FROM $REPOSITORY/$IMAGE:$TAG AS compute-tools
|
||||
USER nonroot
|
||||
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
|
||||
COPY --chown=nonroot . .
|
||||
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto
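# Note, illustrative only: `release-line-debug-size-lto` is a custom Cargo profile, so it has to
# be declared in the workspace Cargo.toml; such a profile would look roughly like the following
# (the exact settings the project uses are not shown in this diff):
#   [profile.release-line-debug-size-lto]
#   inherits = "release"
#   debug = 1        # assumption: limited debug info (line tables)
#   opt-level = "z"  # assumption: optimize for size
#   lto = true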
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Clean up postgres folder before inclusion
|
||||
#
|
||||
#########################################################################################
|
||||
FROM neon-pg-ext-build AS postgres-cleanup-layer
|
||||
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql
|
||||
|
||||
# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
|
||||
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp
|
||||
|
||||
# Remove headers that we won't need anymore - we've completed installation of all extensions
|
||||
RUN rm -r /usr/local/pgsql/include
|
||||
|
||||
# Remove static postgresql libraries - all compilation is finished, so we
|
||||
# can now remove these files - they must be included in other binaries by now
|
||||
# if they were to be used by other libraries.
|
||||
RUN rm /usr/local/pgsql/lib/lib*.a
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Final layer
|
||||
# Put it all together into the final image
|
||||
#
|
||||
#########################################################################################
|
||||
FROM debian:bullseye-slim
|
||||
# Add user postgres
|
||||
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
|
||||
echo "postgres:test_console_pass" | chpasswd && \
|
||||
mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
|
||||
chown -R postgres:postgres /var/db/postgres && \
|
||||
chmod 0750 /var/db/postgres/compute && \
|
||||
echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig && \
|
||||
# create folder for file cache
|
||||
mkdir -p -m 777 /neon/cache
|
||||
|
||||
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
|
||||
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
|
||||
|
||||
# Install:
|
||||
# libreadline8 for psql
|
||||
# libicu67, locales for collations (including ICU)
|
||||
# libossp-uuid16 for extension ossp-uuid
|
||||
# libgeos, libgdal, libsfcgal1, libproj and libprotobuf-c1 for PostGIS
|
||||
# libxml2, libxslt1.1 for xml2
|
||||
RUN apt update && \
|
||||
apt install --no-install-recommends -y \
|
||||
locales \
|
||||
libicu67 \
|
||||
libreadline8 \
|
||||
libossp-uuid16 \
|
||||
libgeos-c1v5 \
|
||||
libgdal28 \
|
||||
libproj19 \
|
||||
libprotobuf-c1 \
|
||||
libsfcgal1 \
|
||||
libxml2 \
|
||||
libxslt1.1 \
|
||||
gdb && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \
|
||||
localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
USER postgres
|
||||
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
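# Illustrative build invocation for this Dockerfile (argument values are examples, not taken
# from the CI configuration):
#   docker build -f Dockerfile.compute-node \
#     --build-arg PG_VERSION=v15 --build-arg TAG=pinned \
#     -t neon-compute-node:local .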
|
||||
@@ -1,220 +0,0 @@
|
||||
#
|
||||
# This file is identical to the Dockerfile.compute-node-v15 file
|
||||
# except for the version of Postgres that is built.
|
||||
#
|
||||
|
||||
ARG TAG=pinned
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "build-deps"
|
||||
#
|
||||
#########################################################################################
|
||||
FROM debian:bullseye-slim AS build-deps
|
||||
RUN apt update && \
|
||||
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
|
||||
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "pg-build"
|
||||
# Build Postgres from the neon postgres repository.
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS pg-build
|
||||
COPY vendor/postgres-v14 postgres
|
||||
RUN cd postgres && \
|
||||
./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
|
||||
# Install headers
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \
|
||||
# Enable some of contrib extensions
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "postgis-build"
|
||||
# Build PostGIS from the upstream PostGIS mirror.
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS postgis-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
RUN apt update && \
|
||||
apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc
|
||||
|
||||
RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.1.tar.gz && \
|
||||
tar xvzf postgis-3.3.1.tar.gz && \
|
||||
cd postgis-3.3.1 && \
|
||||
./autogen.sh && \
|
||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||
./configure && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
cd extensions/postgis && \
|
||||
make clean && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "plv8-build"
|
||||
# Build plv8
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS plv8-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
RUN apt update && \
|
||||
apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5 binutils
|
||||
|
||||
# https://github.com/plv8/plv8/issues/475:
|
||||
# v8 uses gold for linking and sets `--thread-count=4` which breaks
|
||||
# gold version <= 1.35 (https://sourceware.org/bugzilla/show_bug.cgi?id=23607)
|
||||
# Install newer gold version manually as debian-testing binutils version updates
|
||||
# libc version, which in turn breaks other extensions built against a non-testing libc.
|
||||
RUN wget https://ftp.gnu.org/gnu/binutils/binutils-2.38.tar.gz && \
|
||||
tar xvzf binutils-2.38.tar.gz && \
|
||||
cd binutils-2.38 && \
|
||||
cd libiberty && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
cd ../bfd && ./configure && make bfdver.h && \
|
||||
cd ../gold && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && make install && \
|
||||
cp /usr/local/bin/ld.gold /usr/bin/gold
|
||||
|
||||
# Sed is used to patch for https://github.com/plv8/plv8/issues/503
|
||||
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
|
||||
tar xvzf v3.1.4.tar.gz && \
|
||||
cd plv8-3.1.4 && \
|
||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||
sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
|
||||
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
rm -rf /plv8-* && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "h3-pg-build"
|
||||
# Build h3_pg
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS h3-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
# packaged cmake is too old
|
||||
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
|
||||
-q -O /tmp/cmake-install.sh \
|
||||
&& chmod u+x /tmp/cmake-install.sh \
|
||||
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
|
||||
&& rm /tmp/cmake-install.sh
|
||||
|
||||
RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
|
||||
tar xvzf h3.tgz && \
|
||||
cd h3-4.0.1 && \
|
||||
mkdir build && \
|
||||
cd build && \
|
||||
cmake .. -DCMAKE_BUILD_TYPE=Release && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
DESTDIR=/h3 make install && \
|
||||
cp -R /h3/usr / && \
|
||||
rm -rf build
|
||||
|
||||
RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
|
||||
tar xvzf h3-pg.tgz && \
|
||||
cd h3-pg-4.0.1 && \
|
||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "neon-pg-ext-build"
|
||||
# compile neon extensions
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS neon-pg-ext-build
|
||||
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=h3-pg-build /h3/usr /
|
||||
COPY pgxn/ pgxn/
|
||||
|
||||
RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
||||
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
|
||||
-C pgxn/neon \
|
||||
-s install
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Compile and run the Neon-specific `compute_ctl` binary
|
||||
#
|
||||
#########################################################################################
|
||||
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
|
||||
USER nonroot
|
||||
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
|
||||
COPY --chown=nonroot . .
|
||||
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Clean up postgres folder before inclusion
|
||||
#
|
||||
#########################################################################################
|
||||
FROM neon-pg-ext-build AS postgres-cleanup-layer
|
||||
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql
|
||||
|
||||
# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
|
||||
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp
|
||||
|
||||
# Remove headers that we won't need anymore - we've completed installation of all extensions
|
||||
RUN rm -r /usr/local/pgsql/include
|
||||
|
||||
# Remove static postgresql libraries - all compilation is finished, so we
|
||||
# can now remove these files - they must be included in other binaries by now
|
||||
# if they were to be used by other libraries.
|
||||
RUN rm /usr/local/pgsql/lib/lib*.a
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Final layer
|
||||
# Put it all together into the final image
|
||||
#
|
||||
#########################################################################################
|
||||
FROM debian:bullseye-slim
|
||||
# Add user postgres
|
||||
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
|
||||
echo "postgres:test_console_pass" | chpasswd && \
|
||||
mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
|
||||
chown -R postgres:postgres /var/db/postgres && \
|
||||
chmod 0750 /var/db/postgres/compute && \
|
||||
echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig
|
||||
|
||||
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
|
||||
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
|
||||
|
||||
# Install:
|
||||
# libreadline8 for psql
|
||||
# libossp-uuid16 for extension ossp-uuid
|
||||
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
|
||||
RUN apt update && \
|
||||
apt install --no-install-recommends -y \
|
||||
libreadline8 \
|
||||
libossp-uuid16 \
|
||||
libgeos-c1v5 \
|
||||
libgdal28 \
|
||||
libproj19 \
|
||||
libprotobuf-c1 \
|
||||
gdb && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
USER postgres
|
||||
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
|
||||
@@ -1,220 +0,0 @@
|
||||
#
|
||||
# This file is identical to the Dockerfile.compute-node-v14 file
|
||||
# except for the version of Postgres that is built.
|
||||
#
|
||||
|
||||
ARG TAG=pinned
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "build-deps"
|
||||
#
|
||||
#########################################################################################
|
||||
FROM debian:bullseye-slim AS build-deps
|
||||
RUN apt update && \
|
||||
apt install -y git autoconf automake libtool build-essential bison flex libreadline-dev \
|
||||
zlib1g-dev libxml2-dev libcurl4-openssl-dev libossp-uuid-dev wget pkg-config libssl-dev
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "pg-build"
|
||||
# Build Postgres from the neon postgres repository.
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS pg-build
|
||||
COPY vendor/postgres-v15 postgres
|
||||
RUN cd postgres && \
|
||||
./configure CFLAGS='-O2 -g3' --enable-debug --with-openssl --with-uuid=ossp && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s install && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C contrib/ install && \
|
||||
# Install headers
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/include install && \
|
||||
make MAKELEVEL=0 -j $(getconf _NPROCESSORS_ONLN) -s -C src/interfaces/libpq install && \
|
||||
# Enable some of contrib extensions
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/bloom.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgrowlocks.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/intagg.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/pgstattuple.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/earthdistance.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "postgis-build"
|
||||
# Build PostGIS from the upstream PostGIS mirror.
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS postgis-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
RUN apt update && \
|
||||
apt install -y gdal-bin libgdal-dev libprotobuf-c-dev protobuf-c-compiler xsltproc
|
||||
|
||||
RUN wget https://download.osgeo.org/postgis/source/postgis-3.3.1.tar.gz && \
|
||||
tar xvzf postgis-3.3.1.tar.gz && \
|
||||
cd postgis-3.3.1 && \
|
||||
./autogen.sh && \
|
||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||
./configure && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
cd extensions/postgis && \
|
||||
make clean && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_raster.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_tiger_geocoder.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/postgis_topology.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/address_standardizer_data_us.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "plv8-build"
|
||||
# Build plv8
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS plv8-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
RUN apt update && \
|
||||
apt install -y ninja-build python3-dev libc++-dev libc++abi-dev libncurses5 binutils
|
||||
|
||||
# https://github.com/plv8/plv8/issues/475:
|
||||
# v8 uses gold for linking and sets `--thread-count=4` which breaks
|
||||
# gold version <= 1.35 (https://sourceware.org/bugzilla/show_bug.cgi?id=23607)
|
||||
# Install newer gold version manually as debian-testing binutils version updates
|
||||
# libc version, which in turn breaks other extensions built against a non-testing libc.
|
||||
RUN wget https://ftp.gnu.org/gnu/binutils/binutils-2.38.tar.gz && \
|
||||
tar xvzf binutils-2.38.tar.gz && \
|
||||
cd binutils-2.38 && \
|
||||
cd libiberty && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
cd ../bfd && ./configure && make bfdver.h && \
|
||||
cd ../gold && ./configure && make -j $(getconf _NPROCESSORS_ONLN) && make install && \
|
||||
cp /usr/local/bin/ld.gold /usr/bin/gold
|
||||
|
||||
# Sed is used to patch for https://github.com/plv8/plv8/issues/503
|
||||
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
|
||||
tar xvzf v3.1.4.tar.gz && \
|
||||
cd plv8-3.1.4 && \
|
||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||
sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
|
||||
make DOCKER=1 -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
rm -rf /plv8-* && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/plv8.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "h3-pg-build"
|
||||
# Build h3_pg
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS h3-pg-build
|
||||
COPY --from=pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
|
||||
# packaged cmake is too old
|
||||
RUN wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh \
|
||||
-q -O /tmp/cmake-install.sh \
|
||||
&& chmod u+x /tmp/cmake-install.sh \
|
||||
&& /tmp/cmake-install.sh --skip-license --prefix=/usr/local/ \
|
||||
&& rm /tmp/cmake-install.sh
|
||||
|
||||
RUN wget https://github.com/uber/h3/archive/refs/tags/v4.0.1.tar.gz -O h3.tgz && \
|
||||
tar xvzf h3.tgz && \
|
||||
cd h3-4.0.1 && \
|
||||
mkdir build && \
|
||||
cd build && \
|
||||
cmake .. -DCMAKE_BUILD_TYPE=Release && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
DESTDIR=/h3 make install && \
|
||||
cp -R /h3/usr / && \
|
||||
rm -rf build
|
||||
|
||||
RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3-pg.tgz && \
|
||||
tar xvzf h3-pg.tgz && \
|
||||
cd h3-pg-4.0.1 && \
|
||||
export PATH="/usr/local/pgsql/bin:$PATH" && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) && \
|
||||
make -j $(getconf _NPROCESSORS_ONLN) install && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3.control && \
|
||||
echo 'trusted = true' >> /usr/local/pgsql/share/extension/h3_postgis.control
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Layer "neon-pg-ext-build"
|
||||
# compile neon extensions
|
||||
#
|
||||
#########################################################################################
|
||||
FROM build-deps AS neon-pg-ext-build
|
||||
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
|
||||
COPY --from=h3-pg-build /h3/usr /
|
||||
COPY pgxn/ pgxn/
|
||||
|
||||
RUN make -j $(getconf _NPROCESSORS_ONLN) \
|
||||
PG_CONFIG=/usr/local/pgsql/bin/pg_config \
|
||||
-C pgxn/neon \
|
||||
-s install
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Compile and run the Neon-specific `compute_ctl` binary
|
||||
#
|
||||
#########################################################################################
|
||||
FROM 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:$TAG AS compute-tools
|
||||
USER nonroot
|
||||
# Copy entire project to get Cargo.* files with proper dependencies for the whole project
|
||||
COPY --chown=nonroot . .
|
||||
RUN cd compute_tools && cargo build --locked --profile release-line-debug-size-lto
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Clean up postgres folder before inclusion
|
||||
#
|
||||
#########################################################################################
|
||||
FROM neon-pg-ext-build AS postgres-cleanup-layer
|
||||
COPY --from=neon-pg-ext-build /usr/local/pgsql /usr/local/pgsql
|
||||
|
||||
# Remove binaries from /bin/ that we won't use (or would manually copy & install otherwise)
|
||||
RUN cd /usr/local/pgsql/bin && rm ecpg raster2pgsql shp2pgsql pgtopo_export pgtopo_import pgsql2shp
|
||||
|
||||
# Remove headers that we won't need anymore - we've completed installation of all extensions
|
||||
RUN rm -r /usr/local/pgsql/include
|
||||
|
||||
# Remove static postgresql libraries - all compilation is finished, so we
|
||||
# can now remove these files - they must be included in other binaries by now
|
||||
# if they were to be used by other libraries.
|
||||
RUN rm /usr/local/pgsql/lib/lib*.a
|
||||
|
||||
#########################################################################################
|
||||
#
|
||||
# Final layer
|
||||
# Put it all together into the final image
|
||||
#
|
||||
#########################################################################################
|
||||
FROM debian:bullseye-slim
|
||||
# Add user postgres
|
||||
RUN mkdir /var/db && useradd -m -d /var/db/postgres postgres && \
|
||||
echo "postgres:test_console_pass" | chpasswd && \
|
||||
mkdir /var/db/postgres/compute && mkdir /var/db/postgres/specs && \
|
||||
chown -R postgres:postgres /var/db/postgres && \
|
||||
chmod 0750 /var/db/postgres/compute && \
|
||||
echo '/usr/local/lib' >> /etc/ld.so.conf && /sbin/ldconfig
|
||||
|
||||
COPY --from=postgres-cleanup-layer --chown=postgres /usr/local/pgsql /usr/local
|
||||
COPY --from=compute-tools --chown=postgres /home/nonroot/target/release-line-debug-size-lto/compute_ctl /usr/local/bin/compute_ctl
|
||||
|
||||
# Install:
|
||||
# libreadline8 for psql
|
||||
# libossp-uuid16 for extension ossp-uuid
|
||||
# libgeos, libgdal, libproj and libprotobuf-c1 for PostGIS
|
||||
RUN apt update && \
|
||||
apt install --no-install-recommends -y \
|
||||
libreadline8 \
|
||||
libossp-uuid16 \
|
||||
libgeos-c1v5 \
|
||||
libgdal28 \
|
||||
libproj19 \
|
||||
libprotobuf-c1 \
|
||||
gdb && \
|
||||
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
||||
|
||||
USER postgres
|
||||
ENTRYPOINT ["/usr/local/bin/compute_ctl"]
|
||||
25 Dockerfile.vm-compute-node Normal file
@@ -0,0 +1,25 @@
|
||||
# Note: this file *mostly* just builds on Dockerfile.compute-node
|
||||
|
||||
ARG SRC_IMAGE
|
||||
ARG VM_INFORMANT_VERSION=v0.1.6
|
||||
|
||||
# Pull VM informant and set up inittab
|
||||
FROM neondatabase/vm-informant:$VM_INFORMANT_VERSION as informant
|
||||
|
||||
RUN set -e \
|
||||
&& rm -f /etc/inittab \
|
||||
&& touch /etc/inittab
|
||||
|
||||
RUN set -e \
|
||||
&& echo "::respawn:su vm-informant -c '/usr/local/bin/vm-informant --auto-restart'" >> /etc/inittab
|
||||
|
||||
# Combine, starting from non-VM compute node image.
|
||||
FROM $SRC_IMAGE as base
|
||||
|
||||
# Temporarily set user back to root so we can run adduser
|
||||
USER root
|
||||
RUN adduser vm-informant --disabled-password --no-create-home
|
||||
USER postgres
|
||||
|
||||
COPY --from=informant /etc/inittab /etc/inittab
|
||||
COPY --from=informant /usr/bin/vm-informant /usr/local/bin/vm-informant
|
||||
20 Makefile
@@ -39,6 +39,8 @@ endif
|
||||
# been no changes to the files. Changing the mtime triggers an
|
||||
# unnecessary rebuild of 'postgres_ffi'.
|
||||
PG_CONFIGURE_OPTS += INSTALL='$(ROOT_PROJECT_DIR)/scripts/ninstall.sh -C'
|
||||
PG_CONFIGURE_OPTS += CC=clang
|
||||
PG_CONFIGURE_OPTS += CXX=clang++
|
||||
|
||||
# Choose whether we should be silent or verbose
|
||||
CARGO_BUILD_FLAGS += --$(if $(filter s,$(MAKEFLAGS)),quiet,verbose)
|
||||
@@ -134,11 +136,23 @@ neon-pg-ext-%: postgres-%
|
||||
-C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \
|
||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile install
|
||||
|
||||
.PHONY:
|
||||
neon-pg-ext-walproposer:
|
||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/v15/bin/pg_config CFLAGS='$(PG_CFLAGS) $(COPT)' \
|
||||
-C $(POSTGRES_INSTALL_DIR)/build/neon-v15 \
|
||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile install
|
||||
|
||||
.PHONY: neon-pg-ext-clean-%
|
||||
neon-pg-ext-clean-%:
|
||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean
|
||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon_walredo-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile clean
|
||||
$(MAKE) -C $(POSTGRES_INSTALL_DIR)/pgxn/neon_test_utils-$* -f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile clean
|
||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
|
||||
-C $(POSTGRES_INSTALL_DIR)/build/neon-$* \
|
||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon/Makefile clean
|
||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
|
||||
-C $(POSTGRES_INSTALL_DIR)/build/neon-walredo-$* \
|
||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon_walredo/Makefile clean
|
||||
$(MAKE) PG_CONFIG=$(POSTGRES_INSTALL_DIR)/$*/bin/pg_config \
|
||||
-C $(POSTGRES_INSTALL_DIR)/build/neon-test-utils-$* \
|
||||
-f $(ROOT_PROJECT_DIR)/pgxn/neon_test_utils/Makefile clean
|
||||
|
||||
.PHONY: neon-pg-ext
|
||||
neon-pg-ext: \
|
||||
|
||||
17 README.md
@@ -34,6 +34,11 @@ dnf install flex bison readline-devel zlib-devel openssl-devel \
|
||||
libseccomp-devel perl clang cmake postgresql postgresql-contrib protobuf-compiler \
|
||||
protobuf-devel
|
||||
```
|
||||
* On Arch based systems, these packages are needed:
|
||||
```bash
|
||||
pacman -S base-devel readline zlib libseccomp openssl clang \
|
||||
postgresql-libs cmake postgresql protobuf
|
||||
```
|
||||
|
||||
2. [Install Rust](https://www.rust-lang.org/tools/install)
|
||||
```
|
||||
@@ -83,9 +88,10 @@ cd neon
|
||||
|
||||
# The preferred and default is to make a debug build. This will create a
|
||||
# demonstrably slower build than a release build. For a release build,
|
||||
# use "BUILD_TYPE=release make -j`nproc`"
|
||||
# use "BUILD_TYPE=release make -j`nproc` -s"
|
||||
# Remove -s for the verbose build log
|
||||
|
||||
make -j`nproc`
|
||||
make -j`nproc` -s
|
||||
```
|
||||
|
||||
#### Building on OSX
|
||||
@@ -99,16 +105,17 @@ cd neon
|
||||
|
||||
# The preferred and default is to make a debug build. This will create a
|
||||
# demonstrably slower build than a release build. For a release build,
|
||||
# use "BUILD_TYPE=release make -j`sysctl -n hw.logicalcpu`"
|
||||
# use "BUILD_TYPE=release make -j`sysctl -n hw.logicalcpu` -s"
|
||||
# Remove -s for the verbose build log
|
||||
|
||||
make -j`sysctl -n hw.logicalcpu`
|
||||
make -j`sysctl -n hw.logicalcpu` -s
|
||||
```
|
||||
|
||||
#### Dependency installation notes
|
||||
To run the `psql` client, install the `postgresql-client` package or modify `PATH` and `LD_LIBRARY_PATH` to include `pg_install/bin` and `pg_install/lib`, respectively.
|
||||
|
||||
To run the integration tests or Python scripts (not required to use the code), install
|
||||
Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry](https://python-poetry.org/)) in the project directory.
|
||||
Python (3.9 or higher), and install python3 packages using `./scripts/pysync` (requires [poetry>=1.3](https://python-poetry.org/)) in the project directory.
|
||||
|
||||
|
||||
#### Running neon database
|
||||
|
||||
@@ -11,15 +11,19 @@ clap.workspace = true
|
||||
futures.workspace = true
|
||||
hyper = { workspace = true, features = ["full"] }
|
||||
notify.workspace = true
|
||||
opentelemetry.workspace = true
|
||||
postgres.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
tar.workspace = true
|
||||
reqwest = { workspace = true, features = ["json"] }
|
||||
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
|
||||
tokio-postgres.workspace = true
|
||||
tracing.workspace = true
|
||||
tracing-opentelemetry.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
tracing-utils.workspace = true
|
||||
url.workspace = true
|
||||
|
||||
workspace_hack.workspace = true
|
||||
|
||||
@@ -44,7 +44,6 @@ use tracing::{error, info};
|
||||
|
||||
use compute_tools::compute::{ComputeMetrics, ComputeNode, ComputeState, ComputeStatus};
|
||||
use compute_tools::http::api::launch_http_server;
|
||||
use compute_tools::informant::spawn_vm_informant_if_present;
|
||||
use compute_tools::logger::*;
|
||||
use compute_tools::monitor::launch_monitor;
|
||||
use compute_tools::params::*;
|
||||
@@ -53,7 +52,7 @@ use compute_tools::spec::*;
|
||||
use url::Url;
|
||||
|
||||
fn main() -> Result<()> {
|
||||
init_logger(DEFAULT_LOG_LEVEL)?;
|
||||
init_tracing_and_logging(DEFAULT_LOG_LEVEL)?;
|
||||
|
||||
let matches = cli().get_matches();
|
||||
|
||||
@@ -66,6 +65,9 @@ fn main() -> Result<()> {
|
||||
let spec = matches.get_one::<String>("spec");
|
||||
let spec_path = matches.get_one::<String>("spec-path");
|
||||
|
||||
let compute_id = matches.get_one::<String>("compute-id");
|
||||
let control_plane_uri = matches.get_one::<String>("control-plane-uri");
|
||||
|
||||
// Try to use just 'postgres' if no path is provided
|
||||
let pgbin = matches.get_one::<String>("pgbin").unwrap();
|
||||
|
||||
@@ -78,12 +80,54 @@ fn main() -> Result<()> {
|
||||
let path = Path::new(sp);
|
||||
let file = File::open(path)?;
|
||||
serde_json::from_reader(file)?
|
||||
} else if let Some(id) = compute_id {
|
||||
if let Some(cp_base) = control_plane_uri {
|
||||
let cp_uri = format!("{cp_base}/management/api/v1/{id}/spec");
|
||||
let jwt: String = match std::env::var("NEON_CONSOLE_JWT") {
|
||||
Ok(v) => v,
|
||||
Err(_) => "".to_string(),
|
||||
};
|
||||
|
||||
reqwest::blocking::Client::new()
|
||||
.get(cp_uri)
|
||||
.header("Authorization", jwt)
|
||||
.send()?
|
||||
.json()?
|
||||
} else {
|
||||
panic!(
|
||||
"must specify --control-plane-uri \"{:#?}\" and --compute-id \"{:#?}\"",
|
||||
control_plane_uri, compute_id
|
||||
);
|
||||
}
|
||||
} else {
|
||||
panic!("cluster spec should be provided via --spec or --spec-path argument");
|
||||
panic!("compute spec should be provided via --spec or --spec-path argument");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Extract OpenTelemetry context for the startup actions from the spec, and
|
||||
// attach it to the current tracing context.
|
||||
//
|
||||
// This is used to propagate the context for the 'start_compute' operation
|
||||
// from the neon control plane. This allows linking together the wider
|
||||
// 'start_compute' operation that creates the compute container, with the
|
||||
// startup actions here within the container.
|
||||
//
|
||||
// Switch to the startup context here, and exit it once the startup has
|
||||
// completed and Postgres is up and running.
|
||||
//
|
||||
// NOTE: This is supposed to only cover the *startup* actions. Once
|
||||
// postgres is configured and up-and-running, we exit this span. Any other
|
||||
// actions that are performed on incoming HTTP requests, for example, are
|
||||
// performed in separate spans.
|
||||
let startup_context_guard = if let Some(ref carrier) = spec.startup_tracing_context {
|
||||
use opentelemetry::propagation::TextMapPropagator;
|
||||
use opentelemetry::sdk::propagation::TraceContextPropagator;
|
||||
Some(TraceContextPropagator::new().extract(carrier).attach())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let pageserver_connstr = spec
|
||||
.cluster
|
||||
.settings
|
||||
@@ -118,8 +162,6 @@ fn main() -> Result<()> {
|
||||
// requests, while configuration is still in progress.
|
||||
let _http_handle = launch_http_server(&compute).expect("cannot launch http endpoint thread");
|
||||
let _monitor_handle = launch_monitor(&compute).expect("cannot launch compute monitor thread");
|
||||
// Also spawn the thread responsible for handling the VM informant -- if it's present
|
||||
let _vm_informant_handle = spawn_vm_informant_if_present().expect("cannot launch VM informant");
|
||||
|
||||
// Start Postgres
|
||||
let mut delay_exit = false;
|
||||
@@ -140,6 +182,9 @@ fn main() -> Result<()> {
|
||||
// Wait for the child Postgres process forever. In this state Ctrl+C will
|
||||
// propagate to Postgres and it will be shut down as well.
|
||||
if let Some(mut pg) = pg {
|
||||
// Startup is finished, exit the startup tracing span
|
||||
drop(startup_context_guard);
|
||||
|
||||
let ecode = pg
|
||||
.wait()
|
||||
.expect("failed to start waiting on Postgres process");
|
||||
@@ -159,6 +204,10 @@ fn main() -> Result<()> {
|
||||
info!("shutting down");
|
||||
}
|
||||
|
||||
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
||||
// pending traces before we exit.
|
||||
tracing_utils::shutdown_tracing();
|
||||
|
||||
exit(exit_code.unwrap_or(1))
|
||||
}
|
||||
|
||||
@@ -200,6 +249,18 @@ fn cli() -> clap::Command {
|
||||
.long("spec-path")
|
||||
.value_name("SPEC_PATH"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("compute-id")
|
||||
.short('i')
|
||||
.long("compute-id")
|
||||
.value_name("COMPUTE_ID"),
|
||||
)
|
||||
.arg(
|
||||
Arg::new("control-plane-uri")
|
||||
.short('p')
|
||||
.long("control-plane-uri")
|
||||
.value_name("CONTROL_PLANE"),
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -3,16 +3,21 @@ use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
|
||||
use crate::compute::ComputeNode;
|
||||
use anyhow::Result;
|
||||
use hyper::service::{make_service_fn, service_fn};
|
||||
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
||||
use serde_json;
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::compute::ComputeNode;
|
||||
use tracing_utils::http::OtelName;
|
||||
|
||||
// Service function to handle all available routes.
|
||||
async fn routes(req: Request<Body>, compute: Arc<ComputeNode>) -> Response<Body> {
|
||||
async fn routes(req: Request<Body>, compute: &Arc<ComputeNode>) -> Response<Body> {
|
||||
//
|
||||
// NOTE: The URI path is currently included in traces. That's OK because
|
||||
// it doesn't contain any variable parts or sensitive information. But
|
||||
// please keep that in mind if you change the routing here.
|
||||
//
|
||||
match (req.method(), req.uri().path()) {
|
||||
// Serialized compute state.
|
||||
(&Method::GET, "/status") => {
|
||||
@@ -30,7 +35,7 @@ async fn routes(req: Request<Body>, compute: Arc<ComputeNode>) -> Response<Body>
|
||||
|
||||
(&Method::POST, "/check_writability") => {
|
||||
info!("serving /check_writability POST request");
|
||||
let res = crate::checker::check_writability(&compute).await;
|
||||
let res = crate::checker::check_writability(compute).await;
|
||||
match res {
|
||||
Ok(_) => Response::new(Body::from("true")),
|
||||
Err(e) => Response::new(Body::from(e.to_string())),
|
||||
@@ -56,7 +61,19 @@ async fn serve(state: Arc<ComputeNode>) {
|
||||
async move {
|
||||
Ok::<_, Infallible>(service_fn(move |req: Request<Body>| {
|
||||
let state = state.clone();
|
||||
async move { Ok::<_, Infallible>(routes(req, state).await) }
|
||||
async move {
|
||||
Ok::<_, Infallible>(
|
||||
// NOTE: We include the URI path in the string. It
|
||||
// doesn't contain any variable parts or sensitive
|
||||
// information in this API.
|
||||
tracing_utils::http::tracing_handler(
|
||||
req,
|
||||
|req| routes(req, &state),
|
||||
OtelName::UriPath,
|
||||
)
|
||||
.await,
|
||||
)
|
||||
}
|
||||
}))
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
use std::path::Path;
|
||||
use std::process;
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
use tracing::{info, warn};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
|
||||
const VM_INFORMANT_PATH: &str = "/bin/vm-informant";
|
||||
const RESTART_INFORMANT_AFTER_MILLIS: u64 = 5000;
|
||||
|
||||
/// Launch a thread to start the VM informant if it's present (and restart, on failure)
|
||||
pub fn spawn_vm_informant_if_present() -> Result<Option<thread::JoinHandle<()>>> {
|
||||
let exists = Path::new(VM_INFORMANT_PATH)
|
||||
.try_exists()
|
||||
.context("could not check if path exists")?;
|
||||
|
||||
if !exists {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
Ok(Some(
|
||||
thread::Builder::new()
|
||||
.name("run-vm-informant".into())
|
||||
.spawn(move || run_informant())?,
|
||||
))
|
||||
}
|
||||
|
||||
fn run_informant() -> ! {
|
||||
let restart_wait = Duration::from_millis(RESTART_INFORMANT_AFTER_MILLIS);
|
||||
|
||||
info!("starting VM informant");
|
||||
|
||||
loop {
|
||||
let mut cmd = process::Command::new(VM_INFORMANT_PATH);
|
||||
// Block on subprocess:
|
||||
let result = cmd.status();
|
||||
|
||||
match result {
|
||||
Err(e) => warn!("failed to run VM informant at {VM_INFORMANT_PATH:?}: {e}"),
|
||||
Ok(status) if !status.success() => {
|
||||
warn!("{VM_INFORMANT_PATH} exited with code {status:?}, retrying")
|
||||
}
|
||||
Ok(_) => info!("{VM_INFORMANT_PATH} ended gracefully (unexpectedly). Retrying"),
|
||||
}
|
||||
|
||||
// Wait before retrying
|
||||
thread::sleep(restart_wait);
|
||||
}
|
||||
}
|
||||
@@ -8,7 +8,6 @@ pub mod http;
|
||||
#[macro_use]
|
||||
pub mod logger;
|
||||
pub mod compute;
|
||||
pub mod informant;
|
||||
pub mod monitor;
|
||||
pub mod params;
|
||||
pub mod pg_helpers;
|
||||
|
||||
@@ -1,21 +1,37 @@
|
||||
use anyhow::Result;
|
||||
use tracing_opentelemetry::OpenTelemetryLayer;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::prelude::*;
|
||||
|
||||
/// Initialize `env_logger` using either `default_level` or
|
||||
/// Initialize logging to stderr, and OpenTelemetry tracing and exporter.
|
||||
///
|
||||
/// Logging is configured using either `default_log_level` or
|
||||
/// `RUST_LOG` environment variable as default log level.
|
||||
pub fn init_logger(default_level: &str) -> Result<()> {
|
||||
///
|
||||
/// OpenTelemetry is configured with OTLP/HTTP exporter. It picks up
|
||||
/// configuration from environment variables. For example, to change the destination,
|
||||
/// set `OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318`. See
|
||||
/// `tracing-utils` package description.
|
||||
///
|
||||
pub fn init_tracing_and_logging(default_log_level: &str) -> anyhow::Result<()> {
|
||||
// Initialize Logging
|
||||
let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_level));
|
||||
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));
|
||||
|
||||
let fmt_layer = tracing_subscriber::fmt::layer()
|
||||
.with_target(false)
|
||||
.with_writer(std::io::stderr);
|
||||
|
||||
// Initialize OpenTelemetry
|
||||
let otlp_layer =
|
||||
tracing_utils::init_tracing_without_runtime("compute_ctl").map(OpenTelemetryLayer::new);
|
||||
|
||||
// Put it all together
|
||||
tracing_subscriber::registry()
|
||||
.with(env_filter)
|
||||
.with(otlp_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
tracing::info!("logging and tracing started");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
|
||||
@@ -22,6 +23,8 @@ pub struct ComputeSpec {
|
||||
/// Expected cluster state at the end of transition process.
|
||||
pub cluster: Cluster,
|
||||
pub delta_operations: Option<Vec<DeltaOp>>,
|
||||
|
||||
pub startup_tracing_context: Option<HashMap<String, String>>,
|
||||
}
|
||||
|
||||
/// Cluster state seen from the perspective of the external tools
|
||||
@@ -384,13 +387,13 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
|
||||
name.pg_quote(),
|
||||
db.owner.pg_quote()
|
||||
);
|
||||
let _ = info_span!("executing", query).entered();
|
||||
let _guard = info_span!("executing", query).entered();
|
||||
client.execute(query.as_str(), &[])?;
|
||||
}
|
||||
DatabaseAction::Create => {
|
||||
let mut query: String = format!("CREATE DATABASE {} ", name.pg_quote());
|
||||
query.push_str(&db.to_pg_options());
|
||||
let _ = info_span!("executing", query).entered();
|
||||
let _guard = info_span!("executing", query).entered();
|
||||
client.execute(query.as_str(), &[])?;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -15,6 +15,7 @@ postgres.workspace = true
|
||||
regex.workspace = true
|
||||
reqwest = { workspace = true, features = ["blocking", "json"] }
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_with.workspace = true
|
||||
tar.workspace = true
|
||||
thiserror.workspace = true
|
||||
|
||||
@@ -419,6 +419,11 @@ impl PageServerNode {
|
||||
.map(|x| x.parse::<bool>())
|
||||
.transpose()
|
||||
.context("Failed to parse 'trace_read_requests' as bool")?,
|
||||
eviction_policy: settings
|
||||
.get("eviction_policy")
|
||||
.map(|x| serde_json::from_str(x))
|
||||
.transpose()
|
||||
.context("Failed to parse 'eviction_policy' json")?,
|
||||
})
|
||||
.send()?
|
||||
.error_from_body()?;
|
||||
|
||||
@@ -16,7 +16,7 @@ listen_http_addr = '127.0.0.1:9898'
|
||||
checkpoint_distance = '268435456' # in bytes
|
||||
checkpoint_timeout = '10m'
|
||||
|
||||
gc_period = '100 s'
|
||||
gc_period = '1 hour'
|
||||
gc_horizon = '67108864'
|
||||
|
||||
max_file_descriptors = '100'
|
||||
@@ -101,7 +101,7 @@ away.
|
||||
|
||||
#### gc_period
|
||||
|
||||
Interval at which garbage collection is triggered. Default is 100 s.
|
||||
Interval at which garbage collection is triggered. Default is 1 hour.
|
||||
|
||||
#### image_creation_threshold
|
||||
|
||||
@@ -109,7 +109,7 @@ L0 delta layer threshold for L1 image layer creation. Default is 3.
|
||||
|
||||
#### pitr_interval
|
||||
|
||||
WAL retention duration for PITR branching. Default is 30 days.
|
||||
WAL retention duration for PITR branching. Default is 7 days.
|
||||
|
||||
#### walreceiver_connect_timeout
|
||||
|
||||
|
||||
335 docs/synthetic-size.md Normal file
@@ -0,0 +1,335 @@
|
||||
# Synthetic size
|
||||
|
||||
Neon storage has copy-on-write branching, which makes it difficult to
|
||||
answer the question "how large is my database"? To give one reasonable
|
||||
answer, we calculate _synthetic size_ for a project.
|
||||
|
||||
The calculation is called "synthetic", because it is based purely on
|
||||
the user-visible logical size, which is the size that you would see on
|
||||
a standalone PostgreSQL installation, and the amount of WAL, which is
|
||||
also the same as what you'd see on a standalone PostgreSQL, for the
|
||||
same set of updates.
|
||||
|
||||
The synthetic size does *not* depend on the actual physical size
|
||||
consumed in the storage, or implementation details of the Neon storage
|
||||
like garbage collection, compaction and compression. There is a
|
||||
strong *correlation* between the physical size and the synthetic size,
|
||||
but the synthetic size is designed to be independent of the
|
||||
implementation details, so that any improvements we make in the
|
||||
storage system simply reduce our COGS. And vice versa: any bugs or bad
|
||||
implementation where we keep more data than we would need to, do not
|
||||
change the synthetic size or incur any costs to the user.
|
||||
|
||||
The synthetic size is calculated for the whole project. It is not
|
||||
straightforward to attribute size to individual branches. See "What is
|
||||
the size of an individual branch?" for discussion on those
|
||||
difficulties.
|
||||
|
||||
The synthetic size is designed to:
|
||||
|
||||
- Take into account the copy-on-write nature of the storage. For
|
||||
example, if you create a branch, it doesn't immediately add anything
|
||||
to the synthetic size. It starts to affect the synthetic size only
|
||||
as it diverges from the parent branch.
|
||||
|
||||
- Be independent of any implementation details of the storage, like
|
||||
garbage collection, remote storage, or compression.
|
||||
|
||||
## Terms & assumptions
|
||||
|
||||
- logical size is the size of a branch *at a given point in
|
||||
time*. It's the total size of all tables in all databases, as you
|
||||
see with "\l+" in psql for example, plus the Postgres SLRUs and some
|
||||
small amount of metadata. NOTE that currently, Neon does not include
|
||||
the SLRUs and metadata in the logical size. See comment to `get_current_logical_size_non_incremental()`.
|
||||
|
||||
- a "point in time" is defined as an LSN value. You can convert a
|
||||
timestamp to an LSN, but the storage internally works with LSNs.
|
||||
|
||||
- PITR horizon can be set per-branch.
|
||||
|
||||
- PITR horizon can be set as a time interval, e.g. 5 days or hours, or
|
||||
as amount of WAL, in bytes. If it's given as a time interval, it's
|
||||
converted to an LSN for the calculation (see the sketch after this list).
|
||||
|
||||
- PITR horizon can be set to 0, if you don't want to retain any history.
|
||||
|
||||
## Calculation
|
||||
|
||||
Inputs to the calculation are:
|
||||
- logical size of the database at different points in time,
|
||||
- amount of WAL generated, and
|
||||
- the PITR horizon settings
|
||||
|
||||
The synthetic size is based on an idealistic model of the storage
|
||||
system, where we pretend that the storage consists of two things:
|
||||
- snapshots, containing a full snapshot of the database, at a given
|
||||
point in time, and
|
||||
- WAL.
|
||||
|
||||
In the simple case that the project contains just one branch (main),
|
||||
and a fixed PITR horizon, the synthetic size is the sum of:
|
||||
|
||||
- the logical size of the branch *at the beginning of the PITR
|
||||
horizon*, i.e. at the oldest point that you can still recover to, and
|
||||
- the size of the WAL covering the PITR horizon.
|
||||
|
||||
The snapshot allows you to recover to the beginning of the PITR
|
||||
horizon, and the WAL allows you to recover from that point to any
|
||||
point within the horizon.
|
||||
|
||||
```
|
||||
WAL
|
||||
-----------------------#########>
|
||||
^
|
||||
snapshot
|
||||
|
||||
Legend:
|
||||
##### PITR horizon. This is the region that you can still access
|
||||
with Point-in-time query and you can still create branches
|
||||
from.
|
||||
----- history that has fallen out of the PITR horizon, and can no
|
||||
longer be accessed
|
||||
```
|
||||
|
||||
NOTE: This is not how the storage system actually works! The actual
|
||||
implementation is also based on snapshots and WAL, but the snapshots
|
||||
are taken for individual database pages and ranges of pages rather
|
||||
than the whole database, and it is much more complicated. This model
|
||||
is a reasonable approximation, however, to make the synthetic size a
|
||||
useful proxy for the actual storage consumption.
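
For concreteness, here is a minimal sketch of the single-branch case under
the model above: a snapshot at the start of the PITR horizon plus the WAL
inside the horizon. The helpers `logical_size_at` and `wal_between` are
hypothetical stand-ins for the real logical-size and WAL accounting, not
actual pageserver APIs.

```rust
// A minimal sketch of the single-branch model above; not the pageserver's
// actual implementation.
type Lsn = u64;

fn synthetic_size_single_branch(
    pitr_start: Lsn,
    branch_end: Lsn,
    logical_size_at: impl Fn(Lsn) -> u64,
    wal_between: impl Fn(Lsn, Lsn) -> u64,
) -> u64 {
    // Snapshot at the oldest point you can still recover to ...
    let snapshot = logical_size_at(pitr_start);
    // ... plus the WAL needed to replay forward to any point inside the horizon.
    snapshot + wal_between(pitr_start, branch_end)
}

fn main() {
    const GB: u64 = 1_000_000_000;
    // The INSERT example below: a 10 GB snapshot plus 5 GB of WAL = 15 GB.
    let size = synthetic_size_single_branch(100, 200, |_| 10 * GB, |_, _| 5 * GB);
    assert_eq!(size, 15 * GB);
}
```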
|
||||
|
||||
|
||||
## Example: Data is INSERTed
|
||||
|
||||
For example, let's assume that your database contained 10 GB of data
|
||||
at the beginning of the PITR horizon, and you have since then inserted
|
||||
5 GB of additional data into it. The additional insertions of 5 GB of
|
||||
data consume roughly 5 GB of WAL. In that case, the synthetic size is:
|
||||
|
||||
> 10 GB (snapshot) + 5 GB (WAL) = 15 GB
|
||||
|
||||
If you now set the PITR horizon on the project to 0, so that no
|
||||
historical data is retained, then the beginning PITR horizon would be
|
||||
at the end of the branch, so the size of the snapshot would be
|
||||
calculated at the end of the branch, after the insertions. Then the
|
||||
synthetic size is:
|
||||
|
||||
> 15 GB (snapshot) + 0 GB (WAL) = 15 GB.
|
||||
|
||||
In this case, the synthetic size is the same, regardless of the PITR horizon,
|
||||
because all the history consists of inserts. The newly inserted data takes
|
||||
up the same amount of space, whether it's stored as part of the logical
|
||||
snapshot, or as WAL. (*)
|
||||
|
||||
(*) This is a rough approximation. In reality, the WAL contains
|
||||
headers and other overhead, and on the other hand, the logical
|
||||
snapshot includes empty space on pages, so the size of insertions in
|
||||
WAL can be smaller or greater than the size of the final table after
|
||||
the insertions. But in most cases, it's in the same ballpark.
|
||||
|
||||
## Example: Data is DELETEd
|
||||
|
||||
Let's look at another example:
|
||||
|
||||
Let's start again with a database that contains 10 GB of data. Then,
|
||||
you DELETE 5 GB of the data, and run VACUUM to free up the space, so
|
||||
that the logical size of the database is now only 5 GB.
|
||||
|
||||
Let's assume that the WAL for the deletions and the vacuum take up
|
||||
100 MB of space. In that case, the synthetic size of the project is:
|
||||
|
||||
> 10 GB (snapshot) + 100 MB (WAL) = 10.1 GB
|
||||
|
||||
This is much larger than the logical size of the database after the
|
||||
deletions (5 GB). That's because the system still needs to retain the
|
||||
deleted data, because it's still accessible to queries and branching
|
||||
in the PITR window.
|
||||
|
||||
If you now set the PITR horizon to 0 or just wait for time to pass so
|
||||
that the data falls out of the PITR horizon, making the deleted data
|
||||
inaccessible, the synthetic size shrinks:
|
||||
|
||||
> 5 GB (snapshot) + 0 GB (WAL) = 5 GB
|
||||
|
||||
|
||||
# Branching
|
||||
|
||||
Things get more complicated with branching. Branches in Neon are
|
||||
copy-on-write, which is also reflected in the synthetic size.
|
||||
|
||||
When you create a branch, it doesn't immediately change the synthetic
|
||||
size at all. The branch point is within the PITR horizon, and all the
|
||||
data needed to recover to that point in time needs to be retained
|
||||
anyway.
|
||||
|
||||
However, if you make modifications on the branch, the system needs to
|
||||
keep the WAL of those modifications. The WAL is included in the
|
||||
synthetic size.
|
||||
|
||||
## Example: branch and INSERT
|
||||
|
||||
Let's assume that you again start with a 10 GB database.
|
||||
On the main branch, you insert 2 GB of data. Then you create
|
||||
a branch at that point, and insert another 3 GB of data on the
|
||||
main branch, and 1 GB of data on the child branch
|
||||
|
||||
```
|
||||
child +#####>
|
||||
|
|
||||
| WAL
|
||||
main ---------###############>
|
||||
^
|
||||
snapshot
|
||||
```
|
||||
|
||||
In this case, the synthetic size consists of:
|
||||
- the snapshot at the beginning of the PITR horizon (10 GB)
|
||||
- the WAL on the main branch (2 GB + 3 GB = 5 GB)
|
||||
- the WAL on the child branch (1 GB)
|
||||
|
||||
Total: 16 GB
|
||||
|
||||
# Diverging branches
|
||||
|
||||
If there is only a small amount of changes in the database on the
|
||||
different branches, as in the previous example, the synthetic size
|
||||
consists of a snapshot before the branch point, containing all the
|
||||
shared data, and the WAL on both branches. However, if the branches
|
||||
diverge a lot, it is more efficient to store a separate snapshot of
|
||||
branches.
|
||||
|
||||
## Example: diverging branches
|
||||
|
||||
You start with a 10 GB database. You insert 5 GB of data on the main
|
||||
branch. Then you create a branch, and immediately delete all the data
|
||||
on the child branch and insert 5 GB of new data to it. Then you do the
|
||||
same on the main branch. Let's assume
|
||||
that the PITR horizon requires keeping the last 1 GB of WAL on the
|
||||
both branches.
|
||||
|
||||
```
|
||||
snapshot
|
||||
v WAL
|
||||
child +---------##############>
|
||||
|
|
||||
|
|
||||
main -------------+---------##############>
|
||||
^ WAL
|
||||
snapshot
|
||||
```
|
||||
|
||||
In this case, the synthetic size consists of:
|
||||
- snapshot at the beginning of the PITR horizon on the main branch (4 GB)
|
||||
- WAL on the main branch (1 GB)
|
||||
- snapshot at the beginning of the PITR horizon on the child branch (4 GB)
|
||||
- last 1 GB of WAL on the child branch (1 GB)
|
||||
|
||||
Total: 10 GB
|
||||
|
||||
The alternative way to store this would be to take only one snapshot
|
||||
at the beginning of branch point, and keep all the WAL on both
|
||||
branches. However, the size with that method would be larger, as it
|
||||
would require one 10 GB snapshot, and 5 GB + 5 GB of WAL. It depends
|
||||
on the amount of changes (WAL) on both branches, and the logical size
|
||||
at the branch point, which method would result in a smaller synthetic
|
||||
size. On each branch point, the system performs the calculation with
|
||||
both methods, and uses the method that is cheaper, i.e. the one that
|
||||
results in a smaller synthetic size.
|
||||
|
||||
One way to think about this is that when you create a branch, it
|
||||
starts out as a thin branch that only stores the WAL since the branch
|
||||
point. As you modify it, and the amount of WAL grows, at some point
|
||||
it becomes cheaper to store a completely new snapshot of the branch
|
||||
and truncate the WAL.
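
A minimal sketch of that "pick the cheaper method" rule, under the
simplifying assumption that a branch can be summarised by three byte counts.
The real calculation walks the whole branch tree, so the numbers in `main`
are purely illustrative.

```rust
// Sketch of the "pick the cheaper representation" rule described above.
struct BranchCost {
    /// Size of a full snapshot at the start of this branch's PITR horizon.
    snapshot_at_pitr_start: u64,
    /// WAL that falls inside this branch's PITR horizon.
    wal_within_pitr: u64,
    /// All WAL kept since the branch point (the "thin branch" representation).
    wal_since_branch_point: u64,
}

/// A branch contributes whichever is smaller: only its WAL since the branch
/// point (reusing the parent's data before that), or its own snapshot plus
/// the WAL inside its PITR horizon.
fn branch_contribution(b: &BranchCost) -> u64 {
    b.wal_since_branch_point
        .min(b.snapshot_at_pitr_start + b.wal_within_pitr)
}

fn main() {
    const GB: u64 = 1_000_000_000;
    // Illustrative numbers: a heavily diverged child branch with ~6 GB of WAL
    // since the branch point, versus a 4 GB snapshot plus 1 GB of recent WAL.
    let child = BranchCost {
        snapshot_at_pitr_start: 4 * GB,
        wal_within_pitr: GB,
        wal_since_branch_point: 6 * GB,
    };
    assert_eq!(branch_contribution(&child), 5 * GB); // the snapshot method wins
}
```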
|
||||
|
||||
|
||||
# What is the size of an individual branch?
|
||||
|
||||
Synthetic size is calculated for the whole project, and includes all
|
||||
branches. There is no such thing as the size of a branch, because it
|
||||
is not straightforward to attribute parts of the size to individual
|
||||
branches.
|
||||
|
||||
## Example: attributing size to branches
|
||||
|
||||
(copied from https://github.com/neondatabase/neon/pull/2884#discussion_r1029365278)
|
||||
|
||||
Imagine that you create two branches, A and B, at the same point from
|
||||
main branch, and do a couple of small updates on both branches. Then
|
||||
six months pass, and during those six months the data on the main
|
||||
branch churns over completely multiple times. The retention period is,
|
||||
say, 1 month.
|
||||
|
||||
```
|
||||
+------> A
|
||||
/
|
||||
--------------------*-------------------------------> main
|
||||
\
|
||||
+--------> B
|
||||
```
|
||||
|
||||
In that situation, the synthetic tenant size would be calculated based
|
||||
on a "logical snapshot" at the branch point, that is, the logical size
|
||||
of the database at that point. Plus the WAL on branches A and B. Let's
|
||||
say that the snapshot size is 10 GB, and the WAL is 1 MB on both
|
||||
branches A and B. So the total synthetic storage size is 10002
|
||||
MB. (Let's ignore the main branch for now; it would just be added to
the sum.)
|
||||
|
||||
How would you break that down per branch? I can think of three
|
||||
different ways to do it, and all of them have their own problems:
|
||||
|
||||
### Subtraction method
|
||||
|
||||
For each branch, calculate how much smaller the total synthetic size
|
||||
would be, if that branch didn't exist. In other words, how much would
|
||||
you save if you dropped the branch. With this method, the size of
|
||||
branches A and B is 1 MB.
|
||||
|
||||
With this method, the 10 GB shared logical snapshot is not included
|
||||
for A nor B. So the size of all branches is not equal to the total
|
||||
synthetic size of the tenant. If you drop branch A, you save 1 MB as
|
||||
you'd expect, but also the size of B suddenly jumps from 1 MB to 10001
|
||||
MB, which might feel surprising.
|
||||
|
||||
### Division method
|
||||
|
||||
Divide the common parts evenly across all branches that need
|
||||
them. With this method, the size of branches A and B would be 5001 MB.
|
||||
|
||||
With this method, the sum of all branches adds up to the total
|
||||
synthetic size. But it's surprising in other ways: if you drop branch
|
||||
A, you might think that you save 5001 MB, but in reality you only save
|
||||
1 MB, and the size of branch B suddenly grows from 5001 to 10001 MB.
|
||||
|
||||
### Addition method
|
||||
|
||||
For each branch, include all the snapshots and WAL that it depends on,
|
||||
even if some of them are shared by other branches. With this method,
|
||||
the size of branches A and B would be 10001 MB.
|
||||
|
||||
The surprise with this method is that the sum of all the branches is
|
||||
larger than the total synthetic size. And if you drop branch A, the
|
||||
total synthetic size doesn't fall by 10001 MB as you might think.
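
To make the comparison concrete, here is a small sketch that applies all
three attribution rules to the A/B example above, in decimal megabytes. It
only illustrates the trade-offs discussed here; none of these numbers is
something Neon actually reports per branch.

```rust
// Illustration only: the three attribution rules applied to the A/B example.
fn main() {
    let shared_snapshot_mb: u64 = 10_000; // 10 GB logical snapshot at the branch point
    let wal_a_mb: u64 = 1;
    let wal_b_mb: u64 = 1;
    let total = shared_snapshot_mb + wal_a_mb + wal_b_mb; // 10_002 MB

    // Subtraction: how much the total would shrink if branch A were dropped.
    let subtraction_a = total - (shared_snapshot_mb + wal_b_mb); // 1 MB

    // Division: split the shared snapshot evenly between the branches that need it.
    let division_a = shared_snapshot_mb / 2 + wal_a_mb; // 5_001 MB

    // Addition: charge branch A for everything it depends on, shared or not.
    let addition_a = shared_snapshot_mb + wal_a_mb; // 10_001 MB

    println!("total={total} subtraction={subtraction_a} division={division_a} addition={addition_a}");
}
```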
|
||||
|
||||
# Alternatives
|
||||
|
||||
A sort of cop-out method would be to show the whole tree of branches
|
||||
graphically, and for each section of WAL or logical snapshot, display
|
||||
the size of that section. You can then see which branches depend on
|
||||
which sections, which sections are shared etc. That would be good to
|
||||
have in the UI anyway.
|
||||
|
||||
Or perhaps calculate per-branch numbers using the subtraction method,
|
||||
and in addition to that, one more number for "shared size" that
|
||||
includes all the data that is needed by more than one branch.
|
||||
|
||||
## Which is the right method?
|
||||
|
||||
The bottom line is that it's not straightforward to attribute the
|
||||
synthetic size to individual branches. There are things we can do, and
|
||||
all of those methods are pretty straightforward to implement, but they
|
||||
all have their own problems. What makes sense depends a lot on what
|
||||
you want to do with the number and on what question you are trying to
|
||||
answer.
|
||||
@@ -8,5 +8,6 @@ license.workspace = true
|
||||
prometheus.workspace = true
|
||||
libc.workspace = true
|
||||
once_cell.workspace = true
|
||||
chrono.workspace = true
|
||||
|
||||
workspace_hack.workspace = true
|
||||
|
||||
34 libs/metrics/src/launch_timestamp.rs Normal file
@@ -0,0 +1,34 @@
|
||||
//! A timestamp captured at process startup to identify restarts of the process, e.g., in logs and metrics.
|
||||
|
||||
use chrono::Utc;
|
||||
|
||||
use super::register_uint_gauge;
|
||||
use std::fmt::Display;
|
||||
|
||||
pub struct LaunchTimestamp(chrono::DateTime<Utc>);
|
||||
|
||||
impl LaunchTimestamp {
|
||||
pub fn generate() -> Self {
|
||||
LaunchTimestamp(Utc::now())
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for LaunchTimestamp {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_launch_timestamp_metric(launch_ts: &'static LaunchTimestamp) {
|
||||
let millis_since_epoch: u64 = launch_ts
|
||||
.0
|
||||
.timestamp_millis()
|
||||
.try_into()
|
||||
.expect("we're after the epoch, this should be positive");
|
||||
let metric = register_uint_gauge!(
|
||||
"libmetrics_launch_timestamp",
|
||||
"Timestamp (millis since epoch) at wich the process launched."
|
||||
)
|
||||
.unwrap();
|
||||
metric.set(millis_since_epoch);
|
||||
}
|
||||
@@ -8,6 +8,7 @@ pub use prometheus::opts;
|
||||
pub use prometheus::register;
|
||||
pub use prometheus::{core, default_registry, proto};
|
||||
pub use prometheus::{exponential_buckets, linear_buckets};
|
||||
pub use prometheus::{register_counter_vec, Counter, CounterVec};
|
||||
pub use prometheus::{register_gauge, Gauge};
|
||||
pub use prometheus::{register_gauge_vec, GaugeVec};
|
||||
pub use prometheus::{register_histogram, Histogram};
|
||||
@@ -19,6 +20,7 @@ pub use prometheus::{register_int_gauge_vec, IntGaugeVec};
|
||||
pub use prometheus::{Encoder, TextEncoder};
|
||||
use prometheus::{Registry, Result};
|
||||
|
||||
pub mod launch_timestamp;
|
||||
mod wrappers;
|
||||
pub use wrappers::{CountedReader, CountedWriter};
|
||||
|
||||
@@ -33,6 +35,14 @@ macro_rules! register_uint_gauge_vec {
|
||||
}};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! register_uint_gauge {
|
||||
($NAME:expr, $HELP:expr $(,)?) => {{
|
||||
let gauge = $crate::UIntGauge::new($NAME, $HELP).unwrap();
|
||||
$crate::register(Box::new(gauge.clone())).map(|_| gauge)
|
||||
}};
|
||||
}
|
||||
|
||||
/// Special internal registry, to collect metrics independently from the default registry.
|
||||
/// Was introduced to fix deadlock with lazy registration of metrics in the default registry.
|
||||
static INTERNAL_REGISTRY: Lazy<Registry> = Lazy::new(Registry::new);
|
||||
|
||||
@@ -13,5 +13,7 @@ bytes.workspace = true
|
||||
byteorder.workspace = true
|
||||
utils.workspace = true
|
||||
postgres_ffi.workspace = true
|
||||
enum-map.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
workspace_hack.workspace = true
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
use std::num::{NonZeroU64, NonZeroUsize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
num::{NonZeroU64, NonZeroUsize},
|
||||
time::SystemTime,
|
||||
};
|
||||
|
||||
use byteorder::{BigEndian, ReadBytesExt};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{serde_as, DisplayFromStr};
|
||||
use utils::{
|
||||
history_buffer::HistoryBufferWithDropCounter,
|
||||
id::{NodeId, TenantId, TimelineId},
|
||||
lsn::Lsn,
|
||||
};
|
||||
@@ -29,6 +34,14 @@ pub enum TenantState {
|
||||
Broken,
|
||||
}
|
||||
|
||||
pub mod state {
|
||||
pub const LOADING: &str = "loading";
|
||||
pub const ATTACHING: &str = "attaching";
|
||||
pub const ACTIVE: &str = "active";
|
||||
pub const STOPPING: &str = "stopping";
|
||||
pub const BROKEN: &str = "broken";
|
||||
}
|
||||
|
||||
impl TenantState {
|
||||
pub fn has_in_progress_downloads(&self) -> bool {
|
||||
match self {
|
||||
@@ -39,6 +52,16 @@ impl TenantState {
|
||||
Self::Broken => false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
TenantState::Loading => state::LOADING,
|
||||
TenantState::Attaching => state::ATTACHING,
|
||||
TenantState::Active => state::ACTIVE,
|
||||
TenantState::Stopping => state::STOPPING,
|
||||
TenantState::Broken => state::BROKEN,
|
||||
}
|
||||
}
|
||||
}
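A small illustrative check (not part of the diff) of how the `state` constants and `as_str` are meant to line up, so labels built from either stay consistent:

assert_eq!(TenantState::Active.as_str(), state::ACTIVE);
assert_eq!(TenantState::Broken.as_str(), state::BROKEN);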
|
||||
|
||||
/// A state of a timeline in pageserver's memory.
|
||||
@@ -119,7 +142,6 @@ pub struct TenantConfigRequest {
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
pub tenant_id: TenantId,
|
||||
#[serde(default)]
|
||||
#[serde_as(as = "Option<DisplayFromStr>")]
|
||||
pub checkpoint_distance: Option<u64>,
|
||||
pub checkpoint_timeout: Option<String>,
|
||||
pub compaction_target_size: Option<u64>,
|
||||
@@ -133,6 +155,11 @@ pub struct TenantConfigRequest {
|
||||
pub lagging_wal_timeout: Option<String>,
|
||||
pub max_lsn_wal_lag: Option<NonZeroU64>,
|
||||
pub trace_read_requests: Option<bool>,
|
||||
// We defer the parsing of the eviction_policy field to the request handler.
|
||||
// Otherwise we'd have to move the types for eviction policy into this package.
|
||||
// We might do that once the eviction feature has stabilized.
|
||||
// For now, this field is not even documented in the openapi_spec.yml.
|
||||
pub eviction_policy: Option<serde_json::Value>,
|
||||
}
|
||||
|
||||
impl TenantConfigRequest {
|
||||
@@ -152,6 +179,7 @@ impl TenantConfigRequest {
|
||||
lagging_wal_timeout: None,
|
||||
max_lsn_wal_lag: None,
|
||||
trace_read_requests: None,
|
||||
eviction_policy: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -209,6 +237,130 @@ pub struct TimelineInfo {
|
||||
pub state: TimelineState,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct LayerMapInfo {
|
||||
pub in_memory_layers: Vec<InMemoryLayerInfo>,
|
||||
pub historic_layers: Vec<HistoricLayerInfo>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, enum_map::Enum)]
|
||||
#[repr(usize)]
|
||||
pub enum LayerAccessKind {
|
||||
GetValueReconstructData,
|
||||
Iter,
|
||||
KeyIter,
|
||||
Dump,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LayerAccessStatFullDetails {
|
||||
pub when_millis_since_epoch: u64,
|
||||
pub task_kind: &'static str,
|
||||
pub access_kind: LayerAccessKind,
|
||||
}
|
||||
|
||||
/// An event that impacts the layer's residence status.
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct LayerResidenceEvent {
|
||||
/// The time when the event occurred.
|
||||
/// NB: this timestamp is captured while the residence status changes.
|
||||
/// So, it might be behind/ahead of the actual residence change by a short amount of time.
|
||||
///
|
||||
#[serde(rename = "timestamp_millis_since_epoch")]
|
||||
#[serde_as(as = "serde_with::TimestampMilliSeconds")]
|
||||
pub timestamp: SystemTime,
|
||||
/// The new residence status of the layer.
|
||||
pub status: LayerResidenceStatus,
|
||||
/// The reason why we had to record this event.
|
||||
pub reason: LayerResidenceEventReason,
|
||||
}
|
||||
|
||||
/// The reason for recording a given [`LayerResidenceEvent`].
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub enum LayerResidenceEventReason {
|
||||
/// The layer map is being populated, e.g. during timeline load or attach.
|
||||
/// This includes [`RemoteLayer`] objects created in [`reconcile_with_remote`].
|
||||
/// We need to record such events because there is no persistent storage for the events.
|
||||
LayerLoad,
|
||||
/// We just created the layer (e.g., freeze_and_flush or compaction).
|
||||
/// Such layers are always [`LayerResidenceStatus::Resident`].
|
||||
LayerCreate,
|
||||
/// We on-demand downloaded or evicted the given layer.
|
||||
ResidenceChange,
|
||||
}
|
||||
|
||||
/// The residence status of the layer, after the given [`LayerResidenceEvent`].
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub enum LayerResidenceStatus {
|
||||
/// Residence status for a layer file that exists locally.
|
||||
/// It may also exist on the remote, we don't care here.
|
||||
Resident,
|
||||
/// Residence status for a layer file that only exists on the remote.
|
||||
Evicted,
|
||||
}
|
||||
|
||||
impl LayerResidenceEvent {
|
||||
pub fn new(status: LayerResidenceStatus, reason: LayerResidenceEventReason) -> Self {
|
||||
Self {
|
||||
status,
|
||||
reason,
|
||||
timestamp: SystemTime::now(),
|
||||
}
|
||||
}
|
||||
}
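A hedged usage sketch (the real call sites live elsewhere in the pageserver and are not shown in this diff): recording an on-demand eviction and serializing it. `LayerResidenceEvent` derives `Serialize`, and the serde attributes above emit the timestamp as `timestamp_millis_since_epoch`.

// Hypothetical call site: the layer was just evicted.
let event = LayerResidenceEvent::new(
    LayerResidenceStatus::Evicted,
    LayerResidenceEventReason::ResidenceChange,
);
// Produces JSON along the lines of:
// {"timestamp_millis_since_epoch":1690000000000,"status":"Evicted","reason":"ResidenceChange"}
let json = serde_json::to_string(&event).unwrap();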
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct LayerAccessStats {
|
||||
pub access_count_by_access_kind: HashMap<LayerAccessKind, u64>,
|
||||
pub task_kind_access_flag: Vec<&'static str>,
|
||||
pub first: Option<LayerAccessStatFullDetails>,
|
||||
pub accesses_history: HistoryBufferWithDropCounter<LayerAccessStatFullDetails, 16>,
|
||||
pub residence_events_history: HistoryBufferWithDropCounter<LayerResidenceEvent, 16>,
|
||||
}
|
||||
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(tag = "kind")]
|
||||
pub enum InMemoryLayerInfo {
|
||||
Open {
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
lsn_start: Lsn,
|
||||
},
|
||||
Frozen {
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
lsn_start: Lsn,
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
lsn_end: Lsn,
|
||||
},
|
||||
}
|
||||
|
||||
#[serde_as]
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(tag = "kind")]
|
||||
pub enum HistoricLayerInfo {
|
||||
Delta {
|
||||
layer_file_name: String,
|
||||
layer_file_size: Option<u64>,
|
||||
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
lsn_start: Lsn,
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
lsn_end: Lsn,
|
||||
remote: bool,
|
||||
access_stats: LayerAccessStats,
|
||||
},
|
||||
Image {
|
||||
layer_file_name: String,
|
||||
layer_file_size: Option<u64>,
|
||||
|
||||
#[serde_as(as = "DisplayFromStr")]
|
||||
lsn_start: Lsn,
|
||||
remote: bool,
|
||||
access_stats: LayerAccessStats,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct DownloadRemoteLayersTaskSpawnRequest {
|
||||
pub max_concurrent_downloads: NonZeroUsize,
|
||||
@@ -249,7 +401,7 @@ pub struct TimelineGcRequest {
|
||||
}
|
||||
|
||||
// Wrapped in libpq CopyData
|
||||
#[derive(PartialEq, Eq)]
|
||||
#[derive(PartialEq, Eq, Debug)]
|
||||
pub enum PagestreamFeMessage {
|
||||
Exists(PagestreamExistsRequest),
|
||||
Nblocks(PagestreamNblocksRequest),
|
||||
|
||||
@@ -98,6 +98,15 @@ impl RelTag {
|
||||
|
||||
name
|
||||
}
|
||||
|
||||
pub fn with_forknum(&self, forknum: u8) -> Self {
|
||||
RelTag {
|
||||
forknum,
|
||||
spcnode: self.spcnode,
|
||||
dbnode: self.dbnode,
|
||||
relnode: self.relnode,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
|
||||
@@ -75,27 +75,36 @@ impl StartupMessageParams {
|
||||
/// taking into account all escape sequences but leaving them as-is.
|
||||
/// [`None`] means that there's no `options` in [`Self`].
|
||||
pub fn options_raw(&self) -> Option<impl Iterator<Item = &str>> {
|
||||
// See `postgres: pg_split_opts`.
|
||||
let mut last_was_escape = false;
|
||||
let iter = self
|
||||
.get("options")?
|
||||
.split(move |c: char| {
|
||||
// We split by non-escaped whitespace symbols.
|
||||
let should_split = c.is_ascii_whitespace() && !last_was_escape;
|
||||
last_was_escape = c == '\\' && !last_was_escape;
|
||||
should_split
|
||||
})
|
||||
.filter(|s| !s.is_empty());
|
||||
|
||||
Some(iter)
|
||||
self.get("options").map(Self::parse_options_raw)
|
||||
}
|
||||
|
||||
/// Split command-line options according to PostgreSQL's logic,
|
||||
/// applying all escape sequences (using owned strings as needed).
|
||||
/// [`None`] means that there's no `options` in [`Self`].
|
||||
pub fn options_escaped(&self) -> Option<impl Iterator<Item = Cow<'_, str>>> {
|
||||
self.get("options").map(Self::parse_options_escaped)
|
||||
}
|
||||
|
||||
/// Split command-line options according to PostgreSQL's logic,
|
||||
/// taking into account all escape sequences but leaving them as-is.
|
||||
pub fn parse_options_raw(input: &str) -> impl Iterator<Item = &str> {
|
||||
// See `postgres: pg_split_opts`.
|
||||
let iter = self.options_raw()?.map(|s| {
|
||||
let mut last_was_escape = false;
|
||||
input
|
||||
.split(move |c: char| {
|
||||
// We split by non-escaped whitespace symbols.
|
||||
let should_split = c.is_ascii_whitespace() && !last_was_escape;
|
||||
last_was_escape = c == '\\' && !last_was_escape;
|
||||
should_split
|
||||
})
|
||||
.filter(|s| !s.is_empty())
|
||||
}
|
||||
|
||||
/// Split command-line options according to PostgreSQL's logic,
|
||||
/// applying all escape sequences (using owned strings as needed).
|
||||
pub fn parse_options_escaped(input: &str) -> impl Iterator<Item = Cow<'_, str>> {
|
||||
// See `postgres: pg_split_opts`.
|
||||
Self::parse_options_raw(input).map(|s| {
|
||||
let mut preserve_next_escape = false;
|
||||
let escape = |c| {
|
||||
// We should remove '\\' unless it's preceded by '\\'.
|
||||
@@ -108,9 +117,12 @@ impl StartupMessageParams {
|
||||
true => Cow::Owned(s.replace(escape, "")),
|
||||
false => Cow::Borrowed(s),
|
||||
}
|
||||
});
|
||||
})
|
||||
}
|
||||
|
||||
Some(iter)
|
||||
/// Iterate through key-value pairs in an arbitrary order.
|
||||
pub fn iter(&self) -> impl Iterator<Item = (&str, &str)> {
|
||||
self.params.iter().map(|(k, v)| (k.as_str(), v.as_str()))
|
||||
}
|
||||
|
||||
// This function is mostly useful in tests.
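The splitting above mirrors PostgreSQL's `pg_split_opts`: whitespace separates options unless escaped with a backslash, and `parse_options_escaped` additionally strips the escapes. A small illustrative sketch (the `options` strings are made up):

let raw: Vec<&str> =
    StartupMessageParams::parse_options_raw(r"-c search_path=a\ b  -c work_mem=64MB").collect();
assert_eq!(raw, vec!["-c", r"search_path=a\ b", "-c", "work_mem=64MB"]);

let escaped: Vec<_> =
    StartupMessageParams::parse_options_escaped(r"search_path=a\ b").collect();
assert_eq!(escaped[0].as_ref(), "search_path=a b");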
|
||||
|
||||
@@ -21,7 +21,7 @@ toml_edit.workspace = true
|
||||
tracing.workspace = true
|
||||
metrics.workspace = true
|
||||
utils.workspace = true
|
||||
|
||||
pin-project-lite.workspace = true
|
||||
workspace_hack.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -20,7 +20,10 @@ use aws_sdk_s3::{
|
||||
};
|
||||
use aws_smithy_http::body::SdkBody;
|
||||
use hyper::Body;
|
||||
use tokio::{io, sync::Semaphore};
|
||||
use tokio::{
|
||||
io::{self, AsyncRead},
|
||||
sync::Semaphore,
|
||||
};
|
||||
use tokio_util::io::ReaderStream;
|
||||
use tracing::debug;
|
||||
|
||||
@@ -102,7 +105,7 @@ pub struct S3Bucket {
|
||||
// Every request to S3 can be throttled or cancelled, if a certain number of requests per second is exceeded.
|
||||
// Same goes for IAM, which is queried before every S3 request, if enabled. IAM has an even lower RPS threshold.
|
||||
// This helps to ensure we don't exceed the thresholds.
|
||||
concurrency_limiter: Semaphore,
|
||||
concurrency_limiter: Arc<Semaphore>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
@@ -162,7 +165,7 @@ impl S3Bucket {
|
||||
client,
|
||||
bucket_name: aws_config.bucket_name.clone(),
|
||||
prefix_in_bucket,
|
||||
concurrency_limiter: Semaphore::new(aws_config.concurrency_limit.get()),
|
||||
concurrency_limiter: Arc::new(Semaphore::new(aws_config.concurrency_limit.get())),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -194,9 +197,10 @@ impl S3Bucket {
|
||||
}
|
||||
|
||||
async fn download_object(&self, request: GetObjectRequest) -> Result<Download, DownloadError> {
|
||||
let _guard = self
|
||||
let permit = self
|
||||
.concurrency_limiter
|
||||
.acquire()
|
||||
.clone()
|
||||
.acquire_owned()
|
||||
.await
|
||||
.context("Concurrency limiter semaphore got closed during S3 download")
|
||||
.map_err(DownloadError::Other)?;
|
||||
@@ -217,9 +221,10 @@ impl S3Bucket {
|
||||
let metadata = object_output.metadata().cloned().map(StorageMetadata);
|
||||
Ok(Download {
|
||||
metadata,
|
||||
download_stream: Box::pin(io::BufReader::new(
|
||||
download_stream: Box::pin(io::BufReader::new(RatelimitedAsyncRead::new(
|
||||
permit,
|
||||
object_output.body.into_async_read(),
|
||||
)),
|
||||
))),
|
||||
})
|
||||
}
|
||||
Err(SdkError::ServiceError {
|
||||
@@ -240,6 +245,32 @@ impl S3Bucket {
|
||||
}
|
||||
}
|
||||
|
||||
pin_project_lite::pin_project! {
|
||||
/// An `AsyncRead` adapter which carries a permit for the lifetime of the value.
|
||||
struct RatelimitedAsyncRead<S> {
|
||||
permit: tokio::sync::OwnedSemaphorePermit,
|
||||
#[pin]
|
||||
inner: S,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: AsyncRead> RatelimitedAsyncRead<S> {
|
||||
fn new(permit: tokio::sync::OwnedSemaphorePermit, inner: S) -> Self {
|
||||
RatelimitedAsyncRead { permit, inner }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: AsyncRead> AsyncRead for RatelimitedAsyncRead<S> {
|
||||
fn poll_read(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &mut io::ReadBuf<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
let this = self.project();
|
||||
this.inner.poll_read(cx, buf)
|
||||
}
|
||||
}
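The point of the adapter above is that the owned semaphore permit travels with the download stream: the S3 concurrency slot is released only when the reader is dropped, not when `download_object` returns. A minimal sketch of the same pattern, assuming the items of this module are in scope (the helper name is hypothetical):

use std::sync::Arc;
use tokio::sync::Semaphore;

// Hypothetical helper: wrap any AsyncRead so a concurrency slot stays held
// for as long as the stream is alive.
async fn limited_reader<R: tokio::io::AsyncRead>(
    limiter: Arc<Semaphore>,
    inner: R,
) -> RatelimitedAsyncRead<R> {
    // acquire_owned() ties the permit's lifetime to the returned value,
    // just like download_object() above.
    let permit = limiter.acquire_owned().await.expect("semaphore closed");
    RatelimitedAsyncRead::new(permit, inner)
}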
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl RemoteStorage for S3Bucket {
|
||||
async fn list(&self) -> anyhow::Result<Vec<RemotePath>> {
|
||||
|
||||
@@ -7,5 +7,7 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
workspace_hack.workspace = true
|
||||
|
||||
219  libs/tenant_size_model/src/calculation.rs  Normal file
@@ -0,0 +1,219 @@
|
||||
use crate::{SegmentMethod, SegmentSizeResult, SizeResult, StorageModel};
|
||||
|
||||
//
|
||||
// *-g--*---D--->
|
||||
// /
|
||||
// /
|
||||
// / *---b----*-B--->
|
||||
// / /
|
||||
// / /
|
||||
// -----*--e---*-----f----* C
|
||||
// E \
|
||||
// \
|
||||
// *--a---*---A-->
|
||||
//
|
||||
// If A and B need to be retained, is it cheaper to store
|
||||
// snapshot at C+a+b, or snapshots at A and B ?
|
||||
//
|
||||
// If D also needs to be retained, which is cheaper:
|
||||
//
|
||||
// 1. E+g+e+f+a+b
|
||||
// 2. D+C+a+b
|
||||
// 3. D+A+B
|
||||
|
||||
/// [`Segment`] which has had its size calculated.
|
||||
#[derive(Clone, Debug)]
|
||||
struct SegmentSize {
|
||||
method: SegmentMethod,
|
||||
|
||||
// calculated size of this subtree, using this method
|
||||
accum_size: u64,
|
||||
|
||||
seg_id: usize,
|
||||
children: Vec<SegmentSize>,
|
||||
}
|
||||
|
||||
struct SizeAlternatives {
|
||||
// cheapest alternative if parent is available.
|
||||
incremental: SegmentSize,
|
||||
|
||||
// cheapest alternative if parent node is not available
|
||||
non_incremental: Option<SegmentSize>,
|
||||
}
|
||||
|
||||
impl StorageModel {
|
||||
pub fn calculate(&self) -> SizeResult {
|
||||
// Build adjacency list. 'child_list' is indexed by segment id. Each entry
|
||||
// contains a list of all child segments of the segment.
|
||||
let mut roots: Vec<usize> = Vec::new();
|
||||
let mut child_list: Vec<Vec<usize>> = Vec::new();
|
||||
child_list.resize(self.segments.len(), Vec::new());
|
||||
|
||||
for (seg_id, seg) in self.segments.iter().enumerate() {
|
||||
if let Some(parent_id) = seg.parent {
|
||||
child_list[parent_id].push(seg_id);
|
||||
} else {
|
||||
roots.push(seg_id);
|
||||
}
|
||||
}
|
||||
|
||||
let mut segment_results = Vec::new();
|
||||
segment_results.resize(
|
||||
self.segments.len(),
|
||||
SegmentSizeResult {
|
||||
method: SegmentMethod::Skipped,
|
||||
accum_size: 0,
|
||||
},
|
||||
);
|
||||
|
||||
let mut total_size = 0;
|
||||
for root in roots {
|
||||
if let Some(selected) = self.size_here(root, &child_list).non_incremental {
|
||||
StorageModel::fill_selected_sizes(&selected, &mut segment_results);
|
||||
total_size += selected.accum_size;
|
||||
} else {
|
||||
// Couldn't find any way to get this root. Error?
|
||||
}
|
||||
}
|
||||
|
||||
SizeResult {
|
||||
total_size,
|
||||
segments: segment_results,
|
||||
}
|
||||
}
|
||||
|
||||
fn fill_selected_sizes(selected: &SegmentSize, result: &mut Vec<SegmentSizeResult>) {
|
||||
result[selected.seg_id] = SegmentSizeResult {
|
||||
method: selected.method,
|
||||
accum_size: selected.accum_size,
|
||||
};
|
||||
// recurse to children
|
||||
for child in selected.children.iter() {
|
||||
StorageModel::fill_selected_sizes(child, result);
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// This is the core of the sizing calculation.
|
||||
//
|
||||
// This is a recursive function that, for each Segment, calculates the best way
|
||||
// to reach all the Segments that are marked as needed in this subtree, under two
|
||||
// different conditions:
|
||||
// a) when the parent of this segment is available (as a snapshot or through WAL), and
|
||||
// b) when the parent of this segment is not available.
|
||||
//
|
||||
fn size_here(&self, seg_id: usize, child_list: &Vec<Vec<usize>>) -> SizeAlternatives {
|
||||
let seg = &self.segments[seg_id];
|
||||
// First figure out the best way to get each child
|
||||
let mut children = Vec::new();
|
||||
for child_id in &child_list[seg_id] {
|
||||
children.push(self.size_here(*child_id, child_list))
|
||||
}
|
||||
|
||||
// Method 1. If this node is not needed, we can skip it as long as we
|
||||
// take snapshots later in each sub-tree
|
||||
let snapshot_later = if !seg.needed {
|
||||
let mut snapshot_later = SegmentSize {
|
||||
seg_id,
|
||||
method: SegmentMethod::Skipped,
|
||||
accum_size: 0,
|
||||
children: Vec::new(),
|
||||
};
|
||||
|
||||
let mut possible = true;
|
||||
for child in children.iter() {
|
||||
if let Some(non_incremental) = &child.non_incremental {
|
||||
snapshot_later.accum_size += non_incremental.accum_size;
|
||||
snapshot_later.children.push(non_incremental.clone())
|
||||
} else {
|
||||
possible = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if possible {
|
||||
Some(snapshot_later)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Method 2. Get a snapshot here. This is assumed to be possible if the 'size' of
|
||||
// this Segment was given.
|
||||
let snapshot_here = if !seg.needed || seg.parent.is_none() {
|
||||
if let Some(snapshot_size) = seg.size {
|
||||
let mut snapshot_here = SegmentSize {
|
||||
seg_id,
|
||||
method: SegmentMethod::SnapshotHere,
|
||||
accum_size: snapshot_size,
|
||||
children: Vec::new(),
|
||||
};
|
||||
for child in children.iter() {
|
||||
snapshot_here.accum_size += child.incremental.accum_size;
|
||||
snapshot_here.children.push(child.incremental.clone())
|
||||
}
|
||||
Some(snapshot_here)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Method 3. Use WAL to get here from parent
|
||||
let wal_here = {
|
||||
let mut wal_here = SegmentSize {
|
||||
seg_id,
|
||||
method: SegmentMethod::Wal,
|
||||
accum_size: if let Some(parent_id) = seg.parent {
|
||||
seg.lsn - self.segments[parent_id].lsn
|
||||
} else {
|
||||
0
|
||||
},
|
||||
children: Vec::new(),
|
||||
};
|
||||
for child in children {
|
||||
wal_here.accum_size += child.incremental.accum_size;
|
||||
wal_here.children.push(child.incremental)
|
||||
}
|
||||
wal_here
|
||||
};
|
||||
|
||||
// If the parent is not available, what's the cheapest method involving
|
||||
// a snapshot here or later?
|
||||
let mut cheapest_non_incremental: Option<SegmentSize> = None;
|
||||
if let Some(snapshot_here) = snapshot_here {
|
||||
cheapest_non_incremental = Some(snapshot_here);
|
||||
}
|
||||
if let Some(snapshot_later) = snapshot_later {
|
||||
// Use <=, to prefer skipping if the size is equal
|
||||
if let Some(parent) = &cheapest_non_incremental {
|
||||
if snapshot_later.accum_size <= parent.accum_size {
|
||||
cheapest_non_incremental = Some(snapshot_later);
|
||||
}
|
||||
} else {
|
||||
cheapest_non_incremental = Some(snapshot_later);
|
||||
}
|
||||
}
|
||||
|
||||
// And what's the cheapest method, if the parent is available?
|
||||
let cheapest_incremental = if let Some(cheapest_non_incremental) = &cheapest_non_incremental
|
||||
{
|
||||
// Is it cheaper to use a snapshot here or later, anyway?
|
||||
// Use <, to prefer Wal over snapshot if the cost is the same
|
||||
if wal_here.accum_size < cheapest_non_incremental.accum_size {
|
||||
wal_here
|
||||
} else {
|
||||
cheapest_non_incremental.clone()
|
||||
}
|
||||
} else {
|
||||
wal_here
|
||||
};
|
||||
|
||||
SizeAlternatives {
|
||||
incremental: cheapest_incremental,
|
||||
non_incremental: cheapest_non_incremental,
|
||||
}
|
||||
}
|
||||
}
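A hedged end-to-end sketch of driving this calculation directly (mirroring what the test helper later in this diff does): a root segment with a known logical size and one child that must be reconstructable, so the cheapest plan is a snapshot at the root plus the WAL leading to the child.

// Illustrative input: root at LSN 0 with logical size 1000, child at LSN 500.
let model = StorageModel {
    segments: vec![
        Segment { parent: None, lsn: 0, size: Some(1000), needed: false },
        Segment { parent: Some(0), lsn: 500, size: Some(1200), needed: true },
    ],
};
let result = model.calculate();
// Snapshot at the root (1000) plus 500 bytes of WAL up to the child.
assert_eq!(result.total_size, 1000 + 500);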
|
||||
@@ -1,401 +1,70 @@
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
//! Synthetic size calculation
|
||||
|
||||
use anyhow::Context;
|
||||
mod calculation;
|
||||
pub mod svg;
|
||||
|
||||
/// Pricing model or history size builder.
|
||||
/// StorageModel is the input to the synthetic size calculation. It represents
|
||||
/// a tree of timelines, with just the information that's needed for the
|
||||
/// calculation. This doesn't track timeline names or where each timeline
|
||||
/// begins and ends, for example. Instead, it consists of "points of interest"
|
||||
/// on the timelines. A point of interest could be the timeline start or end point,
|
||||
/// the oldest point on a timeline that needs to be retained because of PITR
|
||||
/// cutoff, or snapshot points named by the user. For each such point, and the
|
||||
/// edge connecting the points (implicit in Segment), we store information about
|
||||
/// whether we need to be able to recover to the point, and if known, the logical
|
||||
/// size at the point.
|
||||
///
|
||||
/// Maintains knowledge of the branches and their modifications. Generic over the branch name key
|
||||
/// type.
|
||||
pub struct Storage<K: 'static> {
|
||||
segments: Vec<Segment>,
|
||||
|
||||
/// Mapping from the branch name to the index of a segment describing its latest state.
|
||||
branches: HashMap<K, usize>,
|
||||
/// The segments must form a well-formed tree, with no loops.
|
||||
#[derive(serde::Serialize)]
|
||||
pub struct StorageModel {
|
||||
pub segments: Vec<Segment>,
|
||||
}
|
||||
|
||||
/// Snapshot of a branch.
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
/// Segment represents one point in the tree of branches, *and* the edge that leads
|
||||
/// to it (if any). We don't need separate structs for points and edges, because each
|
||||
/// point can have only one parent.
|
||||
///
|
||||
/// When 'needed' is true, it means that we need to be able to reconstruct
|
||||
/// any version between 'parent.lsn' and 'lsn'. If you want to represent that only
|
||||
/// a single point is needed, create two Segments with the same lsn, and mark only
|
||||
/// the child as needed.
|
||||
///
|
||||
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
|
||||
pub struct Segment {
|
||||
/// Previous segment index into [`Storage::segments`], if any.
|
||||
parent: Option<usize>,
|
||||
pub parent: Option<usize>,
|
||||
|
||||
/// Description of how did we get to this state.
|
||||
///
|
||||
/// Mainly used in the original scenarios 1..=4 with insert, delete and update. Not used when
|
||||
/// modifying a branch directly.
|
||||
pub op: Cow<'static, str>,
|
||||
/// LSN at this point
|
||||
pub lsn: u64,
|
||||
|
||||
/// LSN before this state
|
||||
start_lsn: u64,
|
||||
/// Logical size at this node, if known.
|
||||
pub size: Option<u64>,
|
||||
|
||||
/// LSN at this state
|
||||
pub end_lsn: u64,
|
||||
|
||||
/// Logical size before this state
|
||||
start_size: u64,
|
||||
|
||||
/// Logical size at this state. Can be None in the last Segment of a branch.
|
||||
pub end_size: Option<u64>,
|
||||
|
||||
/// Indices to [`Storage::segments`]
|
||||
///
|
||||
/// FIXME: this could be an Option<usize>
|
||||
children_after: Vec<usize>,
|
||||
|
||||
/// Determined by `retention_period` given to [`Storage::calculate`]
|
||||
/// If true, the segment from parent to this node is needed by `retention_period`
|
||||
pub needed: bool,
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
// *-g--*---D--->
|
||||
// /
|
||||
// /
|
||||
// / *---b----*-B--->
|
||||
// / /
|
||||
// / /
|
||||
// -----*--e---*-----f----* C
|
||||
// E \
|
||||
// \
|
||||
// *--a---*---A-->
|
||||
//
|
||||
// If A and B need to be retained, is it cheaper to store
|
||||
// snapshot at C+a+b, or snapshots at A and B ?
|
||||
//
|
||||
// If D also needs to be retained, which is cheaper:
|
||||
//
|
||||
// 1. E+g+e+f+a+b
|
||||
// 2. D+C+a+b
|
||||
// 3. D+A+B
|
||||
/// Result of synthetic size calculation. Returned by StorageModel::calculate()
|
||||
pub struct SizeResult {
|
||||
pub total_size: u64,
|
||||
|
||||
/// [`Segment`] which has had its size calculated.
|
||||
pub struct SegmentSize {
|
||||
pub seg_id: usize,
|
||||
|
||||
pub method: SegmentMethod,
|
||||
|
||||
this_size: u64,
|
||||
|
||||
pub children: Vec<SegmentSize>,
|
||||
// This has same length as the StorageModel::segments vector in the input.
|
||||
// Each entry in this array corresponds to the entry with same index in
|
||||
// StorageModel::segments.
|
||||
pub segments: Vec<SegmentSizeResult>,
|
||||
}
|
||||
|
||||
impl SegmentSize {
|
||||
fn total(&self) -> u64 {
|
||||
self.this_size + self.children.iter().fold(0, |acc, x| acc + x.total())
|
||||
}
|
||||
|
||||
pub fn total_children(&self) -> u64 {
|
||||
if self.method == SnapshotAfter {
|
||||
self.this_size + self.children.iter().fold(0, |acc, x| acc + x.total())
|
||||
} else {
|
||||
self.children.iter().fold(0, |acc, x| acc + x.total())
|
||||
}
|
||||
}
|
||||
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
|
||||
pub struct SegmentSizeResult {
|
||||
pub method: SegmentMethod,
|
||||
// calculated size of this subtree, using this method
|
||||
pub accum_size: u64,
|
||||
}
|
||||
|
||||
/// Different methods to retain history from a particular state
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
|
||||
pub enum SegmentMethod {
|
||||
SnapshotAfter,
|
||||
Wal,
|
||||
WalNeeded,
|
||||
SnapshotHere, // A logical snapshot is needed after this segment
|
||||
Wal, // Keep WAL leading up to this node
|
||||
Skipped,
|
||||
}
|
||||
|
||||
use SegmentMethod::*;
|
||||
|
||||
impl<K: std::hash::Hash + Eq + 'static> Storage<K> {
|
||||
/// Creates a new storage with the given default branch name.
|
||||
pub fn new(initial_branch: K) -> Storage<K> {
|
||||
let init_segment = Segment {
|
||||
op: "".into(),
|
||||
needed: false,
|
||||
parent: None,
|
||||
start_lsn: 0,
|
||||
end_lsn: 0,
|
||||
start_size: 0,
|
||||
end_size: Some(0),
|
||||
children_after: Vec::new(),
|
||||
};
|
||||
|
||||
Storage {
|
||||
segments: vec![init_segment],
|
||||
branches: HashMap::from([(initial_branch, 0)]),
|
||||
}
|
||||
}
|
||||
|
||||
/// Advances the branch with a new point, at given LSN.
|
||||
pub fn insert_point<Q: ?Sized>(
|
||||
&mut self,
|
||||
branch: &Q,
|
||||
op: Cow<'static, str>,
|
||||
lsn: u64,
|
||||
size: Option<u64>,
|
||||
) -> anyhow::Result<()>
|
||||
where
|
||||
K: std::borrow::Borrow<Q>,
|
||||
Q: std::hash::Hash + Eq + std::fmt::Debug,
|
||||
{
|
||||
let Some(lastseg_id) = self.branches.get(branch).copied() else { anyhow::bail!("branch not found: {branch:?}") };
|
||||
let newseg_id = self.segments.len();
|
||||
let lastseg = &mut self.segments[lastseg_id];
|
||||
|
||||
assert!(lsn > lastseg.end_lsn);
|
||||
|
||||
let Some(start_size) = lastseg.end_size else { anyhow::bail!("no end_size on latest segment for {branch:?}") };
|
||||
|
||||
let newseg = Segment {
|
||||
op,
|
||||
parent: Some(lastseg_id),
|
||||
start_lsn: lastseg.end_lsn,
|
||||
end_lsn: lsn,
|
||||
start_size,
|
||||
end_size: size,
|
||||
children_after: Vec::new(),
|
||||
needed: false,
|
||||
};
|
||||
lastseg.children_after.push(newseg_id);
|
||||
|
||||
self.segments.push(newseg);
|
||||
*self.branches.get_mut(branch).expect("read already") = newseg_id;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Advances the branch with the named operation, by the relative LSN and logical size bytes.
|
||||
pub fn modify_branch<Q: ?Sized>(
|
||||
&mut self,
|
||||
branch: &Q,
|
||||
op: Cow<'static, str>,
|
||||
lsn_bytes: u64,
|
||||
size_bytes: i64,
|
||||
) -> anyhow::Result<()>
|
||||
where
|
||||
K: std::borrow::Borrow<Q>,
|
||||
Q: std::hash::Hash + Eq + std::fmt::Debug,
|
||||
{
|
||||
let Some(lastseg_id) = self.branches.get(branch).copied() else { anyhow::bail!("branch not found: {branch:?}") };
|
||||
let newseg_id = self.segments.len();
|
||||
let lastseg = &mut self.segments[lastseg_id];
|
||||
|
||||
let Some(last_end_size) = lastseg.end_size else { anyhow::bail!("no end_size on latest segment for {branch:?}") };
|
||||
|
||||
let newseg = Segment {
|
||||
op,
|
||||
parent: Some(lastseg_id),
|
||||
start_lsn: lastseg.end_lsn,
|
||||
end_lsn: lastseg.end_lsn + lsn_bytes,
|
||||
start_size: last_end_size,
|
||||
end_size: Some((last_end_size as i64 + size_bytes) as u64),
|
||||
children_after: Vec::new(),
|
||||
needed: false,
|
||||
};
|
||||
lastseg.children_after.push(newseg_id);
|
||||
|
||||
self.segments.push(newseg);
|
||||
*self.branches.get_mut(branch).expect("read already") = newseg_id;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn insert<Q: ?Sized>(&mut self, branch: &Q, bytes: u64) -> anyhow::Result<()>
|
||||
where
|
||||
K: std::borrow::Borrow<Q>,
|
||||
Q: std::hash::Hash + Eq + std::fmt::Debug,
|
||||
{
|
||||
self.modify_branch(branch, "insert".into(), bytes, bytes as i64)
|
||||
}
|
||||
|
||||
pub fn update<Q: ?Sized>(&mut self, branch: &Q, bytes: u64) -> anyhow::Result<()>
|
||||
where
|
||||
K: std::borrow::Borrow<Q>,
|
||||
Q: std::hash::Hash + Eq + std::fmt::Debug,
|
||||
{
|
||||
self.modify_branch(branch, "update".into(), bytes, 0i64)
|
||||
}
|
||||
|
||||
pub fn delete<Q: ?Sized>(&mut self, branch: &Q, bytes: u64) -> anyhow::Result<()>
|
||||
where
|
||||
K: std::borrow::Borrow<Q>,
|
||||
Q: std::hash::Hash + Eq + std::fmt::Debug,
|
||||
{
|
||||
self.modify_branch(branch, "delete".into(), bytes, -(bytes as i64))
|
||||
}
|
||||
|
||||
pub fn branch<Q: ?Sized>(&mut self, parent: &Q, name: K) -> anyhow::Result<()>
|
||||
where
|
||||
K: std::borrow::Borrow<Q> + std::fmt::Debug,
|
||||
Q: std::hash::Hash + Eq + std::fmt::Debug,
|
||||
{
|
||||
// Find the right segment
|
||||
let branchseg_id = *self.branches.get(parent).with_context(|| {
|
||||
format!(
|
||||
"should had found the parent {:?} by key. in branches {:?}",
|
||||
parent, self.branches
|
||||
)
|
||||
})?;
|
||||
|
||||
let _branchseg = &mut self.segments[branchseg_id];
|
||||
|
||||
// Create branch name for it
|
||||
self.branches.insert(name, branchseg_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn calculate(&mut self, retention_period: u64) -> anyhow::Result<SegmentSize> {
|
||||
// Phase 1: Mark all the segments that need to be retained
|
||||
for (_branch, &last_seg_id) in self.branches.iter() {
|
||||
let last_seg = &self.segments[last_seg_id];
|
||||
let cutoff_lsn = last_seg.start_lsn.saturating_sub(retention_period);
|
||||
let mut seg_id = last_seg_id;
|
||||
loop {
|
||||
let seg = &mut self.segments[seg_id];
|
||||
if seg.end_lsn < cutoff_lsn {
|
||||
break;
|
||||
}
|
||||
seg.needed = true;
|
||||
if let Some(prev_seg_id) = seg.parent {
|
||||
seg_id = prev_seg_id;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 2: For each oldest segment in a chain that needs to be retained,
|
||||
// calculate if we should store snapshot or WAL
|
||||
self.size_from_snapshot_later(0)
|
||||
}
|
||||
|
||||
fn size_from_wal(&self, seg_id: usize) -> anyhow::Result<SegmentSize> {
|
||||
let seg = &self.segments[seg_id];
|
||||
|
||||
let this_size = seg.end_lsn - seg.start_lsn;
|
||||
|
||||
let mut children = Vec::new();
|
||||
|
||||
// try both ways
|
||||
for &child_id in seg.children_after.iter() {
|
||||
// try each child both ways
|
||||
let child = &self.segments[child_id];
|
||||
let p1 = self.size_from_wal(child_id)?;
|
||||
|
||||
let p = if !child.needed {
|
||||
let p2 = self.size_from_snapshot_later(child_id)?;
|
||||
if p1.total() < p2.total() {
|
||||
p1
|
||||
} else {
|
||||
p2
|
||||
}
|
||||
} else {
|
||||
p1
|
||||
};
|
||||
children.push(p);
|
||||
}
|
||||
Ok(SegmentSize {
|
||||
seg_id,
|
||||
method: if seg.needed { WalNeeded } else { Wal },
|
||||
this_size,
|
||||
children,
|
||||
})
|
||||
}
|
||||
|
||||
fn size_from_snapshot_later(&self, seg_id: usize) -> anyhow::Result<SegmentSize> {
|
||||
// If this is needed, then it's time to do the snapshot and continue
|
||||
// with wal method.
|
||||
let seg = &self.segments[seg_id];
|
||||
//eprintln!("snap: seg{}: {} needed: {}", seg_id, seg.children_after.len(), seg.needed);
|
||||
if seg.needed {
|
||||
let mut children = Vec::new();
|
||||
|
||||
for &child_id in seg.children_after.iter() {
|
||||
// try each child both ways
|
||||
let child = &self.segments[child_id];
|
||||
let p1 = self.size_from_wal(child_id)?;
|
||||
|
||||
let p = if !child.needed {
|
||||
let p2 = self.size_from_snapshot_later(child_id)?;
|
||||
if p1.total() < p2.total() {
|
||||
p1
|
||||
} else {
|
||||
p2
|
||||
}
|
||||
} else {
|
||||
p1
|
||||
};
|
||||
children.push(p);
|
||||
}
|
||||
Ok(SegmentSize {
|
||||
seg_id,
|
||||
method: WalNeeded,
|
||||
this_size: seg.start_size,
|
||||
children,
|
||||
})
|
||||
} else {
|
||||
// If any of the direct children are "needed", need to be able to reconstruct here
|
||||
let mut children_needed = false;
|
||||
for &child in seg.children_after.iter() {
|
||||
let seg = &self.segments[child];
|
||||
if seg.needed {
|
||||
children_needed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
let method1 = if !children_needed {
|
||||
let mut children = Vec::new();
|
||||
for child in seg.children_after.iter() {
|
||||
children.push(self.size_from_snapshot_later(*child)?);
|
||||
}
|
||||
Some(SegmentSize {
|
||||
seg_id,
|
||||
method: Skipped,
|
||||
this_size: 0,
|
||||
children,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// If this a junction, consider snapshotting here
|
||||
let method2 = if children_needed || seg.children_after.len() >= 2 {
|
||||
let mut children = Vec::new();
|
||||
for child in seg.children_after.iter() {
|
||||
children.push(self.size_from_wal(*child)?);
|
||||
}
|
||||
let Some(this_size) = seg.end_size else { anyhow::bail!("no end_size at junction {seg_id}") };
|
||||
Some(SegmentSize {
|
||||
seg_id,
|
||||
method: SnapshotAfter,
|
||||
this_size,
|
||||
children,
|
||||
})
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(match (method1, method2) {
|
||||
(None, None) => anyhow::bail!(
|
||||
"neither method was applicable: children_after={}, children_needed={}",
|
||||
seg.children_after.len(),
|
||||
children_needed
|
||||
),
|
||||
(Some(method), None) => method,
|
||||
(None, Some(method)) => method,
|
||||
(Some(method1), Some(method2)) => {
|
||||
if method1.total() < method2.total() {
|
||||
method1
|
||||
} else {
|
||||
method2
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub fn into_segments(self) -> Vec<Segment> {
|
||||
self.segments
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,269 +0,0 @@
|
||||
//! Tenant size model testing ground.
|
||||
//!
|
||||
//! Has a number of scenarios and a `main` for invoking these by number, calculating the history
|
||||
//! size and outputting a graphviz graph. The Makefile in this directory shows how to use graphviz to turn scenarios
|
||||
//! into pngs.
|
||||
|
||||
use tenant_size_model::{Segment, SegmentSize, Storage};
|
||||
|
||||
// Main branch only. Some updates on it.
|
||||
fn scenario_1() -> anyhow::Result<(Vec<Segment>, SegmentSize)> {
|
||||
// Create main branch
|
||||
let mut storage = Storage::new("main");
|
||||
|
||||
// Bulk load 5 GB of data to it
|
||||
storage.insert("main", 5_000)?;
|
||||
|
||||
// Stream of updates
|
||||
for _ in 0..5 {
|
||||
storage.update("main", 1_000)?;
|
||||
}
|
||||
|
||||
let size = storage.calculate(1000)?;
|
||||
|
||||
Ok((storage.into_segments(), size))
|
||||
}
|
||||
|
||||
// Main branch only. Some updates on it.
|
||||
fn scenario_2() -> anyhow::Result<(Vec<Segment>, SegmentSize)> {
|
||||
// Create main branch
|
||||
let mut storage = Storage::new("main");
|
||||
|
||||
// Bulk load 5 GB of data to it
|
||||
storage.insert("main", 5_000)?;
|
||||
|
||||
// Stream of updates
|
||||
for _ in 0..5 {
|
||||
storage.update("main", 1_000)?;
|
||||
}
|
||||
|
||||
// Branch
|
||||
storage.branch("main", "child")?;
|
||||
storage.update("child", 1_000)?;
|
||||
|
||||
// More updates on parent
|
||||
storage.update("main", 1_000)?;
|
||||
|
||||
let size = storage.calculate(1000)?;
|
||||
|
||||
Ok((storage.into_segments(), size))
|
||||
}
|
||||
|
||||
// Like 2, but more updates on main
|
||||
fn scenario_3() -> anyhow::Result<(Vec<Segment>, SegmentSize)> {
|
||||
// Create main branch
|
||||
let mut storage = Storage::new("main");
|
||||
|
||||
// Bulk load 5 GB of data to it
|
||||
storage.insert("main", 5_000)?;
|
||||
|
||||
// Stream of updates
|
||||
for _ in 0..5 {
|
||||
storage.update("main", 1_000)?;
|
||||
}
|
||||
|
||||
// Branch
|
||||
storage.branch("main", "child")?;
|
||||
storage.update("child", 1_000)?;
|
||||
|
||||
// More updates on parent
|
||||
for _ in 0..5 {
|
||||
storage.update("main", 1_000)?;
|
||||
}
|
||||
|
||||
let size = storage.calculate(1000)?;
|
||||
|
||||
Ok((storage.into_segments(), size))
|
||||
}
|
||||
|
||||
// Diverged branches
|
||||
fn scenario_4() -> anyhow::Result<(Vec<Segment>, SegmentSize)> {
|
||||
// Create main branch
|
||||
let mut storage = Storage::new("main");
|
||||
|
||||
// Bulk load 5 GB of data to it
|
||||
storage.insert("main", 5_000)?;
|
||||
|
||||
// Stream of updates
|
||||
for _ in 0..5 {
|
||||
storage.update("main", 1_000)?;
|
||||
}
|
||||
|
||||
// Branch
|
||||
storage.branch("main", "child")?;
|
||||
storage.update("child", 1_000)?;
|
||||
|
||||
// More updates on parent
|
||||
for _ in 0..8 {
|
||||
storage.update("main", 1_000)?;
|
||||
}
|
||||
|
||||
let size = storage.calculate(1000)?;
|
||||
|
||||
Ok((storage.into_segments(), size))
|
||||
}
|
||||
|
||||
fn scenario_5() -> anyhow::Result<(Vec<Segment>, SegmentSize)> {
|
||||
let mut storage = Storage::new("a");
|
||||
storage.insert("a", 5000)?;
|
||||
storage.branch("a", "b")?;
|
||||
storage.update("b", 4000)?;
|
||||
storage.update("a", 2000)?;
|
||||
storage.branch("a", "c")?;
|
||||
storage.insert("c", 4000)?;
|
||||
storage.insert("a", 2000)?;
|
||||
|
||||
let size = storage.calculate(5000)?;
|
||||
|
||||
Ok((storage.into_segments(), size))
|
||||
}
|
||||
|
||||
fn scenario_6() -> anyhow::Result<(Vec<Segment>, SegmentSize)> {
|
||||
use std::borrow::Cow;
|
||||
|
||||
const NO_OP: Cow<'static, str> = Cow::Borrowed("");
|
||||
|
||||
let branches = [
|
||||
Some(0x7ff1edab8182025f15ae33482edb590a_u128),
|
||||
Some(0xb1719e044db05401a05a2ed588a3ad3f),
|
||||
Some(0xb68d6691c895ad0a70809470020929ef),
|
||||
];
|
||||
|
||||
// compared to other scenarios, this one uses bytes instead of kB
|
||||
|
||||
let mut storage = Storage::new(None);
|
||||
|
||||
storage.branch(&None, branches[0])?; // at 0
|
||||
storage.modify_branch(&branches[0], NO_OP, 108951064, 43696128)?; // at 108951064
|
||||
storage.branch(&branches[0], branches[1])?; // at 108951064
|
||||
storage.modify_branch(&branches[1], NO_OP, 15560408, -1851392)?; // at 124511472
|
||||
storage.modify_branch(&branches[0], NO_OP, 174464360, -1531904)?; // at 283415424
|
||||
storage.branch(&branches[0], branches[2])?; // at 283415424
|
||||
storage.modify_branch(&branches[2], NO_OP, 15906192, 8192)?; // at 299321616
|
||||
storage.modify_branch(&branches[0], NO_OP, 18909976, 32768)?; // at 302325400
|
||||
|
||||
let size = storage.calculate(100_000)?;
|
||||
|
||||
Ok((storage.into_segments(), size))
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let args: Vec<String> = std::env::args().collect();
|
||||
|
||||
let scenario = if args.len() < 2 { "1" } else { &args[1] };
|
||||
|
||||
let (segments, size) = match scenario {
|
||||
"1" => scenario_1(),
|
||||
"2" => scenario_2(),
|
||||
"3" => scenario_3(),
|
||||
"4" => scenario_4(),
|
||||
"5" => scenario_5(),
|
||||
"6" => scenario_6(),
|
||||
other => {
|
||||
eprintln!("invalid scenario {}", other);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
.unwrap();
|
||||
|
||||
graphviz_tree(&segments, &size);
|
||||
}
|
||||
|
||||
fn graphviz_recurse(segments: &[Segment], node: &SegmentSize) {
|
||||
use tenant_size_model::SegmentMethod::*;
|
||||
|
||||
let seg_id = node.seg_id;
|
||||
let seg = segments.get(seg_id).unwrap();
|
||||
let lsn = seg.end_lsn;
|
||||
let size = seg.end_size.unwrap_or(0);
|
||||
let method = node.method;
|
||||
|
||||
println!(" {{");
|
||||
println!(" node [width=0.1 height=0.1 shape=oval]");
|
||||
|
||||
let tenant_size = node.total_children();
|
||||
|
||||
let penwidth = if seg.needed { 6 } else { 3 };
|
||||
let x = match method {
|
||||
SnapshotAfter =>
|
||||
format!("label=\"lsn: {lsn}\\nsize: {size}\\ntenant_size: {tenant_size}\" style=filled penwidth={penwidth}"),
|
||||
Wal =>
|
||||
format!("label=\"lsn: {lsn}\\nsize: {size}\\ntenant_size: {tenant_size}\" color=\"black\" penwidth={penwidth}"),
|
||||
WalNeeded =>
|
||||
format!("label=\"lsn: {lsn}\\nsize: {size}\\ntenant_size: {tenant_size}\" color=\"black\" penwidth={penwidth}"),
|
||||
Skipped =>
|
||||
format!("label=\"lsn: {lsn}\\nsize: {size}\\ntenant_size: {tenant_size}\" color=\"gray\" penwidth={penwidth}"),
|
||||
};
|
||||
|
||||
println!(" \"seg{seg_id}\" [{x}]");
|
||||
println!(" }}");
|
||||
|
||||
// Recurse. Much of the data is actually on the edge
|
||||
for child in node.children.iter() {
|
||||
let child_id = child.seg_id;
|
||||
graphviz_recurse(segments, child);
|
||||
|
||||
let edge_color = match child.method {
|
||||
SnapshotAfter => "gray",
|
||||
Wal => "black",
|
||||
WalNeeded => "black",
|
||||
Skipped => "gray",
|
||||
};
|
||||
|
||||
println!(" {{");
|
||||
println!(" edge [] ");
|
||||
print!(" \"seg{seg_id}\" -> \"seg{child_id}\" [");
|
||||
print!("color={edge_color}");
|
||||
if child.method == WalNeeded {
|
||||
print!(" penwidth=6");
|
||||
}
|
||||
if child.method == Wal {
|
||||
print!(" penwidth=3");
|
||||
}
|
||||
|
||||
let next = segments.get(child_id).unwrap();
|
||||
|
||||
if next.op.is_empty() {
|
||||
print!(
|
||||
" label=\"{} / {}\"",
|
||||
next.end_lsn - seg.end_lsn,
|
||||
(next.end_size.unwrap_or(0) as i128 - seg.end_size.unwrap_or(0) as i128)
|
||||
);
|
||||
} else {
|
||||
print!(" label=\"{}: {}\"", next.op, next.end_lsn - seg.end_lsn);
|
||||
}
|
||||
println!("]");
|
||||
println!(" }}");
|
||||
}
|
||||
}
|
||||
|
||||
fn graphviz_tree(segments: &[Segment], tree: &SegmentSize) {
|
||||
println!("digraph G {{");
|
||||
println!(" fontname=\"Helvetica,Arial,sans-serif\"");
|
||||
println!(" node [fontname=\"Helvetica,Arial,sans-serif\"]");
|
||||
println!(" edge [fontname=\"Helvetica,Arial,sans-serif\"]");
|
||||
println!(" graph [center=1 rankdir=LR]");
|
||||
println!(" edge [dir=none]");
|
||||
|
||||
graphviz_recurse(segments, tree);
|
||||
|
||||
println!("}}");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn scenarios_return_same_size() {
|
||||
type ScenarioFn = fn() -> anyhow::Result<(Vec<Segment>, SegmentSize)>;
|
||||
let truths: &[(u32, ScenarioFn, _)] = &[
|
||||
(line!(), scenario_1, 8000),
|
||||
(line!(), scenario_2, 9000),
|
||||
(line!(), scenario_3, 13000),
|
||||
(line!(), scenario_4, 16000),
|
||||
(line!(), scenario_5, 17000),
|
||||
(line!(), scenario_6, 333_792_000),
|
||||
];
|
||||
|
||||
for (line, scenario, expected) in truths {
|
||||
let (_, size) = scenario().unwrap();
|
||||
assert_eq!(*expected, size.total_children(), "scenario on line {line}");
|
||||
}
|
||||
}
|
||||
193  libs/tenant_size_model/src/svg.rs  Normal file
@@ -0,0 +1,193 @@
|
||||
use crate::{SegmentMethod, SegmentSizeResult, SizeResult, StorageModel};
|
||||
use std::fmt::Write;
|
||||
|
||||
const SVG_WIDTH: f32 = 500.0;
|
||||
|
||||
struct SvgDraw<'a> {
|
||||
storage: &'a StorageModel,
|
||||
branches: &'a [String],
|
||||
seg_to_branch: &'a [usize],
|
||||
sizes: &'a [SegmentSizeResult],
|
||||
|
||||
// layout
|
||||
xscale: f32,
|
||||
min_lsn: u64,
|
||||
seg_coordinates: Vec<(f32, f32)>,
|
||||
}
|
||||
|
||||
fn draw_legend(result: &mut String) -> anyhow::Result<()> {
|
||||
writeln!(
|
||||
result,
|
||||
"<circle cx=\"10\" cy=\"10\" r=\"5\" stroke=\"red\"/>"
|
||||
)?;
|
||||
writeln!(result, "<text x=\"20\" y=\"15\">logical snapshot</text>")?;
|
||||
writeln!(
|
||||
result,
|
||||
"<line x1=\"5\" y1=\"30\" x2=\"15\" y2=\"30\" stroke-width=\"6\" stroke=\"black\" />"
|
||||
)?;
|
||||
writeln!(
|
||||
result,
|
||||
"<text x=\"20\" y=\"35\">WAL within retention period</text>"
|
||||
)?;
|
||||
writeln!(
|
||||
result,
|
||||
"<line x1=\"5\" y1=\"50\" x2=\"15\" y2=\"50\" stroke-width=\"3\" stroke=\"black\" />"
|
||||
)?;
|
||||
writeln!(
|
||||
result,
|
||||
"<text x=\"20\" y=\"55\">WAL retained to avoid copy</text>"
|
||||
)?;
|
||||
writeln!(
|
||||
result,
|
||||
"<line x1=\"5\" y1=\"70\" x2=\"15\" y2=\"70\" stroke-width=\"1\" stroke=\"gray\" />"
|
||||
)?;
|
||||
writeln!(result, "<text x=\"20\" y=\"75\">WAL not retained</text>")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn draw_svg(
|
||||
storage: &StorageModel,
|
||||
branches: &[String],
|
||||
seg_to_branch: &[usize],
|
||||
sizes: &SizeResult,
|
||||
) -> anyhow::Result<String> {
|
||||
let mut draw = SvgDraw {
|
||||
storage,
|
||||
branches,
|
||||
seg_to_branch,
|
||||
sizes: &sizes.segments,
|
||||
|
||||
xscale: 0.0,
|
||||
min_lsn: 0,
|
||||
seg_coordinates: Vec::new(),
|
||||
};
|
||||
|
||||
let mut result = String::new();
|
||||
|
||||
writeln!(result, "<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" height=\"300\" width=\"500\">")?;
|
||||
|
||||
draw.calculate_svg_layout();
|
||||
|
||||
// Draw the tree
|
||||
for (seg_id, _seg) in storage.segments.iter().enumerate() {
|
||||
draw.draw_seg_phase1(seg_id, &mut result)?;
|
||||
}
|
||||
|
||||
// Draw snapshots
|
||||
for (seg_id, _seg) in storage.segments.iter().enumerate() {
|
||||
draw.draw_seg_phase2(seg_id, &mut result)?;
|
||||
}
|
||||
|
||||
draw_legend(&mut result)?;
|
||||
|
||||
write!(result, "</svg>")?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
impl<'a> SvgDraw<'a> {
|
||||
fn calculate_svg_layout(&mut self) {
|
||||
// Find x scale
|
||||
let segments = &self.storage.segments;
|
||||
let min_lsn = segments.iter().map(|s| s.lsn).fold(u64::MAX, std::cmp::min);
|
||||
let max_lsn = segments.iter().map(|s| s.lsn).fold(0, std::cmp::max);
|
||||
|
||||
// Start with 1 pixel = 1 byte. Double the scale until it fits into the image
|
||||
let mut xscale = 1.0;
|
||||
while (max_lsn - min_lsn) as f32 / xscale > SVG_WIDTH {
|
||||
xscale *= 2.0;
|
||||
}
|
||||
|
||||
// Layout the timelines on Y dimension.
|
||||
// TODO
|
||||
let mut y = 100.0;
|
||||
let mut branch_y_coordinates = Vec::new();
|
||||
for _branch in self.branches {
|
||||
branch_y_coordinates.push(y);
|
||||
y += 40.0;
|
||||
}
|
||||
|
||||
// Calculate coordinates for each point
|
||||
let seg_coordinates = std::iter::zip(segments, self.seg_to_branch)
|
||||
.map(|(seg, branch_id)| {
|
||||
let x = (seg.lsn - min_lsn) as f32 / xscale;
|
||||
let y = branch_y_coordinates[*branch_id];
|
||||
(x, y)
|
||||
})
|
||||
.collect();
|
||||
|
||||
self.xscale = xscale;
|
||||
self.min_lsn = min_lsn;
|
||||
self.seg_coordinates = seg_coordinates;
|
||||
}
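As a quick worked example of the scaling loop above (numbers are illustrative): with `min_lsn = 0`, `max_lsn = 300_000_000` and `SVG_WIDTH = 500.0`, `xscale` doubles from 1.0 up to 1_048_576.0, because 300_000_000 / 524_288 ≈ 572 is still wider than 500 pixels while 300_000_000 / 1_048_576 ≈ 286 fits.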
|
||||
|
||||
/// Draws lines between points
|
||||
fn draw_seg_phase1(&self, seg_id: usize, result: &mut String) -> anyhow::Result<()> {
|
||||
let seg = &self.storage.segments[seg_id];
|
||||
|
||||
let wal_bytes = if let Some(parent_id) = seg.parent {
|
||||
seg.lsn - self.storage.segments[parent_id].lsn
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
let style = match self.sizes[seg_id].method {
|
||||
SegmentMethod::SnapshotHere => "stroke-width=\"1\" stroke=\"gray\"",
|
||||
SegmentMethod::Wal if seg.needed && wal_bytes > 0 => {
|
||||
"stroke-width=\"6\" stroke=\"black\""
|
||||
}
|
||||
SegmentMethod::Wal => "stroke-width=\"3\" stroke=\"black\"",
|
||||
SegmentMethod::Skipped => "stroke-width=\"1\" stroke=\"gray\"",
|
||||
};
|
||||
if let Some(parent_id) = seg.parent {
|
||||
let (x1, y1) = self.seg_coordinates[parent_id];
|
||||
let (x2, y2) = self.seg_coordinates[seg_id];
|
||||
|
||||
writeln!(
|
||||
result,
|
||||
"<line x1=\"{x1}\" y1=\"{y1}\" x2=\"{x2}\" y2=\"{y2}\" {style}>",
|
||||
)?;
|
||||
writeln!(
|
||||
result,
|
||||
" <title>{wal_bytes} bytes of WAL (seg {seg_id})</title>"
|
||||
)?;
|
||||
writeln!(result, "</line>")?;
|
||||
} else {
|
||||
// draw a little dash to mark the starting point of this branch
|
||||
let (x, y) = self.seg_coordinates[seg_id];
|
||||
let (x1, y1) = (x, y - 5.0);
|
||||
let (x2, y2) = (x, y + 5.0);
|
||||
|
||||
writeln!(
|
||||
result,
|
||||
"<line x1=\"{x1}\" y1=\"{y1}\" x2=\"{x2}\" y2=\"{y2}\" {style}>",
|
||||
)?;
|
||||
writeln!(result, " <title>(seg {seg_id})</title>")?;
|
||||
writeln!(result, "</line>")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Draw circles where snapshots are taken
|
||||
fn draw_seg_phase2(&self, seg_id: usize, result: &mut String) -> anyhow::Result<()> {
|
||||
let seg = &self.storage.segments[seg_id];
|
||||
|
||||
// draw a snapshot point if it's needed
|
||||
let (coord_x, coord_y) = self.seg_coordinates[seg_id];
|
||||
if self.sizes[seg_id].method == SegmentMethod::SnapshotHere {
|
||||
writeln!(
|
||||
result,
|
||||
"<circle cx=\"{coord_x}\" cy=\"{coord_y}\" r=\"5\" stroke=\"red\">",
|
||||
)?;
|
||||
writeln!(
|
||||
result,
|
||||
" <title>logical size {}</title>",
|
||||
seg.size.unwrap()
|
||||
)?;
|
||||
write!(result, "</circle>")?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
313  libs/tenant_size_model/tests/tests.rs  Normal file
@@ -0,0 +1,313 @@
|
||||
//! Tenant size model tests.
|
||||
|
||||
use tenant_size_model::{Segment, SizeResult, StorageModel};
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
struct ScenarioBuilder {
|
||||
segments: Vec<Segment>,
|
||||
|
||||
/// Mapping from the branch name to the index of a segment describing its latest state.
|
||||
branches: HashMap<String, usize>,
|
||||
}
|
||||
|
||||
impl ScenarioBuilder {
|
||||
/// Creates a new storage with the given default branch name.
|
||||
pub fn new(initial_branch: &str) -> ScenarioBuilder {
|
||||
let init_segment = Segment {
|
||||
parent: None,
|
||||
lsn: 0,
|
||||
size: Some(0),
|
||||
needed: false, // determined later
|
||||
};
|
||||
|
||||
ScenarioBuilder {
|
||||
segments: vec![init_segment],
|
||||
branches: HashMap::from([(initial_branch.into(), 0)]),
|
||||
}
|
||||
}
|
||||
|
||||
/// Advances the branch with the named operation, by the relative LSN and logical size bytes.
|
||||
pub fn modify_branch(&mut self, branch: &str, lsn_bytes: u64, size_bytes: i64) {
|
||||
let lastseg_id = *self.branches.get(branch).unwrap();
|
||||
let newseg_id = self.segments.len();
|
||||
let lastseg = &mut self.segments[lastseg_id];
|
||||
|
||||
let newseg = Segment {
|
||||
parent: Some(lastseg_id),
|
||||
lsn: lastseg.lsn + lsn_bytes,
|
||||
size: Some((lastseg.size.unwrap() as i64 + size_bytes) as u64),
|
||||
needed: false,
|
||||
};
|
||||
|
||||
self.segments.push(newseg);
|
||||
*self.branches.get_mut(branch).expect("read already") = newseg_id;
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, branch: &str, bytes: u64) {
|
||||
self.modify_branch(branch, bytes, bytes as i64);
|
||||
}
|
||||
|
||||
pub fn update(&mut self, branch: &str, bytes: u64) {
|
||||
self.modify_branch(branch, bytes, 0i64);
|
||||
}
|
||||
|
||||
pub fn _delete(&mut self, branch: &str, bytes: u64) {
|
||||
self.modify_branch(branch, bytes, -(bytes as i64));
|
||||
}
|
||||
|
||||
/// Panics if the parent branch cannot be found.
|
||||
pub fn branch(&mut self, parent: &str, name: &str) {
|
||||
// Find the right segment
|
||||
let branchseg_id = *self
|
||||
.branches
|
||||
.get(parent)
|
||||
.expect("should had found the parent by key");
|
||||
let _branchseg = &mut self.segments[branchseg_id];
|
||||
|
||||
// Create branch name for it
|
||||
self.branches.insert(name.to_string(), branchseg_id);
|
||||
}
|
||||
|
||||
pub fn calculate(&mut self, retention_period: u64) -> (StorageModel, SizeResult) {
|
||||
// Phase 1: Mark all the segments that need to be retained
|
||||
for (_branch, &last_seg_id) in self.branches.iter() {
|
||||
let last_seg = &self.segments[last_seg_id];
|
||||
let cutoff_lsn = last_seg.lsn.saturating_sub(retention_period);
|
||||
let mut seg_id = last_seg_id;
|
||||
loop {
|
||||
let seg = &mut self.segments[seg_id];
|
||||
if seg.lsn <= cutoff_lsn {
|
||||
break;
|
||||
}
|
||||
seg.needed = true;
|
||||
if let Some(prev_seg_id) = seg.parent {
|
||||
seg_id = prev_seg_id;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the calculation
|
||||
let storage_model = StorageModel {
|
||||
segments: self.segments.clone(),
|
||||
};
|
||||
let size_result = storage_model.calculate();
|
||||
(storage_model, size_result)
|
||||
}
|
||||
}
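A quick worked example of the marking loop in `calculate` above (using scenario_1's numbers): the single branch ends at LSN 10_000, so with `retention_period = 1000` the cutoff is 9_000 and only the topmost segment is marked `needed`; the cheapest plan is then a logical snapshot at LSN 9_000 (size 5_000) plus the 1_000 bytes of WAL above it, which is exactly what scenario_1 below asserts.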
|
||||
|
||||
// Main branch only. Some updates on it.
|
||||
#[test]
|
||||
fn scenario_1() {
|
||||
// Create main branch
|
||||
let mut scenario = ScenarioBuilder::new("main");
|
||||
|
||||
// Bulk load 5 GB of data to it
|
||||
scenario.insert("main", 5_000);
|
||||
|
||||
// Stream of updates
|
||||
for _ in 0..5 {
|
||||
scenario.update("main", 1_000);
|
||||
}
|
||||
|
||||
// Calculate the synthetic size with retention horizon 1000
|
||||
let (_model, result) = scenario.calculate(1000);
|
||||
|
||||
// The end of the branch is at LSN 10000. Need to retain
|
||||
// a logical snapshot at LSN 9000, plus the WAL between 9000-10000.
|
||||
// The logical snapshot has size 5000.
|
||||
assert_eq!(result.total_size, 5000 + 1000);
|
||||
}
|
||||
|
||||
// Main branch plus a child branch; some updates on both.
|
||||
#[test]
|
||||
fn scenario_2() {
|
||||
// Create main branch
|
||||
let mut scenario = ScenarioBuilder::new("main");
|
||||
|
||||
// Bulk load 5 GB of data to it
|
||||
scenario.insert("main", 5_000);
|
||||
|
||||
// Stream of updates
|
||||
for _ in 0..5 {
|
||||
scenario.update("main", 1_000);
|
||||
}
|
||||
|
||||
// Branch
|
||||
scenario.branch("main", "child");
|
||||
scenario.update("child", 1_000);
|
||||
|
||||
// More updates on parent
|
||||
scenario.update("main", 1_000);
|
||||
|
||||
//
|
||||
// The history looks like this now:
|
||||
//
|
||||
// 10000 11000
|
||||
// *----*----*--------------* main
|
||||
// |
|
||||
// | 11000
|
||||
// +-------------- child
|
||||
//
|
||||
//
|
||||
// With retention horizon 1000, we need to retain logical snapshot
|
||||
// at the branch point, size 5000, and the WAL from 10000-11000 on
|
||||
// both branches.
|
||||
let (_model, result) = scenario.calculate(1000);
|
||||
|
||||
assert_eq!(result.total_size, 5000 + 1000 + 1000);
|
||||
}
|
||||
|
||||
// Like 2, but more updates on main
|
||||
#[test]
|
||||
fn scenario_3() {
|
||||
// Create main branch
|
||||
let mut scenario = ScenarioBuilder::new("main");
|
||||
|
||||
// Bulk load 5 GB of data to it
|
||||
scenario.insert("main", 5_000);
|
||||
|
||||
// Stream of updates
|
||||
for _ in 0..5 {
|
||||
scenario.update("main", 1_000);
|
||||
}
|
||||
|
||||
// Branch
|
||||
scenario.branch("main", "child");
|
||||
scenario.update("child", 1_000);
|
||||
|
||||
// More updates on parent
|
||||
for _ in 0..5 {
|
||||
scenario.update("main", 1_000);
|
||||
}
|
||||
|
||||
//
|
||||
// The history looks like this now:
|
||||
//
|
||||
// 10000 15000
|
||||
// *----*----*------------------------------------* main
|
||||
// |
|
||||
// | 11000
|
||||
// +-------------- child
|
||||
//
|
||||
//
|
||||
// With retention horizon 1000, it's still cheapest to retain
|
||||
// - snapshot at branch point (size 5000)
|
||||
// - WAL on child between 10000-11000
|
||||
// - WAL on main between 10000-15000
|
||||
//
|
||||
// This is in total 5000 + 1000 + 5000
|
||||
//
|
||||
let (_model, result) = scenario.calculate(1000);
|
||||
|
||||
assert_eq!(result.total_size, 5000 + 1000 + 5000);
|
||||
}
|
||||
|
||||
// Diverged branches
|
||||
#[test]
|
||||
fn scenario_4() {
|
||||
// Create main branch
|
||||
let mut scenario = ScenarioBuilder::new("main");
|
||||
|
||||
// Bulk load 5 GB of data to it
|
||||
scenario.insert("main", 5_000);
|
||||
|
||||
// Stream of updates
|
||||
for _ in 0..5 {
|
||||
scenario.update("main", 1_000);
|
||||
}
|
||||
|
||||
// Branch
|
||||
scenario.branch("main", "child");
|
||||
scenario.update("child", 1_000);
|
||||
|
||||
// More updates on parent
|
||||
for _ in 0..8 {
|
||||
scenario.update("main", 1_000);
|
||||
}
|
||||
|
||||
//
|
||||
// The history looks like this now:
|
||||
//
|
||||
// 10000 18000
|
||||
// *----*----*------------------------------------* main
|
||||
// |
|
||||
// | 11000
|
||||
// +-------------- child
|
||||
//
|
||||
//
|
||||
// With retention horizon 1000, it's now cheapest to retain
|
||||
// separate snapshots on both branches:
|
||||
// - snapshot on main branch at LSN 17000 (size 5000)
|
||||
// - WAL on main between 17000-18000
|
||||
// - snapshot on child branch at LSN 10000 (size 5000)
|
||||
// - WAL on child between 10000-11000
|
||||
//
|
||||
// This is in total 5000 + 1000 + 5000 + 1000 = 12000
|
||||
//
|
||||
// (If we used the method from the previous scenario, and
|
||||
// kept only snapshot at the branch point, we'd need to keep
|
||||
// all the WAL between 10000-18000 on the main branch, so
|
||||
// the total size would be 5000 + 1000 + 8000 = 14000. The
|
||||
// calculation always picks the cheapest alternative)
|
||||
|
||||
let (_model, result) = scenario.calculate(1000);
|
||||
|
||||
assert_eq!(result.total_size, 5000 + 1000 + 5000 + 1000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn scenario_5() {
|
||||
let mut scenario = ScenarioBuilder::new("a");
|
||||
scenario.insert("a", 5000);
|
||||
scenario.branch("a", "b");
|
||||
scenario.update("b", 4000);
|
||||
scenario.update("a", 2000);
|
||||
scenario.branch("a", "c");
|
||||
scenario.insert("c", 4000);
|
||||
scenario.insert("a", 2000);
|
||||
|
||||
let (_model, result) = scenario.calculate(1000);
|
||||
|
||||
assert_eq!(result.total_size, 17000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn scenario_6() {
|
||||
let branches = [
|
||||
"7ff1edab8182025f15ae33482edb590a",
|
||||
"b1719e044db05401a05a2ed588a3ad3f",
|
||||
"0xb68d6691c895ad0a70809470020929ef",
|
||||
];
|
||||
|
||||
// compared to other scenarios, this one uses bytes instead of kB
|
||||
|
||||
let mut scenario = ScenarioBuilder::new("");
|
||||
|
||||
scenario.branch("", branches[0]); // at 0
|
||||
scenario.modify_branch(branches[0], 108951064, 43696128); // at 108951064
|
||||
scenario.branch(branches[0], branches[1]); // at 108951064
|
||||
scenario.modify_branch(branches[1], 15560408, -1851392); // at 124511472
|
||||
scenario.modify_branch(branches[0], 174464360, -1531904); // at 283415424
|
||||
scenario.branch(branches[0], branches[2]); // at 283415424
|
||||
scenario.modify_branch(branches[2], 15906192, 8192); // at 299321616
|
||||
scenario.modify_branch(branches[0], 18909976, 32768); // at 302325400
|
||||
|
||||
let (model, result) = scenario.calculate(100_000);
|
||||
|
||||
// FIXME: We previously calculated 333_792_000. But with this PR, we get
|
||||
// a much lower number. At a quick look at the model output and the
|
||||
// calculations here, the new result seems correct to me.
|
||||
eprintln!(
|
||||
" MODEL: {}",
|
||||
serde_json::to_string(&model.segments).unwrap()
|
||||
);
|
||||
eprintln!(
|
||||
"RESULT: {}",
|
||||
serde_json::to_string(&result.segments).unwrap()
|
||||
);
|
||||
|
||||
assert_eq!(result.total_size, 136_236_928);
|
||||
}
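The scenarios above all reduce to the trade-off spelled out in the comments: keep one snapshot at the branch point plus all WAL written after it, or keep a separate snapshot near each branch head plus only the retained WAL. The sketch below is a deliberately simplified illustration of that choice, not the actual size model these ScenarioBuilder tests exercise; it assumes the logical size stays constant across branches, as it does in these scenarios where updates do not grow the database.

```rust
// Simplified illustration only; sizes are in the same abstract units as the tests.
fn cheapest_retained_size(snapshot_size: u64, wal_after_branch: &[u64], retention: u64) -> u64 {
    // Alternative A: one snapshot at the branch point, plus all WAL written
    // after the branch point on every branch.
    let shared_snapshot: u64 = snapshot_size + wal_after_branch.iter().sum::<u64>();
    // Alternative B: one snapshot per branch head, plus only the last
    // `retention` worth of WAL on each branch.
    let per_branch_snapshots: u64 = wal_after_branch
        .iter()
        .map(|&wal| snapshot_size + wal.min(retention))
        .sum();
    shared_snapshot.min(per_branch_snapshots)
}

fn main() {
    // Scenario 3: the shared snapshot wins (5000 + 5000 + 1000 = 11000).
    assert_eq!(cheapest_retained_size(5_000, &[5_000, 1_000], 1_000), 11_000);
    // Scenario 4: separate snapshots win (2 * (5000 + 1000) = 12000).
    assert_eq!(cheapest_retained_size(5_000, &[8_000, 1_000], 1_000), 12_000);
}
```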
|
||||
17
libs/tracing-utils/Cargo.toml
Normal file
@@ -0,0 +1,17 @@
[package]
name = "tracing-utils"
version = "0.1.0"
edition.workspace = true
license.workspace = true

[dependencies]
hyper.workspace = true
opentelemetry = { workspace = true, features=["rt-tokio"] }
opentelemetry-otlp = { workspace = true, default_features=false, features = ["http-proto", "trace", "http", "reqwest-client"] }
opentelemetry-semantic-conventions.workspace = true
reqwest = { workspace = true, default-features = false, features = ["rustls-tls"] }
tokio = { workspace = true, features = ["rt", "rt-multi-thread"] }
tracing.workspace = true
tracing-opentelemetry.workspace = true
tracing-subscriber.workspace = true
workspace_hack = { version = "0.1", path = "../../workspace_hack" }
96
libs/tracing-utils/src/http.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
//! Tracing wrapper for Hyper HTTP server
|
||||
|
||||
use hyper::HeaderMap;
|
||||
use hyper::{Body, Request, Response};
|
||||
use std::future::Future;
|
||||
use tracing::Instrument;
|
||||
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||
|
||||
/// Configuration option for what to use as the "otel.name" field in the traces.
|
||||
pub enum OtelName<'a> {
|
||||
/// Use a constant string
|
||||
Constant(&'a str),
|
||||
|
||||
/// Use the path from the request.
|
||||
///
|
||||
/// That's very useful information, but is not appropriate if the
|
||||
/// path contains parameters that differ on every request, or worse,
|
||||
/// sensitive information like usernames or email addresses.
|
||||
///
|
||||
/// See <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#name>
|
||||
UriPath,
|
||||
}
|
||||
|
||||
/// Handle an incoming HTTP request using the given handler function,
|
||||
/// with OpenTelemetry tracing.
|
||||
///
|
||||
/// This runs 'handler' on the request in a new span, with fields filled in
|
||||
/// from the request. Notably, if the request contains tracing information,
|
||||
/// it is propagated to the span, so that this request is traced as part of
|
||||
/// the same trace.
|
||||
///
|
||||
/// XXX: Usually, this is handled by existing libraries, or built
|
||||
/// directly into HTTP servers. However, I couldn't find one for Hyper,
|
||||
/// so I had to write my own. The OpenTelemetry website has a registry of
|
||||
/// instrumentation libraries at:
|
||||
/// https://opentelemetry.io/registry/?language=rust&component=instrumentation
|
||||
/// If a Hyper crate appears, consider switching to that.
|
||||
pub async fn tracing_handler<F, R>(
|
||||
req: Request<Body>,
|
||||
handler: F,
|
||||
otel_name: OtelName<'_>,
|
||||
) -> Response<Body>
|
||||
where
|
||||
F: Fn(Request<Body>) -> R,
|
||||
R: Future<Output = Response<Body>>,
|
||||
{
|
||||
// Create a tracing span, with context propagated from the incoming
|
||||
// request if any.
|
||||
//
|
||||
// See list of standard fields defined for HTTP requests at
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md
|
||||
// We only fill in a few of the most useful ones here.
|
||||
let otel_name = match otel_name {
|
||||
OtelName::Constant(s) => s,
|
||||
OtelName::UriPath => req.uri().path(),
|
||||
};
|
||||
|
||||
let span = tracing::info_span!(
|
||||
"http request",
|
||||
otel.name= %otel_name,
|
||||
http.method = %req.method(),
|
||||
http.status_code = tracing::field::Empty,
|
||||
);
|
||||
let parent_ctx = extract_remote_context(req.headers());
|
||||
span.set_parent(parent_ctx);
|
||||
|
||||
// Handle the request within the span
|
||||
let response = handler(req).instrument(span.clone()).await;
|
||||
|
||||
// Fill in the fields from the response code
|
||||
let status = response.status();
|
||||
span.record("http.status_code", status.as_str());
|
||||
span.record(
|
||||
"otel.status_code",
|
||||
if status.is_success() { "OK" } else { "ERROR" },
|
||||
);
|
||||
|
||||
response
|
||||
}
|
||||
|
||||
// Extract remote tracing context from the HTTP headers
|
||||
fn extract_remote_context(headers: &HeaderMap) -> opentelemetry::Context {
|
||||
struct HeaderExtractor<'a>(&'a HeaderMap);
|
||||
|
||||
impl<'a> opentelemetry::propagation::Extractor for HeaderExtractor<'a> {
|
||||
fn get(&self, key: &str) -> Option<&str> {
|
||||
self.0.get(key).and_then(|value| value.to_str().ok())
|
||||
}
|
||||
|
||||
fn keys(&self) -> Vec<&str> {
|
||||
self.0.keys().map(|value| value.as_str()).collect()
|
||||
}
|
||||
}
|
||||
let extractor = HeaderExtractor(headers);
|
||||
opentelemetry::global::get_text_map_propagator(|propagator| propagator.extract(&extractor))
|
||||
}
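For context, this is roughly how `tracing_handler` could be plugged into a hyper 0.14 server. This is a hedged sketch, not part of the patch: the crate path `tracing_utils::http`, the `hello` handler, the listen address, and the use of `make_service_fn`/`service_fn` (which need hyper's server features) are all assumptions.

```rust
use std::convert::Infallible;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use tracing_utils::http::{tracing_handler, OtelName};

// Hypothetical application handler.
async fn hello(_req: Request<Body>) -> Response<Body> {
    Response::new(Body::from("hello"))
}

#[tokio::main]
async fn main() {
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|req| async move {
            // Run the handler inside an OpenTelemetry span named after the URI path.
            let resp = tracing_handler(req, hello, OtelName::UriPath).await;
            Ok::<_, Infallible>(resp)
        }))
    });
    let addr = ([127, 0, 0, 1], 3000).into();
    Server::bind(&addr).serve(make_svc).await.unwrap();
}
```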
|
||||
168
libs/tracing-utils/src/lib.rs
Normal file
@@ -0,0 +1,168 @@
|
||||
//! Helper functions to set up OpenTelemetry tracing.
|
||||
//!
|
||||
//! This comes in two variants, depending on whether you have a Tokio runtime available.
|
||||
//! If you do, call `init_tracing()`. It sets up the trace processor and exporter to use
|
||||
//! the current tokio runtime. If you don't have a runtime available, or you don't want
|
||||
//! to share the runtime with the tracing tasks, call `init_tracing_without_runtime()`
|
||||
//! instead. It sets up a dedicated single-threaded Tokio runtime for the tracing tasks.
|
||||
//!
|
||||
//! Example:
|
||||
//!
|
||||
//! ```rust,no_run
|
||||
//! use tracing_subscriber::prelude::*;
|
||||
//! use tracing_opentelemetry::OpenTelemetryLayer;
|
||||
//!
|
||||
//! #[tokio::main]
|
||||
//! async fn main() {
|
||||
//! // Set up logging to stderr
|
||||
//! let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
|
||||
//! .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info"));
|
||||
//! let fmt_layer = tracing_subscriber::fmt::layer()
|
||||
//! .with_target(false)
|
||||
//! .with_writer(std::io::stderr);
|
||||
//!
|
||||
//! // Initialize OpenTelemetry. Exports tracing spans as OpenTelemetry traces
|
||||
//! let otlp_layer = tracing_utils::init_tracing("my_application").await.map(OpenTelemetryLayer::new);
|
||||
//!
|
||||
//! // Put it all together
|
||||
//! tracing_subscriber::registry()
|
||||
//! .with(env_filter)
|
||||
//! .with(otlp_layer)
|
||||
//! .with(fmt_layer)
|
||||
//! .init();
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
use opentelemetry::sdk::Resource;
|
||||
use opentelemetry::KeyValue;
|
||||
use opentelemetry_otlp::WithExportConfig;
|
||||
use opentelemetry_otlp::{OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT};
|
||||
|
||||
pub use tracing_opentelemetry::OpenTelemetryLayer;
|
||||
|
||||
pub mod http;
|
||||
|
||||
/// Set up OpenTelemetry exporter, using configuration from environment variables.
|
||||
///
|
||||
/// `service_name` is set as the OpenTelemetry 'service.name' resource (see
|
||||
/// <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md#service>)
|
||||
///
|
||||
/// We try to follow the conventions for the environment variables specified in
|
||||
/// <https://opentelemetry.io/docs/reference/specification/sdk-environment-variables/>
|
||||
///
|
||||
/// However, we only support a subset of those options:
|
||||
///
|
||||
/// - OTEL_SDK_DISABLED is supported. The default is "false", meaning tracing
|
||||
/// is enabled by default. Set it to "true" to disable.
|
||||
///
|
||||
/// - We use the OTLP exporter, with HTTP protocol. Most of the OTEL_EXPORTER_OTLP_*
|
||||
/// settings specified in
|
||||
/// <https://opentelemetry.io/docs/reference/specification/protocol/exporter/>
|
||||
/// are supported, as they are handled by the `opentelemetry-otlp` crate.
|
||||
/// Settings related to other exporters have no effect.
|
||||
///
|
||||
/// - Some other settings are supported by the `opentelemetry` crate.
|
||||
///
|
||||
/// If you need some other setting, please test if it works first. And perhaps
|
||||
/// add a comment in the list above to save the effort of testing for the next
|
||||
/// person.
|
||||
///
|
||||
/// This doesn't block, but is marked as 'async' to hint that this must be called in an
|
||||
/// asynchronous execution context.
|
||||
pub async fn init_tracing(service_name: &str) -> Option<opentelemetry::sdk::trace::Tracer> {
|
||||
if std::env::var("OTEL_SDK_DISABLED") == Ok("true".to_string()) {
|
||||
return None;
|
||||
};
|
||||
Some(init_tracing_internal(service_name.to_string()))
|
||||
}
|
||||
|
||||
/// Like `init_tracing`, but creates a separate tokio Runtime for the tracing
|
||||
/// tasks.
|
||||
pub fn init_tracing_without_runtime(
|
||||
service_name: &str,
|
||||
) -> Option<opentelemetry::sdk::trace::Tracer> {
|
||||
if std::env::var("OTEL_SDK_DISABLED") == Ok("true".to_string()) {
|
||||
return None;
|
||||
};
|
||||
|
||||
// The opentelemetry batch processor and the OTLP exporter need a Tokio
|
||||
// runtime. Create a dedicated runtime for them. One thread should be
|
||||
// enough.
|
||||
//
|
||||
// (Alternatively, instead of batching, we could use the "simple
|
||||
// processor", which doesn't need Tokio, and use "reqwest-blocking"
|
||||
// feature for the OTLP exporter, which also doesn't need Tokio. However,
|
||||
// batching is considered best practice, and also I have the feeling that
|
||||
// the non-Tokio codepaths in the opentelemetry crate are less used and
|
||||
// might be more buggy, so better to stay on the well-beaten path.)
|
||||
//
|
||||
// We leak the runtime so that it keeps running after we exit the
|
||||
// function.
|
||||
let runtime = Box::leak(Box::new(
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.thread_name("otlp runtime thread")
|
||||
.worker_threads(1)
|
||||
.build()
|
||||
.unwrap(),
|
||||
));
|
||||
let _guard = runtime.enter();
|
||||
|
||||
Some(init_tracing_internal(service_name.to_string()))
|
||||
}
|
||||
|
||||
fn init_tracing_internal(service_name: String) -> opentelemetry::sdk::trace::Tracer {
|
||||
// Set up exporter from the OTEL_EXPORTER_* environment variables
|
||||
let mut exporter = opentelemetry_otlp::new_exporter().http().with_env();
|
||||
|
||||
// XXX opentelemetry-otlp v0.18.0 has a bug in how it uses the
|
||||
// OTEL_EXPORTER_OTLP_ENDPOINT env variable. According to the
|
||||
// OpenTelemetry spec at
|
||||
// <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#endpoint-urls-for-otlphttp>,
|
||||
// the full exporter URL is formed by appending "/v1/traces" to the value
|
||||
// of OTEL_EXPORTER_OTLP_ENDPOINT. However, opentelemetry-otlp only does
|
||||
// that with the grpc-tonic exporter. Other exporters, like the HTTP
|
||||
// exporter, use the URL from OTEL_EXPORTER_OTLP_ENDPOINT as is, without
|
||||
// appending "/v1/traces".
|
||||
//
|
||||
// See https://github.com/open-telemetry/opentelemetry-rust/pull/950
|
||||
//
|
||||
// Work around that by checking OTEL_EXPORTER_OTLP_ENDPOINT, and setting
|
||||
// the endpoint url with the "/v1/traces" path ourselves. If the bug is
|
||||
// fixed in a later version, we can remove this code. But if we don't
|
||||
// remember to remove this, it won't do any harm either, as the crate will
|
||||
// just ignore the OTEL_EXPORTER_OTLP_ENDPOINT setting when the endpoint
|
||||
// is set directly with `with_endpoint`.
|
||||
if std::env::var(OTEL_EXPORTER_OTLP_TRACES_ENDPOINT).is_err() {
|
||||
if let Ok(mut endpoint) = std::env::var(OTEL_EXPORTER_OTLP_ENDPOINT) {
|
||||
if !endpoint.ends_with('/') {
|
||||
endpoint.push('/');
|
||||
}
|
||||
endpoint.push_str("v1/traces");
|
||||
exporter = exporter.with_endpoint(endpoint);
|
||||
}
|
||||
}
|
||||
|
||||
// Propagate trace information in the standard W3C TraceContext format.
|
||||
opentelemetry::global::set_text_map_propagator(
|
||||
opentelemetry::sdk::propagation::TraceContextPropagator::new(),
|
||||
);
|
||||
|
||||
opentelemetry_otlp::new_pipeline()
|
||||
.tracing()
|
||||
.with_exporter(exporter)
|
||||
.with_trace_config(
|
||||
opentelemetry::sdk::trace::config().with_resource(Resource::new(vec![KeyValue::new(
|
||||
opentelemetry_semantic_conventions::resource::SERVICE_NAME,
|
||||
service_name,
|
||||
)])),
|
||||
)
|
||||
.install_batch(opentelemetry::runtime::Tokio)
|
||||
.expect("could not initialize opentelemetry exporter")
|
||||
}
|
||||
|
||||
// Shutdown trace pipeline gracefully, so that it has a chance to send any
|
||||
// pending traces before we exit.
|
||||
pub fn shutdown_tracing() {
|
||||
opentelemetry::global::shutdown_tracer_provider();
|
||||
}
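As a complement to the async example in the module docs, here is a minimal sketch (with a made-up service name) of the runtime-less variant paired with `shutdown_tracing`, so buffered spans are flushed before the process exits:

```rust
use tracing_subscriber::prelude::*;
use tracing_utils::OpenTelemetryLayer;

fn main() {
    // Spawns a dedicated Tokio runtime (one worker thread) for the exporter.
    let otlp_layer =
        tracing_utils::init_tracing_without_runtime("my_tool").map(OpenTelemetryLayer::new);

    tracing_subscriber::registry()
        .with(otlp_layer)
        .with(tracing_subscriber::fmt::layer())
        .init();

    // ... application work ...

    // Flush any pending spans before exiting.
    tracing_utils::shutdown_tracing();
}
```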
|
||||
@@ -11,7 +11,9 @@ async-trait.workspace = true
|
||||
anyhow.workspace = true
|
||||
bincode.workspace = true
|
||||
bytes.workspace = true
|
||||
heapless.workspace = true
|
||||
hyper = { workspace = true, features = ["full"] }
|
||||
futures = { workspace = true}
|
||||
routerify.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
@@ -37,7 +39,8 @@ metrics.workspace = true
|
||||
pq_proto.workspace = true
|
||||
|
||||
workspace_hack.workspace = true
|
||||
|
||||
url.workspace = true
|
||||
uuid = { version = "1.2", features = ["v4", "serde"] }
|
||||
[dev-dependencies]
|
||||
byteorder.workspace = true
|
||||
bytes.workspace = true
|
||||
|
||||
161
libs/utils/src/history_buffer.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
//! A heapless buffer for events of sorts.
|
||||
|
||||
use std::ops;
|
||||
|
||||
use heapless::HistoryBuffer;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct HistoryBufferWithDropCounter<T, const L: usize> {
|
||||
buffer: HistoryBuffer<T, L>,
|
||||
drop_count: u64,
|
||||
}
|
||||
|
||||
impl<T, const L: usize> HistoryBufferWithDropCounter<T, L> {
|
||||
pub fn write(&mut self, data: T) {
|
||||
let len_before = self.buffer.len();
|
||||
self.buffer.write(data);
|
||||
let len_after = self.buffer.len();
|
||||
self.drop_count += u64::from(len_before == len_after);
|
||||
}
|
||||
pub fn drop_count(&self) -> u64 {
|
||||
self.drop_count
|
||||
}
|
||||
pub fn map<U, F: Fn(&T) -> U>(&self, f: F) -> HistoryBufferWithDropCounter<U, L> {
|
||||
let mut buffer = HistoryBuffer::new();
|
||||
buffer.extend(self.buffer.oldest_ordered().map(f));
|
||||
HistoryBufferWithDropCounter::<U, L> {
|
||||
buffer,
|
||||
drop_count: self.drop_count,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, const L: usize> Default for HistoryBufferWithDropCounter<T, L> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
buffer: HistoryBuffer::default(),
|
||||
drop_count: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, const L: usize> ops::Deref for HistoryBufferWithDropCounter<T, L> {
|
||||
type Target = HistoryBuffer<T, L>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.buffer
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize)]
|
||||
struct SerdeRepr<T> {
|
||||
buffer: Vec<T>,
|
||||
drop_count: u64,
|
||||
}
|
||||
|
||||
impl<'a, T, const L: usize> From<&'a HistoryBufferWithDropCounter<T, L>> for SerdeRepr<T>
|
||||
where
|
||||
T: Clone + serde::Serialize,
|
||||
{
|
||||
fn from(value: &'a HistoryBufferWithDropCounter<T, L>) -> Self {
|
||||
let HistoryBufferWithDropCounter { buffer, drop_count } = value;
|
||||
SerdeRepr {
|
||||
buffer: buffer.iter().cloned().collect(),
|
||||
drop_count: *drop_count,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, const L: usize> serde::Serialize for HistoryBufferWithDropCounter<T, L>
|
||||
where
|
||||
T: Clone + serde::Serialize,
|
||||
{
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
SerdeRepr::from(self).serialize(serializer)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::HistoryBufferWithDropCounter;
|
||||
|
||||
#[test]
|
||||
fn test_basics() {
|
||||
let mut b = HistoryBufferWithDropCounter::<_, 2>::default();
|
||||
b.write(1);
|
||||
b.write(2);
|
||||
b.write(3);
|
||||
assert!(b.iter().any(|e| *e == 2));
|
||||
assert!(b.iter().any(|e| *e == 3));
|
||||
assert!(!b.iter().any(|e| *e == 1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_drop_count_works() {
|
||||
let mut b = HistoryBufferWithDropCounter::<_, 2>::default();
|
||||
b.write(1);
|
||||
assert_eq!(b.drop_count(), 0);
|
||||
b.write(2);
|
||||
assert_eq!(b.drop_count(), 0);
|
||||
b.write(3);
|
||||
assert_eq!(b.drop_count(), 1);
|
||||
b.write(4);
|
||||
assert_eq!(b.drop_count(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clone_works() {
|
||||
let mut b = HistoryBufferWithDropCounter::<_, 2>::default();
|
||||
b.write(1);
|
||||
b.write(2);
|
||||
b.write(3);
|
||||
assert_eq!(b.drop_count(), 1);
|
||||
let mut c = b.clone();
|
||||
assert_eq!(c.drop_count(), 1);
|
||||
assert!(c.iter().any(|e| *e == 2));
|
||||
assert!(c.iter().any(|e| *e == 3));
|
||||
assert!(!c.iter().any(|e| *e == 1));
|
||||
|
||||
c.write(4);
|
||||
assert!(c.iter().any(|e| *e == 4));
|
||||
assert!(!b.iter().any(|e| *e == 4));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_map() {
|
||||
let mut b = HistoryBufferWithDropCounter::<_, 2>::default();
|
||||
|
||||
b.write(1);
|
||||
assert_eq!(b.drop_count(), 0);
|
||||
{
|
||||
let c = b.map(|i| i + 10);
|
||||
assert_eq!(c.oldest_ordered().cloned().collect::<Vec<_>>(), vec![11]);
|
||||
assert_eq!(c.drop_count(), 0);
|
||||
}
|
||||
|
||||
b.write(2);
|
||||
assert_eq!(b.drop_count(), 0);
|
||||
{
|
||||
let c = b.map(|i| i + 10);
|
||||
assert_eq!(
|
||||
c.oldest_ordered().cloned().collect::<Vec<_>>(),
|
||||
vec![11, 12]
|
||||
);
|
||||
assert_eq!(c.drop_count(), 0);
|
||||
}
|
||||
|
||||
b.write(3);
|
||||
assert_eq!(b.drop_count(), 1);
|
||||
{
|
||||
let c = b.map(|i| i + 10);
|
||||
assert_eq!(
|
||||
c.oldest_ordered().cloned().collect::<Vec<_>>(),
|
||||
vec![12, 13]
|
||||
);
|
||||
assert_eq!(c.drop_count(), 1);
|
||||
}
|
||||
}
|
||||
}
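A small usage sketch for the `Serialize` impl above (not part of the patch); it assumes the crate is consumed as `utils::history_buffer` and that `serde_json` is available:

```rust
use utils::history_buffer::HistoryBufferWithDropCounter;

fn main() {
    let mut events = HistoryBufferWithDropCounter::<u32, 2>::default();
    events.write(1);
    events.write(2);
    events.write(3); // evicts the oldest entry and bumps drop_count to 1

    // Serializes as {"buffer":[...],"drop_count":1}; element order follows the
    // underlying heapless buffer's iteration order.
    println!("{}", serde_json::to_string(&events).unwrap());
}
```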
|
||||
@@ -1,18 +1,20 @@
|
||||
use crate::auth::{Claims, JwtAuth};
|
||||
use crate::http::error;
|
||||
use anyhow::anyhow;
|
||||
use hyper::header::AUTHORIZATION;
|
||||
use anyhow::{anyhow, Context};
|
||||
use hyper::header::{HeaderName, AUTHORIZATION};
|
||||
use hyper::http::HeaderValue;
|
||||
use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server};
|
||||
use hyper::{Method, StatusCode};
|
||||
use metrics::{register_int_counter, Encoder, IntCounter, TextEncoder};
|
||||
use once_cell::sync::Lazy;
|
||||
use routerify::ext::RequestExt;
|
||||
use routerify::RequestInfo;
|
||||
use routerify::{Middleware, Router, RouterBuilder, RouterService};
|
||||
use routerify::{Middleware, RequestInfo, Router, RouterBuilder, RouterService};
|
||||
use tokio::task::JoinError;
|
||||
use tracing::info;
|
||||
use tracing;
|
||||
|
||||
use std::future::Future;
|
||||
use std::net::TcpListener;
|
||||
use std::str::FromStr;
|
||||
|
||||
use super::error::ApiError;
|
||||
|
||||
@@ -24,8 +26,36 @@ static SERVE_METRICS_COUNT: Lazy<IntCounter> = Lazy::new(|| {
|
||||
.expect("failed to define a metric")
|
||||
});
|
||||
|
||||
static X_REQUEST_ID_HEADER_STR: &str = "x-request-id";
|
||||
|
||||
static X_REQUEST_ID_HEADER: HeaderName = HeaderName::from_static(X_REQUEST_ID_HEADER_STR);
|
||||
#[derive(Debug, Default, Clone)]
|
||||
struct RequestId(String);
|
||||
|
||||
async fn logger(res: Response<Body>, info: RequestInfo) -> Result<Response<Body>, ApiError> {
|
||||
info!("{} {} {}", info.method(), info.uri().path(), res.status(),);
|
||||
let request_id = info.context::<RequestId>().unwrap_or_default().0;
|
||||
|
||||
// cannot factor out the Level to avoid the repetition
|
||||
// because tracing can only work with const Level
|
||||
// which is not the case here
|
||||
|
||||
if info.method() == Method::GET && res.status() == StatusCode::OK {
|
||||
tracing::debug!(
|
||||
"{} {} {} {}",
|
||||
info.method(),
|
||||
info.uri().path(),
|
||||
request_id,
|
||||
res.status()
|
||||
);
|
||||
} else {
|
||||
tracing::info!(
|
||||
"{} {} {} {}",
|
||||
info.method(),
|
||||
info.uri().path(),
|
||||
request_id,
|
||||
res.status()
|
||||
);
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
@@ -53,9 +83,52 @@ async fn prometheus_metrics_handler(_req: Request<Body>) -> Result<Response<Body
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
pub fn add_request_id_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
|
||||
) -> Middleware<B, ApiError> {
|
||||
Middleware::pre(move |req| async move {
|
||||
let request_id = match req.headers().get(&X_REQUEST_ID_HEADER) {
|
||||
Some(request_id) => request_id
|
||||
.to_str()
|
||||
.expect("extract request id value")
|
||||
.to_owned(),
|
||||
None => {
|
||||
let request_id = uuid::Uuid::new_v4();
|
||||
request_id.to_string()
|
||||
}
|
||||
};
|
||||
|
||||
if req.method() == Method::GET {
|
||||
tracing::debug!("{} {} {}", req.method(), req.uri().path(), request_id);
|
||||
} else {
|
||||
tracing::info!("{} {} {}", req.method(), req.uri().path(), request_id);
|
||||
}
|
||||
req.set_context(RequestId(request_id));
|
||||
|
||||
Ok(req)
|
||||
})
|
||||
}
|
||||
|
||||
async fn add_request_id_header_to_response(
|
||||
mut res: Response<Body>,
|
||||
req_info: RequestInfo,
|
||||
) -> Result<Response<Body>, ApiError> {
|
||||
if let Some(request_id) = req_info.context::<RequestId>() {
|
||||
if let Ok(request_header_value) = HeaderValue::from_str(&request_id.0) {
|
||||
res.headers_mut()
|
||||
.insert(&X_REQUEST_ID_HEADER, request_header_value);
|
||||
};
|
||||
};
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub fn make_router() -> RouterBuilder<hyper::Body, ApiError> {
|
||||
Router::builder()
|
||||
.middleware(add_request_id_middleware())
|
||||
.middleware(Middleware::post_with_info(logger))
|
||||
.middleware(Middleware::post_with_info(
|
||||
add_request_id_header_to_response,
|
||||
))
|
||||
.get("/metrics", prometheus_metrics_handler)
|
||||
.err_handler(error::handler)
|
||||
}
|
||||
@@ -143,6 +216,38 @@ pub fn auth_middleware<B: hyper::body::HttpBody + Send + Sync + 'static>(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn add_response_header_middleware<B>(
|
||||
header: &str,
|
||||
value: &str,
|
||||
) -> anyhow::Result<Middleware<B, ApiError>>
|
||||
where
|
||||
B: hyper::body::HttpBody + Send + Sync + 'static,
|
||||
{
|
||||
let name =
|
||||
HeaderName::from_str(header).with_context(|| format!("invalid header name: {header}"))?;
|
||||
let value =
|
||||
HeaderValue::from_str(value).with_context(|| format!("invalid header value: {value}"))?;
|
||||
Ok(Middleware::post_with_info(
|
||||
move |mut response, request_info| {
|
||||
let name = name.clone();
|
||||
let value = value.clone();
|
||||
async move {
|
||||
let headers = response.headers_mut();
|
||||
if headers.contains_key(&name) {
|
||||
tracing::warn!(
|
||||
"{} response already contains header {:?}",
|
||||
request_info.uri(),
|
||||
&name,
|
||||
);
|
||||
} else {
|
||||
headers.insert(name, value);
|
||||
}
|
||||
Ok(response)
|
||||
}
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
pub fn check_permission_with(
|
||||
req: &Request<Body>,
|
||||
check_permission: impl Fn(&Claims) -> Result<(), anyhow::Error>,
|
||||
@@ -169,7 +274,7 @@ pub fn serve_thread_main<S>(
|
||||
where
|
||||
S: Future<Output = ()> + Send + Sync,
|
||||
{
|
||||
info!("Starting an HTTP endpoint at {}", listener.local_addr()?);
|
||||
tracing::info!("Starting an HTTP endpoint at {}", listener.local_addr()?);
|
||||
|
||||
// Create a Service from the router above to handle incoming requests.
|
||||
let service = RouterService::new(router_builder.build().map_err(|err| anyhow!(err))?).unwrap();
|
||||
@@ -189,3 +294,48 @@ where
|
||||
|
||||
Ok(())
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use futures::future::poll_fn;
|
||||
use hyper::service::Service;
|
||||
use routerify::RequestServiceBuilder;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_request_id_returned() {
|
||||
let builder = RequestServiceBuilder::new(make_router().build().unwrap()).unwrap();
|
||||
let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80);
|
||||
let mut service = builder.build(remote_addr);
|
||||
if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await {
|
||||
panic!("request service is not ready: {:?}", e);
|
||||
}
|
||||
|
||||
let mut req: Request<Body> = Request::default();
|
||||
req.headers_mut()
|
||||
.append(&X_REQUEST_ID_HEADER, HeaderValue::from_str("42").unwrap());
|
||||
|
||||
let resp: Response<hyper::body::Body> = service.call(req).await.unwrap();
|
||||
|
||||
let header_val = resp.headers().get(&X_REQUEST_ID_HEADER).unwrap();
|
||||
|
||||
assert!(header_val == "42", "response header mismatch");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_request_id_empty() {
|
||||
let builder = RequestServiceBuilder::new(make_router().build().unwrap()).unwrap();
|
||||
let remote_addr = SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), 80);
|
||||
let mut service = builder.build(remote_addr);
|
||||
if let Err(e) = poll_fn(|ctx| service.poll_ready(ctx)).await {
|
||||
panic!("request service is not ready: {:?}", e);
|
||||
}
|
||||
|
||||
let req: Request<Body> = Request::default();
|
||||
let resp: Response<hyper::body::Body> = service.call(req).await.unwrap();
|
||||
|
||||
let header_val = resp.headers().get(&X_REQUEST_ID_HEADER);
|
||||
|
||||
assert_ne!(header_val, None, "response header should NOT be empty");
|
||||
}
|
||||
}
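A hedged sketch of how the new `add_response_header_middleware` could be combined with `make_router`, assuming it lives next to `make_router` in this module; the header name and value are purely illustrative:

```rust
fn build_router_with_extra_header() -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
    // Every response gets the extra header unless a handler already set it.
    Ok(make_router().middleware(add_response_header_middleware("X-Build-Info", "dev")?))
}
```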
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use hyper::{header, Body, Response, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
use tracing::error;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum ApiError {
|
||||
@@ -76,8 +77,16 @@ impl HttpErrorBody {
|
||||
}
|
||||
|
||||
pub async fn handler(err: routerify::RouteError) -> Response<Body> {
|
||||
tracing::error!("Error processing HTTP request: {:?}", err);
|
||||
err.downcast::<ApiError>()
|
||||
.expect("handler should always return api error")
|
||||
.into_response()
|
||||
let api_error = err
|
||||
.downcast::<ApiError>()
|
||||
.expect("handler should always return api error");
|
||||
|
||||
// Print a stack trace for Internal Server errors
|
||||
if let ApiError::InternalServerError(_) = api_error.as_ref() {
|
||||
error!("Error processing HTTP request: {api_error:?}");
|
||||
} else {
|
||||
error!("Error processing HTTP request: {api_error:#}");
|
||||
}
|
||||
|
||||
api_error.into_response()
|
||||
}
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
use std::fmt::Display;
|
||||
|
||||
use anyhow::Context;
|
||||
use bytes::Buf;
|
||||
use hyper::{header, Body, Request, Response, StatusCode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde::{Deserialize, Serialize, Serializer};
|
||||
|
||||
use super::error::ApiError;
|
||||
|
||||
@@ -31,3 +33,12 @@ pub fn json_response<T: Serialize>(
|
||||
.map_err(|e| ApiError::InternalServerError(e.into()))?;
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
/// Serialize through Display trait.
|
||||
pub fn display_serialize<S, F>(z: &F, s: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
F: Display,
|
||||
{
|
||||
s.serialize_str(&format!("{}", z))
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use std::str::FromStr;
|
||||
use core::fmt;
|
||||
use std::{borrow::Cow, str::FromStr};
|
||||
|
||||
use super::error::ApiError;
|
||||
use anyhow::anyhow;
|
||||
@@ -29,6 +30,50 @@ pub fn parse_request_param<T: FromStr>(
|
||||
}
|
||||
}
|
||||
|
||||
fn get_query_param<'a>(
|
||||
request: &'a Request<Body>,
|
||||
param_name: &str,
|
||||
) -> Result<Option<Cow<'a, str>>, ApiError> {
|
||||
let query = match request.uri().query() {
|
||||
Some(q) => q,
|
||||
None => return Ok(None),
|
||||
};
|
||||
let mut values = url::form_urlencoded::parse(query.as_bytes())
|
||||
.filter_map(|(k, v)| if k == param_name { Some(v) } else { None })
|
||||
// we call .next() twice below. If it's None the first time, .fuse() ensures it's None afterwards
|
||||
.fuse();
|
||||
|
||||
let value1 = values.next();
|
||||
if values.next().is_some() {
|
||||
return Err(ApiError::BadRequest(anyhow!(
|
||||
"param {param_name} specified more than once"
|
||||
)));
|
||||
}
|
||||
Ok(value1)
|
||||
}
|
||||
|
||||
pub fn must_get_query_param<'a>(
|
||||
request: &'a Request<Body>,
|
||||
param_name: &str,
|
||||
) -> Result<Cow<'a, str>, ApiError> {
|
||||
get_query_param(request, param_name)?.ok_or_else(|| {
|
||||
ApiError::BadRequest(anyhow!("no {param_name} specified in query parameters"))
|
||||
})
|
||||
}
|
||||
|
||||
pub fn parse_query_param<E: fmt::Display, T: FromStr<Err = E>>(
|
||||
request: &Request<Body>,
|
||||
param_name: &str,
|
||||
) -> Result<Option<T>, ApiError> {
|
||||
get_query_param(request, param_name)?
|
||||
.map(|v| {
|
||||
v.parse().map_err(|e| {
|
||||
ApiError::BadRequest(anyhow!("cannot parse query param {param_name}: {e}"))
|
||||
})
|
||||
})
|
||||
.transpose()
|
||||
}
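An illustrative handler (not in the patch) showing how the new query-parameter helpers compose; the parameter names are made up and the usual imports of this module are assumed:

```rust
async fn example_handler(req: Request<Body>) -> Result<Response<Body>, ApiError> {
    // Required: rejected with 400 Bad Request if missing or given more than once.
    let tenant_id = must_get_query_param(&req, "tenant_id")?;
    // Optional, parsed via FromStr: rejected with 400 if present but unparseable.
    let limit: Option<u64> = parse_query_param(&req, "limit")?;

    Ok(Response::new(Body::from(format!(
        "tenant={tenant_id} limit={limit:?}"
    ))))
}
```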
|
||||
|
||||
pub async fn ensure_no_body(request: &mut Request<Body>) -> Result<(), ApiError> {
|
||||
match request.body_mut().data().await {
|
||||
Some(_) => Err(ApiError::BadRequest(anyhow!("Unexpected request body"))),
|
||||
|
||||
@@ -52,6 +52,8 @@ pub mod signals;
|
||||
|
||||
pub mod fs_ext;
|
||||
|
||||
pub mod history_buffer;
|
||||
|
||||
/// use with fail::cfg("$name", "return(2000)")
|
||||
#[macro_export]
|
||||
macro_rules! failpoint_sleep_millis_async {
|
||||
|
||||
@@ -45,3 +45,115 @@ pub fn init(log_format: LogFormat) -> anyhow::Result<()> {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Disable the default rust panic hook by using `set_hook`.
|
||||
///
|
||||
/// For neon binaries, the assumption is that tracing is configured before with [`init`], after
|
||||
/// that sentry is configured (if needed). Sentry will install its own on top of this, always
|
||||
/// processing the panic before we log it.
|
||||
///
|
||||
/// When the return value is dropped, the hook is reverted to std default hook (prints to stderr).
|
||||
/// If the assumptions about the initialization order are not held, use
|
||||
/// [`TracingPanicHookGuard::forget`], but keep in mind that if tracing is stopped, then panics will be
|
||||
/// lost.
|
||||
#[must_use]
|
||||
pub fn replace_panic_hook_with_tracing_panic_hook() -> TracingPanicHookGuard {
|
||||
std::panic::set_hook(Box::new(tracing_panic_hook));
|
||||
TracingPanicHookGuard::new()
|
||||
}
|
||||
|
||||
/// Drop guard which restores the std panic hook on drop.
|
||||
///
|
||||
/// Tracing should not be used when it's not configured, but we cannot really latch on to any
|
||||
/// imaginary lifetime of tracing.
|
||||
pub struct TracingPanicHookGuard {
|
||||
act: bool,
|
||||
}
|
||||
|
||||
impl TracingPanicHookGuard {
|
||||
fn new() -> Self {
|
||||
TracingPanicHookGuard { act: true }
|
||||
}
|
||||
|
||||
/// Make this hook guard not do anything when dropped.
|
||||
pub fn forget(&mut self) {
|
||||
self.act = false;
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TracingPanicHookGuard {
|
||||
fn drop(&mut self) {
|
||||
if self.act {
|
||||
let _ = std::panic::take_hook();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Named symbol for our panic hook, which logs the panic.
|
||||
fn tracing_panic_hook(info: &std::panic::PanicInfo) {
|
||||
// following rust 1.66.1 std implementation:
|
||||
// https://github.com/rust-lang/rust/blob/90743e7298aca107ddaa0c202a4d3604e29bfeb6/library/std/src/panicking.rs#L235-L288
|
||||
let location = info.location();
|
||||
|
||||
let msg = match info.payload().downcast_ref::<&'static str>() {
|
||||
Some(s) => *s,
|
||||
None => match info.payload().downcast_ref::<String>() {
|
||||
Some(s) => &s[..],
|
||||
None => "Box<dyn Any>",
|
||||
},
|
||||
};
|
||||
|
||||
let thread = std::thread::current();
|
||||
let thread = thread.name().unwrap_or("<unnamed>");
|
||||
let backtrace = std::backtrace::Backtrace::capture();
|
||||
|
||||
let _entered = if let Some(location) = location {
|
||||
tracing::error_span!("panic", %thread, location = %PrettyLocation(location))
|
||||
} else {
|
||||
// very unlikely to hit here, but the guarantees of std could change
|
||||
tracing::error_span!("panic", %thread)
|
||||
}
|
||||
.entered();
|
||||
|
||||
if backtrace.status() == std::backtrace::BacktraceStatus::Captured {
|
||||
// this has an annoying extra '\n' in the end which anyhow doesn't do, but we cannot really
|
||||
// get rid of it as we cannot hook into std::fmt::Formatter<'_>; we could format to
|
||||
// string, maybe even to a TLS one but tracing already does that.
|
||||
tracing::error!("{msg}\n\nStack backtrace:\n{backtrace}");
|
||||
} else {
|
||||
tracing::error!("{msg}");
|
||||
}
|
||||
|
||||
// ensure that we log something on the panic if this hook is left after tracing has been
|
||||
// unconfigured. worst case when teardown is racing the panic is to log the panic twice.
|
||||
tracing::dispatcher::get_default(|d| {
|
||||
if let Some(_none) = d.downcast_ref::<tracing::subscriber::NoSubscriber>() {
|
||||
let location = location.map(PrettyLocation);
|
||||
log_panic_to_stderr(thread, msg, location, &backtrace);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[cold]
|
||||
fn log_panic_to_stderr(
|
||||
thread: &str,
|
||||
msg: &str,
|
||||
location: Option<PrettyLocation<'_, '_>>,
|
||||
backtrace: &std::backtrace::Backtrace,
|
||||
) {
|
||||
eprintln!("panic while tracing is unconfigured: thread '{thread}' panicked at '{msg}', {location:?}\nStack backtrace:\n{backtrace}");
|
||||
}
|
||||
|
||||
struct PrettyLocation<'a, 'b>(&'a std::panic::Location<'b>);
|
||||
|
||||
impl std::fmt::Display for PrettyLocation<'_, '_> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}:{}:{}", self.0.file(), self.0.line(), self.0.column())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for PrettyLocation<'_, '_> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
<Self as std::fmt::Display>::fmt(self, f)
|
||||
}
|
||||
}
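A minimal sketch of the initialization order the doc comment above describes: configure tracing first, then install the panic hook and keep the guard alive for the lifetime of the program. The `utils::logging` path and the `run()` entry point are assumptions for illustration.

```rust
fn main() -> anyhow::Result<()> {
    // Tracing is assumed to be configured here first (e.g. via this module's init()).

    // Install the tracing panic hook; dropping the guard reverts to the std hook,
    // so keep it alive until the very end of main.
    let _panic_hook_guard = utils::logging::replace_panic_hook_with_tracing_panic_hook();

    // If sentry is used, initialize it after this point; it layers its own hook on top.

    run() // hypothetical application entry point
}

fn run() -> anyhow::Result<()> {
    Ok(())
}
```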
|
||||
|
||||
4
libs/walproposer/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
*.a
*.o
*.tmp
pgdata
39
libs/walproposer/Cargo.toml
Normal file
@@ -0,0 +1,39 @@
|
||||
[package]
|
||||
name = "walproposer"
|
||||
version = "0.1.0"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
atty.workspace = true
|
||||
rand.workspace = true
|
||||
regex.workspace = true
|
||||
bytes.workspace = true
|
||||
byteorder.workspace = true
|
||||
anyhow.workspace = true
|
||||
crc32c.workspace = true
|
||||
hex.workspace = true
|
||||
once_cell.workspace = true
|
||||
log.workspace = true
|
||||
libc.workspace = true
|
||||
memoffset.workspace = true
|
||||
thiserror.workspace = true
|
||||
tracing.workspace = true
|
||||
tracing-subscriber = { workspace = true, features = ["json"] }
|
||||
serde.workspace = true
|
||||
scopeguard.workspace = true
|
||||
utils.workspace = true
|
||||
safekeeper.workspace = true
|
||||
postgres_ffi.workspace = true
|
||||
hyper.workspace = true
|
||||
|
||||
workspace_hack.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
env_logger.workspace = true
|
||||
postgres.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
anyhow.workspace = true
|
||||
bindgen.workspace = true
|
||||
cbindgen = "0.24.0"
|
||||
16
libs/walproposer/README.md
Normal file
@@ -0,0 +1,16 @@
# walproposer Rust module

## Rust -> C

We compile walproposer as a static library and generate Rust bindings for it using `bindgen`.
Entrypoint header file is `bindgen_deps.h`.

## C -> Rust

We use `cbindgen` to generate C bindings for the Rust code. They are stored in `rust_bindings.h`.

## How to run the tests

```
export RUSTFLAGS="-C default-linker-libraries"
```
30
libs/walproposer/bindgen_deps.h
Normal file
@@ -0,0 +1,30 @@
|
||||
/*
|
||||
* This header file is the input to bindgen. It includes all the
|
||||
* PostgreSQL headers that we need to auto-generate Rust structs
|
||||
* from. If you need to expose a new struct to Rust code, add the
|
||||
* header here, and whitelist the struct in the build.rs file.
|
||||
*/
|
||||
#include "c.h"
|
||||
#include "walproposer.h"
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
// Calculate the sum of two numbers. Used to test Rust->C function calls.
|
||||
int TestFunc(int a, int b);
|
||||
|
||||
// Run a client for simple simlib test.
|
||||
void RunClientC(uint32_t serverId);
|
||||
|
||||
void WalProposerRust();
|
||||
|
||||
void WalProposerCleanup();
|
||||
|
||||
extern bool debug_enabled;
|
||||
|
||||
// Initialize global variables before calling any Postgres C code.
|
||||
void MyContextInit();
|
||||
|
||||
XLogRecPtr MyInsertRecord();
|
||||
137
libs/walproposer/build.rs
Normal file
@@ -0,0 +1,137 @@
|
||||
use std::{env, path::PathBuf, process::Command};
|
||||
use anyhow::{anyhow, Context};
|
||||
use bindgen::CargoCallbacks;
|
||||
|
||||
extern crate bindgen;
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
|
||||
|
||||
cbindgen::Builder::new()
|
||||
.with_crate(crate_dir)
|
||||
.with_language(cbindgen::Language::C)
|
||||
.generate()
|
||||
.expect("Unable to generate bindings")
|
||||
.write_to_file("rust_bindings.h");
|
||||
|
||||
// Tell cargo to invalidate the built crate whenever the wrapper changes
|
||||
println!("cargo:rerun-if-changed=bindgen_deps.h,test.c,../../pgxn/neon/walproposer.c,build.sh");
|
||||
println!("cargo:rustc-link-arg=-Wl,--start-group");
|
||||
println!("cargo:rustc-link-arg=-lsim");
|
||||
println!("cargo:rustc-link-arg=-lpgport_srv");
|
||||
println!("cargo:rustc-link-arg=-lpostgres");
|
||||
println!("cargo:rustc-link-arg=-lpgcommon_srv");
|
||||
println!("cargo:rustc-link-arg=-lssl");
|
||||
println!("cargo:rustc-link-arg=-lcrypto");
|
||||
println!("cargo:rustc-link-arg=-lz");
|
||||
println!("cargo:rustc-link-arg=-lpthread");
|
||||
println!("cargo:rustc-link-arg=-lrt");
|
||||
println!("cargo:rustc-link-arg=-ldl");
|
||||
println!("cargo:rustc-link-arg=-lm");
|
||||
println!("cargo:rustc-link-arg=-lwalproposer");
|
||||
println!("cargo:rustc-link-arg=-Wl,--end-group");
|
||||
println!("cargo:rustc-link-search=/home/admin/simulator/libs/walproposer");
|
||||
// disable fPIE
|
||||
println!("cargo:rustc-link-arg=-no-pie");
|
||||
|
||||
// print output of build.sh
|
||||
let output = std::process::Command::new("./build.sh")
|
||||
.output()
|
||||
.expect("could not spawn `clang`");
|
||||
|
||||
println!("stdout: {}", String::from_utf8(output.stdout).unwrap());
|
||||
println!("stderr: {}", String::from_utf8(output.stderr).unwrap());
|
||||
|
||||
if !output.status.success() {
|
||||
// Panic if the command was not successful.
|
||||
panic!("could not compile object file");
|
||||
}
|
||||
|
||||
// // Finding the location of C headers for the Postgres server:
|
||||
// // - if POSTGRES_INSTALL_DIR is set look into it, otherwise look into `<project_root>/pg_install`
|
||||
// // - if there's a `bin/pg_config` file use it for getting include server, otherwise use `<project_root>/pg_install/{PG_MAJORVERSION}/include/postgresql/server`
|
||||
let pg_install_dir = if let Some(postgres_install_dir) = env::var_os("POSTGRES_INSTALL_DIR") {
|
||||
postgres_install_dir.into()
|
||||
} else {
|
||||
PathBuf::from("pg_install")
|
||||
};
|
||||
|
||||
let pg_version = "v15";
|
||||
let mut pg_install_dir_versioned = pg_install_dir.join(pg_version);
|
||||
if pg_install_dir_versioned.is_relative() {
|
||||
let cwd = env::current_dir().context("Failed to get current_dir")?;
|
||||
pg_install_dir_versioned = cwd.join("..").join("..").join(pg_install_dir_versioned);
|
||||
}
|
||||
|
||||
let pg_config_bin = pg_install_dir_versioned
|
||||
.join(pg_version)
|
||||
.join("bin")
|
||||
.join("pg_config");
|
||||
let inc_server_path: String = if pg_config_bin.exists() {
|
||||
let output = Command::new(pg_config_bin)
|
||||
.arg("--includedir-server")
|
||||
.output()
|
||||
.context("failed to execute `pg_config --includedir-server`")?;
|
||||
|
||||
if !output.status.success() {
|
||||
panic!("`pg_config --includedir-server` failed")
|
||||
}
|
||||
|
||||
String::from_utf8(output.stdout)
|
||||
.context("pg_config output is not UTF-8")?
|
||||
.trim_end()
|
||||
.into()
|
||||
} else {
|
||||
let server_path = pg_install_dir_versioned
|
||||
.join("include")
|
||||
.join("postgresql")
|
||||
.join("server")
|
||||
.into_os_string();
|
||||
server_path
|
||||
.into_string()
|
||||
.map_err(|s| anyhow!("Bad postgres server path {s:?}"))?
|
||||
};
|
||||
|
||||
let inc_pgxn_path = "/home/admin/simulator/pgxn/neon";
|
||||
|
||||
// The bindgen::Builder is the main entry point
|
||||
// to bindgen, and lets you build up options for
|
||||
// the resulting bindings.
|
||||
let bindings = bindgen::Builder::default()
|
||||
// The input header we would like to generate
|
||||
// bindings for.
|
||||
.header("bindgen_deps.h")
|
||||
// Tell cargo to invalidate the built crate whenever any of the
|
||||
// included header files changed.
|
||||
.parse_callbacks(Box::new(CargoCallbacks))
|
||||
.allowlist_function("TestFunc")
|
||||
.allowlist_function("RunClientC")
|
||||
.allowlist_function("WalProposerRust")
|
||||
.allowlist_function("MyContextInit")
|
||||
.allowlist_function("WalProposerCleanup")
|
||||
.allowlist_function("MyInsertRecord")
|
||||
.allowlist_var("wal_acceptors_list")
|
||||
.allowlist_var("wal_acceptor_reconnect_timeout")
|
||||
.allowlist_var("wal_acceptor_connection_timeout")
|
||||
.allowlist_var("am_wal_proposer")
|
||||
.allowlist_var("neon_timeline_walproposer")
|
||||
.allowlist_var("neon_tenant_walproposer")
|
||||
.allowlist_var("syncSafekeepers")
|
||||
.allowlist_var("sim_redo_start_lsn")
|
||||
.allowlist_var("debug_enabled")
|
||||
.clang_arg(format!("-I{inc_server_path}"))
|
||||
.clang_arg(format!("-I{inc_pgxn_path}"))
|
||||
.clang_arg(format!("-DSIMLIB"))
|
||||
// Finish the builder and generate the bindings.
|
||||
.generate()
|
||||
// Unwrap the Result and panic on failure.
|
||||
.expect("Unable to generate bindings");
|
||||
|
||||
// Write the bindings to the $OUT_DIR/bindings.rs file.
|
||||
let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()).join("bindings.rs");
|
||||
bindings
|
||||
.write_to_file(out_path)
|
||||
.expect("Couldn't write bindings!");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
21
libs/walproposer/build.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
cd /home/admin/simulator/libs/walproposer
|
||||
|
||||
# TODO: rewrite to Makefile
|
||||
|
||||
make -C ../.. neon-pg-ext-walproposer
|
||||
make -C ../../pg_install/build/v15/src/backend postgres-lib -s
|
||||
cp ../../pg_install/build/v15/src/backend/libpostgres.a .
|
||||
cp ../../pg_install/build/v15/src/common/libpgcommon_srv.a .
|
||||
cp ../../pg_install/build/v15/src/port/libpgport_srv.a .
|
||||
|
||||
clang -g -c libpqwalproposer.c test.c -ferror-limit=1 -I ../../pg_install/v15/include/postgresql/server -I ../../pgxn/neon
|
||||
rm -rf libsim.a
|
||||
ar rcs libsim.a test.o libpqwalproposer.o
|
||||
|
||||
rm -rf libwalproposer.a
|
||||
|
||||
PGXN_DIR=../../pg_install/build/neon-v15/
|
||||
ar rcs libwalproposer.a $PGXN_DIR/walproposer.o $PGXN_DIR/walproposer_utils.o $PGXN_DIR/neon.o
|
||||
542
libs/walproposer/libpqwalproposer.c
Normal file
@@ -0,0 +1,542 @@
|
||||
#include "postgres.h"
|
||||
#include "neon.h"
|
||||
#include "walproposer.h"
|
||||
#include "rust_bindings.h"
|
||||
#include "replication/message.h"
|
||||
#include "access/xlog_internal.h"
|
||||
|
||||
// defined in walproposer.h
|
||||
uint64 sim_redo_start_lsn;
|
||||
XLogRecPtr sim_latest_available_lsn;
|
||||
|
||||
/* Header in walproposer.h -- Wrapper struct to abstract away the libpq connection */
|
||||
struct WalProposerConn
|
||||
{
|
||||
int64_t tcp;
|
||||
};
|
||||
|
||||
/* Helper function */
|
||||
static bool
|
||||
ensure_nonblocking_status(WalProposerConn *conn, bool is_nonblocking)
|
||||
{
|
||||
// walprop_log(LOG, "not implemented");
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Exported function definitions */
|
||||
char *
|
||||
walprop_error_message(WalProposerConn *conn)
|
||||
{
|
||||
// walprop_log(LOG, "not implemented");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
WalProposerConnStatusType
|
||||
walprop_status(WalProposerConn *conn)
|
||||
{
|
||||
// walprop_log(LOG, "not implemented: walprop_status");
|
||||
return WP_CONNECTION_OK;
|
||||
}
|
||||
|
||||
WalProposerConn *
|
||||
walprop_connect_start(char *conninfo)
|
||||
{
|
||||
WalProposerConn *conn;
|
||||
|
||||
walprop_log(LOG, "walprop_connect_start: %s", conninfo);
|
||||
|
||||
const char *connstr_prefix = "host=node port=";
|
||||
Assert(strncmp(conninfo, connstr_prefix, strlen(connstr_prefix)) == 0);
|
||||
|
||||
int nodeId = atoi(conninfo + strlen(connstr_prefix));
|
||||
|
||||
conn = palloc(sizeof(WalProposerConn));
|
||||
conn->tcp = sim_open_tcp(nodeId);
|
||||
return conn;
|
||||
}
|
||||
|
||||
WalProposerConnectPollStatusType
|
||||
walprop_connect_poll(WalProposerConn *conn)
|
||||
{
|
||||
// walprop_log(LOG, "not implemented: walprop_connect_poll");
|
||||
return WP_CONN_POLLING_OK;
|
||||
}
|
||||
|
||||
bool
|
||||
walprop_send_query(WalProposerConn *conn, char *query)
|
||||
{
|
||||
// walprop_log(LOG, "not implemented: walprop_send_query");
|
||||
return true;
|
||||
}
|
||||
|
||||
WalProposerExecStatusType
|
||||
walprop_get_query_result(WalProposerConn *conn)
|
||||
{
|
||||
// walprop_log(LOG, "not implemented: walprop_get_query_result");
|
||||
return WP_EXEC_SUCCESS_COPYBOTH;
|
||||
}
|
||||
|
||||
pgsocket
|
||||
walprop_socket(WalProposerConn *conn)
|
||||
{
|
||||
return (pgsocket) conn->tcp;
|
||||
}
|
||||
|
||||
int
|
||||
walprop_flush(WalProposerConn *conn)
|
||||
{
|
||||
// walprop_log(LOG, "not implemented");
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
walprop_finish(WalProposerConn *conn)
|
||||
{
|
||||
// walprop_log(LOG, "walprop_finish not implemented");
|
||||
}
|
||||
|
||||
/*
|
||||
* Receive a message from the safekeeper.
|
||||
*
|
||||
* On success, the data is placed in *buf. It is valid until the next call
|
||||
* to this function.
|
||||
*/
|
||||
PGAsyncReadResult
|
||||
walprop_async_read(WalProposerConn *conn, char **buf, int *amount)
|
||||
{
|
||||
uintptr_t len;
|
||||
char *msg;
|
||||
Event event;
|
||||
|
||||
event = sim_epoll_peek(0);
|
||||
if (event.tcp != conn->tcp || event.tag != Message || event.any_message != Bytes)
|
||||
return PG_ASYNC_READ_TRY_AGAIN;
|
||||
|
||||
event = sim_epoll_rcv(0);
|
||||
|
||||
// walprop_log(LOG, "walprop_async_read, T: %d, tcp: %d, tag: %d", (int) event.tag, (int) event.tcp, (int) event.any_message);
|
||||
Assert(event.tcp == conn->tcp);
|
||||
Assert(event.tag == Message);
|
||||
Assert(event.any_message == Bytes);
|
||||
|
||||
msg = (char*) sim_msg_get_bytes(&len);
|
||||
*buf = msg;
|
||||
*amount = len;
|
||||
// walprop_log(LOG, "walprop_async_read: %d", (int) len);
|
||||
|
||||
return PG_ASYNC_READ_SUCCESS;
|
||||
}
|
||||
|
||||
PGAsyncWriteResult
|
||||
walprop_async_write(WalProposerConn *conn, void const *buf, size_t size)
|
||||
{
|
||||
// walprop_log(LOG, "walprop_async_write");
|
||||
sim_msg_set_bytes(buf, size);
|
||||
sim_tcp_send(conn->tcp);
|
||||
return PG_ASYNC_WRITE_SUCCESS;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function is very similar to walprop_async_write. For more
|
||||
* information, refer to the comments there.
|
||||
*/
|
||||
bool
|
||||
walprop_blocking_write(WalProposerConn *conn, void const *buf, size_t size)
|
||||
{
|
||||
// walprop_log(LOG, "walprop_blocking_write");
|
||||
sim_msg_set_bytes(buf, size);
|
||||
sim_tcp_send(conn->tcp);
|
||||
return true;
|
||||
}
|
||||
|
||||
void
|
||||
sim_start_replication(XLogRecPtr startptr)
|
||||
{
|
||||
walprop_log(LOG, "sim_start_replication: %X/%X", LSN_FORMAT_ARGS(startptr));
|
||||
sim_latest_available_lsn = startptr;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
XLogRecPtr endptr = sim_latest_available_lsn;
|
||||
|
||||
Assert(startptr <= endptr);
|
||||
if (endptr > startptr)
|
||||
{
|
||||
WalProposerBroadcast(startptr, endptr);
|
||||
startptr = endptr;
|
||||
}
|
||||
|
||||
WalProposerPoll();
|
||||
}
|
||||
}
|
||||
|
||||
#define UsableBytesInPage (XLOG_BLCKSZ - SizeOfXLogShortPHD)
|
||||
|
||||
static int UsableBytesInSegment =
|
||||
(DEFAULT_XLOG_SEG_SIZE / XLOG_BLCKSZ * UsableBytesInPage) -
|
||||
(SizeOfXLogLongPHD - SizeOfXLogShortPHD);
|
||||
|
||||
/*
|
||||
* Converts a "usable byte position" to XLogRecPtr. A usable byte position
|
||||
* is the position starting from the beginning of WAL, excluding all WAL
|
||||
* page headers.
|
||||
*/
|
||||
static XLogRecPtr
|
||||
XLogBytePosToRecPtr(uint64 bytepos)
|
||||
{
|
||||
uint64 fullsegs;
|
||||
uint64 fullpages;
|
||||
uint64 bytesleft;
|
||||
uint32 seg_offset;
|
||||
XLogRecPtr result;
|
||||
|
||||
fullsegs = bytepos / UsableBytesInSegment;
|
||||
bytesleft = bytepos % UsableBytesInSegment;
|
||||
|
||||
if (bytesleft < XLOG_BLCKSZ - SizeOfXLogLongPHD)
|
||||
{
|
||||
/* fits on first page of segment */
|
||||
seg_offset = bytesleft + SizeOfXLogLongPHD;
|
||||
}
|
||||
else
|
||||
{
|
||||
/* account for the first page on segment with long header */
|
||||
seg_offset = XLOG_BLCKSZ;
|
||||
bytesleft -= XLOG_BLCKSZ - SizeOfXLogLongPHD;
|
||||
|
||||
fullpages = bytesleft / UsableBytesInPage;
|
||||
bytesleft = bytesleft % UsableBytesInPage;
|
||||
|
||||
seg_offset += fullpages * XLOG_BLCKSZ + bytesleft + SizeOfXLogShortPHD;
|
||||
}
|
||||
|
||||
XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, wal_segment_size, result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* Convert an XLogRecPtr to a "usable byte position".
|
||||
*/
|
||||
static uint64
|
||||
XLogRecPtrToBytePos(XLogRecPtr ptr)
|
||||
{
|
||||
uint64 fullsegs;
|
||||
uint32 fullpages;
|
||||
uint32 offset;
|
||||
uint64 result;
|
||||
|
||||
XLByteToSeg(ptr, fullsegs, wal_segment_size);
|
||||
|
||||
fullpages = (XLogSegmentOffset(ptr, wal_segment_size)) / XLOG_BLCKSZ;
|
||||
offset = ptr % XLOG_BLCKSZ;
|
||||
|
||||
if (fullpages == 0)
|
||||
{
|
||||
result = fullsegs * UsableBytesInSegment;
|
||||
if (offset > 0)
|
||||
{
|
||||
Assert(offset >= SizeOfXLogLongPHD);
|
||||
result += offset - SizeOfXLogLongPHD;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
result = fullsegs * UsableBytesInSegment +
|
||||
(XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */
|
||||
(fullpages - 1) * UsableBytesInPage; /* full pages */
|
||||
if (offset > 0)
|
||||
{
|
||||
Assert(offset >= SizeOfXLogShortPHD);
|
||||
result += offset - SizeOfXLogShortPHD;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#define max_rdatas 16
|
||||
|
||||
void InitMyInsert();
|
||||
static void MyBeginInsert();
|
||||
static void MyRegisterData(char *data, int len);
|
||||
static XLogRecPtr MyFinishInsert(RmgrId rmid, uint8 info, uint8 flags);
|
||||
static void MyCopyXLogRecordToWAL(int write_len, XLogRecData *rdata, XLogRecPtr StartPos, XLogRecPtr EndPos);
|
||||
|
||||
/*
|
||||
* An array of XLogRecData structs, to hold registered data.
|
||||
*/
|
||||
static XLogRecData rdatas[max_rdatas];
|
||||
static int num_rdatas; /* entries currently used */
|
||||
static uint32 mainrdata_len; /* total # of bytes in chain */
|
||||
static XLogRecData hdr_rdt;
|
||||
static char hdr_scratch[16000];
|
||||
static XLogRecPtr CurrBytePos;
|
||||
static XLogRecPtr PrevBytePos;
|
||||
|
||||
void InitMyInsert()
|
||||
{
|
||||
CurrBytePos = sim_redo_start_lsn;
|
||||
PrevBytePos = InvalidXLogRecPtr;
|
||||
sim_latest_available_lsn = sim_redo_start_lsn;
|
||||
}
|
||||
|
||||
static void MyBeginInsert()
|
||||
{
|
||||
num_rdatas = 0;
|
||||
mainrdata_len = 0;
|
||||
}
|
||||
|
||||
static void MyRegisterData(char *data, int len)
|
||||
{
|
||||
XLogRecData *rdata;
|
||||
|
||||
if (num_rdatas >= max_rdatas)
|
||||
walprop_log(ERROR, "too much WAL data");
|
||||
rdata = &rdatas[num_rdatas++];
|
||||
|
||||
rdata->data = data;
|
||||
rdata->len = len;
|
||||
rdata->next = NULL;
|
||||
|
||||
if (num_rdatas > 1) {
|
||||
rdatas[num_rdatas - 2].next = rdata;
|
||||
}
|
||||
|
||||
mainrdata_len += len;
|
||||
}
|
||||
|
||||
static XLogRecPtr
|
||||
MyFinishInsert(RmgrId rmid, uint8 info, uint8 flags)
|
||||
{
|
||||
XLogRecData *rdt;
|
||||
uint32 total_len = 0;
|
||||
int block_id;
|
||||
pg_crc32c rdata_crc;
|
||||
XLogRecord *rechdr;
|
||||
char *scratch = hdr_scratch;
|
||||
int size;
|
||||
XLogRecPtr StartPos;
|
||||
XLogRecPtr EndPos;
|
||||
uint64 startbytepos;
|
||||
uint64 endbytepos;
|
||||
|
||||
/*
|
||||
* Note: this function can be called multiple times for the same record.
|
||||
* All the modifications we do to the rdata chains below must handle that.
|
||||
*/
|
||||
|
||||
/* The record begins with the fixed-size header */
|
||||
rechdr = (XLogRecord *) scratch;
|
||||
scratch += SizeOfXLogRecord;
|
||||
|
||||
hdr_rdt.data = hdr_scratch;
|
||||
|
||||
if (num_rdatas > 0)
|
||||
{
|
||||
hdr_rdt.next = &rdatas[0];
|
||||
}
|
||||
else
|
||||
{
|
||||
hdr_rdt.next = NULL;
|
||||
}
|
||||
|
||||
/* followed by main data, if any */
|
||||
if (mainrdata_len > 0)
|
||||
{
|
||||
if (mainrdata_len > 255)
|
||||
{
|
||||
*(scratch++) = (char) XLR_BLOCK_ID_DATA_LONG;
|
||||
memcpy(scratch, &mainrdata_len, sizeof(uint32));
|
||||
scratch += sizeof(uint32);
|
||||
}
|
||||
else
|
||||
{
|
||||
*(scratch++) = (char) XLR_BLOCK_ID_DATA_SHORT;
|
||||
*(scratch++) = (uint8) mainrdata_len;
|
||||
}
|
||||
total_len += mainrdata_len;
|
||||
}
|
||||
|
||||
hdr_rdt.len = (scratch - hdr_scratch);
|
||||
total_len += hdr_rdt.len;
|
||||
|
||||
/*
|
||||
* Calculate CRC of the data
|
||||
*
|
||||
* Note that the record header isn't added into the CRC initially since we
|
||||
* don't know the prev-link yet. Thus, the CRC will represent the CRC of
|
||||
* the whole record in the order: rdata, then backup blocks, then record
|
||||
* header.
|
||||
*/
|
||||
INIT_CRC32C(rdata_crc);
|
||||
COMP_CRC32C(rdata_crc, hdr_scratch + SizeOfXLogRecord, hdr_rdt.len - SizeOfXLogRecord);
|
||||
for (size_t i = 0; i < num_rdatas; i++)
|
||||
{
|
||||
rdt = &rdatas[i];
|
||||
COMP_CRC32C(rdata_crc, rdt->data, rdt->len);
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill in the fields in the record header. Prev-link is filled in later,
|
||||
* once we know where in the WAL the record will be inserted. The CRC does
|
||||
* not include the record header yet.
|
||||
*/
|
||||
rechdr->xl_xid = 0;
|
||||
rechdr->xl_tot_len = total_len;
|
||||
rechdr->xl_info = info;
|
||||
rechdr->xl_rmid = rmid;
|
||||
rechdr->xl_prev = InvalidXLogRecPtr;
|
||||
rechdr->xl_crc = rdata_crc;
|
||||
|
||||
size = MAXALIGN(rechdr->xl_tot_len);
|
||||
|
||||
/* All (non xlog-switch) records should contain data. */
|
||||
Assert(size > SizeOfXLogRecord);
|
||||
|
||||
startbytepos = XLogRecPtrToBytePos(CurrBytePos);
|
||||
endbytepos = startbytepos + size;
|
||||
|
||||
// Get the position.
|
||||
StartPos = XLogBytePosToRecPtr(startbytepos);
|
||||
EndPos = XLogBytePosToRecPtr(startbytepos + size);
|
||||
rechdr->xl_prev = PrevBytePos;
|
||||
|
||||
Assert(XLogRecPtrToBytePos(StartPos) == startbytepos);
|
||||
Assert(XLogRecPtrToBytePos(EndPos) == endbytepos);
|
||||
|
||||
// Update global pointers.
|
||||
CurrBytePos = EndPos;
|
||||
PrevBytePos = StartPos;
|
||||
|
||||
/*
|
||||
* Now that xl_prev has been filled in, calculate CRC of the record
|
||||
* header.
|
||||
*/
|
||||
rdata_crc = rechdr->xl_crc;
|
||||
COMP_CRC32C(rdata_crc, rechdr, offsetof(XLogRecord, xl_crc));
|
||||
FIN_CRC32C(rdata_crc);
|
||||
rechdr->xl_crc = rdata_crc;
|
||||
|
||||
// Now write it to disk.
|
||||
MyCopyXLogRecordToWAL(rechdr->xl_tot_len, &hdr_rdt, StartPos, EndPos);
|
||||
return EndPos;
|
||||
}
|
||||
|
||||
#define INSERT_FREESPACE(endptr) \
|
||||
(((endptr) % XLOG_BLCKSZ == 0) ? 0 : (XLOG_BLCKSZ - (endptr) % XLOG_BLCKSZ))
|
||||
|
||||
static void
|
||||
MyCopyXLogRecordToWAL(int write_len, XLogRecData *rdata, XLogRecPtr StartPos, XLogRecPtr EndPos)
|
||||
{
|
||||
XLogRecPtr CurrPos;
|
||||
int written;
|
||||
int freespace;
|
||||
|
||||
// Write hdr_rdt followed by the `num_rdatas` registered data chunks.
|
||||
CurrPos = StartPos;
|
||||
freespace = INSERT_FREESPACE(CurrPos);
|
||||
written = 0;
|
||||
|
||||
Assert(freespace >= sizeof(uint32));
|
||||
|
||||
while (rdata != NULL)
|
||||
{
|
||||
char *rdata_data = rdata->data;
|
||||
int rdata_len = rdata->len;
|
||||
|
||||
while (rdata_len >= freespace)
|
||||
{
|
||||
char header_buf[SizeOfXLogLongPHD];
|
||||
XLogPageHeader NewPage = (XLogPageHeader) header_buf;
|
||||
|
||||
Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || freespace == 0);
|
||||
XLogWalPropWrite(rdata_data, freespace, CurrPos);
|
||||
rdata_data += freespace;
|
||||
rdata_len -= freespace;
|
||||
written += freespace;
|
||||
CurrPos += freespace;
|
||||
|
||||
// Init new page
|
||||
MemSet(header_buf, 0, SizeOfXLogLongPHD);
|
||||
|
||||
/*
|
||||
* Fill the new page's header
|
||||
*/
|
||||
NewPage->xlp_magic = XLOG_PAGE_MAGIC;
|
||||
|
||||
/* NewPage->xlp_info = 0; */ /* done by memset */
|
||||
NewPage->xlp_tli = 1;
|
||||
NewPage->xlp_pageaddr = CurrPos;
|
||||
|
||||
/* NewPage->xlp_rem_len = 0; */ /* done by memset */
|
||||
NewPage->xlp_info |= XLP_BKP_REMOVABLE;
|
||||
|
||||
/*
|
||||
* If first page of an XLOG segment file, make it a long header.
|
||||
*/
|
||||
if ((XLogSegmentOffset(NewPage->xlp_pageaddr, wal_segment_size)) == 0)
|
||||
{
|
||||
XLogLongPageHeader NewLongPage = (XLogLongPageHeader) NewPage;
|
||||
|
||||
NewLongPage->xlp_sysid = 0;
|
||||
NewLongPage->xlp_seg_size = wal_segment_size;
|
||||
NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
|
||||
NewPage->xlp_info |= XLP_LONG_HEADER;
|
||||
}
|
||||
|
||||
NewPage->xlp_rem_len = write_len - written;
|
||||
if (NewPage->xlp_rem_len > 0) {
|
||||
NewPage->xlp_info |= XLP_FIRST_IS_CONTRECORD;
|
||||
}
|
||||
|
||||
/* write the new page header and skip over it */
|
||||
if (XLogSegmentOffset(CurrPos, wal_segment_size) == 0)
|
||||
{
|
||||
XLogWalPropWrite(header_buf, SizeOfXLogLongPHD, CurrPos);
|
||||
CurrPos += SizeOfXLogLongPHD;
|
||||
}
|
||||
else
|
||||
{
|
||||
XLogWalPropWrite(header_buf, SizeOfXLogShortPHD, CurrPos);
|
||||
CurrPos += SizeOfXLogShortPHD;
|
||||
}
|
||||
freespace = INSERT_FREESPACE(CurrPos);
|
||||
}
|
||||
|
||||
Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || rdata_len == 0);
|
||||
XLogWalPropWrite(rdata_data, rdata_len, CurrPos);
|
||||
CurrPos += rdata_len;
|
||||
written += rdata_len;
|
||||
freespace -= rdata_len;
|
||||
|
||||
rdata = rdata->next;
|
||||
}
|
||||
|
||||
Assert(written == write_len);
|
||||
CurrPos = MAXALIGN64(CurrPos);
|
||||
Assert(CurrPos == EndPos);
|
||||
}
|
||||
|
||||
XLogRecPtr MyInsertRecord()
|
||||
{
|
||||
const char *prefix = "prefix";
|
||||
const char *message = "message";
|
||||
size_t size = 7;
|
||||
bool transactional = false;
|
||||
|
||||
xl_logical_message xlrec;
|
||||
|
||||
xlrec.dbId = 0;
|
||||
xlrec.transactional = transactional;
|
||||
/* trailing zero is critical; see logicalmsg_desc */
|
||||
xlrec.prefix_size = strlen(prefix) + 1;
|
||||
xlrec.message_size = size;
|
||||
|
||||
MyBeginInsert();
|
||||
MyRegisterData((char *) &xlrec, SizeOfLogicalMessage);
|
||||
MyRegisterData(unconstify(char *, prefix), xlrec.prefix_size);
|
||||
MyRegisterData(unconstify(char *, message), size);
|
||||
|
||||
return MyFinishInsert(RM_LOGICALMSG_ID, XLOG_LOGICAL_MESSAGE, XLOG_INCLUDE_ORIGIN);
|
||||
}
|
||||
106  libs/walproposer/rust_bindings.h  Normal file
@@ -0,0 +1,106 @@
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
/**
|
||||
* List of all possible AnyMessage.
|
||||
*/
|
||||
enum AnyMessageTag {
|
||||
None,
|
||||
InternalConnect,
|
||||
Just32,
|
||||
ReplCell,
|
||||
Bytes,
|
||||
LSN,
|
||||
};
|
||||
typedef uint8_t AnyMessageTag;
|
||||
|
||||
/**
|
||||
* List of all possible NodeEvent.
|
||||
*/
|
||||
enum EventTag {
|
||||
Timeout,
|
||||
Accept,
|
||||
Closed,
|
||||
Message,
|
||||
Internal,
|
||||
};
|
||||
typedef uint8_t EventTag;
|
||||
|
||||
/**
|
||||
* Event returned by epoll_recv.
|
||||
*/
|
||||
typedef struct Event {
|
||||
EventTag tag;
|
||||
int64_t tcp;
|
||||
AnyMessageTag any_message;
|
||||
} Event;
|
||||
|
||||
void rust_function(uint32_t a);
|
||||
|
||||
/**
|
||||
* C API for the node os.
|
||||
*/
|
||||
void sim_sleep(uint64_t ms);
|
||||
|
||||
uint64_t sim_random(uint64_t max);
|
||||
|
||||
uint32_t sim_id(void);
|
||||
|
||||
int64_t sim_open_tcp(uint32_t dst);
|
||||
|
||||
int64_t sim_open_tcp_nopoll(uint32_t dst);
|
||||
|
||||
/**
|
||||
* Send MESSAGE_BUF content to the given tcp.
|
||||
*/
|
||||
void sim_tcp_send(int64_t tcp);
|
||||
|
||||
/**
|
||||
* Receive a message from the given tcp. Can be used only with tcp opened with
|
||||
* `sim_open_tcp_nopoll`.
|
||||
*/
|
||||
struct Event sim_tcp_recv(int64_t tcp);
|
||||
|
||||
struct Event sim_epoll_rcv(int64_t timeout);
|
||||
|
||||
struct Event sim_epoll_peek(int64_t timeout);
|
||||
|
||||
int64_t sim_now(void);
|
||||
|
||||
void sim_exit(int32_t code, const uint8_t *msg);
|
||||
|
||||
void sim_set_result(int32_t code, const uint8_t *msg);
|
||||
|
||||
void sim_log_event(const int8_t *msg);
|
||||
|
||||
/**
|
||||
* Get tag of the current message.
|
||||
*/
|
||||
AnyMessageTag sim_msg_tag(void);
|
||||
|
||||
/**
|
||||
* Read AnyMessage::Just32 message.
|
||||
*/
|
||||
void sim_msg_get_just_u32(uint32_t *val);
|
||||
|
||||
/**
|
||||
* Read AnyMessage::LSN message.
|
||||
*/
|
||||
void sim_msg_get_lsn(uint64_t *val);
|
||||
|
||||
/**
|
||||
* Write AnyMessage::ReplCell message.
|
||||
*/
|
||||
void sim_msg_set_repl_cell(uint32_t value, uint32_t client_id, uint32_t seqno);
|
||||
|
||||
/**
|
||||
* Write AnyMessage::Bytes message.
|
||||
*/
|
||||
void sim_msg_set_bytes(const char *bytes, uintptr_t len);
|
||||
|
||||
/**
|
||||
* Read AnyMessage::Bytes message.
|
||||
*/
|
||||
const char *sim_msg_get_bytes(uintptr_t *len);
|
||||
36  libs/walproposer/src/lib.rs  Normal file
@@ -0,0 +1,36 @@
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(non_snake_case)]
|
||||
|
||||
use safekeeper::simlib::node_os::NodeOs;
|
||||
use tracing::info;
|
||||
|
||||
pub mod bindings {
|
||||
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn rust_function(a: u32) {
|
||||
info!("Hello from Rust!");
|
||||
info!("a: {}", a);
|
||||
}
|
||||
|
||||
pub mod sim;
|
||||
pub mod sim_proto;
|
||||
|
||||
#[cfg(test)]
|
||||
mod test;
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod simtest;
|
||||
|
||||
pub fn c_context() -> Option<Box<dyn Fn(NodeOs) + Send + Sync>> {
|
||||
Some(Box::new(|os: NodeOs| {
|
||||
sim::c_attach_node_os(os);
|
||||
unsafe { bindings::MyContextInit(); }
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn enable_debug() {
|
||||
unsafe { bindings::debug_enabled = true; }
|
||||
}
|
||||
240  libs/walproposer/src/sim.rs  Normal file
@@ -0,0 +1,240 @@
|
||||
use log::debug;
|
||||
use safekeeper::simlib::{network::TCP, node_os::NodeOs, world::NodeEvent};
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
collections::HashMap,
|
||||
ffi::{CStr, CString},
|
||||
};
|
||||
use tracing::trace;
|
||||
|
||||
use crate::sim_proto::{anymessage_tag, AnyMessageTag, Event, EventTag, MESSAGE_BUF};
|
||||
|
||||
thread_local! {
|
||||
static CURRENT_NODE_OS: RefCell<Option<NodeOs>> = RefCell::new(None);
|
||||
static TCP_CACHE: RefCell<HashMap<i64, TCP>> = RefCell::new(HashMap::new());
|
||||
}
|
||||
|
||||
/// Get the current node os.
|
||||
fn os() -> NodeOs {
|
||||
CURRENT_NODE_OS.with(|cell| cell.borrow().clone().expect("no node os set"))
|
||||
}
|
||||
|
||||
fn tcp_save(tcp: TCP) -> i64 {
|
||||
TCP_CACHE.with(|cell| {
|
||||
let mut cache = cell.borrow_mut();
|
||||
let id = tcp.id();
|
||||
cache.insert(id, tcp);
|
||||
id
|
||||
})
|
||||
}
|
||||
|
||||
fn tcp_load(id: i64) -> TCP {
|
||||
TCP_CACHE.with(|cell| {
|
||||
let cache = cell.borrow();
|
||||
cache.get(&id).expect("unknown TCP id").clone()
|
||||
})
|
||||
}
|
||||
|
||||
/// Should be called before calling any of the C functions.
|
||||
pub(crate) fn c_attach_node_os(os: NodeOs) {
|
||||
CURRENT_NODE_OS.with(|cell| {
|
||||
*cell.borrow_mut() = Some(os);
|
||||
});
|
||||
TCP_CACHE.with(|cell| {
|
||||
*cell.borrow_mut() = HashMap::new();
|
||||
});
|
||||
}
|
||||
|
||||
/// C API for the node os.
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_sleep(ms: u64) {
|
||||
os().sleep(ms);
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_random(max: u64) -> u64 {
|
||||
os().random(max)
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_id() -> u32 {
|
||||
os().id().into()
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_open_tcp(dst: u32) -> i64 {
|
||||
tcp_save(os().open_tcp(dst.into()))
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_open_tcp_nopoll(dst: u32) -> i64 {
|
||||
tcp_save(os().open_tcp_nopoll(dst.into()))
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
/// Send MESSAGE_BUF content to the given tcp.
|
||||
pub extern "C" fn sim_tcp_send(tcp: i64) {
|
||||
tcp_load(tcp).send(MESSAGE_BUF.with(|cell| cell.borrow().clone()));
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
/// Receive a message from the given tcp. Can be used only with tcp opened with
|
||||
/// `sim_open_tcp_nopoll`.
|
||||
pub extern "C" fn sim_tcp_recv(tcp: i64) -> Event {
|
||||
let event = tcp_load(tcp).recv();
|
||||
match event {
|
||||
NodeEvent::Accept(_) => unreachable!(),
|
||||
NodeEvent::Closed(_) => Event {
|
||||
tag: EventTag::Closed,
|
||||
tcp: 0,
|
||||
any_message: AnyMessageTag::None,
|
||||
},
|
||||
NodeEvent::Internal(_) => unreachable!(),
|
||||
NodeEvent::Message((message, _)) => {
|
||||
// store message in thread local storage, C code should use
|
||||
// sim_msg_* functions to access it.
|
||||
MESSAGE_BUF.with(|cell| {
|
||||
*cell.borrow_mut() = message.clone();
|
||||
});
|
||||
Event {
|
||||
tag: EventTag::Message,
|
||||
tcp: 0,
|
||||
any_message: anymessage_tag(&message),
|
||||
}
|
||||
}
|
||||
NodeEvent::WakeTimeout(_) => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_epoll_rcv(timeout: i64) -> Event {
|
||||
let event = os().epoll_recv(timeout);
|
||||
let event = if let Some(event) = event {
|
||||
event
|
||||
} else {
|
||||
return Event {
|
||||
tag: EventTag::Timeout,
|
||||
tcp: 0,
|
||||
any_message: AnyMessageTag::None,
|
||||
};
|
||||
};
|
||||
|
||||
match event {
|
||||
NodeEvent::Accept(tcp) => Event {
|
||||
tag: EventTag::Accept,
|
||||
tcp: tcp_save(tcp),
|
||||
any_message: AnyMessageTag::None,
|
||||
},
|
||||
NodeEvent::Closed(tcp) => Event {
|
||||
tag: EventTag::Closed,
|
||||
tcp: tcp_save(tcp),
|
||||
any_message: AnyMessageTag::None,
|
||||
},
|
||||
NodeEvent::Message((message, tcp)) => {
|
||||
// store message in thread local storage, C code should use
|
||||
// sim_msg_* functions to access it.
|
||||
MESSAGE_BUF.with(|cell| {
|
||||
*cell.borrow_mut() = message.clone();
|
||||
});
|
||||
Event {
|
||||
tag: EventTag::Message,
|
||||
tcp: tcp_save(tcp),
|
||||
any_message: anymessage_tag(&message),
|
||||
}
|
||||
}
|
||||
NodeEvent::Internal(message) => {
|
||||
// store message in thread local storage, C code should use
|
||||
// sim_msg_* functions to access it.
|
||||
MESSAGE_BUF.with(|cell| {
|
||||
*cell.borrow_mut() = message.clone();
|
||||
});
|
||||
Event {
|
||||
tag: EventTag::Internal,
|
||||
tcp: 0,
|
||||
any_message: anymessage_tag(&message),
|
||||
}
|
||||
}
|
||||
NodeEvent::WakeTimeout(_) => {
|
||||
// can't happen
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
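To illustrate how these shims are meant to be driven, a minimal polling sketch follows (written Rust-side for brevity; the real caller is the C walproposer code). The 1000-unit timeout and the LSN handling are placeholders, and sim_msg_get_lsn lives in sim_proto, hence the qualified path.

// Hypothetical polling loop body built on the shims defined in this file.
fn poll_once() {
    let ev = sim_epoll_rcv(1000); // wait up to 1000 units of simulated time
    match ev.tag {
        EventTag::Timeout => { /* nothing arrived in time */ }
        EventTag::Accept => { /* ev.tcp is a freshly cached TCP handle */ }
        EventTag::Closed => { /* peer went away */ }
        EventTag::Message | EventTag::Internal => {
            // the payload now sits in MESSAGE_BUF; pull it out by tag
            if matches!(ev.any_message, AnyMessageTag::LSN) {
                let mut lsn = 0u64;
                crate::sim_proto::sim_msg_get_lsn(&mut lsn);
                // ... feed lsn into the caller's state machine ...
            }
        }
    }
}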
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_epoll_peek(timeout: i64) -> Event {
|
||||
let event = os().epoll_peek(timeout);
|
||||
let event = if let Some(event) = event {
|
||||
event
|
||||
} else {
|
||||
return Event {
|
||||
tag: EventTag::Timeout,
|
||||
tcp: 0,
|
||||
any_message: AnyMessageTag::None,
|
||||
};
|
||||
};
|
||||
|
||||
match event {
|
||||
NodeEvent::Accept(tcp) => Event {
|
||||
tag: EventTag::Accept,
|
||||
tcp: tcp_save(tcp),
|
||||
any_message: AnyMessageTag::None,
|
||||
},
|
||||
NodeEvent::Closed(tcp) => Event {
|
||||
tag: EventTag::Closed,
|
||||
tcp: tcp_save(tcp),
|
||||
any_message: AnyMessageTag::None,
|
||||
},
|
||||
NodeEvent::Message((message, tcp)) => Event {
|
||||
tag: EventTag::Message,
|
||||
tcp: tcp_save(tcp),
|
||||
any_message: anymessage_tag(&message),
|
||||
},
|
||||
NodeEvent::Internal(message) => Event {
|
||||
tag: EventTag::Internal,
|
||||
tcp: 0,
|
||||
any_message: anymessage_tag(&message),
|
||||
},
|
||||
NodeEvent::WakeTimeout(_) => {
|
||||
// can't happen
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_now() -> i64 {
|
||||
os().now() as i64
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_exit(code: i32, msg: *const u8) {
|
||||
trace!("sim_exit({}, {:?})", code, msg);
|
||||
sim_set_result(code, msg);
|
||||
|
||||
// I tried to make use of pthread_exit, but it doesn't work.
|
||||
// https://github.com/rust-lang/unsafe-code-guidelines/issues/211
|
||||
// unsafe { libc::pthread_exit(std::ptr::null_mut()) };
|
||||
|
||||
// https://doc.rust-lang.org/nomicon/unwinding.html
|
||||
// Everyone on the internet says this is UB, but it works for me,
|
||||
// so I'm going to use it for now.
|
||||
panic!("sim_exit() called from C code")
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_set_result(code: i32, msg: *const u8) {
|
||||
let msg = unsafe { CStr::from_ptr(msg as *const i8) };
|
||||
let msg = msg.to_string_lossy().into_owned();
|
||||
debug!("sim_set_result({}, {:?})", code, msg);
|
||||
os().set_result(code, msg);
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn sim_log_event(msg: *const i8) {
|
||||
let msg = unsafe { CStr::from_ptr(msg) };
|
||||
let msg = msg.to_string_lossy().into_owned();
|
||||
debug!("sim_log_event({:?})", msg);
|
||||
os().log_event(msg);
|
||||
}
|
||||
114  libs/walproposer/src/sim_proto.rs  Normal file
@@ -0,0 +1,114 @@
|
||||
use safekeeper::simlib::proto::{AnyMessage, ReplCell};
|
||||
use std::{cell::RefCell, ffi::c_char};
|
||||
|
||||
pub(crate) fn anymessage_tag(msg: &AnyMessage) -> AnyMessageTag {
|
||||
match msg {
|
||||
AnyMessage::None => AnyMessageTag::None,
|
||||
AnyMessage::InternalConnect => AnyMessageTag::InternalConnect,
|
||||
AnyMessage::Just32(_) => AnyMessageTag::Just32,
|
||||
AnyMessage::ReplCell(_) => AnyMessageTag::ReplCell,
|
||||
AnyMessage::Bytes(_) => AnyMessageTag::Bytes,
|
||||
AnyMessage::LSN(_) => AnyMessageTag::LSN,
|
||||
}
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
pub static MESSAGE_BUF: RefCell<AnyMessage> = RefCell::new(AnyMessage::None);
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
/// Get tag of the current message.
|
||||
pub extern "C" fn sim_msg_tag() -> AnyMessageTag {
|
||||
MESSAGE_BUF.with(|cell| anymessage_tag(&*cell.borrow()))
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
/// Read AnyMessage::Just32 message.
|
||||
pub extern "C" fn sim_msg_get_just_u32(val: &mut u32) {
|
||||
MESSAGE_BUF.with(|cell| match &*cell.borrow() {
|
||||
AnyMessage::Just32(v) => {
|
||||
*val = *v;
|
||||
}
|
||||
_ => panic!("expected Just32 message"),
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
/// Read AnyMessage::LSN message.
|
||||
pub extern "C" fn sim_msg_get_lsn(val: &mut u64) {
|
||||
MESSAGE_BUF.with(|cell| match &*cell.borrow() {
|
||||
AnyMessage::LSN(v) => {
|
||||
*val = *v;
|
||||
}
|
||||
_ => panic!("expected LSN message"),
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
/// Write AnyMessage::ReplCell message.
|
||||
pub extern "C" fn sim_msg_set_repl_cell(value: u32, client_id: u32, seqno: u32) {
|
||||
MESSAGE_BUF.with(|cell| {
|
||||
*cell.borrow_mut() = AnyMessage::ReplCell(ReplCell {
|
||||
value,
|
||||
client_id,
|
||||
seqno,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
/// Write AnyMessage::Bytes message.
|
||||
pub extern "C" fn sim_msg_set_bytes(bytes: *const c_char, len: usize) {
|
||||
MESSAGE_BUF.with(|cell| {
|
||||
// copy bytes to a Rust Vec
|
||||
let mut v: Vec<u8> = Vec::with_capacity(len);
|
||||
unsafe {
|
||||
v.set_len(len);
|
||||
std::ptr::copy_nonoverlapping(bytes as *const u8, v.as_mut_ptr(), len);
|
||||
}
|
||||
*cell.borrow_mut() = AnyMessage::Bytes(v.into());
|
||||
});
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
/// Read AnyMessage::Bytes message.
|
||||
pub extern "C" fn sim_msg_get_bytes(len: *mut usize) -> *const c_char {
|
||||
MESSAGE_BUF.with(|cell| match &*cell.borrow() {
|
||||
AnyMessage::Bytes(v) => {
|
||||
unsafe {
|
||||
*len = v.len();
|
||||
v.as_ptr() as *const i8
|
||||
}
|
||||
}
|
||||
_ => panic!("expected Bytes message"),
|
||||
})
|
||||
}
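A minimal round-trip through the thread-local MESSAGE_BUF, as a Rust sketch (the intended consumer is C code going through rust_bindings.h). The payload is arbitrary.

// Sketch: store a Bytes payload and read it back through the same C ABI.
fn message_buf_roundtrip() {
    use std::ffi::c_char;

    let payload = b"hello";
    sim_msg_set_bytes(payload.as_ptr() as *const c_char, payload.len());

    let mut len: usize = 0;
    let ptr = sim_msg_get_bytes(&mut len);
    let echoed = unsafe { std::slice::from_raw_parts(ptr as *const u8, len) };
    assert_eq!(echoed, &payload[..]);
}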
|
||||
|
||||
#[repr(C)]
|
||||
/// Event returned by epoll_recv.
|
||||
pub struct Event {
|
||||
pub tag: EventTag,
|
||||
pub tcp: i64,
|
||||
pub any_message: AnyMessageTag,
|
||||
}
|
||||
|
||||
#[repr(u8)]
|
||||
/// List of all possible NodeEvent.
|
||||
pub enum EventTag {
|
||||
Timeout,
|
||||
Accept,
|
||||
Closed,
|
||||
Message,
|
||||
Internal,
|
||||
}
|
||||
|
||||
#[repr(u8)]
|
||||
/// List of all possible AnyMessage.
|
||||
pub enum AnyMessageTag {
|
||||
None,
|
||||
InternalConnect,
|
||||
Just32,
|
||||
ReplCell,
|
||||
Bytes,
|
||||
LSN,
|
||||
}
|
||||
88  libs/walproposer/src/simtest/disk.rs  Normal file
@@ -0,0 +1,88 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use safekeeper::safekeeper::SafeKeeperState;
|
||||
use safekeeper::simlib::sync::Mutex;
|
||||
use utils::id::TenantTimelineId;
|
||||
|
||||
pub struct Disk {
|
||||
pub timelines: Mutex<HashMap<TenantTimelineId, Arc<TimelineDisk>>>,
|
||||
}
|
||||
|
||||
impl Disk {
|
||||
pub fn new() -> Self {
|
||||
Disk {
|
||||
timelines: Mutex::new(HashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn put_state(&self, ttid: &TenantTimelineId, state: SafeKeeperState) -> Arc<TimelineDisk> {
|
||||
self.timelines
|
||||
.lock()
|
||||
.entry(ttid.clone())
|
||||
.and_modify(|e| {
|
||||
let mut mu = e.state.lock();
|
||||
*mu = state.clone();
|
||||
})
|
||||
.or_insert_with(|| {
|
||||
Arc::new(TimelineDisk {
|
||||
state: Mutex::new(state),
|
||||
wal: Mutex::new(BlockStorage::new()),
|
||||
})
|
||||
})
|
||||
.clone()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TimelineDisk {
|
||||
pub state: Mutex<SafeKeeperState>,
|
||||
pub wal: Mutex<BlockStorage>,
|
||||
}
|
||||
|
||||
const BLOCK_SIZE: usize = 8192;
|
||||
|
||||
pub struct BlockStorage {
|
||||
blocks: HashMap<u64, [u8; BLOCK_SIZE]>,
|
||||
}
|
||||
|
||||
impl BlockStorage {
|
||||
pub fn new() -> Self {
|
||||
BlockStorage {
|
||||
blocks: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read(&self, pos: u64, buf: &mut [u8]) {
|
||||
let mut buf_offset = 0;
|
||||
let mut storage_pos = pos;
|
||||
while buf_offset < buf.len() {
|
||||
let block_id = storage_pos / BLOCK_SIZE as u64;
|
||||
let block = self.blocks.get(&block_id).unwrap_or(&[0; BLOCK_SIZE]);
|
||||
let block_offset = storage_pos % BLOCK_SIZE as u64;
|
||||
let block_len = BLOCK_SIZE as u64 - block_offset;
|
||||
let buf_len = buf.len() - buf_offset;
|
||||
let copy_len = std::cmp::min(block_len as usize, buf_len);
|
||||
buf[buf_offset..buf_offset + copy_len]
|
||||
.copy_from_slice(&block[block_offset as usize..block_offset as usize + copy_len]);
|
||||
buf_offset += copy_len;
|
||||
storage_pos += copy_len as u64;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write(&mut self, pos: u64, buf: &[u8]) {
|
||||
let mut buf_offset = 0;
|
||||
let mut storage_pos = pos;
|
||||
while buf_offset < buf.len() {
|
||||
let block_id = storage_pos / BLOCK_SIZE as u64;
|
||||
let block = self.blocks.entry(block_id).or_insert([0; BLOCK_SIZE]);
|
||||
let block_offset = storage_pos % BLOCK_SIZE as u64;
|
||||
let block_len = BLOCK_SIZE as u64 - block_offset;
|
||||
let buf_len = buf.len() - buf_offset;
|
||||
let copy_len = std::cmp::min(block_len as usize, buf_len);
|
||||
block[block_offset as usize..block_offset as usize + copy_len]
|
||||
.copy_from_slice(&buf[buf_offset..buf_offset + copy_len]);
|
||||
buf_offset += copy_len;
|
||||
storage_pos += copy_len as u64;
|
||||
}
|
||||
}
|
||||
}
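To make the sparse block semantics concrete, a small usage sketch: a write that straddles the 8 KiB block boundary is split across two blocks, and space that was never written reads back as zeros. The offsets are illustrative.

// Sketch: exercise BlockStorage across a block boundary.
fn block_storage_example() {
    let mut storage = BlockStorage::new();
    storage.write(8190, b"abcd"); // last 2 bytes of block 0, first 2 of block 1

    let mut buf = [0u8; 8];
    storage.read(8188, &mut buf);
    assert_eq!(&buf, b"\0\0abcd\0\0");
}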
|
||||
61  libs/walproposer/src/simtest/log.rs  Normal file
@@ -0,0 +1,61 @@
|
||||
use std::{sync::Arc, fmt};
|
||||
|
||||
use safekeeper::simlib::{world::World, sync::Mutex};
|
||||
use tracing_subscriber::fmt::{time::FormatTime, format::Writer};
|
||||
use utils::logging;
|
||||
|
||||
use crate::bindings;
|
||||
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SimClock {
|
||||
world_ptr: Arc<Mutex<Option<Arc<World>>>>,
|
||||
}
|
||||
|
||||
impl Default for SimClock {
|
||||
fn default() -> Self {
|
||||
SimClock {
|
||||
world_ptr: Arc::new(Mutex::new(None)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SimClock {
|
||||
pub fn set_world(&self, world: Arc<World>) {
|
||||
*self.world_ptr.lock() = Some(world);
|
||||
}
|
||||
}
|
||||
|
||||
impl FormatTime for SimClock {
|
||||
fn format_time(&self, w: &mut Writer<'_>) -> fmt::Result {
|
||||
let world = self.world_ptr.lock().clone();
|
||||
|
||||
if let Some(world) = world {
|
||||
let now = world.now();
|
||||
write!(w, "[{}]", now)
|
||||
} else {
|
||||
write!(w, "[?]")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_logger() -> SimClock {
|
||||
let debug_enabled = unsafe { bindings::debug_enabled };
|
||||
|
||||
let clock = SimClock::default();
|
||||
let base_logger = tracing_subscriber::fmt()
|
||||
.with_target(false)
|
||||
.with_timer(clock.clone())
|
||||
.with_ansi(true)
|
||||
.with_max_level(match debug_enabled {
|
||||
true => tracing::Level::DEBUG,
|
||||
false => tracing::Level::INFO,
|
||||
})
|
||||
.with_writer(std::io::stdout);
|
||||
base_logger.init();
|
||||
|
||||
// logging::replace_panic_hook_with_tracing_panic_hook().forget();
|
||||
std::panic::set_hook(Box::new(|_| {}));
|
||||
|
||||
clock
|
||||
}
|
||||
11  libs/walproposer/src/simtest/mod.rs  Normal file
@@ -0,0 +1,11 @@
|
||||
#[cfg(test)]
|
||||
pub mod simple_client;
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod wp_sk;
|
||||
|
||||
pub mod disk;
|
||||
pub mod safekeeper;
|
||||
pub mod storage;
|
||||
pub mod log;
|
||||
pub mod util;
|
||||
372  libs/walproposer/src/simtest/safekeeper.rs  Normal file
@@ -0,0 +1,372 @@
|
||||
//! Safekeeper communication endpoint to WAL proposer (compute node).
|
||||
//! Gets messages from the network, passes them down to consensus module and
|
||||
//! sends replies back.
|
||||
|
||||
use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration};
|
||||
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use hyper::Uri;
|
||||
use log::info;
|
||||
use safekeeper::{
|
||||
safekeeper::{
|
||||
ProposerAcceptorMessage, SafeKeeper, SafeKeeperState, ServerInfo, UNKNOWN_SERVER_VERSION,
|
||||
},
|
||||
simlib::{network::TCP, node_os::NodeOs, proto::AnyMessage, world::NodeEvent},
|
||||
timeline::TimelineError,
|
||||
SafeKeeperConf, wal_storage::Storage,
|
||||
};
|
||||
use tracing::{debug, info_span};
|
||||
use utils::{
|
||||
id::{NodeId, TenantId, TenantTimelineId, TimelineId},
|
||||
lsn::Lsn,
|
||||
};
|
||||
|
||||
use crate::simtest::storage::DiskStateStorage;
|
||||
|
||||
use super::{
|
||||
disk::{Disk, TimelineDisk},
|
||||
storage::DiskWALStorage,
|
||||
};
|
||||
|
||||
struct ConnState {
|
||||
tcp: TCP,
|
||||
|
||||
greeting: bool,
|
||||
ttid: TenantTimelineId,
|
||||
flush_pending: bool,
|
||||
}
|
||||
|
||||
struct SharedState {
|
||||
sk: SafeKeeper<DiskStateStorage, DiskWALStorage>,
|
||||
disk: Arc<TimelineDisk>,
|
||||
}
|
||||
|
||||
struct GlobalMap {
|
||||
timelines: HashMap<TenantTimelineId, SharedState>,
|
||||
conf: SafeKeeperConf,
|
||||
disk: Arc<Disk>,
|
||||
}
|
||||
|
||||
impl GlobalMap {
|
||||
fn new(disk: Arc<Disk>, conf: SafeKeeperConf) -> Result<Self> {
|
||||
let mut timelines = HashMap::new();
|
||||
|
||||
for (&ttid, disk) in disk.timelines.lock().iter() {
|
||||
debug!("loading timeline {}", ttid);
|
||||
let state = disk.state.lock().clone();
|
||||
|
||||
if state.server.wal_seg_size == 0 {
|
||||
bail!(TimelineError::UninitializedWalSegSize(ttid));
|
||||
}
|
||||
|
||||
if state.server.pg_version == UNKNOWN_SERVER_VERSION {
|
||||
bail!(TimelineError::UninitialinzedPgVersion(ttid));
|
||||
}
|
||||
|
||||
if state.commit_lsn < state.local_start_lsn {
|
||||
bail!(
|
||||
"commit_lsn {} is higher than local_start_lsn {}",
|
||||
state.commit_lsn,
|
||||
state.local_start_lsn
|
||||
);
|
||||
}
|
||||
|
||||
let control_store = DiskStateStorage::new(disk.clone());
|
||||
let wal_store = DiskWALStorage::new(disk.clone(), &control_store)?;
|
||||
|
||||
let sk = SafeKeeper::new(control_store, wal_store, conf.my_id)?;
|
||||
timelines.insert(
|
||||
ttid.clone(),
|
||||
SharedState {
|
||||
sk,
|
||||
disk: disk.clone(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
timelines,
|
||||
conf,
|
||||
disk,
|
||||
})
|
||||
}
|
||||
|
||||
fn create(&mut self, ttid: TenantTimelineId, server_info: ServerInfo) -> Result<()> {
|
||||
if self.timelines.contains_key(&ttid) {
|
||||
bail!("timeline {} already exists", ttid);
|
||||
}
|
||||
|
||||
debug!("creating new timeline {}", ttid);
|
||||
|
||||
let commit_lsn = Lsn::INVALID;
|
||||
let local_start_lsn = Lsn::INVALID;
|
||||
|
||||
// TODO: load state from in-memory storage
|
||||
let state = SafeKeeperState::new(&ttid, server_info, vec![], commit_lsn, local_start_lsn);
|
||||
|
||||
if state.server.wal_seg_size == 0 {
|
||||
bail!(TimelineError::UninitializedWalSegSize(ttid));
|
||||
}
|
||||
|
||||
if state.server.pg_version == UNKNOWN_SERVER_VERSION {
|
||||
bail!(TimelineError::UninitialinzedPgVersion(ttid));
|
||||
}
|
||||
|
||||
if state.commit_lsn < state.local_start_lsn {
|
||||
bail!(
|
||||
"commit_lsn {} is higher than local_start_lsn {}",
|
||||
state.commit_lsn,
|
||||
state.local_start_lsn
|
||||
);
|
||||
}
|
||||
|
||||
let disk_timeline = self.disk.put_state(&ttid, state);
|
||||
let control_store = DiskStateStorage::new(disk_timeline.clone());
|
||||
let wal_store = DiskWALStorage::new(disk_timeline.clone(), &control_store)?;
|
||||
|
||||
let sk = SafeKeeper::new(control_store, wal_store, self.conf.my_id)?;
|
||||
|
||||
self.timelines.insert(
|
||||
ttid.clone(),
|
||||
SharedState {
|
||||
sk,
|
||||
disk: disk_timeline,
|
||||
},
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get(&mut self, ttid: &TenantTimelineId) -> &mut SharedState {
|
||||
self.timelines.get_mut(ttid).expect("timeline must exist")
|
||||
}
|
||||
|
||||
fn has_tli(&self, ttid: &TenantTimelineId) -> bool {
|
||||
self.timelines.contains_key(ttid)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run_server(os: NodeOs, disk: Arc<Disk>) -> Result<()> {
|
||||
let _enter = info_span!("safekeeper", id = os.id()).entered();
|
||||
debug!("started server");
|
||||
os.log_event("started;safekeeper".to_owned());
|
||||
let conf = SafeKeeperConf {
|
||||
workdir: PathBuf::from("."),
|
||||
my_id: NodeId(os.id() as u64),
|
||||
listen_pg_addr: String::new(),
|
||||
listen_http_addr: String::new(),
|
||||
no_sync: false,
|
||||
broker_endpoint: "/".parse::<Uri>().unwrap(),
|
||||
broker_keepalive_interval: Duration::from_secs(0),
|
||||
heartbeat_timeout: Duration::from_secs(0),
|
||||
remote_storage: None,
|
||||
max_offloader_lag_bytes: 0,
|
||||
backup_runtime_threads: None,
|
||||
wal_backup_enabled: false,
|
||||
auth: None,
|
||||
};
|
||||
|
||||
let mut global = GlobalMap::new(disk, conf.clone())?;
|
||||
let mut conns: HashMap<i64, ConnState> = HashMap::new();
|
||||
|
||||
for (&ttid, shared_state) in global.timelines.iter_mut() {
|
||||
let flush_lsn = shared_state.sk.wal_store.flush_lsn();
|
||||
let commit_lsn = shared_state.sk.state.commit_lsn;
|
||||
os.log_event(format!("tli_loaded;{};{}", flush_lsn.0, commit_lsn.0));
|
||||
}
|
||||
|
||||
let epoll = os.epoll();
|
||||
loop {
|
||||
// waiting for the next message
|
||||
let mut next_event = Some(epoll.recv());
|
||||
|
||||
loop {
|
||||
let event = match next_event {
|
||||
Some(event) => event,
|
||||
None => break,
|
||||
};
|
||||
|
||||
match event {
|
||||
NodeEvent::Accept(tcp) => {
|
||||
conns.insert(
|
||||
tcp.id(),
|
||||
ConnState {
|
||||
tcp,
|
||||
greeting: false,
|
||||
ttid: TenantTimelineId::empty(),
|
||||
flush_pending: false,
|
||||
},
|
||||
);
|
||||
}
|
||||
NodeEvent::Message((msg, tcp)) => {
|
||||
let conn = conns.get_mut(&tcp.id());
|
||||
if let Some(conn) = conn {
|
||||
let res = conn.process_any(msg, &mut global);
|
||||
if res.is_err() {
|
||||
debug!("conn {:?} error: {:#}", tcp, res.unwrap_err());
|
||||
conns.remove(&tcp.id());
|
||||
}
|
||||
} else {
|
||||
debug!("conn {:?} was closed, dropping msg {:?}", tcp, msg);
|
||||
}
|
||||
}
|
||||
NodeEvent::Internal(_) => {}
|
||||
NodeEvent::Closed(_) => {}
|
||||
NodeEvent::WakeTimeout(_) => {}
|
||||
}
|
||||
|
||||
// TODO: make simulator support multiple events per tick
|
||||
next_event = epoll.try_recv();
|
||||
}
|
||||
|
||||
conns.retain(|_, conn| {
|
||||
let res = conn.flush(&mut global);
|
||||
if res.is_err() {
|
||||
debug!("conn {:?} error: {:?}", conn.tcp, res);
|
||||
}
|
||||
res.is_ok()
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
impl ConnState {
|
||||
fn process_any(&mut self, any: AnyMessage, global: &mut GlobalMap) -> Result<()> {
|
||||
if let AnyMessage::Bytes(copy_data) = any {
|
||||
let repl_prefix = b"START_REPLICATION ";
|
||||
if !self.greeting && copy_data.starts_with(repl_prefix) {
|
||||
self.process_start_replication(copy_data.slice(repl_prefix.len()..), global)?;
|
||||
bail!("finished processing START_REPLICATION")
|
||||
}
|
||||
|
||||
let msg = ProposerAcceptorMessage::parse(copy_data)?;
|
||||
debug!("got msg: {:?}", msg);
|
||||
return self.process(msg, global);
|
||||
} else {
|
||||
bail!("unexpected message, expected AnyMessage::Bytes");
|
||||
}
|
||||
}
|
||||
|
||||
fn process_start_replication(
|
||||
&mut self,
|
||||
copy_data: Bytes,
|
||||
global: &mut GlobalMap,
|
||||
) -> Result<()> {
|
||||
// format is "<tenant_id> <timeline_id> <start_lsn> <end_lsn>"
|
||||
let str = String::from_utf8(copy_data.to_vec())?;
|
||||
|
||||
let mut parts = str.split(' ');
|
||||
let tenant_id = parts.next().unwrap().parse::<TenantId>()?;
|
||||
let timeline_id = parts.next().unwrap().parse::<TimelineId>()?;
|
||||
let start_lsn = parts.next().unwrap().parse::<u64>()?;
|
||||
let end_lsn = parts.next().unwrap().parse::<u64>()?;
|
||||
|
||||
let ttid = TenantTimelineId::new(tenant_id, timeline_id);
|
||||
let shared_state = global.get(&ttid);
|
||||
|
||||
// read bytes from start_lsn to end_lsn
|
||||
let mut buf = vec![0; (end_lsn - start_lsn) as usize];
|
||||
shared_state.disk.wal.lock().read(start_lsn, &mut buf);
|
||||
|
||||
// send bytes to the client
|
||||
self.tcp.send(AnyMessage::Bytes(Bytes::from(buf)));
|
||||
Ok(())
|
||||
}
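For reference, a sketch of the sender side of this ad-hoc hand-off is below; it relies on the imports at the top of this file. The tenant and timeline ids reuse the values from the test postgresql.conf later in this diff, and the LSN range is made up.

// Hypothetical producer of the START_REPLICATION command parsed above.
fn send_start_replication(mut tcp: TCP, start_lsn: u64, end_lsn: u64) {
    let cmd = format!(
        "START_REPLICATION {} {} {} {}",
        "cc6e67313d57283bad411600fbf5c142", // tenant_id
        "de6fa815c1e45aa61491c3d34c4eb33e", // timeline_id
        start_lsn,                          // decimal u64, as parsed above
        end_lsn,
    );
    tcp.send(AnyMessage::Bytes(Bytes::from(cmd.into_bytes())));
}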
|
||||
|
||||
fn init_timeline(
|
||||
&mut self,
|
||||
ttid: TenantTimelineId,
|
||||
server_info: ServerInfo,
|
||||
global: &mut GlobalMap,
|
||||
) -> Result<()> {
|
||||
self.ttid = ttid;
|
||||
if global.has_tli(&ttid) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
global.create(ttid, server_info)
|
||||
}
|
||||
|
||||
fn process(&mut self, msg: ProposerAcceptorMessage, global: &mut GlobalMap) -> Result<()> {
|
||||
if !self.greeting {
|
||||
self.greeting = true;
|
||||
|
||||
match msg {
|
||||
ProposerAcceptorMessage::Greeting(ref greeting) => {
|
||||
debug!(
|
||||
"start handshake with walproposer {:?}",
|
||||
self.tcp,
|
||||
);
|
||||
let server_info = ServerInfo {
|
||||
pg_version: greeting.pg_version,
|
||||
system_id: greeting.system_id,
|
||||
wal_seg_size: greeting.wal_seg_size,
|
||||
};
|
||||
let ttid = TenantTimelineId::new(greeting.tenant_id, greeting.timeline_id);
|
||||
self.init_timeline(ttid, server_info, global)?
|
||||
}
|
||||
_ => {
|
||||
bail!("unexpected message {msg:?} instead of greeting");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let tli = global.get(&self.ttid);
|
||||
|
||||
match msg {
|
||||
ProposerAcceptorMessage::AppendRequest(append_request) => {
|
||||
self.flush_pending = true;
|
||||
self.process_sk_msg(
|
||||
tli,
|
||||
&ProposerAcceptorMessage::NoFlushAppendRequest(append_request),
|
||||
)?;
|
||||
}
|
||||
other => {
|
||||
self.process_sk_msg(tli, &other)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process FlushWAL if needed.
|
||||
// TODO: add extra flushes, to verify that extra flushes don't break anything
|
||||
fn flush(&mut self, global: &mut GlobalMap) -> Result<()> {
|
||||
if !self.flush_pending {
|
||||
return Ok(());
|
||||
}
|
||||
self.flush_pending = false;
|
||||
let shared_state = global.get(&self.ttid);
|
||||
self.process_sk_msg(shared_state, &ProposerAcceptorMessage::FlushWAL)
|
||||
}
|
||||
|
||||
/// Make safekeeper process a message and send a reply to the TCP
|
||||
fn process_sk_msg(
|
||||
&mut self,
|
||||
shared_state: &mut SharedState,
|
||||
msg: &ProposerAcceptorMessage,
|
||||
) -> Result<()> {
|
||||
let mut reply = shared_state.sk.process_msg(msg)?;
|
||||
if let Some(reply) = &mut reply {
|
||||
// // if this is AppendResponse, fill in proper hot standby feedback and disk consistent lsn
|
||||
// if let AcceptorProposerMessage::AppendResponse(ref mut resp) = reply {
|
||||
// // TODO:
|
||||
// }
|
||||
|
||||
let mut buf = BytesMut::with_capacity(128);
|
||||
reply.serialize(&mut buf)?;
|
||||
|
||||
self.tcp.send(AnyMessage::Bytes(buf.into()));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for ConnState {
|
||||
fn drop(&mut self) {
|
||||
debug!("dropping conn: {:?}", self.tcp);
|
||||
if !std::thread::panicking() {
|
||||
self.tcp.close();
|
||||
}
|
||||
// TODO: clean up non-fsynced WAL
|
||||
}
|
||||
}
|
||||
38  libs/walproposer/src/simtest/simple_client.rs  Normal file
@@ -0,0 +1,38 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use safekeeper::{
|
||||
simlib::{
|
||||
network::{Delay, NetworkOptions},
|
||||
world::World,
|
||||
},
|
||||
simtest::{start_simulation, Options},
|
||||
};
|
||||
|
||||
use crate::{bindings::RunClientC, c_context};
|
||||
|
||||
#[test]
|
||||
fn run_rust_c_test() {
|
||||
let delay = Delay {
|
||||
min: 1,
|
||||
max: 5,
|
||||
fail_prob: 0.5,
|
||||
};
|
||||
|
||||
let network = NetworkOptions {
|
||||
keepalive_timeout: Some(50),
|
||||
connect_delay: delay.clone(),
|
||||
send_delay: delay.clone(),
|
||||
};
|
||||
|
||||
let u32_data: [u32; 5] = [1, 2, 3, 4, 5];
|
||||
|
||||
let world = Arc::new(World::new(1337, Arc::new(network), c_context()));
|
||||
start_simulation(Options {
|
||||
world,
|
||||
time_limit: 1_000_000,
|
||||
client_fn: Box::new(move |_, server_id| unsafe {
|
||||
RunClientC(server_id);
|
||||
}),
|
||||
u32_data,
|
||||
});
|
||||
}
|
||||
234  libs/walproposer/src/simtest/storage.rs  Normal file
@@ -0,0 +1,234 @@
|
||||
use std::{ops::Deref, sync::Arc};
|
||||
|
||||
use anyhow::Result;
|
||||
use bytes::{Buf, BytesMut};
|
||||
use log::{debug, info};
|
||||
use postgres_ffi::{waldecoder::WalStreamDecoder, XLogSegNo};
|
||||
use safekeeper::{control_file, safekeeper::SafeKeeperState, wal_storage};
|
||||
use utils::lsn::Lsn;
|
||||
|
||||
use super::disk::TimelineDisk;
|
||||
|
||||
pub struct DiskStateStorage {
|
||||
persisted_state: SafeKeeperState,
|
||||
disk: Arc<TimelineDisk>,
|
||||
}
|
||||
|
||||
impl DiskStateStorage {
|
||||
pub fn new(disk: Arc<TimelineDisk>) -> Self {
|
||||
let guard = disk.state.lock();
|
||||
let state = guard.clone();
|
||||
drop(guard);
|
||||
DiskStateStorage {
|
||||
persisted_state: state,
|
||||
disk,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl control_file::Storage for DiskStateStorage {
|
||||
fn persist(&mut self, s: &SafeKeeperState) -> Result<()> {
|
||||
self.persisted_state = s.clone();
|
||||
*self.disk.state.lock() = s.clone();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for DiskStateStorage {
|
||||
type Target = SafeKeeperState;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.persisted_state
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DummyWalStore {
|
||||
lsn: Lsn,
|
||||
}
|
||||
|
||||
impl DummyWalStore {
|
||||
pub fn new() -> Self {
|
||||
DummyWalStore { lsn: Lsn::INVALID }
|
||||
}
|
||||
}
|
||||
|
||||
impl wal_storage::Storage for DummyWalStore {
|
||||
fn flush_lsn(&self) -> Lsn {
|
||||
self.lsn
|
||||
}
|
||||
|
||||
fn write_wal(&mut self, startpos: Lsn, buf: &[u8]) -> Result<()> {
|
||||
self.lsn = startpos + buf.len() as u64;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn truncate_wal(&mut self, end_pos: Lsn) -> Result<()> {
|
||||
self.lsn = end_pos;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush_wal(&mut self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn remove_up_to(&self) -> Box<dyn Fn(XLogSegNo) -> Result<()>> {
|
||||
Box::new(move |_segno_up_to: XLogSegNo| Ok(()))
|
||||
}
|
||||
|
||||
fn get_metrics(&self) -> safekeeper::metrics::WalStorageMetrics {
|
||||
safekeeper::metrics::WalStorageMetrics::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DiskWALStorage {
|
||||
/// Written to disk, but possibly still in the cache and not fully persisted.
|
||||
/// Also can be ahead of record_lsn, if it happens to be in the middle of a WAL record.
|
||||
write_lsn: Lsn,
|
||||
|
||||
/// The LSN of the last WAL record written to disk. Still can be not fully flushed.
|
||||
write_record_lsn: Lsn,
|
||||
|
||||
/// The LSN of the last WAL record flushed to disk.
|
||||
flush_record_lsn: Lsn,
|
||||
|
||||
/// Decoder is required for detecting boundaries of WAL records.
|
||||
decoder: WalStreamDecoder,
|
||||
|
||||
unflushed_bytes: BytesMut,
|
||||
|
||||
disk: Arc<TimelineDisk>,
|
||||
}
|
||||
|
||||
impl DiskWALStorage {
|
||||
pub fn new(disk: Arc<TimelineDisk>, state: &SafeKeeperState) -> Result<Self> {
|
||||
let write_lsn = if state.commit_lsn == Lsn(0) {
|
||||
Lsn(0)
|
||||
} else {
|
||||
Self::find_end_of_wal(disk.clone(), state.commit_lsn)?
|
||||
};
|
||||
|
||||
let flush_lsn = write_lsn;
|
||||
Ok(DiskWALStorage {
|
||||
write_lsn,
|
||||
write_record_lsn: flush_lsn,
|
||||
flush_record_lsn: flush_lsn,
|
||||
decoder: WalStreamDecoder::new(flush_lsn, 15),
|
||||
unflushed_bytes: BytesMut::new(),
|
||||
disk,
|
||||
})
|
||||
}
|
||||
|
||||
fn find_end_of_wal(disk: Arc<TimelineDisk>, start_lsn: Lsn) -> Result<Lsn> {
|
||||
let mut buf = [0; 8192];
|
||||
let mut pos = start_lsn.0;
|
||||
let mut decoder = WalStreamDecoder::new(start_lsn, 15);
|
||||
let mut result = start_lsn;
|
||||
loop {
|
||||
disk.wal.lock().read(pos, &mut buf);
|
||||
pos += buf.len() as u64;
|
||||
decoder.feed_bytes(&buf);
|
||||
|
||||
loop {
|
||||
match decoder.poll_decode() {
|
||||
Ok(Some(record)) => result = record.0,
|
||||
Err(e) => {
|
||||
debug!(
|
||||
"find_end_of_wal reached end at {:?}, decode error: {:?}",
|
||||
result, e
|
||||
);
|
||||
return Ok(result);
|
||||
}
|
||||
Ok(None) => break, // need more data
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl wal_storage::Storage for DiskWALStorage {
|
||||
fn flush_lsn(&self) -> Lsn {
|
||||
self.flush_record_lsn
|
||||
}
|
||||
|
||||
fn write_wal(&mut self, startpos: Lsn, buf: &[u8]) -> Result<()> {
|
||||
if self.write_lsn != startpos {
|
||||
panic!("write_wal called with wrong startpos");
|
||||
}
|
||||
|
||||
self.unflushed_bytes.extend_from_slice(buf);
|
||||
self.write_lsn += buf.len() as u64;
|
||||
|
||||
if self.decoder.available() != startpos {
|
||||
info!(
|
||||
"restart decoder from {} to {}",
|
||||
self.decoder.available(),
|
||||
startpos,
|
||||
);
|
||||
self.decoder = WalStreamDecoder::new(startpos, 15);
|
||||
}
|
||||
self.decoder.feed_bytes(buf);
|
||||
loop {
|
||||
match self.decoder.poll_decode()? {
|
||||
None => break, // no full record yet
|
||||
Some((lsn, _rec)) => {
|
||||
self.write_record_lsn = lsn;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn truncate_wal(&mut self, end_pos: Lsn) -> Result<()> {
|
||||
if self.write_lsn != Lsn(0) && end_pos > self.write_lsn {
|
||||
panic!(
|
||||
"truncate_wal called on non-written WAL, write_lsn={}, end_pos={}",
|
||||
self.write_lsn, end_pos
|
||||
);
|
||||
}
|
||||
|
||||
self.flush_wal()?;
|
||||
|
||||
// write zeroes to disk from end_pos until self.write_lsn
|
||||
let buf = [0; 8192];
|
||||
let mut pos = end_pos.0;
|
||||
while pos < self.write_lsn.0 {
|
||||
self.disk.wal.lock().write(pos, &buf);
|
||||
pos += buf.len() as u64;
|
||||
}
|
||||
|
||||
self.write_lsn = end_pos;
|
||||
self.write_record_lsn = end_pos;
|
||||
self.flush_record_lsn = end_pos;
|
||||
self.unflushed_bytes.clear();
|
||||
self.decoder = WalStreamDecoder::new(end_pos, 15);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush_wal(&mut self) -> Result<()> {
|
||||
if self.flush_record_lsn == self.write_record_lsn {
|
||||
// no need to do extra flush
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let num_bytes = self.write_record_lsn.0 - self.flush_record_lsn.0;
|
||||
|
||||
self.disk.wal.lock().write(
|
||||
self.flush_record_lsn.0,
|
||||
&self.unflushed_bytes[..num_bytes as usize],
|
||||
);
|
||||
self.unflushed_bytes.advance(num_bytes as usize);
|
||||
self.flush_record_lsn = self.write_record_lsn;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn remove_up_to(&self) -> Box<dyn Fn(XLogSegNo) -> Result<()>> {
|
||||
Box::new(move |_segno_up_to: XLogSegNo| Ok(()))
|
||||
}
|
||||
|
||||
fn get_metrics(&self) -> safekeeper::metrics::WalStorageMetrics {
|
||||
safekeeper::metrics::WalStorageMetrics::default()
|
||||
}
|
||||
}
|
||||
610  libs/walproposer/src/simtest/util.rs  Normal file
@@ -0,0 +1,610 @@
|
||||
use std::{ffi::CString, path::Path, str::FromStr, sync::Arc, collections::HashMap};
|
||||
|
||||
use rand::{Rng, SeedableRng};
|
||||
use safekeeper::simlib::{
|
||||
network::{Delay, NetworkOptions},
|
||||
proto::AnyMessage,
|
||||
time::EmptyEvent,
|
||||
world::World,
|
||||
world::{Node, NodeEvent, SEvent, NodeId},
|
||||
};
|
||||
use tracing::{debug, error, info, warn};
|
||||
use utils::{id::TenantTimelineId, lsn::Lsn};
|
||||
|
||||
use crate::{
|
||||
bindings::{
|
||||
neon_tenant_walproposer, neon_timeline_walproposer, sim_redo_start_lsn, syncSafekeepers,
|
||||
wal_acceptor_connection_timeout, wal_acceptor_reconnect_timeout, wal_acceptors_list,
|
||||
MyInsertRecord, WalProposerCleanup, WalProposerRust,
|
||||
},
|
||||
c_context,
|
||||
simtest::{
|
||||
log::{init_logger, SimClock},
|
||||
safekeeper::run_server,
|
||||
},
|
||||
};
|
||||
|
||||
use super::disk::Disk;
|
||||
|
||||
pub struct SkNode {
|
||||
pub node: Arc<Node>,
|
||||
pub id: u32,
|
||||
pub disk: Arc<Disk>,
|
||||
}
|
||||
|
||||
impl SkNode {
|
||||
pub fn new(node: Arc<Node>) -> Self {
|
||||
let disk = Arc::new(Disk::new());
|
||||
let res = Self {
|
||||
id: node.id,
|
||||
node,
|
||||
disk,
|
||||
};
|
||||
res.launch();
|
||||
res
|
||||
}
|
||||
|
||||
pub fn launch(&self) {
|
||||
let id = self.id;
|
||||
let disk = self.disk.clone();
|
||||
// start the server thread
|
||||
self.node.launch(move |os| {
|
||||
let res = run_server(os, disk);
|
||||
debug!("server {} finished: {:?}", id, res);
|
||||
});
|
||||
}
|
||||
|
||||
pub fn restart(&self) {
|
||||
self.node.crash_stop();
|
||||
self.launch();
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TestConfig {
|
||||
pub network: NetworkOptions,
|
||||
pub timeout: u64,
|
||||
pub clock: Option<SimClock>,
|
||||
}
|
||||
|
||||
impl TestConfig {
|
||||
pub fn new(clock: Option<SimClock>) -> Self {
|
||||
Self {
|
||||
network: NetworkOptions {
|
||||
keepalive_timeout: Some(2000),
|
||||
connect_delay: Delay {
|
||||
min: 1,
|
||||
max: 5,
|
||||
fail_prob: 0.0,
|
||||
},
|
||||
send_delay: Delay {
|
||||
min: 1,
|
||||
max: 5,
|
||||
fail_prob: 0.0,
|
||||
},
|
||||
},
|
||||
timeout: 1_000 * 10,
|
||||
clock,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn start(&self, seed: u64) -> Test {
|
||||
let world = Arc::new(World::new(
|
||||
seed,
|
||||
Arc::new(self.network.clone()),
|
||||
c_context(),
|
||||
));
|
||||
world.register_world();
|
||||
|
||||
if let Some(clock) = &self.clock {
|
||||
clock.set_world(world.clone());
|
||||
}
|
||||
|
||||
let servers = [
|
||||
SkNode::new(world.new_node()),
|
||||
SkNode::new(world.new_node()),
|
||||
SkNode::new(world.new_node()),
|
||||
];
|
||||
|
||||
let server_ids = [servers[0].id, servers[1].id, servers[2].id];
|
||||
|
||||
let safekeepers_guc = server_ids.map(|id| format!("node:{}", id)).join(",");
|
||||
let ttid = TenantTimelineId::generate();
|
||||
|
||||
// wait for all servers to finish initializing
|
||||
world.await_all();
|
||||
|
||||
// clean up pgdata directory
|
||||
self.init_pgdata();
|
||||
|
||||
Test {
|
||||
world,
|
||||
servers,
|
||||
safekeepers_guc,
|
||||
ttid,
|
||||
timeout: self.timeout,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_pgdata(&self) {
|
||||
let pgdata = Path::new("/home/admin/simulator/libs/walproposer/pgdata");
|
||||
if pgdata.exists() {
|
||||
std::fs::remove_dir_all(pgdata).unwrap();
|
||||
}
|
||||
std::fs::create_dir(pgdata).unwrap();
|
||||
|
||||
// create empty pg_wal and pg_notify subdirs
|
||||
std::fs::create_dir(pgdata.join("pg_wal")).unwrap();
|
||||
std::fs::create_dir(pgdata.join("pg_notify")).unwrap();
|
||||
|
||||
// write postgresql.conf
|
||||
let mut conf = std::fs::File::create(pgdata.join("postgresql.conf")).unwrap();
|
||||
let content = "
|
||||
wal_log_hints=off
|
||||
hot_standby=on
|
||||
fsync=off
|
||||
wal_level=replica
|
||||
restart_after_crash=off
|
||||
shared_preload_libraries=neon
|
||||
neon.pageserver_connstring=''
|
||||
neon.tenant_id=cc6e67313d57283bad411600fbf5c142
|
||||
neon.timeline_id=de6fa815c1e45aa61491c3d34c4eb33e
|
||||
synchronous_standby_names=walproposer
|
||||
neon.safekeepers='node:1,node:2,node:3'
|
||||
max_connections=100
|
||||
";
|
||||
|
||||
std::io::Write::write_all(&mut conf, content.as_bytes()).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Test {
|
||||
pub world: Arc<World>,
|
||||
pub servers: [SkNode; 3],
|
||||
pub safekeepers_guc: String,
|
||||
pub ttid: TenantTimelineId,
|
||||
pub timeout: u64,
|
||||
}
|
||||
|
||||
impl Test {
|
||||
fn launch_sync(&self) -> Arc<Node> {
|
||||
let client_node = self.world.new_node();
|
||||
debug!("sync-safekeepers started at node {}", client_node.id);
|
||||
|
||||
// start the client thread
|
||||
let guc = self.safekeepers_guc.clone();
|
||||
let ttid = self.ttid.clone();
|
||||
client_node.launch(move |_| {
|
||||
let list = CString::new(guc).unwrap();
|
||||
|
||||
unsafe {
|
||||
WalProposerCleanup();
|
||||
|
||||
syncSafekeepers = true;
|
||||
wal_acceptors_list = list.into_raw();
|
||||
wal_acceptor_reconnect_timeout = 1000;
|
||||
wal_acceptor_connection_timeout = 5000;
|
||||
neon_tenant_walproposer =
|
||||
CString::new(ttid.tenant_id.to_string()).unwrap().into_raw();
|
||||
neon_timeline_walproposer = CString::new(ttid.timeline_id.to_string())
|
||||
.unwrap()
|
||||
.into_raw();
|
||||
WalProposerRust();
|
||||
}
|
||||
});
|
||||
|
||||
self.world.await_all();
|
||||
|
||||
client_node
|
||||
}
|
||||
|
||||
pub fn sync_safekeepers(&self) -> anyhow::Result<Lsn> {
|
||||
let client_node = self.launch_sync();
|
||||
|
||||
// poll until exit or timeout
|
||||
let time_limit = self.timeout;
|
||||
while self.world.step() && self.world.now() < time_limit && !client_node.is_finished() {}
|
||||
|
||||
if !client_node.is_finished() {
|
||||
anyhow::bail!("timeout or idle stuck");
|
||||
}
|
||||
|
||||
let res = client_node.result.lock().clone();
|
||||
if res.0 != 0 {
|
||||
anyhow::bail!("non-zero exitcode: {:?}", res);
|
||||
}
|
||||
let lsn = Lsn::from_str(&res.1)?;
|
||||
Ok(lsn)
|
||||
}
|
||||
|
||||
pub fn launch_walproposer(&self, lsn: Lsn) -> WalProposer {
|
||||
let client_node = self.world.new_node();
|
||||
|
||||
let lsn = if lsn.0 == 0 {
|
||||
// usual LSN after basebackup
|
||||
Lsn(21623024)
|
||||
} else {
|
||||
lsn
|
||||
};
|
||||
|
||||
// start the client thread
|
||||
let guc = self.safekeepers_guc.clone();
|
||||
let ttid = self.ttid.clone();
|
||||
client_node.launch(move |_| {
|
||||
let list = CString::new(guc).unwrap();
|
||||
|
||||
unsafe {
|
||||
WalProposerCleanup();
|
||||
|
||||
sim_redo_start_lsn = lsn.0;
|
||||
syncSafekeepers = false;
|
||||
wal_acceptors_list = list.into_raw();
|
||||
wal_acceptor_reconnect_timeout = 1000;
|
||||
wal_acceptor_connection_timeout = 5000;
|
||||
neon_tenant_walproposer =
|
||||
CString::new(ttid.tenant_id.to_string()).unwrap().into_raw();
|
||||
neon_timeline_walproposer = CString::new(ttid.timeline_id.to_string())
|
||||
.unwrap()
|
||||
.into_raw();
|
||||
WalProposerRust();
|
||||
}
|
||||
});
|
||||
|
||||
self.world.await_all();
|
||||
|
||||
WalProposer {
|
||||
node: client_node,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn poll_for_duration(&self, duration: u64) {
|
||||
let time_limit = std::cmp::min(self.world.now() + duration, self.timeout);
|
||||
while self.world.step() && self.world.now() < time_limit {}
|
||||
}
|
||||
|
||||
pub fn run_schedule(&self, schedule: &Schedule) -> anyhow::Result<()> {
|
||||
{
|
||||
let empty_event = Box::new(EmptyEvent);
|
||||
|
||||
let now = self.world.now();
|
||||
for (time, _) in schedule {
|
||||
if *time < now {
|
||||
continue;
|
||||
}
|
||||
self.world.schedule(*time - now, empty_event.clone())
|
||||
}
|
||||
}
|
||||
|
||||
let mut wait_node = self.launch_sync();
|
||||
// fake walproposer
|
||||
let mut wp = WalProposer {
|
||||
node: wait_node.clone(),
|
||||
};
|
||||
let mut sync_in_progress = true;
|
||||
|
||||
let mut skipped_tx = 0;
|
||||
let mut started_tx = 0;
|
||||
|
||||
let mut schedule_ptr = 0;
|
||||
|
||||
loop {
|
||||
if sync_in_progress && wait_node.is_finished() {
|
||||
let res = wait_node.result.lock().clone();
|
||||
if res.0 != 0 {
|
||||
warn!("sync non-zero exitcode: {:?}", res);
|
||||
debug!("restarting walproposer");
|
||||
wait_node = self.launch_sync();
|
||||
continue;
|
||||
}
|
||||
let lsn = Lsn::from_str(&res.1)?;
|
||||
debug!("sync-safekeepers finished at LSN {}", lsn);
|
||||
wp = self.launch_walproposer(lsn);
|
||||
wait_node = wp.node.clone();
|
||||
debug!("walproposer started at node {}", wait_node.id);
|
||||
sync_in_progress = false;
|
||||
}
|
||||
|
||||
let now = self.world.now();
|
||||
while schedule_ptr < schedule.len() && schedule[schedule_ptr].0 <= now {
|
||||
if now != schedule[schedule_ptr].0 {
|
||||
warn!("skipped event {:?} at {}", schedule[schedule_ptr], now);
|
||||
}
|
||||
|
||||
let action = &schedule[schedule_ptr].1;
|
||||
match action {
|
||||
TestAction::WriteTx(size) => {
|
||||
if !sync_in_progress && !wait_node.is_finished() {
|
||||
started_tx += *size;
|
||||
wp.write_tx(*size);
|
||||
debug!("written {} transactions", size);
|
||||
} else {
|
||||
skipped_tx += size;
|
||||
debug!("skipped {} transactions", size);
|
||||
}
|
||||
}
|
||||
TestAction::RestartSafekeeper(id) => {
|
||||
debug!("restarting safekeeper {}", id);
|
||||
self.servers[*id as usize].restart();
|
||||
}
|
||||
TestAction::RestartWalProposer => {
|
||||
debug!("restarting walproposer");
|
||||
wait_node.crash_stop();
|
||||
sync_in_progress = true;
|
||||
wait_node = self.launch_sync();
|
||||
}
|
||||
}
|
||||
schedule_ptr += 1;
|
||||
}
|
||||
|
||||
if schedule_ptr == schedule.len() {
|
||||
break;
|
||||
}
|
||||
let next_event_time = schedule[schedule_ptr].0;
|
||||
|
||||
// poll until the next event
|
||||
if wait_node.is_finished() {
|
||||
while self.world.step() && self.world.now() < next_event_time {}
|
||||
} else {
|
||||
while self.world.step()
|
||||
&& self.world.now() < next_event_time
|
||||
&& !wait_node.is_finished()
|
||||
{}
|
||||
}
|
||||
}
|
||||
|
||||
debug!("finished schedule");
|
||||
debug!("skipped_tx: {}", skipped_tx);
|
||||
debug!("started_tx: {}", started_tx);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WalProposer {
|
||||
pub node: Arc<Node>,
|
||||
}
|
||||
|
||||
impl WalProposer {
|
||||
pub fn write_tx(&mut self, cnt: usize) {
|
||||
self.node
|
||||
.network_chan()
|
||||
.send(NodeEvent::Internal(AnyMessage::Just32(cnt as u32)));
|
||||
}
|
||||
|
||||
pub fn stop(&self) {
|
||||
self.node.crash_stop();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum TestAction {
|
||||
WriteTx(usize),
|
||||
RestartSafekeeper(usize),
|
||||
RestartWalProposer,
|
||||
}
|
||||
|
||||
pub type Schedule = Vec<(u64, TestAction)>;
|
||||
|
||||
pub fn generate_schedule(seed: u64) -> Schedule {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
|
||||
let mut schedule = Vec::new();
|
||||
let mut time = 0;
|
||||
|
||||
let cnt = rng.gen_range(1..100);
|
||||
|
||||
for _ in 0..cnt {
|
||||
time += rng.gen_range(0..500);
|
||||
let action = match rng.gen_range(0..3) {
|
||||
0 => TestAction::WriteTx(rng.gen_range(1..10)),
|
||||
1 => TestAction::RestartSafekeeper(rng.gen_range(0..3)),
|
||||
2 => TestAction::RestartWalProposer,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
schedule.push((time, action));
|
||||
}
|
||||
|
||||
schedule
|
||||
}
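// generate_schedule is a pure function of the seed, which is what allows a seed
// logged by a failing randomized run to be replayed later. A minimal, hypothetical
// sketch of that property:
#[test]
fn schedule_generation_is_deterministic() {
    let a = generate_schedule(1337);
    let b = generate_schedule(1337);
    // TestAction derives Debug but not PartialEq, so compare the debug output.
    assert_eq!(format!("{:?}", a), format!("{:?}", b));
}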
|
||||
|
||||
pub fn generate_network_opts(seed: u64) -> NetworkOptions {
|
||||
let mut rng = rand::rngs::StdRng::seed_from_u64(seed);
|
||||
|
||||
let timeout = rng.gen_range(100..2000);
|
||||
let max_delay = rng.gen_range(1..2*timeout);
|
||||
let min_delay = rng.gen_range(1..=max_delay);
|
||||
|
||||
let max_fail_prob = rng.gen_range(0.0..0.9);
|
||||
let connect_fail_prob = rng.gen_range(0.0..max_fail_prob);
|
||||
let send_fail_prob = rng.gen_range(0.0..connect_fail_prob);
|
||||
|
||||
NetworkOptions {
|
||||
keepalive_timeout: Some(timeout),
|
||||
connect_delay: Delay {
|
||||
min: min_delay,
|
||||
max: max_delay,
|
||||
fail_prob: connect_fail_prob,
|
||||
},
|
||||
send_delay: Delay {
|
||||
min: min_delay,
|
||||
max: max_delay,
|
||||
fail_prob: send_fail_prob,
|
||||
},
|
||||
}
|
||||
}
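// A hypothetical sketch checking the ordering the sampler above maintains for any
// seed: min <= max for delays, and send_fail_prob <= connect_fail_prob < 0.9.
#[test]
fn network_opts_respect_bounds() {
    for seed in 0..100u64 {
        let opts = generate_network_opts(seed);
        assert!(opts.connect_delay.min <= opts.connect_delay.max);
        assert!(opts.send_delay.fail_prob <= opts.connect_delay.fail_prob);
        assert!(opts.connect_delay.fail_prob < 0.9);
    }
}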
|
||||
|
||||
#[derive(Debug,Clone,PartialEq,Eq)]
|
||||
enum NodeKind {
|
||||
Unknown,
|
||||
Safekeeper,
|
||||
WalProposer,
|
||||
}
|
||||
|
||||
impl Default for NodeKind {
|
||||
fn default() -> Self {
|
||||
Self::Unknown
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default)]
|
||||
struct NodeInfo {
|
||||
kind: NodeKind,
|
||||
|
||||
// walproposer
|
||||
is_sync: bool,
|
||||
term: u64,
|
||||
epoch_lsn: u64,
|
||||
|
||||
// safekeeper
|
||||
commit_lsn: u64,
|
||||
flush_lsn: u64,
|
||||
}
|
||||
|
||||
impl NodeInfo {
|
||||
fn init_kind(&mut self, kind: NodeKind) {
|
||||
if self.kind == NodeKind::Unknown {
|
||||
self.kind = kind;
|
||||
} else {
|
||||
assert!(self.kind == kind);
|
||||
}
|
||||
}
|
||||
|
||||
fn started(&mut self, data: &str) {
|
||||
let mut parts = data.split(';');
|
||||
assert!(parts.next().unwrap() == "started");
|
||||
match parts.next().unwrap() {
|
||||
"safekeeper" => {
|
||||
self.init_kind(NodeKind::Safekeeper);
|
||||
}
|
||||
"walproposer" => {
|
||||
self.init_kind(NodeKind::WalProposer);
|
||||
let is_sync: u8 = parts.next().unwrap().parse().unwrap();
|
||||
self.is_sync = is_sync != 0;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
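// The event strings this parser accepts, as implied by the code above:
//   "started;safekeeper"
//   "started;walproposer;<is_sync>"   (is_sync: 1 = sync-safekeepers mode, 0 = normal)
// A minimal, hypothetical check:
#[test]
fn parses_started_events() {
    let mut node = NodeInfo::default();
    node.started("started;walproposer;1");
    assert_eq!(node.kind, NodeKind::WalProposer);
    assert!(node.is_sync);
}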
|
||||
|
||||
#[derive(Debug,Default)]
|
||||
struct GlobalState {
|
||||
nodes: Vec<NodeInfo>,
|
||||
commit_lsn: u64,
|
||||
write_lsn: u64,
|
||||
max_write_lsn: u64,
|
||||
|
||||
written_wal: u64,
|
||||
written_records: u64,
|
||||
}
|
||||
|
||||
impl GlobalState {
|
||||
fn new() -> Self {
|
||||
Default::default()
|
||||
}
|
||||
|
||||
fn get(&mut self, id: u32) -> &mut NodeInfo {
|
||||
let id = id as usize;
|
||||
if id >= self.nodes.len() {
|
||||
self.nodes.resize(id + 1, NodeInfo::default());
|
||||
}
|
||||
&mut self.nodes[id]
|
||||
}
|
||||
}
|
||||
|
||||
pub fn validate_events(events: Vec<SEvent>) {
|
||||
const INITDB_LSN: u64 = 21623024;
|
||||
|
||||
let hook = std::panic::take_hook();
|
||||
scopeguard::defer_on_success! {
|
||||
std::panic::set_hook(hook);
|
||||
};
|
||||
|
||||
let mut state = GlobalState::new();
|
||||
state.max_write_lsn = INITDB_LSN;
|
||||
|
||||
for event in events {
|
||||
debug!("{:?}", event);
|
||||
|
||||
let node = state.get(event.node);
|
||||
if event.data.starts_with("started;") {
|
||||
node.started(&event.data);
|
||||
continue;
|
||||
}
|
||||
assert!(node.kind != NodeKind::Unknown);
|
||||
|
||||
// clone the node info to release the mutable borrow of state
|
||||
let mut node = node.clone();
|
||||
|
||||
let mut parts = event.data.split(';');
|
||||
match node.kind {
|
||||
NodeKind::Safekeeper => {
|
||||
match parts.next().unwrap() {
|
||||
"tli_loaded" => {
|
||||
let flush_lsn: u64 = parts.next().unwrap().parse().unwrap();
|
||||
let commit_lsn: u64 = parts.next().unwrap().parse().unwrap();
|
||||
node.flush_lsn = flush_lsn;
|
||||
node.commit_lsn = commit_lsn;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
NodeKind::WalProposer => {
|
||||
match parts.next().unwrap() {
|
||||
"prop_elected" => {
|
||||
let prop_lsn: u64 = parts.next().unwrap().parse().unwrap();
|
||||
let prop_term: u64 = parts.next().unwrap().parse().unwrap();
|
||||
let prev_lsn: u64 = parts.next().unwrap().parse().unwrap();
|
||||
let prev_term: u64 = parts.next().unwrap().parse().unwrap();
|
||||
|
||||
assert!(prop_lsn >= prev_lsn);
|
||||
assert!(prop_term >= prev_term);
|
||||
|
||||
assert!(prop_lsn >= state.commit_lsn);
|
||||
|
||||
if prop_lsn > state.write_lsn {
|
||||
assert!(prop_lsn <= state.max_write_lsn);
|
||||
debug!("moving write_lsn up from {} to {}", state.write_lsn, prop_lsn);
|
||||
state.write_lsn = prop_lsn;
|
||||
}
|
||||
if prop_lsn < state.write_lsn {
|
||||
debug!("moving write_lsn down from {} to {}", state.write_lsn, prop_lsn);
|
||||
state.write_lsn = prop_lsn;
|
||||
}
|
||||
|
||||
node.epoch_lsn = prop_lsn;
|
||||
node.term = prop_term;
|
||||
}
|
||||
"write_wal" => {
|
||||
assert!(!node.is_sync);
|
||||
let start_lsn: u64 = parts.next().unwrap().parse().unwrap();
|
||||
let end_lsn: u64 = parts.next().unwrap().parse().unwrap();
|
||||
let cnt: u64 = parts.next().unwrap().parse().unwrap();
|
||||
|
||||
let size = end_lsn - start_lsn;
|
||||
state.written_wal += size;
|
||||
state.written_records += cnt;
|
||||
|
||||
// TODO: revisit these assertions if we allow writing WAL before winning the election.
|
||||
|
||||
assert!(start_lsn >= state.commit_lsn);
|
||||
assert!(end_lsn >= start_lsn);
|
||||
assert!(start_lsn == state.write_lsn);
|
||||
state.write_lsn = end_lsn;
|
||||
|
||||
if end_lsn > state.max_write_lsn {
|
||||
state.max_write_lsn = end_lsn;
|
||||
}
|
||||
}
|
||||
"commit_lsn" => {
|
||||
let lsn: u64 = parts.next().unwrap().parse().unwrap();
|
||||
assert!(lsn >= state.commit_lsn);
|
||||
state.commit_lsn = lsn;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// update the node in the state struct
|
||||
*state.get(event.node) = node;
|
||||
}
|
||||
}
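// Typical use, mirroring the randomized tests in wp_sk.rs: run a schedule, then
// feed the recorded simulation events through this checker.
//
//   let test = config.start(seed);
//   test.run_schedule(&generate_schedule(seed)).unwrap();
//   validate_events(test.world.take_events());
//   test.world.deallocate();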
|
||||
libs/walproposer/src/simtest/wp_sk.rs (new file, 265 lines)
@@ -0,0 +1,265 @@
|
||||
use std::{ffi::CString, path::Path, str::FromStr, sync::Arc};
|
||||
|
||||
use rand::Rng;
|
||||
use safekeeper::simlib::{
|
||||
network::{Delay, NetworkOptions},
|
||||
proto::AnyMessage,
|
||||
world::World,
|
||||
world::{Node, NodeEvent},
|
||||
};
|
||||
use tracing::{info, warn};
|
||||
use utils::{id::TenantTimelineId, lsn::Lsn};
|
||||
|
||||
use crate::{
|
||||
bindings::{
|
||||
neon_tenant_walproposer, neon_timeline_walproposer, sim_redo_start_lsn, syncSafekeepers,
|
||||
wal_acceptor_connection_timeout, wal_acceptor_reconnect_timeout, wal_acceptors_list,
|
||||
MyInsertRecord, WalProposerCleanup, WalProposerRust,
|
||||
},
|
||||
c_context,
|
||||
simtest::{
|
||||
log::{init_logger, SimClock},
|
||||
safekeeper::run_server,
|
||||
util::{generate_schedule, TestConfig, generate_network_opts, validate_events},
|
||||
}, enable_debug,
|
||||
};
|
||||
|
||||
use super::{
|
||||
disk::Disk,
|
||||
util::{Schedule, TestAction},
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn sync_empty_safekeepers() {
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
let test = config.start(1337);
|
||||
|
||||
let lsn = test.sync_safekeepers().unwrap();
|
||||
assert_eq!(lsn, Lsn(0));
|
||||
info!("Sucessfully synced empty safekeepers at 0/0");
|
||||
|
||||
let lsn = test.sync_safekeepers().unwrap();
|
||||
assert_eq!(lsn, Lsn(0));
|
||||
info!("Sucessfully synced (again) empty safekeepers at 0/0");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn run_walproposer_generate_wal() {
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
// config.network.timeout = Some(250);
|
||||
let test = config.start(1337);
|
||||
|
||||
let lsn = test.sync_safekeepers().unwrap();
|
||||
assert_eq!(lsn, Lsn(0));
|
||||
info!("Sucessfully synced empty safekeepers at 0/0");
|
||||
|
||||
let mut wp = test.launch_walproposer(lsn);
|
||||
|
||||
test.poll_for_duration(30);
|
||||
|
||||
for _ in 0..100 {
|
||||
wp.write_tx(1);
|
||||
test.poll_for_duration(5);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn crash_safekeeper() {
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
// config.network.timeout = Some(250);
|
||||
let test = config.start(1337);
|
||||
|
||||
let lsn = test.sync_safekeepers().unwrap();
|
||||
assert_eq!(lsn, Lsn(0));
|
||||
info!("Sucessfully synced empty safekeepers at 0/0");
|
||||
|
||||
let mut wp = test.launch_walproposer(lsn);
|
||||
|
||||
test.poll_for_duration(30);
|
||||
|
||||
wp.write_tx(3);
|
||||
|
||||
test.servers[0].restart();
|
||||
|
||||
test.poll_for_duration(100);
|
||||
test.poll_for_duration(1000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_restart() {
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
// config.network.timeout = Some(250);
|
||||
let test = config.start(1337);
|
||||
|
||||
let lsn = test.sync_safekeepers().unwrap();
|
||||
assert_eq!(lsn, Lsn(0));
|
||||
info!("Sucessfully synced empty safekeepers at 0/0");
|
||||
|
||||
let mut wp = test.launch_walproposer(lsn);
|
||||
|
||||
test.poll_for_duration(30);
|
||||
|
||||
wp.write_tx(3);
|
||||
test.poll_for_duration(100);
|
||||
|
||||
wp.stop();
|
||||
drop(wp);
|
||||
|
||||
let lsn = test.sync_safekeepers().unwrap();
|
||||
info!("Sucessfully synced safekeepers at {}", lsn);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_simple_schedule() -> anyhow::Result<()> {
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
config.network.keepalive_timeout = Some(100);
|
||||
let test = config.start(1337);
|
||||
|
||||
let schedule: Schedule = vec![
|
||||
(0, TestAction::RestartWalProposer),
|
||||
(50, TestAction::WriteTx(5)),
|
||||
(100, TestAction::RestartSafekeeper(0)),
|
||||
(100, TestAction::WriteTx(5)),
|
||||
(110, TestAction::RestartSafekeeper(1)),
|
||||
(110, TestAction::WriteTx(5)),
|
||||
(120, TestAction::RestartSafekeeper(2)),
|
||||
(120, TestAction::WriteTx(5)),
|
||||
(201, TestAction::RestartWalProposer),
|
||||
(251, TestAction::RestartSafekeeper(0)),
|
||||
(251, TestAction::RestartSafekeeper(1)),
|
||||
(251, TestAction::RestartSafekeeper(2)),
|
||||
(251, TestAction::WriteTx(5)),
|
||||
(255, TestAction::WriteTx(5)),
|
||||
(1000, TestAction::WriteTx(5)),
|
||||
];
|
||||
|
||||
test.run_schedule(&schedule)?;
|
||||
info!("Test finished, stopping all threads");
|
||||
test.world.deallocate();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_many_tx() -> anyhow::Result<()> {
|
||||
enable_debug();
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
let test = config.start(1337);
|
||||
|
||||
let mut schedule: Schedule = vec![];
|
||||
for i in 0..100 {
|
||||
schedule.push((i * 10, TestAction::WriteTx(10)));
|
||||
}
|
||||
|
||||
test.run_schedule(&schedule)?;
|
||||
info!("Test finished, stopping all threads");
|
||||
test.world.stop_all();
|
||||
|
||||
let events = test.world.take_events();
|
||||
info!("Events: {:?}", events);
|
||||
let last_commit_lsn = events
|
||||
.iter()
|
||||
.filter_map(|event| {
|
||||
if event.data.starts_with("commit_lsn;") {
|
||||
let lsn: u64 = event.data.split(';').nth(1).unwrap().parse().unwrap();
|
||||
return Some(lsn);
|
||||
}
|
||||
None
|
||||
})
|
||||
.last()
|
||||
.unwrap();
|
||||
|
||||
let initdb_lsn = 21623024;
|
||||
let diff = last_commit_lsn - initdb_lsn;
|
||||
info!("Last commit lsn: {}, diff: {}", last_commit_lsn, diff);
|
||||
assert!(diff > 1000 * 8);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_random_schedules() -> anyhow::Result<()> {
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
config.network.keepalive_timeout = Some(100);
|
||||
|
||||
for i in 0..30000 {
|
||||
let seed: u64 = rand::thread_rng().gen();
|
||||
config.network = generate_network_opts(seed);
|
||||
|
||||
let test = config.start(seed);
|
||||
warn!("Running test with seed {}", seed);
|
||||
|
||||
let schedule = generate_schedule(seed);
|
||||
test.run_schedule(&schedule).unwrap();
|
||||
validate_events(test.world.take_events());
|
||||
test.world.deallocate();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_schedule() -> anyhow::Result<()> {
|
||||
enable_debug();
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
config.network.keepalive_timeout = Some(100);
|
||||
|
||||
// let seed = 6762900106769428342;
|
||||
// let test = config.start(seed);
|
||||
// warn!("Running test with seed {}", seed);
|
||||
|
||||
// let schedule = generate_schedule(seed);
|
||||
// info!("schedule: {:?}", schedule);
|
||||
// test.run_schedule(&schedule)?;
|
||||
// test.world.deallocate();
|
||||
|
||||
let seed = 3649773280641776194;
|
||||
config.network = generate_network_opts(seed);
|
||||
info!("network: {:?}", config.network);
|
||||
let test = config.start(seed);
|
||||
warn!("Running test with seed {}", seed);
|
||||
|
||||
let schedule = generate_schedule(seed);
|
||||
info!("schedule: {:?}", schedule);
|
||||
test.run_schedule(&schedule).unwrap();
|
||||
validate_events(test.world.take_events());
|
||||
test.world.deallocate();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_res_dealloc() -> anyhow::Result<()> {
|
||||
// enable_debug();
|
||||
let clock = init_logger();
|
||||
let mut config = TestConfig::new(Some(clock));
|
||||
|
||||
// print pid
|
||||
let pid = unsafe { libc::getpid() };
|
||||
info!("pid: {}", pid);
|
||||
|
||||
let seed = 123456;
|
||||
config.network = generate_network_opts(seed);
|
||||
let test = config.start(seed);
|
||||
warn!("Running test with seed {}", seed);
|
||||
|
||||
let schedule = generate_schedule(seed);
|
||||
info!("schedule: {:?}", schedule);
|
||||
test.run_schedule(&schedule).unwrap();
|
||||
test.world.stop_all();
|
||||
|
||||
let world = test.world.clone();
|
||||
drop(test);
|
||||
info!("world strong count: {}", Arc::strong_count(&world));
|
||||
world.deallocate();
|
||||
info!("world strong count: {}", Arc::strong_count(&world));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
libs/walproposer/src/test.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
use tracing::info;
|
||||
|
||||
use crate::bindings::{TestFunc, MyContextInit};
|
||||
|
||||
#[test]
|
||||
fn test_rust_c_calls() {
|
||||
let res = std::thread::spawn(|| {
|
||||
let res = unsafe {
|
||||
MyContextInit();
|
||||
TestFunc(1, 2)
|
||||
};
|
||||
res
|
||||
}).join().unwrap();
|
||||
info!("res: {}", res);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sim_bindings() {
|
||||
std::thread::spawn(|| {
|
||||
unsafe {
|
||||
MyContextInit();
|
||||
TestFunc(1, 2)
|
||||
}
|
||||
}).join().unwrap();
|
||||
std::thread::spawn(|| {
|
||||
unsafe {
|
||||
MyContextInit();
|
||||
TestFunc(1, 2)
|
||||
}
|
||||
}).join().unwrap();
|
||||
}
|
||||
libs/walproposer/test.c (new file, 100 lines)
@@ -0,0 +1,100 @@
|
||||
#include "bindgen_deps.h"
|
||||
#include "rust_bindings.h"
|
||||
#include <stdio.h>
|
||||
#include <pthread.h>
|
||||
#include <stdlib.h>
|
||||
#include "postgres.h"
|
||||
#include "utils/memutils.h"
|
||||
#include "utils/guc.h"
|
||||
#include "miscadmin.h"
|
||||
#include "common/pg_prng.h"
|
||||
|
||||
// From src/backend/main/main.c
|
||||
const char *progname = "fakepostgres";
|
||||
|
||||
int TestFunc(int a, int b) {
|
||||
printf("TestFunc: %d + %d = %d\n", a, b, a + b);
|
||||
rust_function(0);
|
||||
elog(LOG, "postgres elog test");
|
||||
printf("After rust_function\n");
|
||||
return a + b;
|
||||
}
|
||||
|
||||
// This is a quick experiment with rewriting existing Rust code in C.
|
||||
void RunClientC(uint32_t serverId) {
|
||||
uint32_t clientId = sim_id();
|
||||
|
||||
elog(LOG, "started client");
|
||||
|
||||
int data_len = 5;
|
||||
|
||||
int delivered = 0;
|
||||
int tcp = sim_open_tcp(serverId);
|
||||
while (delivered < data_len) {
|
||||
sim_msg_set_repl_cell(delivered+1, clientId, delivered);
|
||||
sim_tcp_send(tcp);
|
||||
|
||||
Event event = sim_epoll_rcv(-1);
|
||||
switch (event.tag)
|
||||
{
|
||||
case Closed:
|
||||
elog(LOG, "connection closed");
|
||||
tcp = sim_open_tcp(serverId);
|
||||
break;
|
||||
|
||||
case Message:
|
||||
Assert(event.any_message == Just32);
|
||||
uint32_t val;
|
||||
sim_msg_get_just_u32(&val);
|
||||
if (val == delivered + 1) {
|
||||
delivered += 1;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
Assert(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool debug_enabled = false;
|
||||
|
||||
bool initializedMemoryContext = false;
|
||||
// pthread_mutex_init(&lock, NULL)?
|
||||
pthread_mutex_t lock;
|
||||
|
||||
void MyContextInit() {
|
||||
// Initializes global variables. TODO: how to make them thread-local?
|
||||
pthread_mutex_lock(&lock);
|
||||
if (!initializedMemoryContext) {
|
||||
initializedMemoryContext = true;
|
||||
MemoryContextInit();
|
||||
pg_prng_seed(&pg_global_prng_state, 0);
|
||||
|
||||
setenv("PGDATA", "/home/admin/simulator/libs/walproposer/pgdata", 1);
|
||||
|
||||
/*
|
||||
* Set default values for command-line options.
|
||||
*/
|
||||
InitializeGUCOptions();
|
||||
|
||||
/* Acquire configuration parameters */
|
||||
if (!SelectConfigFiles(NULL, progname))
|
||||
exit(1);
|
||||
|
||||
if (debug_enabled) {
|
||||
log_min_messages = LOG;
|
||||
} else {
|
||||
log_min_messages = FATAL;
|
||||
}
|
||||
Log_line_prefix = "[%p] ";
|
||||
|
||||
InitializeMaxBackends();
|
||||
ChangeToDataDir();
|
||||
CreateSharedMemoryAndSemaphores();
|
||||
SetInstallXLogFileSegmentActive();
|
||||
// CreateAuxProcessResourceOwner();
|
||||
// StartupXLOG();
|
||||
}
|
||||
pthread_mutex_unlock(&lock);
|
||||
}
|
||||
@@ -23,6 +23,7 @@ const_format.workspace = true
|
||||
consumption_metrics.workspace = true
|
||||
crc32c.workspace = true
|
||||
crossbeam-utils.workspace = true
|
||||
either.workspace = true
|
||||
fail.workspace = true
|
||||
futures.workspace = true
|
||||
git-version.workspace = true
|
||||
@@ -51,7 +52,7 @@ thiserror.workspace = true
|
||||
tokio = { workspace = true, features = ["process", "sync", "fs", "rt", "io-util", "time"] }
|
||||
tokio-postgres.workspace = true
|
||||
tokio-util.workspace = true
|
||||
toml_edit.workspace = true
|
||||
toml_edit = { workspace = true, features = [ "serde" ] }
|
||||
tracing.workspace = true
|
||||
url.workspace = true
|
||||
walkdir.workspace = true
|
||||
@@ -67,6 +68,10 @@ utils.workspace = true
|
||||
workspace_hack.workspace = true
|
||||
reqwest.workspace = true
|
||||
rpds.workspace = true
|
||||
enum-map.workspace = true
|
||||
enumset.workspace = true
|
||||
strum.workspace = true
|
||||
strum_macros.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
criterion.workspace = true
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
use pageserver::keyspace::{KeyPartitioning, KeySpace};
|
||||
use pageserver::repository::Key;
|
||||
use pageserver::tenant::layer_map::LayerMap;
|
||||
use pageserver::tenant::storage_layer::Layer;
|
||||
use pageserver::tenant::storage_layer::{DeltaFileName, ImageFileName, LayerDescriptor};
|
||||
use pageserver::tenant::storage_layer::{Layer, LayerDescriptor, LayerFileName};
|
||||
use rand::prelude::{SeedableRng, SliceRandom, StdRng};
|
||||
use std::cmp::{max, min};
|
||||
use std::fs::File;
|
||||
@@ -26,30 +25,15 @@ fn build_layer_map(filename_dump: PathBuf) -> LayerMap<LayerDescriptor> {
|
||||
|
||||
let mut updates = layer_map.batch_update();
|
||||
for fname in filenames {
|
||||
let fname = &fname.unwrap();
|
||||
if let Some(imgfilename) = ImageFileName::parse_str(fname) {
|
||||
let layer = LayerDescriptor {
|
||||
key: imgfilename.key_range,
|
||||
lsn: imgfilename.lsn..(imgfilename.lsn + 1),
|
||||
is_incremental: false,
|
||||
short_id: fname.to_string(),
|
||||
};
|
||||
updates.insert_historic(Arc::new(layer));
|
||||
min_lsn = min(min_lsn, imgfilename.lsn);
|
||||
max_lsn = max(max_lsn, imgfilename.lsn);
|
||||
} else if let Some(deltafilename) = DeltaFileName::parse_str(fname) {
|
||||
let layer = LayerDescriptor {
|
||||
key: deltafilename.key_range.clone(),
|
||||
lsn: deltafilename.lsn_range.clone(),
|
||||
is_incremental: true,
|
||||
short_id: fname.to_string(),
|
||||
};
|
||||
updates.insert_historic(Arc::new(layer));
|
||||
min_lsn = min(min_lsn, deltafilename.lsn_range.start);
|
||||
max_lsn = max(max_lsn, deltafilename.lsn_range.end);
|
||||
} else {
|
||||
panic!("unexpected filename {fname}");
|
||||
}
|
||||
let fname = fname.unwrap();
|
||||
let fname = LayerFileName::from_str(&fname).unwrap();
|
||||
let layer = LayerDescriptor::from(fname);
|
||||
|
||||
let lsn_range = layer.get_lsn_range();
|
||||
min_lsn = min(min_lsn, lsn_range.start);
|
||||
max_lsn = max(max_lsn, Lsn(lsn_range.end.0 - 1));
|
||||
|
||||
updates.insert_historic(Arc::new(layer));
|
||||
}
|
||||
|
||||
println!("min: {min_lsn}, max: {max_lsn}");
|
||||
|
||||
@@ -27,11 +27,13 @@ use tracing::*;
|
||||
///
|
||||
use tokio_tar::{Builder, EntryType, Header};
|
||||
|
||||
use crate::context::RequestContext;
|
||||
use crate::tenant::Timeline;
|
||||
use pageserver_api::reltag::{RelTag, SlruKind};
|
||||
|
||||
use postgres_ffi::pg_constants::{DEFAULTTABLESPACE_OID, GLOBALTABLESPACE_OID};
|
||||
use postgres_ffi::pg_constants::{PGDATA_SPECIAL_FILES, PGDATA_SUBDIRS, PG_HBA};
|
||||
use postgres_ffi::relfile_utils::{INIT_FORKNUM, MAIN_FORKNUM};
|
||||
use postgres_ffi::TransactionId;
|
||||
use postgres_ffi::XLogFileName;
|
||||
use postgres_ffi::PG_TLI;
|
||||
@@ -52,6 +54,7 @@ pub async fn send_basebackup_tarball<'a, W>(
|
||||
req_lsn: Option<Lsn>,
|
||||
prev_lsn: Option<Lsn>,
|
||||
full_backup: bool,
|
||||
ctx: &'a RequestContext,
|
||||
) -> anyhow::Result<()>
|
||||
where
|
||||
W: AsyncWrite + Send + Sync + Unpin,
|
||||
@@ -110,6 +113,7 @@ where
|
||||
lsn: backup_lsn,
|
||||
prev_record_lsn: prev_lsn,
|
||||
full_backup,
|
||||
ctx,
|
||||
};
|
||||
basebackup
|
||||
.send_tarball()
|
||||
@@ -129,6 +133,7 @@ where
|
||||
lsn: Lsn,
|
||||
prev_record_lsn: Lsn,
|
||||
full_backup: bool,
|
||||
ctx: &'a RequestContext,
|
||||
}
|
||||
|
||||
impl<'a, W> Basebackup<'a, W>
|
||||
@@ -171,23 +176,54 @@ where
|
||||
SlruKind::MultiXactOffsets,
|
||||
SlruKind::MultiXactMembers,
|
||||
] {
|
||||
for segno in self.timeline.list_slru_segments(kind, self.lsn).await? {
|
||||
for segno in self
|
||||
.timeline
|
||||
.list_slru_segments(kind, self.lsn, self.ctx)
|
||||
.await?
|
||||
{
|
||||
self.add_slru_segment(kind, segno).await?;
|
||||
}
|
||||
}
|
||||
|
||||
// Create tablespace directories
|
||||
for ((spcnode, dbnode), has_relmap_file) in self.timeline.list_dbdirs(self.lsn).await? {
|
||||
for ((spcnode, dbnode), has_relmap_file) in
|
||||
self.timeline.list_dbdirs(self.lsn, self.ctx).await?
|
||||
{
|
||||
self.add_dbdir(spcnode, dbnode, has_relmap_file).await?;
|
||||
|
||||
// Gather and send relational files in each database if full backup is requested.
|
||||
if self.full_backup {
|
||||
for rel in self.timeline.list_rels(spcnode, dbnode, self.lsn).await? {
|
||||
self.add_rel(rel).await?;
|
||||
// If full backup is requested, include all relation files.
|
||||
// Otherwise only include init forks of unlogged relations.
|
||||
let rels = self
|
||||
.timeline
|
||||
.list_rels(spcnode, dbnode, self.lsn, self.ctx)
|
||||
.await?;
|
||||
for &rel in rels.iter() {
|
||||
// Send init fork as main fork to provide well formed empty
|
||||
// contents of UNLOGGED relations. Postgres copies it in
|
||||
// `reinit.c` during recovery.
|
||||
if rel.forknum == INIT_FORKNUM {
|
||||
// I doubt we need the _init fork itself, but having it at least
// serves as a marker that the relation is unlogged.
|
||||
self.add_rel(rel, rel).await?;
|
||||
self.add_rel(rel, rel.with_forknum(MAIN_FORKNUM)).await?;
|
||||
continue;
|
||||
}
|
||||
|
||||
if self.full_backup {
|
||||
if rel.forknum == MAIN_FORKNUM && rels.contains(&rel.with_forknum(INIT_FORKNUM))
|
||||
{
|
||||
// skip this, will include it when we reach the init fork
|
||||
continue;
|
||||
}
|
||||
self.add_rel(rel, rel).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
for xid in self.timeline.list_twophase_files(self.lsn).await? {
|
||||
for xid in self
|
||||
.timeline
|
||||
.list_twophase_files(self.lsn, self.ctx)
|
||||
.await?
|
||||
{
|
||||
self.add_twophase_file(xid).await?;
|
||||
}
|
||||
|
||||
@@ -202,12 +238,16 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn add_rel(&mut self, tag: RelTag) -> anyhow::Result<()> {
|
||||
let nblocks = self.timeline.get_rel_size(tag, self.lsn, false).await?;
|
||||
/// Add contents of relfilenode `src`, naming it as `dst`.
|
||||
async fn add_rel(&mut self, src: RelTag, dst: RelTag) -> anyhow::Result<()> {
|
||||
let nblocks = self
|
||||
.timeline
|
||||
.get_rel_size(src, self.lsn, false, self.ctx)
|
||||
.await?;
|
||||
|
||||
// If the relation is empty, create an empty file
|
||||
if nblocks == 0 {
|
||||
let file_name = tag.to_segfile_name(0);
|
||||
let file_name = dst.to_segfile_name(0);
|
||||
let header = new_tar_header(&file_name, 0)?;
|
||||
self.ar.append(&header, &mut io::empty()).await?;
|
||||
return Ok(());
|
||||
@@ -223,12 +263,12 @@ where
|
||||
for blknum in startblk..endblk {
|
||||
let img = self
|
||||
.timeline
|
||||
.get_rel_page_at_lsn(tag, blknum, self.lsn, false)
|
||||
.get_rel_page_at_lsn(src, blknum, self.lsn, false, self.ctx)
|
||||
.await?;
|
||||
segment_data.extend_from_slice(&img[..]);
|
||||
}
|
||||
|
||||
let file_name = tag.to_segfile_name(seg as u32);
|
||||
let file_name = dst.to_segfile_name(seg as u32);
|
||||
let header = new_tar_header(&file_name, segment_data.len() as u64)?;
|
||||
self.ar.append(&header, segment_data.as_slice()).await?;
|
||||
|
||||
@@ -245,14 +285,14 @@ where
|
||||
async fn add_slru_segment(&mut self, slru: SlruKind, segno: u32) -> anyhow::Result<()> {
|
||||
let nblocks = self
|
||||
.timeline
|
||||
.get_slru_segment_size(slru, segno, self.lsn)
|
||||
.get_slru_segment_size(slru, segno, self.lsn, self.ctx)
|
||||
.await?;
|
||||
|
||||
let mut slru_buf: Vec<u8> = Vec::with_capacity(nblocks as usize * BLCKSZ as usize);
|
||||
for blknum in 0..nblocks {
|
||||
let img = self
|
||||
.timeline
|
||||
.get_slru_page_at_lsn(slru, segno, blknum, self.lsn)
|
||||
.get_slru_page_at_lsn(slru, segno, blknum, self.lsn, self.ctx)
|
||||
.await?;
|
||||
|
||||
if slru == SlruKind::Clog {
|
||||
@@ -287,7 +327,7 @@ where
|
||||
let relmap_img = if has_relmap_file {
|
||||
let img = self
|
||||
.timeline
|
||||
.get_relmap_file(spcnode, dbnode, self.lsn)
|
||||
.get_relmap_file(spcnode, dbnode, self.lsn, self.ctx)
|
||||
.await?;
|
||||
ensure!(img.len() == 512);
|
||||
Some(img)
|
||||
@@ -323,7 +363,7 @@ where
|
||||
if !has_relmap_file
|
||||
&& self
|
||||
.timeline
|
||||
.list_rels(spcnode, dbnode, self.lsn)
|
||||
.list_rels(spcnode, dbnode, self.lsn, self.ctx)
|
||||
.await?
|
||||
.is_empty()
|
||||
{
|
||||
@@ -356,7 +396,10 @@ where
|
||||
// Extract twophase state files
|
||||
//
|
||||
async fn add_twophase_file(&mut self, xid: TransactionId) -> anyhow::Result<()> {
|
||||
let img = self.timeline.get_twophase_file(xid, self.lsn).await?;
|
||||
let img = self
|
||||
.timeline
|
||||
.get_twophase_file(xid, self.lsn, self.ctx)
|
||||
.await?;
|
||||
|
||||
let mut buf = BytesMut::new();
|
||||
buf.extend_from_slice(&img[..]);
|
||||
@@ -394,12 +437,12 @@ where
|
||||
|
||||
let checkpoint_bytes = self
|
||||
.timeline
|
||||
.get_checkpoint(self.lsn)
|
||||
.get_checkpoint(self.lsn, self.ctx)
|
||||
.await
|
||||
.context("failed to get checkpoint bytes")?;
|
||||
let pg_control_bytes = self
|
||||
.timeline
|
||||
.get_control_file(self.lsn)
|
||||
.get_control_file(self.lsn, self.ctx)
|
||||
.await
|
||||
.context("failed get control bytes")?;
|
||||
|
||||
|
||||
pageserver/src/bin/layer_map_analyzer.rs (new file, 230 lines)
@@ -0,0 +1,230 @@
|
||||
//! Tool for extracting content-dependent metadata about layers. Useful for scanning real project layer files and evaluating the effectiveness of different heuristics on them.
|
||||
//!
|
||||
//! Currently it only analyzes holes, which are regions within the layer's key range for which the layer contains no updates. In the future it might do more analysis (maybe key quantiles?) but it should never return sensitive data.
|
||||
|
||||
use anyhow::Result;
|
||||
use std::cmp::Ordering;
|
||||
use std::collections::BinaryHeap;
|
||||
use std::ops::Range;
|
||||
use std::{env, fs, path::Path, path::PathBuf, str, str::FromStr};
|
||||
|
||||
use pageserver::page_cache::PAGE_SZ;
|
||||
use pageserver::repository::{Key, KEY_SIZE};
|
||||
use pageserver::tenant::block_io::{BlockReader, FileBlockReader};
|
||||
use pageserver::tenant::disk_btree::{DiskBtreeReader, VisitDirection};
|
||||
use pageserver::tenant::storage_layer::delta_layer::{Summary, DELTA_KEY_SIZE};
|
||||
use pageserver::tenant::storage_layer::range_overlaps;
|
||||
use pageserver::virtual_file::VirtualFile;
|
||||
|
||||
use utils::{bin_ser::BeSer, lsn::Lsn};
|
||||
|
||||
const MIN_HOLE_LENGTH: i128 = (128 * 1024 * 1024 / PAGE_SZ) as i128;
|
||||
const DEFAULT_MAX_HOLES: usize = 10;
|
||||
|
||||
/// Wrapper for key range to provide reverse ordering by range length for BinaryHeap
|
||||
#[derive(PartialEq, Eq)]
|
||||
struct Hole(Range<Key>);
|
||||
|
||||
impl Ord for Hole {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
let other_len = other.0.end.to_i128() - other.0.start.to_i128();
|
||||
let self_len = self.0.end.to_i128() - self.0.start.to_i128();
|
||||
other_len.cmp(&self_len)
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for Hole {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
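// Because Ord is reversed (a longer hole compares as smaller), BinaryHeap<Hole> —
// normally a max-heap — behaves as a min-heap by hole length: pop() evicts the
// smallest hole, so only the max_holes largest ones survive. A hypothetical check,
// assuming Key::from_i128 (the inverse of the to_i128 used above) is available:
#[test]
fn heap_pops_smallest_hole_first() {
    let long = Hole(Key::from_i128(0)..Key::from_i128(1_000_000));
    let short = Hole(Key::from_i128(0)..Key::from_i128(10));
    let mut heap = BinaryHeap::new();
    heap.push(long);
    heap.push(short);
    let popped = heap.pop().unwrap();
    assert_eq!(popped.0.end.to_i128() - popped.0.start.to_i128(), 10);
}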
|
||||
|
||||
struct LayerFile {
|
||||
key_range: Range<Key>,
|
||||
lsn_range: Range<Lsn>,
|
||||
is_delta: bool,
|
||||
holes: Vec<Hole>,
|
||||
}
|
||||
|
||||
impl LayerFile {
|
||||
fn skips(&self, key_range: &Range<Key>) -> bool {
|
||||
if !range_overlaps(&self.key_range, key_range) {
|
||||
return false;
|
||||
}
|
||||
let start = match self
|
||||
.holes
|
||||
.binary_search_by_key(&key_range.start, |hole| hole.0.start)
|
||||
{
|
||||
Ok(index) => index,
|
||||
Err(index) => {
|
||||
if index == 0 {
|
||||
return false;
|
||||
}
|
||||
index - 1
|
||||
}
|
||||
};
|
||||
self.holes[start].0.end >= key_range.end
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_filename(name: &str) -> Option<LayerFile> {
|
||||
let split: Vec<&str> = name.split("__").collect();
|
||||
if split.len() != 2 {
|
||||
return None;
|
||||
}
|
||||
let keys: Vec<&str> = split[0].split('-').collect();
|
||||
let mut lsns: Vec<&str> = split[1].split('-').collect();
|
||||
let is_delta = if lsns.len() == 1 {
|
||||
lsns.push(lsns[0]);
|
||||
false
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
let key_range = Key::from_hex(keys[0]).unwrap()..Key::from_hex(keys[1]).unwrap();
|
||||
let lsn_range = Lsn::from_hex(lsns[0]).unwrap()..Lsn::from_hex(lsns[1]).unwrap();
|
||||
let holes = Vec::new();
|
||||
Some(LayerFile {
|
||||
key_range,
|
||||
lsn_range,
|
||||
is_delta,
|
||||
holes,
|
||||
})
|
||||
}
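// The two layer file name shapes accepted above (anything else returns None and
// is skipped by the caller):
//   image:  "<KEY_START>-<KEY_END>__<LSN>"                  -> is_delta = false, LSN range collapsed to a point
//   delta:  "<KEY_START>-<KEY_END>__<LSN_START>-<LSN_END>"  -> is_delta = true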
|
||||
|
||||
// Finds the max_holes largest holes, ignoring any that are smaller than MIN_HOLE_LENGTH.
|
||||
fn get_holes(path: &Path, max_holes: usize) -> Result<Vec<Hole>> {
|
||||
let file = FileBlockReader::new(VirtualFile::open(path)?);
|
||||
let summary_blk = file.read_blk(0)?;
|
||||
let actual_summary = Summary::des_prefix(summary_blk.as_ref())?;
|
||||
let tree_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
|
||||
actual_summary.index_start_blk,
|
||||
actual_summary.index_root_blk,
|
||||
file,
|
||||
);
|
||||
// min-heap (reserve space for one more element added before eviction)
|
||||
let mut heap: BinaryHeap<Hole> = BinaryHeap::with_capacity(max_holes + 1);
|
||||
let mut prev_key: Option<Key> = None;
|
||||
tree_reader.visit(
|
||||
&[0u8; DELTA_KEY_SIZE],
|
||||
VisitDirection::Forwards,
|
||||
|key, _value| {
|
||||
let curr = Key::from_slice(&key[..KEY_SIZE]);
|
||||
if let Some(prev) = prev_key {
|
||||
if curr.to_i128() - prev.to_i128() >= MIN_HOLE_LENGTH {
|
||||
heap.push(Hole(prev..curr));
|
||||
if heap.len() > max_holes {
|
||||
heap.pop(); // remove smallest hole
|
||||
}
|
||||
}
|
||||
}
|
||||
prev_key = Some(curr.next());
|
||||
true
|
||||
},
|
||||
)?;
|
||||
let mut holes = heap.into_vec();
|
||||
holes.sort_by_key(|hole| hole.0.start);
|
||||
Ok(holes)
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let args: Vec<String> = env::args().collect();
|
||||
if args.len() < 2 {
|
||||
println!("Usage: layer_map_analyzer PAGESERVER_DATA_DIR [MAX_HOLES]");
|
||||
return Ok(());
|
||||
}
|
||||
let storage_path = PathBuf::from_str(&args[1])?;
|
||||
let max_holes = if args.len() > 2 {
|
||||
args[2].parse::<usize>().unwrap()
|
||||
} else {
|
||||
DEFAULT_MAX_HOLES
|
||||
};
|
||||
|
||||
// Initialize virtual_file (file descriptor cache) and page cache, which are needed to access the layers' persistent B-trees.
|
||||
pageserver::virtual_file::init(10);
|
||||
pageserver::page_cache::init(100);
|
||||
|
||||
let mut total_delta_layers = 0usize;
|
||||
let mut total_image_layers = 0usize;
|
||||
let mut total_excess_layers = 0usize;
|
||||
for tenant in fs::read_dir(storage_path.join("tenants"))? {
|
||||
let tenant = tenant?;
|
||||
if !tenant.file_type()?.is_dir() {
|
||||
continue;
|
||||
}
|
||||
for timeline in fs::read_dir(tenant.path().join("timelines"))? {
|
||||
let timeline = timeline?;
|
||||
if !timeline.file_type()?.is_dir() {
|
||||
continue;
|
||||
}
|
||||
// Collect sorted vec of layers and count deltas
|
||||
let mut layers = Vec::new();
|
||||
let mut n_deltas = 0usize;
|
||||
|
||||
for layer in fs::read_dir(timeline.path())? {
|
||||
let layer = layer?;
|
||||
if let Some(mut layer_file) =
|
||||
parse_filename(&layer.file_name().into_string().unwrap())
|
||||
{
|
||||
if layer_file.is_delta {
|
||||
layer_file.holes = get_holes(&layer.path(), max_holes)?;
|
||||
n_deltas += 1;
|
||||
}
|
||||
layers.push(layer_file);
|
||||
}
|
||||
}
|
||||
layers.sort_by_key(|layer| layer.lsn_range.end);
|
||||
|
||||
// Count the number of holes and number of excess layers.
|
||||
// An excess layer is an image layer that gets generated only when holes in delta layers are not considered.
|
||||
let mut n_excess_layers = 0usize;
|
||||
let mut n_holes = 0usize;
|
||||
|
||||
for i in 0..layers.len() {
|
||||
if !layers[i].is_delta {
|
||||
let mut n_deltas_since_last_image = 0usize;
|
||||
let mut n_skipped = 0usize;
|
||||
let img_key_range = &layers[i].key_range;
|
||||
for j in (0..i).rev() {
|
||||
if range_overlaps(img_key_range, &layers[j].key_range) {
|
||||
if layers[j].is_delta {
|
||||
n_deltas_since_last_image += 1;
|
||||
if layers[j].skips(img_key_range) {
|
||||
n_skipped += 1;
|
||||
}
|
||||
} else {
|
||||
// An image layer is always dense, even though it doesn't contain every possible
// key value in its range: there can be no key in the storage that falls within
// the image layer's range but is missing from the image layer.
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if n_deltas_since_last_image >= 3 && n_deltas_since_last_image - n_skipped < 3 {
|
||||
// This is just an approximation: it doesn't take into account all image coverage.
// Moreover, the new layer map doesn't count total deltas, but the max stack of overlapping deltas.
|
||||
n_excess_layers += 1;
|
||||
}
|
||||
n_holes += n_skipped;
|
||||
}
|
||||
}
|
||||
println!(
|
||||
"Tenant {} timeline {} delta layers {} image layers {} excess layers {} holes {}",
|
||||
tenant.file_name().into_string().unwrap(),
|
||||
timeline.file_name().into_string().unwrap(),
|
||||
n_deltas,
|
||||
layers.len() - n_deltas,
|
||||
n_excess_layers,
|
||||
n_holes
|
||||
);
|
||||
total_delta_layers += n_deltas;
|
||||
total_image_layers += layers.len() - n_deltas;
|
||||
total_excess_layers += n_excess_layers;
|
||||
}
|
||||
}
|
||||
println!(
|
||||
"Total delta layers {} image layers {} excess layers {}",
|
||||
total_delta_layers, total_image_layers, total_excess_layers
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
@@ -7,12 +7,14 @@ use std::{env, ops::ControlFlow, path::Path, str::FromStr};
|
||||
use anyhow::{anyhow, Context};
|
||||
use clap::{Arg, ArgAction, Command};
|
||||
use fail::FailScenario;
|
||||
use metrics::launch_timestamp::{set_launch_timestamp_metric, LaunchTimestamp};
|
||||
use remote_storage::GenericRemoteStorage;
|
||||
use tracing::*;
|
||||
|
||||
use metrics::set_build_info_metric;
|
||||
use pageserver::{
|
||||
config::{defaults::*, PageServerConf},
|
||||
context::{DownloadBehavior, RequestContext},
|
||||
http, page_cache, page_service, task_mgr,
|
||||
task_mgr::TaskKind,
|
||||
task_mgr::{
|
||||
@@ -26,7 +28,7 @@ use utils::{
|
||||
logging,
|
||||
postgres_backend::AuthType,
|
||||
project_git_version,
|
||||
sentry_init::{init_sentry, release_name},
|
||||
sentry_init::init_sentry,
|
||||
signals::{self, Signal},
|
||||
tcp_listener,
|
||||
};
|
||||
@@ -51,6 +53,8 @@ fn version() -> String {
|
||||
}
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
let launch_ts = Box::leak(Box::new(LaunchTimestamp::generate()));
|
||||
|
||||
let arg_matches = cli().get_matches();
|
||||
|
||||
if arg_matches.get_flag("enabled-features") {
|
||||
@@ -84,8 +88,18 @@ fn main() -> anyhow::Result<()> {
|
||||
}
|
||||
};
|
||||
|
||||
// Initialize logging; it must be initialized before the custom panic hook is installed.
|
||||
logging::init(conf.log_format)?;
|
||||
|
||||
// mind the order required here: 1. logging, 2. panic_hook, 3. sentry.
|
||||
// disarming this hook on pageserver, because we never tear down tracing.
|
||||
logging::replace_panic_hook_with_tracing_panic_hook().forget();
|
||||
|
||||
// initialize sentry if SENTRY_DSN is provided
|
||||
let _sentry_guard = init_sentry(release_name!(), &[("node_id", &conf.id.to_string())]);
|
||||
let _sentry_guard = init_sentry(
|
||||
Some(GIT_VERSION.into()),
|
||||
&[("node_id", &conf.id.to_string())],
|
||||
);
|
||||
|
||||
let tenants_path = conf.tenants_path();
|
||||
if !tenants_path.exists() {
|
||||
@@ -104,7 +118,7 @@ fn main() -> anyhow::Result<()> {
|
||||
virtual_file::init(conf.max_file_descriptors);
|
||||
page_cache::init(conf.page_cache_size);
|
||||
|
||||
start_pageserver(conf).context("Failed to start pageserver")?;
|
||||
start_pageserver(launch_ts, conf).context("Failed to start pageserver")?;
|
||||
|
||||
scenario.teardown();
|
||||
Ok(())
|
||||
@@ -199,13 +213,21 @@ fn initialize_config(
|
||||
})
|
||||
}
|
||||
|
||||
fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
// Initialize logging
|
||||
logging::init(conf.log_format)?;
|
||||
|
||||
// Print version to the log, and expose it as a prometheus metric too.
|
||||
info!("version: {}", version());
|
||||
fn start_pageserver(
|
||||
launch_ts: &'static LaunchTimestamp,
|
||||
conf: &'static PageServerConf,
|
||||
) -> anyhow::Result<()> {
|
||||
// Print version and launch timestamp to the log,
|
||||
// and expose them as prometheus metrics.
|
||||
// A changed version string indicates changed software.
|
||||
// A changed launch timestamp indicates a pageserver restart.
|
||||
info!(
|
||||
"version: {} launch_timestamp: {}",
|
||||
version(),
|
||||
launch_ts.to_string()
|
||||
);
|
||||
set_build_info_metric(GIT_VERSION);
|
||||
set_launch_timestamp_metric(launch_ts);
|
||||
|
||||
// If any failpoints were set from FAILPOINTS environment variable,
|
||||
// print them to the log for debugging purposes
|
||||
@@ -246,7 +268,7 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
let signals = signals::install_shutdown_handlers()?;
|
||||
|
||||
// Launch broker client
|
||||
WALRECEIVER_RUNTIME.block_on(pageserver::walreceiver::init_broker_client(conf))?;
|
||||
WALRECEIVER_RUNTIME.block_on(pageserver::broker_client::init_broker_client(conf))?;
|
||||
|
||||
// Initialize authentication for incoming connections
|
||||
let auth = match &conf.auth_type {
|
||||
@@ -303,7 +325,7 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
{
|
||||
let _rt_guard = MGMT_REQUEST_RUNTIME.enter();
|
||||
|
||||
let router = http::make_router(conf, auth.clone(), remote_storage)?
|
||||
let router = http::make_router(conf, launch_ts, auth.clone(), remote_storage)?
|
||||
.build()
|
||||
.map_err(|err| anyhow!(err))?;
|
||||
let service = utils::http::RouterService::new(router).unwrap();
|
||||
@@ -325,6 +347,13 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
);
|
||||
|
||||
if let Some(metric_collection_endpoint) = &conf.metric_collection_endpoint {
|
||||
let metrics_ctx = RequestContext::todo_child(
|
||||
TaskKind::MetricsCollection,
|
||||
// This task itself shouldn't download anything.
|
||||
// The actual size calculation does need downloads, and
|
||||
// creates a child context with the right DownloadBehavior.
|
||||
DownloadBehavior::Error,
|
||||
);
|
||||
task_mgr::spawn(
|
||||
MGMT_REQUEST_RUNTIME.handle(),
|
||||
TaskKind::MetricsCollection,
|
||||
@@ -336,8 +365,10 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
pageserver::consumption_metrics::collect_metrics(
|
||||
metric_collection_endpoint,
|
||||
conf.metric_collection_interval,
|
||||
conf.cached_metric_collection_interval,
|
||||
conf.synthetic_size_calculation_interval,
|
||||
conf.id,
|
||||
metrics_ctx,
|
||||
)
|
||||
.instrument(info_span!("metrics_collection"))
|
||||
.await?;
|
||||
@@ -349,17 +380,34 @@ fn start_pageserver(conf: &'static PageServerConf) -> anyhow::Result<()> {
|
||||
|
||||
// Spawn a task to listen for libpq connections. It will spawn further tasks
|
||||
// for each connection. We created the listener earlier already.
|
||||
task_mgr::spawn(
|
||||
COMPUTE_REQUEST_RUNTIME.handle(),
|
||||
TaskKind::LibpqEndpointListener,
|
||||
None,
|
||||
None,
|
||||
"libpq endpoint listener",
|
||||
true,
|
||||
async move {
|
||||
page_service::libpq_listener_main(conf, auth, pageserver_listener, conf.auth_type).await
|
||||
},
|
||||
);
|
||||
{
|
||||
let libpq_ctx = RequestContext::todo_child(
|
||||
TaskKind::LibpqEndpointListener,
|
||||
// listener task shouldn't need to download anything. (We will
|
||||
// create separate sub-contexts for each connection, with their
|
||||
// own download behavior. This context is used only to listen and
|
||||
// accept connections.)
|
||||
DownloadBehavior::Error,
|
||||
);
|
||||
task_mgr::spawn(
|
||||
COMPUTE_REQUEST_RUNTIME.handle(),
|
||||
TaskKind::LibpqEndpointListener,
|
||||
None,
|
||||
None,
|
||||
"libpq endpoint listener",
|
||||
true,
|
||||
async move {
|
||||
page_service::libpq_listener_main(
|
||||
conf,
|
||||
auth,
|
||||
pageserver_listener,
|
||||
conf.auth_type,
|
||||
libpq_ctx,
|
||||
)
|
||||
.await
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
// All started up! Now just sit and wait for shutdown signal.
|
||||
signals.handle(|signal| match signal {
|
||||
|
||||
@@ -12,7 +12,9 @@ use anyhow::Context;
|
||||
use clap::{value_parser, Arg, Command};
|
||||
|
||||
use pageserver::{
|
||||
context::{DownloadBehavior, RequestContext},
|
||||
page_cache,
|
||||
task_mgr::TaskKind,
|
||||
tenant::{dump_layerfile_from_path, metadata::TimelineMetadata},
|
||||
virtual_file,
|
||||
};
|
||||
@@ -75,7 +77,8 @@ fn print_layerfile(path: &Path) -> anyhow::Result<()> {
|
||||
// Basic initialization of things that don't change after startup
|
||||
virtual_file::init(10);
|
||||
page_cache::init(100);
|
||||
dump_layerfile_from_path(path, true)
|
||||
let ctx = RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error);
|
||||
dump_layerfile_from_path(path, true, &ctx)
|
||||
}
|
||||
|
||||
fn handle_metadata(path: &Path, arg_matches: &clap::ArgMatches) -> Result<(), anyhow::Error> {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.