Compare commits


1 Commit

Author: Bojan Serafimov
SHA1: 781897b983
Message: Don't run perf tests in parallel
Date: 2022-10-11 10:05:25 -04:00
120 changed files with 2796 additions and 10474 deletions

View File

@@ -10,7 +10,7 @@
<!-- List everything that should be done **before** release, any issues / setting changes / etc -->
### Checklist after release
- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/219/files))
- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/120/files))
- [ ] Check [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
- [ ] Check [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
- [ ] Check [recent operation failures](https://console.neon.tech/admin/operations?action=create_timeline%2Cstart_compute%2Cstop_compute%2Csuspend_compute%2Capply_config%2Cdelete_timeline%2Cdelete_tenant%2Ccreate_branch%2Ccheck_availability&sort=updated_at&order=desc&had_retries=some)

View File

@@ -47,7 +47,7 @@ runs:
else
key=branch-$(echo ${GITHUB_REF#refs/heads/} | tr -c "[:alnum:]._-" "-")
fi
echo "KEY=${key}" >> $GITHUB_OUTPUT
echo "::set-output name=KEY::${key}"
- uses: actions/setup-java@v3
if: ${{ inputs.action == 'generate' }}
@@ -186,7 +186,7 @@ runs:
aws s3 cp --only-show-errors ./index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"
echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
echo "::set-output name=report-url::${REPORT_URL}"
- name: Release Allure lock
if: ${{ inputs.action == 'generate' && always() }}
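The hunks in this action swap between the legacy ::set-output workflow command and the newer $GITHUB_OUTPUT file for publishing step outputs. A minimal sketch of the two mechanisms side by side (the step id and output name are illustrative, not taken from the action above):

- name: Compute cache key
  id: cache-key                  # illustrative step id
  shell: bash
  run: |
    key="branch-${GITHUB_REF_NAME}"
    # newer mechanism: append NAME=value to the file GitHub exposes as $GITHUB_OUTPUT
    echo "KEY=${key}" >> "$GITHUB_OUTPUT"
    # legacy mechanism, deprecated by GitHub: print a workflow command to stdout
    echo "::set-output name=KEY::${key}"
# a later step reads the value as ${{ steps.cache-key.outputs.KEY }}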

View File

@@ -34,7 +34,7 @@ runs:
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX%$GITHUB_RUN_ATTEMPT} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
if [ -z "${S3_KEY}" ]; then
if [ "${SKIP_IF_DOES_NOT_EXIST}" = "true" ]; then
echo 'SKIPPED=true' >> $GITHUB_OUTPUT
echo '::set-output name=SKIPPED::true'
exit 0
else
echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
@@ -42,7 +42,7 @@ runs:
fi
fi
echo 'SKIPPED=false' >> $GITHUB_OUTPUT
echo '::set-output name=SKIPPED::false'
mkdir -p $(dirname $ARCHIVE)
time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} ${ARCHIVE}

View File

@@ -41,8 +41,8 @@ runs:
;;
esac
echo "api_host=${API_HOST}" >> $GITHUB_OUTPUT
echo "region_id=${REGION_ID}" >> $GITHUB_OUTPUT
echo "::set-output name=api_host::${API_HOST}"
echo "::set-output name=region_id::${REGION_ID}"
env:
ENVIRONMENT: ${{ inputs.environment }}
REGION_ID: ${{ inputs.region_id }}
@@ -72,10 +72,10 @@ runs:
dsn=$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .dsn')/main
echo "::add-mask::${dsn}"
echo "dsn=${dsn}" >> $GITHUB_OUTPUT
echo "::set-output name=dsn::${dsn}"
project_id=$(echo $project | jq --raw-output '.id')
echo "project_id=${project_id}" >> $GITHUB_OUTPUT
echo "::set-output name=project_id::${project_id}"
env:
API_KEY: ${{ inputs.api_key }}
API_HOST: ${{ steps.parse-input.outputs.api_host }}

View File

@@ -32,7 +32,7 @@ runs:
;;
esac
echo "api_host=${API_HOST}" >> $GITHUB_OUTPUT
echo "::set-output name=api_host::${API_HOST}"
env:
ENVIRONMENT: ${{ inputs.environment }}

View File

@@ -2,6 +2,3 @@ zenith_install.tar.gz
.zenith_current_version
neon_install.tar.gz
.neon_current_version
collections/*
!collections/.keep

View File

@@ -3,7 +3,6 @@
localhost_warning = False
host_key_checking = False
timeout = 30
collections_paths = ./collections
[ssh_connection]
ssh_args = -F ./ansible.ssh.cfg

View File

@@ -1,7 +1,7 @@
- name: Upload Neon binaries
hosts: storage
gather_facts: False
remote_user: "{{ remote_user }}"
remote_user: admin
tasks:
@@ -14,8 +14,7 @@
- safekeeper
- name: inform about versions
debug:
msg: "Version to deploy - {{ current_version }}"
debug: msg="Version to deploy - {{ current_version }}"
tags:
- pageserver
- safekeeper
@@ -36,7 +35,7 @@
- name: Deploy pageserver
hosts: pageservers
gather_facts: False
remote_user: "{{ remote_user }}"
remote_user: admin
tasks:
@@ -64,29 +63,15 @@
tags:
- pageserver
- name: read the existing remote pageserver config
ansible.builtin.slurp:
src: /storage/pageserver/data/pageserver.toml
register: _remote_ps_config
tags:
- pageserver
- name: parse the existing pageserver configuration
ansible.builtin.set_fact:
_existing_ps_config: "{{ _remote_ps_config['content'] | b64decode | sivel.toiletwater.from_toml }}"
tags:
- pageserver
- name: construct the final pageserver configuration dict
ansible.builtin.set_fact:
pageserver_config: "{{ pageserver_config_stub | combine({'id': _existing_ps_config.id }) }}"
tags:
- pageserver
- name: template the pageserver config
template:
src: templates/pageserver.toml.j2
dest: /storage/pageserver/data/pageserver.toml
- name: update remote storage (s3) config
lineinfile:
path: /storage/pageserver/data/pageserver.toml
line: "{{ item }}"
loop:
- "[remote_storage]"
- "bucket_name = '{{ bucket_name }}'"
- "bucket_region = '{{ bucket_region }}'"
- "prefix_in_bucket = '{{ inventory_hostname }}'"
become: true
tags:
- pageserver
@@ -124,7 +109,7 @@
- name: Deploy safekeeper
hosts: safekeepers
gather_facts: False
remote_user: "{{ remote_user }}"
remote_user: admin
tasks:
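One side of this playbook reads the existing pageserver.toml with slurp, parses it with the sivel.toiletwater from_toml filter, merges the preserved id into the pageserver_config_stub variable from the inventory, and re-renders the file from a template (presumably so the node keeps the id it was registered with); the other side appends [remote_storage] settings with lineinfile. A condensed sketch of the read-merge-render pattern, assuming the sivel.toiletwater collection is installed and the paths used above:

- name: read the existing remote pageserver config
  ansible.builtin.slurp:
    src: /storage/pageserver/data/pageserver.toml
  register: _remote_ps_config

- name: parse the existing config to recover the node id
  ansible.builtin.set_fact:
    _existing_ps_config: "{{ _remote_ps_config.content | b64decode | sivel.toiletwater.from_toml }}"

- name: merge the preserved id into the config stub from the inventory
  ansible.builtin.set_fact:
    pageserver_config: "{{ pageserver_config_stub | combine({'id': _existing_ps_config.id}) }}"

- name: render the merged config back onto the host
  ansible.builtin.template:
    src: templates/pageserver.toml.j2
    dest: /storage/pageserver/data/pageserver.toml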

.github/ansible/neon-stress.hosts (vendored normal file, 20 lines)
View File

@@ -0,0 +1,20 @@
[pageservers]
neon-stress-ps-1 console_region_id=1
neon-stress-ps-2 console_region_id=1
[safekeepers]
neon-stress-sk-1 console_region_id=1
neon-stress-sk-2 console_region_id=1
neon-stress-sk-3 console_region_id=1
[storage:children]
pageservers
safekeepers
[storage:vars]
env_name = neon-stress
console_mgmt_base_url = http://neon-stress-console.local
bucket_name = neon-storage-ireland
bucket_region = eu-west-1
etcd_endpoints = neon-stress-etcd.local:2379
safekeeper_enable_s3_offload = false

View File

@@ -1,31 +0,0 @@
storage:
vars:
bucket_name: neon-storage-ireland
bucket_region: eu-west-1
console_mgmt_base_url: http://neon-stress-console.local
env_name: neon-stress
etcd_endpoints: neon-stress-etcd.local:2379
safekeeper_enable_s3_offload: 'false'
pageserver_config_stub:
pg_distrib_dir: /usr/local
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "{{ inventory_hostname }}"
hostname_suffix: ".local"
remote_user: admin
children:
pageservers:
hosts:
neon-stress-ps-1:
console_region_id: aws-eu-west-1
neon-stress-ps-2:
console_region_id: aws-eu-west-1
safekeepers:
hosts:
neon-stress-sk-1:
console_region_id: aws-eu-west-1
neon-stress-sk-2:
console_region_id: aws-eu-west-1
neon-stress-sk-3:
console_region_id: aws-eu-west-1

.github/ansible/production.hosts (vendored normal file, 20 lines)
View File

@@ -0,0 +1,20 @@
[pageservers]
#zenith-1-ps-1 console_region_id=1
zenith-1-ps-2 console_region_id=1
zenith-1-ps-3 console_region_id=1
[safekeepers]
zenith-1-sk-1 console_region_id=1
zenith-1-sk-2 console_region_id=1
zenith-1-sk-3 console_region_id=1
[storage:children]
pageservers
safekeepers
[storage:vars]
env_name = prod-1
console_mgmt_base_url = http://console-release.local
bucket_name = zenith-storage-oregon
bucket_region = us-west-2
etcd_endpoints = zenith-1-etcd.local:2379

View File

@@ -1,33 +0,0 @@
---
storage:
vars:
env_name: prod-1
console_mgmt_base_url: http://console-release.local
bucket_name: zenith-storage-oregon
bucket_region: us-west-2
etcd_endpoints: zenith-1-etcd.local:2379
pageserver_config_stub:
pg_distrib_dir: /usr/local
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "{{ inventory_hostname }}"
hostname_suffix: ".local"
remote_user: admin
children:
pageservers:
hosts:
zenith-1-ps-2:
console_region_id: aws-us-west-2
zenith-1-ps-3:
console_region_id: aws-us-west-2
safekeepers:
hosts:
zenith-1-sk-1:
console_region_id: aws-us-west-2
zenith-1-sk-2:
console_region_id: aws-us-west-2
zenith-1-sk-3:
console_region_id: aws-us-west-2

View File

@@ -12,19 +12,18 @@ cat <<EOF | tee /tmp/payload
"version": 1,
"host": "${HOST}",
"port": 6400,
"region_id": "{{ console_region_id }}",
"region_id": {{ console_region_id }},
"instance_id": "${INSTANCE_ID}",
"http_host": "${HOST}",
"http_port": 9898,
"active": false
"http_port": 9898
}
EOF
# check if pageserver already registered or not
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/${INSTANCE_ID} -o /dev/null; then
if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/pageservers/${INSTANCE_ID} -o /dev/null; then
# not registered, so register it now
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers -d@/tmp/payload | jq -r '.id')
ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/pageservers -d@/tmp/payload | jq -r '.ID')
# init pageserver
sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data

View File

@@ -14,18 +14,18 @@ cat <<EOF | tee /tmp/payload
"host": "${HOST}",
"port": 6500,
"http_port": 7676,
"region_id": "{{ console_region_id }}",
"region_id": {{ console_region_id }},
"instance_id": "${INSTANCE_ID}",
"availability_zone_id": "${AZ_ID}",
"active": false
"availability_zone_id": "${AZ_ID}"
}
EOF
# check if safekeeper already registered or not
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/${INSTANCE_ID} -o /dev/null; then
if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/safekeepers/${INSTANCE_ID} -o /dev/null; then
# not registered, so register it now
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers -d@/tmp/payload | jq -r '.id')
ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/safekeepers -d@/tmp/payload | jq -r '.ID')
# init safekeeper
sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
fi

View File

@@ -1,3 +0,0 @@
ansible_connection: aws_ssm
ansible_aws_ssm_bucket_name: neon-dev-bucket
ansible_python_interpreter: /usr/bin/python3

.github/ansible/staging.hosts (vendored normal file, 25 lines)
View File

@@ -0,0 +1,25 @@
[pageservers]
#zenith-us-stage-ps-1 console_region_id=27
zenith-us-stage-ps-2 console_region_id=27
zenith-us-stage-ps-3 console_region_id=27
zenith-us-stage-ps-4 console_region_id=27
zenith-us-stage-test-ps-1 console_region_id=28
[safekeepers]
zenith-us-stage-sk-4 console_region_id=27
zenith-us-stage-sk-5 console_region_id=27
zenith-us-stage-sk-6 console_region_id=27
zenith-us-stage-test-sk-1 console_region_id=28
zenith-us-stage-test-sk-2 console_region_id=28
zenith-us-stage-test-sk-3 console_region_id=28
[storage:children]
pageservers
safekeepers
[storage:vars]
env_name = us-stage
console_mgmt_base_url = http://console-staging.local
bucket_name = zenith-staging-storage-us-east-1
bucket_region = us-east-1
etcd_endpoints = zenith-us-stage-etcd.local:2379

View File

@@ -1,34 +0,0 @@
storage:
vars:
bucket_name: zenith-staging-storage-us-east-1
bucket_region: us-east-1
console_mgmt_base_url: http://console-staging.local
env_name: us-stage
etcd_endpoints: zenith-us-stage-etcd.local:2379
pageserver_config_stub:
pg_distrib_dir: /usr/local
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "{{ inventory_hostname }}"
hostname_suffix: ".local"
remote_user: admin
children:
pageservers:
hosts:
zenith-us-stage-ps-2:
console_region_id: aws-us-east-1
zenith-us-stage-ps-3:
console_region_id: aws-us-east-1
zenith-us-stage-ps-4:
console_region_id: aws-us-east-1
safekeepers:
hosts:
zenith-us-stage-sk-4:
console_region_id: aws-us-east-1
zenith-us-stage-sk-5:
console_region_id: aws-us-east-1
zenith-us-stage-sk-6:
console_region_id: aws-us-east-1

View File

@@ -1,32 +0,0 @@
storage:
vars:
bucket_name: neon-staging-storage-us-east-2
bucket_region: us-east-2
console_mgmt_base_url: http://console-staging.local
env_name: us-stage
etcd_endpoints: etcd-0.us-east-2.aws.neon.build:2379
pageserver_config_stub:
pg_distrib_dir: /usr/local
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "pageserver/v1"
hostname_suffix: ""
remote_user: ssm-user
ansible_aws_ssm_region: us-east-2
console_region_id: aws-us-east-2
children:
pageservers:
hosts:
pageserver-0.us-east-2.aws.neon.build:
ansible_host: i-0c3e70929edb5d691
safekeepers:
hosts:
safekeeper-0.us-east-2.aws.neon.build:
ansible_host: i-027662bd552bf5db0
safekeeper-1.us-east-2.aws.neon.build:
ansible_host: i-0171efc3604a7b907
safekeeper-2.us-east-2.aws.neon.build:
ansible_host: i-0de0b03a51676a6ce

View File

@@ -1,5 +1,5 @@
[Unit]
Description=Neon pageserver
Description=Zenith pageserver
After=network.target auditd.service
[Service]

View File

@@ -1,12 +1,12 @@
[Unit]
Description=Neon safekeeper
Description=Zenith safekeeper
After=network.target auditd.service
[Service]
Type=simple
User=safekeeper
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib
ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}{{ hostname_suffix }}:6500 --listen-http {{ inventory_hostname }}{{ hostname_suffix }}:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ env_name }}/wal"}'
ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ env_name }}/wal"}'
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT

View File

@@ -1 +0,0 @@
{{ pageserver_config | sivel.toiletwater.to_toml }}

View File

@@ -138,31 +138,23 @@ jobs:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
pgbench-compare:
strategy:
fail-fast: false
matrix:
# neon-captest-new: Run pgbench in a freshly created project
# neon-captest-reuse: Same, but reusing existing project
# neon-captest-prefetch: Same, with prefetching enabled (new project)
platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch ]
db_size: [ 10gb ]
include:
- platform: neon-captest-new
db_size: 50gb
- platform: neon-captest-prefetch
db_size: 50gb
- platform: rds-aurora
db_size: 50gb
env:
TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
TEST_PG_BENCH_SCALES_MATRIX: "10gb"
POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
DEFAULT_PG_VERSION: 14
TEST_OUTPUT: /tmp/test_output
BUILD_TYPE: remote
SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
PLATFORM: ${{ matrix.platform }}
strategy:
fail-fast: false
max-parallel: 1
matrix:
# neon-captest-new: Run pgbench in a freshly created project
# neon-captest-reuse: Same, but reusing existing project
# neon-captest-prefetch: Same, with prefetching enabled (new project)
platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch, rds-aurora ]
runs-on: dev
container:
@@ -187,7 +179,7 @@ jobs:
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
- name: Create Neon Project
if: contains(fromJson('["neon-captest-new", "neon-captest-prefetch"]'), matrix.platform)
if: matrix.platform != 'neon-captest-reuse'
id: create-neon-project
uses: ./.github/actions/neon-project-create
with:
@@ -213,9 +205,11 @@ jobs:
;;
esac
echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
echo "::set-output name=connstr::${CONNSTR}"
psql ${CONNSTR} -c "SELECT version();"
env:
PLATFORM: ${{ matrix.platform }}
- name: Set database options
if: matrix.platform == 'neon-captest-prefetch'
@@ -234,6 +228,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
env:
PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -247,6 +242,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
env:
PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -260,6 +256,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
env:
PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -272,7 +269,7 @@ jobs:
build_type: ${{ env.BUILD_TYPE }}
- name: Delete Neon Project
if: ${{ steps.create-neon-project.outputs.project_id && always() }}
if: ${{ matrix.platform != 'neon-captest-reuse' && always() }}
uses: ./.github/actions/neon-project-delete
with:
environment: dev
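The strategy change in this pgbench-compare job is what the commit message ("Don't run perf tests in parallel") refers to: one side of the hunk sets max-parallel: 1, which makes GitHub Actions run the matrix entries one at a time rather than concurrently. A minimal sketch of the effect (steps trimmed to a placeholder):

pgbench-compare:
  strategy:
    fail-fast: false    # keep running the remaining platforms if one of them fails
    max-parallel: 1     # at most one matrix job at a time, so the benchmarks run sequentially
    matrix:
      platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch, rds-aurora ]
  runs-on: dev
  steps:
    - name: Run pgbench against the selected platform    # placeholder step
      run: echo "benchmarking on ${{ matrix.platform }}"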

View File

@@ -35,12 +35,12 @@ jobs:
echo ref:$GITHUB_REF_NAME
echo rev:$(git rev-list --count HEAD)
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
echo "tag=$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
echo "::set-output name=tag::$(git rev-list --count HEAD)"
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
echo "::set-output name=tag::release-$(git rev-list --count HEAD)"
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
echo "::set-output name=tag::$GITHUB_RUN_ID"
fi
shell: bash
id: build-tag
@@ -78,12 +78,12 @@ jobs:
- name: Set pg 14 revision for caching
id: pg_v14_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v14)
shell: bash -euxo pipefail {0}
- name: Set pg 15 revision for caching
id: pg_v15_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v15)
shell: bash -euxo pipefail {0}
# Set some environment variables used by all the steps.
@@ -622,8 +622,6 @@ jobs:
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/neon:latest
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-tools:latest
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-node:latest
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-node-v14:latest
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-node-v15:latest
- name: Configure Docker Hub login
run: |
@@ -671,12 +669,12 @@ jobs:
- id: set-matrix
run: |
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
STAGING='{"env_name": "staging", "proxy_job": "neon-proxy", "proxy_config": "staging.proxy", "kubeconfig_secret": "STAGING_KUBECONFIG_DATA", "console_api_key_secret": "NEON_STAGING_API_KEY"}'
NEON_STRESS='{"env_name": "neon-stress", "proxy_job": "neon-stress-proxy", "proxy_config": "neon-stress.proxy", "kubeconfig_secret": "NEON_STRESS_KUBECONFIG_DATA", "console_api_key_secret": "NEON_CAPTEST_API_KEY"}'
echo "include=[$STAGING, $NEON_STRESS]" >> $GITHUB_OUTPUT
STAGING='{"env_name": "staging", "proxy_job": "neon-proxy", "proxy_config": "staging.proxy", "kubeconfig_secret": "STAGING_KUBECONFIG_DATA"}'
NEON_STRESS='{"env_name": "neon-stress", "proxy_job": "neon-stress-proxy", "proxy_config": "neon-stress.proxy", "kubeconfig_secret": "NEON_STRESS_KUBECONFIG_DATA"}'
echo "::set-output name=include::[$STAGING, $NEON_STRESS]"
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
PRODUCTION='{"env_name": "production", "proxy_job": "neon-proxy", "proxy_config": "production.proxy", "kubeconfig_secret": "PRODUCTION_KUBECONFIG_DATA", "console_api_key_secret": "NEON_PRODUCTION_API_KEY"}'
echo "include=[$PRODUCTION]" >> $GITHUB_OUTPUT
PRODUCTION='{"env_name": "production", "proxy_job": "neon-proxy", "proxy_config": "production.proxy", "kubeconfig_secret": "PRODUCTION_KUBECONFIG_DATA"}'
echo "::set-output name=include::[$PRODUCTION]"
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
exit 1
@@ -712,7 +710,7 @@ jobs:
- name: Setup ansible
run: |
export PATH="/root/.local/bin:$PATH"
pip install --progress-bar off --user ansible boto3 toml
pip install --progress-bar off --user ansible boto3
- name: Redeploy
run: |
@@ -734,48 +732,8 @@ jobs:
chmod 0600 ssh-key
ssh-add ssh-key
rm -f ssh-key ssh-key-cert.pub
ansible-galaxy collection install sivel.toiletwater
ansible-playbook deploy.yaml -i ${{ matrix.env_name }}.hosts.yaml -e CONSOLE_API_TOKEN=${{ secrets[matrix.console_api_key_secret] }}
rm -f neon_install.tar.gz .neon_current_version
deploy-new:
runs-on: dev
container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
if: |
(github.ref_name == 'main') &&
github.event_name != 'workflow_dispatch'
defaults:
run:
shell: bash
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
submodules: true
fetch-depth: 0
- name: Redeploy
run: |
export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
cd "$(pwd)/.github/ansible"
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
./get_binaries.sh
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
RELEASE=true ./get_binaries.sh
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
exit 1
fi
ansible-galaxy collection install sivel.toiletwater
ansible-playbook deploy.yaml -i staging.us-east-2.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{secrets.NEON_STAGING_API_KEY}}
ansible-playbook deploy.yaml -i ${{ matrix.env_name }}.hosts
rm -f neon_install.tar.gz .neon_current_version
deploy-proxy:
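The set-matrix step in this workflow picks a JSON include list per branch and publishes it as a step output, which a downstream deploy job can then expand with fromJson. A minimal sketch of that dynamic-matrix pattern (runner labels and the include payload are simplified for illustration):

jobs:
  calculate-deploy-targets:
    runs-on: ubuntu-latest                 # illustrative runner
    outputs:
      include: ${{ steps.set-matrix.outputs.include }}
    steps:
      - id: set-matrix
        run: |
          if [[ "$GITHUB_REF_NAME" == "main" ]]; then
            echo 'include=[{"env_name": "staging"}, {"env_name": "neon-stress"}]' >> "$GITHUB_OUTPUT"
          elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
            echo 'include=[{"env_name": "production"}]' >> "$GITHUB_OUTPUT"
          else
            echo "GITHUB_REF_NAME '$GITHUB_REF_NAME' is not 'main' or 'release'"
            exit 1
          fi

  deploy:
    needs: calculate-deploy-targets
    strategy:
      matrix:
        include: ${{ fromJson(needs.calculate-deploy-targets.outputs.include) }}
    runs-on: ubuntu-latest                 # illustrative runner
    steps:
      - run: echo "deploying to ${{ matrix.env_name }}"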

View File

@@ -36,7 +36,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
submodules: true
fetch-depth: 2
@@ -56,12 +56,12 @@ jobs:
- name: Set pg 14 revision for caching
id: pg_v14_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v14)
shell: bash -euxo pipefail {0}
- name: Set pg 15 revision for caching
id: pg_v15_rev
run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v15)
shell: bash -euxo pipefail {0}
- name: Cache postgres v14 build

Cargo.lock (generated, 867 changed lines)

File diff suppressed because it is too large

View File

@@ -1,14 +1,3 @@
# 'named-profiles' feature was stabilized in cargo 1.57. This line makes the
# build work with older cargo versions.
#
# We have this because as of this writing, the latest cargo Debian package
# that's available is 1.56. (Confusingly, the Debian package version number
# is 0.57, whereas 'cargo --version' says 1.56.)
#
# See https://tracker.debian.org/pkg/cargo for the current status of the
# package. When that gets updated, we can remove this.
cargo-features = ["named-profiles"]
[workspace]
members = [
"compute_tools",

View File

@@ -71,12 +71,10 @@ RUN apt update && \
RUN apt update && \
apt install -y --no-install-recommends -t testing binutils
# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
tar xvzf v3.1.4.tar.gz && \
cd plv8-3.1.4 && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
rm -rf /plv8-* && \
@@ -118,7 +116,8 @@ RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3
#
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
# plv8 still sometimes crashes during the creation
# COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/

View File

@@ -76,12 +76,10 @@ RUN apt update && \
RUN apt update && \
apt install -y --no-install-recommends -t testing binutils
# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
tar xvzf v3.1.4.tar.gz && \
cd plv8-3.1.4 && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
rm -rf /plv8-* && \
@@ -123,7 +121,8 @@ RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3
#
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
# plv8 still sometimes crashes during the creation
# COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/

View File

@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
anyhow = "1.0"
chrono = "0.4"
clap = "4.0"
clap = "3.0"
env_logger = "0.9"
futures = "0.3.13"
hyper = { version = "0.14", features = ["full"] }

View File

@@ -51,19 +51,53 @@ fn main() -> Result<()> {
// TODO: re-use `utils::logging` later
init_logger(DEFAULT_LOG_LEVEL)?;
let matches = cli().get_matches();
// Env variable is set by `cargo`
let version: Option<&str> = option_env!("CARGO_PKG_VERSION");
let matches = clap::App::new("compute_ctl")
.version(version.unwrap_or("unknown"))
.arg(
Arg::new("connstr")
.short('C')
.long("connstr")
.value_name("DATABASE_URL")
.required(true),
)
.arg(
Arg::new("pgdata")
.short('D')
.long("pgdata")
.value_name("DATADIR")
.required(true),
)
.arg(
Arg::new("pgbin")
.short('b')
.long("pgbin")
.value_name("POSTGRES_PATH"),
)
.arg(
Arg::new("spec")
.short('s')
.long("spec")
.value_name("SPEC_JSON"),
)
.arg(
Arg::new("spec-path")
.short('S')
.long("spec-path")
.value_name("SPEC_PATH"),
)
.get_matches();
let pgdata = matches
.get_one::<String>("pgdata")
.expect("PGDATA path is required");
let pgdata = matches.value_of("pgdata").expect("PGDATA path is required");
let connstr = matches
.get_one::<String>("connstr")
.value_of("connstr")
.expect("Postgres connection string is required");
let spec = matches.get_one::<String>("spec");
let spec_path = matches.get_one::<String>("spec-path");
let spec = matches.value_of("spec");
let spec_path = matches.value_of("spec-path");
// Try to use just 'postgres' if no path is provided
let pgbin = matches.get_one::<String>("pgbin").unwrap();
let pgbin = matches.value_of("pgbin").unwrap_or("postgres");
let spec: ComputeSpec = match spec {
// First, try to get cluster spec from the cli argument
@@ -139,48 +173,3 @@ fn main() -> Result<()> {
}
}
}
fn cli() -> clap::Command {
// Env variable is set by `cargo`
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
clap::Command::new("compute_ctl")
.version(version)
.arg(
Arg::new("connstr")
.short('C')
.long("connstr")
.value_name("DATABASE_URL")
.required(true),
)
.arg(
Arg::new("pgdata")
.short('D')
.long("pgdata")
.value_name("DATADIR")
.required(true),
)
.arg(
Arg::new("pgbin")
.short('b')
.long("pgbin")
.default_value("postgres")
.value_name("POSTGRES_PATH"),
)
.arg(
Arg::new("spec")
.short('s')
.long("spec")
.value_name("SPEC_JSON"),
)
.arg(
Arg::new("spec-path")
.short('S')
.long("spec-path")
.value_name("SPEC_PATH"),
)
}
#[test]
fn verify_cli() {
cli().debug_assert()
}

View File

@@ -8,10 +8,11 @@ use std::process::Child;
use std::time::{Duration, Instant};
use anyhow::{bail, Result};
use notify::{RecursiveMode, Watcher};
use postgres::{Client, Transaction};
use serde::Deserialize;
use notify::{RecursiveMode, Watcher};
const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds
/// Rust representation of Postgres role info with only those fields
@@ -168,7 +169,7 @@ impl Database {
/// it may require a proper quoting too.
pub fn to_pg_options(&self) -> String {
let mut params: String = self.options.as_pg_options();
write!(params, " OWNER {}", &self.owner.pg_quote())
write!(params, " OWNER {}", &self.owner.quote())
.expect("String is documented to not to error during write operations");
params
@@ -179,17 +180,18 @@ impl Database {
/// intended to be used for DB / role names.
pub type PgIdent = String;
/// Generic trait used to provide quoting / encoding for strings used in the
/// Postgres SQL queries and DATABASE_URL.
pub trait Escaping {
fn pg_quote(&self) -> String;
/// Generic trait used to provide quoting for strings used in the
/// Postgres SQL queries. Currently used only to implement quoting
/// of identifiers, but could be used for literals in the future.
pub trait PgQuote {
fn quote(&self) -> String;
}
impl Escaping for PgIdent {
impl PgQuote for PgIdent {
/// This is intended to mimic Postgres quote_ident(), but for simplicity it
/// always quotes provided string with `""` and escapes every `"`.
/// **Not idempotent**, i.e. if string is already escaped it will be escaped again.
fn pg_quote(&self) -> String {
/// always quotes provided string with `""` and escapes every `"`. Not idempotent,
/// i.e. if string is already escaped it will be escaped again.
fn quote(&self) -> String {
let result = format!("\"{}\"", self.replace('"', "\"\""));
result
}

View File

@@ -1,9 +1,7 @@
use std::path::Path;
use std::str::FromStr;
use anyhow::Result;
use log::{info, log_enabled, warn, Level};
use postgres::config::Config;
use postgres::{Client, NoTls};
use serde::Deserialize;
@@ -117,8 +115,8 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
if existing_roles.iter().any(|r| r.name == op.name) {
let query: String = format!(
"ALTER ROLE {} RENAME TO {}",
op.name.pg_quote(),
new_name.pg_quote()
op.name.quote(),
new_name.quote()
);
warn!("renaming role '{}' to '{}'", op.name, new_name);
@@ -164,7 +162,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
}
if update_role {
let mut query: String = format!("ALTER ROLE {} ", name.pg_quote());
let mut query: String = format!("ALTER ROLE {} ", name.quote());
info_print!(" -> update");
query.push_str(&role.to_pg_options());
@@ -172,7 +170,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
}
} else {
info!("role name: '{}'", &name);
let mut query: String = format!("CREATE ROLE {} ", name.pg_quote());
let mut query: String = format!("CREATE ROLE {} ", name.quote());
info!("role create query: '{}'", &query);
info_print!(" -> create");
@@ -181,7 +179,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
let grant_query = format!(
"GRANT pg_read_all_data, pg_write_all_data TO {}",
name.pg_quote()
name.quote()
);
xact.execute(grant_query.as_str(), &[])?;
info!("role grant query: '{}'", &grant_query);
@@ -217,7 +215,7 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
// We do not check either role exists or not,
// Postgres will take care of it for us
if op.action == "delete_role" {
let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.pg_quote());
let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.quote());
warn!("deleting role '{}'", &op.name);
xact.execute(query.as_str(), &[])?;
@@ -232,16 +230,17 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
for db in &node.spec.cluster.databases {
if db.owner != *role_name {
let mut conf = Config::from_str(node.connstr.as_str())?;
conf.dbname(&db.name);
let mut connstr = node.connstr.clone();
// database name is always the last and the only component of the path
connstr.set_path(&db.name);
let mut client = conf.connect(NoTls)?;
let mut client = Client::connect(connstr.as_str(), NoTls)?;
// This will reassign all dependent objects to the db owner
let reassign_query = format!(
"REASSIGN OWNED BY {} TO {}",
role_name.pg_quote(),
db.owner.pg_quote()
role_name.quote(),
db.owner.quote()
);
info!(
"reassigning objects owned by '{}' in db '{}' to '{}'",
@@ -250,7 +249,7 @@ fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()>
client.simple_query(&reassign_query)?;
// This now will only drop privileges of the role
let drop_query = format!("DROP OWNED BY {}", role_name.pg_quote());
let drop_query = format!("DROP OWNED BY {}", role_name.quote());
client.simple_query(&drop_query)?;
}
}
@@ -280,7 +279,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
// We do not check either DB exists or not,
// Postgres will take care of it for us
"delete_db" => {
let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.pg_quote());
let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.quote());
warn!("deleting database '{}'", &op.name);
client.execute(query.as_str(), &[])?;
@@ -292,8 +291,8 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
if existing_dbs.iter().any(|r| r.name == op.name) {
let query: String = format!(
"ALTER DATABASE {} RENAME TO {}",
op.name.pg_quote(),
new_name.pg_quote()
op.name.quote(),
new_name.quote()
);
warn!("renaming database '{}' to '{}'", op.name, new_name);
@@ -321,7 +320,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
// XXX: db owner name is returned as quoted string from Postgres,
// when quoting is needed.
let new_owner = if r.owner.starts_with('"') {
db.owner.pg_quote()
db.owner.quote()
} else {
db.owner.clone()
};
@@ -329,15 +328,15 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
if new_owner != r.owner {
let query: String = format!(
"ALTER DATABASE {} OWNER TO {}",
name.pg_quote(),
db.owner.pg_quote()
name.quote(),
db.owner.quote()
);
info_print!(" -> update");
client.execute(query.as_str(), &[])?;
}
} else {
let mut query: String = format!("CREATE DATABASE {} ", name.pg_quote());
let mut query: String = format!("CREATE DATABASE {} ", name.quote());
info_print!(" -> create");
query.push_str(&db.to_pg_options());
@@ -367,7 +366,7 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
.cluster
.roles
.iter()
.map(|r| r.name.pg_quote())
.map(|r| r.name.quote())
.collect::<Vec<_>>();
for db in &spec.cluster.databases {
@@ -375,7 +374,7 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
let query: String = format!(
"GRANT CREATE ON DATABASE {} TO {}",
dbname.pg_quote(),
dbname.quote(),
roles.join(", ")
);
info!("grant query {}", &query);
@@ -386,11 +385,12 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
// Do some per-database access adjustments. We'd better do this at db creation time,
// but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
// atomically.
let mut db_connstr = node.connstr.clone();
for db in &node.spec.cluster.databases {
let mut conf = Config::from_str(node.connstr.as_str())?;
conf.dbname(&db.name);
// database name is always the last and the only component of the path
db_connstr.set_path(&db.name);
let mut db_client = conf.connect(NoTls)?;
let mut db_client = Client::connect(db_connstr.as_str(), NoTls)?;
// This will only change ownership on the schema itself, not the objects
// inside it. Without it owner of the `public` schema will be `cloud_admin`
@@ -419,15 +419,9 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
END IF;\n\
END\n\
$$;",
db.owner.pg_quote()
db.owner.quote()
);
db_client.simple_query(&alter_query)?;
// Explicitly grant CREATE ON SCHEMA PUBLIC to the web_access user.
// This is needed since postgres 15, where this privilege is removed by default.
let grant_query: String = "GRANT CREATE ON SCHEMA public TO web_access".to_string();
info!("grant query for db {} : {}", &db.name, &grant_query);
db_client.simple_query(&grant_query)?;
}
Ok(())

View File

@@ -33,9 +33,9 @@ mod pg_helpers_tests {
}
#[test]
fn ident_pg_quote() {
fn quote_ident() {
let ident: PgIdent = PgIdent::from("\"name\";\\n select 1;");
assert_eq!(ident.pg_quote(), "\"\"\"name\"\";\\n select 1;\"");
assert_eq!(ident.quote(), "\"\"\"name\"\";\\n select 1;\"");
}
}

View File

@@ -4,19 +4,19 @@ version = "0.1.0"
edition = "2021"
[dependencies]
clap = "4.0"
comfy-table = "6.1"
clap = "3.0"
comfy-table = "5.0.1"
git-version = "0.3.5"
tar = "0.4.38"
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev = "d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
serde = { version = "1.0", features = ["derive"] }
serde_with = "2.0"
serde_with = "1.12.0"
toml = "0.5"
once_cell = "1.13.0"
regex = "1"
anyhow = "1.0"
thiserror = "1"
nix = "0.25"
nix = "0.23"
reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }
# Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api

View File

@@ -6,7 +6,7 @@
//! rely on `neon_local` to set up the environment for each test.
//!
use anyhow::{anyhow, bail, Context, Result};
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command};
use clap::{App, AppSettings, Arg, ArgMatches};
use control_plane::compute::ComputeControlPlane;
use control_plane::local_env::{EtcdBroker, LocalEnv};
use control_plane::safekeeper::SafekeeperNode;
@@ -85,7 +85,212 @@ struct TimelineTreeEl {
// * Providing CLI api to the pageserver
// * TODO: export/import to/from usual postgres
fn main() -> Result<()> {
let matches = cli().get_matches();
let branch_name_arg = Arg::new("branch-name")
.long("branch-name")
.takes_value(true)
.help("Name of the branch to be created or used as an alias for other services")
.required(false);
let pg_node_arg = Arg::new("node").help("Postgres node name").required(false);
let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false);
let tenant_id_arg = Arg::new("tenant-id")
.long("tenant-id")
.help("Tenant id. Represented as a hexadecimal string 32 symbols length")
.takes_value(true)
.required(false);
let timeline_id_arg = Arg::new("timeline-id")
.long("timeline-id")
.help("Timeline id. Represented as a hexadecimal string 32 symbols length")
.takes_value(true)
.required(false);
let pg_version_arg = Arg::new("pg-version")
.long("pg-version")
.help("Postgres version to use for the initial tenant")
.required(false)
.takes_value(true)
.default_value(DEFAULT_PG_VERSION);
let port_arg = Arg::new("port")
.long("port")
.required(false)
.value_name("port");
let stop_mode_arg = Arg::new("stop-mode")
.short('m')
.takes_value(true)
.possible_values(&["fast", "immediate"])
.help("If 'immediate', don't flush repository data at shutdown")
.required(false)
.value_name("stop-mode");
let pageserver_config_args = Arg::new("pageserver-config-override")
.long("pageserver-config-override")
.takes_value(true)
.number_of_values(1)
.multiple_occurrences(true)
.help("Additional pageserver's configuration options or overrides, refer to pageserver's 'config-override' CLI parameter docs for more")
.required(false);
let lsn_arg = Arg::new("lsn")
.long("lsn")
.help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.")
.takes_value(true)
.required(false);
let matches = App::new("Neon CLI")
.setting(AppSettings::ArgRequiredElseHelp)
.version(GIT_VERSION)
.subcommand(
App::new("init")
.about("Initialize a new Neon repository")
.arg(pageserver_config_args.clone())
.arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline"))
.arg(
Arg::new("config")
.long("config")
.required(false)
.value_name("config"),
)
.arg(pg_version_arg.clone())
)
.subcommand(
App::new("timeline")
.about("Manage timelines")
.subcommand(App::new("list")
.about("List all timelines, available to this pageserver")
.arg(tenant_id_arg.clone()))
.subcommand(App::new("branch")
.about("Create a new timeline, using another timeline as a base, copying its data")
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(Arg::new("ancestor-branch-name").long("ancestor-branch-name").takes_value(true)
.help("Use last Lsn of another timeline (and its data) as base when creating the new timeline. The timeline gets resolved by its branch name.").required(false))
.arg(Arg::new("ancestor-start-lsn").long("ancestor-start-lsn").takes_value(true)
.help("When using another timeline as base, use a specific Lsn in it instead of the latest one").required(false)))
.subcommand(App::new("create")
.about("Create a new blank timeline")
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(pg_version_arg.clone())
)
.subcommand(App::new("import")
.about("Import timeline from basebackup directory")
.arg(tenant_id_arg.clone())
.arg(timeline_id_arg.clone())
.arg(Arg::new("node-name").long("node-name").takes_value(true)
.help("Name to assign to the imported timeline"))
.arg(Arg::new("base-tarfile").long("base-tarfile").takes_value(true)
.help("Basebackup tarfile to import"))
.arg(Arg::new("base-lsn").long("base-lsn").takes_value(true)
.help("Lsn the basebackup starts at"))
.arg(Arg::new("wal-tarfile").long("wal-tarfile").takes_value(true)
.help("Wal to add after base"))
.arg(Arg::new("end-lsn").long("end-lsn").takes_value(true)
.help("Lsn the basebackup ends at"))
.arg(pg_version_arg.clone())
)
).subcommand(
App::new("tenant")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage tenants")
.subcommand(App::new("list"))
.subcommand(App::new("create")
.arg(tenant_id_arg.clone())
.arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline"))
.arg(Arg::new("config").short('c').takes_value(true).multiple_occurrences(true).required(false))
.arg(pg_version_arg.clone())
)
.subcommand(App::new("config")
.arg(tenant_id_arg.clone())
.arg(Arg::new("config").short('c').takes_value(true).multiple_occurrences(true).required(false))
)
)
.subcommand(
App::new("pageserver")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage pageserver")
.subcommand(App::new("status"))
.subcommand(App::new("start").about("Start local pageserver").arg(pageserver_config_args.clone()))
.subcommand(App::new("stop").about("Stop local pageserver")
.arg(stop_mode_arg.clone()))
.subcommand(App::new("restart").about("Restart local pageserver").arg(pageserver_config_args.clone()))
)
.subcommand(
App::new("safekeeper")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage safekeepers")
.subcommand(App::new("start")
.about("Start local safekeeper")
.arg(safekeeper_id_arg.clone())
)
.subcommand(App::new("stop")
.about("Stop local safekeeper")
.arg(safekeeper_id_arg.clone())
.arg(stop_mode_arg.clone())
)
.subcommand(App::new("restart")
.about("Restart local safekeeper")
.arg(safekeeper_id_arg.clone())
.arg(stop_mode_arg.clone())
)
)
.subcommand(
App::new("pg")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage postgres instances")
.subcommand(App::new("list").arg(tenant_id_arg.clone()))
.subcommand(App::new("create")
.about("Create a postgres compute node")
.arg(pg_node_arg.clone())
.arg(branch_name_arg.clone())
.arg(tenant_id_arg.clone())
.arg(lsn_arg.clone())
.arg(port_arg.clone())
.arg(
Arg::new("config-only")
.help("Don't do basebackup, create compute node with only config files")
.long("config-only")
.required(false))
.arg(pg_version_arg.clone())
)
.subcommand(App::new("start")
.about("Start a postgres compute node.\n This command actually creates new node from scratch, but preserves existing config files")
.arg(pg_node_arg.clone())
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(timeline_id_arg.clone())
.arg(lsn_arg.clone())
.arg(port_arg.clone())
.arg(pg_version_arg.clone())
)
.subcommand(
App::new("stop")
.arg(pg_node_arg.clone())
.arg(tenant_id_arg.clone())
.arg(
Arg::new("destroy")
.help("Also delete data directory (now optional, should be default in future)")
.long("destroy")
.required(false)
)
)
)
.subcommand(
App::new("start")
.about("Start page server and safekeepers")
.arg(pageserver_config_args)
)
.subcommand(
App::new("stop")
.about("Stop page server and safekeepers")
.arg(stop_mode_arg.clone())
)
.get_matches();
let (sub_name, sub_args) = match matches.subcommand() {
Some(subcommand_data) => subcommand_data,
@@ -153,7 +358,9 @@ fn print_timelines_tree(
// Memorize all direct children of each timeline.
for timeline in timelines.iter() {
if let Some(ancestor_timeline_id) = timeline.ancestor_timeline_id {
if let Some(ancestor_timeline_id) =
timeline.local.as_ref().and_then(|l| l.ancestor_timeline_id)
{
timelines_hash
.get_mut(&ancestor_timeline_id)
.context("missing timeline info in the HashMap")?
@@ -164,7 +371,13 @@ fn print_timelines_tree(
for timeline in timelines_hash.values() {
// Start with root local timelines (no ancestors) first.
if timeline.info.ancestor_timeline_id.is_none() {
if timeline
.info
.local
.as_ref()
.and_then(|l| l.ancestor_timeline_id)
.is_none()
{
print_timeline(0, &Vec::from([true]), timeline, &timelines_hash)?;
}
}
@@ -181,8 +394,17 @@ fn print_timeline(
timeline: &TimelineTreeEl,
timelines: &HashMap<TimelineId, TimelineTreeEl>,
) -> Result<()> {
let local_remote = match (timeline.info.local.as_ref(), timeline.info.remote.as_ref()) {
(None, None) => unreachable!("in this case no info for a timeline is found"),
(None, Some(_)) => "(R)",
(Some(_), None) => "(L)",
(Some(_), Some(_)) => "(L+R)",
};
// Draw main padding
print!("{} ", local_remote);
if nesting_level > 0 {
let ancestor_lsn = match timeline.info.ancestor_lsn {
let ancestor_lsn = match timeline.info.local.as_ref().and_then(|i| i.ancestor_lsn) {
Some(lsn) => lsn.to_string(),
None => "Unknown Lsn".to_string(),
};
@@ -270,16 +492,16 @@ fn get_tenant_id(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::R
fn parse_tenant_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TenantId>> {
sub_match
.get_one::<String>("tenant-id")
.map(|tenant_id| TenantId::from_str(tenant_id))
.value_of("tenant-id")
.map(TenantId::from_str)
.transpose()
.context("Failed to parse tenant id from the argument string")
}
fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TimelineId>> {
sub_match
.get_one::<String>("timeline-id")
.map(|timeline_id| TimelineId::from_str(timeline_id))
.value_of("timeline-id")
.map(TimelineId::from_str)
.transpose()
.context("Failed to parse timeline id from the argument string")
}
@@ -288,22 +510,19 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result<LocalEnv> {
let initial_timeline_id_arg = parse_timeline_id(init_match)?;
// Create config file
let toml_file: String = if let Some(config_path) = init_match.get_one::<PathBuf>("config") {
let toml_file: String = if let Some(config_path) = init_match.value_of("config") {
// load and parse the file
std::fs::read_to_string(config_path).with_context(|| {
format!(
"Could not read configuration file '{}'",
config_path.display()
)
})?
std::fs::read_to_string(std::path::Path::new(config_path))
.with_context(|| format!("Could not read configuration file '{config_path}'"))?
} else {
// Built-in default config
default_conf(&EtcdBroker::locate_etcd()?)
};
let pg_version = init_match
.get_one::<u32>("pg-version")
.copied()
.value_of("pg-version")
.unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?;
let mut env =
@@ -339,10 +558,9 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result<LocalEnv> {
fn pageserver_config_overrides(init_match: &ArgMatches) -> Vec<&str> {
init_match
.get_many::<String>("pageserver-config-override")
.values_of("pageserver-config-override")
.into_iter()
.flatten()
.map(|s| s.as_str())
.collect()
}
@@ -357,7 +575,7 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an
Some(("create", create_match)) => {
let initial_tenant_id = parse_tenant_id(create_match)?;
let tenant_conf: HashMap<_, _> = create_match
.get_many::<String>("config")
.values_of("config")
.map(|vals| vals.flat_map(|c| c.split_once(':')).collect())
.unwrap_or_default();
let new_tenant_id = pageserver.tenant_create(initial_tenant_id, tenant_conf)?;
@@ -366,8 +584,9 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an
// Create an initial timeline for the new tenant
let new_timeline_id = parse_timeline_id(create_match)?;
let pg_version = create_match
.get_one::<u32>("pg-version")
.copied()
.value_of("pg-version")
.unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?;
let timeline_info = pageserver.timeline_create(
@@ -378,7 +597,10 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an
Some(pg_version),
)?;
let new_timeline_id = timeline_info.timeline_id;
let last_record_lsn = timeline_info.last_record_lsn;
let last_record_lsn = timeline_info
.local
.context(format!("Failed to get last record LSN: no local timeline info for timeline {new_timeline_id}"))?
.last_record_lsn;
env.register_branch_mapping(
DEFAULT_BRANCH_NAME.to_string(),
@@ -393,7 +615,7 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an
Some(("config", create_match)) => {
let tenant_id = get_tenant_id(create_match, env)?;
let tenant_conf: HashMap<_, _> = create_match
.get_many::<String>("config")
.values_of("config")
.map(|vals| vals.flat_map(|c| c.split_once(':')).collect())
.unwrap_or_default();
@@ -420,19 +642,23 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
Some(("create", create_match)) => {
let tenant_id = get_tenant_id(create_match, env)?;
let new_branch_name = create_match
.get_one::<String>("branch-name")
.value_of("branch-name")
.ok_or_else(|| anyhow!("No branch name provided"))?;
let pg_version = create_match
.get_one::<u32>("pg-version")
.copied()
.value_of("pg-version")
.unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?;
let timeline_info =
pageserver.timeline_create(tenant_id, None, None, None, Some(pg_version))?;
let new_timeline_id = timeline_info.timeline_id;
let last_record_lsn = timeline_info.last_record_lsn;
let last_record_lsn = timeline_info
.local
.expect("no local timeline info")
.last_record_lsn;
env.register_branch_mapping(new_branch_name.to_string(), tenant_id, new_timeline_id)?;
println!(
@@ -444,32 +670,35 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
let tenant_id = get_tenant_id(import_match, env)?;
let timeline_id = parse_timeline_id(import_match)?.expect("No timeline id provided");
let name = import_match
.get_one::<String>("node-name")
.value_of("node-name")
.ok_or_else(|| anyhow!("No node name provided"))?;
// Parse base inputs
let base_tarfile = import_match
.get_one::<PathBuf>("base-tarfile")
.ok_or_else(|| anyhow!("No base-tarfile provided"))?
.to_owned();
.value_of("base-tarfile")
.map(|s| PathBuf::from_str(s).unwrap())
.ok_or_else(|| anyhow!("No base-tarfile provided"))?;
let base_lsn = Lsn::from_str(
import_match
.get_one::<String>("base-lsn")
.value_of("base-lsn")
.ok_or_else(|| anyhow!("No base-lsn provided"))?,
)?;
let base = (base_lsn, base_tarfile);
// Parse pg_wal inputs
let wal_tarfile = import_match.get_one::<PathBuf>("wal-tarfile").cloned();
let wal_tarfile = import_match
.value_of("wal-tarfile")
.map(|s| PathBuf::from_str(s).unwrap());
let end_lsn = import_match
.get_one::<String>("end-lsn")
.value_of("end-lsn")
.map(|s| Lsn::from_str(s).unwrap());
// TODO validate both or none are provided
let pg_wal = end_lsn.zip(wal_tarfile);
let pg_version = import_match
.get_one::<u32>("pg-version")
.copied()
.value_of("pg-version")
.unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?;
let mut cplane = ComputeControlPlane::load(env.clone())?;
@@ -484,11 +713,10 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
Some(("branch", branch_match)) => {
let tenant_id = get_tenant_id(branch_match, env)?;
let new_branch_name = branch_match
.get_one::<String>("branch-name")
.value_of("branch-name")
.ok_or_else(|| anyhow!("No branch name provided"))?;
let ancestor_branch_name = branch_match
.get_one::<String>("ancestor-branch-name")
.map(|s| s.as_str())
.value_of("ancestor-branch-name")
.unwrap_or(DEFAULT_BRANCH_NAME);
let ancestor_timeline_id = env
.get_branch_timeline_id(ancestor_branch_name, tenant_id)
@@ -497,8 +725,8 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
})?;
let start_lsn = branch_match
.get_one::<String>("ancestor-start-lsn")
.map(|lsn_str| Lsn::from_str(lsn_str))
.value_of("ancestor-start-lsn")
.map(Lsn::from_str)
.transpose()
.context("Failed to parse ancestor start Lsn from the request")?;
let timeline_info = pageserver.timeline_create(
@@ -510,7 +738,10 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
)?;
let new_timeline_id = timeline_info.timeline_id;
let last_record_lsn = timeline_info.last_record_lsn;
let last_record_lsn = timeline_info
.local
.expect("no local timeline info")
.last_record_lsn;
env.register_branch_mapping(new_branch_name.to_string(), tenant_id, new_timeline_id)?;
@@ -570,7 +801,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
// Use the LSN at the end of the timeline.
timeline_infos
.get(&node.timeline_id)
.map(|bi| bi.last_record_lsn.to_string())
.and_then(|bi| bi.local.as_ref().map(|l| l.last_record_lsn.to_string()))
.unwrap_or_else(|| "?".to_string())
}
Some(lsn) => {
@@ -599,39 +830,45 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
}
"create" => {
let branch_name = sub_args
.get_one::<String>("branch-name")
.map(|s| s.as_str())
.value_of("branch-name")
.unwrap_or(DEFAULT_BRANCH_NAME);
let node_name = sub_args
.get_one::<String>("node")
.map(|node_name| node_name.to_string())
.unwrap_or_else(|| format!("{branch_name}_node"));
.value_of("node")
.map(ToString::to_string)
.unwrap_or_else(|| format!("{}_node", branch_name));
let lsn = sub_args
.get_one::<String>("lsn")
.map(|lsn_str| Lsn::from_str(lsn_str))
.value_of("lsn")
.map(Lsn::from_str)
.transpose()
.context("Failed to parse Lsn from the request")?;
let timeline_id = env
.get_branch_timeline_id(branch_name, tenant_id)
.ok_or_else(|| anyhow!("Found no timeline id for branch name '{branch_name}'"))?;
.ok_or_else(|| anyhow!("Found no timeline id for branch name '{}'", branch_name))?;
let port: Option<u16> = sub_args.get_one::<u16>("port").copied();
let port: Option<u16> = match sub_args.value_of("port") {
Some(p) => Some(p.parse()?),
None => None,
};
let pg_version = sub_args
.get_one::<u32>("pg-version")
.copied()
.value_of("pg-version")
.unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?;
cplane.new_node(tenant_id, &node_name, timeline_id, lsn, port, pg_version)?;
}
"start" => {
let port: Option<u16> = sub_args.get_one::<u16>("port").copied();
let port: Option<u16> = match sub_args.value_of("port") {
Some(p) => Some(p.parse()?),
None => None,
};
let node_name = sub_args
.get_one::<String>("node")
.value_of("node")
.ok_or_else(|| anyhow!("No node name was provided to start"))?;
let node = cplane.nodes.get(&(tenant_id, node_name.to_string()));
let node = cplane.nodes.get(&(tenant_id, node_name.to_owned()));
let auth_token = if matches!(env.pageserver.auth_type, AuthType::NeonJWT) {
let claims = Claims::new(Some(tenant_id), Scope::Tenant);
@@ -642,33 +879,36 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
};
if let Some(node) = node {
println!("Starting existing postgres {node_name}...");
println!("Starting existing postgres {}...", node_name);
node.start(&auth_token)?;
} else {
let branch_name = sub_args
.get_one::<String>("branch-name")
.map(|s| s.as_str())
.value_of("branch-name")
.unwrap_or(DEFAULT_BRANCH_NAME);
let timeline_id = env
.get_branch_timeline_id(branch_name, tenant_id)
.ok_or_else(|| {
anyhow!("Found no timeline id for branch name '{branch_name}'")
anyhow!("Found no timeline id for branch name '{}'", branch_name)
})?;
let lsn = sub_args
.get_one::<String>("lsn")
.map(|lsn_str| Lsn::from_str(lsn_str))
.value_of("lsn")
.map(Lsn::from_str)
.transpose()
.context("Failed to parse Lsn from the request")?;
let pg_version = sub_args
.get_one::<u32>("pg-version")
.copied()
.context("Failed to `pg-version` from the argument string")?;
.value_of("pg-version")
.unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?;
// when used with custom port this results in non obvious behaviour
// port is remembered from first start command, i e
// start --port X
// stop
// start <-- will also use port X even without explicit port argument
println!("Starting new postgres (v{pg_version}) {node_name} on timeline {timeline_id} ...");
println!(
"Starting new postgres (v{}) {} on timeline {} ...",
pg_version, node_name, timeline_id
);
let node =
cplane.new_node(tenant_id, node_name, timeline_id, lsn, port, pg_version)?;
@@ -677,18 +917,18 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
}
"stop" => {
let node_name = sub_args
.get_one::<String>("node")
.value_of("node")
.ok_or_else(|| anyhow!("No node name was provided to stop"))?;
let destroy = sub_args.get_flag("destroy");
let destroy = sub_args.is_present("destroy");
let node = cplane
.nodes
.get(&(tenant_id, node_name.to_string()))
.with_context(|| format!("postgres {node_name} is not found"))?;
.get(&(tenant_id, node_name.to_owned()))
.with_context(|| format!("postgres {} is not found", node_name))?;
node.stop(destroy)?;
}
_ => bail!("Unexpected pg subcommand '{sub_name}'"),
_ => bail!("Unexpected pg subcommand '{}'", sub_name),
}
Ok(())
@@ -706,10 +946,7 @@ fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
}
Some(("stop", stop_match)) => {
let immediate = stop_match
.get_one::<String>("stop-mode")
.map(|s| s.as_str())
== Some("immediate");
let immediate = stop_match.value_of("stop-mode") == Some("immediate");
if let Err(e) = pageserver.stop(immediate) {
eprintln!("pageserver stop failed: {}", e);
@@ -759,7 +996,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
};
// All the commands take an optional safekeeper name argument
let sk_id = if let Some(id_str) = sub_args.get_one::<String>("id") {
let sk_id = if let Some(id_str) = sub_args.value_of("id") {
NodeId(id_str.parse().context("while parsing safekeeper id")?)
} else {
DEFAULT_SAFEKEEPER_ID
@@ -775,8 +1012,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
}
"stop" => {
let immediate =
sub_args.get_one::<String>("stop-mode").map(|s| s.as_str()) == Some("immediate");
let immediate = sub_args.value_of("stop-mode") == Some("immediate");
if let Err(e) = safekeeper.stop(immediate) {
eprintln!("safekeeper stop failed: {}", e);
@@ -785,8 +1021,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
}
"restart" => {
let immediate =
sub_args.get_one::<String>("stop-mode").map(|s| s.as_str()) == Some("immediate");
let immediate = sub_args.value_of("stop-mode") == Some("immediate");
if let Err(e) = safekeeper.stop(immediate) {
eprintln!("safekeeper stop failed: {}", e);
@@ -830,8 +1065,7 @@ fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow
}
fn handle_stop_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
let immediate =
sub_match.get_one::<String>("stop-mode").map(|s| s.as_str()) == Some("immediate");
let immediate = sub_match.value_of("stop-mode") == Some("immediate");
let pageserver = PageServerNode::from_env(env);
@@ -864,219 +1098,3 @@ fn try_stop_etcd_process(env: &local_env::LocalEnv) {
eprintln!("etcd stop failed: {e}");
}
}
fn cli() -> Command {
let branch_name_arg = Arg::new("branch-name")
.long("branch-name")
.help("Name of the branch to be created or used as an alias for other services")
.required(false);
let pg_node_arg = Arg::new("node").help("Postgres node name").required(false);
let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false);
let tenant_id_arg = Arg::new("tenant-id")
.long("tenant-id")
.help("Tenant id. Represented as a hexadecimal string 32 symbols length")
.required(false);
let timeline_id_arg = Arg::new("timeline-id")
.long("timeline-id")
.help("Timeline id. Represented as a hexadecimal string 32 symbols length")
.required(false);
let pg_version_arg = Arg::new("pg-version")
.long("pg-version")
.help("Postgres version to use for the initial tenant")
.required(false)
.value_parser(value_parser!(u32))
.default_value(DEFAULT_PG_VERSION);
let port_arg = Arg::new("port")
.long("port")
.required(false)
.value_parser(value_parser!(u16))
.value_name("port");
let stop_mode_arg = Arg::new("stop-mode")
.short('m')
.value_parser(["fast", "immediate"])
.help("If 'immediate', don't flush repository data at shutdown")
.required(false)
.value_name("stop-mode");
let pageserver_config_args = Arg::new("pageserver-config-override")
.long("pageserver-config-override")
.num_args(1)
.action(ArgAction::Append)
.help("Additional pageserver's configuration options or overrides, refer to pageserver's 'config-override' CLI parameter docs for more")
.required(false);
let lsn_arg = Arg::new("lsn")
.long("lsn")
.help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.")
.required(false);
Command::new("Neon CLI")
.arg_required_else_help(true)
.version(GIT_VERSION)
.subcommand(
Command::new("init")
.about("Initialize a new Neon repository")
.arg(pageserver_config_args.clone())
.arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline"))
.arg(
Arg::new("config")
.long("config")
.required(false)
.value_parser(value_parser!(PathBuf))
.value_name("config"),
)
.arg(pg_version_arg.clone())
)
.subcommand(
Command::new("timeline")
.about("Manage timelines")
.subcommand(Command::new("list")
.about("List all timelines, available to this pageserver")
.arg(tenant_id_arg.clone()))
.subcommand(Command::new("branch")
.about("Create a new timeline, using another timeline as a base, copying its data")
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(Arg::new("ancestor-branch-name").long("ancestor-branch-name")
.help("Use last Lsn of another timeline (and its data) as base when creating the new timeline. The timeline gets resolved by its branch name.").required(false))
.arg(Arg::new("ancestor-start-lsn").long("ancestor-start-lsn")
.help("When using another timeline as base, use a specific Lsn in it instead of the latest one").required(false)))
.subcommand(Command::new("create")
.about("Create a new blank timeline")
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(pg_version_arg.clone())
)
.subcommand(Command::new("import")
.about("Import timeline from basebackup directory")
.arg(tenant_id_arg.clone())
.arg(timeline_id_arg.clone())
.arg(Arg::new("node-name").long("node-name")
.help("Name to assign to the imported timeline"))
.arg(Arg::new("base-tarfile")
.long("base-tarfile")
.value_parser(value_parser!(PathBuf))
.help("Basebackup tarfile to import")
)
.arg(Arg::new("base-lsn").long("base-lsn")
.help("Lsn the basebackup starts at"))
.arg(Arg::new("wal-tarfile")
.long("wal-tarfile")
.value_parser(value_parser!(PathBuf))
.help("Wal to add after base")
)
.arg(Arg::new("end-lsn").long("end-lsn")
.help("Lsn the basebackup ends at"))
.arg(pg_version_arg.clone())
)
).subcommand(
Command::new("tenant")
.arg_required_else_help(true)
.about("Manage tenants")
.subcommand(Command::new("list"))
.subcommand(Command::new("create")
.arg(tenant_id_arg.clone())
.arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline"))
.arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false))
.arg(pg_version_arg.clone())
)
.subcommand(Command::new("config")
.arg(tenant_id_arg.clone())
.arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false))
)
)
.subcommand(
Command::new("pageserver")
.arg_required_else_help(true)
.about("Manage pageserver")
.subcommand(Command::new("status"))
.subcommand(Command::new("start").about("Start local pageserver").arg(pageserver_config_args.clone()))
.subcommand(Command::new("stop").about("Stop local pageserver")
.arg(stop_mode_arg.clone()))
.subcommand(Command::new("restart").about("Restart local pageserver").arg(pageserver_config_args.clone()))
)
.subcommand(
Command::new("safekeeper")
.arg_required_else_help(true)
.about("Manage safekeepers")
.subcommand(Command::new("start")
.about("Start local safekeeper")
.arg(safekeeper_id_arg.clone())
)
.subcommand(Command::new("stop")
.about("Stop local safekeeper")
.arg(safekeeper_id_arg.clone())
.arg(stop_mode_arg.clone())
)
.subcommand(Command::new("restart")
.about("Restart local safekeeper")
.arg(safekeeper_id_arg)
.arg(stop_mode_arg.clone())
)
)
.subcommand(
Command::new("pg")
.arg_required_else_help(true)
.about("Manage postgres instances")
.subcommand(Command::new("list").arg(tenant_id_arg.clone()))
.subcommand(Command::new("create")
.about("Create a postgres compute node")
.arg(pg_node_arg.clone())
.arg(branch_name_arg.clone())
.arg(tenant_id_arg.clone())
.arg(lsn_arg.clone())
.arg(port_arg.clone())
.arg(
Arg::new("config-only")
.help("Don't do basebackup, create compute node with only config files")
.long("config-only")
.required(false))
.arg(pg_version_arg.clone())
)
.subcommand(Command::new("start")
.about("Start a postgres compute node.\n This command actually creates new node from scratch, but preserves existing config files")
.arg(pg_node_arg.clone())
.arg(tenant_id_arg.clone())
.arg(branch_name_arg)
.arg(timeline_id_arg)
.arg(lsn_arg)
.arg(port_arg)
.arg(pg_version_arg)
)
.subcommand(
Command::new("stop")
.arg(pg_node_arg)
.arg(tenant_id_arg)
.arg(
Arg::new("destroy")
.help("Also delete data directory (now optional, should be default in future)")
.long("destroy")
.action(ArgAction::SetTrue)
.required(false)
)
)
)
.subcommand(
Command::new("start")
.about("Start page server and safekeepers")
.arg(pageserver_config_args)
)
.subcommand(
Command::new("stop")
.about("Stop page server and safekeepers")
.arg(stop_mode_arg)
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}

View File

@@ -12,8 +12,13 @@ use nix::unistd::Pid;
use postgres::Config;
use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method};
use safekeeper_api::models::TimelineCreateRequest;
use thiserror::Error;
use utils::{connstring::connection_address, http::error::HttpErrorBody, id::NodeId};
use utils::{
connstring::connection_address,
http::error::HttpErrorBody,
id::{NodeId, TenantId, TimelineId},
};
use crate::local_env::{LocalEnv, SafekeeperConf};
use crate::storage::PageServerNode;
@@ -276,4 +281,24 @@ impl SafekeeperNode {
.error_from_body()?;
Ok(())
}
pub fn timeline_create(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
peer_ids: Vec<NodeId>,
) -> Result<()> {
Ok(self
.http_request(
Method::POST,
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
)
.json(&TimelineCreateRequest {
timeline_id,
peer_ids,
})
.send()?
.error_from_body()?
.json()?)
}
}

View File

@@ -80,6 +80,4 @@
- [015-storage-messaging](rfcs/015-storage-messaging.md)
- [016-connection-routing](rfcs/016-connection-routing.md)
- [017-timeline-data-management](rfcs/017-timeline-data-management.md)
- [018-storage-messaging-2](rfcs/018-storage-messaging-2.md)
- [019-tenant-timeline-lifecycles](rfcs/019-tenant-timeline-lifecycles.md)
- [cluster-size-limits](rfcs/cluster-size-limits.md)

View File

@@ -1,91 +0,0 @@
# Managing Tenant and Timeline lifecycles
## Summary
The pageserver has a Tenant object in memory for each tenant it manages, and a
Timeline for each timeline. There are a lot of tasks that operate on the tenants
and timelines with references to those objects. We have some mechanisms to track
which tasks are operating on each Tenant and Timeline, and to request them to
shutdown when a tenant or timeline is deleted, but it does not cover all uses,
and as a result we have many race conditions around tenant/timeline shutdown.
## Motivation
We have a bunch of race conditions that can produce weird errors and can be hard
to track down.
## Non Goals
This RFC only covers the problem of ensuring that a task/thread isn't operating
on a Tenant or Timeline. It does not cover what states, aside from Active and
non-Active, each Tenant and Timeline should have, or when exactly the transitions
should happen.
## Impacted components (e.g. pageserver, safekeeper, console, etc)
Pageserver. Although I wonder if the safekeeper should have a similar mechanism.
## Current situation
Most pageserver tasks are managed by task_mgr.rs:
- LibpqEndpointListener
- HttpEndPointListener
- WalReceiverManager and -Connection
- GarbageCollector and Compaction
- InitialLogicalSizeCalculation
In addition to those tasks, the walreceiver performs some direct tokio::spawn
calls to spawn tasks that are not registered with 'task_mgr'. And all of these
tasks can spawn extra operations with tokio spawn_blocking.
Whenever a tenant or timeline is removed from the system, by pageserver
shutdown, delete_timeline or tenant-detach operation, we rely on the task
registry in 'task_mgr.rs' to wait until there are no tasks operating on the
tenant or timeline, before its Tenant/Timeline object is removed. That relies on
each task to register itself with the tenant/timeline ID in
'task_mgr.rs'. However, there are many gaps in that. For example,
GarbageCollection and Compaction tasks are registered with the tenant, but when
they proceed to operate on a particular timeline of the tenant, they don't
register with timeline ID. Because of that, the timeline can be deleted while GC
or compaction is running on it, causing failures in the GC or compaction (see
https://github.com/neondatabase/neon/issues/2442).
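
As a concrete illustration, here is a toy model of that gap (the `TaskRegistry` type and its
helpers below are made up for the example, not the real `task_mgr` code): a compaction task that
registers only its tenant id is invisible to a wait keyed on (tenant, timeline), so timeline
deletion proceeds while the task is still running.

```rust
use std::collections::HashMap;

type TenantId = u32;
type TimelineId = u32;

// Toy model of the task registry: each running task is recorded under the ids it declared.
#[derive(Default)]
struct TaskRegistry {
    tasks: HashMap<&'static str, (TenantId, Option<TimelineId>)>,
}

impl TaskRegistry {
    fn register(&mut self, name: &'static str, tenant: TenantId, timeline: Option<TimelineId>) {
        self.tasks.insert(name, (tenant, timeline));
    }

    // Roughly what timeline deletion relies on, per the description above:
    // wait only for tasks registered with this exact (tenant, timeline) pair.
    fn tasks_blocking_timeline_delete(
        &self,
        tenant: TenantId,
        timeline: TimelineId,
    ) -> Vec<&'static str> {
        self.tasks
            .iter()
            .filter(|(_, &(t, tl))| t == tenant && tl == Some(timeline))
            .map(|(name, _)| *name)
            .collect()
    }
}

fn main() {
    let mut registry = TaskRegistry::default();
    // Compaction registers with the tenant only, even though it will soon operate on timeline 7.
    registry.register("compaction", 1, None);

    // Deleting timeline 7 finds nothing to wait for, so it races with the running compaction.
    assert!(registry.tasks_blocking_timeline_delete(1, 7).is_empty());
}
```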
Another problem is that the task registry only works for tokio Tasks. There is
no way to register a piece of code that runs inside spawn_blocking(), for
example.
## Proposed implementation
This "voluntary" registration of tasks is fragile. Let's use Rust language features
to enforce that a tenant/timeline cannot be removed from the system when there is
still some code operating on it.
Let's introduce new Guard objects for Tenant and Timeline, and do all actions through
the Guard object. Something like:
TenantActiveGuard: Guard object over Arc<Tenant>. When you acquire the guard,
the code checks that the tenant is in Active state. If it's not, you get an
error. You can change the state of the tenant to Stopping while there are
TenantActiveGuard objects still on it, to prevent new TenantActiveGuards from
being acquired, but the Tenant cannot be removed until all the guards are gone.
TenantMaintenanceGuard: Like TenantActiveGuard, but can be held even when the
tenant is not in Active state. Used for operations like attach/detach. Perhaps
allow only one such guard on a Tenant at a time.
Similarly for Timelines. We don't currently have a "state" on Timeline, but I think
we need at least two states: Active and Stopping. The Stopping state is used at
deletion, to prevent new TimelineActiveGuards from appearing, while you wait for
existing TimelineActiveGuards to die out.
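
A minimal, self-contained sketch of that shape (simplified stand-in types; the atomic guard
counter and the polling-style `can_be_removed` check are assumptions for illustration, not the
actual pageserver implementation):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum TenantState {
    Active,
    Stopping,
}

struct Tenant {
    state: Mutex<TenantState>,
    active_guards: AtomicUsize,
}

struct TenantActiveGuard {
    tenant: Arc<Tenant>,
}

impl TenantActiveGuard {
    // Acquiring succeeds only while the tenant is Active; otherwise the caller gets an error.
    fn acquire(tenant: &Arc<Tenant>) -> Result<Self, &'static str> {
        let state = tenant.state.lock().unwrap();
        if *state != TenantState::Active {
            return Err("tenant is not active");
        }
        // Count the guard while still holding the state lock, so a concurrent
        // switch to Stopping cannot slip in between the check and the increment.
        tenant.active_guards.fetch_add(1, Ordering::SeqCst);
        Ok(TenantActiveGuard {
            tenant: Arc::clone(tenant),
        })
    }
}

impl Drop for TenantActiveGuard {
    fn drop(&mut self) {
        self.tenant.active_guards.fetch_sub(1, Ordering::SeqCst);
    }
}

impl Tenant {
    // Flip to Stopping so no new guards can be acquired; guards already handed out
    // keep the tenant alive until they are dropped.
    fn set_stopping(&self) {
        *self.state.lock().unwrap() = TenantState::Stopping;
    }

    fn can_be_removed(&self) -> bool {
        self.active_guards.load(Ordering::SeqCst) == 0
    }
}
```

In the real thing the removal path would likely await the last guard instead of polling
`can_be_removed`, and a TenantMaintenanceGuard variant would skip the Active check, but the
ownership shape is the same.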
The shutdown-signaling, using shutdown_watcher() and is_shutdown_requested(),
probably also needs changes to deal with the new Guards. The rule is that if you
have a TenantActiveGuard, and the tenant's state changes from Active to
Stopping, the is_shutdown_requested() function should return true, and
the shutdown_watcher() future should return.
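
One possible wiring for that rule, sketched with `tokio::sync::watch` as an assumed building
block (the real `shutdown_watcher()` / `is_shutdown_requested()` plumbing in `task_mgr` may look
different):

```rust
use tokio::sync::watch;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum TenantState {
    Active,
    Stopping,
}

struct ShutdownSignal {
    rx: watch::Receiver<TenantState>,
}

impl ShutdownSignal {
    // Mirrors is_shutdown_requested(): true as soon as the tenant left Active.
    fn is_shutdown_requested(&self) -> bool {
        *self.rx.borrow() != TenantState::Active
    }

    // Mirrors shutdown_watcher(): resolves once the state becomes non-Active.
    async fn shutdown_watcher(&mut self) {
        while *self.rx.borrow() == TenantState::Active {
            // If the sender is gone, treat that as a shutdown request too.
            if self.rx.changed().await.is_err() {
                break;
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(TenantState::Active);
    let mut signal = ShutdownSignal { rx };

    // Nothing has requested shutdown yet.
    assert!(!signal.is_shutdown_requested());

    let waiter = tokio::spawn(async move {
        signal.shutdown_watcher().await;
        println!("tenant is Stopping, wrapping up");
    });

    // Changing the tenant state wakes every task holding a receiver.
    tx.send(TenantState::Stopping).unwrap();
    waiter.await.unwrap();
}
```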
This signaling doesn't necessarily need to cover all cases. For example, if you
have a block of code in spawn_blocking(), it might be acceptable if
is_shutdown_requested() doesn't return true even though the tenant is in
Stopping state, as long as the code finishes reasonably fast.

View File

@@ -8,7 +8,7 @@
regex = "1.4.5"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
serde_with = "2.0"
serde_with = "1.12.0"
once_cell = "1.13.0"
utils = { path = "../utils" }

View File

@@ -5,7 +5,7 @@ edition = "2021"
[dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_with = "2.0"
serde_with = "1.12.0"
const_format = "0.2.21"
utils = { path = "../utils" }

View File

@@ -123,15 +123,9 @@ pub struct TenantInfo {
pub has_in_progress_downloads: Option<bool>,
}
/// This represents the output of the "timeline_detail" and "timeline_list" API calls.
#[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TimelineInfo {
#[serde_as(as = "DisplayFromStr")]
pub tenant_id: TenantId,
#[serde_as(as = "DisplayFromStr")]
pub timeline_id: TimelineId,
pub struct LocalTimelineInfo {
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_timeline_id: Option<TimelineId>,
#[serde_as(as = "Option<DisplayFromStr>")]
@@ -155,33 +149,28 @@ pub struct TimelineInfo {
/// the timestamp (in microseconds) of the last received message
pub last_received_msg_ts: Option<u128>,
pub pg_version: u32,
#[serde_as(as = "Option<DisplayFromStr>")]
pub remote_consistent_lsn: Option<Lsn>,
pub awaits_download: bool,
// Some of the above fields are duplicated in 'local' and 'remote', for backwards-
// compatibility with older clients.
pub local: LocalTimelineInfo,
pub remote: RemoteTimelineInfo,
}
#[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LocalTimelineInfo {
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_timeline_id: Option<TimelineId>,
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_lsn: Option<Lsn>,
pub current_logical_size: Option<u64>, // is None when timeline is Unloaded
pub current_physical_size: Option<u64>, // is None when timeline is Unloaded
}
#[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct RemoteTimelineInfo {
#[serde_as(as = "Option<DisplayFromStr>")]
pub remote_consistent_lsn: Option<Lsn>,
#[serde_as(as = "DisplayFromStr")]
pub remote_consistent_lsn: Lsn,
pub awaits_download: bool,
}
///
/// This represents the output of the "timeline_detail" API call.
///
#[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TimelineInfo {
#[serde_as(as = "DisplayFromStr")]
pub tenant_id: TenantId,
#[serde_as(as = "DisplayFromStr")]
pub timeline_id: TimelineId,
pub local: Option<LocalTimelineInfo>,
pub remote: Option<RemoteTimelineInfo>,
}
pub type ConfigureFailpointsRequest = Vec<FailpointConfig>;
@@ -201,13 +190,3 @@ pub struct FailpointConfig {
pub struct TimelineGcRequest {
pub gc_horizon: Option<u64>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct TenantSetBackgroundActivityRequest {
pub run_backround_jobs: bool,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct TenantSetBackgroundActivityResponse {
pub msg: String,
}

View File

@@ -13,7 +13,7 @@ crc32c = "0.6.0"
hex = "0.4.3"
once_cell = "1.13.0"
log = "0.4.14"
memoffset = "0.7"
memoffset = "0.6.2"
thiserror = "1.0"
serde = { version = "1.0", features = ["derive"] }
utils = { path = "../utils" }
@@ -26,4 +26,4 @@ wal_craft = { path = "wal_craft" }
[build-dependencies]
anyhow = "1.0"
bindgen = "0.61"
bindgen = "0.60.1"

View File

@@ -7,7 +7,7 @@ edition = "2021"
[dependencies]
anyhow = "1.0"
clap = "4.0"
clap = "3.0"
env_logger = "0.9"
log = "0.4"
once_cell = "1.13.0"

View File

@@ -1,19 +1,68 @@
use anyhow::*;
use clap::{value_parser, Arg, ArgMatches, Command};
use std::{path::PathBuf, str::FromStr};
use clap::{App, Arg, ArgMatches};
use std::str::FromStr;
use wal_craft::*;
fn main() -> Result<()> {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("wal_craft=info"))
.init();
let arg_matches = cli().get_matches();
let type_arg = &Arg::new("type")
.takes_value(true)
.help("Type of WAL to craft")
.possible_values([
Simple::NAME,
LastWalRecordXlogSwitch::NAME,
LastWalRecordXlogSwitchEndsOnPageBoundary::NAME,
WalRecordCrossingSegmentFollowedBySmallOne::NAME,
LastWalRecordCrossingSegment::NAME,
])
.required(true);
let arg_matches = App::new("Postgres WAL crafter")
.about("Crafts Postgres databases with specific WAL properties")
.subcommand(
App::new("print-postgres-config")
.about("Print the configuration required for PostgreSQL server before running this script")
)
.subcommand(
App::new("with-initdb")
.about("Craft WAL in a new data directory first initialized with initdb")
.arg(type_arg)
.arg(
Arg::new("datadir")
.takes_value(true)
.help("Data directory for the Postgres server")
.required(true)
)
.arg(
Arg::new("pg-distrib-dir")
.long("pg-distrib-dir")
.takes_value(true)
.help("Directory with Postgres distributions (bin and lib directories, e.g. pg_install containing subpath `v14/bin/postgresql`)")
.default_value("/usr/local")
)
.arg(
Arg::new("pg-version")
.long("pg-version")
.help("Postgres version to use for the initial tenant")
.required(true)
.takes_value(true)
)
)
.subcommand(
App::new("in-existing")
.about("Craft WAL at an existing recently created Postgres database. Note that server may append new WAL entries on shutdown.")
.arg(type_arg)
.arg(
Arg::new("connection")
.takes_value(true)
.help("Connection string to the Postgres database to populate")
.required(true)
)
)
.get_matches();
let wal_craft = |arg_matches: &ArgMatches, client| {
let (intermediate_lsns, end_of_wal_lsn) = match arg_matches
.get_one::<String>("type")
.map(|s| s.as_str())
.context("'type' is required")?
{
let (intermediate_lsns, end_of_wal_lsn) = match arg_matches.value_of("type").unwrap() {
Simple::NAME => Simple::craft(client)?,
LastWalRecordXlogSwitch::NAME => LastWalRecordXlogSwitch::craft(client)?,
LastWalRecordXlogSwitchEndsOnPageBoundary::NAME => {
@@ -23,12 +72,12 @@ fn main() -> Result<()> {
WalRecordCrossingSegmentFollowedBySmallOne::craft(client)?
}
LastWalRecordCrossingSegment::NAME => LastWalRecordCrossingSegment::craft(client)?,
a => panic!("Unknown --type argument: {a}"),
a => panic!("Unknown --type argument: {}", a),
};
for lsn in intermediate_lsns {
println!("intermediate_lsn = {lsn}");
println!("intermediate_lsn = {}", lsn);
}
println!("end_of_wal = {end_of_wal_lsn}");
println!("end_of_wal = {}", end_of_wal_lsn);
Ok(())
};
@@ -36,24 +85,20 @@ fn main() -> Result<()> {
None => panic!("No subcommand provided"),
Some(("print-postgres-config", _)) => {
for cfg in REQUIRED_POSTGRES_CONFIG.iter() {
println!("{cfg}");
println!("{}", cfg);
}
Ok(())
}
Some(("with-initdb", arg_matches)) => {
let cfg = Conf {
pg_version: *arg_matches
.get_one::<u32>("pg-version")
.context("'pg-version' is required")?,
pg_distrib_dir: arg_matches
.get_one::<PathBuf>("pg-distrib-dir")
.context("'pg-distrib-dir' is required")?
.to_owned(),
datadir: arg_matches
.get_one::<PathBuf>("datadir")
.context("'datadir' is required")?
.to_owned(),
pg_version: arg_matches
.value_of("pg-version")
.unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?,
pg_distrib_dir: arg_matches.value_of("pg-distrib-dir").unwrap().into(),
datadir: arg_matches.value_of("datadir").unwrap().into(),
};
cfg.initdb()?;
let srv = cfg.start_server()?;
@@ -63,77 +108,9 @@ fn main() -> Result<()> {
}
Some(("in-existing", arg_matches)) => wal_craft(
arg_matches,
&mut postgres::Config::from_str(
arg_matches
.get_one::<String>("connection")
.context("'connection' is required")?,
)
.context(
"'connection' argument value could not be parsed as a postgres connection string",
)?
.connect(postgres::NoTls)?,
&mut postgres::Config::from_str(arg_matches.value_of("connection").unwrap())?
.connect(postgres::NoTls)?,
),
Some(_) => panic!("Unknown subcommand"),
}
}
fn cli() -> Command {
let type_arg = &Arg::new("type")
.help("Type of WAL to craft")
.value_parser([
Simple::NAME,
LastWalRecordXlogSwitch::NAME,
LastWalRecordXlogSwitchEndsOnPageBoundary::NAME,
WalRecordCrossingSegmentFollowedBySmallOne::NAME,
LastWalRecordCrossingSegment::NAME,
])
.required(true);
Command::new("Postgres WAL crafter")
.about("Crafts Postgres databases with specific WAL properties")
.subcommand(
Command::new("print-postgres-config")
.about("Print the configuration required for PostgreSQL server before running this script")
)
.subcommand(
Command::new("with-initdb")
.about("Craft WAL in a new data directory first initialized with initdb")
.arg(type_arg)
.arg(
Arg::new("datadir")
.help("Data directory for the Postgres server")
.value_parser(value_parser!(PathBuf))
.required(true)
)
.arg(
Arg::new("pg-distrib-dir")
.long("pg-distrib-dir")
.value_parser(value_parser!(PathBuf))
.help("Directory with Postgres distributions (bin and lib directories, e.g. pg_install containing subpath `v14/bin/postgresql`)")
.default_value("/usr/local")
)
.arg(
Arg::new("pg-version")
.long("pg-version")
.help("Postgres version to use for the initial tenant")
.value_parser(value_parser!(u32))
.required(true)
)
)
.subcommand(
Command::new("in-existing")
.about("Craft WAL at an existing recently created Postgres database. Note that server may append new WAL entries on shutdown.")
.arg(type_arg)
.arg(
Arg::new("connection")
.help("Connection string to the Postgres database to populate")
.required(true)
)
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}

View File

@@ -15,7 +15,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
tokio = { version = "1.17", features = ["sync", "macros", "fs", "io-util"] }
tokio-util = { version = "0.7", features = ["io"] }
toml_edit = { version = "0.14", features = ["easy"] }
toml_edit = { version = "0.13", features = ["easy"] }
tracing = "0.1.27"
workspace_hack = { version = "0.1", path = "../../workspace_hack" }

View File

@@ -16,7 +16,7 @@ use tokio::{
io::{self, AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
};
use tracing::*;
use utils::crashsafe::path_with_suffix_extension;
use utils::crashsafe_dir::path_with_suffix_extension;
use crate::{Download, DownloadError, RemoteObjectId};

View File

@@ -5,7 +5,7 @@ edition = "2021"
[dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_with = "2.0"
serde_with = "1.12.0"
const_format = "0.2.21"
utils = { path = "../utils" }

View File

@@ -1,24 +1,8 @@
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};
use utils::id::{NodeId, TimelineId};
use utils::{
id::{NodeId, TenantId, TimelineId},
lsn::Lsn,
};
#[serde_as]
#[derive(Serialize, Deserialize)]
pub struct TimelineCreateRequest {
#[serde_as(as = "DisplayFromStr")]
pub tenant_id: TenantId,
#[serde_as(as = "DisplayFromStr")]
pub timeline_id: TimelineId,
pub peer_ids: Option<Vec<NodeId>>,
pub pg_version: u32,
pub system_id: Option<u64>,
pub wal_seg_size: Option<u32>,
#[serde_as(as = "DisplayFromStr")]
pub commit_lsn: Lsn,
// If not passed, it is assigned to the beginning of commit_lsn segment.
pub local_start_lsn: Option<Lsn>,
pub peer_ids: Vec<NodeId>,
}

View File

@@ -20,7 +20,7 @@ tokio = { version = "1.17", features = ["macros"]}
tokio-rustls = "0.23"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
nix = "0.25"
nix = "0.23.0"
signal-hook = "0.3.10"
rand = "0.8.3"
jsonwebtoken = "8"
@@ -28,7 +28,7 @@ hex = { version = "0.4.3", features = ["serde"] }
rustls = "0.20.2"
rustls-split = "0.3.0"
git-version = "0.3.5"
serde_with = "2.0"
serde_with = "1.12.0"
once_cell = "1.13.0"
@@ -40,7 +40,7 @@ byteorder = "1.4.3"
bytes = "1.0.1"
hex-literal = "0.3"
tempfile = "3.2"
criterion = "0.4"
criterion = "0.3"
rustls-pemfile = "1"
[[bench]]

View File

@@ -12,8 +12,16 @@ pub fn create_dir(path: impl AsRef<Path>) -> io::Result<()> {
let path = path.as_ref();
fs::create_dir(path)?;
fsync_file_and_parent(path)?;
Ok(())
File::open(path)?.sync_all()?;
if let Some(parent) = path.parent() {
File::open(parent)?.sync_all()
} else {
Err(io::Error::new(
io::ErrorKind::InvalidInput,
"can't find parent",
))
}
}
/// Similar to [`std::fs::create_dir_all`], except we fsync all
@@ -57,12 +65,12 @@ pub fn create_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
// Fsync the created directories from child to parent.
for &path in dirs_to_create.iter() {
fsync(path)?;
File::open(path)?.sync_all()?;
}
// If we created any new directories, fsync the parent.
if !dirs_to_create.is_empty() {
fsync(path)?;
File::open(path)?.sync_all()?;
}
Ok(())
@@ -84,33 +92,6 @@ pub fn path_with_suffix_extension(original_path: impl AsRef<Path>, suffix: &str)
.with_extension(new_extension.as_ref())
}
pub fn fsync_file_and_parent(file_path: &Path) -> io::Result<()> {
let parent = file_path.parent().ok_or_else(|| {
io::Error::new(
io::ErrorKind::Other,
format!("File {file_path:?} has no parent"),
)
})?;
fsync(file_path)?;
fsync(parent)?;
Ok(())
}
pub fn fsync(path: &Path) -> io::Result<()> {
File::open(path)
.map_err(|e| io::Error::new(e.kind(), format!("Failed to open the file {path:?}: {e}")))
.and_then(|file| {
file.sync_all().map_err(|e| {
io::Error::new(
e.kind(),
format!("Failed to sync file {path:?} data and metadata: {e}"),
)
})
})
.map_err(|e| io::Error::new(e.kind(), format!("Failed to fsync file {path:?}: {e}")))
}
#[cfg(test)]
mod tests {
use tempfile::tempdir;

View File

@@ -22,8 +22,8 @@ pub mod pq_proto;
// dealing with connstring parsing and handy access to its parts
pub mod connstring;
// helper functions for creating and fsyncing
pub mod crashsafe;
// helper functions for creating and fsyncing directories/trees
pub mod crashsafe_dir;
// common authentication routines
pub mod auth;

View File

@@ -66,11 +66,6 @@ impl Lsn {
(self.0 % seg_sz as u64) as usize
}
/// Compute LSN of the segment start.
pub fn segment_lsn(self, seg_sz: usize) -> Lsn {
Lsn(self.0 - (self.0 % seg_sz as u64))
}
/// Compute the segment number
pub fn segment_number(self, seg_sz: usize) -> u64 {
self.0 / seg_sz as u64

View File

@@ -15,7 +15,7 @@ use std::sync::Arc;
use std::task::Poll;
use tracing::{debug, error, trace};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, BufReader};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio_rustls::TlsAcceptor;
#[async_trait::async_trait]
@@ -66,8 +66,8 @@ pub enum ProcessMsgResult {
/// Always-writeable sock_split stream.
/// May not be readable. See [`PostgresBackend::take_stream_in`]
pub enum Stream {
Unencrypted(BufReader<tokio::net::TcpStream>),
Tls(Box<tokio_rustls::server::TlsStream<BufReader<tokio::net::TcpStream>>>),
Unencrypted(tokio::net::TcpStream),
Tls(Box<tokio_rustls::server::TlsStream<tokio::net::TcpStream>>),
Broken,
}
@@ -157,7 +157,7 @@ impl PostgresBackend {
let peer_addr = socket.peer_addr()?;
Ok(Self {
stream: Stream::Unencrypted(BufReader::new(socket)),
stream: Stream::Unencrypted(socket),
buf_out: BytesMut::with_capacity(10 * 1024),
state: ProtoState::Initialization,
md5_salt: [0u8; 4],

View File

@@ -23,7 +23,7 @@ futures = "0.3.13"
hex = "0.4.3"
hyper = "0.14"
itertools = "0.10.3"
clap = { version = "4.0", features = ["string"] }
clap = "3.0"
daemonize = "0.4.1"
tokio = { version = "1.17", features = ["process", "sync", "macros", "fs", "rt", "io-util", "time"] }
tokio-util = { version = "0.7.3", features = ["io", "io-util"] }
@@ -38,25 +38,25 @@ tar = "0.4.33"
humantime = "2.1.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"
serde_with = "2.0"
serde_with = "1.12.0"
humantime-serde = "1.1.1"
pprof = { git = "https://github.com/neondatabase/pprof-rs.git", branch = "wallclock-profiling", features = ["flamegraph"], optional = true }
toml_edit = { version = "0.14", features = ["easy"] }
toml_edit = { version = "0.13", features = ["easy"] }
scopeguard = "1.1.0"
const_format = "0.2.21"
tracing = "0.1.36"
signal-hook = "0.3.10"
url = "2"
nix = "0.25"
nix = "0.23"
once_cell = "1.13.0"
crossbeam-utils = "0.8.5"
fail = "0.5.0"
git-version = "0.3.5"
rstar = "0.9.3"
num-traits = "0.2.15"
amplify_num = { git = "https://github.com/hlinnaka/rust-amplify.git", branch = "unsigned-int-perf" }
amplify_num = "0.4.1"
pageserver_api = { path = "../libs/pageserver_api" }
postgres_ffi = { path = "../libs/postgres_ffi" }
@@ -69,10 +69,5 @@ close_fds = "0.3.2"
walkdir = "2.3.2"
[dev-dependencies]
criterion = "0.4"
hex-literal = "0.3"
tempfile = "3.2"
[[bench]]
name = "bench_layer_map"
harness = false

File diff suppressed because it is too large

View File

@@ -6,7 +6,7 @@ use tracing::*;
use anyhow::{anyhow, bail, Context, Result};
use clap::{Arg, ArgAction, Command};
use clap::{App, Arg};
use daemonize::Daemonize;
use fail::FailScenario;
@@ -51,17 +51,57 @@ fn version() -> String {
}
fn main() -> anyhow::Result<()> {
let arg_matches = cli().get_matches();
let arg_matches = App::new("Neon page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.version(&*version())
.arg(
if arg_matches.get_flag("enabled-features") {
Arg::new("daemonize")
.short('d')
.long("daemonize")
.takes_value(false)
.help("Run in the background"),
)
.arg(
Arg::new("init")
.long("init")
.takes_value(false)
.help("Initialize pageserver with all given config overrides"),
)
.arg(
Arg::new("workdir")
.short('D')
.long("workdir")
.takes_value(true)
.help("Working directory for the pageserver"),
)
// See `settings.md` for more details on the extra configuration parameters pageserver can process
.arg(
Arg::new("config-override")
.short('c')
.takes_value(true)
.number_of_values(1)
.multiple_occurrences(true)
.help("Additional configuration overrides of the ones from the toml config file (or new ones to add there).
Any option has to be a valid toml document, example: `-c=\"foo='hey'\"` `-c=\"foo={value=1}\"`"),
)
.arg(Arg::new("update-config").long("update-config").takes_value(false).help(
"Update the config file when started",
))
.arg(
Arg::new("enabled-features")
.long("enabled-features")
.takes_value(false)
.help("Show enabled compile time features"),
)
.get_matches();
if arg_matches.is_present("enabled-features") {
println!("{{\"features\": {FEATURES:?} }}");
return Ok(());
}
let workdir = arg_matches
.get_one::<String>("workdir")
.map(Path::new)
.unwrap_or_else(|| Path::new(".neon"));
let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".neon"));
let workdir = workdir
.canonicalize()
.with_context(|| format!("Error opening workdir '{}'", workdir.display()))?;
@@ -75,7 +115,7 @@ fn main() -> anyhow::Result<()> {
)
})?;
let daemonize = arg_matches.get_flag("daemonize");
let daemonize = arg_matches.is_present("daemonize");
let conf = match initialize_config(&cfg_file_path, arg_matches, &workdir)? {
ControlFlow::Continue(conf) => conf,
@@ -87,7 +127,7 @@ fn main() -> anyhow::Result<()> {
let tenants_path = conf.tenants_path();
if !tenants_path.exists() {
utils::crashsafe::create_dir_all(conf.tenants_path()).with_context(|| {
utils::crashsafe_dir::create_dir_all(conf.tenants_path()).with_context(|| {
format!(
"Failed to create tenants root dir at '{}'",
tenants_path.display()
@@ -113,8 +153,8 @@ fn initialize_config(
arg_matches: clap::ArgMatches,
workdir: &Path,
) -> anyhow::Result<ControlFlow<(), &'static PageServerConf>> {
let init = arg_matches.get_flag("init");
let update_config = init || arg_matches.get_flag("update-config");
let init = arg_matches.is_present("init");
let update_config = init || arg_matches.is_present("update-config");
let (mut toml, config_file_exists) = if cfg_file_path.is_file() {
if init {
@@ -156,10 +196,13 @@ fn initialize_config(
)
};
if let Some(values) = arg_matches.get_many::<String>("config-override") {
if let Some(values) = arg_matches.values_of("config-override") {
for option_line in values {
let doc = toml_edit::Document::from_str(option_line).with_context(|| {
format!("Option '{option_line}' could not be parsed as a toml document")
format!(
"Option '{}' could not be parsed as a toml document",
option_line
)
})?;
for (key, item) in doc.iter() {
@@ -201,7 +244,7 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<()
// Initialize logger
let log_file = logging::init(LOG_FILE_NAME, daemonize)?;
info!("version: {}", version());
info!("version: {GIT_VERSION}");
// TODO: Check that it looks like a valid repository before going further
@@ -342,55 +385,3 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<()
}
})
}
fn cli() -> Command {
Command::new("Neon page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.version(version())
.arg(
Arg::new("daemonize")
.short('d')
.long("daemonize")
.action(ArgAction::SetTrue)
.help("Run in the background"),
)
.arg(
Arg::new("init")
.long("init")
.action(ArgAction::SetTrue)
.help("Initialize pageserver with all given config overrides"),
)
.arg(
Arg::new("workdir")
.short('D')
.long("workdir")
.help("Working directory for the pageserver"),
)
// See `settings.md` for more details on the extra configuration parameters pageserver can process
.arg(
Arg::new("config-override")
.short('c')
.num_args(1)
.action(ArgAction::Append)
.help("Additional configuration overrides of the ones from the toml config file (or new ones to add there). \
Any option has to be a valid toml document, example: `-c=\"foo='hey'\"` `-c=\"foo={value=1}\"`"),
)
.arg(
Arg::new("update-config")
.long("update-config")
.action(ArgAction::SetTrue)
.help("Update the config file when started"),
)
.arg(
Arg::new("enabled-features")
.long("enabled-features")
.action(ArgAction::SetTrue)
.help("Show enabled compile time features"),
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}

View File

@@ -9,7 +9,7 @@ use std::{
};
use anyhow::Context;
use clap::{value_parser, Arg, Command};
use clap::{App, Arg};
use pageserver::{
page_cache,
@@ -24,14 +24,40 @@ project_git_version!(GIT_VERSION);
const METADATA_SUBCOMMAND: &str = "metadata";
fn main() -> anyhow::Result<()> {
let arg_matches = cli().get_matches();
let arg_matches = App::new("Neon Pageserver binutils")
.about("Reads pageserver (and related) binary files management utility")
.version(GIT_VERSION)
.arg(Arg::new("path").help("Input file path").required(false))
.subcommand(
App::new(METADATA_SUBCOMMAND)
.about("Read and update pageserver metadata file")
.arg(
Arg::new("metadata_path")
.help("Input metadata file path")
.required(false),
)
.arg(
Arg::new("disk_consistent_lsn")
.long("disk_consistent_lsn")
.takes_value(true)
.help("Replace disk consistent Lsn"),
)
.arg(
Arg::new("prev_record_lsn")
.long("prev_record_lsn")
.takes_value(true)
.help("Replace previous record Lsn"),
),
)
.get_matches();
match arg_matches.subcommand() {
Some((subcommand_name, subcommand_matches)) => {
let path = subcommand_matches
.get_one::<PathBuf>("metadata_path")
.context("'metadata_path' argument is missing")?
.to_path_buf();
let path = PathBuf::from(
subcommand_matches
.value_of("metadata_path")
.context("'metadata_path' argument is missing")?,
);
anyhow::ensure!(
subcommand_name == METADATA_SUBCOMMAND,
"Unknown subcommand {subcommand_name}"
@@ -39,10 +65,11 @@ fn main() -> anyhow::Result<()> {
handle_metadata(&path, subcommand_matches)?;
}
None => {
let path = arg_matches
.get_one::<PathBuf>("path")
.context("'path' argument is missing")?
.to_path_buf();
let path = PathBuf::from(
arg_matches
.value_of("path")
.context("'path' argument is missing")?,
);
println!(
"No subcommand specified, attempting to guess the format for file {}",
path.display()
@@ -83,7 +110,7 @@ fn handle_metadata(path: &Path, arg_matches: &clap::ArgMatches) -> Result<(), an
let mut meta = TimelineMetadata::from_bytes(&metadata_bytes)?;
println!("Current metadata:\n{meta:?}");
let mut update_meta = false;
if let Some(disk_consistent_lsn) = arg_matches.get_one::<String>("disk_consistent_lsn") {
if let Some(disk_consistent_lsn) = arg_matches.value_of("disk_consistent_lsn") {
meta = TimelineMetadata::new(
Lsn::from_str(disk_consistent_lsn)?,
meta.prev_record_lsn(),
@@ -95,7 +122,7 @@ fn handle_metadata(path: &Path, arg_matches: &clap::ArgMatches) -> Result<(), an
);
update_meta = true;
}
if let Some(prev_record_lsn) = arg_matches.get_one::<String>("prev_record_lsn") {
if let Some(prev_record_lsn) = arg_matches.value_of("prev_record_lsn") {
meta = TimelineMetadata::new(
meta.disk_consistent_lsn(),
Some(Lsn::from_str(prev_record_lsn)?),
@@ -115,40 +142,3 @@ fn handle_metadata(path: &Path, arg_matches: &clap::ArgMatches) -> Result<(), an
Ok(())
}
fn cli() -> Command {
Command::new("Neon Pageserver binutils")
.about("Reads pageserver (and related) binary files management utility")
.version(GIT_VERSION)
.arg(
Arg::new("path")
.help("Input file path")
.value_parser(value_parser!(PathBuf))
.required(false),
)
.subcommand(
Command::new(METADATA_SUBCOMMAND)
.about("Read and update pageserver metadata file")
.arg(
Arg::new("metadata_path")
.help("Input metadata file path")
.value_parser(value_parser!(PathBuf))
.required(false),
)
.arg(
Arg::new("disk_consistent_lsn")
.long("disk_consistent_lsn")
.help("Replace disk consistent Lsn"),
)
.arg(
Arg::new("prev_record_lsn")
.long("prev_record_lsn")
.help("Replace previous record Lsn"),
),
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}

View File

@@ -7,7 +7,6 @@
use anyhow::{anyhow, bail, ensure, Context, Result};
use remote_storage::RemoteStorageConfig;
use std::env;
use utils::crashsafe::path_with_suffix_extension;
use std::path::{Path, PathBuf};
use std::str::FromStr;
@@ -25,7 +24,6 @@ use crate::tenant_config::{TenantConf, TenantConfOpt};
/// The name of the metadata file pageserver creates per timeline.
pub const METADATA_FILE_NAME: &str = "metadata";
pub const TIMELINE_UNINIT_MARK_SUFFIX: &str = "___uninit";
const TENANT_CONFIG_NAME: &str = "config";
pub mod defaults {
@@ -366,17 +364,6 @@ impl PageServerConf {
self.timelines_path(tenant_id).join(timeline_id.to_string())
}
pub fn timeline_uninit_mark_file_path(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
) -> PathBuf {
path_with_suffix_extension(
self.timeline_path(&timeline_id, &tenant_id),
TIMELINE_UNINIT_MARK_SUFFIX,
)
}
/// Points to a place in pageserver's local directory,
/// where certain timeline's metadata file should be located.
pub fn metadata_path(&self, timeline_id: TimelineId, tenant_id: TenantId) -> PathBuf {

View File

@@ -1,11 +1,7 @@
openapi: "3.0.2"
info:
title: Page Server API
description: Neon Pageserver API
version: "1.0"
license:
name: "Apache"
url: https://github.com/neondatabase/neon/blob/main/LICENSE
servers:
- url: ""
paths:
@@ -211,6 +207,7 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/v1/tenant/{tenant_id}/timeline/{timeline_id}/get_lsn_by_timestamp:
parameters:
- name: tenant_id
@@ -615,9 +612,6 @@ components:
required:
- timeline_id
- tenant_id
- last_record_lsn
- disk_consistent_lsn
- awaits_download
properties:
timeline_id:
type: string
@@ -625,15 +619,33 @@ components:
tenant_id:
type: string
format: hex
local:
$ref: "#/components/schemas/LocalTimelineInfo"
remote:
$ref: "#/components/schemas/RemoteTimelineInfo"
RemoteTimelineInfo:
type: object
required:
- awaits_download
- remote_consistent_lsn
properties:
awaits_download:
type: boolean
remote_consistent_lsn:
type: string
format: hex
LocalTimelineInfo:
type: object
required:
- last_record_lsn
- disk_consistent_lsn
properties:
last_record_lsn:
type: string
format: hex
disk_consistent_lsn:
type: string
format: hex
remote_consistent_lsn:
type: string
format: hex
ancestor_timeline_id:
type: string
format: hex
@@ -658,39 +670,7 @@ components:
format: hex
last_received_msg_ts:
type: integer
awaits_download:
type: boolean
# These 'local' and 'remote' fields just duplicate some of the fields
# above. They are kept for backwards-compatibility. They can be removed,
# when the control plane has been updated to look at the above fields
# directly.
local:
$ref: "#/components/schemas/LocalTimelineInfo"
remote:
$ref: "#/components/schemas/RemoteTimelineInfo"
LocalTimelineInfo:
type: object
properties:
ancestor_timeline_id:
type: string
format: hex
ancestor_lsn:
type: string
format: hex
current_logical_size:
type: integer
current_physical_size:
type: integer
RemoteTimelineInfo:
type: object
required:
- remote_consistent_lsn
properties:
remote_consistent_lsn:
type: string
format: hex
Error:
type: object
required:

View File

@@ -3,9 +3,6 @@ use std::sync::Arc;
use anyhow::{anyhow, Context, Result};
use hyper::StatusCode;
use hyper::{Body, Request, Response, Uri};
use pageserver_api::models::{
TenantSetBackgroundActivityRequest, TenantSetBackgroundActivityResponse,
};
use remote_storage::GenericRemoteStorage;
use tokio::task::JoinError;
use tracing::*;
@@ -16,12 +13,11 @@ use super::models::{
TimelineCreateRequest,
};
use crate::pgdatadir_mapping::LsnForTimestamp;
use crate::storage_sync;
use crate::storage_sync::index::{RemoteIndex, RemoteTimeline};
use crate::task_mgr::TaskKind;
use crate::tenant::{TenantState, Timeline};
use crate::tenant_config::TenantConfOpt;
use crate::{config::PageServerConf, tenant_mgr};
use crate::{storage_sync, task_mgr};
use utils::{
auth::JwtAuth,
http::{
@@ -83,13 +79,13 @@ fn get_config(request: &Request<Body>) -> &'static PageServerConf {
get_state(request).conf
}
// Helper function to construct a TimelineInfo struct for a timeline
async fn build_timeline_info(
state: &State,
// Helper functions to construct a LocalTimelineInfo struct for a timeline
fn local_timeline_info_from_timeline(
timeline: &Arc<Timeline>,
include_non_incremental_logical_size: bool,
include_non_incremental_physical_size: bool,
) -> anyhow::Result<TimelineInfo> {
) -> anyhow::Result<LocalTimelineInfo> {
let last_record_lsn = timeline.get_last_record_lsn();
let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = {
let guard = timeline.last_received_wal.lock().unwrap();
@@ -104,47 +100,24 @@ async fn build_timeline_info(
}
};
let (remote_consistent_lsn, awaits_download) = if let Some(remote_entry) = state
.remote_index
.read()
.await
.timeline_entry(&TenantTimelineId {
tenant_id: timeline.tenant_id,
timeline_id: timeline.timeline_id,
}) {
(
Some(remote_entry.metadata.disk_consistent_lsn()),
remote_entry.awaits_download,
)
} else {
(None, false)
};
let ancestor_timeline_id = timeline.get_ancestor_timeline_id();
let ancestor_lsn = match timeline.get_ancestor_lsn() {
Lsn(0) => None,
lsn @ Lsn(_) => Some(lsn),
};
let current_logical_size = match timeline.get_current_logical_size() {
Ok(size) => Some(size),
Err(err) => {
error!("Timeline info creation failed to get current logical size: {err:?}");
None
}
};
let current_physical_size = Some(timeline.get_physical_size());
let info = TimelineInfo {
tenant_id: timeline.tenant_id,
timeline_id: timeline.timeline_id,
ancestor_timeline_id,
ancestor_lsn,
let info = LocalTimelineInfo {
ancestor_timeline_id: timeline.get_ancestor_timeline_id(),
ancestor_lsn: {
match timeline.get_ancestor_lsn() {
Lsn(0) => None,
lsn @ Lsn(_) => Some(lsn),
}
},
disk_consistent_lsn: timeline.get_disk_consistent_lsn(),
last_record_lsn,
prev_record_lsn: Some(timeline.get_prev_record_lsn()),
latest_gc_cutoff_lsn: *timeline.get_latest_gc_cutoff_lsn(),
current_logical_size,
current_physical_size,
current_logical_size: Some(
timeline
.get_current_logical_size()
.context("Timeline info creation failed to get current logical size")?,
),
current_physical_size: Some(timeline.get_physical_size()),
current_logical_size_non_incremental: if include_non_incremental_logical_size {
Some(timeline.get_current_logical_size_non_incremental(last_record_lsn)?)
} else {
@@ -159,25 +132,32 @@ async fn build_timeline_info(
last_received_msg_lsn,
last_received_msg_ts,
pg_version: timeline.pg_version,
remote_consistent_lsn,
awaits_download,
// Duplicate some fields in 'local' and 'remote' fields, for backwards-compatibility
// with the control plane.
local: LocalTimelineInfo {
ancestor_timeline_id,
ancestor_lsn,
current_logical_size,
current_physical_size,
},
remote: RemoteTimelineInfo {
remote_consistent_lsn,
},
};
Ok(info)
}
fn list_local_timelines(
tenant_id: TenantId,
include_non_incremental_logical_size: bool,
include_non_incremental_physical_size: bool,
) -> Result<Vec<(TimelineId, LocalTimelineInfo)>> {
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
let timelines = tenant.list_timelines();
let mut local_timeline_info = Vec::with_capacity(timelines.len());
for (timeline_id, repository_timeline) in timelines {
local_timeline_info.push((
timeline_id,
local_timeline_info_from_timeline(
&repository_timeline,
include_non_incremental_logical_size,
include_non_incremental_physical_size,
)?,
))
}
Ok(local_timeline_info)
}
// healthcheck handler
async fn status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let config = get_config(&request);
@@ -189,8 +169,6 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
let request_data: TimelineCreateRequest = json_request(&mut request).await?;
check_permission(&request, Some(tenant_id))?;
let state = get_state(&request);
let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?;
let new_timeline_info = async {
match tenant.create_timeline(
@@ -201,10 +179,14 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
).await {
Ok(Some(new_timeline)) => {
// Created. Construct a TimelineInfo for it.
let timeline_info = build_timeline_info(state, &new_timeline, false, false)
.await
let local_info = local_timeline_info_from_timeline(&new_timeline, false, false)
.map_err(ApiError::InternalServerError)?;
Ok(Some(timeline_info))
Ok(Some(TimelineInfo {
tenant_id,
timeline_id: new_timeline.timeline_id,
local: Some(local_info),
remote: None,
}))
}
Ok(None) => Ok(None), // timeline already exists
Err(err) => Err(ApiError::InternalServerError(err)),
@@ -227,8 +209,6 @@ async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>,
query_param_present(&request, "include-non-incremental-physical-size");
check_permission(&request, Some(tenant_id))?;
let state = get_state(&request);
let timelines = tokio::task::spawn_blocking(move || {
let _enter = info_span!("timeline_list", tenant = %tenant_id).entered();
let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?;
@@ -238,18 +218,36 @@ async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>,
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))??;
let mut response_data = Vec::with_capacity(timelines.len());
for timeline in timelines {
let timeline_info = build_timeline_info(
state,
for (timeline_id, timeline) in timelines {
let local = match local_timeline_info_from_timeline(
&timeline,
include_non_incremental_logical_size,
include_non_incremental_physical_size,
)
.await
.context("Failed to convert tenant timeline {timeline_id} into the local one: {e:?}")
.map_err(ApiError::InternalServerError)?;
) {
Ok(local) => Some(local),
Err(e) => {
error!("Failed to convert tenant timeline {timeline_id} into the local one: {e:?}");
None
}
};
response_data.push(timeline_info);
response_data.push(TimelineInfo {
tenant_id,
timeline_id,
local,
remote: get_state(&request)
.remote_index
.read()
.await
.timeline_entry(&TenantTimelineId {
tenant_id,
timeline_id,
})
.map(|remote_entry| RemoteTimelineInfo {
remote_consistent_lsn: remote_entry.metadata.disk_consistent_lsn(),
awaits_download: remote_entry.awaits_download,
}),
})
}
json_response(StatusCode::OK, response_data)
@@ -294,33 +292,59 @@ async fn timeline_detail_handler(request: Request<Body>) -> Result<Response<Body
query_param_present(&request, "include-non-incremental-physical-size");
check_permission(&request, Some(tenant_id))?;
let state = get_state(&request);
let timeline_info = async {
let (local_timeline_info, remote_timeline_info) = async {
let timeline = tokio::task::spawn_blocking(move || {
tenant_mgr::get_tenant(tenant_id, true)?.get_timeline(timeline_id)
})
.await
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))?;
let timeline = timeline.map_err(ApiError::NotFound)?;
let local_timeline_info = match timeline.and_then(|timeline| {
local_timeline_info_from_timeline(
&timeline,
include_non_incremental_logical_size,
include_non_incremental_physical_size,
)
}) {
Ok(local_info) => Some(local_info),
Err(e) => {
error!("Failed to get local timeline info: {e:#}");
None
}
};
let timeline_info = build_timeline_info(
state,
&timeline,
include_non_incremental_logical_size,
include_non_incremental_physical_size,
)
.await
.context("Failed to get local timeline info: {e:#}")
.map_err(ApiError::InternalServerError)?;
Ok::<_, ApiError>(timeline_info)
let remote_timeline_info = {
let remote_index_read = get_state(&request).remote_index.read().await;
remote_index_read
.timeline_entry(&TenantTimelineId {
tenant_id,
timeline_id,
})
.map(|remote_entry| RemoteTimelineInfo {
remote_consistent_lsn: remote_entry.metadata.disk_consistent_lsn(),
awaits_download: remote_entry.awaits_download,
})
};
Ok::<_, ApiError>((local_timeline_info, remote_timeline_info))
}
.instrument(info_span!("timeline_detail", tenant = %tenant_id, timeline = %timeline_id))
.await?;
json_response(StatusCode::OK, timeline_info)
if local_timeline_info.is_none() && remote_timeline_info.is_none() {
Err(ApiError::NotFound(anyhow!(
"Timeline {tenant_id}/{timeline_id} is not found neither locally nor remotely"
)))
} else {
json_response(
StatusCode::OK,
TimelineInfo {
tenant_id,
timeline_id,
local: local_timeline_info,
remote: remote_timeline_info,
},
)
}
}
async fn get_lsn_by_timestamp_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -390,7 +414,7 @@ async fn tenant_attach_handler(request: Request<Body>) -> Result<Response<Body>,
}
return json_response(StatusCode::ACCEPTED, ());
}
// no tenant in the index, release the lock to make the potentially lengthy download operation
// no tenant in the index, release the lock to make the potentially lengthy download operation
drop(index_accessor);
// download index parts for every tenant timeline
@@ -542,27 +566,36 @@ async fn tenant_status(request: Request<Body>) -> Result<Response<Body>, ApiErro
false
});
let (tenant_state, current_physical_size) = match tenant {
Ok(tenant) => {
let timelines = tenant.list_timelines();
// Calculate total physical size of all timelines
let mut current_physical_size = 0;
for timeline in timelines {
current_physical_size += timeline.get_physical_size();
}
(tenant.current_state(), Some(current_physical_size))
}
let tenant_state = match tenant {
Ok(tenant) => tenant.current_state(),
Err(e) => {
error!("Failed to get local tenant state: {e:#}");
if has_in_progress_downloads {
(TenantState::Paused, None)
TenantState::Paused
} else {
(TenantState::Broken, None)
TenantState::Broken
}
}
};
let current_physical_size =
match tokio::task::spawn_blocking(move || list_local_timelines(tenant_id, false, false))
.await
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))?
{
Err(err) => {
// Getting local timelines can fail when no local tenant directory is on disk (e.g., when tenant data is being downloaded).
// In that case, put a warning message into the log and operate normally.
warn!("Failed to get local timelines for tenant {tenant_id}: {err}");
None
}
Ok(local_timeline_infos) => Some(
local_timeline_infos
.into_iter()
.fold(0, |acc, x| acc + x.1.current_physical_size.unwrap()),
),
};
json_response(
StatusCode::OK,
TenantInfo {
@@ -574,63 +607,6 @@ async fn tenant_status(request: Request<Body>) -> Result<Response<Body>, ApiErro
)
}
async fn tenant_set_background_activity(
mut request: Request<Body>,
) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
check_permission(&request, Some(tenant_id))?;
let request: TenantSetBackgroundActivityRequest = json_request(&mut request).await?;
let tenant = tenant_mgr::get_tenant(tenant_id, false).map_err(ApiError::NotFound)?;
let modified = tenant.set_state_with(|old_state| {
let background_jobs_running = match old_state {
TenantState::Active {
background_jobs_running,
} => background_jobs_running,
_ => return None,
};
match (request.run_backround_jobs, background_jobs_running) {
(true, true) => None,
(false, false) => None,
(true, false) => Some(TenantState::Active {
background_jobs_running: true,
}),
(false, true) => {
// tasks will eventually shut down after that, but we need a guarantee
// that they've stopped so explicitly waiting for it
Some(TenantState::Active {
background_jobs_running: false,
})
}
}
});
if !modified {
return Ok(json_response(
StatusCode::NOT_MODIFIED,
TenantSetBackgroundActivityResponse { msg: "".to_owned() },
)?);
}
// state was modified and the requested value was set to false, which means we changed the state
// and now need to wait for the tasks to shut down
// XXX can it be changed a second time here, leaving the modified flag outdated?
if modified && !request.run_backround_jobs {
task_mgr::shutdown_tasks(Some(TaskKind::Compaction), Some(tenant_id), None).await;
task_mgr::shutdown_tasks(Some(TaskKind::GarbageCollector), Some(tenant_id), None).await;
}
Ok(json_response(
StatusCode::OK,
TenantSetBackgroundActivityResponse {
msg: format!("run background jobs set to {}", request.run_backround_jobs),
},
)?)
}
// Helper function to standardize the error messages we produce on bad durations
//
// Intended to be used with anyhow's `with_context`, e.g.:
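The example following "e.g.:" is cut off by the hunk boundary below; purely as an illustration, a helper like the one described could be combined with anyhow's `with_context` roughly as follows. The names `bad_duration` and `parse_gc_period` and the use of `humantime::parse_duration` are assumptions made for this sketch, not taken from the diff:

use anyhow::Context;
use std::time::Duration;

// Hypothetical helper standing in for the function documented above: it only
// builds the standardized error message; `with_context` attaches it lazily.
fn bad_duration(field: &str, value: &str) -> String {
    format!("cannot parse `{value}` as a duration for `{field}`")
}

fn parse_gc_period(value: &str) -> anyhow::Result<Duration> {
    humantime::parse_duration(value).with_context(|| bad_duration("gc_period", value))
}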
@@ -842,6 +818,11 @@ async fn failpoints_handler(mut request: Request<Body>) -> Result<Response<Body>
}
// Run GC immediately on given timeline.
// FIXME: This is just for tests. See test_runner/regress/test_gc.py.
// This probably should require special authentication or a global flag to
// enable; I don't think we want or need to allow regular clients to invoke
// GC.
// @hllinnaka in commits ec44f4b29, 3aca717f3
#[cfg(feature = "testing")]
async fn timeline_gc_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
@@ -867,6 +848,9 @@ async fn timeline_gc_handler(mut request: Request<Body>) -> Result<Response<Body
}
// Run compaction immediately on given timeline.
// FIXME: This is just for tests. Don't expect this to be exposed to
// the users or the API.
// @dhammika in commit a0781f229
#[cfg(feature = "testing")]
async fn timeline_compact_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
@@ -965,10 +949,6 @@ pub fn make_router(
.post("/v1/tenant/:tenant_id/timeline", timeline_create_handler)
.post("/v1/tenant/:tenant_id/attach", tenant_attach_handler)
.post("/v1/tenant/:tenant_id/detach", tenant_detach_handler)
.post(
"/v1/tenant/:tenant_id/set_background_activity",
tenant_set_background_activity,
)
.get(
"/v1/tenant/:tenant_id/timeline/:timeline_id",
timeline_detail_handler,

View File

@@ -43,19 +43,19 @@ pub fn get_lsn_from_controlfile(path: &Path) -> Result<Lsn> {
/// The code that deals with the checkpoint would not work right if the
/// cluster was not shut down cleanly.
pub fn import_timeline_from_postgres_datadir(
path: &Path,
tline: &Timeline,
pgdata_path: &Path,
pgdata_lsn: Lsn,
lsn: Lsn,
) -> Result<()> {
let mut pg_control: Option<ControlFileData> = None;
// TODO this should be start_lsn, which is not necessarily equal to end_lsn (aka lsn)
// Then fishing out pg_control would be unnecessary
let mut modification = tline.begin_modification(pgdata_lsn);
let mut modification = tline.begin_modification(lsn);
modification.init_empty()?;
// Import all but pg_wal
let all_but_wal = WalkDir::new(pgdata_path)
let all_but_wal = WalkDir::new(path)
.into_iter()
.filter_entry(|entry| !entry.path().ends_with("pg_wal"));
for entry in all_but_wal {
@@ -63,7 +63,7 @@ pub fn import_timeline_from_postgres_datadir(
let metadata = entry.metadata().expect("error getting dir entry metadata");
if metadata.is_file() {
let absolute_path = entry.path();
let relative_path = absolute_path.strip_prefix(pgdata_path)?;
let relative_path = absolute_path.strip_prefix(path)?;
let file = File::open(absolute_path)?;
let len = metadata.len() as usize;
@@ -84,7 +84,7 @@ pub fn import_timeline_from_postgres_datadir(
"Postgres cluster was not shut down cleanly"
);
ensure!(
pg_control.checkPointCopy.redo == pgdata_lsn.0,
pg_control.checkPointCopy.redo == lsn.0,
"unexpected checkpoint REDO pointer"
);
@@ -92,10 +92,10 @@ pub fn import_timeline_from_postgres_datadir(
// this reads the checkpoint record itself, advancing the tip of the timeline to
// *after* the checkpoint record. And crucially, it initializes the 'prev_lsn'.
import_wal(
&pgdata_path.join("pg_wal"),
&path.join("pg_wal"),
tline,
Lsn(pg_control.checkPointCopy.redo),
pgdata_lsn,
lsn,
)?;
Ok(())

View File

@@ -119,6 +119,32 @@ impl<T> TenantTimelineValues<T> {
fn new() -> Self {
Self(HashMap::new())
}
fn with_capacity(capacity: usize) -> Self {
Self(HashMap::with_capacity(capacity))
}
/// A convenience method to map certain values and omit some of them, if needed.
/// Tenants that won't have any timeline entries due to the filtering will still be preserved
/// in the structure.
fn filter_map<F, NewT>(self, map: F) -> TenantTimelineValues<NewT>
where
F: Fn(T) -> Option<NewT>,
{
let capacity = self.0.len();
self.0.into_iter().fold(
TenantTimelineValues::<NewT>::with_capacity(capacity),
|mut new_values, (tenant_id, old_values)| {
let new_timeline_values = new_values.0.entry(tenant_id).or_default();
for (timeline_id, old_value) in old_values {
if let Some(new_value) = map(old_value) {
new_timeline_values.insert(timeline_id, new_value);
}
}
new_values
},
)
}
}
/// A suffix to be used during file sync from the remote storage,
@@ -155,3 +181,35 @@ mod backoff_defaults_tests {
);
}
}
#[cfg(test)]
mod tests {
use crate::tenant::harness::TIMELINE_ID;
use super::*;
#[test]
fn tenant_timeline_value_mapping() {
let first_tenant = TenantId::generate();
let second_tenant = TenantId::generate();
assert_ne!(first_tenant, second_tenant);
let mut initial = TenantTimelineValues::new();
initial
.0
.entry(first_tenant)
.or_default()
.insert(TIMELINE_ID, "test_value");
let _ = initial.0.entry(second_tenant).or_default();
assert_eq!(initial.0.len(), 2, "Should have entries for both tenants");
let filtered = initial.filter_map(|_| None::<&str>).0;
assert_eq!(
filtered.len(),
2,
"Should have entries for both tenants even after filtering away all entries"
);
assert!(filtered.contains_key(&first_tenant));
assert!(filtered.contains_key(&second_tenant));
}
}

View File

@@ -107,20 +107,18 @@ static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
// Metrics for cloud upload. These metrics reflect data uploaded to cloud storage,
// or in testing they estimate how much we would upload if we did.
static NUM_PERSISTENT_FILES_CREATED: Lazy<IntCounterVec> = Lazy::new(|| {
register_int_counter_vec!(
static NUM_PERSISTENT_FILES_CREATED: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!(
"pageserver_created_persistent_files_total",
"Number of files created that are meant to be uploaded to cloud storage",
&["tenant_id", "timeline_id"]
)
.expect("failed to define a metric")
});
static PERSISTENT_BYTES_WRITTEN: Lazy<IntCounterVec> = Lazy::new(|| {
register_int_counter_vec!(
static PERSISTENT_BYTES_WRITTEN: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!(
"pageserver_written_persistent_bytes_total",
"Total bytes written that are meant to be uploaded to cloud storage",
&["tenant_id", "timeline_id"]
)
.expect("failed to define a metric")
});
@@ -277,15 +275,11 @@ pub static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
/// smallest redo processing times. These buckets allow us to measure down
/// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
/// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
///
/// Values up to 1s are recorded because metrics show that we have redo
/// durations and lock times larger than 0.250s.
macro_rules! redo_histogram_time_buckets {
() => {
vec![
0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000,
1.000_000,
0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000,
]
};
}
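As a quick sanity check of the figures quoted in the comment above, the throughput numbers follow from the smallest bucket boundary and an assumed 8 KiB Postgres page size (the page size is an assumption for this sketch, not stated in the diff):

fn main() {
    // 5us is the smallest bucket boundary in redo_histogram_time_buckets!()
    let fastest_redo_secs = 0.000_005;
    let page_size_bytes = 8192.0; // assumed Postgres block size

    let pages_per_sec = 1.0 / fastest_redo_secs; // 200_000 pages/sec
    let bytes_per_sec = pages_per_sec * page_size_bytes; // ~1.6 GB/sec

    println!("{pages_per_sec} pages/s ~= {:.1} GB/s", bytes_per_sec / 1e9);
}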
@@ -300,17 +294,6 @@ macro_rules! redo_histogram_count_buckets {
};
}
macro_rules! redo_bytes_histogram_count_buckets {
() => {
// powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
// rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
vec![
24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
]
};
}
pub static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
register_histogram!(
"pageserver_wal_redo_seconds",
@@ -338,15 +321,6 @@ pub static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
.expect("failed to define a metric")
});
pub static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
register_histogram!(
"pageserver_wal_redo_bytes_histogram",
"Histogram of number of records replayed per redo",
redo_bytes_histogram_count_buckets!(),
)
.expect("failed to define a metric")
});
pub static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!(
"pageserver_replayed_wal_records_total",
@@ -412,12 +386,8 @@ impl TimelineMetrics {
let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
.get_metric_with_label_values(&[&tenant_id, &timeline_id])
.unwrap();
let num_persistent_files_created = NUM_PERSISTENT_FILES_CREATED
.get_metric_with_label_values(&[&tenant_id, &timeline_id])
.unwrap();
let persistent_bytes_written = PERSISTENT_BYTES_WRITTEN
.get_metric_with_label_values(&[&tenant_id, &timeline_id])
.unwrap();
let num_persistent_files_created = NUM_PERSISTENT_FILES_CREATED.clone();
let persistent_bytes_written = PERSISTENT_BYTES_WRITTEN.clone();
TimelineMetrics {
tenant_id,
@@ -449,8 +419,6 @@ impl Drop for TimelineMetrics {
let _ = WAIT_LSN_TIME.remove_label_values(&[tenant_id, timeline_id]);
let _ = CURRENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, timeline_id]);
let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, timeline_id]);
let _ = NUM_PERSISTENT_FILES_CREATED.remove_label_values(&[tenant_id, timeline_id]);
let _ = PERSISTENT_BYTES_WRITTEN.remove_label_values(&[tenant_id, timeline_id]);
for op in STORAGE_TIME_OPERATIONS {
let _ = STORAGE_TIME.remove_label_values(&[op, tenant_id, timeline_id]);

View File

@@ -32,7 +32,7 @@ use utils::{
use crate::basebackup;
use crate::config::{PageServerConf, ProfilingConfig};
use crate::import_datadir::import_wal_from_tar;
use crate::import_datadir::{import_basebackup_from_tar, import_wal_from_tar};
use crate::metrics::{LIVE_CONNECTIONS_COUNT, SMGR_QUERY_TIME};
use crate::profiling::profpoint_start;
use crate::reltag::RelTag;
@@ -500,8 +500,11 @@ impl PageServerHandler {
task_mgr::associate_with(Some(tenant_id), Some(timeline_id));
// Create empty timeline
info!("creating new timeline");
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
let timeline = tenant.create_empty_timeline(timeline_id, base_lsn, pg_version)?;
let timeline = tenant_mgr::get_tenant(tenant_id, true)?.create_empty_timeline(
timeline_id,
base_lsn,
pg_version,
)?;
// TODO mark timeline as not ready until it reaches end_lsn.
// We might have some wal to import as well, and we should prevent compute
@@ -524,8 +527,7 @@ impl PageServerHandler {
// - use block_in_place()
let mut copyin_stream = Box::pin(copyin_stream(pgb));
let reader = SyncIoBridge::new(StreamReader::new(&mut copyin_stream));
tokio::task::block_in_place(|| timeline.import_basebackup_from_tar(reader, base_lsn))?;
timeline.initialize()?;
tokio::task::block_in_place(|| import_basebackup_from_tar(&timeline, reader, base_lsn))?;
// Drain the rest of the Copy data
let mut bytes_after_tar = 0;
@@ -542,6 +544,12 @@ impl PageServerHandler {
// It wouldn't work if base came from vanilla postgres though,
// since we discard some log files.
// Flush data to disk, then upload to s3
info!("flushing layers");
timeline.checkpoint(CheckpointConfig::Flush)?;
timeline.launch_wal_receiver()?;
info!("done");
Ok(())
}

View File

@@ -1403,9 +1403,7 @@ pub fn create_test_timeline(
timeline_id: utils::id::TimelineId,
pg_version: u32,
) -> Result<std::sync::Arc<Timeline>> {
let tline = tenant
.create_empty_timeline(timeline_id, Lsn(8), pg_version)?
.initialize()?;
let tline = tenant.create_empty_timeline(timeline_id, Lsn(8), pg_version)?;
let mut m = tline.begin_modification(Lsn(8));
m.init_empty()?;
m.commit()?;

View File

@@ -169,14 +169,9 @@ use self::{
upload::{upload_index_part, upload_timeline_layers, UploadedTimeline},
};
use crate::{
config::PageServerConf,
exponential_backoff,
storage_sync::index::{LayerFileMetadata, RemoteIndex},
task_mgr,
task_mgr::TaskKind,
task_mgr::BACKGROUND_RUNTIME,
tenant::metadata::TimelineMetadata,
tenant_mgr::{attach_local_tenants, TenantAttachData},
config::PageServerConf, exponential_backoff, storage_sync::index::RemoteIndex, task_mgr,
task_mgr::TaskKind, task_mgr::BACKGROUND_RUNTIME, tenant::metadata::TimelineMetadata,
tenant_mgr::attach_local_tenants,
};
use crate::{
metrics::{IMAGE_SYNC_TIME, REMAINING_SYNC_ITEMS, REMOTE_INDEX_UPLOAD},
@@ -193,7 +188,7 @@ static SYNC_QUEUE: OnceCell<SyncQueue> = OnceCell::new();
/// A timeline status to share with pageserver's sync counterpart,
/// after comparing local and remote timeline state.
#[derive(Clone, PartialEq, Eq)]
#[derive(Clone)]
pub enum LocalTimelineInitStatus {
/// The timeline has every remote layer present locally.
/// There could be some layers requiring uploading,
@@ -316,7 +311,7 @@ impl SyncQueue {
/// A task to run in the async download/upload loop.
/// Limited by the number of retries; after a certain threshold the failing task gets evicted and the timeline disabled.
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone)]
enum SyncTask {
/// A checkpoint outcome with possible local file updates that need to be reflected in the remote storage.
/// Not necessarily fresher than the one already uploaded.
@@ -427,7 +422,7 @@ impl SyncTaskBatch {
.extend(new_delete.data.deleted_layers.iter().cloned());
}
if let Some(batch_upload) = &mut self.upload {
let not_deleted = |layer: &PathBuf, _: &mut LayerFileMetadata| {
let not_deleted = |layer: &PathBuf| {
!new_delete.data.layers_to_delete.contains(layer)
&& !new_delete.data.deleted_layers.contains(layer)
};
@@ -455,35 +450,21 @@ impl SyncTaskBatch {
#[derive(Debug, Clone, PartialEq, Eq)]
struct LayersUpload {
/// Layer file paths in the pageserver workdir that were added for the corresponding checkpoint.
layers_to_upload: HashMap<PathBuf, LayerFileMetadata>,
layers_to_upload: HashSet<PathBuf>,
/// Already uploaded layers. Used to store the data about the uploads between task retries
/// and to record the data into the remote index after the task got completed or evicted.
uploaded_layers: HashMap<PathBuf, LayerFileMetadata>,
uploaded_layers: HashSet<PathBuf>,
metadata: Option<TimelineMetadata>,
}
/// A timeline download task.
/// Does not contain the file list to download, to allow other
/// parts of the pageserver code to schedule the task
/// without using the remote index or any other ways to list the remote timeline files.
/// without using the remote index or any other ways to list the remote timeline files.
/// Skips the files that are already downloaded.
#[derive(Debug, Clone, PartialEq, Eq)]
struct LayersDownload {
layers_to_skip: HashSet<PathBuf>,
/// Paths which have been downloaded, and had their metadata verified or generated.
///
/// Metadata generation happens when upgrading from a past version of `IndexPart`.
gathered_metadata: HashMap<PathBuf, LayerFileMetadata>,
}
impl LayersDownload {
fn from_skipped_layers(layers_to_skip: HashSet<PathBuf>) -> Self {
LayersDownload {
layers_to_skip,
gathered_metadata: HashMap::default(),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -505,7 +486,7 @@ struct LayersDeletion {
pub fn schedule_layer_upload(
tenant_id: TenantId,
timeline_id: TimelineId,
layers_to_upload: HashMap<PathBuf, LayerFileMetadata>,
layers_to_upload: HashSet<PathBuf>,
metadata: Option<TimelineMetadata>,
) {
let sync_queue = match SYNC_QUEUE.get() {
@@ -522,7 +503,7 @@ pub fn schedule_layer_upload(
},
SyncTask::upload(LayersUpload {
layers_to_upload,
uploaded_layers: HashMap::new(),
uploaded_layers: HashSet::new(),
metadata,
}),
);
@@ -580,44 +561,18 @@ pub fn schedule_layer_download(tenant_id: TenantId, timeline_id: TimelineId) {
tenant_id,
timeline_id,
},
SyncTask::download(LayersDownload::from_skipped_layers(HashSet::new())),
SyncTask::download(LayersDownload {
layers_to_skip: HashSet::new(),
}),
);
debug!("Download task for tenant {tenant_id}, timeline {timeline_id} sent")
}
/// Local existing timeline files
///
/// Values of this type serve different meanings in different contexts. On startup, collected
/// timelines carry the full collected file information; the type is also used when signalling readiness to attach
/// after a completed download. After the download the file information is no longer carried, because
/// it is already merged into [`RemoteTimeline`].
#[derive(Debug)]
pub struct TimelineLocalFiles(TimelineMetadata, HashMap<PathBuf, LayerFileMetadata>);
impl TimelineLocalFiles {
pub fn metadata(&self) -> &TimelineMetadata {
&self.0
}
/// Called during startup, for all of the local files with full metadata.
pub(crate) fn collected(
metadata: TimelineMetadata,
timeline_files: HashMap<PathBuf, LayerFileMetadata>,
) -> TimelineLocalFiles {
TimelineLocalFiles(metadata, timeline_files)
}
/// Called near the end of tenant initialization, to signal readiness to attach tenants.
pub(crate) fn ready(metadata: TimelineMetadata) -> Self {
TimelineLocalFiles(metadata, HashMap::new())
}
}
/// Launch a thread to perform remote storage sync tasks.
/// See module docs for loop step description.
pub fn spawn_storage_sync_task(
conf: &'static PageServerConf,
local_timeline_files: HashMap<TenantId, HashMap<TimelineId, TimelineLocalFiles>>,
local_timeline_files: TenantTimelineValues<(TimelineMetadata, HashSet<PathBuf>)>,
storage: GenericRemoteStorage,
max_concurrent_timelines_sync: NonZeroUsize,
max_sync_errors: NonZeroU32,
@@ -640,7 +595,7 @@ pub fn spawn_storage_sync_task(
let mut keys_for_index_part_downloads = HashSet::new();
let mut timelines_to_sync = HashMap::new();
for (tenant_id, timeline_data) in local_timeline_files {
for (tenant_id, timeline_data) in local_timeline_files.0 {
if timeline_data.is_empty() {
info!("got empty tenant {}", tenant_id);
let _ = empty_tenants.0.entry(tenant_id).or_default();
@@ -743,7 +698,7 @@ async fn storage_sync_loop(
"Sync loop step completed, {} new tenant state update(s)",
updated_tenants.len()
);
let mut timelines_to_attach = HashMap::new();
let mut timelines_to_attach = TenantTimelineValues::new();
let index_accessor = index.read().await;
for tenant_id in updated_tenants {
let tenant_entry = match index_accessor.tenant_entry(&tenant_id) {
@@ -769,16 +724,12 @@ async fn storage_sync_loop(
// and register them all at once in a tenant for download,
// submitted in a single operation to the tenant
// so it can apply them to its internal timeline map at once.
timelines_to_attach.insert(
timelines_to_attach.0.insert(
tenant_id,
TenantAttachData::Ready(
tenant_entry
.iter()
.map(|(&id, entry)| {
(id, TimelineLocalFiles::ready(entry.metadata.clone()))
})
.collect(),
),
tenant_entry
.iter()
.map(|(&id, entry)| (id, entry.metadata.clone()))
.collect(),
);
}
}
@@ -1020,27 +971,15 @@ async fn download_timeline_data(
}
DownloadedTimeline::Successful(mut download_data) => {
match update_local_metadata(conf, sync_id, current_remote_timeline).await {
Ok(()) => {
let mut g = index.write().await;
match g.set_awaits_download(&sync_id, false) {
Ok(()) => {
let timeline = g
.timeline_entry_mut(&sync_id)
.expect("set_awaits_download verified existence");
timeline.merge_metadata_from_downloaded(
&download_data.data.gathered_metadata,
);
register_sync_status(sync_id, sync_start, TASK_NAME, Some(true));
return DownloadStatus::Downloaded;
}
Err(e) => {
error!("Timeline {sync_id} was expected to be in the remote index after a successful download, but it's absent: {e:?}");
}
};
}
Ok(()) => match index.write().await.set_awaits_download(&sync_id, false) {
Ok(()) => {
register_sync_status(sync_id, sync_start, TASK_NAME, Some(true));
return DownloadStatus::Downloaded;
}
Err(e) => {
error!("Timeline {sync_id} was expected to be in the remote index after a successful download, but it's absent: {e:?}");
}
},
Err(e) => {
error!("Failed to update local timeline metadata: {e:?}");
download_data.retries += 1;
@@ -1243,18 +1182,11 @@ async fn update_remote_data(
}
if upload_failed {
existing_entry.add_upload_failures(
uploaded_data
.layers_to_upload
.iter()
.map(|(k, v)| (k.to_owned(), v.to_owned())),
uploaded_data.layers_to_upload.iter().cloned(),
);
} else {
existing_entry.add_timeline_layers(
uploaded_data
.uploaded_layers
.iter()
.map(|(k, v)| (k.to_owned(), v.to_owned())),
);
existing_entry
.add_timeline_layers(uploaded_data.uploaded_layers.iter().cloned());
}
}
RemoteDataUpdate::Delete(layers_to_remove) => {
@@ -1274,19 +1206,11 @@ async fn update_remote_data(
};
let mut new_remote_timeline = RemoteTimeline::new(new_metadata.clone());
if upload_failed {
new_remote_timeline.add_upload_failures(
uploaded_data
.layers_to_upload
.iter()
.map(|(k, v)| (k.to_owned(), v.to_owned())),
);
new_remote_timeline
.add_upload_failures(uploaded_data.layers_to_upload.iter().cloned());
} else {
new_remote_timeline.add_timeline_layers(
uploaded_data
.uploaded_layers
.iter()
.map(|(k, v)| (k.to_owned(), v.to_owned())),
);
new_remote_timeline
.add_timeline_layers(uploaded_data.uploaded_layers.iter().cloned());
}
index_accessor.add_timeline_entry(sync_id, new_remote_timeline.clone());
@@ -1334,14 +1258,13 @@ async fn validate_task_retries(
fn schedule_first_sync_tasks(
index: &mut RemoteTimelineIndex,
sync_queue: &SyncQueue,
local_timeline_files: HashMap<TenantTimelineId, TimelineLocalFiles>,
local_timeline_files: HashMap<TenantTimelineId, (TimelineMetadata, HashSet<PathBuf>)>,
) -> TenantTimelineValues<LocalTimelineInitStatus> {
let mut local_timeline_init_statuses = TenantTimelineValues::new();
let mut new_sync_tasks = VecDeque::with_capacity(local_timeline_files.len());
for (sync_id, local_timeline) in local_timeline_files {
let TimelineLocalFiles(local_metadata, local_files) = local_timeline;
for (sync_id, (local_metadata, local_files)) in local_timeline_files {
match index.timeline_entry_mut(&sync_id) {
Some(remote_timeline) => {
let (timeline_status, awaits_download) = compare_local_and_remote_timeline(
@@ -1385,7 +1308,7 @@ fn schedule_first_sync_tasks(
sync_id,
SyncTask::upload(LayersUpload {
layers_to_upload: local_files,
uploaded_layers: HashMap::new(),
uploaded_layers: HashSet::new(),
metadata: Some(local_metadata.clone()),
}),
));
@@ -1412,46 +1335,20 @@ fn compare_local_and_remote_timeline(
new_sync_tasks: &mut VecDeque<(TenantTimelineId, SyncTask)>,
sync_id: TenantTimelineId,
local_metadata: TimelineMetadata,
local_files: HashMap<PathBuf, LayerFileMetadata>,
local_files: HashSet<PathBuf>,
remote_entry: &RemoteTimeline,
) -> (LocalTimelineInitStatus, bool) {
let _entered = info_span!("compare_local_and_remote_timeline", sync_id = %sync_id).entered();
let needed_to_download_files = remote_entry
.stored_files()
.iter()
.filter_map(|(layer_file, remote_metadata)| {
if let Some(local_metadata) = local_files.get(layer_file) {
match (remote_metadata.file_size(), local_metadata.file_size()) {
(Some(x), Some(y)) if x == y => { None },
(None, Some(_)) => {
// upgrading from an earlier IndexPart without metadata
None
},
_ => {
// having to deal with other than (Some(x), Some(y)) where x != y here is a
// bummer, but see #2582 and #2610 for attempts and discussion.
warn!("Redownloading locally existing {layer_file:?} due to size mismatch, size on index: {:?}, on disk: {:?}", remote_metadata.file_size(), local_metadata.file_size());
Some(layer_file)
},
}
} else {
// doesn't exist locally
Some(layer_file)
}
})
.collect::<HashSet<_>>();
let remote_files = remote_entry.stored_files();
let (initial_timeline_status, awaits_download) = if !needed_to_download_files.is_empty() {
let number_of_layers_to_download = remote_files.difference(&local_files).count();
let (initial_timeline_status, awaits_download) = if number_of_layers_to_download > 0 {
new_sync_tasks.push_back((
sync_id,
SyncTask::download(LayersDownload::from_skipped_layers(
local_files
.keys()
.filter(|path| !needed_to_download_files.contains(path))
.cloned()
.collect(),
)),
SyncTask::download(LayersDownload {
layers_to_skip: local_files.clone(),
}),
));
info!("NeedsSync");
(LocalTimelineInitStatus::NeedsSync, true)
@@ -1466,22 +1363,15 @@ fn compare_local_and_remote_timeline(
};
let layers_to_upload = local_files
.iter()
.filter_map(|(local_file, metadata)| {
if !remote_entry.stored_files().contains_key(local_file) {
Some((local_file.to_owned(), metadata.to_owned()))
} else {
None
}
})
.collect::<HashMap<_, _>>();
.difference(remote_files)
.cloned()
.collect::<HashSet<_>>();
if !layers_to_upload.is_empty() {
new_sync_tasks.push_back((
sync_id,
SyncTask::upload(LayersUpload {
layers_to_upload,
uploaded_layers: HashMap::new(),
uploaded_layers: HashSet::new(),
metadata: Some(local_metadata),
}),
));
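With the per-layer metadata gone, the scheduling decision in `compare_local_and_remote_timeline` reduces to plain set arithmetic over layer paths: remote-minus-local drives downloads, local-minus-remote drives uploads. A minimal standalone sketch of that idea (illustrative names only, not the pageserver API):

use std::collections::HashSet;
use std::path::PathBuf;

// Layers present remotely but not locally need downloading; layers present
// locally but not remotely need uploading. Everything else is already in sync.
fn sync_plan(
    local: &HashSet<PathBuf>,
    remote: &HashSet<PathBuf>,
) -> (Vec<PathBuf>, Vec<PathBuf>) {
    let to_download = remote.difference(local).cloned().collect();
    let to_upload = local.difference(remote).cloned().collect();
    (to_download, to_upload)
}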
@@ -1537,12 +1427,11 @@ mod test_utils {
let timeline_path = harness.timeline_path(&timeline_id);
fs::create_dir_all(&timeline_path).await?;
let mut layers_to_upload = HashMap::with_capacity(filenames.len());
let mut layers_to_upload = HashSet::with_capacity(filenames.len());
for &file in filenames {
let file_path = timeline_path.join(file);
fs::write(&file_path, dummy_contents(file).into_bytes()).await?;
let metadata = LayerFileMetadata::new(file_path.metadata()?.len());
layers_to_upload.insert(file_path, metadata);
layers_to_upload.insert(file_path);
}
fs::write(
@@ -1553,7 +1442,7 @@ mod test_utils {
Ok(LayersUpload {
layers_to_upload,
uploaded_layers: HashMap::new(),
uploaded_layers: HashSet::new(),
metadata: Some(metadata),
})
}
@@ -1608,13 +1497,12 @@ mod tests {
assert!(sync_id_2 != sync_id_3);
assert!(sync_id_3 != TEST_SYNC_ID);
let download_task =
SyncTask::download(LayersDownload::from_skipped_layers(HashSet::from([
PathBuf::from("sk"),
])));
let download_task = SyncTask::download(LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk")]),
});
let upload_task = SyncTask::upload(LayersUpload {
layers_to_upload: HashMap::from([(PathBuf::from("up"), LayerFileMetadata::new(123))]),
uploaded_layers: HashMap::from([(PathBuf::from("upl"), LayerFileMetadata::new(123))]),
layers_to_upload: HashSet::from([PathBuf::from("up")]),
uploaded_layers: HashSet::from([PathBuf::from("upl")]),
metadata: Some(dummy_metadata(Lsn(2))),
});
let delete_task = SyncTask::delete(LayersDeletion {
@@ -1658,10 +1546,12 @@ mod tests {
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
assert_eq!(sync_queue.len(), 0);
let download = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk")]));
let download = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk")]),
};
let upload = LayersUpload {
layers_to_upload: HashMap::from([(PathBuf::from("up"), LayerFileMetadata::new(123))]),
uploaded_layers: HashMap::from([(PathBuf::from("upl"), LayerFileMetadata::new(123))]),
layers_to_upload: HashSet::from([PathBuf::from("up")]),
uploaded_layers: HashSet::from([PathBuf::from("upl")]),
metadata: Some(dummy_metadata(Lsn(2))),
};
let delete = LayersDeletion {
@@ -1709,10 +1599,18 @@ mod tests {
#[tokio::test]
async fn same_task_id_same_tasks_batch() {
let sync_queue = SyncQueue::new(NonZeroUsize::new(1).unwrap());
let download_1 = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk1")]));
let download_2 = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk2")]));
let download_3 = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk3")]));
let download_4 = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk4")]));
let download_1 = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk1")]),
};
let download_2 = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk2")]),
};
let download_3 = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk3")]),
};
let download_4 = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk4")]),
};
let sync_id_2 = TenantTimelineId {
tenant_id: TenantId::from_array(hex!("22223344556677881122334455667788")),
@@ -1736,15 +1634,15 @@ mod tests {
Some(SyncTaskBatch {
download: Some(SyncData {
retries: 0,
data: LayersDownload::from_skipped_layers(
{
data: LayersDownload {
layers_to_skip: {
let mut set = HashSet::new();
set.extend(download_1.layers_to_skip.into_iter());
set.extend(download_2.layers_to_skip.into_iter());
set.extend(download_4.layers_to_skip.into_iter());
set
},
)
}
}),
upload: None,
delete: None,
@@ -1760,148 +1658,4 @@ mod tests {
"Should have one task left out of the batch"
);
}
mod local_and_remote_comparisons {
use super::*;
#[test]
fn ready() {
let mut new_sync_tasks = VecDeque::default();
let sync_id = TenantTimelineId::generate();
let local_metadata = dummy_metadata(0x02.into());
let local_files =
HashMap::from([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let mut remote_entry = RemoteTimeline::new(local_metadata.clone());
remote_entry
.add_timeline_layers([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let (status, sync_needed) = compare_local_and_remote_timeline(
&mut new_sync_tasks,
sync_id,
local_metadata.clone(),
local_files,
&remote_entry,
);
assert_eq!(
status,
LocalTimelineInitStatus::LocallyComplete(local_metadata)
);
assert!(!sync_needed);
assert!(new_sync_tasks.is_empty(), "{:?}", new_sync_tasks);
}
#[test]
fn needs_download() {
let mut new_sync_tasks = VecDeque::default();
let sync_id = TenantTimelineId::generate();
let local_metadata = dummy_metadata(0x02.into());
let local_files = HashMap::default();
let mut remote_entry = RemoteTimeline::new(local_metadata.clone());
remote_entry
.add_timeline_layers([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let (status, sync_needed) = compare_local_and_remote_timeline(
&mut new_sync_tasks,
sync_id,
local_metadata,
local_files.clone(),
&remote_entry,
);
assert_eq!(status, LocalTimelineInitStatus::NeedsSync);
assert!(sync_needed);
let new_sync_tasks = new_sync_tasks.into_iter().collect::<Vec<_>>();
assert_eq!(
&new_sync_tasks,
&[(
sync_id,
SyncTask::download(LayersDownload::from_skipped_layers(
local_files.keys().cloned().collect()
))
)]
);
}
#[test]
fn redownload_is_not_needed_on_upgrade() {
// originally the implementation missed the `(None, Some(_))` case in the match, and
// proceeded to always redownload if the remote metadata was not available.
let mut new_sync_tasks = VecDeque::default();
let sync_id = TenantTimelineId::generate();
let local_metadata = dummy_metadata(0x02.into());
// the type system would in general allow LayerFileMetadata to be created with
// file_size: None, however `LayerFileMetadata::default` is only allowed from tests,
// so everywhere else within the system a valid LayerFileMetadata is
// created through `::new`.
let local_files =
HashMap::from([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let mut remote_entry = RemoteTimeline::new(local_metadata.clone());
// RemoteTimeline is constructed out of an older version IndexPart, which didn't carry
// any metadata.
remote_entry
.add_timeline_layers([(PathBuf::from("first_file"), LayerFileMetadata::default())]);
let (status, sync_needed) = compare_local_and_remote_timeline(
&mut new_sync_tasks,
sync_id,
local_metadata.clone(),
local_files,
&remote_entry,
);
assert_eq!(
status,
LocalTimelineInitStatus::LocallyComplete(local_metadata)
);
assert!(!sync_needed);
}
#[test]
fn needs_upload() {
let mut new_sync_tasks = VecDeque::default();
let sync_id = TenantTimelineId::generate();
let local_metadata = dummy_metadata(0x02.into());
let local_files =
HashMap::from([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let mut remote_entry = RemoteTimeline::new(local_metadata.clone());
remote_entry.add_timeline_layers([]);
let (status, sync_needed) = compare_local_and_remote_timeline(
&mut new_sync_tasks,
sync_id,
local_metadata.clone(),
local_files.clone(),
&remote_entry,
);
assert_eq!(
status,
LocalTimelineInitStatus::LocallyComplete(local_metadata.clone())
);
assert!(!sync_needed);
let new_sync_tasks = new_sync_tasks.into_iter().collect::<Vec<_>>();
assert_eq!(
&new_sync_tasks,
&[(
sync_id,
SyncTask::upload(LayersUpload {
layers_to_upload: local_files,
uploaded_layers: HashMap::default(),
metadata: Some(local_metadata),
})
)]
);
}
}
}

View File

@@ -171,7 +171,7 @@ mod tests {
let local_timeline_path = harness.timeline_path(&TIMELINE_ID);
let timeline_upload =
create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?;
for (local_path, _metadata) in timeline_upload.layers_to_upload {
for local_path in timeline_upload.layers_to_upload {
let remote_path =
local_storage.resolve_in_storage(&local_storage.remote_object_id(&local_path)?)?;
let remote_parent_dir = remote_path.parent().unwrap();

View File

@@ -16,13 +16,9 @@ use tokio::{
};
use tracing::{debug, error, info, warn};
use crate::{
config::PageServerConf,
storage_sync::{index::LayerFileMetadata, SyncTask},
TEMP_FILE_SUFFIX,
};
use crate::{config::PageServerConf, storage_sync::SyncTask, TEMP_FILE_SUFFIX};
use utils::{
crashsafe::path_with_suffix_extension,
crashsafe_dir::path_with_suffix_extension,
id::{TenantId, TenantTimelineId, TimelineId},
};
@@ -223,14 +219,8 @@ pub(super) async fn download_timeline_layers<'a>(
let layers_to_download = remote_timeline
.stored_files()
.iter()
.filter_map(|(layer_path, metadata)| {
if !download.layers_to_skip.contains(layer_path) {
Some((layer_path.to_owned(), metadata.to_owned()))
} else {
None
}
})
.difference(&download.layers_to_skip)
.cloned()
.collect::<Vec<_>>();
debug!("Layers to download: {layers_to_download:?}");
@@ -243,129 +233,89 @@ pub(super) async fn download_timeline_layers<'a>(
let mut download_tasks = layers_to_download
.into_iter()
.map(|(layer_destination_path, metadata)| async move {
.map(|layer_destination_path| async move {
if layer_destination_path.exists() {
debug!(
"Layer already exists locally, skipping download: {}",
layer_destination_path.display()
);
} else {
// Perform a rename inspired by durable_rename from file_utils.c.
// The sequence:
// write(tmp)
// fsync(tmp)
// rename(tmp, new)
// fsync(new)
// fsync(parent)
// For more context about durable_rename check this email from postgres mailing list:
// https://www.postgresql.org/message-id/56583BDD.9060302@2ndquadrant.com
// If pageserver crashes the temp file will be deleted on startup and re-downloaded.
let temp_file_path =
path_with_suffix_extension(&layer_destination_path, TEMP_FILE_SUFFIX);
match layer_destination_path.metadata() {
Ok(m) if m.is_file() => {
// the file exists from an earlier round when we failed after renaming it as
// layer_destination_path
let verified = if let Some(expected) = metadata.file_size() {
m.len() == expected
} else {
// behaviour before recording metadata was to accept any existing
true
};
let mut destination_file =
fs::File::create(&temp_file_path).await.with_context(|| {
format!(
"Failed to create a destination file for layer '{}'",
temp_file_path.display()
)
})?;
if verified {
debug!(
"Layer already exists locally, skipping download: {}",
layer_destination_path.display()
);
return Ok((layer_destination_path, LayerFileMetadata::new(m.len())))
} else {
// no need to remove it, it will be overwritten by fs::rename
// after successful download
warn!("Downloaded layer exists already but layer file metadata mismatches: {}, metadata {:?}", layer_destination_path.display(), metadata);
}
}
Ok(m) => {
return Err(anyhow::anyhow!("Downloaded layer destination exists but is not a file: {m:?}, target needs to be removed/archived manually: {layer_destination_path:?}"));
}
Err(_) => {
// behave as the file didn't exist
}
let mut layer_download = storage.download_storage_object(None, &layer_destination_path)
.await
.with_context(|| {
format!(
"Failed to initiate the download the layer for {sync_id} into file '{}'",
temp_file_path.display()
)
})?;
io::copy(&mut layer_download.download_stream, &mut destination_file)
.await
.with_context(|| {
format!(
"Failed to download the layer for {sync_id} into file '{}'",
temp_file_path.display()
)
})?;
// Tokio doc here: https://docs.rs/tokio/1.17.0/tokio/fs/struct.File.html states that:
// A file will not be closed immediately when it goes out of scope if there are any IO operations
// that have not yet completed. To ensure that a file is closed immediately when it is dropped,
// you should call flush before dropping it.
//
// From the tokio code I see that it waits for pending operations to complete. There shouldn't be any because
// we assume that the `destination_file` is fully written, i.e. there are no pending .write(...).await operations.
// But for additional safety let's check/wait for any pending operations.
destination_file.flush().await.with_context(|| {
format!(
"failed to flush source file at {}",
temp_file_path.display()
)
})?;
// not using sync_data because it can lose file size update
destination_file.sync_all().await.with_context(|| {
format!(
"failed to fsync source file at {}",
temp_file_path.display()
)
})?;
drop(destination_file);
fail::fail_point!("remote-storage-download-pre-rename", |_| {
anyhow::bail!("remote-storage-download-pre-rename failpoint triggered")
});
fs::rename(&temp_file_path, &layer_destination_path).await?;
fsync_path(&layer_destination_path).await.with_context(|| {
format!(
"Cannot fsync layer destination path {}",
layer_destination_path.display(),
)
})?;
}
// Perform a rename inspired by durable_rename from file_utils.c.
// The sequence:
// write(tmp)
// fsync(tmp)
// rename(tmp, new)
// fsync(new)
// fsync(parent)
// For more context about durable_rename check this email from postgres mailing list:
// https://www.postgresql.org/message-id/56583BDD.9060302@2ndquadrant.com
// If pageserver crashes the temp file will be deleted on startup and re-downloaded.
let temp_file_path =
path_with_suffix_extension(&layer_destination_path, TEMP_FILE_SUFFIX);
// TODO: this doesn't use the cached fd for some reason?
let mut destination_file =
fs::File::create(&temp_file_path).await.with_context(|| {
format!(
"Failed to create a destination file for layer '{}'",
temp_file_path.display()
)
})?;
let mut layer_download = storage.download_storage_object(None, &layer_destination_path)
.await
.with_context(|| {
format!(
"Failed to initiate the download the layer for {sync_id} into file '{}'",
temp_file_path.display()
)
})?;
let bytes_amount = io::copy(&mut layer_download.download_stream, &mut destination_file)
.await
.with_context(|| {
format!(
"Failed to download the layer for {sync_id} into file '{}'",
temp_file_path.display()
)
})?;
// Tokio doc here: https://docs.rs/tokio/1.17.0/tokio/fs/struct.File.html states that:
// A file will not be closed immediately when it goes out of scope if there are any IO operations
// that have not yet completed. To ensure that a file is closed immediately when it is dropped,
// you should call flush before dropping it.
//
// From the tokio code I see that it waits for pending operations to complete. There shouldn't be any because
// we assume that the `destination_file` is fully written, i.e. there are no pending .write(...).await operations.
// But for additional safety let's check/wait for any pending operations.
destination_file.flush().await.with_context(|| {
format!(
"failed to flush source file at {}",
temp_file_path.display()
)
})?;
match metadata.file_size() {
Some(expected) if expected != bytes_amount => {
anyhow::bail!(
"According to layer file metadata should had downloaded {expected} bytes but downloaded {bytes_amount} bytes into file '{}'",
temp_file_path.display()
);
},
Some(_) | None => {
// matches, or upgrading from an earlier IndexPart version
}
}
// not using sync_data because it can lose file size update
destination_file.sync_all().await.with_context(|| {
format!(
"failed to fsync source file at {}",
temp_file_path.display()
)
})?;
drop(destination_file);
fail::fail_point!("remote-storage-download-pre-rename", |_| {
anyhow::bail!("remote-storage-download-pre-rename failpoint triggered")
});
fs::rename(&temp_file_path, &layer_destination_path).await?;
fsync_path(&layer_destination_path).await.with_context(|| {
format!(
"Cannot fsync layer destination path {}",
layer_destination_path.display(),
)
})?;
Ok::<_, anyhow::Error>((layer_destination_path, LayerFileMetadata::new(bytes_amount)))
Ok::<_, anyhow::Error>(layer_destination_path)
})
.collect::<FuturesUnordered<_>>();
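The durable_rename sequence spelled out in the comment above (write and fsync the temp file, rename it into place, then fsync the new name and its parent directory) can be sketched in isolation roughly as follows. This is a simplified illustration using tokio::fs, not the pageserver's actual download path:

use std::path::Path;
use anyhow::Context;
use tokio::{fs, io::AsyncWriteExt};

async fn durable_write(final_path: &Path, contents: &[u8]) -> anyhow::Result<()> {
    let tmp_path = final_path.with_extension("temp_download");

    // write(tmp) + fsync(tmp)
    let mut tmp = fs::File::create(&tmp_path).await?;
    tmp.write_all(contents).await?;
    tmp.flush().await?;
    tmp.sync_all().await?;
    drop(tmp);

    // rename(tmp, new) + fsync(new)
    fs::rename(&tmp_path, final_path).await?;
    fs::File::open(final_path).await?.sync_all().await?;

    // fsync(parent), so the rename itself survives a crash
    let parent = final_path.parent().context("path has no parent")?;
    fs::File::open(parent).await?.sync_all().await?;
    Ok(())
}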
@@ -374,12 +324,9 @@ pub(super) async fn download_timeline_layers<'a>(
let mut undo = HashSet::new();
while let Some(download_result) = download_tasks.next().await {
match download_result {
Ok((downloaded_path, metadata)) => {
Ok(downloaded_path) => {
undo.insert(downloaded_path.clone());
download.layers_to_skip.insert(downloaded_path.clone());
// what if the key existed already? ignore, because then we would have
// downloaded a partial file and had to retry
download.gathered_metadata.insert(downloaded_path, metadata);
download.layers_to_skip.insert(downloaded_path);
}
Err(e) => {
errors_happened = true;
@@ -402,8 +349,6 @@ pub(super) async fn download_timeline_layers<'a>(
);
for item in undo {
download.layers_to_skip.remove(&item);
// intentionally don't clear the gathered_metadata because it is kept for the case where
// fsync_path fails on the parent directory
}
errors_happened = true;
}
@@ -508,9 +453,9 @@ mod tests {
let timeline_upload =
create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?;
for local_path in timeline_upload.layers_to_upload.keys() {
for local_path in timeline_upload.layers_to_upload {
let remote_path =
local_storage.resolve_in_storage(&storage.remote_object_id(local_path)?)?;
local_storage.resolve_in_storage(&storage.remote_object_id(&local_path)?)?;
let remote_parent_dir = remote_path.parent().unwrap();
if !remote_parent_dir.exists() {
fs::create_dir_all(&remote_parent_dir).await?;
@@ -528,19 +473,11 @@ mod tests {
let mut remote_timeline = RemoteTimeline::new(metadata.clone());
remote_timeline.awaits_download = true;
remote_timeline.add_timeline_layers(layer_files.iter().map(|layer| {
let layer_path = local_timeline_path.join(layer);
// this could also have been LayerFileMetadata::default(), but since in this test we
// don't do the merge operation done by storage_sync::download_timeline_data, it would
// not be merged back into the timeline.
let metadata_from_upload = timeline_upload
.layers_to_upload
.get(&layer_path)
.expect("layer must exist in previously uploaded paths")
.to_owned();
(layer_path, metadata_from_upload)
}));
remote_timeline.add_timeline_layers(
layer_files
.iter()
.map(|layer| local_timeline_path.join(layer)),
);
let download_data = match download_timeline_layers(
harness.conf,
@@ -550,9 +487,9 @@ mod tests {
sync_id,
SyncData::new(
current_retries,
LayersDownload::from_skipped_layers(HashSet::from([
local_timeline_path.join("layer_to_skip")
])),
LayersDownload {
layers_to_skip: HashSet::from([local_timeline_path.join("layer_to_skip")]),
},
),
)
.await
@@ -615,7 +552,12 @@ mod tests {
&sync_queue,
None,
sync_id,
SyncData::new(0, LayersDownload::from_skipped_layers(HashSet::new())),
SyncData::new(
0,
LayersDownload {
layers_to_skip: HashSet::new(),
},
),
)
.await;
assert!(
@@ -634,7 +576,12 @@ mod tests {
&sync_queue,
Some(&not_expecting_download_remote_timeline),
sync_id,
SyncData::new(0, LayersDownload::from_skipped_layers(HashSet::new())),
SyncData::new(
0,
LayersDownload {
layers_to_skip: HashSet::new(),
},
),
)
.await;
assert!(

View File

@@ -212,8 +212,8 @@ impl RemoteTimelineIndex {
/// Restored index part data about the timeline, stored in the remote index.
#[derive(Debug, Clone)]
pub struct RemoteTimeline {
timeline_layers: HashMap<PathBuf, LayerFileMetadata>,
missing_layers: HashMap<PathBuf, LayerFileMetadata>,
timeline_layers: HashSet<PathBuf>,
missing_layers: HashSet<PathBuf>,
pub metadata: TimelineMetadata,
pub awaits_download: bool,
@@ -222,161 +222,62 @@ pub struct RemoteTimeline {
impl RemoteTimeline {
pub fn new(metadata: TimelineMetadata) -> Self {
Self {
timeline_layers: HashMap::default(),
missing_layers: HashMap::default(),
timeline_layers: HashSet::new(),
missing_layers: HashSet::new(),
metadata,
awaits_download: false,
}
}
pub fn add_timeline_layers(
&mut self,
new_layers: impl IntoIterator<Item = (PathBuf, LayerFileMetadata)>,
) {
self.timeline_layers.extend(new_layers);
pub fn add_timeline_layers(&mut self, new_layers: impl IntoIterator<Item = PathBuf>) {
self.timeline_layers.extend(new_layers.into_iter());
}
pub fn add_upload_failures(
&mut self,
upload_failures: impl IntoIterator<Item = (PathBuf, LayerFileMetadata)>,
) {
self.missing_layers.extend(upload_failures);
pub fn add_upload_failures(&mut self, upload_failures: impl IntoIterator<Item = PathBuf>) {
self.missing_layers.extend(upload_failures.into_iter());
}
pub fn remove_layers(&mut self, layers_to_remove: &HashSet<PathBuf>) {
self.timeline_layers
.retain(|layer, _| !layers_to_remove.contains(layer));
.retain(|layer| !layers_to_remove.contains(layer));
self.missing_layers
.retain(|layer, _| !layers_to_remove.contains(layer));
.retain(|layer| !layers_to_remove.contains(layer));
}
/// Lists all layer files in the given remote timeline. Omits the metadata file.
pub fn stored_files(&self) -> &HashMap<PathBuf, LayerFileMetadata> {
pub fn stored_files(&self) -> &HashSet<PathBuf> {
&self.timeline_layers
}
/// Combines the metadata gathered or verified while downloading the needed layer files into the metadata on
/// the [`RemoteIndex`], so it can be uploaded later.
pub fn merge_metadata_from_downloaded(
&mut self,
downloaded: &HashMap<PathBuf, LayerFileMetadata>,
) {
downloaded.iter().for_each(|(path, metadata)| {
if let Some(upgraded) = self.timeline_layers.get_mut(path) {
upgraded.merge(metadata);
}
});
}
pub fn from_index_part(timeline_path: &Path, index_part: IndexPart) -> anyhow::Result<Self> {
let metadata = TimelineMetadata::from_bytes(&index_part.metadata_bytes)?;
let default_metadata = &IndexLayerMetadata::default();
let find_metadata = |key: &RelativePath| -> LayerFileMetadata {
index_part
.layer_metadata
.get(key)
.unwrap_or(default_metadata)
.into()
};
Ok(Self {
timeline_layers: index_part
.timeline_layers
.iter()
.map(|layer_path| (layer_path.as_path(timeline_path), find_metadata(layer_path)))
.collect(),
missing_layers: index_part
.missing_layers
.iter()
.map(|layer_path| (layer_path.as_path(timeline_path), find_metadata(layer_path)))
.collect(),
timeline_layers: to_local_paths(timeline_path, index_part.timeline_layers),
missing_layers: to_local_paths(timeline_path, index_part.missing_layers),
metadata,
awaits_download: false,
})
}
}
/// Metadata gathered for each of the layer files.
///
/// Fields have to be `Option`s because remote [`IndexPart`]'s can be from different version, which
/// might have less or more metadata depending if upgrading or rolling back an upgrade.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(test, derive(Default))]
pub struct LayerFileMetadata {
file_size: Option<u64>,
}
impl From<&'_ IndexLayerMetadata> for LayerFileMetadata {
fn from(other: &IndexLayerMetadata) -> Self {
LayerFileMetadata {
file_size: other.file_size,
}
}
}
impl LayerFileMetadata {
pub fn new(file_size: u64) -> Self {
LayerFileMetadata {
file_size: Some(file_size),
}
}
pub fn file_size(&self) -> Option<u64> {
self.file_size
}
/// Metadata has holes due to version upgrades. This method is called to upgrade self with the
/// other value.
///
/// This is called on the possibly outdated version.
pub fn merge(&mut self, other: &Self) {
self.file_size = other.file_size.or(self.file_size);
}
}
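To make the semantics of the removed `merge` explicit: the freshly gathered value wins whenever it is present, and the existing value is only kept when the new side carries nothing (`Option::or` with `other` first). A would-be unit test for that behaviour, assuming it lives in the same module so the private field and the test-only `Default` impl are reachable:

#[test]
fn merge_prefers_fresh_metadata() {
    // Upgrading: a stale entry with no recorded size adopts the verified size.
    let mut stale = LayerFileMetadata::default();
    stale.merge(&LayerFileMetadata::new(25_600_000));
    assert_eq!(stale.file_size(), Some(25_600_000));

    // A missing fresh value does not erase an already known size.
    let mut known = LayerFileMetadata::new(123);
    known.merge(&LayerFileMetadata::default());
    assert_eq!(known.file_size(), Some(123));
}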
/// Part of the remote index, corresponding to a certain timeline.
/// Contains the data about all files in the timeline, present remotely and its metadata.
///
/// This type needs to be backwards and forwards compatible. When changing the fields,
/// remember to add a test case for the changed version.
#[serde_as]
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct IndexPart {
/// Debugging aid describing the version of this type.
#[serde(default)]
version: usize,
/// Each of the layers present on remote storage.
///
/// Additional metadata might exist in `layer_metadata`.
timeline_layers: HashSet<RelativePath>,
/// Currently not really used in the pageserver;
/// present to manually keep track of the layer files that the pageserver might never retrieve.
///
/// Such "holes" might appear if any upload task was evicted on an error threshold:
/// this layer will only be rescheduled for upload on pageserver restart.
missing_layers: HashSet<RelativePath>,
/// Per-layer file metadata, which can exist for either a present or a missing layer file.
///
/// Older versions of `IndexPart` will not have this property, or will have only a part of the metadata
/// that the latest version stores.
#[serde(default)]
layer_metadata: HashMap<RelativePath, IndexLayerMetadata>,
#[serde_as(as = "DisplayFromStr")]
disk_consistent_lsn: Lsn,
metadata_bytes: Vec<u8>,
}
impl IndexPart {
/// When adding or modifying any parts of `IndexPart`, increment the version so that it can be
/// used to understand later versions.
///
/// Version is currently informative only.
const LATEST_VERSION: usize = 1;
pub const FILE_NAME: &'static str = "index_part.json";
#[cfg(test)]
@@ -387,10 +288,8 @@ impl IndexPart {
metadata_bytes: Vec<u8>,
) -> Self {
Self {
version: Self::LATEST_VERSION,
timeline_layers,
missing_layers,
layer_metadata: HashMap::default(),
disk_consistent_lsn,
metadata_bytes,
}
@@ -405,68 +304,35 @@ impl IndexPart {
remote_timeline: RemoteTimeline,
) -> anyhow::Result<Self> {
let metadata_bytes = remote_timeline.metadata.to_bytes()?;
let mut layer_metadata = HashMap::new();
let mut missing_layers = HashSet::new();
separate_paths_and_metadata(
timeline_path,
&remote_timeline.missing_layers,
&mut missing_layers,
&mut layer_metadata,
)
.context("Failed to convert missing layers' paths to relative ones")?;
let mut timeline_layers = HashSet::new();
separate_paths_and_metadata(
timeline_path,
&remote_timeline.timeline_layers,
&mut timeline_layers,
&mut layer_metadata,
)
.context("Failed to convert timeline layers' paths to relative ones")?;
Ok(Self {
version: Self::LATEST_VERSION,
timeline_layers,
missing_layers,
layer_metadata,
timeline_layers: to_relative_paths(timeline_path, remote_timeline.timeline_layers)
.context("Failed to convert timeline layers' paths to relative ones")?,
missing_layers: to_relative_paths(timeline_path, remote_timeline.missing_layers)
.context("Failed to convert missing layers' paths to relative ones")?,
disk_consistent_lsn: remote_timeline.metadata.disk_consistent_lsn(),
metadata_bytes,
})
}
}
/// Serialized form of [`LayerFileMetadata`].
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)]
pub struct IndexLayerMetadata {
file_size: Option<u64>,
}
impl From<&'_ LayerFileMetadata> for IndexLayerMetadata {
fn from(other: &'_ LayerFileMetadata) -> Self {
IndexLayerMetadata {
file_size: other.file_size,
}
}
}
fn separate_paths_and_metadata(
fn to_local_paths(
timeline_path: &Path,
input: &HashMap<PathBuf, LayerFileMetadata>,
output: &mut HashSet<RelativePath>,
layer_metadata: &mut HashMap<RelativePath, IndexLayerMetadata>,
) -> anyhow::Result<()> {
for (path, metadata) in input {
let rel_path = RelativePath::new(timeline_path, path)?;
let metadata = IndexLayerMetadata::from(metadata);
paths: impl IntoIterator<Item = RelativePath>,
) -> HashSet<PathBuf> {
paths
.into_iter()
.map(|path| path.as_path(timeline_path))
.collect()
}
layer_metadata.insert(rel_path.clone(), metadata);
output.insert(rel_path);
}
Ok(())
fn to_relative_paths(
timeline_path: &Path,
paths: impl IntoIterator<Item = PathBuf>,
) -> anyhow::Result<HashSet<RelativePath>> {
paths
.into_iter()
.map(|path| RelativePath::new(timeline_path, path))
.collect()
}
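Since the v0/v1 parsing tests are deleted further down, it is worth recalling the mechanism the compatibility note on `IndexPart` relied on: with `#[serde(default)]`, a JSON document written before a field existed still deserializes, and the field takes its default value. A minimal sketch (`VersionedPart` is a hypothetical stand-in, not the real type):

use serde::Deserialize;

#[derive(Deserialize)]
struct VersionedPart {
    #[serde(default)]
    version: usize,
}

fn main() {
    let old: VersionedPart = serde_json::from_str(r#"{}"#).unwrap();
    let new: VersionedPart = serde_json::from_str(r#"{"version":1}"#).unwrap();
    assert_eq!(old.version, 0); // missing field falls back to Default
    assert_eq!(new.version, 1);
}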
#[cfg(test)]
@@ -491,13 +357,13 @@ mod tests {
DEFAULT_PG_VERSION,
);
let remote_timeline = RemoteTimeline {
timeline_layers: HashMap::from([
(timeline_path.join("layer_1"), LayerFileMetadata::new(1)),
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)),
timeline_layers: HashSet::from([
timeline_path.join("layer_1"),
timeline_path.join("layer_2"),
]),
missing_layers: HashMap::from([
(timeline_path.join("missing_1"), LayerFileMetadata::new(3)),
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)),
missing_layers: HashSet::from([
timeline_path.join("missing_1"),
timeline_path.join("missing_2"),
]),
metadata: metadata.clone(),
awaits_download: false,
@@ -619,13 +485,13 @@ mod tests {
let conversion_result = IndexPart::from_remote_timeline(
&timeline_path,
RemoteTimeline {
timeline_layers: HashMap::from([
(PathBuf::from("bad_path"), LayerFileMetadata::new(1)),
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)),
timeline_layers: HashSet::from([
PathBuf::from("bad_path"),
timeline_path.join("layer_2"),
]),
missing_layers: HashMap::from([
(timeline_path.join("missing_1"), LayerFileMetadata::new(3)),
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)),
missing_layers: HashSet::from([
timeline_path.join("missing_1"),
timeline_path.join("missing_2"),
]),
metadata: metadata.clone(),
awaits_download: false,
@@ -636,13 +502,13 @@ mod tests {
let conversion_result = IndexPart::from_remote_timeline(
&timeline_path,
RemoteTimeline {
timeline_layers: HashMap::from([
(timeline_path.join("layer_1"), LayerFileMetadata::new(1)),
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)),
timeline_layers: HashSet::from([
timeline_path.join("layer_1"),
timeline_path.join("layer_2"),
]),
missing_layers: HashMap::from([
(PathBuf::from("bad_path"), LayerFileMetadata::new(3)),
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)),
missing_layers: HashSet::from([
PathBuf::from("bad_path"),
timeline_path.join("missing_2"),
]),
metadata,
awaits_download: false,
@@ -650,63 +516,4 @@ mod tests {
);
assert!(conversion_result.is_err(), "Should not be able to convert metadata with missing layer paths that are not in the timeline directory");
}
#[test]
fn v0_indexpart_is_parsed() {
let example = r#"{
"timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
"missing_layers":["not_a_real_layer_but_adding_coverage"],
"disk_consistent_lsn":"0/16960E8",
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
}"#;
let expected = IndexPart {
version: 0,
timeline_layers: [RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned())].into_iter().collect(),
missing_layers: [RelativePath("not_a_real_layer_but_adding_coverage".to_owned())].into_iter().collect(),
layer_metadata: HashMap::default(),
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
metadata_bytes: [113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0].to_vec(),
};
let part = serde_json::from_str::<IndexPart>(example).unwrap();
assert_eq!(part, expected);
}
#[test]
fn v1_indexpart_is_parsed() {
let example = r#"{
"version":1,
"timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
"missing_layers":["not_a_real_layer_but_adding_coverage"],
"layer_metadata":{
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
"not_a_real_layer_but_adding_coverage": { "file_size": 9007199254741001 }
},
"disk_consistent_lsn":"0/16960E8",
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
}"#;
let expected = IndexPart {
// note: this is not verified, could be anything, but exists for humans when debugging... could be the git version instead?
version: 1,
timeline_layers: [RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned())].into_iter().collect(),
missing_layers: [RelativePath("not_a_real_layer_but_adding_coverage".to_owned())].into_iter().collect(),
layer_metadata: HashMap::from([
(RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned()), IndexLayerMetadata {
file_size: Some(25600000),
}),
(RelativePath("not_a_real_layer_but_adding_coverage".to_owned()), IndexLayerMetadata {
// serde_json should always parse this but this might be a double with jq for
// example.
file_size: Some(9007199254741001),
})
]),
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
metadata_bytes: [113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0].to_vec(),
};
let part = serde_json::from_str::<IndexPart>(example).unwrap();
assert_eq!(part, expected);
}
}
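The two tests above pin the on-disk index_part.json format across versions: a v0 file carries no `version` or `layer_metadata` fields, while a v1 file has both. A minimal sketch of how that kind of backward-compatible parsing can be modelled with serde defaults is shown below; the struct and field names are simplified stand-ins, not the real `IndexPart` definition, and it assumes the serde and serde_json crates.

use std::collections::HashMap;

use serde::Deserialize;

// Hypothetical, simplified index file: older (v0) files omit `version` and
// `layer_metadata`, so both fall back to their defaults when absent.
#[derive(Debug, Deserialize)]
struct IndexFile {
    #[serde(default)] // missing in v0 files, deserializes to 0
    version: u32,
    timeline_layers: Vec<String>,
    #[serde(default)] // missing in v0 files, deserializes to an empty map
    layer_metadata: HashMap<String, u64>,
}

fn main() {
    let v0 = r#"{"timeline_layers":["layer_a"]}"#;
    let v1 = r#"{"version":1,"timeline_layers":["layer_a"],"layer_metadata":{"layer_a":25600000}}"#;

    let old: IndexFile = serde_json::from_str(v0).unwrap();
    let new: IndexFile = serde_json::from_str(v1).unwrap();
    assert_eq!(old.version, 0);
    assert_eq!(new.layer_metadata["layer_a"], 25_600_000);
}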

View File

@@ -69,25 +69,14 @@ pub(super) async fn upload_timeline_layers<'a>(
.map(|meta| meta.disk_consistent_lsn());
let already_uploaded_layers = remote_timeline
.map(|timeline| {
timeline
.stored_files()
.keys()
.cloned()
.collect::<std::collections::HashSet<_>>()
})
.map(|timeline| timeline.stored_files())
.cloned()
.unwrap_or_default();
let layers_to_upload = upload
.layers_to_upload
.iter()
.filter_map(|(k, v)| {
if !already_uploaded_layers.contains(k) {
Some((k.to_owned(), v.to_owned()))
} else {
None
}
})
.difference(&already_uploaded_layers)
.cloned()
.collect::<Vec<_>>();
if layers_to_upload.is_empty() {
@@ -109,7 +98,7 @@ pub(super) async fn upload_timeline_layers<'a>(
let mut upload_tasks = layers_to_upload
.into_iter()
.map(|(source_path, known_metadata)| async move {
.map(|source_path| async move {
let source_file = match fs::File::open(&source_path).await.with_context(|| {
format!(
"Failed to upen a source file for layer '{}'",
@@ -120,7 +109,7 @@ pub(super) async fn upload_timeline_layers<'a>(
Err(e) => return Err(UploadError::MissingLocalFile(source_path, e)),
};
let fs_size = source_file
let source_size = source_file
.metadata()
.await
.with_context(|| {
@@ -130,24 +119,10 @@ pub(super) async fn upload_timeline_layers<'a>(
)
})
.map_err(UploadError::Other)?
.len();
// FIXME: this looks bad
if let Some(metadata_size) = known_metadata.file_size() {
if metadata_size != fs_size {
return Err(UploadError::Other(anyhow::anyhow!(
"File {source_path:?} has its current FS size {fs_size} diferent from initially determined {metadata_size}"
)));
}
} else {
// this is a silly state we would like to avoid
}
let fs_size = usize::try_from(fs_size).with_context(|| format!("File {source_path:?} size {fs_size} could not be converted to usize"))
.map_err(UploadError::Other)?;
.len() as usize;
match storage
.upload_storage_object(Box::new(source_file), fs_size, &source_path)
.upload_storage_object(Box::new(source_file), source_size, &source_path)
.await
.with_context(|| format!("Failed to upload layer file for {sync_id}"))
{
@@ -161,11 +136,8 @@ pub(super) async fn upload_timeline_layers<'a>(
while let Some(upload_result) = upload_tasks.next().await {
match upload_result {
Ok(uploaded_path) => {
let metadata = upload
.layers_to_upload
.remove(&uploaded_path)
.expect("metadata should always exist, assuming no double uploads");
upload.uploaded_layers.insert(uploaded_path, metadata);
upload.layers_to_upload.remove(&uploaded_path);
upload.uploaded_layers.insert(uploaded_path);
}
Err(e) => match e {
UploadError::Other(e) => {
@@ -290,7 +262,7 @@ mod tests {
assert_eq!(
upload
.uploaded_layers
.keys()
.iter()
.cloned()
.collect::<BTreeSet<_>>(),
layer_files
@@ -385,7 +357,7 @@ mod tests {
assert_eq!(
upload
.uploaded_layers
.keys()
.iter()
.cloned()
.collect::<BTreeSet<_>>(),
layer_files
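The upload hunks above replace a per-key filter over a HashMap with a plain set difference between the layers scheduled for upload and the layers already present in remote storage. A self-contained sketch of that pattern, with made-up layer names, looks like this:

use std::collections::HashSet;
use std::path::PathBuf;

fn main() {
    let scheduled: HashSet<PathBuf> = ["layer_1", "layer_2", "layer_3"]
        .into_iter()
        .map(PathBuf::from)
        .collect();
    let already_uploaded: HashSet<PathBuf> =
        ["layer_2"].into_iter().map(PathBuf::from).collect();

    // Only layers that are not yet in remote storage are uploaded.
    let layers_to_upload: Vec<PathBuf> = scheduled
        .difference(&already_uploaded)
        .cloned()
        .collect();

    assert_eq!(layers_to_upload.len(), 2);
}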

View File

@@ -14,7 +14,7 @@
use anyhow::{bail, ensure, Context, Result};
use tokio::sync::watch;
use tracing::*;
use utils::crashsafe::path_with_suffix_extension;
use utils::crashsafe_dir::path_with_suffix_extension;
use std::cmp::min;
use std::collections::hash_map::Entry;
@@ -23,12 +23,10 @@ use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::fs::OpenOptions;
use std::io;
use std::io::Write;
use std::num::NonZeroU64;
use std::ops::Bound::Included;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::process::Stdio;
use std::sync::Arc;
@@ -51,7 +49,7 @@ pub use pageserver_api::models::TenantState;
use toml_edit;
use utils::{
crashsafe,
crashsafe_dir,
id::{TenantId, TimelineId},
lsn::{Lsn, RecordLsn},
};
@@ -61,14 +59,13 @@ pub mod block_io;
mod delta_layer;
mod disk_btree;
pub(crate) mod ephemeral_file;
pub mod filename;
mod filename;
mod image_layer;
mod inmemory_layer;
pub mod layer_map;
mod layer_map;
pub mod metadata;
mod par_fsync;
pub mod storage_layer;
mod storage_layer;
mod timeline;
@@ -122,216 +119,6 @@ pub struct Tenant {
upload_layers: bool,
}
/// A timeline with some of its files on disk, being initialized.
/// This struct ensures the atomicity of the timeline init: it's either properly created and inserted into pageserver's memory, or
/// its local files are removed. In the worst case of a crash, an uninit mark file is left behind, which causes the directory
/// to be removed on next restart.
///
/// The caller is responsible for filling the timeline with data before the final init.
#[must_use]
pub struct UninitializedTimeline<'t> {
owning_tenant: &'t Tenant,
timeline_id: TimelineId,
raw_timeline: Option<(Timeline, TimelineUninitMark)>,
}
/// An uninit mark file, created along the timeline dir to ensure the timeline either gets fully initialized and loaded into pageserver's memory,
/// or gets removed eventually.
///
/// XXX: it's important to create it near the timeline dir, not inside it, to ensure the timeline dir gets removed first.
#[must_use]
struct TimelineUninitMark {
uninit_mark_deleted: bool,
uninit_mark_path: PathBuf,
timeline_path: PathBuf,
}
impl UninitializedTimeline<'_> {
/// Ensures timeline data is valid, loads it into pageserver's memory and removes uninit mark file on success.
pub fn initialize(self) -> anyhow::Result<Arc<Timeline>> {
let mut timelines = self.owning_tenant.timelines.lock().unwrap();
self.initialize_with_lock(&mut timelines, true)
}
fn initialize_with_lock(
mut self,
timelines: &mut HashMap<TimelineId, Arc<Timeline>>,
load_layer_map: bool,
) -> anyhow::Result<Arc<Timeline>> {
let timeline_id = self.timeline_id;
let tenant_id = self.owning_tenant.tenant_id;
let (new_timeline, uninit_mark) = self.raw_timeline.take().with_context(|| {
format!("No timeline for initalization found for {tenant_id}/{timeline_id}")
})?;
let new_timeline = Arc::new(new_timeline);
let new_disk_consistent_lsn = new_timeline.get_disk_consistent_lsn();
// TODO it would be good to ensure that, but apparently a lot of our testing is dependent on that at least
// ensure!(new_disk_consistent_lsn.is_valid(),
// "Timeline {tenant_id}/{timeline_id} has invalid disk_consistent_lsn and cannot be initialized");
match timelines.entry(timeline_id) {
Entry::Occupied(_) => anyhow::bail!(
"Found freshly initialized timeline {tenant_id}/{timeline_id} in the tenant map"
),
Entry::Vacant(v) => {
if load_layer_map {
new_timeline
.load_layer_map(new_disk_consistent_lsn)
.with_context(|| {
format!(
"Failed to load layermap for timeline {tenant_id}/{timeline_id}"
)
})?;
}
uninit_mark.remove_uninit_mark().with_context(|| {
format!(
"Failed to remove uninit mark file for timeline {tenant_id}/{timeline_id}"
)
})?;
v.insert(Arc::clone(&new_timeline));
new_timeline.launch_wal_receiver().with_context(|| {
format!("Failed to launch walreceiver for timeline {tenant_id}/{timeline_id}")
})?;
}
}
Ok(new_timeline)
}
/// Prepares timeline data by loading it from the basebackup archive.
pub fn import_basebackup_from_tar(
&self,
reader: impl std::io::Read,
base_lsn: Lsn,
) -> anyhow::Result<()> {
let raw_timeline = self.raw_timeline()?;
import_datadir::import_basebackup_from_tar(raw_timeline, reader, base_lsn).with_context(
|| {
format!(
"Failed to import basebackup for timeline {}/{}",
self.owning_tenant.tenant_id, self.timeline_id
)
},
)?;
fail::fail_point!("before-checkpoint-new-timeline", |_| {
bail!("failpoint before-checkpoint-new-timeline");
});
raw_timeline
.checkpoint(CheckpointConfig::Flush)
.with_context(|| {
format!(
"Failed to checkpoint after basebackup import for timeline {}/{}",
self.owning_tenant.tenant_id, self.timeline_id
)
})?;
Ok(())
}
fn raw_timeline(&self) -> anyhow::Result<&Timeline> {
Ok(&self
.raw_timeline
.as_ref()
.with_context(|| {
format!(
"No raw timeline {}/{} found",
self.owning_tenant.tenant_id, self.timeline_id
)
})?
.0)
}
}
impl Drop for UninitializedTimeline<'_> {
fn drop(&mut self) {
if let Some((_, uninit_mark)) = self.raw_timeline.take() {
let _entered = info_span!("drop_uninitialized_timeline", tenant = %self.owning_tenant.tenant_id, timeline = %self.timeline_id).entered();
error!("Timeline got dropped without initializing, cleaning its files");
cleanup_timeline_directory(uninit_mark);
}
}
}
fn cleanup_timeline_directory(uninit_mark: TimelineUninitMark) {
let timeline_path = &uninit_mark.timeline_path;
match ignore_absent_files(|| fs::remove_dir_all(timeline_path)) {
Ok(()) => {
info!("Timeline dir {timeline_path:?} removed successfully, removing the uninit mark")
}
Err(e) => {
error!("Failed to clean up uninitialized timeline directory {timeline_path:?}: {e:?}")
}
}
drop(uninit_mark); // mark handles its deletion on drop, gets retained if timeline dir exists
}
impl TimelineUninitMark {
/// Useful for initializing timelines that already exist on disk after a restart.
pub fn dummy() -> Self {
Self {
uninit_mark_deleted: true,
uninit_mark_path: PathBuf::new(),
timeline_path: PathBuf::new(),
}
}
fn new(uninit_mark_path: PathBuf, timeline_path: PathBuf) -> Self {
Self {
uninit_mark_deleted: false,
uninit_mark_path,
timeline_path,
}
}
fn remove_uninit_mark(mut self) -> anyhow::Result<()> {
if !self.uninit_mark_deleted {
self.delete_mark_file_if_present()?;
}
Ok(())
}
fn delete_mark_file_if_present(&mut self) -> Result<(), anyhow::Error> {
let uninit_mark_file = &self.uninit_mark_path;
let uninit_mark_parent = uninit_mark_file
.parent()
.with_context(|| format!("Uninit mark file {uninit_mark_file:?} has no parent"))?;
ignore_absent_files(|| fs::remove_file(&uninit_mark_file)).with_context(|| {
format!("Failed to remove uninit mark file at path {uninit_mark_file:?}")
})?;
crashsafe::fsync(uninit_mark_parent).context("Failed to fsync uninit mark parent")?;
self.uninit_mark_deleted = true;
Ok(())
}
}
impl Drop for TimelineUninitMark {
fn drop(&mut self) {
if !self.uninit_mark_deleted {
if self.timeline_path.exists() {
error!(
"Uninit mark {} is not removed, timeline {} stays uninitialized",
self.uninit_mark_path.display(),
self.timeline_path.display()
)
} else {
// unblock later timeline creation attempts
warn!(
"Removing intermediate uninit mark file {}",
self.uninit_mark_path.display()
);
if let Err(e) = self.delete_mark_file_if_present() {
error!("Failed to remove the uninit mark file: {e}")
}
}
}
}
}
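The `UninitializedTimeline`/`TimelineUninitMark` machinery removed here implements a marker-file pattern for crash-safe initialization: create a mark next to the directory being built, do the work, and delete the mark only once initialization fully succeeds, so a leftover mark after a crash tells the next startup to discard the half-built directory. A generic sketch of that idea, with hypothetical paths and without the fsyncs the real code performs, could look like this:

use std::fs;
use std::io;
use std::path::Path;

// Hypothetical helper: create `<dir>.uninit` next to `dir`, run `init`, and
// remove the marker only if `init` succeeded. A leftover marker on the next
// startup means `dir` should be deleted and recreated.
fn init_with_uninit_mark(
    dir: &Path,
    init: impl FnOnce(&Path) -> io::Result<()>,
) -> io::Result<()> {
    let mark = dir.with_extension("uninit");
    fs::File::create(&mark)?; // marker lives outside the dir, so the dir can be removed first
    fs::create_dir_all(dir)?;
    match init(dir) {
        Ok(()) => {
            fs::remove_file(&mark)?; // success: no marker means the dir is initialized
            Ok(())
        }
        Err(e) => {
            let _ = fs::remove_dir_all(dir); // best-effort cleanup; the marker stays if this fails
            let _ = fs::remove_file(&mark);
            Err(e)
        }
    }
}

fn main() -> io::Result<()> {
    // Made-up directory and file names, purely for illustration.
    init_with_uninit_mark(Path::new("new_timeline_dir"), |d| {
        fs::write(d.join("metadata"), b"stub")
    })
}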
/// A repository corresponds to one .neon directory. One repository holds multiple
/// timelines, forked off from the same initial call to 'initdb'.
impl Tenant {
@@ -357,27 +144,35 @@ impl Tenant {
/// Lists timelines the tenant contains.
/// Up to the tenant's implementation to omit certain timelines that are not considered ready for use.
pub fn list_timelines(&self) -> Vec<Arc<Timeline>> {
pub fn list_timelines(&self) -> Vec<(TimelineId, Arc<Timeline>)> {
self.timelines
.lock()
.unwrap()
.values()
.map(Arc::clone)
.iter()
.map(|(timeline_id, timeline_entry)| (*timeline_id, Arc::clone(timeline_entry)))
.collect()
}
/// This is used to create the initial 'main' timeline during bootstrapping,
/// or when importing a new base backup. The caller is expected to load an
/// initial image of the datadir to the new timeline after this.
/// Create a new, empty timeline. The caller is responsible for loading data into it
/// Initdb lsn is provided for timeline impl to be able to perform checks for some operations against it.
pub fn create_empty_timeline(
&self,
new_timeline_id: TimelineId,
initdb_lsn: Lsn,
pg_version: u32,
) -> anyhow::Result<UninitializedTimeline> {
let timelines = self.timelines.lock().unwrap();
let timeline_uninit_mark = self.create_timeline_uninit_mark(new_timeline_id, &timelines)?;
drop(timelines);
) -> Result<Arc<Timeline>> {
// XXX: keep the lock to avoid races during timeline creation
let mut timelines = self.timelines.lock().unwrap();
anyhow::ensure!(
timelines.get(&new_timeline_id).is_none(),
"Timeline {new_timeline_id} already exists"
);
let timeline_path = self.conf.timeline_path(&new_timeline_id, &self.tenant_id);
if timeline_path.exists() {
bail!("Timeline directory already exists, but timeline is missing in repository map. This is a bug.")
}
let new_metadata = TimelineMetadata::new(
Lsn(0),
@@ -388,13 +183,11 @@ impl Tenant {
initdb_lsn,
pg_version,
);
self.prepare_timeline(
new_timeline_id,
new_metadata,
timeline_uninit_mark,
true,
None,
)
let new_timeline =
self.create_initialized_timeline(new_timeline_id, new_metadata, &mut timelines)?;
new_timeline.layers.write().unwrap().next_open_layer_at = Some(initdb_lsn);
Ok(new_timeline)
}
/// Create a new timeline.
@@ -410,10 +203,14 @@ impl Tenant {
ancestor_timeline_id: Option<TimelineId>,
mut ancestor_start_lsn: Option<Lsn>,
pg_version: u32,
) -> anyhow::Result<Option<Arc<Timeline>>> {
) -> Result<Option<Arc<Timeline>>> {
let new_timeline_id = new_timeline_id.unwrap_or_else(TimelineId::generate);
if self.get_timeline(new_timeline_id).is_ok() {
if self
.conf
.timeline_path(&new_timeline_id, &self.tenant_id)
.exists()
{
debug!("timeline {new_timeline_id} already exists");
return Ok(None);
}
@@ -548,7 +345,7 @@ impl Tenant {
ensure!(
!children_exist,
"Cannot delete timeline which has child timelines"
"Cannot detach timeline which has child timelines"
);
let timeline_entry = match timelines.entry(timeline_id) {
Entry::Occupied(e) => e,
@@ -592,32 +389,21 @@ impl Tenant {
timeline_id,
metadata.pg_version()
);
if timelines_accessor.contains_key(&timeline_id) {
warn!(
let ancestor = metadata
.ancestor_timeline()
.and_then(|ancestor_timeline_id| timelines_accessor.get(&ancestor_timeline_id))
.cloned();
match timelines_accessor.entry(timeline_id) {
Entry::Occupied(_) => warn!(
"Timeline {}/{} already exists in the tenant map, skipping its initialization",
self.tenant_id, timeline_id
);
continue;
} else {
let ancestor = metadata
.ancestor_timeline()
.and_then(|ancestor_timeline_id| timelines_accessor.get(&ancestor_timeline_id))
.cloned();
let timeline = UninitializedTimeline {
owning_tenant: self,
timeline_id,
raw_timeline: Some((
self.create_timeline_data(timeline_id, metadata, ancestor)
.with_context(|| {
format!("Failed to initialize timeline {timeline_id}")
})?,
TimelineUninitMark::dummy(),
)),
};
let initialized_timeline =
timeline.initialize_with_lock(&mut timelines_accessor, true)?;
timelines_accessor.insert(timeline_id, initialized_timeline);
),
Entry::Vacant(v) => {
let timeline = self
.initialize_new_timeline(timeline_id, metadata, ancestor)
.with_context(|| format!("Failed to initialize timeline {timeline_id}"))?;
v.insert(timeline);
}
}
}
@@ -655,42 +441,22 @@ impl Tenant {
}
pub fn set_state(&self, new_state: TenantState) {
self.set_state_with(|_| Some(new_state));
}
pub fn set_state_with<F>(&self, f: F) -> bool
where
F: FnOnce(&mut TenantState) -> Option<TenantState>,
{
let modify = |old_state: &mut TenantState| {
let new_state = match f(old_state) {
None => return false,
Some(new_state) => new_state,
};
match (old_state, new_state) {
(equal_state_1, equal_state_2) if equal_state_1 == &equal_state_2 => {
debug!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
false
}
(TenantState::Broken, _) => {
error!("Ignoring state update {new_state:?} for broken tenant");
false
}
(old_state, new_state) => {
*old_state = new_state;
true
match (self.current_state(), new_state) {
(equal_state_1, equal_state_2) if equal_state_1 == equal_state_2 => {
debug!("Ignoring new state, equal to the existing one: {equal_state_2:?}");
}
(TenantState::Broken, _) => {
error!("Ignoring state update {new_state:?} for broken tenant");
}
(_, new_state) => {
self.state.send_replace(new_state);
if self.should_run_tasks() {
// Spawn gc and compaction loops. The loops will shut themselves
// down when they notice that the tenant is inactive.
crate::tenant_tasks::start_background_loops(self.tenant_id);
}
}
};
let modified = self.state.send_if_modified(modify);
if modified && self.should_run_tasks() {
// Spawn gc and compaction loops. The loops will shut themselves
// down when they notice that the tenant is inactive.
crate::tenant_tasks::start_background_loops(self.tenant_id);
}
modified
}
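The removed `set_state_with` builds on `tokio::sync::watch::Sender::send_if_modified`, which runs a closure against the current value and only notifies subscribers when the closure reports that it actually changed it. A small sketch of that pattern, using a made-up two-state enum rather than `TenantState`:

use tokio::sync::watch;

#[derive(Debug, PartialEq)]
enum State {
    Active,
    Broken,
}

fn main() {
    let (tx, rx) = watch::channel(State::Active);

    // The closure mutates the value in place and returns whether it changed;
    // subscribers are only woken up when it returns true.
    let modified = tx.send_if_modified(|state| {
        if *state == State::Broken {
            false // never transition out of Broken
        } else {
            *state = State::Broken;
            true
        }
    });

    assert!(modified);
    assert!(rx.has_changed().unwrap());
}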
pub fn subscribe_for_state_updates(&self) -> watch::Receiver<TenantState> {
@@ -831,12 +597,12 @@ impl Tenant {
self.tenant_conf.write().unwrap().update(&new_tenant_conf);
}
fn create_timeline_data(
fn initialize_new_timeline(
&self,
new_timeline_id: TimelineId,
new_metadata: TimelineMetadata,
ancestor: Option<Arc<Timeline>>,
) -> anyhow::Result<Timeline> {
) -> anyhow::Result<Arc<Timeline>> {
if let Some(ancestor_timeline_id) = new_metadata.ancestor_timeline() {
anyhow::ensure!(
ancestor.is_some(),
@@ -844,8 +610,9 @@ impl Tenant {
)
}
let new_disk_consistent_lsn = new_metadata.disk_consistent_lsn();
let pg_version = new_metadata.pg_version();
Ok(Timeline::new(
let new_timeline = Arc::new(Timeline::new(
self.conf,
Arc::clone(&self.tenant_conf),
new_metadata,
@@ -855,7 +622,15 @@ impl Tenant {
Arc::clone(&self.walredo_mgr),
self.upload_layers,
pg_version,
))
));
new_timeline
.load_layer_map(new_disk_consistent_lsn)
.context("failed to load layermap")?;
new_timeline.launch_wal_receiver()?;
Ok(new_timeline)
}
pub fn new(
@@ -1131,20 +906,16 @@ impl Tenant {
Ok(totals)
}
/// Branch an existing timeline
fn branch_timeline(
&self,
src: TimelineId,
dst: TimelineId,
start_lsn: Option<Lsn>,
) -> anyhow::Result<Arc<Timeline>> {
) -> Result<Arc<Timeline>> {
// We need to hold this lock to prevent GC from starting at the same time. GC scans the directory to learn
// about timelines, so otherwise a race condition is possible, where we create new timeline and GC
// concurrently removes data that is needed by the new timeline.
let _gc_cs = self.gc_cs.lock().unwrap();
let timelines = self.timelines.lock().unwrap();
let timeline_uninit_mark = self.create_timeline_uninit_mark(dst, &timelines)?;
drop(timelines);
// In order for the branch creation task to not wait for GC/compaction,
// we need to make sure that the starting LSN of the child branch is not out of scope midway by
@@ -1155,12 +926,12 @@ impl Tenant {
// Step 2 is to avoid initializing the new branch using data removed by past GC iterations
// or in-queue GC iterations.
let src_timeline = self.get_timeline(src).with_context(|| {
format!(
"No ancestor {} found for timeline {}/{}",
src, self.tenant_id, dst
)
})?;
// XXX: keep the lock to avoid races during timeline creation
let mut timelines = self.timelines.lock().unwrap();
let src_timeline = timelines
.get(&src)
// message about timeline being remote is one .context up in the stack
.ok_or_else(|| anyhow::anyhow!("unknown timeline id: {src}"))?;
let latest_gc_cutoff_lsn = src_timeline.get_latest_gc_cutoff_lsn();
@@ -1210,21 +981,11 @@ impl Tenant {
dst_prev,
Some(src),
start_lsn,
*src_timeline.latest_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer?
*src_timeline.latest_gc_cutoff_lsn.read(),
src_timeline.initdb_lsn,
src_timeline.pg_version,
);
let mut timelines = self.timelines.lock().unwrap();
let new_timeline = self
.prepare_timeline(
dst,
metadata,
timeline_uninit_mark,
false,
Some(src_timeline),
)?
.initialize_with_lock(&mut timelines, true)?;
drop(timelines);
let new_timeline = self.create_initialized_timeline(dst, metadata, &mut timelines)?;
info!("branched timeline {dst} from {src} at {start_lsn}");
Ok(new_timeline)
@@ -1236,10 +997,7 @@ impl Tenant {
&self,
timeline_id: TimelineId,
pg_version: u32,
) -> anyhow::Result<Arc<Timeline>> {
let timelines = self.timelines.lock().unwrap();
let timeline_uninit_mark = self.create_timeline_uninit_mark(timeline_id, &timelines)?;
drop(timelines);
) -> Result<Arc<Timeline>> {
// create a `tenant/{tenant_id}/timelines/basebackup-{timeline_id}.{TEMP_FILE_SUFFIX}/`
// temporary directory for basebackup files for the given timeline.
let initdb_path = path_with_suffix_extension(
@@ -1249,65 +1007,24 @@ impl Tenant {
TEMP_FILE_SUFFIX,
);
// an uninit mark was placed before, so nothing else can access this timeline's files
// the current initdb has not run yet, so remove whatever was left from previous runs
if initdb_path.exists() {
fs::remove_dir_all(&initdb_path).with_context(|| {
format!(
"Failed to remove already existing initdb directory: {}",
initdb_path.display()
)
})?;
}
// Init a temporary repo to get bootstrap data; this creates a directory at `initdb_path`
// Init a temporary repo to get bootstrap data
run_initdb(self.conf, &initdb_path, pg_version)?;
// this new directory is only temporary; schedule its removal immediately after bootstrap, since we don't need it afterwards
scopeguard::defer! {
if let Err(e) = fs::remove_dir_all(&initdb_path) {
// this is unlikely, but we will remove the directory on pageserver restart or another bootstrap call
error!("Failed to remove temporary initdb directory '{}': {}", initdb_path.display(), e);
}
}
let pgdata_path = &initdb_path;
let pgdata_lsn = import_datadir::get_lsn_from_controlfile(pgdata_path)?.align();
let pgdata_path = initdb_path;
let lsn = import_datadir::get_lsn_from_controlfile(&pgdata_path)?.align();
// Import the contents of the data directory at the initial checkpoint
// LSN, and any WAL after that.
// Initdb lsn will be equal to last_record_lsn, which will be set after import.
// Because we know it upfront, we avoid an Option or a dummy zero value by passing it to the metadata.
let new_metadata = TimelineMetadata::new(
Lsn(0),
None,
None,
Lsn(0),
pgdata_lsn,
pgdata_lsn,
pg_version,
);
let raw_timeline =
self.prepare_timeline(timeline_id, new_metadata, timeline_uninit_mark, true, None)?;
let tenant_id = raw_timeline.owning_tenant.tenant_id;
let unfinished_timeline = raw_timeline.raw_timeline()?;
import_datadir::import_timeline_from_postgres_datadir(
unfinished_timeline,
pgdata_path,
pgdata_lsn,
)
.with_context(|| {
format!("Failed to import pgdatadir for timeline {tenant_id}/{timeline_id}")
})?;
// Because we know it upfront, we avoid an Option or a dummy zero value by passing it to create_empty_timeline.
let timeline = self.create_empty_timeline(timeline_id, lsn, pg_version)?;
import_datadir::import_timeline_from_postgres_datadir(&pgdata_path, &*timeline, lsn)?;
fail::fail_point!("before-checkpoint-new-timeline", |_| {
anyhow::bail!("failpoint before-checkpoint-new-timeline");
bail!("failpoint before-checkpoint-new-timeline");
});
unfinished_timeline
.checkpoint(CheckpointConfig::Forced)
.with_context(|| format!("Failed to checkpoint after pgdatadir import for timeline {tenant_id}/{timeline_id}"))?;
let mut timelines = self.timelines.lock().unwrap();
let timeline = raw_timeline.initialize_with_lock(&mut timelines, false)?;
drop(timelines);
timeline.checkpoint(CheckpointConfig::Forced)?;
info!(
"created root timeline {} timeline.lsn {}",
@@ -1315,65 +1032,25 @@ impl Tenant {
timeline.get_last_record_lsn()
);
// Remove temp dir. We don't need it anymore
fs::remove_dir_all(pgdata_path)?;
Ok(timeline)
}
/// Creates the intermediate timeline structure and its files, without loading it into memory.
/// It's up to the caller to import the necessary data and load the timeline into memory.
fn prepare_timeline(
fn create_initialized_timeline(
&self,
new_timeline_id: TimelineId,
new_metadata: TimelineMetadata,
uninit_mark: TimelineUninitMark,
init_layers: bool,
ancestor: Option<Arc<Timeline>>,
) -> anyhow::Result<UninitializedTimeline> {
let tenant_id = self.tenant_id;
match self.create_timeline_files(
&uninit_mark.timeline_path,
new_timeline_id,
new_metadata,
ancestor,
) {
Ok(new_timeline) => {
if init_layers {
new_timeline.layers.write().unwrap().next_open_layer_at =
Some(new_timeline.initdb_lsn);
}
debug!(
"Successfully created initial files for timeline {tenant_id}/{new_timeline_id}"
);
Ok(UninitializedTimeline {
owning_tenant: self,
timeline_id: new_timeline_id,
raw_timeline: Some((new_timeline, uninit_mark)),
})
}
Err(e) => {
error!("Failed to create initial files for timeline {tenant_id}/{new_timeline_id}, cleaning up: {e:?}");
cleanup_timeline_directory(uninit_mark);
Err(e)
}
}
}
fn create_timeline_files(
&self,
timeline_path: &Path,
new_timeline_id: TimelineId,
new_metadata: TimelineMetadata,
ancestor: Option<Arc<Timeline>>,
) -> anyhow::Result<Timeline> {
let timeline_data = self
.create_timeline_data(new_timeline_id, new_metadata.clone(), ancestor)
.context("Failed to create timeline data structure")?;
crashsafe::create_dir_all(timeline_path).context("Failed to create timeline directory")?;
fail::fail_point!("after-timeline-uninit-mark-creation", |_| {
anyhow::bail!("failpoint after-timeline-uninit-mark-creation");
});
timelines: &mut MutexGuard<HashMap<TimelineId, Arc<Timeline>>>,
) -> Result<Arc<Timeline>> {
crashsafe_dir::create_dir_all(self.conf.timeline_path(&new_timeline_id, &self.tenant_id))
.with_context(|| {
format!(
"Failed to create timeline {}/{} directory",
new_timeline_id, self.tenant_id
)
})?;
save_metadata(
self.conf,
new_timeline_id,
@@ -1381,49 +1058,37 @@ impl Tenant {
&new_metadata,
true,
)
.context("Failed to create timeline metadata")?;
.with_context(|| {
format!(
"Failed to create timeline {}/{} metadata",
new_timeline_id, self.tenant_id
)
})?;
Ok(timeline_data)
}
/// Attempts to create an uninit mark file for the timeline initialization.
/// Bails if the timeline is already loaded into memory (i.e. initialized before) or the uninit mark file already exists.
///
/// This way, we need to hold the timelines lock only for a small amount of time during the mark check/creation per timeline init.
fn create_timeline_uninit_mark(
&self,
timeline_id: TimelineId,
timelines: &MutexGuard<HashMap<TimelineId, Arc<Timeline>>>,
) -> anyhow::Result<TimelineUninitMark> {
let tenant_id = self.tenant_id;
anyhow::ensure!(
timelines.get(&timeline_id).is_none(),
"Timeline {tenant_id}/{timeline_id} already exists in pageserver's memory"
);
let timeline_path = self.conf.timeline_path(&timeline_id, &tenant_id);
anyhow::ensure!(
!timeline_path.exists(),
"Timeline {} already exists, cannot create its uninit mark file",
timeline_path.display()
);
let uninit_mark_path = self
.conf
.timeline_uninit_mark_file_path(tenant_id, timeline_id);
fs::File::create(&uninit_mark_path)
.context("Failed to create uninit mark file")
.and_then(|_| {
crashsafe::fsync_file_and_parent(&uninit_mark_path)
.context("Failed to fsync uninit mark file")
})
let ancestor = new_metadata
.ancestor_timeline()
.and_then(|ancestor_timeline_id| timelines.get(&ancestor_timeline_id))
.cloned();
let new_timeline = self
.initialize_new_timeline(new_timeline_id, new_metadata, ancestor)
.with_context(|| {
format!("Failed to crate uninit mark for timeline {tenant_id}/{timeline_id}")
format!(
"Failed to initialize timeline {}/{}",
new_timeline_id, self.tenant_id
)
})?;
let uninit_mark = TimelineUninitMark::new(uninit_mark_path, timeline_path);
match timelines.entry(new_timeline_id) {
Entry::Occupied(_) => bail!(
"Found freshly initialized timeline {} in the tenant map",
new_timeline_id
),
Entry::Vacant(v) => {
v.insert(Arc::clone(&new_timeline));
}
}
Ok(uninit_mark)
Ok(new_timeline)
}
}
@@ -1443,7 +1108,7 @@ fn run_initdb(
initdb_lib_dir.display(),
);
let initdb_output = Command::new(&initdb_bin_path)
let initdb_output = Command::new(initdb_bin_path)
.args(&["-D", &initdb_target_dir.to_string_lossy()])
.args(&["-U", &conf.superuser])
.args(&["-E", "utf8"])
@@ -1456,13 +1121,7 @@ fn run_initdb(
.env("DYLD_LIBRARY_PATH", &initdb_lib_dir)
.stdout(Stdio::null())
.output()
.with_context(|| {
format!(
"failed to execute {} at target dir {}",
initdb_bin_path.display(),
initdb_target_dir.display()
)
})?;
.context("failed to execute initdb")?;
if !initdb_output.status.success() {
bail!(
"initdb failed: '{}'",
@@ -1501,19 +1160,6 @@ pub fn dump_layerfile_from_path(path: &Path, verbose: bool) -> Result<()> {
Ok(())
}
fn ignore_absent_files<F>(fs_operation: F) -> io::Result<()>
where
F: Fn() -> io::Result<()>,
{
fs_operation().or_else(|e| {
if e.kind() == io::ErrorKind::NotFound {
Ok(())
} else {
Err(e)
}
})
}
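This removed helper simply treats a missing file as success, which makes cleanup paths idempotent. A self-contained illustration (the path is made up):

use std::fs;
use std::io;

fn ignore_absent_files<F>(fs_operation: F) -> io::Result<()>
where
    F: Fn() -> io::Result<()>,
{
    fs_operation().or_else(|e| {
        if e.kind() == io::ErrorKind::NotFound {
            Ok(())
        } else {
            Err(e)
        }
    })
}

fn main() -> io::Result<()> {
    // Removing a path that may or may not exist becomes a no-op instead of an error.
    ignore_absent_files(|| fs::remove_file("leftover.uninit"))
}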
#[cfg(test)]
pub mod harness {
use bytes::{Bytes, BytesMut};
@@ -1730,9 +1376,7 @@ mod tests {
#[test]
fn test_basic() -> Result<()> {
let tenant = TenantHarness::create("test_basic")?.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
let writer = tline.writer();
writer.put(*TEST_KEY, Lsn(0x10), &Value::Image(TEST_IMG("foo at 0x10")))?;
@@ -1754,18 +1398,13 @@ mod tests {
#[test]
fn no_duplicate_timelines() -> Result<()> {
let tenant = TenantHarness::create("no_duplicate_timelines")?.load();
let _ = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let _ = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
match tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION) {
Ok(_) => panic!("duplicate timeline creation should fail"),
Err(e) => assert_eq!(
e.to_string(),
format!(
"Timeline {}/{} already exists in pageserver's memory",
tenant.tenant_id, TIMELINE_ID
)
format!("Timeline {TIMELINE_ID} already exists")
),
}
@@ -1785,9 +1424,7 @@ mod tests {
#[test]
fn test_branch() -> Result<()> {
let tenant = TenantHarness::create("test_branch")?.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
let writer = tline.writer();
use std::str::from_utf8;
@@ -1882,9 +1519,7 @@ mod tests {
let tenant =
TenantHarness::create("test_prohibit_branch_creation_on_garbage_collected_data")?
.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
make_some_layers(tline.as_ref(), Lsn(0x20))?;
// this removes layers before lsn 40 (50 minus 10), so there are two remaining layers, image and delta for 31-50
@@ -1914,9 +1549,7 @@ mod tests {
let tenant =
TenantHarness::create("test_prohibit_branch_creation_on_pre_initdb_lsn")?.load();
tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION)?
.initialize()?;
tenant.create_empty_timeline(TIMELINE_ID, Lsn(0x50), DEFAULT_PG_VERSION)?;
// try to branch at lsn 0x25, should fail because initdb lsn is 0x50
match tenant.branch_timeline(TIMELINE_ID, NEW_TIMELINE_ID, Some(Lsn(0x25))) {
Ok(_) => panic!("branching should have failed"),
@@ -1960,9 +1593,7 @@ mod tests {
fn test_retain_data_in_parent_which_is_needed_for_child() -> Result<()> {
let tenant =
TenantHarness::create("test_retain_data_in_parent_which_is_needed_for_child")?.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
make_some_layers(tline.as_ref(), Lsn(0x20))?;
tenant.branch_timeline(TIMELINE_ID, NEW_TIMELINE_ID, Some(Lsn(0x40)))?;
@@ -1979,9 +1610,7 @@ mod tests {
fn test_parent_keeps_data_forever_after_branching() -> Result<()> {
let tenant =
TenantHarness::create("test_parent_keeps_data_forever_after_branching")?.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
make_some_layers(tline.as_ref(), Lsn(0x20))?;
tenant.branch_timeline(TIMELINE_ID, NEW_TIMELINE_ID, Some(Lsn(0x40)))?;
@@ -2009,9 +1638,8 @@ mod tests {
let harness = TenantHarness::create(TEST_NAME)?;
{
let tenant = harness.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0x8000), DEFAULT_PG_VERSION)?
.initialize()?;
let tline =
tenant.create_empty_timeline(TIMELINE_ID, Lsn(0x8000), DEFAULT_PG_VERSION)?;
make_some_layers(tline.as_ref(), Lsn(0x8000))?;
tline.checkpoint(CheckpointConfig::Forced)?;
}
@@ -2031,9 +1659,7 @@ mod tests {
// create two timelines
{
let tenant = harness.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
make_some_layers(tline.as_ref(), Lsn(0x20))?;
tline.checkpoint(CheckpointConfig::Forced)?;
@@ -2069,9 +1695,7 @@ mod tests {
let harness = TenantHarness::create(TEST_NAME)?;
let tenant = harness.load();
tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
drop(tenant);
let metadata_path = harness.timeline_path(&TIMELINE_ID).join(METADATA_FILE_NAME);
@@ -2108,9 +1732,7 @@ mod tests {
#[test]
fn test_images() -> Result<()> {
let tenant = TenantHarness::create("test_images")?.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
let writer = tline.writer();
writer.put(*TEST_KEY, Lsn(0x10), &Value::Image(TEST_IMG("foo at 0x10")))?;
@@ -2160,9 +1782,7 @@ mod tests {
#[test]
fn test_bulk_insert() -> Result<()> {
let tenant = TenantHarness::create("test_bulk_insert")?.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
let mut lsn = Lsn(0x10);
@@ -2202,9 +1822,7 @@ mod tests {
#[test]
fn test_random_updates() -> Result<()> {
let tenant = TenantHarness::create("test_random_updates")?.load();
let tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
const NUM_KEYS: usize = 1000;
@@ -2274,9 +1892,7 @@ mod tests {
#[test]
fn test_traverse_branches() -> Result<()> {
let tenant = TenantHarness::create("test_traverse_branches")?.load();
let mut tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let mut tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
const NUM_KEYS: usize = 1000;
@@ -2355,9 +1971,7 @@ mod tests {
#[test]
fn test_traverse_ancestors() -> Result<()> {
let tenant = TenantHarness::create("test_traverse_ancestors")?.load();
let mut tline = tenant
.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?
.initialize()?;
let mut tline = tenant.create_empty_timeline(TIMELINE_ID, Lsn(0), DEFAULT_PG_VERSION)?;
const NUM_KEYS: usize = 100;
const NUM_TLINES: usize = 50;

View File

@@ -62,8 +62,6 @@ pub struct LayerMap {
struct LayerRTreeObject {
layer: Arc<dyn Layer>,
envelope: AABB<[IntKey; 2]>,
}
// Representation of Key as numeric type.
@@ -199,16 +197,9 @@ impl PartialEq for LayerRTreeObject {
impl RTreeObject for LayerRTreeObject {
type Envelope = AABB<[IntKey; 2]>;
fn envelope(&self) -> Self::Envelope {
self.envelope
}
}
impl LayerRTreeObject {
fn new(layer: Arc<dyn Layer>) -> Self {
let key_range = layer.get_key_range();
let lsn_range = layer.get_lsn_range();
let envelope = AABB::from_corners(
let key_range = self.layer.get_key_range();
let lsn_range = self.layer.get_lsn_range();
AABB::from_corners(
[
IntKey::from(key_range.start.to_i128()),
IntKey::from(lsn_range.start.0 as i128),
@@ -217,8 +208,7 @@ impl LayerRTreeObject {
IntKey::from(key_range.end.to_i128() - 1),
IntKey::from(lsn_range.end.0 as i128 - 1),
], // AABB::upper is inclusive, while `key_range.end` and `lsn_range.end` are exclusive
);
LayerRTreeObject { layer, envelope }
)
}
}
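This hunk drops the cached `envelope` field and computes the bounding box on the fly inside `RTreeObject::envelope`. In general, an entry in an rstar R-tree only has to report an axis-aligned bounding box; a minimal sketch with plain integer ranges standing in for key and LSN ranges (assuming the rstar crate) might look like this:

use std::ops::Range;

use rstar::{RTree, RTreeObject, AABB};

// Hypothetical entry covering a key range and an LSN range.
struct RangeEntry {
    key_range: Range<i64>,
    lsn_range: Range<i64>,
}

impl RTreeObject for RangeEntry {
    type Envelope = AABB<[i64; 2]>;

    fn envelope(&self) -> Self::Envelope {
        // Half-open ranges become inclusive corners, hence the `- 1`.
        AABB::from_corners(
            [self.key_range.start, self.lsn_range.start],
            [self.key_range.end - 1, self.lsn_range.end - 1],
        )
    }
}

fn main() {
    let tree = RTree::bulk_load(vec![
        RangeEntry { key_range: 0..100, lsn_range: 10..20 },
        RangeEntry { key_range: 50..200, lsn_range: 15..30 },
    ]);
    let point_query = AABB::from_corners([60, 16], [60, 16]);
    assert_eq!(tree.locate_in_envelope_intersecting(&point_query).count(), 2);
}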
@@ -348,7 +338,7 @@ impl LayerMap {
if layer.get_key_range() == (Key::MIN..Key::MAX) {
self.l0_delta_layers.push(layer.clone());
}
self.historic_layers.insert(LayerRTreeObject::new(layer));
self.historic_layers.insert(LayerRTreeObject { layer });
NUM_ONDISK_LAYERS.inc();
}
@@ -372,7 +362,7 @@ impl LayerMap {
}
assert!(self
.historic_layers
.remove(&LayerRTreeObject::new(layer))
.remove(&LayerRTreeObject { layer })
.is_some());
NUM_ONDISK_LAYERS.dec();
}

View File

@@ -52,10 +52,7 @@ use crate::task_mgr::TaskKind;
use crate::walreceiver::{is_etcd_client_initialized, spawn_connection_manager_task};
use crate::walredo::WalRedoManager;
use crate::CheckpointConfig;
use crate::{
page_cache,
storage_sync::{self, index::LayerFileMetadata},
};
use crate::{page_cache, storage_sync};
pub struct Timeline {
conf: &'static PageServerConf,
@@ -478,6 +475,10 @@ impl Timeline {
}
/// Mutate the timeline with a [`TimelineWriter`].
///
/// FIXME: This ought to return &'a TimelineWriter, where TimelineWriter
/// is a generic type in this trait. But that doesn't currently work in
/// Rust: https://rust-lang.github.io/rfcs/1598-generic_associated_types.html
pub fn writer(&self) -> TimelineWriter<'_> {
TimelineWriter {
tl: self,
@@ -1193,8 +1194,8 @@ impl Timeline {
self.create_image_layers(&partitioning, self.initdb_lsn, true)?
} else {
// normal case, write out a L0 delta layer file.
let (delta_path, metadata) = self.create_delta_layer(&frozen_layer)?;
HashMap::from([(delta_path, metadata)])
let delta_path = self.create_delta_layer(&frozen_layer)?;
HashSet::from([delta_path])
};
fail_point!("flush-frozen-before-sync");
@@ -1220,86 +1221,85 @@ impl Timeline {
// TODO: This perhaps should be done in 'flush_frozen_layers', after flushing
// *all* the layers, to avoid fsyncing the file multiple times.
let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
self.update_disk_consistent_lsn(disk_consistent_lsn, layer_paths_to_upload)?;
// If we were able to advance 'disk_consistent_lsn', save it the metadata file.
// After crash, we will restart WAL streaming and processing from that point.
if disk_consistent_lsn != old_disk_consistent_lsn {
assert!(disk_consistent_lsn > old_disk_consistent_lsn);
self.update_metadata_file(disk_consistent_lsn, layer_paths_to_upload)?;
// Also update the in-memory copy
self.disk_consistent_lsn.store(disk_consistent_lsn);
}
Ok(())
}
/// Update metadata file
fn update_metadata_file(
fn update_disk_consistent_lsn(
&self,
disk_consistent_lsn: Lsn,
layer_paths_to_upload: HashMap<PathBuf, LayerFileMetadata>,
layer_paths_to_upload: HashSet<PathBuf>,
) -> Result<()> {
// We can only save a valid 'prev_record_lsn' value on disk if we
// flushed *all* in-memory changes to disk. We only track
// 'prev_record_lsn' in memory for the latest processed record, so we
// don't remember the correct value that corresponds to some old
// LSN. But if we flush everything, then the value corresponding to the
// current 'last_record_lsn' is correct and we can store it on disk.
let RecordLsn {
last: last_record_lsn,
prev: prev_record_lsn,
} = self.last_record_lsn.load();
let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
Some(prev_record_lsn)
} else {
None
};
// If we were able to advance 'disk_consistent_lsn', save it the metadata file.
// After crash, we will restart WAL streaming and processing from that point.
let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
if disk_consistent_lsn != old_disk_consistent_lsn {
assert!(disk_consistent_lsn > old_disk_consistent_lsn);
let ancestor_timeline_id = self
.ancestor_timeline
.as_ref()
.map(|ancestor| ancestor.timeline_id);
// We can only save a valid 'prev_record_lsn' value on disk if we
// flushed *all* in-memory changes to disk. We only track
// 'prev_record_lsn' in memory for the latest processed record, so we
// don't remember the correct value that corresponds to some old
// LSN. But if we flush everything, then the value corresponding to the
// current 'last_record_lsn' is correct and we can store it on disk.
let RecordLsn {
last: last_record_lsn,
prev: prev_record_lsn,
} = self.last_record_lsn.load();
let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
Some(prev_record_lsn)
} else {
None
};
let metadata = TimelineMetadata::new(
disk_consistent_lsn,
ondisk_prev_record_lsn,
ancestor_timeline_id,
self.ancestor_lsn,
*self.latest_gc_cutoff_lsn.read(),
self.initdb_lsn,
self.pg_version,
);
let ancestor_timeline_id = self
.ancestor_timeline
.as_ref()
.map(|ancestor| ancestor.timeline_id);
fail_point!("checkpoint-before-saving-metadata", |x| bail!(
"{}",
x.unwrap()
));
save_metadata(
self.conf,
self.timeline_id,
self.tenant_id,
&metadata,
false,
)?;
if self.upload_layers.load(atomic::Ordering::Relaxed) {
storage_sync::schedule_layer_upload(
self.tenant_id,
self.timeline_id,
layer_paths_to_upload,
Some(metadata),
let metadata = TimelineMetadata::new(
disk_consistent_lsn,
ondisk_prev_record_lsn,
ancestor_timeline_id,
self.ancestor_lsn,
*self.latest_gc_cutoff_lsn.read(),
self.initdb_lsn,
self.pg_version,
);
fail_point!("checkpoint-before-saving-metadata", |x| bail!(
"{}",
x.unwrap()
));
save_metadata(
self.conf,
self.timeline_id,
self.tenant_id,
&metadata,
false,
)?;
if self.upload_layers.load(atomic::Ordering::Relaxed) {
storage_sync::schedule_layer_upload(
self.tenant_id,
self.timeline_id,
layer_paths_to_upload,
Some(metadata),
);
}
// Also update the in-memory copy
self.disk_consistent_lsn.store(disk_consistent_lsn);
}
Ok(())
}
// Write out the given frozen in-memory layer as a new L0 delta file
fn create_delta_layer(
&self,
frozen_layer: &InMemoryLayer,
) -> Result<(PathBuf, LayerFileMetadata)> {
fn create_delta_layer(&self, frozen_layer: &InMemoryLayer) -> Result<PathBuf> {
// Write it out
let new_delta = frozen_layer.write_to_disk()?;
let new_delta_path = new_delta.path();
@@ -1325,13 +1325,12 @@ impl Timeline {
// update the timeline's physical size
let sz = new_delta_path.metadata()?.len();
self.metrics.current_physical_size_gauge.add(sz);
// update metrics
self.metrics.num_persistent_files_created.inc_by(1);
self.metrics.persistent_bytes_written.inc_by(sz);
Ok((new_delta_path, LayerFileMetadata::new(sz)))
Ok(new_delta_path)
}
pub fn compact(&self) -> anyhow::Result<()> {
@@ -1397,7 +1396,7 @@ impl Timeline {
storage_sync::schedule_layer_upload(
self.tenant_id,
self.timeline_id,
layer_paths_to_upload,
HashSet::from_iter(layer_paths_to_upload),
None,
);
}
@@ -1478,9 +1477,10 @@ impl Timeline {
partitioning: &KeyPartitioning,
lsn: Lsn,
force: bool,
) -> Result<HashMap<PathBuf, LayerFileMetadata>> {
) -> Result<HashSet<PathBuf>> {
let timer = self.metrics.create_images_time_histo.start_timer();
let mut image_layers: Vec<ImageLayer> = Vec::new();
let mut layer_paths_to_upload = HashSet::new();
for partition in partitioning.parts.iter() {
if force || self.time_for_new_image_layer(partition, lsn)? {
let img_range =
@@ -1502,6 +1502,7 @@ impl Timeline {
}
}
let image_layer = image_layer_writer.finish()?;
layer_paths_to_upload.insert(image_layer.path());
image_layers.push(image_layer);
}
}
@@ -1515,25 +1516,15 @@ impl Timeline {
//
// Compaction creates multiple image layers. It would be better to create them all
// and fsync them all in parallel.
let all_paths = image_layers
.iter()
.map(|layer| layer.path())
.chain(std::iter::once(
self.conf.timeline_path(&self.timeline_id, &self.tenant_id),
))
.collect::<Vec<_>>();
let mut all_paths = Vec::from_iter(layer_paths_to_upload.clone());
all_paths.push(self.conf.timeline_path(&self.timeline_id, &self.tenant_id));
par_fsync::par_fsync(&all_paths)?;
let mut layer_paths_to_upload = HashMap::with_capacity(image_layers.len());
let mut layers = self.layers.write().unwrap();
for l in image_layers {
let path = l.path();
let metadata = path.metadata()?;
layer_paths_to_upload.insert(path, LayerFileMetadata::new(metadata.len()));
self.metrics.current_physical_size_gauge.add(metadata.len());
self.metrics
.current_physical_size_gauge
.add(l.path().metadata()?.len());
layers.insert_historic(Arc::new(l));
}
drop(layers);
@@ -1784,16 +1775,16 @@ impl Timeline {
}
let mut layers = self.layers.write().unwrap();
let mut new_layer_paths = HashMap::with_capacity(new_layers.len());
let mut new_layer_paths = HashSet::with_capacity(new_layers.len());
for l in new_layers {
let new_delta_path = l.path();
let metadata = new_delta_path.metadata()?;
// update the timeline's physical size
self.metrics.current_physical_size_gauge.add(metadata.len());
self.metrics
.current_physical_size_gauge
.add(new_delta_path.metadata()?.len());
new_layer_paths.insert(new_delta_path, LayerFileMetadata::new(metadata.len()));
new_layer_paths.insert(new_delta_path);
layers.insert_historic(Arc::new(l));
}
@@ -1959,9 +1950,6 @@ impl Timeline {
new_gc_cutoff
);
write_guard.store_and_unlock(new_gc_cutoff).wait();
// Persist metadata file
self.update_metadata_file(self.disk_consistent_lsn.load(), HashMap::new())?;
}
info!("GC starting");
@@ -2088,18 +2076,6 @@ impl Timeline {
result.layers_removed += 1;
}
info!(
"GC completed removing {} layers, cuttof {}",
result.layers_removed, new_gc_cutoff
);
if result.layers_removed != 0 {
fail_point!("gc-before-save-metadata", |_| {
info!("Abnormaly terinate pageserver at gc-before-save-metadata fail point");
std::process::abort();
});
return Ok(result);
}
if self.upload_layers.load(atomic::Ordering::Relaxed) {
storage_sync::schedule_layer_delete(
self.tenant_id,

View File

@@ -1,7 +1,7 @@
//! This module acts as a switchboard to access different repositories managed by this
//! page server.
use std::collections::{hash_map, HashMap};
use std::collections::{hash_map, HashMap, HashSet};
use std::ffi::OsStr;
use std::fs;
use std::path::{Path, PathBuf};
@@ -12,19 +12,19 @@ use tracing::*;
use remote_storage::GenericRemoteStorage;
use crate::config::{PageServerConf, METADATA_FILE_NAME, TIMELINE_UNINIT_MARK_SUFFIX};
use crate::config::{PageServerConf, METADATA_FILE_NAME};
use crate::http::models::TenantInfo;
use crate::storage_sync::index::{LayerFileMetadata, RemoteIndex, RemoteTimelineIndex};
use crate::storage_sync::{self, LocalTimelineInitStatus, SyncStartupData, TimelineLocalFiles};
use crate::storage_sync::index::{RemoteIndex, RemoteTimelineIndex};
use crate::storage_sync::{self, LocalTimelineInitStatus, SyncStartupData};
use crate::task_mgr::{self, TaskKind};
use crate::tenant::{
ephemeral_file::is_ephemeral_file, metadata::TimelineMetadata, Tenant, TenantState,
};
use crate::tenant_config::TenantConfOpt;
use crate::walredo::PostgresRedoManager;
use crate::TEMP_FILE_SUFFIX;
use crate::{TenantTimelineValues, TEMP_FILE_SUFFIX};
use utils::crashsafe::{self, path_with_suffix_extension};
use utils::crashsafe_dir::{self, path_with_suffix_extension};
use utils::id::{TenantId, TimelineId};
mod tenants_state {
@@ -70,54 +70,34 @@ pub fn init_tenant_mgr(
.remote_storage_config
.as_ref()
.expect("remote storage without config");
let mut broken_tenants = HashMap::new();
let mut ready_tenants = HashMap::new();
for (tenant_id, tenant_attach_data) in local_tenant_files.into_iter() {
match tenant_attach_data {
TenantAttachData::Ready(t) => {
ready_tenants.insert(tenant_id, t);
}
TenantAttachData::Broken(e) => {
broken_tenants.insert(tenant_id, TenantAttachData::Broken(e));
}
}
}
let SyncStartupData {
remote_index,
local_timeline_init_statuses,
} = storage_sync::spawn_storage_sync_task(
conf,
ready_tenants,
local_tenant_files,
storage,
storage_config.max_concurrent_syncs,
storage_config.max_sync_errors,
)
.context("Failed to spawn the storage sync thread")?;
let n = local_timeline_init_statuses.0.len();
let mut synced_timelines = local_timeline_init_statuses.0.into_iter().fold(
HashMap::<TenantId, TenantAttachData>::with_capacity(n),
|mut new_values, (tenant_id, old_values)| {
let new_timeline_values = new_values
.entry(tenant_id)
.or_insert_with(|| TenantAttachData::Ready(HashMap::new()));
if let TenantAttachData::Ready(t) = new_timeline_values {
for (timeline_id, old_value) in old_values {
if let LocalTimelineInitStatus::LocallyComplete(metadata) = old_value {
t.insert(timeline_id, TimelineLocalFiles::ready(metadata));
}
}
}
new_values
},
);
synced_timelines.extend(broken_tenants);
(remote_index, synced_timelines)
(
remote_index,
local_timeline_init_statuses.filter_map(|init_status| match init_status {
LocalTimelineInitStatus::LocallyComplete(metadata) => Some(metadata),
LocalTimelineInitStatus::NeedsSync => None,
}),
)
} else {
info!("No remote storage configured, skipping storage sync, considering all local timelines with correct metadata files enabled");
(RemoteIndex::default(), local_tenant_files)
(
RemoteIndex::default(),
local_tenant_files.filter_map(|(metadata, _)| Some(metadata)),
)
};
attach_local_tenants(conf, &remote_index, tenants_to_attach);
Ok(remote_index)
@@ -137,12 +117,18 @@ pub fn init_tenant_mgr(
pub fn attach_local_tenants(
conf: &'static PageServerConf,
remote_index: &RemoteIndex,
tenants_to_attach: HashMap<TenantId, TenantAttachData>,
tenants_to_attach: TenantTimelineValues<TimelineMetadata>,
) {
let _entered = info_span!("attach_local_tenants").entered();
let number_of_tenants = tenants_to_attach.len();
let number_of_tenants = tenants_to_attach.0.len();
for (tenant_id, local_timelines) in tenants_to_attach.0 {
info!(
"Attaching {} timelines for {tenant_id}",
local_timelines.len()
);
debug!("Timelines to attach: {local_timelines:?}");
for (tenant_id, local_timelines) in tenants_to_attach {
let mut tenants_accessor = tenants_state::write_tenants();
let tenant = match tenants_accessor.entry(tenant_id) {
hash_map::Entry::Occupied(o) => {
@@ -151,55 +137,25 @@ pub fn attach_local_tenants(
}
hash_map::Entry::Vacant(v) => {
info!("Tenant {tenant_id} was not found in pageserver's memory, loading it");
let tenant = Arc::new(Tenant::new(
conf,
TenantConfOpt::default(),
Arc::new(PostgresRedoManager::new(conf, tenant_id)),
tenant_id,
remote_index.clone(),
conf.remote_storage_config.is_some(),
));
match local_timelines {
TenantAttachData::Broken(_) => {
tenant.set_state(TenantState::Broken);
}
TenantAttachData::Ready(_) => {
match Tenant::load_tenant_config(conf, tenant_id) {
Ok(tenant_conf) => {
tenant.update_tenant_config(tenant_conf);
tenant.activate(false);
}
Err(e) => {
error!("Failed to read config for tenant {tenant_id}, disabling tenant: {e:?}");
tenant.set_state(TenantState::Broken);
}
};
}
}
let tenant = load_local_tenant(conf, tenant_id, remote_index);
v.insert(Arc::clone(&tenant));
tenant
}
};
drop(tenants_accessor);
match local_timelines {
TenantAttachData::Broken(e) => warn!("{}", e),
TenantAttachData::Ready(ref timelines) => {
info!("Attaching {} timelines for {tenant_id}", timelines.len());
debug!("Timelines to attach: {local_timelines:?}");
let has_timelines = !timelines.is_empty();
let timelines_to_attach = timelines
.iter()
.map(|(&k, v)| (k, v.metadata().to_owned()))
.collect();
match tenant.init_attach_timelines(timelines_to_attach) {
Ok(()) => {
info!("successfully loaded local timelines for tenant {tenant_id}");
tenant.activate(has_timelines);
}
Err(e) => {
error!("Failed to attach tenant timelines: {e:?}");
tenant.set_state(TenantState::Broken);
}
if tenant.current_state() == TenantState::Broken {
warn!("Skipping timeline load for broken tenant {tenant_id}")
} else {
let has_timelines = !local_timelines.is_empty();
match tenant.init_attach_timelines(local_timelines) {
Ok(()) => {
info!("successfully loaded local timelines for tenant {tenant_id}");
tenant.activate(has_timelines);
}
Err(e) => {
error!("Failed to attach tenant timelines: {e:?}");
tenant.set_state(TenantState::Broken);
}
}
}
@@ -208,6 +164,44 @@ pub fn attach_local_tenants(
info!("Processed {number_of_tenants} local tenants during attach")
}
fn load_local_tenant(
conf: &'static PageServerConf,
tenant_id: TenantId,
remote_index: &RemoteIndex,
) -> Arc<Tenant> {
let tenant = Arc::new(Tenant::new(
conf,
TenantConfOpt::default(),
Arc::new(PostgresRedoManager::new(conf, tenant_id)),
tenant_id,
remote_index.clone(),
conf.remote_storage_config.is_some(),
));
let tenant_timelines_dir = conf.timelines_path(&tenant_id);
if !tenant_timelines_dir.is_dir() {
error!(
"Tenant {} has no timelines directory at {}",
tenant_id,
tenant_timelines_dir.display()
);
tenant.set_state(TenantState::Broken);
} else {
match Tenant::load_tenant_config(conf, tenant_id) {
Ok(tenant_conf) => {
tenant.update_tenant_config(tenant_conf);
tenant.activate(false);
}
Err(e) => {
error!("Failed to read config for tenant {tenant_id}, disabling tenant: {e:?}");
tenant.set_state(TenantState::Broken);
}
}
}
tenant
}
///
/// Shut down all tenants. This runs as part of pageserver shutdown.
///
@@ -265,98 +259,58 @@ fn create_tenant_files(
temporary_tenant_dir.display()
);
let temporary_tenant_timelines_dir = rebase_directory(
&conf.timelines_path(&tenant_id),
&target_tenant_directory,
&temporary_tenant_dir,
)?;
let temporary_tenant_config_path = rebase_directory(
&conf.tenant_config_path(tenant_id),
&target_tenant_directory,
&temporary_tenant_dir,
)?;
// top-level dir may exist if we are creating it through CLI
crashsafe::create_dir_all(&temporary_tenant_dir).with_context(|| {
crashsafe_dir::create_dir_all(&temporary_tenant_dir).with_context(|| {
format!(
"could not create temporary tenant directory {}",
temporary_tenant_dir.display()
)
})?;
let creation_result = try_create_target_tenant_dir(
conf,
tenant_conf,
tenant_id,
&temporary_tenant_dir,
&target_tenant_directory,
);
if creation_result.is_err() {
error!("Failed to create directory structure for tenant {tenant_id}, cleaning tmp data");
if let Err(e) = fs::remove_dir_all(&temporary_tenant_dir) {
error!("Failed to remove temporary tenant directory {temporary_tenant_dir:?}: {e}")
} else if let Err(e) = crashsafe::fsync(&temporary_tenant_dir) {
error!(
"Failed to fsync removed temporary tenant directory {temporary_tenant_dir:?}: {e}"
)
}
}
creation_result
}
fn try_create_target_tenant_dir(
conf: &'static PageServerConf,
tenant_conf: TenantConfOpt,
tenant_id: TenantId,
temporary_tenant_dir: &Path,
target_tenant_directory: &Path,
) -> Result<(), anyhow::Error> {
let temporary_tenant_timelines_dir = rebase_directory(
&conf.timelines_path(&tenant_id),
target_tenant_directory,
temporary_tenant_dir,
)
.with_context(|| format!("Failed to resolve tenant {tenant_id} temporary timelines dir"))?;
let temporary_tenant_config_path = rebase_directory(
&conf.tenant_config_path(tenant_id),
target_tenant_directory,
temporary_tenant_dir,
)
.with_context(|| format!("Failed to resolve tenant {tenant_id} temporary config path"))?;
Tenant::persist_tenant_config(&temporary_tenant_config_path, tenant_conf, true).with_context(
|| {
format!(
"Failed to write tenant {} config to {}",
tenant_id,
temporary_tenant_config_path.display()
)
},
)?;
crashsafe::create_dir(&temporary_tenant_timelines_dir).with_context(|| {
// first, create a config in the top-level temp directory, fsync the file
Tenant::persist_tenant_config(&temporary_tenant_config_path, tenant_conf, true)?;
// then, create a subdirectory in the top-level temp directory, fsynced
crashsafe_dir::create_dir(&temporary_tenant_timelines_dir).with_context(|| {
format!(
"could not create tenant {} temporary timelines directory {}",
tenant_id,
"could not create temporary tenant timelines directory {}",
temporary_tenant_timelines_dir.display()
)
})?;
fail::fail_point!("tenant-creation-before-tmp-rename", |_| {
anyhow::bail!("failpoint tenant-creation-before-tmp-rename");
});
fs::rename(&temporary_tenant_dir, target_tenant_directory).with_context(|| {
// move-rename tmp directory with all files synced into a permanent directory, fsync its parent
fs::rename(&temporary_tenant_dir, &target_tenant_directory).with_context(|| {
format!(
"failed to move tenant {} temporary directory {} into the permanent one {}",
tenant_id,
"failed to move temporary tenant directory {} into the permanent one {}",
temporary_tenant_dir.display(),
target_tenant_directory.display()
)
})?;
let target_dir_parent = target_tenant_directory.parent().with_context(|| {
format!(
"Failed to get tenant {} dir parent for {}",
tenant_id,
"Failed to get tenant dir parent for {}",
target_tenant_directory.display()
)
})?;
crashsafe::fsync(target_dir_parent).with_context(|| {
format!(
"Failed to fsync renamed directory's parent {} for tenant {}",
target_dir_parent.display(),
tenant_id,
)
})?;
fs::File::open(target_dir_parent)?.sync_all()?;
info!(
"created tenant directory structure in {}",
target_tenant_directory.display()
);
Ok(())
}
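The function above is an instance of a general crash-safety pattern rather than anything pageserver-specific: stage the tenant layout under a temporary path, fsync the file and the directories, rename the staged tree into place, and finally fsync the parent directory so the rename itself is durable. A minimal standalone sketch of that pattern, using only the standard library and assuming Unix fsync-on-directory semantics (the helper below is illustrative, not the crashsafe/crashsafe_dir API used in the diff):

use std::{fs, io, path::Path};

// Create `target` atomically: stage everything under `tmp`, fsync the staged
// file and the temp directory, rename into place, then fsync the parent so
// the rename survives a crash.
fn create_dir_crash_safe(tmp: &Path, target: &Path) -> io::Result<()> {
    fs::create_dir_all(tmp)?;
    fs::write(tmp.join("config"), b"...")?;          // stage the payload
    fs::File::open(tmp.join("config"))?.sync_all()?; // fsync the staged file
    fs::File::open(tmp)?.sync_all()?;                // fsync the temp dir itself
    fs::rename(tmp, target)?;                        // atomic move into place
    let parent = target.parent().expect("target must have a parent");
    fs::File::open(parent)?.sync_all()?;             // persist the rename
    Ok(())
}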
@@ -521,21 +475,16 @@ pub fn list_tenant_info(remote_index: &RemoteTimelineIndex) -> Vec<TenantInfo> {
.collect()
}
#[derive(Debug)]
pub enum TenantAttachData {
Ready(HashMap<TimelineId, TimelineLocalFiles>),
Broken(anyhow::Error),
}
/// Attempts to collect information about all tenant and timelines, existing on the local FS.
/// If it finds any, deletes all temporary files and directories created before. Also removes empty directories,
/// that may appear due to such removals.
/// Does not fail on particular timeline or tenant collection errors, rather logging them and ignoring the entities.
fn local_tenant_timeline_files(
config: &'static PageServerConf,
) -> anyhow::Result<HashMap<TenantId, TenantAttachData>> {
) -> anyhow::Result<TenantTimelineValues<(TimelineMetadata, HashSet<PathBuf>)>> {
let _entered = info_span!("local_tenant_timeline_files").entered();
let mut local_tenant_timeline_files = HashMap::new();
let mut local_tenant_timeline_files = TenantTimelineValues::new();
let tenants_dir = config.tenants_path();
for tenants_dir_entry in fs::read_dir(&tenants_dir)
.with_context(|| format!("Failed to list tenants dir {}", tenants_dir.display()))?
@@ -557,31 +506,19 @@ fn local_tenant_timeline_files(
}
} else {
match collect_timelines_for_tenant(config, &tenant_dir_path) {
Ok((tenant_id, TenantAttachData::Broken(e))) => {
local_tenant_timeline_files.entry(tenant_id).or_insert(TenantAttachData::Broken(e));
},
Ok((tenant_id, TenantAttachData::Ready(collected_files))) => {
Ok((tenant_id, collected_files)) => {
if collected_files.is_empty() {
match remove_if_empty(&tenant_dir_path) {
Ok(true) => info!("Removed empty tenant directory {}", tenant_dir_path.display()),
Ok(false) => {
// insert empty timeline entry: it has some non-temporary files inside that we cannot remove
// so make it obvious to HTTP API callers that something exists there, and try to load the tenant
let _ = local_tenant_timeline_files.entry(tenant_id).or_insert_with(|| TenantAttachData::Ready(HashMap::new()));
let _ = local_tenant_timeline_files.0.entry(tenant_id).or_default();
},
Err(e) => error!("Failed to remove empty tenant directory: {e:?}"),
}
} else {
match local_tenant_timeline_files.entry(tenant_id) {
hash_map::Entry::Vacant(entry) => {
entry.insert(TenantAttachData::Ready(collected_files));
}
hash_map::Entry::Occupied(entry) =>{
if let TenantAttachData::Ready(old_timelines) = entry.into_mut() {
old_timelines.extend(collected_files);
}
},
}
local_tenant_timeline_files.0.entry(tenant_id).or_default().extend(collected_files.into_iter())
}
},
Err(e) => error!(
@@ -604,7 +541,7 @@ fn local_tenant_timeline_files(
info!(
"Collected files for {} tenants",
local_tenant_timeline_files.len(),
local_tenant_timeline_files.0.len()
);
Ok(local_tenant_timeline_files)
}
@@ -642,19 +579,14 @@ fn is_temporary(path: &Path) -> bool {
}
}
fn is_uninit_mark(path: &Path) -> bool {
match path.file_name() {
Some(name) => name
.to_string_lossy()
.ends_with(TIMELINE_UNINIT_MARK_SUFFIX),
None => false,
}
}
#[allow(clippy::type_complexity)]
fn collect_timelines_for_tenant(
config: &'static PageServerConf,
tenant_path: &Path,
) -> anyhow::Result<(TenantId, TenantAttachData)> {
) -> anyhow::Result<(
TenantId,
HashMap<TimelineId, (TimelineMetadata, HashSet<PathBuf>)>,
)> {
let tenant_id = tenant_path
.file_name()
.and_then(OsStr::to_str)
@@ -663,17 +595,6 @@ fn collect_timelines_for_tenant(
.context("Could not parse tenant id out of the tenant dir name")?;
let timelines_dir = config.timelines_path(&tenant_id);
if !timelines_dir.as_path().is_dir() {
return Ok((
tenant_id,
TenantAttachData::Broken(anyhow::anyhow!(
"Tenant {} has no timelines directory at {}",
tenant_id,
timelines_dir.display()
)),
));
}
let mut tenant_timelines = HashMap::new();
for timelines_dir_entry in fs::read_dir(&timelines_dir)
.with_context(|| format!("Failed to list timelines dir entry for tenant {tenant_id}"))?
@@ -693,74 +614,25 @@ fn collect_timelines_for_tenant(
e
);
}
} else if is_uninit_mark(&timeline_dir) {
let timeline_uninit_mark_file = &timeline_dir;
info!(
"Found an uninit mark file {}, removing the timeline and its uninit mark",
timeline_uninit_mark_file.display()
);
let timeline_id = timeline_uninit_mark_file
.file_stem()
.and_then(OsStr::to_str)
.unwrap_or_default()
.parse::<TimelineId>()
.with_context(|| {
format!(
"Could not parse timeline id out of the timeline uninit mark name {}",
timeline_uninit_mark_file.display()
)
})?;
let timeline_dir = config.timeline_path(&timeline_id, &tenant_id);
if let Err(e) =
remove_timeline_and_uninit_mark(&timeline_dir, timeline_uninit_mark_file)
{
error!("Failed to clean up uninit marked timeline: {e:?}");
}
} else {
let timeline_id = timeline_dir
.file_name()
.and_then(OsStr::to_str)
.unwrap_or_default()
.parse::<TimelineId>()
.with_context(|| {
format!(
"Could not parse timeline id out of the timeline dir name {}",
timeline_dir.display()
)
})?;
let timeline_uninit_mark_file =
config.timeline_uninit_mark_file_path(tenant_id, timeline_id);
if timeline_uninit_mark_file.exists() {
info!("Found an uninit mark file for timeline {tenant_id}/{timeline_id}, removing the timeline and its uninit mark");
if let Err(e) = remove_timeline_and_uninit_mark(
&timeline_dir,
&timeline_uninit_mark_file,
) {
error!("Failed to clean up uninit marked timeline: {e:?}");
match collect_timeline_files(&timeline_dir) {
Ok((timeline_id, metadata, timeline_files)) => {
tenant_timelines.insert(timeline_id, (metadata, timeline_files));
}
} else {
match collect_timeline_files(&timeline_dir) {
Ok((metadata, timeline_files)) => {
tenant_timelines.insert(
timeline_id,
TimelineLocalFiles::collected(metadata, timeline_files),
);
}
Err(e) => {
error!(
"Failed to process timeline dir contents at '{}', reason: {:?}",
timeline_dir.display(),
e
);
match remove_if_empty(&timeline_dir) {
Ok(true) => info!(
"Removed empty timeline directory {}",
timeline_dir.display()
),
Ok(false) => (),
Err(e) => {
error!("Failed to remove empty timeline directory: {e:?}")
}
Err(e) => {
error!(
"Failed to process timeline dir contents at '{}', reason: {:?}",
timeline_dir.display(),
e
);
match remove_if_empty(&timeline_dir) {
Ok(true) => info!(
"Removed empty timeline directory {}",
timeline_dir.display()
),
Ok(false) => (),
Err(e) => {
error!("Failed to remove empty timeline directory: {e:?}")
}
}
}
@@ -780,51 +652,28 @@ fn collect_timelines_for_tenant(
debug!("Tenant {tenant_id} has no timelines loaded");
}
Ok((tenant_id, TenantAttachData::Ready(tenant_timelines)))
}
fn remove_timeline_and_uninit_mark(timeline_dir: &Path, uninit_mark: &Path) -> anyhow::Result<()> {
fs::remove_dir_all(&timeline_dir)
.or_else(|e| {
if e.kind() == std::io::ErrorKind::NotFound {
// we can leave the uninit mark without a timeline dir,
// just remove the mark then
Ok(())
} else {
Err(e)
}
})
.with_context(|| {
format!(
"Failed to remove unit marked timeline directory {}",
timeline_dir.display()
)
})?;
fs::remove_file(&uninit_mark).with_context(|| {
format!(
"Failed to remove timeline uninit mark file {}",
uninit_mark.display()
)
})?;
Ok(())
Ok((tenant_id, tenant_timelines))
}
// discover timeline files and extract timeline metadata
// NOTE: ephemeral files are excluded from the list
fn collect_timeline_files(
timeline_dir: &Path,
) -> anyhow::Result<(TimelineMetadata, HashMap<PathBuf, LayerFileMetadata>)> {
let mut timeline_files = HashMap::new();
) -> anyhow::Result<(TimelineId, TimelineMetadata, HashSet<PathBuf>)> {
let mut timeline_files = HashSet::new();
let mut timeline_metadata_path = None;
let timeline_id = timeline_dir
.file_name()
.and_then(OsStr::to_str)
.unwrap_or_default()
.parse::<TimelineId>()
.context("Could not parse timeline id out of the timeline dir name")?;
let timeline_dir_entries =
fs::read_dir(&timeline_dir).context("Failed to list timeline dir contents")?;
for entry in timeline_dir_entries {
let entry_path = entry.context("Failed to list timeline dir entry")?.path();
let metadata = entry_path.metadata()?;
if metadata.is_file() {
if entry_path.is_file() {
if entry_path.file_name().and_then(OsStr::to_str) == Some(METADATA_FILE_NAME) {
timeline_metadata_path = Some(entry_path);
} else if is_ephemeral_file(&entry_path.file_name().unwrap().to_string_lossy()) {
@@ -839,8 +688,7 @@ fn collect_timeline_files(
)
})?;
} else {
let layer_metadata = LayerFileMetadata::new(metadata.len());
timeline_files.insert(entry_path, layer_metadata);
timeline_files.insert(entry_path);
}
}
}
@@ -866,5 +714,5 @@ fn collect_timeline_files(
"Timeline has no ancestor and no layer files"
);
Ok((metadata, timeline_files))
Ok((timeline_id, metadata, timeline_files))
}

View File

@@ -70,10 +70,8 @@ async fn compaction_loop(tenant_id: TenantId) {
// Run compaction
let mut sleep_duration = tenant.get_compaction_period();
if let Err(e) = tenant.compaction_iteration() {
error!("Compaction failed, retrying: {e:#}");
sleep_duration = wait_duration;
error!("Compaction failed, retrying in {:?}: {e:#}", sleep_duration);
#[cfg(feature = "testing")]
std::process::abort();
}
// Sleep
@@ -121,10 +119,8 @@ async fn gc_loop(tenant_id: TenantId) {
if gc_horizon > 0 {
if let Err(e) = tenant.gc_iteration(None, gc_horizon, tenant.get_pitr_interval(), false)
{
error!("Gc failed, retrying: {e:#}");
sleep_duration = wait_duration;
error!("Gc failed, retrying in {:?}: {e:#}", sleep_duration);
#[cfg(feature = "testing")]
std::process::abort();
}
}

View File

@@ -1374,9 +1374,7 @@ mod tests {
timeline: harness
.load()
.create_empty_timeline(TIMELINE_ID, Lsn(0), crate::DEFAULT_PG_VERSION)
.expect("Failed to create an empty timeline for dummy wal connection manager")
.initialize()
.unwrap(),
.expect("Failed to create an empty timeline for dummy wal connection manager"),
wal_connect_timeout: Duration::from_secs(1),
lagging_wal_timeout: Duration::from_secs(1),
max_lsn_wal_lag: NonZeroU64::new(1024 * 1024).unwrap(),

View File

@@ -35,12 +35,11 @@ use std::sync::Mutex;
use std::time::Duration;
use std::time::Instant;
use tracing::*;
use utils::crashsafe::path_with_suffix_extension;
use utils::crashsafe_dir::path_with_suffix_extension;
use utils::{bin_ser::BeSer, id::TenantId, lsn::Lsn, nonblock::set_nonblock};
use crate::metrics::{
WAL_REDO_BYTES_HISTOGRAM, WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_RECORD_COUNTER, WAL_REDO_TIME,
WAL_REDO_WAIT_TIME,
WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_RECORD_COUNTER, WAL_REDO_TIME, WAL_REDO_WAIT_TIME,
};
use crate::pgdatadir_mapping::{key_to_rel_block, key_to_slru_block};
use crate::reltag::{RelTag, SlruKind};
@@ -245,23 +244,12 @@ impl PostgresRedoManager {
let end_time = Instant::now();
let duration = end_time.duration_since(lock_time);
let len = records.len();
let nbytes = records.iter().fold(0, |acumulator, record| {
acumulator
+ match &record.1 {
NeonWalRecord::Postgres { rec, .. } => rec.len(),
_ => unreachable!("Only PostgreSQL records are accepted in this batch"),
}
});
WAL_REDO_TIME.observe(duration.as_secs_f64());
WAL_REDO_RECORDS_HISTOGRAM.observe(len as f64);
WAL_REDO_BYTES_HISTOGRAM.observe(nbytes as f64);
WAL_REDO_RECORDS_HISTOGRAM.observe(records.len() as f64);
debug!(
"postgres applied {} WAL records ({} bytes) in {} us to reconstruct page image at LSN {}",
len,
nbytes,
"postgres applied {} WAL records in {} us to reconstruct page image at LSN {}",
records.len(),
duration.as_micros(),
lsn
);
@@ -270,9 +258,8 @@ impl PostgresRedoManager {
// next request will launch a new one.
if result.is_err() {
error!(
"error applying {} WAL records ({} bytes) to reconstruct page image at LSN {}",
"error applying {} WAL records to reconstruct page image at LSN {}",
records.len(),
nbytes,
lsn
);
let process = process_guard.take().unwrap();

View File

@@ -10,12 +10,51 @@ struct WalProposerConn
PGconn *pg_conn;
bool is_nonblocking; /* whether the connection is non-blocking */
char *recvbuf; /* last received data from
* walprop_async_read */
* libpqprop_async_read */
};
/* Prototypes for exported functions */
static char *libpqprop_error_message(WalProposerConn * conn);
static WalProposerConnStatusType libpqprop_status(WalProposerConn * conn);
static WalProposerConn * libpqprop_connect_start(char *conninfo);
static WalProposerConnectPollStatusType libpqprop_connect_poll(WalProposerConn * conn);
static bool libpqprop_send_query(WalProposerConn * conn, char *query);
static WalProposerExecStatusType libpqprop_get_query_result(WalProposerConn * conn);
static pgsocket libpqprop_socket(WalProposerConn * conn);
static int libpqprop_flush(WalProposerConn * conn);
static void libpqprop_finish(WalProposerConn * conn);
static PGAsyncReadResult libpqprop_async_read(WalProposerConn * conn, char **buf, int *amount);
static PGAsyncWriteResult libpqprop_async_write(WalProposerConn * conn, void const *buf, size_t size);
static bool libpqprop_blocking_write(WalProposerConn * conn, void const *buf, size_t size);
static WalProposerFunctionsType PQWalProposerFunctions =
{
libpqprop_error_message,
libpqprop_status,
libpqprop_connect_start,
libpqprop_connect_poll,
libpqprop_send_query,
libpqprop_get_query_result,
libpqprop_socket,
libpqprop_flush,
libpqprop_finish,
libpqprop_async_read,
libpqprop_async_write,
libpqprop_blocking_write,
};
/* Module initialization */
void
pg_init_libpqwalproposer(void)
{
if (WalProposerFunctions != NULL)
elog(ERROR, "libpqwalproposer already loaded");
WalProposerFunctions = &PQWalProposerFunctions;
}
/* Helper function */
static bool
ensure_nonblocking_status(WalProposerConn *conn, bool is_nonblocking)
ensure_nonblocking_status(WalProposerConn * conn, bool is_nonblocking)
{
/* If we're already correctly blocking or nonblocking, all good */
if (is_nonblocking == conn->is_nonblocking)
@@ -30,14 +69,14 @@ ensure_nonblocking_status(WalProposerConn *conn, bool is_nonblocking)
}
/* Exported function definitions */
char *
walprop_error_message(WalProposerConn *conn)
static char *
libpqprop_error_message(WalProposerConn * conn)
{
return PQerrorMessage(conn->pg_conn);
}
WalProposerConnStatusType
walprop_status(WalProposerConn *conn)
static WalProposerConnStatusType
libpqprop_status(WalProposerConn * conn)
{
switch (PQstatus(conn->pg_conn))
{
@@ -50,8 +89,8 @@ walprop_status(WalProposerConn *conn)
}
}
WalProposerConn *
walprop_connect_start(char *conninfo)
static WalProposerConn *
libpqprop_connect_start(char *conninfo)
{
WalProposerConn *conn;
PGconn *pg_conn;
@@ -80,8 +119,8 @@ walprop_connect_start(char *conninfo)
return conn;
}
WalProposerConnectPollStatusType
walprop_connect_poll(WalProposerConn *conn)
static WalProposerConnectPollStatusType
libpqprop_connect_poll(WalProposerConn * conn)
{
WalProposerConnectPollStatusType return_val;
@@ -121,8 +160,8 @@ walprop_connect_poll(WalProposerConn *conn)
return return_val;
}
bool
walprop_send_query(WalProposerConn *conn, char *query)
static bool
libpqprop_send_query(WalProposerConn * conn, char *query)
{
/*
* We need to be in blocking mode for sending the query to run without
@@ -138,8 +177,8 @@ walprop_send_query(WalProposerConn *conn, char *query)
return true;
}
WalProposerExecStatusType
walprop_get_query_result(WalProposerConn *conn)
static WalProposerExecStatusType
libpqprop_get_query_result(WalProposerConn * conn)
{
PGresult *result;
WalProposerExecStatusType return_val;
@@ -216,20 +255,20 @@ walprop_get_query_result(WalProposerConn *conn)
return return_val;
}
pgsocket
walprop_socket(WalProposerConn *conn)
static pgsocket
libpqprop_socket(WalProposerConn * conn)
{
return PQsocket(conn->pg_conn);
}
int
walprop_flush(WalProposerConn *conn)
static int
libpqprop_flush(WalProposerConn * conn)
{
return (PQflush(conn->pg_conn));
}
void
walprop_finish(WalProposerConn *conn)
static void
libpqprop_finish(WalProposerConn * conn)
{
if (conn->recvbuf != NULL)
PQfreemem(conn->recvbuf);
@@ -243,8 +282,8 @@ walprop_finish(WalProposerConn *conn)
* On success, the data is placed in *buf. It is valid until the next call
* to this function.
*/
PGAsyncReadResult
walprop_async_read(WalProposerConn *conn, char **buf, int *amount)
static PGAsyncReadResult
libpqprop_async_read(WalProposerConn * conn, char **buf, int *amount)
{
int result;
@@ -314,8 +353,8 @@ walprop_async_read(WalProposerConn *conn, char **buf, int *amount)
}
}
PGAsyncWriteResult
walprop_async_write(WalProposerConn *conn, void const *buf, size_t size)
static PGAsyncWriteResult
libpqprop_async_write(WalProposerConn * conn, void const *buf, size_t size)
{
int result;
@@ -369,12 +408,8 @@ walprop_async_write(WalProposerConn *conn, void const *buf, size_t size)
}
}
/*
* This function is very similar to walprop_async_write. For more
* information, refer to the comments there.
*/
bool
walprop_blocking_write(WalProposerConn *conn, void const *buf, size_t size)
static bool
libpqprop_blocking_write(WalProposerConn * conn, void const *buf, size_t size)
{
int result;
@@ -382,6 +417,10 @@ walprop_blocking_write(WalProposerConn *conn, void const *buf, size_t size)
if (!ensure_nonblocking_status(conn, false))
return false;
/*
* This function is very similar to libpqprop_async_write. For more
* information, refer to the comments there.
*/
if ((result = PQputCopyData(conn->pg_conn, buf, size)) == -1)
return false;

View File

@@ -32,6 +32,7 @@ void
_PG_init(void)
{
pg_init_libpagestore();
pg_init_libpqwalproposer();
pg_init_walproposer();
EmitWarningsOnPlaceholders("neon");

View File

@@ -13,6 +13,7 @@
#define NEON_H
extern void pg_init_libpagestore(void);
extern void pg_init_libpqwalproposer(void);
extern void pg_init_walproposer(void);
#endif /* NEON_H */

View File

@@ -79,6 +79,9 @@ bool am_wal_proposer;
char *neon_timeline_walproposer = NULL;
char *neon_tenant_walproposer = NULL;
/* Declared in walproposer.h, defined here, initialized in libpqwalproposer.c */
WalProposerFunctionsType *WalProposerFunctions = NULL;
#define WAL_PROPOSER_SLOT_NAME "wal_proposer_slot"
static int n_safekeepers = 0;
@@ -435,6 +438,10 @@ WalProposerInitImpl(XLogRecPtr flushRecPtr, uint64 systemId)
char *sep;
char *port;
/* Load the libpq-specific functions */
if (WalProposerFunctions == NULL)
elog(ERROR, "libpqwalproposer didn't initialize correctly");
load_file("libpqwalreceiver", false);
if (WalReceiverFunctions == NULL)
elog(ERROR, "libpqwalreceiver didn't initialize correctly");
@@ -1464,6 +1471,12 @@ SendProposerElected(Safekeeper *sk)
*/
th = &sk->voteResponse.termHistory;
/*
* If any WAL is present on the sk, it must be authorized by some term.
* OTOH, without any WAL there are no term switches in the log.
*/
Assert((th->n_entries == 0) ==
(sk->voteResponse.flushLsn == InvalidXLogRecPtr));
/* We must start somewhere. */
Assert(propTermHistory.n_entries >= 1);

View File

@@ -446,31 +446,31 @@ typedef enum
} WalProposerConnStatusType;
/* Re-exported PQerrorMessage */
extern char *walprop_error_message(WalProposerConn *conn);
typedef char *(*walprop_error_message_fn) (WalProposerConn * conn);
/* Re-exported PQstatus */
extern WalProposerConnStatusType walprop_status(WalProposerConn *conn);
typedef WalProposerConnStatusType(*walprop_status_fn) (WalProposerConn * conn);
/* Re-exported PQconnectStart */
extern WalProposerConn * walprop_connect_start(char *conninfo);
typedef WalProposerConn * (*walprop_connect_start_fn) (char *conninfo);
/* Re-exported PQconnectPoll */
extern WalProposerConnectPollStatusType walprop_connect_poll(WalProposerConn *conn);
typedef WalProposerConnectPollStatusType(*walprop_connect_poll_fn) (WalProposerConn * conn);
/* Blocking wrapper around PQsendQuery */
extern bool walprop_send_query(WalProposerConn *conn, char *query);
typedef bool (*walprop_send_query_fn) (WalProposerConn * conn, char *query);
/* Wrapper around PQconsumeInput + PQisBusy + PQgetResult */
extern WalProposerExecStatusType walprop_get_query_result(WalProposerConn *conn);
typedef WalProposerExecStatusType(*walprop_get_query_result_fn) (WalProposerConn * conn);
/* Re-exported PQsocket */
extern pgsocket walprop_socket(WalProposerConn *conn);
typedef pgsocket (*walprop_socket_fn) (WalProposerConn * conn);
/* Wrapper around PQconsumeInput (if socket's read-ready) + PQflush */
extern int walprop_flush(WalProposerConn *conn);
typedef int (*walprop_flush_fn) (WalProposerConn * conn);
/* Re-exported PQfinish */
extern void walprop_finish(WalProposerConn *conn);
typedef void (*walprop_finish_fn) (WalProposerConn * conn);
/*
* Ergonomic wrapper around PGgetCopyData
@@ -486,7 +486,9 @@ extern void walprop_finish(WalProposerConn *conn);
* performs a bit of extra checking work that's always required and is normally
* somewhat verbose.
*/
extern PGAsyncReadResult walprop_async_read(WalProposerConn *conn, char **buf, int *amount);
typedef PGAsyncReadResult(*walprop_async_read_fn) (WalProposerConn * conn,
char **buf,
int *amount);
/*
* Ergonomic wrapper around PQputCopyData + PQflush
@@ -495,14 +497,69 @@ extern PGAsyncReadResult walprop_async_read(WalProposerConn *conn, char **buf, i
*
* For information on the meaning of return codes, refer to PGAsyncWriteResult.
*/
extern PGAsyncWriteResult walprop_async_write(WalProposerConn *conn, void const *buf, size_t size);
typedef PGAsyncWriteResult(*walprop_async_write_fn) (WalProposerConn * conn,
void const *buf,
size_t size);
/*
* Blocking equivalent to walprop_async_write_fn
*
* Returns 'true' if successful, 'false' on failure.
*/
extern bool walprop_blocking_write(WalProposerConn *conn, void const *buf, size_t size);
typedef bool (*walprop_blocking_write_fn) (WalProposerConn * conn, void const *buf, size_t size);
/* All libpqwalproposer exported functions collected together. */
typedef struct WalProposerFunctionsType
{
walprop_error_message_fn walprop_error_message;
walprop_status_fn walprop_status;
walprop_connect_start_fn walprop_connect_start;
walprop_connect_poll_fn walprop_connect_poll;
walprop_send_query_fn walprop_send_query;
walprop_get_query_result_fn walprop_get_query_result;
walprop_socket_fn walprop_socket;
walprop_flush_fn walprop_flush;
walprop_finish_fn walprop_finish;
walprop_async_read_fn walprop_async_read;
walprop_async_write_fn walprop_async_write;
walprop_blocking_write_fn walprop_blocking_write;
} WalProposerFunctionsType;
/* Allow the above functions to be "called" with normal syntax */
#define walprop_error_message(conn) \
WalProposerFunctions->walprop_error_message(conn)
#define walprop_status(conn) \
WalProposerFunctions->walprop_status(conn)
#define walprop_connect_start(conninfo) \
WalProposerFunctions->walprop_connect_start(conninfo)
#define walprop_connect_poll(conn) \
WalProposerFunctions->walprop_connect_poll(conn)
#define walprop_send_query(conn, query) \
WalProposerFunctions->walprop_send_query(conn, query)
#define walprop_get_query_result(conn) \
WalProposerFunctions->walprop_get_query_result(conn)
#define walprop_set_nonblocking(conn, arg) \
WalProposerFunctions->walprop_set_nonblocking(conn, arg)
#define walprop_socket(conn) \
WalProposerFunctions->walprop_socket(conn)
#define walprop_flush(conn) \
WalProposerFunctions->walprop_flush(conn)
#define walprop_finish(conn) \
WalProposerFunctions->walprop_finish(conn)
#define walprop_async_read(conn, buf, amount) \
WalProposerFunctions->walprop_async_read(conn, buf, amount)
#define walprop_async_write(conn, buf, size) \
WalProposerFunctions->walprop_async_write(conn, buf, size)
#define walprop_blocking_write(conn, buf, size) \
WalProposerFunctions->walprop_blocking_write(conn, buf, size)
/*
* The runtime location of the libpqwalproposer functions.
*
* This pointer is set by the initializer in libpqwalproposer, so that we
* can use it later.
*/
extern PGDLLIMPORT WalProposerFunctionsType * WalProposerFunctions;
extern uint64 BackpressureThrottlingTime(void);

View File

@@ -7,9 +7,9 @@ edition = "2021"
anyhow = "1.0"
atty = "0.2.14"
base64 = "0.13.0"
bstr = "1.0"
bstr = "0.2.17"
bytes = { version = "1.0.1", features = ['serde'] }
clap = "4.0"
clap = "3.0"
futures = "0.3.13"
git-version = "0.3.5"
hashbrown = "0.12"
@@ -22,11 +22,7 @@ once_cell = "1.13.0"
parking_lot = "0.12"
pin-project-lite = "0.2.7"
rand = "0.8.3"
reqwest = { version = "0.11", default-features = false, features = [
"blocking",
"json",
"rustls-tls",
] }
reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }
routerify = "3"
rustls = "0.20.0"
rustls-pemfile = "1"
@@ -37,13 +33,13 @@ sha2 = "0.10.2"
socket2 = "0.4.4"
thiserror = "1.0.30"
tokio = { version = "1.17", features = ["macros"] }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev = "d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-rustls = "0.23.0"
tracing = "0.1.36"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
url = "2.2.2"
uuid = { version = "1.2", features = ["v4", "serde"] }
x509-parser = "0.14"
uuid = { version = "0.8.2", features = ["v4", "serde"]}
x509-parser = "0.13.2"
utils = { path = "../libs/utils" }
metrics = { path = "../libs/metrics" }
@@ -51,6 +47,6 @@ workspace_hack = { version = "0.1", path = "../workspace_hack" }
[dev-dependencies]
async-trait = "0.1"
rcgen = "0.10"
rstest = "0.15"
rcgen = "0.8.14"
rstest = "0.12"
tokio-postgres-rustls = "0.9.0"

View File

@@ -8,20 +8,36 @@ use crate::{
http, scram,
stream::PqStream,
};
use futures::TryFutureExt;
use serde::{Deserialize, Serialize};
use std::future::Future;
use thiserror::Error;
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{error, info, info_span};
use tracing::info;
const REQUEST_FAILED: &str = "Console request failed";
#[derive(Debug, Error)]
#[error("{}", REQUEST_FAILED)]
pub struct TransportError(#[from] std::io::Error);
pub enum TransportError {
#[error("Console responded with a malformed JSON: {0}")]
BadResponse(#[from] serde_json::Error),
impl UserFacingError for TransportError {}
/// HTTP status (other than 200) returned by the console.
#[error("Console responded with an HTTP status: {0}")]
HttpStatus(reqwest::StatusCode),
#[error(transparent)]
Io(#[from] std::io::Error),
}
impl UserFacingError for TransportError {
fn to_string_client(&self) -> String {
use TransportError::*;
match self {
HttpStatus(_) => self.to_string(),
_ => REQUEST_FAILED.to_owned(),
}
}
}
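The split above is the point of the type: `Display` carries the detailed error for logs, while `to_string_client` forwards only the HTTP-status variant and collapses I/O and JSON errors into the generic REQUEST_FAILED text. A self-contained mini version of the same pattern (names are illustrative, not the proxy's actual API):

use std::fmt;

const REQUEST_FAILED: &str = "Console request failed";

#[derive(Debug)]
enum Transport {
    HttpStatus(u16),
    Io(std::io::Error),
}

impl fmt::Display for Transport {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Transport::HttpStatus(s) => write!(f, "Console responded with an HTTP status: {s}"),
            Transport::Io(e) => write!(f, "{e}"),
        }
    }
}

impl Transport {
    // analogue of UserFacingError::to_string_client: only the status leaks out
    fn to_string_client(&self) -> String {
        match self {
            Transport::HttpStatus(_) => self.to_string(),
            _ => REQUEST_FAILED.to_owned(),
        }
    }
}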
// Helps eliminate graceless `.map_err` calls without introducing another ctor.
impl From<reqwest::Error> for TransportError {
@@ -146,19 +162,15 @@ impl<'a> Api<'a> {
])
.build()?;
let span = info_span!("http", id = request_id, url = req.url().as_str());
info!(parent: &span, "request auth info");
let msg = self
.endpoint
.checked_execute(req)
.and_then(|r| r.json::<GetRoleSecretResponse>())
.await
.map_err(|e| {
error!(parent: &span, "{e}");
e
})?;
info!(id = request_id, url = req.url().as_str(), "request");
let resp = self.endpoint.execute(req).await?;
if !resp.status().is_success() {
return Err(TransportError::HttpStatus(resp.status()).into());
}
scram::ServerSecret::parse(&msg.role_secret)
let response: GetRoleSecretResponse = serde_json::from_str(&resp.text().await?)?;
scram::ServerSecret::parse(&response.role_secret)
.map(AuthInfo::Scram)
.ok_or(GetAuthInfoError::BadSecret)
}
@@ -177,21 +189,17 @@ impl<'a> Api<'a> {
])
.build()?;
let span = info_span!("http", id = request_id, url = req.url().as_str());
info!(parent: &span, "request wake-up");
let msg = self
.endpoint
.checked_execute(req)
.and_then(|r| r.json::<GetWakeComputeResponse>())
.await
.map_err(|e| {
error!(parent: &span, "{e}");
e
})?;
info!(id = request_id, url = req.url().as_str(), "request");
let resp = self.endpoint.execute(req).await?;
if !resp.status().is_success() {
return Err(TransportError::HttpStatus(resp.status()).into());
}
let response: GetWakeComputeResponse = serde_json::from_str(&resp.text().await?)?;
// Unfortunately, ownership won't let us use `Option::ok_or` here.
let (host, port) = match parse_host_port(&msg.address) {
None => return Err(WakeComputeError::BadComputeAddress(msg.address)),
let (host, port) = match parse_host_port(&response.address) {
None => return Err(WakeComputeError::BadComputeAddress(response.address)),
Some(x) => x,
};

View File

@@ -1,7 +1,7 @@
use crate::{auth, compute, error::UserFacingError, stream::PqStream, waiters};
use thiserror::Error;
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{info, info_span};
use tracing::info;
use utils::pq_proto::{BeMessage as Be, BeParameterStatusMessage};
#[derive(Debug, Error)]
@@ -51,12 +51,11 @@ pub async fn handle_user(
client: &mut PqStream<impl AsyncRead + AsyncWrite + Unpin>,
) -> auth::Result<compute::NodeInfo> {
let psql_session_id = new_psql_session_id();
let span = info_span!("link", psql_session_id = &psql_session_id);
let greeting = hello_message(link_uri, &psql_session_id);
let db_info = super::with_waiter(psql_session_id, |waiter| async {
// Give user a URL to spawn a new database.
info!(parent: &span, "sending the auth URL to the user");
info!("sending the auth URL to the user");
client
.write_message_noflush(&Be::AuthenticationOk)?
.write_message_noflush(&BeParameterStatusMessage::encoding())?
@@ -64,7 +63,7 @@ pub async fn handle_user(
.await?;
// Wait for web console response (see `mgmt`).
info!(parent: &span, "waiting for console's reply...");
info!("waiting for console's reply...");
waiter.await?.map_err(LinkAuthError::AuthFailed)
})
.await?;

View File

@@ -17,7 +17,6 @@ impl Endpoint {
Self { endpoint, client }
}
#[inline(always)]
pub fn url(&self) -> &ApiUrl {
&self.endpoint
}
@@ -37,16 +36,6 @@ impl Endpoint {
) -> Result<reqwest::Response, reqwest::Error> {
self.client.execute(request).await
}
/// Execute a [request](reqwest::Request) and raise an error if status != 200.
pub async fn checked_execute(
&self,
request: reqwest::Request,
) -> Result<reqwest::Response, reqwest::Error> {
self.execute(request)
.await
.and_then(|r| r.error_for_status())
}
}
#[cfg(test)]

View File

@@ -45,43 +45,98 @@ async fn main() -> anyhow::Result<()> {
.with_target(false)
.init();
let arg_matches = cli().get_matches();
let arg_matches = clap::App::new("Neon proxy/router")
.version(GIT_VERSION)
.arg(
Arg::new("proxy")
.short('p')
.long("proxy")
.takes_value(true)
.help("listen for incoming client connections on ip:port")
.default_value("127.0.0.1:4432"),
)
.arg(
Arg::new("auth-backend")
.long("auth-backend")
.takes_value(true)
.possible_values(["console", "postgres", "link"])
.default_value("link"),
)
.arg(
Arg::new("mgmt")
.short('m')
.long("mgmt")
.takes_value(true)
.help("listen for management callback connection on ip:port")
.default_value("127.0.0.1:7000"),
)
.arg(
Arg::new("http")
.short('h')
.long("http")
.takes_value(true)
.help("listen for incoming http connections (metrics, etc) on ip:port")
.default_value("127.0.0.1:7001"),
)
.arg(
Arg::new("uri")
.short('u')
.long("uri")
.takes_value(true)
.help("redirect unauthenticated users to the given uri in case of link auth")
.default_value("http://localhost:3000/psql_session/"),
)
.arg(
Arg::new("auth-endpoint")
.short('a')
.long("auth-endpoint")
.takes_value(true)
.help("cloud API endpoint for authenticating users")
.default_value("http://localhost:3000/authenticate_proxy_request/"),
)
.arg(
Arg::new("tls-key")
.short('k')
.long("tls-key")
.alias("ssl-key") // backwards compatibility
.takes_value(true)
.help("path to TLS key for client postgres connections"),
)
.arg(
Arg::new("tls-cert")
.short('c')
.long("tls-cert")
.alias("ssl-cert") // backwards compatibility
.takes_value(true)
.help("path to TLS cert for client postgres connections"),
)
.get_matches();
let tls_config = match (
arg_matches.get_one::<String>("tls-key"),
arg_matches.get_one::<String>("tls-cert"),
arg_matches.value_of("tls-key"),
arg_matches.value_of("tls-cert"),
) {
(Some(key_path), Some(cert_path)) => Some(config::configure_tls(key_path, cert_path)?),
(None, None) => None,
_ => bail!("either both or neither tls-key and tls-cert must be specified"),
};
let proxy_address: SocketAddr = arg_matches.get_one::<String>("proxy").unwrap().parse()?;
let mgmt_address: SocketAddr = arg_matches.get_one::<String>("mgmt").unwrap().parse()?;
let http_address: SocketAddr = arg_matches.get_one::<String>("http").unwrap().parse()?;
let proxy_address: SocketAddr = arg_matches.value_of("proxy").unwrap().parse()?;
let mgmt_address: SocketAddr = arg_matches.value_of("mgmt").unwrap().parse()?;
let http_address: SocketAddr = arg_matches.value_of("http").unwrap().parse()?;
let auth_backend = match arg_matches
.get_one::<String>("auth-backend")
.unwrap()
.as_str()
{
let auth_backend = match arg_matches.value_of("auth-backend").unwrap() {
"console" => {
let url = arg_matches
.get_one::<String>("auth-endpoint")
.unwrap()
.parse()?;
let url = arg_matches.value_of("auth-endpoint").unwrap().parse()?;
let endpoint = http::Endpoint::new(url, reqwest::Client::new());
auth::BackendType::Console(Cow::Owned(endpoint), ())
}
"postgres" => {
let url = arg_matches
.get_one::<String>("auth-endpoint")
.unwrap()
.parse()?;
let url = arg_matches.value_of("auth-endpoint").unwrap().parse()?;
auth::BackendType::Postgres(Cow::Owned(url), ())
}
"link" => {
let url = arg_matches.get_one::<String>("uri").unwrap().parse()?;
let url = arg_matches.value_of("uri").unwrap().parse()?;
auth::BackendType::Link(Cow::Owned(url))
}
other => bail!("unsupported auth backend: {other}"),
@@ -119,68 +174,3 @@ async fn main() -> anyhow::Result<()> {
Ok(())
}
fn cli() -> clap::Command {
clap::Command::new("Neon proxy/router")
.disable_help_flag(true)
.version(GIT_VERSION)
.arg(
Arg::new("proxy")
.short('p')
.long("proxy")
.help("listen for incoming client connections on ip:port")
.default_value("127.0.0.1:4432"),
)
.arg(
Arg::new("auth-backend")
.long("auth-backend")
.value_parser(["console", "postgres", "link"])
.default_value("link"),
)
.arg(
Arg::new("mgmt")
.short('m')
.long("mgmt")
.help("listen for management callback connection on ip:port")
.default_value("127.0.0.1:7000"),
)
.arg(
Arg::new("http")
.long("http")
.help("listen for incoming http connections (metrics, etc) on ip:port")
.default_value("127.0.0.1:7001"),
)
.arg(
Arg::new("uri")
.short('u')
.long("uri")
.help("redirect unauthenticated users to the given uri in case of link auth")
.default_value("http://localhost:3000/psql_session/"),
)
.arg(
Arg::new("auth-endpoint")
.short('a')
.long("auth-endpoint")
.help("cloud API endpoint for authenticating users")
.default_value("http://localhost:3000/authenticate_proxy_request/"),
)
.arg(
Arg::new("tls-key")
.short('k')
.long("tls-key")
.alias("ssl-key") // backwards compatibility
.help("path to TLS key for client postgres connections"),
)
.arg(
Arg::new("tls-cert")
.short('c')
.long("tls-cert")
.alias("ssl-cert") // backwards compatibility
.help("path to TLS cert for client postgres connections"),
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}

View File

@@ -1,7 +1,7 @@
use crate::auth;
use crate::cancellation::{self, CancelMap};
use crate::config::{ProxyConfig, TlsConfig};
use crate::stream::{MeasuredStream, PqStream, Stream};
use crate::stream::{MetricsStream, PqStream, Stream};
use anyhow::{bail, Context};
use futures::TryFutureExt;
use metrics::{register_int_counter, IntCounter};
@@ -64,7 +64,7 @@ pub async fn task_main(
let cancel_map = Arc::new(CancelMap::default());
loop {
let (socket, peer_addr) = listener.accept().await?;
info!("accepted postgres client connection from {peer_addr}");
info!("accepted connection from {peer_addr}");
let session_id = uuid::Uuid::new_v4();
let cancel_map = Arc::clone(&cancel_map);
@@ -270,8 +270,8 @@ impl<S: AsyncRead + AsyncWrite + Unpin + Send> Client<'_, S> {
// Starting from here we only proxy the client's traffic.
info!("performing the proxy pass...");
let mut db = MeasuredStream::new(db.stream, inc_proxied);
let mut client = MeasuredStream::new(stream.into_inner(), inc_proxied);
let mut db = MetricsStream::new(db.stream, inc_proxied);
let mut client = MetricsStream::new(stream.into_inner(), inc_proxied);
let _ = tokio::io::copy_bidirectional(&mut client, &mut db).await?;
Ok(())
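For context on the stream wrapper used just above: `MeasuredStream`/`MetricsStream` wraps each half of the connection and feeds written byte counts into a caller-supplied callback, with the real type deferring the report until the underlying stream is flushed. A simplified, self-contained sketch of the write-counting half, assuming only tokio (the callback fires per write here, unlike the flush-time reporting in the actual code):

use std::{pin::Pin, task::{Context, Poll}};
use tokio::io::AsyncWrite;

// Counts bytes accepted by the inner writer and reports them via `on_write`.
struct CountingWrite<S, F> {
    inner: S,
    on_write: F,
}

impl<S: AsyncWrite + Unpin, F: FnMut(usize) + Unpin> AsyncWrite for CountingWrite<S, F> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        let me = &mut *self;
        let res = Pin::new(&mut me.inner).poll_write(cx, buf);
        if let Poll::Ready(Ok(n)) = &res {
            (me.on_write)(*n); // report how many bytes the inner stream took
        }
        res
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
}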

View File

@@ -231,7 +231,7 @@ impl<S: AsyncRead + AsyncWrite + Unpin> AsyncWrite for Stream<S> {
pin_project! {
/// This stream tracks all writes and calls user provided
/// callback when the underlying stream is flushed.
pub struct MeasuredStream<S, W> {
pub struct MetricsStream<S, W> {
#[pin]
stream: S,
write_count: usize,
@@ -239,7 +239,7 @@ pin_project! {
}
}
impl<S, W> MeasuredStream<S, W> {
impl<S, W> MetricsStream<S, W> {
pub fn new(stream: S, inc_write_count: W) -> Self {
Self {
stream,
@@ -249,7 +249,7 @@ impl<S, W> MeasuredStream<S, W> {
}
}
impl<S: AsyncRead + Unpin, W> AsyncRead for MeasuredStream<S, W> {
impl<S: AsyncRead + Unpin, W> AsyncRead for MetricsStream<S, W> {
fn poll_read(
self: Pin<&mut Self>,
context: &mut task::Context<'_>,
@@ -259,7 +259,7 @@ impl<S: AsyncRead + Unpin, W> AsyncRead for MeasuredStream<S, W> {
}
}
impl<S: AsyncWrite + Unpin, W: FnMut(usize)> AsyncWrite for MeasuredStream<S, W> {
impl<S: AsyncWrite + Unpin, W: FnMut(usize)> AsyncWrite for MetricsStream<S, W> {
fn poll_write(
self: Pin<&mut Self>,
context: &mut task::Context<'_>,

View File

@@ -11,7 +11,7 @@ hyper = "0.14"
fs2 = "0.4.3"
serde_json = "1"
tracing = "0.1.27"
clap = "4.0"
clap = "3.0"
daemonize = "0.4.1"
tokio = { version = "1.17", features = ["macros", "fs"] }
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
@@ -22,14 +22,14 @@ humantime = "2.1.0"
url = "2.2.2"
signal-hook = "0.3.10"
serde = { version = "1.0", features = ["derive"] }
serde_with = "2.0"
serde_with = "1.12.0"
hex = "0.4.3"
const_format = "0.2.21"
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
git-version = "0.3.5"
async-trait = "0.1"
once_cell = "1.13.0"
toml_edit = { version = "0.14", features = ["easy"] }
toml_edit = { version = "0.13", features = ["easy"] }
thiserror = "1"
parking_lot = "0.12.1"

View File

@@ -2,7 +2,7 @@
// Main entry point for the safekeeper executable
//
use anyhow::{bail, Context, Result};
use clap::{value_parser, Arg, ArgAction, Command};
use clap::{App, Arg};
use const_format::formatcp;
use daemonize::Daemonize;
use fs2::FileExt;
@@ -40,44 +40,145 @@ const ID_FILE_NAME: &str = "safekeeper.id";
project_git_version!(GIT_VERSION);
fn main() -> anyhow::Result<()> {
let arg_matches = cli().get_matches();
let arg_matches = App::new("Neon safekeeper")
.about("Store WAL stream to local file system and push it to WAL receivers")
.version(GIT_VERSION)
.arg(
Arg::new("datadir")
.short('D')
.long("dir")
.takes_value(true)
.help("Path to the safekeeper data directory"),
)
.arg(
Arg::new("init")
.long("init")
.takes_value(false)
.help("Initialize safekeeper with ID"),
)
.arg(
Arg::new("listen-pg")
.short('l')
.long("listen-pg")
.alias("listen") // for compatibility
.takes_value(true)
.help(formatcp!("listen for incoming WAL data connections on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
)
.arg(
Arg::new("listen-http")
.long("listen-http")
.takes_value(true)
.help(formatcp!("http endpoint address for metrics on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
)
// FIXME this argument is no longer needed since pageserver address is forwarded from compute.
// However because this argument is in use by console's e2e tests let's keep it for now and remove separately.
// So currently it is a noop.
.arg(
Arg::new("pageserver")
.short('p')
.long("pageserver")
.takes_value(true),
)
.arg(
Arg::new("recall")
.long("recall")
.takes_value(true)
.help("Period for requestion pageserver to call for replication"),
)
.arg(
Arg::new("daemonize")
.short('d')
.long("daemonize")
.takes_value(false)
.help("Run in the background"),
)
.arg(
Arg::new("no-sync")
.short('n')
.long("no-sync")
.takes_value(false)
.help("Do not wait for changes to be written safely to disk"),
)
.arg(
Arg::new("dump-control-file")
.long("dump-control-file")
.takes_value(true)
.help("Dump control file at path specified by this argument and exit"),
)
.arg(
Arg::new("id").long("id").takes_value(true).help("safekeeper node id: integer")
).arg(
Arg::new("broker-endpoints")
.long("broker-endpoints")
.takes_value(true)
.help("a comma separated broker (etcd) endpoints for storage nodes coordination, e.g. 'http://127.0.0.1:2379'"),
)
.arg(
Arg::new("broker-etcd-prefix")
.long("broker-etcd-prefix")
.takes_value(true)
.help("a prefix to always use when polling/pusing data in etcd from this safekeeper"),
)
.arg(
Arg::new("wal-backup-threads").long("backup-threads").takes_value(true).help(formatcp!("number of threads for wal backup (default {DEFAULT_WAL_BACKUP_RUNTIME_THREADS}")),
).arg(
Arg::new("remote-storage")
.long("remote-storage")
.takes_value(true)
.help("Remote storage configuration for WAL backup (offloading to s3) as TOML inline table, e.g. {\"max_concurrent_syncs\" = 17, \"max_sync_errors\": 13, \"bucket_name\": \"<BUCKETNAME>\", \"bucket_region\":\"<REGION>\", \"concurrency_limit\": 119}.\nSafekeeper offloads WAL to [prefix_in_bucket/]<tenant_id>/<timeline_id>/<segment_file>, mirroring structure on the file system.")
)
.arg(
Arg::new("enable-wal-backup")
.long("enable-wal-backup")
.takes_value(true)
.default_value("true")
.default_missing_value("true")
.help("Enable/disable WAL backup to s3. When disabled, safekeeper removes WAL ignoring WAL backup horizon."),
)
.arg(
Arg::new("auth-validation-public-key-path")
.long("auth-validation-public-key-path")
.takes_value(true)
.help("Path to an RSA .pem public key which is used to check JWT tokens")
)
.get_matches();
if let Some(addr) = arg_matches.get_one::<String>("dump-control-file") {
if let Some(addr) = arg_matches.value_of("dump-control-file") {
let state = control_file::FileStorage::load_control_file(Path::new(addr))?;
let json = serde_json::to_string(&state)?;
print!("{json}");
print!("{}", json);
return Ok(());
}
let mut conf = SafeKeeperConf::default();
if let Some(dir) = arg_matches.get_one::<PathBuf>("datadir") {
if let Some(dir) = arg_matches.value_of("datadir") {
// change into the data directory.
std::env::set_current_dir(dir)?;
std::env::set_current_dir(PathBuf::from(dir))?;
}
if arg_matches.get_flag("no-sync") {
if arg_matches.is_present("no-sync") {
conf.no_sync = true;
}
if arg_matches.get_flag("daemonize") {
if arg_matches.is_present("daemonize") {
conf.daemonize = true;
}
if let Some(addr) = arg_matches.get_one::<String>("listen-pg") {
conf.listen_pg_addr = addr.to_string();
if let Some(addr) = arg_matches.value_of("listen-pg") {
conf.listen_pg_addr = addr.to_owned();
}
if let Some(addr) = arg_matches.get_one::<String>("listen-http") {
conf.listen_http_addr = addr.to_string();
if let Some(addr) = arg_matches.value_of("listen-http") {
conf.listen_http_addr = addr.to_owned();
}
if let Some(recall) = arg_matches.get_one::<String>("recall") {
if let Some(recall) = arg_matches.value_of("recall") {
conf.recall_period = humantime::parse_duration(recall)?;
}
let mut given_id = None;
if let Some(given_id_str) = arg_matches.get_one::<String>("id") {
if let Some(given_id_str) = arg_matches.value_of("id") {
given_id = Some(NodeId(
given_id_str
.parse()
@@ -85,20 +186,20 @@ fn main() -> anyhow::Result<()> {
));
}
if let Some(addr) = arg_matches.get_one::<String>("broker-endpoints") {
if let Some(addr) = arg_matches.value_of("broker-endpoints") {
let collected_ep: Result<Vec<Url>, ParseError> = addr.split(',').map(Url::parse).collect();
conf.broker_endpoints = collected_ep.context("Failed to parse broker endpoint urls")?;
}
if let Some(prefix) = arg_matches.get_one::<String>("broker-etcd-prefix") {
if let Some(prefix) = arg_matches.value_of("broker-etcd-prefix") {
conf.broker_etcd_prefix = prefix.to_string();
}
if let Some(backup_threads) = arg_matches.get_one::<String>("wal-backup-threads") {
if let Some(backup_threads) = arg_matches.value_of("wal-backup-threads") {
conf.backup_runtime_threads = backup_threads
.parse()
.with_context(|| format!("Failed to parse backup threads {}", backup_threads))?;
}
if let Some(storage_conf) = arg_matches.get_one::<String>("remote-storage") {
if let Some(storage_conf) = arg_matches.value_of("remote-storage") {
// funny toml doesn't consider plain inline table as valid document, so wrap in a key to parse
let storage_conf_toml = format!("remote_storage = {}", storage_conf);
let parsed_toml = storage_conf_toml.parse::<Document>()?; // parse
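The wrapping in the two lines above is the entire trick: an inline table by itself is not a valid TOML document, so it is nested under a `remote_storage` key and parsed as a whole document. A minimal sketch of the same idea, assuming toml_edit's `Document: FromStr` (which is what the `.parse::<Document>()` call above already relies on):

use toml_edit::Document;

// A bare inline table such as `{bucket_name = "x", bucket_region = "y"}` will
// not parse on its own, so wrap it under a key first.
fn parse_inline_table(raw: &str) -> Result<Document, toml_edit::TomlError> {
    format!("remote_storage = {raw}").parse::<Document>()
}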
@@ -107,16 +208,16 @@ fn main() -> anyhow::Result<()> {
}
// Seems like there is no better way to accept bool values explicitly in clap.
conf.wal_backup_enabled = arg_matches
.get_one::<String>("enable-wal-backup")
.value_of("enable-wal-backup")
.unwrap()
.parse()
.context("failed to parse bool enable-s3-offload bool")?;
conf.auth_validation_public_key_path = arg_matches
.get_one::<String>("auth-validation-public-key-path")
.value_of("auth-validation-public-key-path")
.map(PathBuf::from);
start_safekeeper(conf, given_id, arg_matches.get_flag("init"))
start_safekeeper(conf, given_id, arg_matches.is_present("init"))
}
fn start_safekeeper(mut conf: SafeKeeperConf, given_id: Option<NodeId>, init: bool) -> Result<()> {
@@ -323,102 +424,3 @@ fn set_id(conf: &mut SafeKeeperConf, given_id: Option<NodeId>) -> Result<()> {
conf.my_id = my_id;
Ok(())
}
fn cli() -> Command {
Command::new("Neon safekeeper")
.about("Store WAL stream to local file system and push it to WAL receivers")
.version(GIT_VERSION)
.arg(
Arg::new("datadir")
.short('D')
.long("dir")
.value_parser(value_parser!(PathBuf))
.help("Path to the safekeeper data directory"),
)
.arg(
Arg::new("init")
.long("init")
.action(ArgAction::SetTrue)
.help("Initialize safekeeper with ID"),
)
.arg(
Arg::new("listen-pg")
.short('l')
.long("listen-pg")
.alias("listen") // for compatibility
.help(formatcp!("listen for incoming WAL data connections on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
)
.arg(
Arg::new("listen-http")
.long("listen-http")
.help(formatcp!("http endpoint address for metrics on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
)
// FIXME this argument is no longer needed since pageserver address is forwarded from compute.
// However because this argument is in use by console's e2e tests let's keep it for now and remove separately.
// So currently it is a noop.
.arg(
Arg::new("pageserver")
.short('p')
.long("pageserver"),
)
.arg(
Arg::new("recall")
.long("recall")
.help("Period for requestion pageserver to call for replication"),
)
.arg(
Arg::new("daemonize")
.short('d')
.long("daemonize")
.action(ArgAction::SetTrue)
.help("Run in the background"),
)
.arg(
Arg::new("no-sync")
.short('n')
.long("no-sync")
.action(ArgAction::SetTrue)
.help("Do not wait for changes to be written safely to disk"),
)
.arg(
Arg::new("dump-control-file")
.long("dump-control-file")
.help("Dump control file at path specified by this argument and exit"),
)
.arg(
Arg::new("id").long("id").help("safekeeper node id: integer")
).arg(
Arg::new("broker-endpoints")
.long("broker-endpoints")
.help("a comma separated broker (etcd) endpoints for storage nodes coordination, e.g. 'http://127.0.0.1:2379'"),
)
.arg(
Arg::new("broker-etcd-prefix")
.long("broker-etcd-prefix")
.help("a prefix to always use when polling/pusing data in etcd from this safekeeper"),
)
.arg(
Arg::new("wal-backup-threads").long("backup-threads").help(formatcp!("number of threads for wal backup (default {DEFAULT_WAL_BACKUP_RUNTIME_THREADS}")),
).arg(
Arg::new("remote-storage")
.long("remote-storage")
.help("Remote storage configuration for WAL backup (offloading to s3) as TOML inline table, e.g. {\"max_concurrent_syncs\" = 17, \"max_sync_errors\": 13, \"bucket_name\": \"<BUCKETNAME>\", \"bucket_region\":\"<REGION>\", \"concurrency_limit\": 119}.\nSafekeeper offloads WAL to [prefix_in_bucket/]<tenant_id>/<timeline_id>/<segment_file>, mirroring structure on the file system.")
)
.arg(
Arg::new("enable-wal-backup")
.long("enable-wal-backup")
.default_value("true")
.default_missing_value("true")
.help("Enable/disable WAL backup to s3. When disabled, safekeeper removes WAL ignoring WAL backup horizon."),
)
.arg(
Arg::new("auth-validation-public-key-path")
.long("auth-validation-public-key-path")
.help("Path to an RSA .pem public key which is used to check JWT tokens")
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}

View File

@@ -1,8 +1,8 @@
use anyhow::anyhow;
use hyper::{Body, Request, Response, StatusCode, Uri};
use anyhow::Context;
use once_cell::sync::Lazy;
use postgres_ffi::WAL_SEGMENT_SIZE;
use serde::Serialize;
use serde::Serializer;
use std::collections::{HashMap, HashSet};
@@ -10,7 +10,6 @@ use std::fmt::Display;
use std::sync::Arc;
use tokio::task::JoinError;
use crate::safekeeper::ServerInfo;
use crate::safekeeper::Term;
use crate::safekeeper::TermHistory;
@@ -78,7 +77,6 @@ struct TimelineStatus {
#[serde(serialize_with = "display_serialize")]
timeline_id: TimelineId,
acceptor_state: AcceptorStateStatus,
pg_info: ServerInfo,
#[serde(serialize_with = "display_serialize")]
flush_lsn: Lsn,
#[serde(serialize_with = "display_serialize")]
@@ -123,7 +121,6 @@ async fn timeline_status_handler(request: Request<Body>) -> Result<Response<Body
tenant_id: ttid.tenant_id,
timeline_id: ttid.timeline_id,
acceptor_state: acc_state,
pg_info: state.server,
flush_lsn,
timeline_start_lsn: state.timeline_start_lsn,
local_start_lsn: state.local_start_lsn,
@@ -139,29 +136,12 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
let request_data: TimelineCreateRequest = json_request(&mut request).await?;
let ttid = TenantTimelineId {
tenant_id: request_data.tenant_id,
tenant_id: parse_request_param(&request, "tenant_id")?,
timeline_id: request_data.timeline_id,
};
check_permission(&request, Some(ttid.tenant_id))?;
let server_info = ServerInfo {
pg_version: request_data.pg_version,
system_id: request_data.system_id.unwrap_or(0),
wal_seg_size: request_data.wal_seg_size.unwrap_or(WAL_SEGMENT_SIZE as u32),
};
let local_start_lsn = request_data.local_start_lsn.unwrap_or_else(|| {
request_data
.commit_lsn
.segment_lsn(server_info.wal_seg_size as usize)
});
tokio::task::spawn_blocking(move || {
GlobalTimelines::create(ttid, server_info, request_data.commit_lsn, local_start_lsn)
})
.await
.map_err(|e| ApiError::InternalServerError(e.into()))?
.map_err(ApiError::InternalServerError)?;
json_response(StatusCode::OK, ())
Err(ApiError::BadRequest(anyhow!("not implemented")))
}
/// Deactivates the timeline and removes its data directory.
@@ -261,7 +241,7 @@ pub fn make_router(
.data(auth)
.get("/v1/status", status_handler)
// Will be used in the future instead of implicit timeline creation
.post("/v1/tenant/timeline", timeline_create_handler)
.post("/v1/tenant/:tenant_id/timeline", timeline_create_handler)
.get(
"/v1/tenant/:tenant_id/timeline/:timeline_id",
timeline_status_handler,

View File

@@ -104,8 +104,6 @@ fn prepare_safekeeper(ttid: TenantTimelineId, pg_version: u32) -> Result<Arc<Tim
wal_seg_size: WAL_SEGMENT_SIZE as u32,
system_id: 0,
},
Lsn::INVALID,
Lsn::INVALID,
)
}

View File

@@ -6,7 +6,6 @@ use anyhow::{anyhow, bail, Result};
use bytes::BytesMut;
use tracing::*;
use utils::lsn::Lsn;
use crate::safekeeper::ServerInfo;
use crate::timeline::Timeline;
@@ -80,7 +79,7 @@ impl<'pg> ReceiveWalConn<'pg> {
system_id: greeting.system_id,
wal_seg_size: greeting.wal_seg_size,
};
GlobalTimelines::create(spg.ttid, server_info, Lsn::INVALID, Lsn::INVALID)?
GlobalTimelines::create(spg.ttid, server_info)?
}
_ => bail!("unexpected message {:?} instead of greeting", next_msg),
};

View File

@@ -222,8 +222,6 @@ impl SafeKeeperState {
ttid: &TenantTimelineId,
server_info: ServerInfo,
peers: Vec<NodeId>,
commit_lsn: Lsn,
local_start_lsn: Lsn,
) -> SafeKeeperState {
SafeKeeperState {
tenant_id: ttid.tenant_id,
@@ -235,10 +233,10 @@ impl SafeKeeperState {
server: server_info,
proposer_uuid: [0; 16],
timeline_start_lsn: Lsn(0),
local_start_lsn,
commit_lsn,
backup_lsn: local_start_lsn,
peer_horizon_lsn: local_start_lsn,
local_start_lsn: Lsn(0),
commit_lsn: Lsn(0),
backup_lsn: Lsn::INVALID,
peer_horizon_lsn: Lsn(0),
remote_consistent_lsn: Lsn(0),
peers: Peers(peers.iter().map(|p| (*p, PeerInfo::new())).collect()),
}
@@ -254,8 +252,6 @@ impl SafeKeeperState {
wal_seg_size: 0,
},
vec![],
Lsn::INVALID,
Lsn::INVALID,
)
}
}
@@ -744,8 +740,7 @@ where
"setting timeline_start_lsn to {:?}",
state.timeline_start_lsn
);
}
if state.local_start_lsn == Lsn(0) {
state.local_start_lsn = msg.start_streaming_at;
info!("setting local_start_lsn to {:?}", state.local_start_lsn);
}

View File

@@ -107,14 +107,6 @@ impl SharedState {
bail!(TimelineError::UninitialinzedPgVersion(*ttid));
}
if state.commit_lsn < state.local_start_lsn {
bail!(
"commit_lsn {} is higher than local_start_lsn {}",
state.commit_lsn,
state.local_start_lsn
);
}
// We don't want to write anything to disk, because we may have existing timeline there.
// These functions should not change anything on disk.
let control_store = control_file::FileStorage::create_new(ttid, conf, state)?;
@@ -294,7 +286,7 @@ pub struct Timeline {
/// Sending here asks for wal backup launcher attention (start/stop
/// offloading). Sending ttid instead of concrete command allows to do
/// sending without timeline lock.
pub wal_backup_launcher_tx: Sender<TenantTimelineId>,
wal_backup_launcher_tx: Sender<TenantTimelineId>,
/// Used to broadcast commit_lsn updates to all background jobs.
commit_lsn_watch_tx: watch::Sender<Lsn>,
@@ -347,12 +339,10 @@ impl Timeline {
ttid: TenantTimelineId,
wal_backup_launcher_tx: Sender<TenantTimelineId>,
server_info: ServerInfo,
commit_lsn: Lsn,
local_start_lsn: Lsn,
) -> Result<Timeline> {
let (commit_lsn_watch_tx, commit_lsn_watch_rx) = watch::channel(Lsn::INVALID);
let (cancellation_tx, cancellation_rx) = watch::channel(false);
let state = SafeKeeperState::new(&ttid, server_info, vec![], commit_lsn, local_start_lsn);
let state = SafeKeeperState::new(&ttid, server_info, vec![]);
Ok(Timeline {
ttid,
@@ -391,7 +381,6 @@ impl Timeline {
match || -> Result<()> {
shared_state.sk.persist()?;
// TODO: add more initialization steps here
shared_state.update_status(self.ttid);
Ok(())
}() {
Ok(_) => Ok(()),

View File

@@ -15,7 +15,6 @@ use std::sync::{Arc, Mutex, MutexGuard};
use tokio::sync::mpsc::Sender;
use tracing::*;
use utils::id::{TenantId, TenantTimelineId, TimelineId};
use utils::lsn::Lsn;
struct GlobalTimelinesState {
timelines: HashMap<TenantTimelineId, Arc<Timeline>>,
@@ -154,12 +153,7 @@ impl GlobalTimelines {
/// Create a new timeline with the given id. If the timeline already exists, returns
/// an existing timeline.
pub fn create(
ttid: TenantTimelineId,
server_info: ServerInfo,
commit_lsn: Lsn,
local_start_lsn: Lsn,
) -> Result<Arc<Timeline>> {
pub fn create(ttid: TenantTimelineId, server_info: ServerInfo) -> Result<Arc<Timeline>> {
let (conf, wal_backup_launcher_tx) = {
let state = TIMELINES_STATE.lock().unwrap();
if let Ok(timeline) = state.get(&ttid) {
@@ -176,8 +170,6 @@ impl GlobalTimelines {
ttid,
wal_backup_launcher_tx,
server_info,
commit_lsn,
local_start_lsn,
)?);
// Take a lock and finish the initialization holding this mutex. No other threads
@@ -198,9 +190,6 @@ impl GlobalTimelines {
Ok(_) => {
// We are done with bootstrap, release the lock, return the timeline.
drop(shared_state);
timeline
.wal_backup_launcher_tx
.blocking_send(timeline.ttid)?;
Ok(timeline)
}
Err(e) => {

View File

@@ -317,13 +317,13 @@ def remote_consistent_lsn(
) -> int:
detail = pageserver_http_client.timeline_detail(tenant, timeline)
lsn_str = detail["remote_consistent_lsn"]
if lsn_str is None:
if detail["remote"] is None:
# No remote information at all. This happens right after creating
# a timeline, before any part of it has been uploaded to remote
# storage yet.
return 0
else:
lsn_str = detail["remote"]["remote_consistent_lsn"]
assert isinstance(lsn_str, str)
return lsn_from_hex(lsn_str)
@@ -577,7 +577,7 @@ def main(args: argparse.Namespace):
args.work_dir, f"{timeline['tenant_id']}_{timeline['timeline_id']}.tar"
)
pg_version = timeline["pg_version"]
pg_version = timeline["local"]["pg_version"]
# Export timeline from old pageserver
if args.only_import is False:

View File

@@ -130,12 +130,11 @@ class NeonCompare(PgCompare):
"size", timeline_size / (1024 * 1024), "MB", report=MetricReport.LOWER_IS_BETTER
)
params = f'{{tenant_id="{self.env.initial_tenant}",timeline_id="{self.timeline}"}}'
total_files = self.zenbenchmark.get_int_counter_value(
self.env.pageserver, "pageserver_created_persistent_files_total" + params
self.env.pageserver, "pageserver_created_persistent_files_total"
)
total_bytes = self.zenbenchmark.get_int_counter_value(
self.env.pageserver, "pageserver_written_persistent_bytes_total" + params
self.env.pageserver, "pageserver_written_persistent_bytes_total"
)
self.zenbenchmark.record(
"data_uploaded", total_bytes / (1024 * 1024), "MB", report=MetricReport.LOWER_IS_BETTER

Some files were not shown because too many files have changed in this diff