Compare commits

1 commit

Author: Konstantin Knizhnik
Commit: 121c19fcd6, "Replace R-Tree with B-Tree in layer map"
Date: 2022-10-07 13:11:24 +03:00

131 changed files with 2720 additions and 10383 deletions


@@ -10,7 +10,7 @@
<!-- List everything that should be done **before** release, any issues / setting changes / etc -->
### Checklist after release
-- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/219/files))
+- [ ] Based on the merged commits write release notes and open a PR into `website` repo ([example](https://github.com/neondatabase/website/pull/120/files))
- [ ] Check [#dev-production-stream](https://neondb.slack.com/archives/C03F5SM1N02) Slack channel
- [ ] Check [stuck projects page](https://console.neon.tech/admin/projects?sort=last_active&order=desc&stuck=true)
- [ ] Check [recent operation failures](https://console.neon.tech/admin/operations?action=create_timeline%2Cstart_compute%2Cstop_compute%2Csuspend_compute%2Capply_config%2Cdelete_timeline%2Cdelete_tenant%2Ccreate_branch%2Ccheck_availability&sort=updated_at&order=desc&had_retries=some)


@@ -47,7 +47,7 @@ runs:
else
key=branch-$(echo ${GITHUB_REF#refs/heads/} | tr -c "[:alnum:]._-" "-")
fi
-echo "KEY=${key}" >> $GITHUB_OUTPUT
+echo "::set-output name=KEY::${key}"
- uses: actions/setup-java@v3
if: ${{ inputs.action == 'generate' }}
@@ -186,7 +186,7 @@ runs:
aws s3 cp --only-show-errors ./index.html "s3://${BUCKET}/${REPORT_PREFIX}/latest/index.html"
echo "[Allure Report](${REPORT_URL})" >> ${GITHUB_STEP_SUMMARY}
-echo "report-url=${REPORT_URL}" >> $GITHUB_OUTPUT
+echo "::set-output name=report-url::${REPORT_URL}"
- name: Release Allure lock
if: ${{ inputs.action == 'generate' && always() }}
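Most of the workflow-file changes in this comparison toggle between the two GitHub Actions syntaxes for publishing a step output. As a point of reference, here is a minimal, illustrative shell snippet of both forms inside a run: step; it is not taken from any file in this repository, and the step id used at the end is made up:

    # newer form: append key=value to the file referenced by $GITHUB_OUTPUT
    echo "KEY=${key}" >> "$GITHUB_OUTPUT"
    # older, deprecated form: print a workflow command to stdout
    echo "::set-output name=KEY::${key}"
    # either way, a later step reads the value as ${{ steps.generate-key.outputs.KEY }}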


@@ -34,7 +34,7 @@ runs:
S3_KEY=$(aws s3api list-objects-v2 --bucket ${BUCKET} --prefix ${PREFIX%$GITHUB_RUN_ATTEMPT} | jq -r '.Contents[].Key' | grep ${FILENAME} | sort --version-sort | tail -1 || true)
if [ -z "${S3_KEY}" ]; then
if [ "${SKIP_IF_DOES_NOT_EXIST}" = "true" ]; then
-echo 'SKIPPED=true' >> $GITHUB_OUTPUT
+echo '::set-output name=SKIPPED::true'
exit 0
else
echo 2>&1 "Neither s3://${BUCKET}/${PREFIX}/${FILENAME} nor its version from previous attempts exist"
@@ -42,7 +42,7 @@ runs:
fi
fi
-echo 'SKIPPED=false' >> $GITHUB_OUTPUT
+echo '::set-output name=SKIPPED::false'
mkdir -p $(dirname $ARCHIVE)
time aws s3 cp --only-show-errors s3://${BUCKET}/${S3_KEY} ${ARCHIVE}
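A detail worth calling out in the hunk above: `${PREFIX%$GITHUB_RUN_ATTEMPT}` strips the run-attempt number from the end of the prefix, so the S3 listing also matches uploads from earlier attempts. A small bash sketch of that parameter expansion, with invented example values:

    PREFIX="artifacts/1234/3"       # hypothetical prefix ending in the attempt number
    GITHUB_RUN_ATTEMPT="3"
    echo "${PREFIX%$GITHUB_RUN_ATTEMPT}"   # prints "artifacts/1234/": the shortest matching suffix is removed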


@@ -41,8 +41,8 @@ runs:
;;
esac
-echo "api_host=${API_HOST}" >> $GITHUB_OUTPUT
-echo "region_id=${REGION_ID}" >> $GITHUB_OUTPUT
+echo "::set-output name=api_host::${API_HOST}"
+echo "::set-output name=region_id::${REGION_ID}"
env:
ENVIRONMENT: ${{ inputs.environment }}
REGION_ID: ${{ inputs.region_id }}
@@ -72,10 +72,10 @@ runs:
dsn=$(echo $project | jq --raw-output '.roles[] | select(.name != "web_access") | .dsn')/main
echo "::add-mask::${dsn}"
-echo "dsn=${dsn}" >> $GITHUB_OUTPUT
+echo "::set-output name=dsn::${dsn}"
project_id=$(echo $project | jq --raw-output '.id')
-echo "project_id=${project_id}" >> $GITHUB_OUTPUT
+echo "::set-output name=project_id::${project_id}"
env:
API_KEY: ${{ inputs.api_key }}
API_HOST: ${{ steps.parse-input.outputs.api_host }}


@@ -32,7 +32,7 @@ runs:
;;
esac
-echo "api_host=${API_HOST}" >> $GITHUB_OUTPUT
+echo "::set-output name=api_host::${API_HOST}"
env:
ENVIRONMENT: ${{ inputs.environment }}


@@ -2,6 +2,3 @@ zenith_install.tar.gz
.zenith_current_version
neon_install.tar.gz
.neon_current_version
-collections/*
-!collections/.keep


@@ -3,7 +3,6 @@
localhost_warning = False
host_key_checking = False
timeout = 30
-collections_paths = ./collections
[ssh_connection]
ssh_args = -F ./ansible.ssh.cfg


@@ -1,7 +1,7 @@
- name: Upload Neon binaries
hosts: storage
gather_facts: False
-remote_user: "{{ remote_user }}"
+remote_user: admin
tasks:
@@ -14,8 +14,7 @@
- safekeeper
- name: inform about versions
-debug:
-msg: "Version to deploy - {{ current_version }}"
+debug: msg="Version to deploy - {{ current_version }}"
tags:
- pageserver
- safekeeper
@@ -36,7 +35,7 @@
- name: Deploy pageserver
hosts: pageservers
gather_facts: False
-remote_user: "{{ remote_user }}"
+remote_user: admin
tasks:
@@ -64,29 +63,15 @@
tags:
- pageserver
-- name: read the existing remote pageserver config
-ansible.builtin.slurp:
-src: /storage/pageserver/data/pageserver.toml
-register: _remote_ps_config
-tags:
-- pageserver
-- name: parse the existing pageserver configuration
-ansible.builtin.set_fact:
-_existing_ps_config: "{{ _remote_ps_config['content'] | b64decode | sivel.toiletwater.from_toml }}"
-tags:
-- pageserver
-- name: construct the final pageserver configuration dict
-ansible.builtin.set_fact:
-pageserver_config: "{{ pageserver_config_stub | combine({'id': _existing_ps_config.id }) }}"
-tags:
-- pageserver
-- name: template the pageserver config
-template:
-src: templates/pageserver.toml.j2
-dest: /storage/pageserver/data/pageserver.toml
+- name: update remote storage (s3) config
+lineinfile:
+path: /storage/pageserver/data/pageserver.toml
+line: "{{ item }}"
+loop:
+- "[remote_storage]"
+- "bucket_name = '{{ bucket_name }}'"
+- "bucket_region = '{{ bucket_region }}'"
+- "prefix_in_bucket = '{{ inventory_hostname }}'"
become: true
tags:
- pageserver
@@ -124,7 +109,7 @@
- name: Deploy safekeeper
hosts: safekeepers
gather_facts: False
-remote_user: "{{ remote_user }}"
+remote_user: admin
tasks:


@@ -23,7 +23,6 @@ docker cp ${ID}:/data/postgres_install.tar.gz .
tar -xzf postgres_install.tar.gz -C neon_install
mkdir neon_install/bin/
docker cp ${ID}:/usr/local/bin/pageserver neon_install/bin/
-docker cp ${ID}:/usr/local/bin/pageserver_binutils neon_install/bin/
docker cp ${ID}:/usr/local/bin/safekeeper neon_install/bin/
docker cp ${ID}:/usr/local/bin/proxy neon_install/bin/
docker cp ${ID}:/usr/local/v14/bin/ neon_install/v14/bin/

.github/ansible/neon-stress.hosts (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
[pageservers]
neon-stress-ps-1 console_region_id=1
neon-stress-ps-2 console_region_id=1
[safekeepers]
neon-stress-sk-1 console_region_id=1
neon-stress-sk-2 console_region_id=1
neon-stress-sk-3 console_region_id=1
[storage:children]
pageservers
safekeepers
[storage:vars]
env_name = neon-stress
console_mgmt_base_url = http://neon-stress-console.local
bucket_name = neon-storage-ireland
bucket_region = eu-west-1
etcd_endpoints = etcd-stress.local:2379
safekeeper_enable_s3_offload = false
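For orientation, an inventory file like the one above is consumed by the deploy.yaml playbook shown earlier in this diff. The invocation below is only a sketch (the tag comes from that playbook; CI passes the inventory via a matrix variable instead):

    # run only the pageserver tasks of the deploy playbook against the neon-stress inventory
    ansible-playbook deploy.yaml -i neon-stress.hosts --tags pageserver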


@@ -1,31 +0,0 @@
storage:
vars:
bucket_name: neon-storage-ireland
bucket_region: eu-west-1
console_mgmt_base_url: http://neon-stress-console.local
env_name: neon-stress
etcd_endpoints: neon-stress-etcd.local:2379
safekeeper_enable_s3_offload: 'false'
pageserver_config_stub:
pg_distrib_dir: /usr/local
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "{{ inventory_hostname }}"
hostname_suffix: ".local"
remote_user: admin
children:
pageservers:
hosts:
neon-stress-ps-1:
console_region_id: aws-eu-west-1
neon-stress-ps-2:
console_region_id: aws-eu-west-1
safekeepers:
hosts:
neon-stress-sk-1:
console_region_id: aws-eu-west-1
neon-stress-sk-2:
console_region_id: aws-eu-west-1
neon-stress-sk-3:
console_region_id: aws-eu-west-1

.github/ansible/production.hosts (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
[pageservers]
#zenith-1-ps-1 console_region_id=1
zenith-1-ps-2 console_region_id=1
zenith-1-ps-3 console_region_id=1
[safekeepers]
zenith-1-sk-1 console_region_id=1
zenith-1-sk-2 console_region_id=1
zenith-1-sk-3 console_region_id=1
[storage:children]
pageservers
safekeepers
[storage:vars]
env_name = prod-1
console_mgmt_base_url = http://console-release.local
bucket_name = zenith-storage-oregon
bucket_region = us-west-2
etcd_endpoints = zenith-1-etcd.local:2379


@@ -1,33 +0,0 @@
---
storage:
vars:
env_name: prod-1
console_mgmt_base_url: http://console-release.local
bucket_name: zenith-storage-oregon
bucket_region: us-west-2
etcd_endpoints: zenith-1-etcd.local:2379
pageserver_config_stub:
pg_distrib_dir: /usr/local
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "{{ inventory_hostname }}"
hostname_suffix: ".local"
remote_user: admin
children:
pageservers:
hosts:
zenith-1-ps-2:
console_region_id: aws-us-west-2
zenith-1-ps-3:
console_region_id: aws-us-west-2
safekeepers:
hosts:
zenith-1-sk-1:
console_region_id: aws-us-west-2
zenith-1-sk-2:
console_region_id: aws-us-west-2
zenith-1-sk-3:
console_region_id: aws-us-west-2


@@ -12,19 +12,18 @@ cat <<EOF | tee /tmp/payload
"version": 1, "version": 1,
"host": "${HOST}", "host": "${HOST}",
"port": 6400, "port": 6400,
"region_id": "{{ console_region_id }}", "region_id": {{ console_region_id }},
"instance_id": "${INSTANCE_ID}", "instance_id": "${INSTANCE_ID}",
"http_host": "${HOST}", "http_host": "${HOST}",
"http_port": 9898, "http_port": 9898
"active": false
} }
EOF EOF
# check if pageserver already registered or not # check if pageserver already registered or not
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers/${INSTANCE_ID} -o /dev/null; then if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/pageservers/${INSTANCE_ID} -o /dev/null; then
# not registered, so register it now # not registered, so register it now
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/pageservers -d@/tmp/payload | jq -r '.id') ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/pageservers -d@/tmp/payload | jq -r '.ID')
# init pageserver # init pageserver
sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data sudo -u pageserver /usr/local/bin/pageserver -c "id=${ID}" -c "pg_distrib_dir='/usr/local'" --init -D /storage/pageserver/data
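Both register scripts in this comparison follow the same register-if-absent pattern: probe the console for this instance and POST the payload only when the probe fails. A stripped-down bash sketch of that flow, where CONSOLE_URL stands in for the templated {{ console_mgmt_base_url }} value, the path is abbreviated, and the Authorization header used by one of the two versions is omitted:

    # probe: does the console already know about this instance?
    if ! curl -sf "${CONSOLE_URL}/pageservers/${INSTANCE_ID}" -o /dev/null; then
        # not registered yet: create it and capture the id assigned by the console
        ID=$(curl -sf -X POST "${CONSOLE_URL}/pageservers" -d @/tmp/payload | jq -r '.id')
    fi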


@@ -14,18 +14,18 @@ cat <<EOF | tee /tmp/payload
"host": "${HOST}", "host": "${HOST}",
"port": 6500, "port": 6500,
"http_port": 7676, "http_port": 7676,
"region_id": "{{ console_region_id }}", "region_id": {{ console_region_id }},
"instance_id": "${INSTANCE_ID}", "instance_id": "${INSTANCE_ID}",
"availability_zone_id": "${AZ_ID}", "availability_zone_id": "${AZ_ID}"
"active": false
} }
EOF EOF
# check if safekeeper already registered or not # check if safekeeper already registered or not
if ! curl -sf -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers/${INSTANCE_ID} -o /dev/null; then if ! curl -sf -X PATCH -d '{}' {{ console_mgmt_base_url }}/api/v1/safekeepers/${INSTANCE_ID} -o /dev/null; then
# not registered, so register it now # not registered, so register it now
ID=$(curl -sf -X POST -H "Authorization: Bearer {{ CONSOLE_API_TOKEN }}" {{ console_mgmt_base_url }}/management/api/v2/safekeepers -d@/tmp/payload | jq -r '.id') ID=$(curl -sf -X POST {{ console_mgmt_base_url }}/api/v1/safekeepers -d@/tmp/payload | jq -r '.ID')
# init safekeeper # init safekeeper
sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data sudo -u safekeeper /usr/local/bin/safekeeper --id ${ID} --init -D /storage/safekeeper/data
fi fi


@@ -1,3 +0,0 @@
ansible_connection: aws_ssm
ansible_aws_ssm_bucket_name: neon-dev-bucket
ansible_python_interpreter: /usr/bin/python3

.github/ansible/staging.hosts (vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
[pageservers]
#zenith-us-stage-ps-1 console_region_id=27
zenith-us-stage-ps-2 console_region_id=27
zenith-us-stage-ps-3 console_region_id=27
zenith-us-stage-ps-4 console_region_id=27
zenith-us-stage-test-ps-1 console_region_id=28
[safekeepers]
zenith-us-stage-sk-4 console_region_id=27
zenith-us-stage-sk-5 console_region_id=27
zenith-us-stage-sk-6 console_region_id=27
zenith-us-stage-test-sk-1 console_region_id=28
zenith-us-stage-test-sk-2 console_region_id=28
zenith-us-stage-test-sk-3 console_region_id=28
[storage:children]
pageservers
safekeepers
[storage:vars]
env_name = us-stage
console_mgmt_base_url = http://console-staging.local
bucket_name = zenith-staging-storage-us-east-1
bucket_region = us-east-1
etcd_endpoints = zenith-us-stage-etcd.local:2379


@@ -1,34 +0,0 @@
storage:
vars:
bucket_name: zenith-staging-storage-us-east-1
bucket_region: us-east-1
console_mgmt_base_url: http://console-staging.local
env_name: us-stage
etcd_endpoints: zenith-us-stage-etcd.local:2379
pageserver_config_stub:
pg_distrib_dir: /usr/local
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "{{ inventory_hostname }}"
hostname_suffix: ".local"
remote_user: admin
children:
pageservers:
hosts:
zenith-us-stage-ps-2:
console_region_id: aws-us-east-1
zenith-us-stage-ps-3:
console_region_id: aws-us-east-1
zenith-us-stage-ps-4:
console_region_id: aws-us-east-1
safekeepers:
hosts:
zenith-us-stage-sk-4:
console_region_id: aws-us-east-1
zenith-us-stage-sk-5:
console_region_id: aws-us-east-1
zenith-us-stage-sk-6:
console_region_id: aws-us-east-1


@@ -1,32 +0,0 @@
storage:
vars:
bucket_name: neon-staging-storage-us-east-2
bucket_region: us-east-2
console_mgmt_base_url: http://console-staging.local
env_name: us-stage
etcd_endpoints: etcd-0.us-east-2.aws.neon.build:2379
pageserver_config_stub:
pg_distrib_dir: /usr/local
remote_storage:
bucket_name: "{{ bucket_name }}"
bucket_region: "{{ bucket_region }}"
prefix_in_bucket: "pageserver/v1"
hostname_suffix: ""
remote_user: ssm-user
ansible_aws_ssm_region: us-east-2
console_region_id: aws-us-east-2
children:
pageservers:
hosts:
pageserver-0.us-east-2.aws.neon.build:
ansible_host: i-0c3e70929edb5d691
safekeepers:
hosts:
safekeeper-0.us-east-2.aws.neon.build:
ansible_host: i-027662bd552bf5db0
safekeeper-1.us-east-2.aws.neon.build:
ansible_host: i-0171efc3604a7b907
safekeeper-2.us-east-2.aws.neon.build:
ansible_host: i-0de0b03a51676a6ce


@@ -1,5 +1,5 @@
[Unit]
-Description=Neon pageserver
+Description=Zenith pageserver
After=network.target auditd.service
[Service]


@@ -1,12 +1,12 @@
[Unit]
-Description=Neon safekeeper
+Description=Zenith safekeeper
After=network.target auditd.service
[Service]
Type=simple
User=safekeeper
Environment=RUST_BACKTRACE=1 NEON_REPO_DIR=/storage/safekeeper/data LD_LIBRARY_PATH=/usr/local/v14/lib
-ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}{{ hostname_suffix }}:6500 --listen-http {{ inventory_hostname }}{{ hostname_suffix }}:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ env_name }}/wal"}'
+ExecStart=/usr/local/bin/safekeeper -l {{ inventory_hostname }}.local:6500 --listen-http {{ inventory_hostname }}.local:7676 -D /storage/safekeeper/data --broker-endpoints={{ etcd_endpoints }} --remote-storage='{bucket_name="{{bucket_name}}", bucket_region="{{bucket_region}}", prefix_in_bucket="{{ env_name }}/wal"}'
ExecReload=/bin/kill -HUP $MAINPID
KillMode=mixed
KillSignal=SIGINT


@@ -1 +0,0 @@
{{ pageserver_config | sivel.toiletwater.to_toml }}


@@ -46,7 +46,7 @@ jobs:
runs-on: [self-hosted, zenith-benchmarker]
env:
-POSTGRES_DISTRIB_DIR: /usr/pgsql
+POSTGRES_DISTRIB_DIR: /tmp/pg_install
DEFAULT_PG_VERSION: 14
steps:
@@ -138,31 +138,22 @@ jobs:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
pgbench-compare:
+env:
+TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
+TEST_PG_BENCH_SCALES_MATRIX: "10gb"
+POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
+DEFAULT_PG_VERSION: 14
+TEST_OUTPUT: /tmp/test_output
+BUILD_TYPE: remote
+SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
strategy:
fail-fast: false
matrix:
# neon-captest-new: Run pgbench in a freshly created project
# neon-captest-reuse: Same, but reusing existing project
# neon-captest-prefetch: Same, with prefetching enabled (new project)
-platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch ]
-db_size: [ 10gb ]
-include:
-- platform: neon-captest-new
-db_size: 50gb
-- platform: neon-captest-prefetch
-db_size: 50gb
-- platform: rds-aurora
-db_size: 50gb
-env:
-TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
-TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
-POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
-DEFAULT_PG_VERSION: 14
-TEST_OUTPUT: /tmp/test_output
-BUILD_TYPE: remote
-SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref == 'refs/heads/main' ) }}
-PLATFORM: ${{ matrix.platform }}
+platform: [ neon-captest-new, neon-captest-reuse, neon-captest-prefetch, rds-aurora ]
runs-on: dev
container:
@@ -187,7 +178,7 @@ jobs:
echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH
- name: Create Neon Project - name: Create Neon Project
if: contains(fromJson('["neon-captest-new", "neon-captest-prefetch"]'), matrix.platform) if: matrix.platform != 'neon-captest-reuse'
id: create-neon-project id: create-neon-project
uses: ./.github/actions/neon-project-create uses: ./.github/actions/neon-project-create
with: with:
@@ -213,9 +204,11 @@ jobs:
;;
esac
-echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT
+echo "::set-output name=connstr::${CONNSTR}"
psql ${CONNSTR} -c "SELECT version();"
+env:
+PLATFORM: ${{ matrix.platform }}
- name: Set database options
if: matrix.platform == 'neon-captest-prefetch'
@@ -234,6 +227,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
env:
+PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -247,6 +241,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
env:
+PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -260,6 +255,7 @@ jobs:
save_perf_report: ${{ env.SAVE_PERF_REPORT }}
extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
env:
+PLATFORM: ${{ matrix.platform }}
BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
@@ -272,7 +268,7 @@ jobs:
build_type: ${{ env.BUILD_TYPE }}
- name: Delete Neon Project
-if: ${{ steps.create-neon-project.outputs.project_id && always() }}
+if: ${{ matrix.platform != 'neon-captest-reuse' && always() }}
uses: ./.github/actions/neon-project-delete
with:
environment: dev


@@ -35,12 +35,12 @@ jobs:
echo ref:$GITHUB_REF_NAME
echo rev:$(git rev-list --count HEAD)
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
-echo "tag=$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
+echo "::set-output name=tag::$(git rev-list --count HEAD)"
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
-echo "tag=release-$(git rev-list --count HEAD)" >> $GITHUB_OUTPUT
+echo "::set-output name=tag::release-$(git rev-list --count HEAD)"
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
-echo "tag=$GITHUB_RUN_ID" >> $GITHUB_OUTPUT
+echo "::set-output name=tag::$GITHUB_RUN_ID"
fi
shell: bash
id: build-tag
@@ -78,12 +78,12 @@ jobs:
- name: Set pg 14 revision for caching
id: pg_v14_rev
-run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
+run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v14)
shell: bash -euxo pipefail {0}
- name: Set pg 15 revision for caching
id: pg_v15_rev
-run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
+run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v15)
shell: bash -euxo pipefail {0}
# Set some environment variables used by all the steps.
@@ -494,7 +494,7 @@ jobs:
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
- name: Kaniko build neon - name: Kaniko build neon
run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --build-arg GIT_VERSION=${{ github.sha }} --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:$GITHUB_RUN_ID run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:$GITHUB_RUN_ID
compute-tools-image: compute-tools-image:
runs-on: dev runs-on: dev
@@ -508,7 +508,7 @@ jobs:
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
- name: Kaniko build compute tools - name: Kaniko build compute tools
run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:$GITHUB_RUN_ID run: /kaniko/executor --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --dockerfile Dockerfile.compute-tools --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:$GITHUB_RUN_ID
compute-node-image: compute-node-image:
runs-on: dev runs-on: dev
@@ -527,7 +527,7 @@ jobs:
# cloud repo depends on this image name, thus duplicating it
# remove compute-node when cloud repo is updated
- name: Kaniko build compute node with extensions v14 (compatibility)
-run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-node-v14 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:$GITHUB_RUN_ID
+run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --snapshotMode=redo --context . --dockerfile Dockerfile.compute-node-v14 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:$GITHUB_RUN_ID
compute-node-image-v14:
runs-on: dev
@@ -543,7 +543,7 @@ jobs:
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
- name: Kaniko build compute node with extensions v14 - name: Kaniko build compute node with extensions v14
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-node-v14 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:$GITHUB_RUN_ID run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --dockerfile Dockerfile.compute-node-v14 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:$GITHUB_RUN_ID
compute-node-image-v15: compute-node-image-v15:
@@ -560,7 +560,7 @@ jobs:
run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json run: echo "{\"credsStore\":\"ecr-login\"}" > /kaniko/.docker/config.json
- name: Kaniko build compute node with extensions v15 - name: Kaniko build compute node with extensions v15
run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --build-arg GIT_VERSION=${{ github.sha }} --dockerfile Dockerfile.compute-node-v15 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:$GITHUB_RUN_ID run: /kaniko/executor --skip-unused-stages --snapshotMode=redo --cache=true --cache-repo 369495373322.dkr.ecr.eu-central-1.amazonaws.com/cache --context . --dockerfile Dockerfile.compute-node-v15 --destination 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:$GITHUB_RUN_ID
promote-images: promote-images:
runs-on: dev runs-on: dev
@@ -622,8 +622,6 @@ jobs:
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/neon:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/neon:latest
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-tools:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-tools:latest
crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-node:latest
-crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v14:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-node-v14:latest
-crane copy 369495373322.dkr.ecr.eu-central-1.amazonaws.com/compute-node-v15:$GITHUB_RUN_ID 093970136003.dkr.ecr.us-east-2.amazonaws.com/compute-node-v15:latest
- name: Configure Docker Hub login
run: |
@@ -671,12 +669,12 @@ jobs:
- id: set-matrix
run: |
if [[ "$GITHUB_REF_NAME" == "main" ]]; then
-STAGING='{"env_name": "staging", "proxy_job": "neon-proxy", "proxy_config": "staging.proxy", "kubeconfig_secret": "STAGING_KUBECONFIG_DATA", "console_api_key_secret": "NEON_STAGING_API_KEY"}'
-NEON_STRESS='{"env_name": "neon-stress", "proxy_job": "neon-stress-proxy", "proxy_config": "neon-stress.proxy", "kubeconfig_secret": "NEON_STRESS_KUBECONFIG_DATA", "console_api_key_secret": "NEON_CAPTEST_API_KEY"}'
-echo "include=[$STAGING, $NEON_STRESS]" >> $GITHUB_OUTPUT
+STAGING='{"env_name": "staging", "proxy_job": "neon-proxy", "proxy_config": "staging.proxy", "kubeconfig_secret": "STAGING_KUBECONFIG_DATA"}'
+NEON_STRESS='{"env_name": "neon-stress", "proxy_job": "neon-stress-proxy", "proxy_config": "neon-stress.proxy", "kubeconfig_secret": "NEON_STRESS_KUBECONFIG_DATA"}'
+echo "::set-output name=include::[$STAGING, $NEON_STRESS]"
elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
-PRODUCTION='{"env_name": "production", "proxy_job": "neon-proxy", "proxy_config": "production.proxy", "kubeconfig_secret": "PRODUCTION_KUBECONFIG_DATA", "console_api_key_secret": "NEON_PRODUCTION_API_KEY"}'
-echo "include=[$PRODUCTION]" >> $GITHUB_OUTPUT
+PRODUCTION='{"env_name": "production", "proxy_job": "neon-proxy", "proxy_config": "production.proxy", "kubeconfig_secret": "PRODUCTION_KUBECONFIG_DATA"}'
+echo "::set-output name=include::[$PRODUCTION]"
else
echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
exit 1
@@ -712,7 +710,7 @@ jobs:
- name: Setup ansible
run: |
export PATH="/root/.local/bin:$PATH"
-pip install --progress-bar off --user ansible boto3 toml
+pip install --progress-bar off --user ansible boto3
- name: Redeploy
run: |
@@ -734,48 +732,8 @@ jobs:
chmod 0600 ssh-key
ssh-add ssh-key
rm -f ssh-key ssh-key-cert.pub
-ansible-galaxy collection install sivel.toiletwater
-ansible-playbook deploy.yaml -i ${{ matrix.env_name }}.hosts.yaml -e CONSOLE_API_TOKEN=${{ secrets[matrix.console_api_key_secret] }}
-rm -f neon_install.tar.gz .neon_current_version
-deploy-new:
-runs-on: dev
-container: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/ansible:pinned
-# We need both storage **and** compute images for deploy, because control plane picks the compute version based on the storage version.
-# If it notices a fresh storage it may bump the compute version. And if compute image failed to build it may break things badly
-needs: [ push-docker-hub, calculate-deploy-targets, tag, regress-tests ]
-if: |
-(github.ref_name == 'main') &&
-github.event_name != 'workflow_dispatch'
-defaults:
-run:
-shell: bash
-env:
-AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_DEV }}
-AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_KEY_DEV }}
-steps:
-- name: Checkout
-uses: actions/checkout@v3
-with:
-submodules: true
-fetch-depth: 0
-- name: Redeploy
-run: |
-export DOCKER_TAG=${{needs.tag.outputs.build-tag}}
-cd "$(pwd)/.github/ansible"
-if [[ "$GITHUB_REF_NAME" == "main" ]]; then
-./get_binaries.sh
-elif [[ "$GITHUB_REF_NAME" == "release" ]]; then
-RELEASE=true ./get_binaries.sh
-else
-echo "GITHUB_REF_NAME (value '$GITHUB_REF_NAME') is not set to either 'main' or 'release'"
-exit 1
-fi
-ansible-galaxy collection install sivel.toiletwater
-ansible-playbook deploy.yaml -i staging.us-east-2.hosts.yaml -e @ssm_config -e CONSOLE_API_TOKEN=${{secrets.NEON_STAGING_API_KEY}}
+ansible-playbook deploy.yaml -i ${{ matrix.env_name }}.hosts
rm -f neon_install.tar.gz .neon_current_version
deploy-proxy:


@@ -36,7 +36,7 @@ jobs:
steps:
- name: Checkout
-uses: actions/checkout@v3
+uses: actions/checkout@v2
with:
submodules: true
fetch-depth: 2
@@ -56,12 +56,12 @@ jobs:
- name: Set pg 14 revision for caching
id: pg_v14_rev
-run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v14) >> $GITHUB_OUTPUT
+run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v14)
shell: bash -euxo pipefail {0}
- name: Set pg 15 revision for caching
id: pg_v15_rev
-run: echo pg_rev=$(git rev-parse HEAD:vendor/postgres-v15) >> $GITHUB_OUTPUT
+run: echo ::set-output name=pg_rev::$(git rev-parse HEAD:vendor/postgres-v15)
shell: bash -euxo pipefail {0}
- name: Cache postgres v14 build

Cargo.lock (generated, 884 changed lines): file diff suppressed because it is too large.

@@ -1,14 +1,3 @@
-# 'named-profiles' feature was stabilized in cargo 1.57. This line makes the
-# build work with older cargo versions.
-#
-# We have this because as of this writing, the latest cargo Debian package
-# that's available is 1.56. (Confusingly, the Debian package version number
-# is 0.57, whereas 'cargo --version' says 1.56.)
-#
-# See https://tracker.debian.org/pkg/cargo for the current status of the
-# package. When that gets updated, we can remove this.
-cargo-features = ["named-profiles"]
[workspace]
members = [
"compute_tools",


@@ -44,7 +44,7 @@ COPY . .
# Show build caching stats to check if it was used in the end.
# Has to be the part of the same RUN since cachepot daemon is killed in the end of this RUN, losing the compilation stats.
RUN set -e \
-&& mold -run cargo build --bin pageserver --bin pageserver_binutils --bin safekeeper --bin proxy --locked --release \
+&& mold -run cargo build --bin pageserver --bin safekeeper --bin proxy --locked --release \
&& cachepot -s
# Build final image
@@ -63,10 +63,9 @@ RUN set -e \
&& useradd -d /data neon \
&& chown -R neon:neon /data
COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver /usr/local/bin
-COPY --from=build --chown=neon:neon /home/nonroot/target/release/pageserver_binutils /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/safekeeper /usr/local/bin
COPY --from=build --chown=neon:neon /home/nonroot/target/release/proxy /usr/local/bin
COPY --from=pg-build /home/nonroot/pg_install/v14 /usr/local/v14/
COPY --from=pg-build /home/nonroot/pg_install/v15 /usr/local/v15/
@@ -86,3 +85,4 @@ VOLUME ["/data"]
USER neon
EXPOSE 6400
EXPOSE 9898
+CMD ["/bin/bash"]


@@ -71,12 +71,10 @@ RUN apt update && \
RUN apt update && \
apt install -y --no-install-recommends -t testing binutils
-# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
tar xvzf v3.1.4.tar.gz && \
cd plv8-3.1.4 && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
-sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
rm -rf /plv8-* && \
@@ -118,7 +116,8 @@ RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3
#
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
-COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
+# plv8 still sometimes crashes during the creation
+# COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/


@@ -76,12 +76,10 @@ RUN apt update && \
RUN apt update && \
apt install -y --no-install-recommends -t testing binutils
-# Sed is used to patch for https://github.com/plv8/plv8/issues/503
RUN wget https://github.com/plv8/plv8/archive/refs/tags/v3.1.4.tar.gz && \
tar xvzf v3.1.4.tar.gz && \
cd plv8-3.1.4 && \
export PATH="/usr/local/pgsql/bin:$PATH" && \
-sed -i 's/MemoryContextAlloc(/MemoryContextAllocZero(/' plv8.cc && \
make -j $(getconf _NPROCESSORS_ONLN) && \
make -j $(getconf _NPROCESSORS_ONLN) install && \
rm -rf /plv8-* && \
@@ -123,7 +121,8 @@ RUN wget https://github.com/zachasme/h3-pg/archive/refs/tags/v4.0.1.tar.gz -O h3
#
FROM build-deps AS neon-pg-ext-build
COPY --from=postgis-build /usr/local/pgsql/ /usr/local/pgsql/
-COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
+# plv8 still sometimes crashes during the creation
+# COPY --from=plv8-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /usr/local/pgsql/ /usr/local/pgsql/
COPY --from=h3-pg-build /h3/usr /
COPY pgxn/ pgxn/


@@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
anyhow = "1.0"
chrono = "0.4"
-clap = "4.0"
+clap = "3.0"
env_logger = "0.9"
futures = "0.3.13"
hyper = { version = "0.14", features = ["full"] }


@@ -51,19 +51,53 @@ fn main() -> Result<()> {
// TODO: re-use `utils::logging` later
init_logger(DEFAULT_LOG_LEVEL)?;
-let matches = cli().get_matches();
// Env variable is set by `cargo`
let version: Option<&str> = option_env!("CARGO_PKG_VERSION");
let matches = clap::App::new("compute_ctl")
.version(version.unwrap_or("unknown"))
.arg(
Arg::new("connstr")
.short('C')
.long("connstr")
.value_name("DATABASE_URL")
.required(true),
)
.arg(
Arg::new("pgdata")
.short('D')
.long("pgdata")
.value_name("DATADIR")
.required(true),
)
.arg(
Arg::new("pgbin")
.short('b')
.long("pgbin")
.value_name("POSTGRES_PATH"),
)
.arg(
Arg::new("spec")
.short('s')
.long("spec")
.value_name("SPEC_JSON"),
)
.arg(
Arg::new("spec-path")
.short('S')
.long("spec-path")
.value_name("SPEC_PATH"),
)
.get_matches();
-let pgdata = matches
-.get_one::<String>("pgdata")
-.expect("PGDATA path is required");
+let pgdata = matches.value_of("pgdata").expect("PGDATA path is required");
let connstr = matches
-.get_one::<String>("connstr")
+.value_of("connstr")
.expect("Postgres connection string is required");
-let spec = matches.get_one::<String>("spec");
-let spec_path = matches.get_one::<String>("spec-path");
+let spec = matches.value_of("spec");
+let spec_path = matches.value_of("spec-path");
// Try to use just 'postgres' if no path is provided
-let pgbin = matches.get_one::<String>("pgbin").unwrap();
+let pgbin = matches.value_of("pgbin").unwrap_or("postgres");
let spec: ComputeSpec = match spec {
// First, try to get cluster spec from the cli argument
@@ -139,48 +173,3 @@ fn main() -> Result<()> {
}
}
}
fn cli() -> clap::Command {
// Env variable is set by `cargo`
let version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown");
clap::Command::new("compute_ctl")
.version(version)
.arg(
Arg::new("connstr")
.short('C')
.long("connstr")
.value_name("DATABASE_URL")
.required(true),
)
.arg(
Arg::new("pgdata")
.short('D')
.long("pgdata")
.value_name("DATADIR")
.required(true),
)
.arg(
Arg::new("pgbin")
.short('b')
.long("pgbin")
.default_value("postgres")
.value_name("POSTGRES_PATH"),
)
.arg(
Arg::new("spec")
.short('s')
.long("spec")
.value_name("SPEC_JSON"),
)
.arg(
Arg::new("spec-path")
.short('S')
.long("spec-path")
.value_name("SPEC_PATH"),
)
}
#[test]
fn verify_cli() {
cli().debug_assert()
}


@@ -8,10 +8,11 @@ use std::process::Child;
use std::time::{Duration, Instant};
use anyhow::{bail, Result};
-use notify::{RecursiveMode, Watcher};
use postgres::{Client, Transaction};
use serde::Deserialize;
+use notify::{RecursiveMode, Watcher};
const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds
/// Rust representation of Postgres role info with only those fields
@@ -168,7 +169,7 @@ impl Database {
/// it may require a proper quoting too.
pub fn to_pg_options(&self) -> String {
let mut params: String = self.options.as_pg_options();
-write!(params, " OWNER {}", &self.owner.pg_quote())
+write!(params, " OWNER {}", &self.owner.quote())
.expect("String is documented to not to error during write operations");
params
@@ -179,17 +180,18 @@ impl Database {
/// intended to be used for DB / role names.
pub type PgIdent = String;
-/// Generic trait used to provide quoting / encoding for strings used in the
-/// Postgres SQL queries and DATABASE_URL.
-pub trait Escaping {
-fn pg_quote(&self) -> String;
+/// Generic trait used to provide quoting for strings used in the
+/// Postgres SQL queries. Currently used only to implement quoting
+/// of identifiers, but could be used for literals in the future.
+pub trait PgQuote {
+fn quote(&self) -> String;
}
-impl Escaping for PgIdent {
+impl PgQuote for PgIdent {
/// This is intended to mimic Postgres quote_ident(), but for simplicity it
-/// always quotes provided string with `""` and escapes every `"`.
-/// **Not idempotent**, i.e. if string is already escaped it will be escaped again.
-fn pg_quote(&self) -> String {
+/// always quotes provided string with `""` and escapes every `"`. Not idempotent,
+/// i.e. if string is already escaped it will be escaped again.
+fn quote(&self) -> String {
let result = format!("\"{}\"", self.replace('"', "\"\""));
result
}


@@ -1,9 +1,7 @@
use std::path::Path;
-use std::str::FromStr;
use anyhow::Result;
use log::{info, log_enabled, warn, Level};
-use postgres::config::Config;
use postgres::{Client, NoTls};
use serde::Deserialize;
@@ -117,8 +115,8 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
if existing_roles.iter().any(|r| r.name == op.name) {
let query: String = format!(
"ALTER ROLE {} RENAME TO {}",
-op.name.pg_quote(),
-new_name.pg_quote()
+op.name.quote(),
+new_name.quote()
);
warn!("renaming role '{}' to '{}'", op.name, new_name);
@@ -164,7 +162,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
}
if update_role {
-let mut query: String = format!("ALTER ROLE {} ", name.pg_quote());
+let mut query: String = format!("ALTER ROLE {} ", name.quote());
info_print!(" -> update");
query.push_str(&role.to_pg_options());
@@ -172,7 +170,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
}
} else {
info!("role name: '{}'", &name);
-let mut query: String = format!("CREATE ROLE {} ", name.pg_quote());
+let mut query: String = format!("CREATE ROLE {} ", name.quote());
info!("role create query: '{}'", &query);
info_print!(" -> create");
@@ -181,7 +179,7 @@ pub fn handle_roles(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
let grant_query = format!(
"GRANT pg_read_all_data, pg_write_all_data TO {}",
-name.pg_quote()
+name.quote()
);
xact.execute(grant_query.as_str(), &[])?;
info!("role grant query: '{}'", &grant_query);
@@ -217,7 +215,7 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
// We do not check either role exists or not,
// Postgres will take care of it for us
if op.action == "delete_role" {
-let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.pg_quote());
+let query: String = format!("DROP ROLE IF EXISTS {}", &op.name.quote());
warn!("deleting role '{}'", &op.name);
xact.execute(query.as_str(), &[])?;
@@ -232,16 +230,17 @@ pub fn handle_role_deletions(node: &ComputeNode, client: &mut Client) -> Result<
fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()> {
for db in &node.spec.cluster.databases {
if db.owner != *role_name {
-let mut conf = Config::from_str(node.connstr.as_str())?;
-conf.dbname(&db.name);
-let mut client = conf.connect(NoTls)?;
+let mut connstr = node.connstr.clone();
+// database name is always the last and the only component of the path
+connstr.set_path(&db.name);
+let mut client = Client::connect(connstr.as_str(), NoTls)?;
// This will reassign all dependent objects to the db owner
let reassign_query = format!(
"REASSIGN OWNED BY {} TO {}",
-role_name.pg_quote(),
-db.owner.pg_quote()
+role_name.quote(),
+db.owner.quote()
);
info!(
"reassigning objects owned by '{}' in db '{}' to '{}'",
@@ -250,7 +249,7 @@ fn reassign_owned_objects(node: &ComputeNode, role_name: &PgIdent) -> Result<()>
client.simple_query(&reassign_query)?;
// This now will only drop privileges of the role
-let drop_query = format!("DROP OWNED BY {}", role_name.pg_quote());
+let drop_query = format!("DROP OWNED BY {}", role_name.quote());
client.simple_query(&drop_query)?;
}
}
@@ -280,7 +279,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
// We do not check either DB exists or not,
// Postgres will take care of it for us
"delete_db" => {
-let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.pg_quote());
+let query: String = format!("DROP DATABASE IF EXISTS {}", &op.name.quote());
warn!("deleting database '{}'", &op.name);
client.execute(query.as_str(), &[])?;
@@ -292,8 +291,8 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
if existing_dbs.iter().any(|r| r.name == op.name) {
let query: String = format!(
"ALTER DATABASE {} RENAME TO {}",
-op.name.pg_quote(),
-new_name.pg_quote()
+op.name.quote(),
+new_name.quote()
);
warn!("renaming database '{}' to '{}'", op.name, new_name);
@@ -321,7 +320,7 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
// XXX: db owner name is returned as quoted string from Postgres,
// when quoting is needed.
let new_owner = if r.owner.starts_with('"') {
-db.owner.pg_quote()
+db.owner.quote()
} else {
db.owner.clone()
};
@@ -329,15 +328,15 @@ pub fn handle_databases(spec: &ComputeSpec, client: &mut Client) -> Result<()> {
if new_owner != r.owner {
let query: String = format!(
"ALTER DATABASE {} OWNER TO {}",
-name.pg_quote(),
-db.owner.pg_quote()
+name.quote(),
+db.owner.quote()
);
info_print!(" -> update");
client.execute(query.as_str(), &[])?;
}
} else {
-let mut query: String = format!("CREATE DATABASE {} ", name.pg_quote());
+let mut query: String = format!("CREATE DATABASE {} ", name.quote());
info_print!(" -> create");
query.push_str(&db.to_pg_options());
@@ -367,7 +366,7 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
.cluster
.roles
.iter()
-.map(|r| r.name.pg_quote())
+.map(|r| r.name.quote())
.collect::<Vec<_>>();
for db in &spec.cluster.databases {
@@ -375,7 +374,7 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
let query: String = format!(
"GRANT CREATE ON DATABASE {} TO {}",
-dbname.pg_quote(),
+dbname.quote(),
roles.join(", ")
);
info!("grant query {}", &query);
@@ -386,11 +385,12 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
// Do some per-database access adjustments. We'd better do this at db creation time,
// but CREATE DATABASE isn't transactional. So we cannot create db + do some grants
// atomically.
+let mut db_connstr = node.connstr.clone();
for db in &node.spec.cluster.databases {
-let mut conf = Config::from_str(node.connstr.as_str())?;
-conf.dbname(&db.name);
-let mut db_client = conf.connect(NoTls)?;
+// database name is always the last and the only component of the path
+db_connstr.set_path(&db.name);
+let mut db_client = Client::connect(db_connstr.as_str(), NoTls)?;
// This will only change ownership on the schema itself, not the objects
// inside it. Without it owner of the `public` schema will be `cloud_admin`
@@ -419,15 +419,9 @@ pub fn handle_grants(node: &ComputeNode, client: &mut Client) -> Result<()> {
END IF;\n\
END\n\
$$;",
-db.owner.pg_quote()
+db.owner.quote()
);
db_client.simple_query(&alter_query)?;
-// Explicitly grant CREATE ON SCHEMA PUBLIC to the web_access user.
-// This is needed since postgres 15, where this privilege is removed by default.
-let grant_query: String = "GRANT CREATE ON SCHEMA public TO web_access".to_string();
-info!("grant query for db {} : {}", &db.name, &grant_query);
-db_client.simple_query(&grant_query)?;
}
Ok(())


@@ -33,9 +33,9 @@ mod pg_helpers_tests {
} }
#[test] #[test]
fn ident_pg_quote() { fn quote_ident() {
let ident: PgIdent = PgIdent::from("\"name\";\\n select 1;"); let ident: PgIdent = PgIdent::from("\"name\";\\n select 1;");
assert_eq!(ident.pg_quote(), "\"\"\"name\"\";\\n select 1;\""); assert_eq!(ident.quote(), "\"\"\"name\"\";\\n select 1;\"");
} }
} }
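For reference, the quoting rule that the renamed `pg_quote` helper (and the `ident_pg_quote` test above) relies on can be sketched as follows; this is a simplified stand-in, not the crate's actual implementation:

```rust
// Wrap the identifier in double quotes and double any embedded double quotes,
// mirroring PostgreSQL's quote_ident behaviour for identifiers that need quoting.
fn pg_quote_sketch(ident: &str) -> String {
    let escaped = ident.replace('"', "\"\"");
    format!("\"{escaped}\"")
}

fn main() {
    // Matches the expectation in the test above.
    assert_eq!(
        pg_quote_sketch("\"name\";\\n select 1;"),
        "\"\"\"name\"\";\\n select 1;\""
    );
}
```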
View File
@@ -4,19 +4,19 @@ version = "0.1.0"
edition = "2021" edition = "2021"
[dependencies] [dependencies]
clap = "4.0" clap = "3.0"
comfy-table = "6.1" comfy-table = "5.0.1"
git-version = "0.3.5" git-version = "0.3.5"
tar = "0.4.38" tar = "0.4.38"
postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev = "d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_with = "2.0" serde_with = "1.12.0"
toml = "0.5" toml = "0.5"
once_cell = "1.13.0" once_cell = "1.13.0"
regex = "1" regex = "1"
anyhow = "1.0" anyhow = "1.0"
thiserror = "1" thiserror = "1"
nix = "0.25" nix = "0.23"
reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] } reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }
# Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api # Note: Do not directly depend on pageserver or safekeeper; use pageserver_api or safekeeper_api
View File
@@ -6,7 +6,7 @@
//! rely on `neon_local` to set up the environment for each test. //! rely on `neon_local` to set up the environment for each test.
//! //!
use anyhow::{anyhow, bail, Context, Result}; use anyhow::{anyhow, bail, Context, Result};
use clap::{value_parser, Arg, ArgAction, ArgMatches, Command}; use clap::{App, AppSettings, Arg, ArgMatches};
use control_plane::compute::ComputeControlPlane; use control_plane::compute::ComputeControlPlane;
use control_plane::local_env::{EtcdBroker, LocalEnv}; use control_plane::local_env::{EtcdBroker, LocalEnv};
use control_plane::safekeeper::SafekeeperNode; use control_plane::safekeeper::SafekeeperNode;
@@ -85,7 +85,212 @@ struct TimelineTreeEl {
// * Providing CLI api to the pageserver // * Providing CLI api to the pageserver
// * TODO: export/import to/from usual postgres // * TODO: export/import to/from usual postgres
fn main() -> Result<()> { fn main() -> Result<()> {
let matches = cli().get_matches(); let branch_name_arg = Arg::new("branch-name")
.long("branch-name")
.takes_value(true)
.help("Name of the branch to be created or used as an alias for other services")
.required(false);
let pg_node_arg = Arg::new("node").help("Postgres node name").required(false);
let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false);
let tenant_id_arg = Arg::new("tenant-id")
.long("tenant-id")
.help("Tenant id. Represented as a hexadecimal string 32 symbols length")
.takes_value(true)
.required(false);
let timeline_id_arg = Arg::new("timeline-id")
.long("timeline-id")
.help("Timeline id. Represented as a hexadecimal string 32 symbols length")
.takes_value(true)
.required(false);
let pg_version_arg = Arg::new("pg-version")
.long("pg-version")
.help("Postgres version to use for the initial tenant")
.required(false)
.takes_value(true)
.default_value(DEFAULT_PG_VERSION);
let port_arg = Arg::new("port")
.long("port")
.required(false)
.value_name("port");
let stop_mode_arg = Arg::new("stop-mode")
.short('m')
.takes_value(true)
.possible_values(&["fast", "immediate"])
.help("If 'immediate', don't flush repository data at shutdown")
.required(false)
.value_name("stop-mode");
let pageserver_config_args = Arg::new("pageserver-config-override")
.long("pageserver-config-override")
.takes_value(true)
.number_of_values(1)
.multiple_occurrences(true)
.help("Additional pageserver's configuration options or overrides, refer to pageserver's 'config-override' CLI parameter docs for more")
.required(false);
let lsn_arg = Arg::new("lsn")
.long("lsn")
.help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.")
.takes_value(true)
.required(false);
let matches = App::new("Neon CLI")
.setting(AppSettings::ArgRequiredElseHelp)
.version(GIT_VERSION)
.subcommand(
App::new("init")
.about("Initialize a new Neon repository")
.arg(pageserver_config_args.clone())
.arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline"))
.arg(
Arg::new("config")
.long("config")
.required(false)
.value_name("config"),
)
.arg(pg_version_arg.clone())
)
.subcommand(
App::new("timeline")
.about("Manage timelines")
.subcommand(App::new("list")
.about("List all timelines, available to this pageserver")
.arg(tenant_id_arg.clone()))
.subcommand(App::new("branch")
.about("Create a new timeline, using another timeline as a base, copying its data")
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(Arg::new("ancestor-branch-name").long("ancestor-branch-name").takes_value(true)
.help("Use last Lsn of another timeline (and its data) as base when creating the new timeline. The timeline gets resolved by its branch name.").required(false))
.arg(Arg::new("ancestor-start-lsn").long("ancestor-start-lsn").takes_value(true)
.help("When using another timeline as base, use a specific Lsn in it instead of the latest one").required(false)))
.subcommand(App::new("create")
.about("Create a new blank timeline")
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(pg_version_arg.clone())
)
.subcommand(App::new("import")
.about("Import timeline from basebackup directory")
.arg(tenant_id_arg.clone())
.arg(timeline_id_arg.clone())
.arg(Arg::new("node-name").long("node-name").takes_value(true)
.help("Name to assign to the imported timeline"))
.arg(Arg::new("base-tarfile").long("base-tarfile").takes_value(true)
.help("Basebackup tarfile to import"))
.arg(Arg::new("base-lsn").long("base-lsn").takes_value(true)
.help("Lsn the basebackup starts at"))
.arg(Arg::new("wal-tarfile").long("wal-tarfile").takes_value(true)
.help("Wal to add after base"))
.arg(Arg::new("end-lsn").long("end-lsn").takes_value(true)
.help("Lsn the basebackup ends at"))
.arg(pg_version_arg.clone())
)
).subcommand(
App::new("tenant")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage tenants")
.subcommand(App::new("list"))
.subcommand(App::new("create")
.arg(tenant_id_arg.clone())
.arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline"))
.arg(Arg::new("config").short('c').takes_value(true).multiple_occurrences(true).required(false))
.arg(pg_version_arg.clone())
)
.subcommand(App::new("config")
.arg(tenant_id_arg.clone())
.arg(Arg::new("config").short('c').takes_value(true).multiple_occurrences(true).required(false))
)
)
.subcommand(
App::new("pageserver")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage pageserver")
.subcommand(App::new("status"))
.subcommand(App::new("start").about("Start local pageserver").arg(pageserver_config_args.clone()))
.subcommand(App::new("stop").about("Stop local pageserver")
.arg(stop_mode_arg.clone()))
.subcommand(App::new("restart").about("Restart local pageserver").arg(pageserver_config_args.clone()))
)
.subcommand(
App::new("safekeeper")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage safekeepers")
.subcommand(App::new("start")
.about("Start local safekeeper")
.arg(safekeeper_id_arg.clone())
)
.subcommand(App::new("stop")
.about("Stop local safekeeper")
.arg(safekeeper_id_arg.clone())
.arg(stop_mode_arg.clone())
)
.subcommand(App::new("restart")
.about("Restart local safekeeper")
.arg(safekeeper_id_arg.clone())
.arg(stop_mode_arg.clone())
)
)
.subcommand(
App::new("pg")
.setting(AppSettings::ArgRequiredElseHelp)
.about("Manage postgres instances")
.subcommand(App::new("list").arg(tenant_id_arg.clone()))
.subcommand(App::new("create")
.about("Create a postgres compute node")
.arg(pg_node_arg.clone())
.arg(branch_name_arg.clone())
.arg(tenant_id_arg.clone())
.arg(lsn_arg.clone())
.arg(port_arg.clone())
.arg(
Arg::new("config-only")
.help("Don't do basebackup, create compute node with only config files")
.long("config-only")
.required(false))
.arg(pg_version_arg.clone())
)
.subcommand(App::new("start")
.about("Start a postgres compute node.\n This command actually creates new node from scratch, but preserves existing config files")
.arg(pg_node_arg.clone())
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(timeline_id_arg.clone())
.arg(lsn_arg.clone())
.arg(port_arg.clone())
.arg(pg_version_arg.clone())
)
.subcommand(
App::new("stop")
.arg(pg_node_arg.clone())
.arg(tenant_id_arg.clone())
.arg(
Arg::new("destroy")
.help("Also delete data directory (now optional, should be default in future)")
.long("destroy")
.required(false)
)
)
)
.subcommand(
App::new("start")
.about("Start page server and safekeepers")
.arg(pageserver_config_args)
)
.subcommand(
App::new("stop")
.about("Stop page server and safekeepers")
.arg(stop_mode_arg.clone())
)
.get_matches();
let (sub_name, sub_args) = match matches.subcommand() { let (sub_name, sub_args) = match matches.subcommand() {
Some(subcommand_data) => subcommand_data, Some(subcommand_data) => subcommand_data,
@@ -153,7 +358,9 @@ fn print_timelines_tree(
// Memorize all direct children of each timeline. // Memorize all direct children of each timeline.
for timeline in timelines.iter() { for timeline in timelines.iter() {
if let Some(ancestor_timeline_id) = timeline.ancestor_timeline_id { if let Some(ancestor_timeline_id) =
timeline.local.as_ref().and_then(|l| l.ancestor_timeline_id)
{
timelines_hash timelines_hash
.get_mut(&ancestor_timeline_id) .get_mut(&ancestor_timeline_id)
.context("missing timeline info in the HashMap")? .context("missing timeline info in the HashMap")?
@@ -164,7 +371,13 @@ fn print_timelines_tree(
for timeline in timelines_hash.values() { for timeline in timelines_hash.values() {
// Start with root local timelines (no ancestors) first. // Start with root local timelines (no ancestors) first.
if timeline.info.ancestor_timeline_id.is_none() { if timeline
.info
.local
.as_ref()
.and_then(|l| l.ancestor_timeline_id)
.is_none()
{
print_timeline(0, &Vec::from([true]), timeline, &timelines_hash)?; print_timeline(0, &Vec::from([true]), timeline, &timelines_hash)?;
} }
} }
@@ -181,8 +394,17 @@ fn print_timeline(
timeline: &TimelineTreeEl, timeline: &TimelineTreeEl,
timelines: &HashMap<TimelineId, TimelineTreeEl>, timelines: &HashMap<TimelineId, TimelineTreeEl>,
) -> Result<()> { ) -> Result<()> {
let local_remote = match (timeline.info.local.as_ref(), timeline.info.remote.as_ref()) {
(None, None) => unreachable!("in this case no info for a timeline is found"),
(None, Some(_)) => "(R)",
(Some(_), None) => "(L)",
(Some(_), Some(_)) => "(L+R)",
};
// Draw main padding
print!("{} ", local_remote);
if nesting_level > 0 { if nesting_level > 0 {
let ancestor_lsn = match timeline.info.ancestor_lsn { let ancestor_lsn = match timeline.info.local.as_ref().and_then(|i| i.ancestor_lsn) {
Some(lsn) => lsn.to_string(), Some(lsn) => lsn.to_string(),
None => "Unknown Lsn".to_string(), None => "Unknown Lsn".to_string(),
}; };
@@ -270,16 +492,16 @@ fn get_tenant_id(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow::R
fn parse_tenant_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TenantId>> { fn parse_tenant_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TenantId>> {
sub_match sub_match
.get_one::<String>("tenant-id") .value_of("tenant-id")
.map(|tenant_id| TenantId::from_str(tenant_id)) .map(TenantId::from_str)
.transpose() .transpose()
.context("Failed to parse tenant id from the argument string") .context("Failed to parse tenant id from the argument string")
} }
fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TimelineId>> { fn parse_timeline_id(sub_match: &ArgMatches) -> anyhow::Result<Option<TimelineId>> {
sub_match sub_match
.get_one::<String>("timeline-id") .value_of("timeline-id")
.map(|timeline_id| TimelineId::from_str(timeline_id)) .map(TimelineId::from_str)
.transpose() .transpose()
.context("Failed to parse timeline id from the argument string") .context("Failed to parse timeline id from the argument string")
} }
@@ -288,22 +510,19 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result<LocalEnv> {
let initial_timeline_id_arg = parse_timeline_id(init_match)?; let initial_timeline_id_arg = parse_timeline_id(init_match)?;
// Create config file // Create config file
let toml_file: String = if let Some(config_path) = init_match.get_one::<PathBuf>("config") { let toml_file: String = if let Some(config_path) = init_match.value_of("config") {
// load and parse the file // load and parse the file
std::fs::read_to_string(config_path).with_context(|| { std::fs::read_to_string(std::path::Path::new(config_path))
format!( .with_context(|| format!("Could not read configuration file '{config_path}'"))?
"Could not read configuration file '{}'",
config_path.display()
)
})?
} else { } else {
// Built-in default config // Built-in default config
default_conf(&EtcdBroker::locate_etcd()?) default_conf(&EtcdBroker::locate_etcd()?)
}; };
let pg_version = init_match let pg_version = init_match
.get_one::<u32>("pg-version") .value_of("pg-version")
.copied() .unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?; .context("Failed to parse postgres version from the argument string")?;
let mut env = let mut env =
@@ -339,10 +558,9 @@ fn handle_init(init_match: &ArgMatches) -> anyhow::Result<LocalEnv> {
fn pageserver_config_overrides(init_match: &ArgMatches) -> Vec<&str> { fn pageserver_config_overrides(init_match: &ArgMatches) -> Vec<&str> {
init_match init_match
.get_many::<String>("pageserver-config-override") .values_of("pageserver-config-override")
.into_iter() .into_iter()
.flatten() .flatten()
.map(|s| s.as_str())
.collect() .collect()
} }
@@ -357,7 +575,7 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an
Some(("create", create_match)) => { Some(("create", create_match)) => {
let initial_tenant_id = parse_tenant_id(create_match)?; let initial_tenant_id = parse_tenant_id(create_match)?;
let tenant_conf: HashMap<_, _> = create_match let tenant_conf: HashMap<_, _> = create_match
.get_many::<String>("config") .values_of("config")
.map(|vals| vals.flat_map(|c| c.split_once(':')).collect()) .map(|vals| vals.flat_map(|c| c.split_once(':')).collect())
.unwrap_or_default(); .unwrap_or_default();
let new_tenant_id = pageserver.tenant_create(initial_tenant_id, tenant_conf)?; let new_tenant_id = pageserver.tenant_create(initial_tenant_id, tenant_conf)?;
@@ -366,8 +584,9 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an
// Create an initial timeline for the new tenant // Create an initial timeline for the new tenant
let new_timeline_id = parse_timeline_id(create_match)?; let new_timeline_id = parse_timeline_id(create_match)?;
let pg_version = create_match let pg_version = create_match
.get_one::<u32>("pg-version") .value_of("pg-version")
.copied() .unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?; .context("Failed to parse postgres version from the argument string")?;
let timeline_info = pageserver.timeline_create( let timeline_info = pageserver.timeline_create(
@@ -378,7 +597,10 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an
Some(pg_version), Some(pg_version),
)?; )?;
let new_timeline_id = timeline_info.timeline_id; let new_timeline_id = timeline_info.timeline_id;
let last_record_lsn = timeline_info.last_record_lsn; let last_record_lsn = timeline_info
.local
.context(format!("Failed to get last record LSN: no local timeline info for timeline {new_timeline_id}"))?
.last_record_lsn;
env.register_branch_mapping( env.register_branch_mapping(
DEFAULT_BRANCH_NAME.to_string(), DEFAULT_BRANCH_NAME.to_string(),
@@ -393,7 +615,7 @@ fn handle_tenant(tenant_match: &ArgMatches, env: &mut local_env::LocalEnv) -> an
Some(("config", create_match)) => { Some(("config", create_match)) => {
let tenant_id = get_tenant_id(create_match, env)?; let tenant_id = get_tenant_id(create_match, env)?;
let tenant_conf: HashMap<_, _> = create_match let tenant_conf: HashMap<_, _> = create_match
.get_many::<String>("config") .values_of("config")
.map(|vals| vals.flat_map(|c| c.split_once(':')).collect()) .map(|vals| vals.flat_map(|c| c.split_once(':')).collect())
.unwrap_or_default(); .unwrap_or_default();
@@ -420,19 +642,23 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
Some(("create", create_match)) => { Some(("create", create_match)) => {
let tenant_id = get_tenant_id(create_match, env)?; let tenant_id = get_tenant_id(create_match, env)?;
let new_branch_name = create_match let new_branch_name = create_match
.get_one::<String>("branch-name") .value_of("branch-name")
.ok_or_else(|| anyhow!("No branch name provided"))?; .ok_or_else(|| anyhow!("No branch name provided"))?;
let pg_version = create_match let pg_version = create_match
.get_one::<u32>("pg-version") .value_of("pg-version")
.copied() .unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?; .context("Failed to parse postgres version from the argument string")?;
let timeline_info = let timeline_info =
pageserver.timeline_create(tenant_id, None, None, None, Some(pg_version))?; pageserver.timeline_create(tenant_id, None, None, None, Some(pg_version))?;
let new_timeline_id = timeline_info.timeline_id; let new_timeline_id = timeline_info.timeline_id;
let last_record_lsn = timeline_info.last_record_lsn; let last_record_lsn = timeline_info
.local
.expect("no local timeline info")
.last_record_lsn;
env.register_branch_mapping(new_branch_name.to_string(), tenant_id, new_timeline_id)?; env.register_branch_mapping(new_branch_name.to_string(), tenant_id, new_timeline_id)?;
println!( println!(
@@ -444,32 +670,35 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
let tenant_id = get_tenant_id(import_match, env)?; let tenant_id = get_tenant_id(import_match, env)?;
let timeline_id = parse_timeline_id(import_match)?.expect("No timeline id provided"); let timeline_id = parse_timeline_id(import_match)?.expect("No timeline id provided");
let name = import_match let name = import_match
.get_one::<String>("node-name") .value_of("node-name")
.ok_or_else(|| anyhow!("No node name provided"))?; .ok_or_else(|| anyhow!("No node name provided"))?;
// Parse base inputs // Parse base inputs
let base_tarfile = import_match let base_tarfile = import_match
.get_one::<PathBuf>("base-tarfile") .value_of("base-tarfile")
.ok_or_else(|| anyhow!("No base-tarfile provided"))? .map(|s| PathBuf::from_str(s).unwrap())
.to_owned(); .ok_or_else(|| anyhow!("No base-tarfile provided"))?;
let base_lsn = Lsn::from_str( let base_lsn = Lsn::from_str(
import_match import_match
.get_one::<String>("base-lsn") .value_of("base-lsn")
.ok_or_else(|| anyhow!("No base-lsn provided"))?, .ok_or_else(|| anyhow!("No base-lsn provided"))?,
)?; )?;
let base = (base_lsn, base_tarfile); let base = (base_lsn, base_tarfile);
// Parse pg_wal inputs // Parse pg_wal inputs
let wal_tarfile = import_match.get_one::<PathBuf>("wal-tarfile").cloned(); let wal_tarfile = import_match
.value_of("wal-tarfile")
.map(|s| PathBuf::from_str(s).unwrap());
let end_lsn = import_match let end_lsn = import_match
.get_one::<String>("end-lsn") .value_of("end-lsn")
.map(|s| Lsn::from_str(s).unwrap()); .map(|s| Lsn::from_str(s).unwrap());
// TODO validate both or none are provided // TODO validate both or none are provided
let pg_wal = end_lsn.zip(wal_tarfile); let pg_wal = end_lsn.zip(wal_tarfile);
let pg_version = import_match let pg_version = import_match
.get_one::<u32>("pg-version") .value_of("pg-version")
.copied() .unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?; .context("Failed to parse postgres version from the argument string")?;
let mut cplane = ComputeControlPlane::load(env.clone())?; let mut cplane = ComputeControlPlane::load(env.clone())?;
@@ -484,11 +713,10 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
Some(("branch", branch_match)) => { Some(("branch", branch_match)) => {
let tenant_id = get_tenant_id(branch_match, env)?; let tenant_id = get_tenant_id(branch_match, env)?;
let new_branch_name = branch_match let new_branch_name = branch_match
.get_one::<String>("branch-name") .value_of("branch-name")
.ok_or_else(|| anyhow!("No branch name provided"))?; .ok_or_else(|| anyhow!("No branch name provided"))?;
let ancestor_branch_name = branch_match let ancestor_branch_name = branch_match
.get_one::<String>("ancestor-branch-name") .value_of("ancestor-branch-name")
.map(|s| s.as_str())
.unwrap_or(DEFAULT_BRANCH_NAME); .unwrap_or(DEFAULT_BRANCH_NAME);
let ancestor_timeline_id = env let ancestor_timeline_id = env
.get_branch_timeline_id(ancestor_branch_name, tenant_id) .get_branch_timeline_id(ancestor_branch_name, tenant_id)
@@ -497,8 +725,8 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
})?; })?;
let start_lsn = branch_match let start_lsn = branch_match
.get_one::<String>("ancestor-start-lsn") .value_of("ancestor-start-lsn")
.map(|lsn_str| Lsn::from_str(lsn_str)) .map(Lsn::from_str)
.transpose() .transpose()
.context("Failed to parse ancestor start Lsn from the request")?; .context("Failed to parse ancestor start Lsn from the request")?;
let timeline_info = pageserver.timeline_create( let timeline_info = pageserver.timeline_create(
@@ -510,7 +738,10 @@ fn handle_timeline(timeline_match: &ArgMatches, env: &mut local_env::LocalEnv) -
)?; )?;
let new_timeline_id = timeline_info.timeline_id; let new_timeline_id = timeline_info.timeline_id;
let last_record_lsn = timeline_info.last_record_lsn; let last_record_lsn = timeline_info
.local
.expect("no local timeline info")
.last_record_lsn;
env.register_branch_mapping(new_branch_name.to_string(), tenant_id, new_timeline_id)?; env.register_branch_mapping(new_branch_name.to_string(), tenant_id, new_timeline_id)?;
@@ -570,7 +801,7 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
// Use the LSN at the end of the timeline. // Use the LSN at the end of the timeline.
timeline_infos timeline_infos
.get(&node.timeline_id) .get(&node.timeline_id)
.map(|bi| bi.last_record_lsn.to_string()) .and_then(|bi| bi.local.as_ref().map(|l| l.last_record_lsn.to_string()))
.unwrap_or_else(|| "?".to_string()) .unwrap_or_else(|| "?".to_string())
} }
Some(lsn) => { Some(lsn) => {
@@ -599,39 +830,45 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
} }
"create" => { "create" => {
let branch_name = sub_args let branch_name = sub_args
.get_one::<String>("branch-name") .value_of("branch-name")
.map(|s| s.as_str())
.unwrap_or(DEFAULT_BRANCH_NAME); .unwrap_or(DEFAULT_BRANCH_NAME);
let node_name = sub_args let node_name = sub_args
.get_one::<String>("node") .value_of("node")
.map(|node_name| node_name.to_string()) .map(ToString::to_string)
.unwrap_or_else(|| format!("{branch_name}_node")); .unwrap_or_else(|| format!("{}_node", branch_name));
let lsn = sub_args let lsn = sub_args
.get_one::<String>("lsn") .value_of("lsn")
.map(|lsn_str| Lsn::from_str(lsn_str)) .map(Lsn::from_str)
.transpose() .transpose()
.context("Failed to parse Lsn from the request")?; .context("Failed to parse Lsn from the request")?;
let timeline_id = env let timeline_id = env
.get_branch_timeline_id(branch_name, tenant_id) .get_branch_timeline_id(branch_name, tenant_id)
.ok_or_else(|| anyhow!("Found no timeline id for branch name '{branch_name}'"))?; .ok_or_else(|| anyhow!("Found no timeline id for branch name '{}'", branch_name))?;
let port: Option<u16> = sub_args.get_one::<u16>("port").copied(); let port: Option<u16> = match sub_args.value_of("port") {
Some(p) => Some(p.parse()?),
None => None,
};
let pg_version = sub_args let pg_version = sub_args
.get_one::<u32>("pg-version") .value_of("pg-version")
.copied() .unwrap()
.parse::<u32>()
.context("Failed to parse postgres version from the argument string")?; .context("Failed to parse postgres version from the argument string")?;
cplane.new_node(tenant_id, &node_name, timeline_id, lsn, port, pg_version)?; cplane.new_node(tenant_id, &node_name, timeline_id, lsn, port, pg_version)?;
} }
"start" => { "start" => {
let port: Option<u16> = sub_args.get_one::<u16>("port").copied(); let port: Option<u16> = match sub_args.value_of("port") {
Some(p) => Some(p.parse()?),
None => None,
};
let node_name = sub_args let node_name = sub_args
.get_one::<String>("node") .value_of("node")
.ok_or_else(|| anyhow!("No node name was provided to start"))?; .ok_or_else(|| anyhow!("No node name was provided to start"))?;
let node = cplane.nodes.get(&(tenant_id, node_name.to_string())); let node = cplane.nodes.get(&(tenant_id, node_name.to_owned()));
let auth_token = if matches!(env.pageserver.auth_type, AuthType::NeonJWT) { let auth_token = if matches!(env.pageserver.auth_type, AuthType::NeonJWT) {
let claims = Claims::new(Some(tenant_id), Scope::Tenant); let claims = Claims::new(Some(tenant_id), Scope::Tenant);
@@ -642,33 +879,36 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
}; };
if let Some(node) = node { if let Some(node) = node {
println!("Starting existing postgres {node_name}..."); println!("Starting existing postgres {}...", node_name);
node.start(&auth_token)?; node.start(&auth_token)?;
} else { } else {
let branch_name = sub_args let branch_name = sub_args
.get_one::<String>("branch-name") .value_of("branch-name")
.map(|s| s.as_str())
.unwrap_or(DEFAULT_BRANCH_NAME); .unwrap_or(DEFAULT_BRANCH_NAME);
let timeline_id = env let timeline_id = env
.get_branch_timeline_id(branch_name, tenant_id) .get_branch_timeline_id(branch_name, tenant_id)
.ok_or_else(|| { .ok_or_else(|| {
anyhow!("Found no timeline id for branch name '{branch_name}'") anyhow!("Found no timeline id for branch name '{}'", branch_name)
})?; })?;
let lsn = sub_args let lsn = sub_args
.get_one::<String>("lsn") .value_of("lsn")
.map(|lsn_str| Lsn::from_str(lsn_str)) .map(Lsn::from_str)
.transpose() .transpose()
.context("Failed to parse Lsn from the request")?; .context("Failed to parse Lsn from the request")?;
let pg_version = sub_args let pg_version = sub_args
.get_one::<u32>("pg-version") .value_of("pg-version")
.copied() .unwrap()
.context("Failed to `pg-version` from the argument string")?; .parse::<u32>()
.context("Failed to parse postgres version from the argument string")?;
// when used with custom port this results in non obvious behaviour // when used with custom port this results in non obvious behaviour
// port is remembered from first start command, i e // port is remembered from first start command, i e
// start --port X // start --port X
// stop // stop
// start <-- will also use port X even without explicit port argument // start <-- will also use port X even without explicit port argument
println!("Starting new postgres (v{pg_version}) {node_name} on timeline {timeline_id} ..."); println!(
"Starting new postgres (v{}) {} on timeline {} ...",
pg_version, node_name, timeline_id
);
let node = let node =
cplane.new_node(tenant_id, node_name, timeline_id, lsn, port, pg_version)?; cplane.new_node(tenant_id, node_name, timeline_id, lsn, port, pg_version)?;
@@ -677,18 +917,18 @@ fn handle_pg(pg_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
} }
"stop" => { "stop" => {
let node_name = sub_args let node_name = sub_args
.get_one::<String>("node") .value_of("node")
.ok_or_else(|| anyhow!("No node name was provided to stop"))?; .ok_or_else(|| anyhow!("No node name was provided to stop"))?;
let destroy = sub_args.get_flag("destroy"); let destroy = sub_args.is_present("destroy");
let node = cplane let node = cplane
.nodes .nodes
.get(&(tenant_id, node_name.to_string())) .get(&(tenant_id, node_name.to_owned()))
.with_context(|| format!("postgres {node_name} is not found"))?; .with_context(|| format!("postgres {} is not found", node_name))?;
node.stop(destroy)?; node.stop(destroy)?;
} }
_ => bail!("Unexpected pg subcommand '{sub_name}'"), _ => bail!("Unexpected pg subcommand '{}'", sub_name),
} }
Ok(()) Ok(())
@@ -706,10 +946,7 @@ fn handle_pageserver(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
} }
Some(("stop", stop_match)) => { Some(("stop", stop_match)) => {
let immediate = stop_match let immediate = stop_match.value_of("stop-mode") == Some("immediate");
.get_one::<String>("stop-mode")
.map(|s| s.as_str())
== Some("immediate");
if let Err(e) = pageserver.stop(immediate) { if let Err(e) = pageserver.stop(immediate) {
eprintln!("pageserver stop failed: {}", e); eprintln!("pageserver stop failed: {}", e);
@@ -759,7 +996,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
}; };
// All the commands take an optional safekeeper name argument // All the commands take an optional safekeeper name argument
let sk_id = if let Some(id_str) = sub_args.get_one::<String>("id") { let sk_id = if let Some(id_str) = sub_args.value_of("id") {
NodeId(id_str.parse().context("while parsing safekeeper id")?) NodeId(id_str.parse().context("while parsing safekeeper id")?)
} else { } else {
DEFAULT_SAFEKEEPER_ID DEFAULT_SAFEKEEPER_ID
@@ -775,8 +1012,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
} }
"stop" => { "stop" => {
let immediate = let immediate = sub_args.value_of("stop-mode") == Some("immediate");
sub_args.get_one::<String>("stop-mode").map(|s| s.as_str()) == Some("immediate");
if let Err(e) = safekeeper.stop(immediate) { if let Err(e) = safekeeper.stop(immediate) {
eprintln!("safekeeper stop failed: {}", e); eprintln!("safekeeper stop failed: {}", e);
@@ -785,8 +1021,7 @@ fn handle_safekeeper(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Resul
} }
"restart" => { "restart" => {
let immediate = let immediate = sub_args.value_of("stop-mode") == Some("immediate");
sub_args.get_one::<String>("stop-mode").map(|s| s.as_str()) == Some("immediate");
if let Err(e) = safekeeper.stop(immediate) { if let Err(e) = safekeeper.stop(immediate) {
eprintln!("safekeeper stop failed: {}", e); eprintln!("safekeeper stop failed: {}", e);
@@ -830,8 +1065,7 @@ fn handle_start_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> anyhow
} }
fn handle_stop_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> { fn handle_stop_all(sub_match: &ArgMatches, env: &local_env::LocalEnv) -> Result<()> {
let immediate = let immediate = sub_match.value_of("stop-mode") == Some("immediate");
sub_match.get_one::<String>("stop-mode").map(|s| s.as_str()) == Some("immediate");
let pageserver = PageServerNode::from_env(env); let pageserver = PageServerNode::from_env(env);
@@ -864,219 +1098,3 @@ fn try_stop_etcd_process(env: &local_env::LocalEnv) {
eprintln!("etcd stop failed: {e}"); eprintln!("etcd stop failed: {e}");
} }
} }
fn cli() -> Command {
let branch_name_arg = Arg::new("branch-name")
.long("branch-name")
.help("Name of the branch to be created or used as an alias for other services")
.required(false);
let pg_node_arg = Arg::new("node").help("Postgres node name").required(false);
let safekeeper_id_arg = Arg::new("id").help("safekeeper id").required(false);
let tenant_id_arg = Arg::new("tenant-id")
.long("tenant-id")
.help("Tenant id. Represented as a hexadecimal string 32 symbols length")
.required(false);
let timeline_id_arg = Arg::new("timeline-id")
.long("timeline-id")
.help("Timeline id. Represented as a hexadecimal string 32 symbols length")
.required(false);
let pg_version_arg = Arg::new("pg-version")
.long("pg-version")
.help("Postgres version to use for the initial tenant")
.required(false)
.value_parser(value_parser!(u32))
.default_value(DEFAULT_PG_VERSION);
let port_arg = Arg::new("port")
.long("port")
.required(false)
.value_parser(value_parser!(u16))
.value_name("port");
let stop_mode_arg = Arg::new("stop-mode")
.short('m')
.value_parser(["fast", "immediate"])
.help("If 'immediate', don't flush repository data at shutdown")
.required(false)
.value_name("stop-mode");
let pageserver_config_args = Arg::new("pageserver-config-override")
.long("pageserver-config-override")
.num_args(1)
.action(ArgAction::Append)
.help("Additional pageserver's configuration options or overrides, refer to pageserver's 'config-override' CLI parameter docs for more")
.required(false);
let lsn_arg = Arg::new("lsn")
.long("lsn")
.help("Specify Lsn on the timeline to start from. By default, end of the timeline would be used.")
.required(false);
Command::new("Neon CLI")
.arg_required_else_help(true)
.version(GIT_VERSION)
.subcommand(
Command::new("init")
.about("Initialize a new Neon repository")
.arg(pageserver_config_args.clone())
.arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline"))
.arg(
Arg::new("config")
.long("config")
.required(false)
.value_parser(value_parser!(PathBuf))
.value_name("config"),
)
.arg(pg_version_arg.clone())
)
.subcommand(
Command::new("timeline")
.about("Manage timelines")
.subcommand(Command::new("list")
.about("List all timelines, available to this pageserver")
.arg(tenant_id_arg.clone()))
.subcommand(Command::new("branch")
.about("Create a new timeline, using another timeline as a base, copying its data")
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(Arg::new("ancestor-branch-name").long("ancestor-branch-name")
.help("Use last Lsn of another timeline (and its data) as base when creating the new timeline. The timeline gets resolved by its branch name.").required(false))
.arg(Arg::new("ancestor-start-lsn").long("ancestor-start-lsn")
.help("When using another timeline as base, use a specific Lsn in it instead of the latest one").required(false)))
.subcommand(Command::new("create")
.about("Create a new blank timeline")
.arg(tenant_id_arg.clone())
.arg(branch_name_arg.clone())
.arg(pg_version_arg.clone())
)
.subcommand(Command::new("import")
.about("Import timeline from basebackup directory")
.arg(tenant_id_arg.clone())
.arg(timeline_id_arg.clone())
.arg(Arg::new("node-name").long("node-name")
.help("Name to assign to the imported timeline"))
.arg(Arg::new("base-tarfile")
.long("base-tarfile")
.value_parser(value_parser!(PathBuf))
.help("Basebackup tarfile to import")
)
.arg(Arg::new("base-lsn").long("base-lsn")
.help("Lsn the basebackup starts at"))
.arg(Arg::new("wal-tarfile")
.long("wal-tarfile")
.value_parser(value_parser!(PathBuf))
.help("Wal to add after base")
)
.arg(Arg::new("end-lsn").long("end-lsn")
.help("Lsn the basebackup ends at"))
.arg(pg_version_arg.clone())
)
).subcommand(
Command::new("tenant")
.arg_required_else_help(true)
.about("Manage tenants")
.subcommand(Command::new("list"))
.subcommand(Command::new("create")
.arg(tenant_id_arg.clone())
.arg(timeline_id_arg.clone().help("Use a specific timeline id when creating a tenant and its initial timeline"))
.arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false))
.arg(pg_version_arg.clone())
)
.subcommand(Command::new("config")
.arg(tenant_id_arg.clone())
.arg(Arg::new("config").short('c').num_args(1).action(ArgAction::Append).required(false))
)
)
.subcommand(
Command::new("pageserver")
.arg_required_else_help(true)
.about("Manage pageserver")
.subcommand(Command::new("status"))
.subcommand(Command::new("start").about("Start local pageserver").arg(pageserver_config_args.clone()))
.subcommand(Command::new("stop").about("Stop local pageserver")
.arg(stop_mode_arg.clone()))
.subcommand(Command::new("restart").about("Restart local pageserver").arg(pageserver_config_args.clone()))
)
.subcommand(
Command::new("safekeeper")
.arg_required_else_help(true)
.about("Manage safekeepers")
.subcommand(Command::new("start")
.about("Start local safekeeper")
.arg(safekeeper_id_arg.clone())
)
.subcommand(Command::new("stop")
.about("Stop local safekeeper")
.arg(safekeeper_id_arg.clone())
.arg(stop_mode_arg.clone())
)
.subcommand(Command::new("restart")
.about("Restart local safekeeper")
.arg(safekeeper_id_arg)
.arg(stop_mode_arg.clone())
)
)
.subcommand(
Command::new("pg")
.arg_required_else_help(true)
.about("Manage postgres instances")
.subcommand(Command::new("list").arg(tenant_id_arg.clone()))
.subcommand(Command::new("create")
.about("Create a postgres compute node")
.arg(pg_node_arg.clone())
.arg(branch_name_arg.clone())
.arg(tenant_id_arg.clone())
.arg(lsn_arg.clone())
.arg(port_arg.clone())
.arg(
Arg::new("config-only")
.help("Don't do basebackup, create compute node with only config files")
.long("config-only")
.required(false))
.arg(pg_version_arg.clone())
)
.subcommand(Command::new("start")
.about("Start a postgres compute node.\n This command actually creates new node from scratch, but preserves existing config files")
.arg(pg_node_arg.clone())
.arg(tenant_id_arg.clone())
.arg(branch_name_arg)
.arg(timeline_id_arg)
.arg(lsn_arg)
.arg(port_arg)
.arg(pg_version_arg)
)
.subcommand(
Command::new("stop")
.arg(pg_node_arg)
.arg(tenant_id_arg)
.arg(
Arg::new("destroy")
.help("Also delete data directory (now optional, should be default in future)")
.long("destroy")
.action(ArgAction::SetTrue)
.required(false)
)
)
)
.subcommand(
Command::new("start")
.about("Start page server and safekeepers")
.arg(pageserver_config_args)
)
.subcommand(
Command::new("stop")
.about("Stop page server and safekeepers")
.arg(stop_mode_arg)
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}
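The migration above moves neon_local from clap 3 (`App`, `takes_value`, string values) to clap 4 (`Command`, typed `value_parser!`). A small self-contained sketch of the clap 4 pattern, separate from the real CLI:

```rust
// With a typed value_parser, validation happens at parse time and handlers read
// typed values via get_one::<T>() instead of parsing strings themselves.
use clap::{value_parser, Arg, Command};

fn main() {
    let matches = Command::new("demo")
        .arg(
            Arg::new("pg-version")
                .long("pg-version")
                .value_parser(value_parser!(u32))
                .default_value("14"),
        )
        .get_matches_from(["demo", "--pg-version", "15"]);

    let version: u32 = *matches.get_one::<u32>("pg-version").expect("has a default");
    assert_eq!(version, 15);
}
```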
View File
@@ -12,8 +12,13 @@ use nix::unistd::Pid;
use postgres::Config; use postgres::Config;
use reqwest::blocking::{Client, RequestBuilder, Response}; use reqwest::blocking::{Client, RequestBuilder, Response};
use reqwest::{IntoUrl, Method}; use reqwest::{IntoUrl, Method};
use safekeeper_api::models::TimelineCreateRequest;
use thiserror::Error; use thiserror::Error;
use utils::{connstring::connection_address, http::error::HttpErrorBody, id::NodeId}; use utils::{
connstring::connection_address,
http::error::HttpErrorBody,
id::{NodeId, TenantId, TimelineId},
};
use crate::local_env::{LocalEnv, SafekeeperConf}; use crate::local_env::{LocalEnv, SafekeeperConf};
use crate::storage::PageServerNode; use crate::storage::PageServerNode;
@@ -276,4 +281,24 @@ impl SafekeeperNode {
.error_from_body()?; .error_from_body()?;
Ok(()) Ok(())
} }
pub fn timeline_create(
&self,
tenant_id: TenantId,
timeline_id: TimelineId,
peer_ids: Vec<NodeId>,
) -> Result<()> {
Ok(self
.http_request(
Method::POST,
format!("{}/tenant/{}/timeline", self.http_base_url, tenant_id),
)
.json(&TimelineCreateRequest {
timeline_id,
peer_ids,
})
.send()?
.error_from_body()?
.json()?)
}
} }
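A hypothetical call site for the new `timeline_create` helper above; the ids and peer list are made up, and the sketch assumes the same imports as the surrounding module (`SafekeeperNode`, `TenantId`, `TimelineId`, `NodeId`):

```rust
use std::str::FromStr;

fn create_timeline_on_safekeeper(sk: &SafekeeperNode) -> anyhow::Result<()> {
    // 32-hex-character ids, as expected by TenantId/TimelineId parsing.
    let tenant_id = TenantId::from_str("11223344556677881122334455667788")?;
    let timeline_id = TimelineId::from_str("88776655443322118877665544332211")?;
    // Ask this safekeeper to create the timeline, listing the other safekeepers as peers.
    sk.timeline_create(tenant_id, timeline_id, vec![NodeId(1), NodeId(2), NodeId(3)])?;
    Ok(())
}
```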
View File
@@ -1,163 +0,0 @@
# Storage messaging
Safekeepers need to communicate with each other to
* Trim WAL on safekeepers;
* Decide on which SK should push WAL to the S3;
* Decide on when to shut down the SK<->pageserver connection;
* Understand each other's state to perform peer recovery.
Pageservers need to communicate with safekeepers to decide which SK should provide
WAL to the pageserver.
This is an iteration on [015-storage-messaging](https://github.com/neondatabase/neon/blob/main/docs/rfcs/015-storage-messaging.md) describing the current situation,
a potential performance issue, and ways to address it.
## Background
What we have currently is very close to the etcd variant described in
015-storage-messaging. Basically, we have a single `SkTimelineInfo` message
periodically sent by every safekeeper to etcd for each timeline.
* Safekeepers subscribe to it to learn the status of their peers (currently they subscribe to
'everything', but they can and should fetch data only for timelines they hold).
* The pageserver subscribes to it (a separate watch per timeline) to learn safekeeper
positions; based on that, it decides which safekeepers to pull WAL from.
Also, safekeepers use the etcd elections API to make sure only a single safekeeper
offloads WAL.
It works, and callmemaybe is gone. However, this has a performance
hazard. The currently deployed etcd can do about 6k puts per second (using its own
`benchmark` tool); on my 6-core laptop, while running on tmpfs, this gets to
35k. Making the benchmark closer to our usage [etcd watch bench](https://github.com/arssher/etcd-client/blob/watch-bench/examples/watch_bench.rs),
I get ~10k received messages per second with various numbers of publisher-subscribers
(laptop, tmpfs). Dividing this by 12 (3 safekeepers generate the message, 1 pageserver + 3 safekeepers consume it), we
get about 800 active timelines, if a message is sent each second. Not extremely
low, but quite reachable.
A lot of idle watches seem to be ok though -- which is good, as the pageserver
subscribes to all its timelines regardless of their activity.
Also, running etcd with fsyncs disabled is messy -- the data dir must be wiped on
each restart or there is a risk of corruption errors.
The reason is that etcd does much more than we need; it is a fault-tolerant
store with strong consistency, but I claim all we need here is the simplest pub-sub
with best-effort delivery, because
* We already have a centralized source of truth for long-running data, like which
timelines are on which nodes -- the console.
* Momentary data (safekeeper/pageserver progress) doesn't make sense to persist.
Instead of putting each change into the broker and expecting it to deliver it reliably,
it is better to just have a constant flow of data for active timelines: 1) it
serves as natural heartbeats -- if a node can't send, we shouldn't pull WAL from
it; 2) it is simpler -- no need to track delivery to/from the broker.
Moreover, latency matters here: the faster we obtain fresh data, the
faster we can switch to a proper safekeeper after a failure.
* As for WAL offloading leader election, it is trivial to achieve through these
heartbeats -- just pick a suitable node by a deterministic rule (min node
id); see the sketch after this list. Once the network is stable, this is a converging process (well, except for
complicated failure topologies, but even then making it converge is not
hard). Such elections bear some risk of several offloaders running
concurrently for a short period of time, but that's harmless.
Generally, if one needs strong consistency, electing a leader per se is not
enough; it must be accompanied by a number (a logical clock timestamp), checked at
every action to track causality. S3 doesn't provide CAS, so it can't
differentiate an old leader from a new one; this must be solved differently.
We could use etcd CAS (its most powerful/useful primitive, actually) to issue
these leader numbers (and e.g. prefix files in S3), but currently I don't see
a need for that.
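A minimal sketch of that deterministic rule (hypothetical types; the real heartbeat payload is `SkTimelineInfo`):

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct Heartbeat {
    last_seen: Instant,
}

/// Among safekeepers whose heartbeat is still fresh, pick the minimum node id.
/// Every node applies the same rule, so once the network is stable they converge
/// on the same offloader without an explicit election.
fn choose_offloader(heartbeats: &HashMap<u64, Heartbeat>, ttl: Duration) -> Option<u64> {
    let now = Instant::now();
    heartbeats
        .iter()
        .filter(|(_, hb)| now.duration_since(hb.last_seen) < ttl)
        .map(|(id, _)| *id)
        .min()
}
```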
Obviously a best-effort pub-sub is much simpler and more performant; the proposed one follows.
## gRPC broker
I took tonic and [prototyped](https://github.com/neondatabase/neon/blob/asher/neon-broker/broker/src/broker.rs) a replacement of the functionality we currently use
with gRPC streams and tokio mpsc channels. The implementation description is in the file header.
It is just 500 lines of code and the core functionality is complete. 1-to-1 pub-sub
gives about 120k received messages per second; having multiple subscribers in
different connections quickly scales to 1 million received messages per second.
I had concerns about many concurrent streams in a single connection, but 2^20
subscribers still work (though they eat memory: with 10 publishers, 20GB are consumed;
in this implementation each publisher holds a full copy of all subscribers). There
is `bench.rs` nearby which I used for testing.
`SkTimelineInfo` is wired here, but another message can be added (e.g. if
pageservers want to communicate with each other) with templating.
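A self-contained sketch of the fan-out idea (not the tonic prototype itself; the types and names here are made up for illustration):

```rust
use std::sync::{Arc, Mutex};

use tokio::sync::mpsc;

#[derive(Clone, Debug)]
struct SkTimelineInfo {
    timeline: u64,
    flush_lsn: u64,
}

#[derive(Default, Clone)]
struct Broker {
    subscribers: Arc<Mutex<Vec<mpsc::UnboundedSender<SkTimelineInfo>>>>,
}

impl Broker {
    fn subscribe(&self) -> mpsc::UnboundedReceiver<SkTimelineInfo> {
        let (tx, rx) = mpsc::unbounded_channel();
        self.subscribers.lock().unwrap().push(tx);
        rx
    }

    fn publish(&self, msg: SkTimelineInfo) {
        // Best-effort delivery: drop senders whose subscriber has gone away.
        self.subscribers
            .lock()
            .unwrap()
            .retain(|tx| tx.send(msg.clone()).is_ok());
    }
}
```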
### Fault tolerance
Since such a broker is stateless, we can run it under k8s, or add proxying to
other members; with best-effort delivery this is simple.
### Security implications
Communication happens in a private network that is not exposed to users;
additionally, we can add auth to the broker.
## Alternative: get existing pub-sub
We could take some existing pub-sub solution, e.g. RabbitMQ or Redis. But in this
case, IMV, the simplicity of our own outweighs the external dependency costs (RabbitMQ is
much more complicated and needs a VM; Redis Rust client maintenance is not
ideal...). Also note that projects like CockroachDB and TiDB are based on gRPC
as well.
## Alternative: direct communication
Apart from being a transport, the broker solves one more task: discovery, i.e. letting
safekeepers and pageservers find each other. We can let safekeepers know, for
each timeline, both the other safekeepers for this timeline and the pageservers serving
it. In this case direct communication is possible:
- each safekeeper pushes to every other safekeeper the status of timelines residing
on both of them, letting them remove WAL, decide who offloads, and decide on peer
recovery;
- each safekeeper pushes to each pageserver the status of timelines residing on
both of them, letting the pageserver choose which safekeeper to pull WAL from.
It was mostly described in [014-safekeeper-gossip](https://github.com/neondatabase/neon/blob/main/docs/rfcs/014-safekeepers-gossip.md), but I want to recap it here.
The main pro is one dependency fewer: fewer moving parts, easier to run Neon
locally/manually, fewer places to monitor. Fault tolerance for the broker stops being a concern;
no kuber or anything like it. To me this is a big thing.
Also (though not a big thing) idle watches for inactive timelines disappear:
naturally, safekeepers learn about a compute connection first and start pushing
status to the pageserver(s), notifying them that they should pull.
Importantly, I think that eventually knowing and persisting peers and
pageservers on safekeepers is inevitable:
- Knowing peer safekeepers for the timeline is required for correct
automatic membership change -- new member set must be hardened on old
majority before proceeding. It is required to get rid of sync-safekeepers
as well (peer recovery up to flush_lsn).
- Knowing pageservers where the timeline is attached is needed to
1. Understand when to shut down activity on the timeline, i.e. push data to
the broker. We can have a lot of timelines sleeping quietly which
shouldn't occupy resources.
2. Preserve WAL for these (currently we offload to s3 and take it from there,
but serving locally is better, and we get one less condition on which WAL
can be removed from s3).
I suppose this membership data should be passed to safekeepers directly from the
console because
1. The console is the original source of this data; conceptually this is the
simplest way (rather than passing it through compute or something).
2. We already have similar code for deleting a timeline on safekeepers
(and attaching/detaching a timeline on the pageserver); this is a typical
action -- queue an operation against a storage node and execute it until it
completes (or the timeline is dropped).
Cons of direct communication are
- It is more complicated: each safekeeper should maintain the set of peers it talks
to, and the set of timelines for each such peer -- they ought to be multiplexed
into a single connection.
- In total, we have O(n^2) connections instead of O(n) with the broker schema
(still O(n) on each node). However, these are relatively stable, async, and
thus not very expensive; I don't think this is a big problem. Up to 10k
storage nodes, I doubt the connection overhead would be noticeable.
I'd use gRPC for direct communication, and in this sense a gRPC-based broker is a
step towards it.
View File
@@ -8,7 +8,7 @@
regex = "1.4.5" regex = "1.4.5"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1" serde_json = "1"
serde_with = "2.0" serde_with = "1.12.0"
once_cell = "1.13.0" once_cell = "1.13.0"
utils = { path = "../utils" } utils = { path = "../utils" }
View File
@@ -77,16 +77,6 @@ pub const DISK_WRITE_SECONDS_BUCKETS: &[f64] = &[
0.000_050, 0.000_100, 0.000_500, 0.001, 0.003, 0.005, 0.01, 0.05, 0.1, 0.3, 0.5, 0.000_050, 0.000_100, 0.000_500, 0.001, 0.003, 0.005, 0.01, 0.05, 0.1, 0.3, 0.5,
]; ];
pub fn set_build_info_metric(revision: &str) {
let metric = register_int_gauge_vec!(
"libmetrics_build_info",
"Build/version information",
&["revision"]
)
.expect("Failed to register build info metric");
metric.with_label_values(&[revision]).set(1);
}
// Records I/O stats in a "cross-platform" way. // Records I/O stats in a "cross-platform" way.
// Compiles both on macOS and Linux, but current macOS implementation always returns 0 as values for I/O stats. // Compiles both on macOS and Linux, but current macOS implementation always returns 0 as values for I/O stats.
// An alternative is to read procfs (`/proc/[pid]/io`) which does not work under macOS at all, hence abandoned. // An alternative is to read procfs (`/proc/[pid]/io`) which does not work under macOS at all, hence abandoned.
View File
@@ -5,7 +5,7 @@ edition = "2021"
[dependencies] [dependencies]
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_with = "2.0" serde_with = "1.12.0"
const_format = "0.2.21" const_format = "0.2.21"
utils = { path = "../utils" } utils = { path = "../utils" }
View File
@@ -123,15 +123,9 @@ pub struct TenantInfo {
pub has_in_progress_downloads: Option<bool>, pub has_in_progress_downloads: Option<bool>,
} }
/// This represents the output of the "timeline_detail" and "timeline_list" API calls.
#[serde_as] #[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)] #[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TimelineInfo { pub struct LocalTimelineInfo {
#[serde_as(as = "DisplayFromStr")]
pub tenant_id: TenantId,
#[serde_as(as = "DisplayFromStr")]
pub timeline_id: TimelineId,
#[serde_as(as = "Option<DisplayFromStr>")] #[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_timeline_id: Option<TimelineId>, pub ancestor_timeline_id: Option<TimelineId>,
#[serde_as(as = "Option<DisplayFromStr>")] #[serde_as(as = "Option<DisplayFromStr>")]
@@ -155,33 +149,28 @@ pub struct TimelineInfo {
/// the timestamp (in microseconds) of the last received message /// the timestamp (in microseconds) of the last received message
pub last_received_msg_ts: Option<u128>, pub last_received_msg_ts: Option<u128>,
pub pg_version: u32, pub pg_version: u32,
#[serde_as(as = "Option<DisplayFromStr>")]
pub remote_consistent_lsn: Option<Lsn>,
pub awaits_download: bool,
// Some of the above fields are duplicated in 'local' and 'remote', for backwards-
// compatility with older clients.
pub local: LocalTimelineInfo,
pub remote: RemoteTimelineInfo,
}
#[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LocalTimelineInfo {
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_timeline_id: Option<TimelineId>,
#[serde_as(as = "Option<DisplayFromStr>")]
pub ancestor_lsn: Option<Lsn>,
pub current_logical_size: Option<u64>, // is None when timeline is Unloaded
pub current_physical_size: Option<u64>, // is None when timeline is Unloaded
} }
#[serde_as] #[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)] #[derive(Debug, Serialize, Deserialize, Clone)]
pub struct RemoteTimelineInfo { pub struct RemoteTimelineInfo {
#[serde_as(as = "Option<DisplayFromStr>")] #[serde_as(as = "DisplayFromStr")]
pub remote_consistent_lsn: Option<Lsn>, pub remote_consistent_lsn: Lsn,
pub awaits_download: bool,
}
///
/// This represents the output of the "timeline_detail" API call.
///
#[serde_as]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct TimelineInfo {
#[serde_as(as = "DisplayFromStr")]
pub tenant_id: TenantId,
#[serde_as(as = "DisplayFromStr")]
pub timeline_id: TimelineId,
pub local: Option<LocalTimelineInfo>,
pub remote: Option<RemoteTimelineInfo>,
} }
pub type ConfigureFailpointsRequest = Vec<FailpointConfig>; pub type ConfigureFailpointsRequest = Vec<FailpointConfig>;
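The API types above lean on `serde_with`'s `DisplayFromStr` so ids and LSNs travel as strings over HTTP. A minimal sketch of that pattern with stand-in types (not the actual `TimelineInfo` definition):

```rust
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};

#[serde_as]
#[derive(Debug, Serialize, Deserialize)]
struct ExampleTimelineInfo {
    // u64 stands in for Lsn/TenantId/TimelineId, which also implement Display + FromStr.
    #[serde_as(as = "DisplayFromStr")]
    last_record_lsn: u64,
    #[serde_as(as = "Option<DisplayFromStr>")]
    ancestor_lsn: Option<u64>,
}
```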
View File
@@ -13,7 +13,7 @@ crc32c = "0.6.0"
hex = "0.4.3" hex = "0.4.3"
once_cell = "1.13.0" once_cell = "1.13.0"
log = "0.4.14" log = "0.4.14"
memoffset = "0.7" memoffset = "0.6.2"
thiserror = "1.0" thiserror = "1.0"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
utils = { path = "../utils" } utils = { path = "../utils" }
@@ -26,4 +26,4 @@ wal_craft = { path = "wal_craft" }
[build-dependencies] [build-dependencies]
anyhow = "1.0" anyhow = "1.0"
bindgen = "0.61" bindgen = "0.60.1"
View File
@@ -7,7 +7,7 @@ edition = "2021"
[dependencies] [dependencies]
anyhow = "1.0" anyhow = "1.0"
clap = "4.0" clap = "3.0"
env_logger = "0.9" env_logger = "0.9"
log = "0.4" log = "0.4"
once_cell = "1.13.0" once_cell = "1.13.0"
View File
@@ -1,19 +1,68 @@
use anyhow::*; use anyhow::*;
use clap::{value_parser, Arg, ArgMatches, Command}; use clap::{App, Arg, ArgMatches};
use std::{path::PathBuf, str::FromStr}; use std::str::FromStr;
use wal_craft::*; use wal_craft::*;
fn main() -> Result<()> { fn main() -> Result<()> {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("wal_craft=info")) env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("wal_craft=info"))
.init(); .init();
let arg_matches = cli().get_matches(); let type_arg = &Arg::new("type")
.takes_value(true)
.help("Type of WAL to craft")
.possible_values([
Simple::NAME,
LastWalRecordXlogSwitch::NAME,
LastWalRecordXlogSwitchEndsOnPageBoundary::NAME,
WalRecordCrossingSegmentFollowedBySmallOne::NAME,
LastWalRecordCrossingSegment::NAME,
])
.required(true);
let arg_matches = App::new("Postgres WAL crafter")
.about("Crafts Postgres databases with specific WAL properties")
.subcommand(
App::new("print-postgres-config")
.about("Print the configuration required for PostgreSQL server before running this script")
)
.subcommand(
App::new("with-initdb")
.about("Craft WAL in a new data directory first initialized with initdb")
.arg(type_arg)
.arg(
Arg::new("datadir")
.takes_value(true)
.help("Data directory for the Postgres server")
.required(true)
)
.arg(
Arg::new("pg-distrib-dir")
.long("pg-distrib-dir")
.takes_value(true)
.help("Directory with Postgres distributions (bin and lib directories, e.g. pg_install containing subpath `v14/bin/postgresql`)")
.default_value("/usr/local")
)
.arg(
Arg::new("pg-version")
.long("pg-version")
.help("Postgres version to use for the initial tenant")
.required(true)
.takes_value(true)
)
)
.subcommand(
App::new("in-existing")
.about("Craft WAL at an existing recently created Postgres database. Note that server may append new WAL entries on shutdown.")
.arg(type_arg)
.arg(
Arg::new("connection")
.takes_value(true)
.help("Connection string to the Postgres database to populate")
.required(true)
)
)
.get_matches();
    let wal_craft = |arg_matches: &ArgMatches, client| {
-        let (intermediate_lsns, end_of_wal_lsn) = match arg_matches
-            .get_one::<String>("type")
-            .map(|s| s.as_str())
-            .context("'type' is required")?
-        {
+        let (intermediate_lsns, end_of_wal_lsn) = match arg_matches.value_of("type").unwrap() {
            Simple::NAME => Simple::craft(client)?,
            LastWalRecordXlogSwitch::NAME => LastWalRecordXlogSwitch::craft(client)?,
            LastWalRecordXlogSwitchEndsOnPageBoundary::NAME => {
@@ -23,12 +72,12 @@ fn main() -> Result<()> {
                WalRecordCrossingSegmentFollowedBySmallOne::craft(client)?
            }
            LastWalRecordCrossingSegment::NAME => LastWalRecordCrossingSegment::craft(client)?,
-            a => panic!("Unknown --type argument: {a}"),
+            a => panic!("Unknown --type argument: {}", a),
        };
        for lsn in intermediate_lsns {
-            println!("intermediate_lsn = {lsn}");
+            println!("intermediate_lsn = {}", lsn);
        }
-        println!("end_of_wal = {end_of_wal_lsn}");
+        println!("end_of_wal = {}", end_of_wal_lsn);
        Ok(())
    };
@@ -36,24 +85,20 @@ fn main() -> Result<()> {
        None => panic!("No subcommand provided"),
        Some(("print-postgres-config", _)) => {
            for cfg in REQUIRED_POSTGRES_CONFIG.iter() {
-                println!("{cfg}");
+                println!("{}", cfg);
            }
            Ok(())
        }
        Some(("with-initdb", arg_matches)) => {
            let cfg = Conf {
-                pg_version: *arg_matches
-                    .get_one::<u32>("pg-version")
-                    .context("'pg-version' is required")?,
-                pg_distrib_dir: arg_matches
-                    .get_one::<PathBuf>("pg-distrib-dir")
-                    .context("'pg-distrib-dir' is required")?
-                    .to_owned(),
-                datadir: arg_matches
-                    .get_one::<PathBuf>("datadir")
-                    .context("'datadir' is required")?
-                    .to_owned(),
+                pg_version: arg_matches
+                    .value_of("pg-version")
+                    .unwrap()
+                    .parse::<u32>()
+                    .context("Failed to parse postgres version from the argument string")?,
+                pg_distrib_dir: arg_matches.value_of("pg-distrib-dir").unwrap().into(),
+                datadir: arg_matches.value_of("datadir").unwrap().into(),
            };
            cfg.initdb()?;
            let srv = cfg.start_server()?;
@@ -63,77 +108,9 @@ fn main() -> Result<()> {
        }
        Some(("in-existing", arg_matches)) => wal_craft(
            arg_matches,
-            &mut postgres::Config::from_str(
-                arg_matches
-                    .get_one::<String>("connection")
-                    .context("'connection' is required")?,
-            )
-            .context(
-                "'connection' argument value could not be parsed as a postgres connection string",
-            )?
-            .connect(postgres::NoTls)?,
+            &mut postgres::Config::from_str(arg_matches.value_of("connection").unwrap())?
+                .connect(postgres::NoTls)?,
        ),
        Some(_) => panic!("Unknown subcommand"),
    }
}
fn cli() -> Command {
let type_arg = &Arg::new("type")
.help("Type of WAL to craft")
.value_parser([
Simple::NAME,
LastWalRecordXlogSwitch::NAME,
LastWalRecordXlogSwitchEndsOnPageBoundary::NAME,
WalRecordCrossingSegmentFollowedBySmallOne::NAME,
LastWalRecordCrossingSegment::NAME,
])
.required(true);
Command::new("Postgres WAL crafter")
.about("Crafts Postgres databases with specific WAL properties")
.subcommand(
Command::new("print-postgres-config")
.about("Print the configuration required for PostgreSQL server before running this script")
)
.subcommand(
Command::new("with-initdb")
.about("Craft WAL in a new data directory first initialized with initdb")
.arg(type_arg)
.arg(
Arg::new("datadir")
.help("Data directory for the Postgres server")
.value_parser(value_parser!(PathBuf))
.required(true)
)
.arg(
Arg::new("pg-distrib-dir")
.long("pg-distrib-dir")
.value_parser(value_parser!(PathBuf))
.help("Directory with Postgres distributions (bin and lib directories, e.g. pg_install containing subpath `v14/bin/postgresql`)")
.default_value("/usr/local")
)
.arg(
Arg::new("pg-version")
.long("pg-version")
.help("Postgres version to use for the initial tenant")
.value_parser(value_parser!(u32))
.required(true)
)
)
.subcommand(
Command::new("in-existing")
.about("Craft WAL at an existing recently created Postgres database. Note that server may append new WAL entries on shutdown.")
.arg(type_arg)
.arg(
Arg::new("connection")
.help("Connection string to the Postgres database to populate")
.required(true)
)
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}
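Much of this file's diff is mechanical API churn between the two clap major versions shown above: one side uses clap 3's stringly-typed `takes_value`/`value_of` calls, the other clap 4's typed `value_parser!`/`get_one::<T>`. A minimal sketch of the clap 4 style, with the clap 3 equivalents noted in comments (the `demo` command and `--port` argument are invented for illustration):

use clap::{value_parser, Arg, Command};

fn main() {
    let matches = Command::new("demo")
        .arg(
            Arg::new("port")
                .long("port")
                // clap 4 parses and types the value at definition time; the clap 3
                // equivalent was `.takes_value(true)` plus a manual
                // `value_of("port").unwrap().parse::<u16>()` at the call site.
                .value_parser(value_parser!(u16))
                .required(true),
        )
        .get_matches_from(["demo", "--port", "5432"]);

    // clap 4 hands back a typed reference; clap 3's `value_of` returned Option<&str>.
    let port: u16 = *matches.get_one::<u16>("port").expect("required");
    assert_eq!(port, 5432);
}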


@@ -15,7 +15,7 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1" serde_json = "1"
tokio = { version = "1.17", features = ["sync", "macros", "fs", "io-util"] } tokio = { version = "1.17", features = ["sync", "macros", "fs", "io-util"] }
tokio-util = { version = "0.7", features = ["io"] } tokio-util = { version = "0.7", features = ["io"] }
toml_edit = { version = "0.14", features = ["easy"] } toml_edit = { version = "0.13", features = ["easy"] }
tracing = "0.1.27" tracing = "0.1.27"
workspace_hack = { version = "0.1", path = "../../workspace_hack" } workspace_hack = { version = "0.1", path = "../../workspace_hack" }


@@ -5,7 +5,7 @@ edition = "2021"
[dependencies] [dependencies]
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_with = "2.0" serde_with = "1.12.0"
const_format = "0.2.21" const_format = "0.2.21"
utils = { path = "../utils" } utils = { path = "../utils" }


@@ -1,24 +1,8 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr}; use utils::id::{NodeId, TimelineId};
use utils::{
id::{NodeId, TenantId, TimelineId},
lsn::Lsn,
};
#[serde_as]
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct TimelineCreateRequest { pub struct TimelineCreateRequest {
#[serde_as(as = "DisplayFromStr")]
pub tenant_id: TenantId,
#[serde_as(as = "DisplayFromStr")]
pub timeline_id: TimelineId, pub timeline_id: TimelineId,
pub peer_ids: Option<Vec<NodeId>>, pub peer_ids: Vec<NodeId>,
pub pg_version: u32,
pub system_id: Option<u64>,
pub wal_seg_size: Option<u32>,
#[serde_as(as = "DisplayFromStr")]
pub commit_lsn: Lsn,
// If not passed, it is assigned to the beginning of commit_lsn segment.
pub local_start_lsn: Option<Lsn>,
} }
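For readers unfamiliar with the `serde_as`/`DisplayFromStr` attributes that appear on one side of this hunk: they make serde run a field through its `Display`/`FromStr` implementations, so IDs and LSNs travel as JSON strings rather than numbers or nested structs. A minimal sketch, independent of the Neon types and assuming serde, serde_with and serde_json are available (the `RequestExample` struct and its `id` field are invented for illustration):

use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DisplayFromStr};

#[serde_as]
#[derive(Serialize, Deserialize)]
struct RequestExample {
    // Serialized via Display/FromStr, i.e. as the JSON string "42"
    // instead of the JSON number 42.
    #[serde_as(as = "DisplayFromStr")]
    id: u64,
}

fn main() {
    let json = serde_json::to_string(&RequestExample { id: 42 }).unwrap();
    assert_eq!(json, r#"{"id":"42"}"#);

    let back: RequestExample = serde_json::from_str(&json).unwrap();
    assert_eq!(back.id, 42);
}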


@@ -20,7 +20,7 @@ tokio = { version = "1.17", features = ["macros"]}
tokio-rustls = "0.23" tokio-rustls = "0.23"
tracing = "0.1" tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] }
nix = "0.25" nix = "0.23.0"
signal-hook = "0.3.10" signal-hook = "0.3.10"
rand = "0.8.3" rand = "0.8.3"
jsonwebtoken = "8" jsonwebtoken = "8"
@@ -28,7 +28,7 @@ hex = { version = "0.4.3", features = ["serde"] }
rustls = "0.20.2" rustls = "0.20.2"
rustls-split = "0.3.0" rustls-split = "0.3.0"
git-version = "0.3.5" git-version = "0.3.5"
serde_with = "2.0" serde_with = "1.12.0"
once_cell = "1.13.0" once_cell = "1.13.0"
@@ -40,7 +40,7 @@ byteorder = "1.4.3"
bytes = "1.0.1" bytes = "1.0.1"
hex-literal = "0.3" hex-literal = "0.3"
tempfile = "3.2" tempfile = "3.2"
criterion = "0.4" criterion = "0.3"
rustls-pemfile = "1" rustls-pemfile = "1"
[[bench]] [[bench]]


@@ -66,11 +66,6 @@ impl Lsn {
(self.0 % seg_sz as u64) as usize (self.0 % seg_sz as u64) as usize
} }
/// Compute LSN of the segment start.
pub fn segment_lsn(self, seg_sz: usize) -> Lsn {
Lsn(self.0 - (self.0 % seg_sz as u64))
}
/// Compute the segment number /// Compute the segment number
pub fn segment_number(self, seg_sz: usize) -> u64 { pub fn segment_number(self, seg_sz: usize) -> u64 {
self.0 / seg_sz as u64 self.0 / seg_sz as u64
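As a quick illustration of the `segment_lsn` helper that appears on only one side of this hunk (it rounds an LSN down to the start of its WAL segment), here is the same arithmetic spelled out, assuming the default 16 MiB segment size:

fn segment_lsn(lsn: u64, seg_sz: u64) -> u64 {
    lsn - (lsn % seg_sz)
}

fn main() {
    let seg_sz: u64 = 16 * 1024 * 1024; // default WAL segment size, 16 MiB

    // An LSN somewhere inside the second segment rounds down to that segment's start...
    assert_eq!(segment_lsn(0x0100_0A28, seg_sz), 0x0100_0000);
    // ...and the segment number is plain integer division, as in `segment_number`.
    assert_eq!(0x0100_0A28 / seg_sz, 1);
}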


@@ -15,7 +15,7 @@ use std::sync::Arc;
use std::task::Poll; use std::task::Poll;
use tracing::{debug, error, trace}; use tracing::{debug, error, trace};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, BufReader}; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio_rustls::TlsAcceptor; use tokio_rustls::TlsAcceptor;
#[async_trait::async_trait] #[async_trait::async_trait]
@@ -66,8 +66,8 @@ pub enum ProcessMsgResult {
/// Always-writeable sock_split stream. /// Always-writeable sock_split stream.
/// May not be readable. See [`PostgresBackend::take_stream_in`] /// May not be readable. See [`PostgresBackend::take_stream_in`]
pub enum Stream { pub enum Stream {
Unencrypted(BufReader<tokio::net::TcpStream>), Unencrypted(tokio::net::TcpStream),
Tls(Box<tokio_rustls::server::TlsStream<BufReader<tokio::net::TcpStream>>>), Tls(Box<tokio_rustls::server::TlsStream<tokio::net::TcpStream>>),
Broken, Broken,
} }
@@ -157,7 +157,7 @@ impl PostgresBackend {
let peer_addr = socket.peer_addr()?; let peer_addr = socket.peer_addr()?;
Ok(Self { Ok(Self {
stream: Stream::Unencrypted(BufReader::new(socket)), stream: Stream::Unencrypted(socket),
buf_out: BytesMut::with_capacity(10 * 1024), buf_out: BytesMut::with_capacity(10 * 1024),
state: ProtoState::Initialization, state: ProtoState::Initialization,
md5_salt: [0u8; 4], md5_salt: [0u8; 4],


@@ -10,7 +10,6 @@ use serde::{Deserialize, Serialize};
use std::{ use std::{
borrow::Cow, borrow::Cow,
collections::HashMap, collections::HashMap,
fmt,
future::Future, future::Future,
io::{self, Cursor}, io::{self, Cursor},
str, str,
@@ -125,19 +124,6 @@ pub struct CancelKeyData {
pub cancel_key: i32, pub cancel_key: i32,
} }
impl fmt::Display for CancelKeyData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let hi = (self.backend_pid as u64) << 32;
let lo = self.cancel_key as u64;
let id = hi | lo;
// This format is more compact and might work better for logs.
f.debug_tuple("CancelKeyData")
.field(&format_args!("{:x}", id))
.finish()
}
}
use rand::distributions::{Distribution, Standard}; use rand::distributions::{Distribution, Standard};
impl Distribution<CancelKeyData> for Standard { impl Distribution<CancelKeyData> for Standard {
fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> CancelKeyData { fn sample<R: rand::Rng + ?Sized>(&self, rng: &mut R) -> CancelKeyData {


@@ -23,7 +23,7 @@ futures = "0.3.13"
hex = "0.4.3" hex = "0.4.3"
hyper = "0.14" hyper = "0.14"
itertools = "0.10.3" itertools = "0.10.3"
clap = { version = "4.0", features = ["string"] } clap = "3.0"
daemonize = "0.4.1" daemonize = "0.4.1"
tokio = { version = "1.17", features = ["process", "sync", "macros", "fs", "rt", "io-util", "time"] } tokio = { version = "1.17", features = ["process", "sync", "macros", "fs", "rt", "io-util", "time"] }
tokio-util = { version = "0.7.3", features = ["io", "io-util"] } tokio-util = { version = "0.7.3", features = ["io", "io-util"] }
@@ -38,25 +38,25 @@ tar = "0.4.33"
humantime = "2.1.0" humantime = "2.1.0"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1" serde_json = "1"
serde_with = "2.0" serde_with = "1.12.0"
humantime-serde = "1.1.1" humantime-serde = "1.1.1"
pprof = { git = "https://github.com/neondatabase/pprof-rs.git", branch = "wallclock-profiling", features = ["flamegraph"], optional = true } pprof = { git = "https://github.com/neondatabase/pprof-rs.git", branch = "wallclock-profiling", features = ["flamegraph"], optional = true }
toml_edit = { version = "0.14", features = ["easy"] } toml_edit = { version = "0.13", features = ["easy"] }
scopeguard = "1.1.0" scopeguard = "1.1.0"
const_format = "0.2.21" const_format = "0.2.21"
tracing = "0.1.36" tracing = "0.1.36"
signal-hook = "0.3.10" signal-hook = "0.3.10"
url = "2" url = "2"
nix = "0.25" nix = "0.23"
once_cell = "1.13.0" once_cell = "1.13.0"
crossbeam-utils = "0.8.5" crossbeam-utils = "0.8.5"
fail = "0.5.0" fail = "0.5.0"
git-version = "0.3.5" git-version = "0.3.5"
rstar = "0.9.3" rstar = "0.9.3"
num-traits = "0.2.15" num-traits = "0.2.15"
amplify_num = { git = "https://github.com/hlinnaka/rust-amplify.git", branch = "unsigned-int-perf" } amplify_num = "0.4.1"
pageserver_api = { path = "../libs/pageserver_api" } pageserver_api = { path = "../libs/pageserver_api" }
postgres_ffi = { path = "../libs/postgres_ffi" } postgres_ffi = { path = "../libs/postgres_ffi" }
@@ -67,13 +67,7 @@ remote_storage = { path = "../libs/remote_storage" }
workspace_hack = { version = "0.1", path = "../workspace_hack" } workspace_hack = { version = "0.1", path = "../workspace_hack" }
close_fds = "0.3.2" close_fds = "0.3.2"
walkdir = "2.3.2" walkdir = "2.3.2"
dashmap = "5.4.0"
[dev-dependencies] [dev-dependencies]
criterion = "0.4"
hex-literal = "0.3" hex-literal = "0.3"
tempfile = "3.2" tempfile = "3.2"
[[bench]]
name = "bench_layer_map"
harness = false

File diff suppressed because it is too large


@@ -0,0 +1,35 @@
//! Main entry point for the dump_layerfile executable
//!
//! A handy tool for debugging, that's all.
use anyhow::Result;
use clap::{App, Arg};
use pageserver::page_cache;
use pageserver::tenant::dump_layerfile_from_path;
use pageserver::virtual_file;
use std::path::PathBuf;
use utils::project_git_version;
project_git_version!(GIT_VERSION);
fn main() -> Result<()> {
let arg_matches = App::new("Neon dump_layerfile utility")
.about("Dump contents of one layer file, for debugging")
.version(GIT_VERSION)
.arg(
Arg::new("path")
.help("Path to file to dump")
.required(true)
.index(1),
)
.get_matches();
let path = PathBuf::from(arg_matches.value_of("path").unwrap());
// Basic initialization of things that don't change after startup
virtual_file::init(10);
page_cache::init(100);
dump_layerfile_from_path(&path, true)?;
Ok(())
}


@@ -6,12 +6,10 @@ use tracing::*;
use anyhow::{anyhow, bail, Context, Result}; use anyhow::{anyhow, bail, Context, Result};
use clap::{Arg, ArgAction, Command}; use clap::{App, Arg};
use daemonize::Daemonize; use daemonize::Daemonize;
use fail::FailScenario; use fail::FailScenario;
use metrics::set_build_info_metric;
use pageserver::{ use pageserver::{
config::{defaults::*, PageServerConf}, config::{defaults::*, PageServerConf},
http, page_cache, page_service, profiling, task_mgr, http, page_cache, page_service, profiling, task_mgr,
@@ -33,35 +31,72 @@ use utils::{
project_git_version!(GIT_VERSION); project_git_version!(GIT_VERSION);
const FEATURES: &[&str] = &[
#[cfg(feature = "testing")]
"testing",
#[cfg(feature = "fail/failpoints")]
"fail/failpoints",
#[cfg(feature = "profiling")]
"profiling",
];
 fn version() -> String {
     format!(
-        "{GIT_VERSION} failpoints: {}, features: {:?}",
-        fail::has_failpoints(),
-        FEATURES,
+        "{GIT_VERSION} profiling:{} failpoints:{}",
+        cfg!(feature = "profiling"),
+        fail::has_failpoints()
     )
 }
 fn main() -> anyhow::Result<()> {
-    let arg_matches = cli().get_matches();
-    if arg_matches.get_flag("enabled-features") {
-        println!("{{\"features\": {FEATURES:?} }}");
+    let arg_matches = App::new("Neon page server")
+        .about("Materializes WAL stream to pages and serves them to the postgres")
+        .version(&*version())
+        .arg(
+            Arg::new("daemonize")
+                .short('d')
+                .long("daemonize")
+                .takes_value(false)
+                .help("Run in the background"),
+        )
+        .arg(
+            Arg::new("init")
+                .long("init")
+                .takes_value(false)
+                .help("Initialize pageserver with all given config overrides"),
+        )
+        .arg(
+            Arg::new("workdir")
+                .short('D')
+                .long("workdir")
+                .takes_value(true)
+                .help("Working directory for the pageserver"),
+        )
+        // See `settings.md` for more details on the extra configuration patameters pageserver can process
+        .arg(
+            Arg::new("config-override")
+                .short('c')
+                .takes_value(true)
+                .number_of_values(1)
+                .multiple_occurrences(true)
+                .help("Additional configuration overrides of the ones from the toml config file (or new ones to add there).
+                Any option has to be a valid toml document, example: `-c=\"foo='hey'\"` `-c=\"foo={value=1}\"`"),
+        )
+        .arg(Arg::new("update-config").long("update-config").takes_value(false).help(
+            "Update the config file when started",
+        ))
+        .arg(
+            Arg::new("enabled-features")
+                .long("enabled-features")
+                .takes_value(false)
+                .help("Show enabled compile time features"),
+        )
+        .get_matches();
+    if arg_matches.is_present("enabled-features") {
+        let features: &[&str] = &[
+            #[cfg(feature = "testing")]
+            "testing",
+            #[cfg(feature = "profiling")]
+            "profiling",
+        ];
+        println!("{{\"features\": {features:?} }}");
         return Ok(());
     }
-    let workdir = arg_matches
-        .get_one::<String>("workdir")
-        .map(Path::new)
-        .unwrap_or_else(|| Path::new(".neon"));
+    let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".neon"));
     let workdir = workdir
         .canonicalize()
         .with_context(|| format!("Error opening workdir '{}'", workdir.display()))?;
@@ -75,7 +110,7 @@ fn main() -> anyhow::Result<()> {
) )
})?; })?;
let daemonize = arg_matches.get_flag("daemonize"); let daemonize = arg_matches.is_present("daemonize");
let conf = match initialize_config(&cfg_file_path, arg_matches, &workdir)? { let conf = match initialize_config(&cfg_file_path, arg_matches, &workdir)? {
ControlFlow::Continue(conf) => conf, ControlFlow::Continue(conf) => conf,
@@ -113,8 +148,8 @@ fn initialize_config(
arg_matches: clap::ArgMatches, arg_matches: clap::ArgMatches,
workdir: &Path, workdir: &Path,
) -> anyhow::Result<ControlFlow<(), &'static PageServerConf>> { ) -> anyhow::Result<ControlFlow<(), &'static PageServerConf>> {
let init = arg_matches.get_flag("init"); let init = arg_matches.is_present("init");
let update_config = init || arg_matches.get_flag("update-config"); let update_config = init || arg_matches.is_present("update-config");
let (mut toml, config_file_exists) = if cfg_file_path.is_file() { let (mut toml, config_file_exists) = if cfg_file_path.is_file() {
if init { if init {
@@ -156,10 +191,13 @@ fn initialize_config(
) )
}; };
if let Some(values) = arg_matches.get_many::<String>("config-override") { if let Some(values) = arg_matches.values_of("config-override") {
for option_line in values { for option_line in values {
let doc = toml_edit::Document::from_str(option_line).with_context(|| { let doc = toml_edit::Document::from_str(option_line).with_context(|| {
format!("Option '{option_line}' could not be parsed as a toml document") format!(
"Option '{}' could not be parsed as a toml document",
option_line
)
})?; })?;
for (key, item) in doc.iter() { for (key, item) in doc.iter() {
@@ -201,7 +239,7 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<()
// Initialize logger // Initialize logger
let log_file = logging::init(LOG_FILE_NAME, daemonize)?; let log_file = logging::init(LOG_FILE_NAME, daemonize)?;
info!("version: {}", version()); info!("version: {GIT_VERSION}");
// TODO: Check that it looks like a valid repository before going further // TODO: Check that it looks like a valid repository before going further
@@ -318,8 +356,6 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<()
}, },
); );
set_build_info_metric(GIT_VERSION);
// All started up! Now just sit and wait for shutdown signal. // All started up! Now just sit and wait for shutdown signal.
signals.handle(|signal| match signal { signals.handle(|signal| match signal {
Signal::Quit => { Signal::Quit => {
@@ -342,55 +378,3 @@ fn start_pageserver(conf: &'static PageServerConf, daemonize: bool) -> Result<()
} }
}) })
} }
fn cli() -> Command {
Command::new("Neon page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.version(version())
.arg(
Arg::new("daemonize")
.short('d')
.long("daemonize")
.action(ArgAction::SetTrue)
.help("Run in the background"),
)
.arg(
Arg::new("init")
.long("init")
.action(ArgAction::SetTrue)
.help("Initialize pageserver with all given config overrides"),
)
.arg(
Arg::new("workdir")
.short('D')
.long("workdir")
.help("Working directory for the pageserver"),
)
// See `settings.md` for more details on the extra configuration patameters pageserver can process
.arg(
Arg::new("config-override")
.short('c')
.num_args(1)
.action(ArgAction::Append)
.help("Additional configuration overrides of the ones from the toml config file (or new ones to add there). \
Any option has to be a valid toml document, example: `-c=\"foo='hey'\"` `-c=\"foo={value=1}\"`"),
)
.arg(
Arg::new("update-config")
.long("update-config")
.action(ArgAction::SetTrue)
.help("Update the config file when started"),
)
.arg(
Arg::new("enabled-features")
.long("enabled-features")
.action(ArgAction::SetTrue)
.help("Show enabled compile time features"),
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}


@@ -1,154 +0,0 @@
//! A helper tool to manage pageserver binary files.
//! Accepts a file as an argument, attempts to parse it with all ways possible
//! and prints its interpreted context.
//!
//! Separate, `metadata` subcommand allows to print and update pageserver's metadata file.
use std::{
path::{Path, PathBuf},
str::FromStr,
};
use anyhow::Context;
use clap::{value_parser, Arg, Command};
use pageserver::{
page_cache,
tenant::{dump_layerfile_from_path, metadata::TimelineMetadata},
virtual_file,
};
use postgres_ffi::ControlFileData;
use utils::{lsn::Lsn, project_git_version};
project_git_version!(GIT_VERSION);
const METADATA_SUBCOMMAND: &str = "metadata";
fn main() -> anyhow::Result<()> {
let arg_matches = cli().get_matches();
match arg_matches.subcommand() {
Some((subcommand_name, subcommand_matches)) => {
let path = subcommand_matches
.get_one::<PathBuf>("metadata_path")
.context("'metadata_path' argument is missing")?
.to_path_buf();
anyhow::ensure!(
subcommand_name == METADATA_SUBCOMMAND,
"Unknown subcommand {subcommand_name}"
);
handle_metadata(&path, subcommand_matches)?;
}
None => {
let path = arg_matches
.get_one::<PathBuf>("path")
.context("'path' argument is missing")?
.to_path_buf();
println!(
"No subcommand specified, attempting to guess the format for file {}",
path.display()
);
if let Err(e) = read_pg_control_file(&path) {
println!(
"Failed to read input file as a pg control one: {e:#}\n\
Attempting to read it as layer file"
);
print_layerfile(&path)?;
}
}
};
Ok(())
}
fn read_pg_control_file(control_file_path: &Path) -> anyhow::Result<()> {
let control_file = ControlFileData::decode(&std::fs::read(&control_file_path)?)?;
println!("{control_file:?}");
let control_file_initdb = Lsn(control_file.checkPoint);
println!(
"pg_initdb_lsn: {}, aligned: {}",
control_file_initdb,
control_file_initdb.align()
);
Ok(())
}
fn print_layerfile(path: &Path) -> anyhow::Result<()> {
// Basic initialization of things that don't change after startup
virtual_file::init(10);
page_cache::init(100);
dump_layerfile_from_path(path, true)
}
fn handle_metadata(path: &Path, arg_matches: &clap::ArgMatches) -> Result<(), anyhow::Error> {
let metadata_bytes = std::fs::read(&path)?;
let mut meta = TimelineMetadata::from_bytes(&metadata_bytes)?;
println!("Current metadata:\n{meta:?}");
let mut update_meta = false;
if let Some(disk_consistent_lsn) = arg_matches.get_one::<String>("disk_consistent_lsn") {
meta = TimelineMetadata::new(
Lsn::from_str(disk_consistent_lsn)?,
meta.prev_record_lsn(),
meta.ancestor_timeline(),
meta.ancestor_lsn(),
meta.latest_gc_cutoff_lsn(),
meta.initdb_lsn(),
meta.pg_version(),
);
update_meta = true;
}
if let Some(prev_record_lsn) = arg_matches.get_one::<String>("prev_record_lsn") {
meta = TimelineMetadata::new(
meta.disk_consistent_lsn(),
Some(Lsn::from_str(prev_record_lsn)?),
meta.ancestor_timeline(),
meta.ancestor_lsn(),
meta.latest_gc_cutoff_lsn(),
meta.initdb_lsn(),
meta.pg_version(),
);
update_meta = true;
}
if update_meta {
let metadata_bytes = meta.to_bytes()?;
std::fs::write(&path, &metadata_bytes)?;
}
Ok(())
}
fn cli() -> Command {
Command::new("Neon Pageserver binutils")
.about("Reads pageserver (and related) binary files management utility")
.version(GIT_VERSION)
.arg(
Arg::new("path")
.help("Input file path")
.value_parser(value_parser!(PathBuf))
.required(false),
)
.subcommand(
Command::new(METADATA_SUBCOMMAND)
.about("Read and update pageserver metadata file")
.arg(
Arg::new("metadata_path")
.help("Input metadata file path")
.value_parser(value_parser!(PathBuf))
.required(false),
)
.arg(
Arg::new("disk_consistent_lsn")
.long("disk_consistent_lsn")
.help("Replace disk consistent Lsn"),
)
.arg(
Arg::new("prev_record_lsn")
.long("prev_record_lsn")
.help("Replace previous record Lsn"),
),
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}


@@ -0,0 +1,75 @@
//! Main entry point for the edit_metadata executable
//!
//! A handy tool for debugging, that's all.
use anyhow::Result;
use clap::{App, Arg};
use pageserver::tenant::metadata::TimelineMetadata;
use std::path::PathBuf;
use std::str::FromStr;
use utils::{lsn::Lsn, project_git_version};
project_git_version!(GIT_VERSION);
fn main() -> Result<()> {
let arg_matches = App::new("Neon update metadata utility")
.about("Dump or update metadata file")
.version(GIT_VERSION)
.arg(
Arg::new("path")
.help("Path to metadata file")
.required(true),
)
.arg(
Arg::new("disk_lsn")
.short('d')
.long("disk_lsn")
.takes_value(true)
.help("Replace disk constistent lsn"),
)
.arg(
Arg::new("prev_lsn")
.short('p')
.long("prev_lsn")
.takes_value(true)
.help("Previous record LSN"),
)
.get_matches();
let path = PathBuf::from(arg_matches.value_of("path").unwrap());
let metadata_bytes = std::fs::read(&path)?;
let mut meta = TimelineMetadata::from_bytes(&metadata_bytes)?;
println!("Current metadata:\n{:?}", &meta);
let mut update_meta = false;
if let Some(disk_lsn) = arg_matches.value_of("disk_lsn") {
meta = TimelineMetadata::new(
Lsn::from_str(disk_lsn)?,
meta.prev_record_lsn(),
meta.ancestor_timeline(),
meta.ancestor_lsn(),
meta.latest_gc_cutoff_lsn(),
meta.initdb_lsn(),
meta.pg_version(),
);
update_meta = true;
}
if let Some(prev_lsn) = arg_matches.value_of("prev_lsn") {
meta = TimelineMetadata::new(
meta.disk_consistent_lsn(),
Some(Lsn::from_str(prev_lsn)?),
meta.ancestor_timeline(),
meta.ancestor_lsn(),
meta.latest_gc_cutoff_lsn(),
meta.initdb_lsn(),
meta.pg_version(),
);
update_meta = true;
}
if update_meta {
let metadata_bytes = meta.to_bytes()?;
std::fs::write(&path, &metadata_bytes)?;
}
Ok(())
}


@@ -1,11 +1,7 @@
openapi: "3.0.2" openapi: "3.0.2"
info: info:
title: Page Server API title: Page Server API
description: Neon Pageserver API
version: "1.0" version: "1.0"
license:
name: "Apache"
url: https://github.com/neondatabase/neon/blob/main/LICENSE
servers: servers:
- url: "" - url: ""
paths: paths:
@@ -211,6 +207,7 @@ paths:
schema: schema:
$ref: "#/components/schemas/Error" $ref: "#/components/schemas/Error"
/v1/tenant/{tenant_id}/timeline/{timeline_id}/get_lsn_by_timestamp: /v1/tenant/{tenant_id}/timeline/{timeline_id}/get_lsn_by_timestamp:
parameters: parameters:
- name: tenant_id - name: tenant_id
@@ -615,9 +612,6 @@ components:
required: required:
- timeline_id - timeline_id
- tenant_id - tenant_id
- last_record_lsn
- disk_consistent_lsn
- awaits_download
properties: properties:
timeline_id: timeline_id:
type: string type: string
@@ -625,15 +619,33 @@ components:
tenant_id: tenant_id:
type: string type: string
format: hex format: hex
local:
$ref: "#/components/schemas/LocalTimelineInfo"
remote:
$ref: "#/components/schemas/RemoteTimelineInfo"
RemoteTimelineInfo:
type: object
required:
- awaits_download
- remote_consistent_lsn
properties:
awaits_download:
type: boolean
remote_consistent_lsn:
type: string
format: hex
LocalTimelineInfo:
type: object
required:
- last_record_lsn
- disk_consistent_lsn
properties:
last_record_lsn: last_record_lsn:
type: string type: string
format: hex format: hex
disk_consistent_lsn: disk_consistent_lsn:
type: string type: string
format: hex format: hex
remote_consistent_lsn:
type: string
format: hex
ancestor_timeline_id: ancestor_timeline_id:
type: string type: string
format: hex format: hex
@@ -658,39 +670,7 @@ components:
format: hex format: hex
last_received_msg_ts: last_received_msg_ts:
type: integer type: integer
awaits_download:
type: boolean
# These 'local' and 'remote' fields just duplicate some of the fields
# above. They are kept for backwards-compatibility. They can be removed,
# when the control plane has been updated to look at the above fields
# directly.
local:
$ref: "#/components/schemas/LocalTimelineInfo"
remote:
$ref: "#/components/schemas/RemoteTimelineInfo"
LocalTimelineInfo:
type: object
properties:
ancestor_timeline_id:
type: string
format: hex
ancestor_lsn:
type: string
format: hex
current_logical_size:
type: integer
current_physical_size:
type: integer
RemoteTimelineInfo:
type: object
required:
- remote_consistent_lsn
properties:
remote_consistent_lsn:
type: string
format: hex
Error: Error:
type: object type: object
required: required:


@@ -79,13 +79,13 @@ fn get_config(request: &Request<Body>) -> &'static PageServerConf {
get_state(request).conf get_state(request).conf
} }
// Helper function to construct a TimelineInfo struct for a timeline // Helper functions to construct a LocalTimelineInfo struct for a timeline
async fn build_timeline_info(
state: &State, fn local_timeline_info_from_timeline(
timeline: &Arc<Timeline>, timeline: &Arc<Timeline>,
include_non_incremental_logical_size: bool, include_non_incremental_logical_size: bool,
include_non_incremental_physical_size: bool, include_non_incremental_physical_size: bool,
) -> anyhow::Result<TimelineInfo> { ) -> anyhow::Result<LocalTimelineInfo> {
let last_record_lsn = timeline.get_last_record_lsn(); let last_record_lsn = timeline.get_last_record_lsn();
let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = { let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = {
let guard = timeline.last_received_wal.lock().unwrap(); let guard = timeline.last_received_wal.lock().unwrap();
@@ -100,47 +100,24 @@ async fn build_timeline_info(
} }
}; };
let (remote_consistent_lsn, awaits_download) = if let Some(remote_entry) = state let info = LocalTimelineInfo {
.remote_index ancestor_timeline_id: timeline.get_ancestor_timeline_id(),
.read() ancestor_lsn: {
.await match timeline.get_ancestor_lsn() {
.timeline_entry(&TenantTimelineId { Lsn(0) => None,
tenant_id: timeline.tenant_id, lsn @ Lsn(_) => Some(lsn),
timeline_id: timeline.timeline_id, }
}) { },
(
Some(remote_entry.metadata.disk_consistent_lsn()),
remote_entry.awaits_download,
)
} else {
(None, false)
};
let ancestor_timeline_id = timeline.get_ancestor_timeline_id();
let ancestor_lsn = match timeline.get_ancestor_lsn() {
Lsn(0) => None,
lsn @ Lsn(_) => Some(lsn),
};
let current_logical_size = match timeline.get_current_logical_size() {
Ok(size) => Some(size),
Err(err) => {
error!("Timeline info creation failed to get current logical size: {err:?}");
None
}
};
let current_physical_size = Some(timeline.get_physical_size());
let info = TimelineInfo {
tenant_id: timeline.tenant_id,
timeline_id: timeline.timeline_id,
ancestor_timeline_id,
ancestor_lsn,
disk_consistent_lsn: timeline.get_disk_consistent_lsn(), disk_consistent_lsn: timeline.get_disk_consistent_lsn(),
last_record_lsn, last_record_lsn,
prev_record_lsn: Some(timeline.get_prev_record_lsn()), prev_record_lsn: Some(timeline.get_prev_record_lsn()),
latest_gc_cutoff_lsn: *timeline.get_latest_gc_cutoff_lsn(), latest_gc_cutoff_lsn: *timeline.get_latest_gc_cutoff_lsn(),
current_logical_size, current_logical_size: Some(
current_physical_size, timeline
.get_current_logical_size()
.context("Timeline info creation failed to get current logical size")?,
),
current_physical_size: Some(timeline.get_physical_size()),
current_logical_size_non_incremental: if include_non_incremental_logical_size { current_logical_size_non_incremental: if include_non_incremental_logical_size {
Some(timeline.get_current_logical_size_non_incremental(last_record_lsn)?) Some(timeline.get_current_logical_size_non_incremental(last_record_lsn)?)
} else { } else {
@@ -155,25 +132,32 @@ async fn build_timeline_info(
last_received_msg_lsn, last_received_msg_lsn,
last_received_msg_ts, last_received_msg_ts,
pg_version: timeline.pg_version, pg_version: timeline.pg_version,
remote_consistent_lsn,
awaits_download,
// Duplicate some fields in 'local' and 'remote' fields, for backwards-compatility
// with the control plane.
local: LocalTimelineInfo {
ancestor_timeline_id,
ancestor_lsn,
current_logical_size,
current_physical_size,
},
remote: RemoteTimelineInfo {
remote_consistent_lsn,
},
}; };
Ok(info) Ok(info)
} }
fn list_local_timelines(
tenant_id: TenantId,
include_non_incremental_logical_size: bool,
include_non_incremental_physical_size: bool,
) -> Result<Vec<(TimelineId, LocalTimelineInfo)>> {
let tenant = tenant_mgr::get_tenant(tenant_id, true)?;
let timelines = tenant.list_timelines();
let mut local_timeline_info = Vec::with_capacity(timelines.len());
for (timeline_id, repository_timeline) in timelines {
local_timeline_info.push((
timeline_id,
local_timeline_info_from_timeline(
&repository_timeline,
include_non_incremental_logical_size,
include_non_incremental_physical_size,
)?,
))
}
Ok(local_timeline_info)
}
// healthcheck handler // healthcheck handler
async fn status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> { async fn status_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
let config = get_config(&request); let config = get_config(&request);
@@ -185,8 +169,6 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
let request_data: TimelineCreateRequest = json_request(&mut request).await?; let request_data: TimelineCreateRequest = json_request(&mut request).await?;
check_permission(&request, Some(tenant_id))?; check_permission(&request, Some(tenant_id))?;
let state = get_state(&request);
let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?; let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?;
let new_timeline_info = async { let new_timeline_info = async {
match tenant.create_timeline( match tenant.create_timeline(
@@ -197,10 +179,14 @@ async fn timeline_create_handler(mut request: Request<Body>) -> Result<Response<
).await { ).await {
Ok(Some(new_timeline)) => { Ok(Some(new_timeline)) => {
// Created. Construct a TimelineInfo for it. // Created. Construct a TimelineInfo for it.
let timeline_info = build_timeline_info(state, &new_timeline, false, false) let local_info = local_timeline_info_from_timeline(&new_timeline, false, false)
.await
.map_err(ApiError::InternalServerError)?; .map_err(ApiError::InternalServerError)?;
Ok(Some(timeline_info)) Ok(Some(TimelineInfo {
tenant_id,
timeline_id: new_timeline.timeline_id,
local: Some(local_info),
remote: None,
}))
} }
Ok(None) => Ok(None), // timeline already exists Ok(None) => Ok(None), // timeline already exists
Err(err) => Err(ApiError::InternalServerError(err)), Err(err) => Err(ApiError::InternalServerError(err)),
@@ -223,8 +209,6 @@ async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>,
query_param_present(&request, "include-non-incremental-physical-size"); query_param_present(&request, "include-non-incremental-physical-size");
check_permission(&request, Some(tenant_id))?; check_permission(&request, Some(tenant_id))?;
let state = get_state(&request);
let timelines = tokio::task::spawn_blocking(move || { let timelines = tokio::task::spawn_blocking(move || {
let _enter = info_span!("timeline_list", tenant = %tenant_id).entered(); let _enter = info_span!("timeline_list", tenant = %tenant_id).entered();
let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?; let tenant = tenant_mgr::get_tenant(tenant_id, true).map_err(ApiError::NotFound)?;
@@ -234,18 +218,36 @@ async fn timeline_list_handler(request: Request<Body>) -> Result<Response<Body>,
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))??; .map_err(|e: JoinError| ApiError::InternalServerError(e.into()))??;
let mut response_data = Vec::with_capacity(timelines.len()); let mut response_data = Vec::with_capacity(timelines.len());
for timeline in timelines { for (timeline_id, timeline) in timelines {
let timeline_info = build_timeline_info( let local = match local_timeline_info_from_timeline(
state,
&timeline, &timeline,
include_non_incremental_logical_size, include_non_incremental_logical_size,
include_non_incremental_physical_size, include_non_incremental_physical_size,
) ) {
.await Ok(local) => Some(local),
.context("Failed to convert tenant timeline {timeline_id} into the local one: {e:?}") Err(e) => {
.map_err(ApiError::InternalServerError)?; error!("Failed to convert tenant timeline {timeline_id} into the local one: {e:?}");
None
}
};
response_data.push(timeline_info); response_data.push(TimelineInfo {
tenant_id,
timeline_id,
local,
remote: get_state(&request)
.remote_index
.read()
.await
.timeline_entry(&TenantTimelineId {
tenant_id,
timeline_id,
})
.map(|remote_entry| RemoteTimelineInfo {
remote_consistent_lsn: remote_entry.metadata.disk_consistent_lsn(),
awaits_download: remote_entry.awaits_download,
}),
})
} }
json_response(StatusCode::OK, response_data) json_response(StatusCode::OK, response_data)
@@ -290,33 +292,59 @@ async fn timeline_detail_handler(request: Request<Body>) -> Result<Response<Body
query_param_present(&request, "include-non-incremental-physical-size"); query_param_present(&request, "include-non-incremental-physical-size");
check_permission(&request, Some(tenant_id))?; check_permission(&request, Some(tenant_id))?;
let state = get_state(&request); let (local_timeline_info, remote_timeline_info) = async {
let timeline_info = async {
let timeline = tokio::task::spawn_blocking(move || { let timeline = tokio::task::spawn_blocking(move || {
tenant_mgr::get_tenant(tenant_id, true)?.get_timeline(timeline_id) tenant_mgr::get_tenant(tenant_id, true)?.get_timeline(timeline_id)
}) })
.await .await
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))?; .map_err(|e: JoinError| ApiError::InternalServerError(e.into()))?;
let timeline = timeline.map_err(ApiError::NotFound)?; let local_timeline_info = match timeline.and_then(|timeline| {
local_timeline_info_from_timeline(
&timeline,
include_non_incremental_logical_size,
include_non_incremental_physical_size,
)
}) {
Ok(local_info) => Some(local_info),
Err(e) => {
error!("Failed to get local timeline info: {e:#}");
None
}
};
let timeline_info = build_timeline_info( let remote_timeline_info = {
state, let remote_index_read = get_state(&request).remote_index.read().await;
&timeline, remote_index_read
include_non_incremental_logical_size, .timeline_entry(&TenantTimelineId {
include_non_incremental_physical_size, tenant_id,
) timeline_id,
.await })
.context("Failed to get local timeline info: {e:#}") .map(|remote_entry| RemoteTimelineInfo {
.map_err(ApiError::InternalServerError)?; remote_consistent_lsn: remote_entry.metadata.disk_consistent_lsn(),
awaits_download: remote_entry.awaits_download,
Ok::<_, ApiError>(timeline_info) })
};
Ok::<_, ApiError>((local_timeline_info, remote_timeline_info))
} }
.instrument(info_span!("timeline_detail", tenant = %tenant_id, timeline = %timeline_id)) .instrument(info_span!("timeline_detail", tenant = %tenant_id, timeline = %timeline_id))
.await?; .await?;
json_response(StatusCode::OK, timeline_info) if local_timeline_info.is_none() && remote_timeline_info.is_none() {
Err(ApiError::NotFound(anyhow!(
"Timeline {tenant_id}/{timeline_id} is not found neither locally nor remotely"
)))
} else {
json_response(
StatusCode::OK,
TimelineInfo {
tenant_id,
timeline_id,
local: local_timeline_info,
remote: remote_timeline_info,
},
)
}
} }
async fn get_lsn_by_timestamp_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> { async fn get_lsn_by_timestamp_handler(request: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -386,7 +414,7 @@ async fn tenant_attach_handler(request: Request<Body>) -> Result<Response<Body>,
} }
return json_response(StatusCode::ACCEPTED, ()); return json_response(StatusCode::ACCEPTED, ());
} }
// no tenant in the index, release the lock to make the potentially lengthy download operation // no tenant in the index, release the lock to make the potentially lengthy download opetation
drop(index_accessor); drop(index_accessor);
// download index parts for every tenant timeline // download index parts for every tenant timeline
@@ -538,27 +566,36 @@ async fn tenant_status(request: Request<Body>) -> Result<Response<Body>, ApiErro
false false
}); });
let (tenant_state, current_physical_size) = match tenant { let tenant_state = match tenant {
Ok(tenant) => { Ok(tenant) => tenant.current_state(),
let timelines = tenant.list_timelines();
// Calculate total physical size of all timelines
let mut current_physical_size = 0;
for timeline in timelines {
current_physical_size += timeline.get_physical_size();
}
(tenant.current_state(), Some(current_physical_size))
}
Err(e) => { Err(e) => {
error!("Failed to get local tenant state: {e:#}"); error!("Failed to get local tenant state: {e:#}");
if has_in_progress_downloads { if has_in_progress_downloads {
(TenantState::Paused, None) TenantState::Paused
} else { } else {
(TenantState::Broken, None) TenantState::Broken
} }
} }
}; };
let current_physical_size =
match tokio::task::spawn_blocking(move || list_local_timelines(tenant_id, false, false))
.await
.map_err(|e: JoinError| ApiError::InternalServerError(e.into()))?
{
Err(err) => {
// Getting local timelines can fail when no local tenant directory is on disk (e.g, when tenant data is being downloaded).
// In that case, put a warning message into log and operate normally.
warn!("Failed to get local timelines for tenant {tenant_id}: {err}");
None
}
Ok(local_timeline_infos) => Some(
local_timeline_infos
.into_iter()
.fold(0, |acc, x| acc + x.1.current_physical_size.unwrap()),
),
};
json_response( json_response(
StatusCode::OK, StatusCode::OK,
TenantInfo { TenantInfo {
@@ -747,7 +784,7 @@ async fn tenant_config_handler(mut request: Request<Body>) -> Result<Response<Bo
json_response(StatusCode::OK, ()) json_response(StatusCode::OK, ())
} }
#[cfg(feature = "testing")] #[cfg(any(feature = "testing", feature = "failpoints"))]
async fn failpoints_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> { async fn failpoints_handler(mut request: Request<Body>) -> Result<Response<Body>, ApiError> {
if !fail::has_failpoints() { if !fail::has_failpoints() {
return Err(ApiError::BadRequest(anyhow!( return Err(ApiError::BadRequest(anyhow!(


@@ -119,6 +119,32 @@ impl<T> TenantTimelineValues<T> {
fn new() -> Self { fn new() -> Self {
Self(HashMap::new()) Self(HashMap::new())
} }
fn with_capacity(capacity: usize) -> Self {
Self(HashMap::with_capacity(capacity))
}
/// A convenience method to map certain values and omit some of them, if needed.
/// Tenants that won't have any timeline entries due to the filtering, will still be preserved
/// in the structure.
fn filter_map<F, NewT>(self, map: F) -> TenantTimelineValues<NewT>
where
F: Fn(T) -> Option<NewT>,
{
let capacity = self.0.len();
self.0.into_iter().fold(
TenantTimelineValues::<NewT>::with_capacity(capacity),
|mut new_values, (tenant_id, old_values)| {
let new_timeline_values = new_values.0.entry(tenant_id).or_default();
for (timeline_id, old_value) in old_values {
if let Some(new_value) = map(old_value) {
new_timeline_values.insert(timeline_id, new_value);
}
}
new_values
},
)
}
} }
/// A suffix to be used during file sync from the remote storage, /// A suffix to be used during file sync from the remote storage,
@@ -155,3 +181,35 @@ mod backoff_defaults_tests {
); );
} }
} }
#[cfg(test)]
mod tests {
use crate::tenant::harness::TIMELINE_ID;
use super::*;
#[test]
fn tenant_timeline_value_mapping() {
let first_tenant = TenantId::generate();
let second_tenant = TenantId::generate();
assert_ne!(first_tenant, second_tenant);
let mut initial = TenantTimelineValues::new();
initial
.0
.entry(first_tenant)
.or_default()
.insert(TIMELINE_ID, "test_value");
let _ = initial.0.entry(second_tenant).or_default();
assert_eq!(initial.0.len(), 2, "Should have entries for both tenants");
let filtered = initial.filter_map(|_| None::<&str>).0;
assert_eq!(
filtered.len(),
2,
"Should have entries for both tenants even after filtering away all entries"
);
assert!(filtered.contains_key(&first_tenant));
assert!(filtered.contains_key(&second_tenant));
}
}


@@ -107,20 +107,18 @@ static CURRENT_LOGICAL_SIZE: Lazy<UIntGaugeVec> = Lazy::new(|| {
// Metrics for cloud upload. These metrics reflect data uploaded to cloud storage, // Metrics for cloud upload. These metrics reflect data uploaded to cloud storage,
// or in testing they estimate how much we would upload if we did. // or in testing they estimate how much we would upload if we did.
static NUM_PERSISTENT_FILES_CREATED: Lazy<IntCounterVec> = Lazy::new(|| { static NUM_PERSISTENT_FILES_CREATED: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter_vec!( register_int_counter!(
"pageserver_created_persistent_files_total", "pageserver_created_persistent_files_total",
"Number of files created that are meant to be uploaded to cloud storage", "Number of files created that are meant to be uploaded to cloud storage",
&["tenant_id", "timeline_id"]
) )
.expect("failed to define a metric") .expect("failed to define a metric")
}); });
static PERSISTENT_BYTES_WRITTEN: Lazy<IntCounterVec> = Lazy::new(|| { static PERSISTENT_BYTES_WRITTEN: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter_vec!( register_int_counter!(
"pageserver_written_persistent_bytes_total", "pageserver_written_persistent_bytes_total",
"Total bytes written that are meant to be uploaded to cloud storage", "Total bytes written that are meant to be uploaded to cloud storage",
&["tenant_id", "timeline_id"]
) )
.expect("failed to define a metric") .expect("failed to define a metric")
}); });
@@ -277,15 +275,11 @@ pub static TENANT_TASK_EVENTS: Lazy<IntCounterVec> = Lazy::new(|| {
/// smallest redo processing times. These buckets allow us to measure down /// smallest redo processing times. These buckets allow us to measure down
/// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec. /// to 5us, which equates to 200'000 pages/sec, which equates to 1.6GB/sec.
/// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec. /// This is much better than the previous 5ms aka 200 pages/sec aka 1.6MB/sec.
///
/// Values up to 1s are recorded because metrics show that we have redo
/// durations and lock times larger than 0.250s.
macro_rules! redo_histogram_time_buckets { macro_rules! redo_histogram_time_buckets {
() => { () => {
vec![ vec![
0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000, 0.000_005, 0.000_010, 0.000_025, 0.000_050, 0.000_100, 0.000_250, 0.000_500, 0.001_000,
0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000, 0.500_000, 0.002_500, 0.005_000, 0.010_000, 0.025_000, 0.050_000, 0.100_000, 0.250_000,
1.000_000,
] ]
}; };
} }
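A quick sanity check of the figures quoted in the bucket comment above, assuming the standard 8 KiB Postgres page size:

fn main() {
    let page_size_bytes = 8 * 1024_u64; // standard Postgres page size
    let smallest_bucket_secs = 0.000_005; // 5 us, the smallest histogram bucket

    let pages_per_second = 1.0 / smallest_bucket_secs;
    let bytes_per_second = pages_per_second * page_size_bytes as f64;

    // 1 / 5 us = 200'000 pages/sec, and 200'000 * 8 KiB ~= 1.6 GB/sec,
    // matching the comment.
    assert_eq!(pages_per_second.round() as u64, 200_000);
    assert!((bytes_per_second / 1e9 - 1.64).abs() < 0.01);
}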
@@ -300,17 +294,6 @@ macro_rules! redo_histogram_count_buckets {
}; };
} }
macro_rules! redo_bytes_histogram_count_buckets {
() => {
// powers of (2^.5), from 2^4.5 to 2^15 (22 buckets)
// rounded up to the next multiple of 8 to capture any MAXALIGNed record of that size, too.
vec![
24.0, 32.0, 48.0, 64.0, 96.0, 128.0, 184.0, 256.0, 368.0, 512.0, 728.0, 1024.0, 1456.0,
2048.0, 2904.0, 4096.0, 5800.0, 8192.0, 11592.0, 16384.0, 23176.0, 32768.0,
]
};
}
pub static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| { pub static WAL_REDO_TIME: Lazy<Histogram> = Lazy::new(|| {
register_histogram!( register_histogram!(
"pageserver_wal_redo_seconds", "pageserver_wal_redo_seconds",
@@ -338,15 +321,6 @@ pub static WAL_REDO_RECORDS_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
.expect("failed to define a metric") .expect("failed to define a metric")
}); });
pub static WAL_REDO_BYTES_HISTOGRAM: Lazy<Histogram> = Lazy::new(|| {
register_histogram!(
"pageserver_wal_redo_bytes_histogram",
"Histogram of number of records replayed per redo",
redo_bytes_histogram_count_buckets!(),
)
.expect("failed to define a metric")
});
pub static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| { pub static WAL_REDO_RECORD_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
register_int_counter!( register_int_counter!(
"pageserver_replayed_wal_records_total", "pageserver_replayed_wal_records_total",
@@ -412,12 +386,8 @@ impl TimelineMetrics {
let current_logical_size_gauge = CURRENT_LOGICAL_SIZE let current_logical_size_gauge = CURRENT_LOGICAL_SIZE
.get_metric_with_label_values(&[&tenant_id, &timeline_id]) .get_metric_with_label_values(&[&tenant_id, &timeline_id])
.unwrap(); .unwrap();
let num_persistent_files_created = NUM_PERSISTENT_FILES_CREATED let num_persistent_files_created = NUM_PERSISTENT_FILES_CREATED.clone();
.get_metric_with_label_values(&[&tenant_id, &timeline_id]) let persistent_bytes_written = PERSISTENT_BYTES_WRITTEN.clone();
.unwrap();
let persistent_bytes_written = PERSISTENT_BYTES_WRITTEN
.get_metric_with_label_values(&[&tenant_id, &timeline_id])
.unwrap();
TimelineMetrics { TimelineMetrics {
tenant_id, tenant_id,
@@ -449,8 +419,6 @@ impl Drop for TimelineMetrics {
let _ = WAIT_LSN_TIME.remove_label_values(&[tenant_id, timeline_id]); let _ = WAIT_LSN_TIME.remove_label_values(&[tenant_id, timeline_id]);
let _ = CURRENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, timeline_id]); let _ = CURRENT_PHYSICAL_SIZE.remove_label_values(&[tenant_id, timeline_id]);
let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, timeline_id]); let _ = CURRENT_LOGICAL_SIZE.remove_label_values(&[tenant_id, timeline_id]);
let _ = NUM_PERSISTENT_FILES_CREATED.remove_label_values(&[tenant_id, timeline_id]);
let _ = PERSISTENT_BYTES_WRITTEN.remove_label_values(&[tenant_id, timeline_id]);
for op in STORAGE_TIME_OPERATIONS { for op in STORAGE_TIME_OPERATIONS {
let _ = STORAGE_TIME.remove_label_values(&[op, tenant_id, timeline_id]); let _ = STORAGE_TIME.remove_label_values(&[op, tenant_id, timeline_id]);


@@ -36,9 +36,8 @@
//! mapping is automatically removed and the slot is marked free. //! mapping is automatically removed and the slot is marked free.
//! //!
use dashmap::mapref::entry::Entry;
use dashmap::DashMap;
use std::{ use std::{
collections::{hash_map::Entry, HashMap},
convert::TryInto, convert::TryInto,
sync::{ sync::{
atomic::{AtomicU8, AtomicUsize, Ordering}, atomic::{AtomicU8, AtomicUsize, Ordering},
@@ -169,11 +168,18 @@ impl Slot {
pub struct PageCache { pub struct PageCache {
/// This contains the mapping from the cache key to buffer slot that currently /// This contains the mapping from the cache key to buffer slot that currently
/// contains the page, if any. /// contains the page, if any.
materialized_page_map: DashMap<MaterializedPageHashKey, Vec<Version>>, ///
/// TODO: This is protected by a single lock. If that becomes a bottleneck,
/// this HashMap can be replaced with a more concurrent version, there are
/// plenty of such crates around.
///
/// If you add support for caching different kinds of objects, each object kind
/// can have a separate mapping map, next to this field.
materialized_page_map: RwLock<HashMap<MaterializedPageHashKey, Vec<Version>>>,
ephemeral_page_map: DashMap<(u64, u32), usize>, ephemeral_page_map: RwLock<HashMap<(u64, u32), usize>>,
immutable_page_map: DashMap<(u64, u32), usize>, immutable_page_map: RwLock<HashMap<(u64, u32), usize>>,
/// The actual buffers with their metadata. /// The actual buffers with their metadata.
slots: Box<[Slot]>, slots: Box<[Slot]>,
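To make the locking difference in this hunk concrete: one side keeps each mapping in a `dashmap::DashMap` (internally sharded, per-shard locking), while the other wraps a plain `HashMap` in a single `RwLock`, so every lookup contends on one map-wide lock. A minimal sketch of the two read paths, with the key type simplified to the `(u64, u32)` pairs used above:

use std::collections::HashMap;
use std::sync::RwLock;

use dashmap::DashMap;

// Single-lock approach: all readers share one RwLock, and any writer blocks them all.
fn lookup_locked(map: &RwLock<HashMap<(u64, u32), usize>>, key: (u64, u32)) -> Option<usize> {
    let guard = map.read().unwrap();
    guard.get(&key).copied()
}

// Sharded approach: DashMap only locks the shard that owns the key,
// so unrelated lookups and insertions proceed in parallel.
fn lookup_sharded(map: &DashMap<(u64, u32), usize>, key: (u64, u32)) -> Option<usize> {
    map.get(&key).map(|slot| *slot)
}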
@@ -610,7 +616,7 @@ impl PageCache {
fn search_mapping(&self, cache_key: &mut CacheKey) -> Option<usize> { fn search_mapping(&self, cache_key: &mut CacheKey) -> Option<usize> {
match cache_key { match cache_key {
CacheKey::MaterializedPage { hash_key, lsn } => { CacheKey::MaterializedPage { hash_key, lsn } => {
let map = &self.materialized_page_map; let map = self.materialized_page_map.read().unwrap();
let versions = map.get(hash_key)?; let versions = map.get(hash_key)?;
let version_idx = match versions.binary_search_by_key(lsn, |v| v.lsn) { let version_idx = match versions.binary_search_by_key(lsn, |v| v.lsn) {
@@ -623,11 +629,11 @@ impl PageCache {
Some(version.slot_idx) Some(version.slot_idx)
} }
CacheKey::EphemeralPage { file_id, blkno } => { CacheKey::EphemeralPage { file_id, blkno } => {
let map = &self.ephemeral_page_map; let map = self.ephemeral_page_map.read().unwrap();
Some(*map.get(&(*file_id, *blkno))?) Some(*map.get(&(*file_id, *blkno))?)
} }
CacheKey::ImmutableFilePage { file_id, blkno } => { CacheKey::ImmutableFilePage { file_id, blkno } => {
let map = &self.immutable_page_map; let map = self.immutable_page_map.read().unwrap();
Some(*map.get(&(*file_id, *blkno))?) Some(*map.get(&(*file_id, *blkno))?)
} }
} }
@@ -640,7 +646,7 @@ impl PageCache {
fn search_mapping_for_write(&self, key: &CacheKey) -> Option<usize> { fn search_mapping_for_write(&self, key: &CacheKey) -> Option<usize> {
match key { match key {
CacheKey::MaterializedPage { hash_key, lsn } => { CacheKey::MaterializedPage { hash_key, lsn } => {
let map = &self.materialized_page_map; let map = self.materialized_page_map.read().unwrap();
let versions = map.get(hash_key)?; let versions = map.get(hash_key)?;
if let Ok(version_idx) = versions.binary_search_by_key(lsn, |v| v.lsn) { if let Ok(version_idx) = versions.binary_search_by_key(lsn, |v| v.lsn) {
@@ -650,11 +656,11 @@ impl PageCache {
} }
} }
CacheKey::EphemeralPage { file_id, blkno } => { CacheKey::EphemeralPage { file_id, blkno } => {
let map = &self.ephemeral_page_map; let map = self.ephemeral_page_map.read().unwrap();
Some(*map.get(&(*file_id, *blkno))?) Some(*map.get(&(*file_id, *blkno))?)
} }
CacheKey::ImmutableFilePage { file_id, blkno } => { CacheKey::ImmutableFilePage { file_id, blkno } => {
let map = &self.immutable_page_map; let map = self.immutable_page_map.read().unwrap();
Some(*map.get(&(*file_id, *blkno))?) Some(*map.get(&(*file_id, *blkno))?)
} }
} }
@@ -669,7 +675,7 @@ impl PageCache {
hash_key: old_hash_key, hash_key: old_hash_key,
lsn: old_lsn, lsn: old_lsn,
} => { } => {
let map = &self.materialized_page_map; let mut map = self.materialized_page_map.write().unwrap();
if let Entry::Occupied(mut old_entry) = map.entry(old_hash_key.clone()) { if let Entry::Occupied(mut old_entry) = map.entry(old_hash_key.clone()) {
let versions = old_entry.get_mut(); let versions = old_entry.get_mut();
@@ -684,12 +690,12 @@ impl PageCache {
} }
} }
CacheKey::EphemeralPage { file_id, blkno } => { CacheKey::EphemeralPage { file_id, blkno } => {
let map = &self.ephemeral_page_map; let mut map = self.ephemeral_page_map.write().unwrap();
map.remove(&(*file_id, *blkno)) map.remove(&(*file_id, *blkno))
.expect("could not find old key in mapping"); .expect("could not find old key in mapping");
} }
CacheKey::ImmutableFilePage { file_id, blkno } => { CacheKey::ImmutableFilePage { file_id, blkno } => {
let map = &self.immutable_page_map; let mut map = self.immutable_page_map.write().unwrap();
map.remove(&(*file_id, *blkno)) map.remove(&(*file_id, *blkno))
.expect("could not find old key in mapping"); .expect("could not find old key in mapping");
} }
@@ -707,8 +713,8 @@ impl PageCache {
hash_key: new_key, hash_key: new_key,
lsn: new_lsn, lsn: new_lsn,
} => { } => {
let map = &self.materialized_page_map; let mut map = self.materialized_page_map.write().unwrap();
let mut versions = map.entry(new_key.clone()).or_default(); let versions = map.entry(new_key.clone()).or_default();
match versions.binary_search_by_key(new_lsn, |v| v.lsn) { match versions.binary_search_by_key(new_lsn, |v| v.lsn) {
Ok(version_idx) => Some(versions[version_idx].slot_idx), Ok(version_idx) => Some(versions[version_idx].slot_idx),
Err(version_idx) => { Err(version_idx) => {
@@ -724,7 +730,7 @@ impl PageCache {
} }
} }
CacheKey::EphemeralPage { file_id, blkno } => { CacheKey::EphemeralPage { file_id, blkno } => {
let map = &self.ephemeral_page_map; let mut map = self.ephemeral_page_map.write().unwrap();
match map.entry((*file_id, *blkno)) { match map.entry((*file_id, *blkno)) {
Entry::Occupied(entry) => Some(*entry.get()), Entry::Occupied(entry) => Some(*entry.get()),
Entry::Vacant(entry) => { Entry::Vacant(entry) => {
@@ -734,7 +740,7 @@ impl PageCache {
} }
} }
CacheKey::ImmutableFilePage { file_id, blkno } => { CacheKey::ImmutableFilePage { file_id, blkno } => {
let map = &self.immutable_page_map; let mut map = self.immutable_page_map.write().unwrap();
match map.entry((*file_id, *blkno)) { match map.entry((*file_id, *blkno)) {
Entry::Occupied(entry) => Some(*entry.get()), Entry::Occupied(entry) => Some(*entry.get()),
Entry::Vacant(entry) => { Entry::Vacant(entry) => {


@@ -169,14 +169,9 @@ use self::{
upload::{upload_index_part, upload_timeline_layers, UploadedTimeline}, upload::{upload_index_part, upload_timeline_layers, UploadedTimeline},
}; };
use crate::{ use crate::{
config::PageServerConf, config::PageServerConf, exponential_backoff, storage_sync::index::RemoteIndex, task_mgr,
exponential_backoff, task_mgr::TaskKind, task_mgr::BACKGROUND_RUNTIME, tenant::metadata::TimelineMetadata,
storage_sync::index::{LayerFileMetadata, RemoteIndex}, tenant_mgr::attach_local_tenants,
task_mgr,
task_mgr::TaskKind,
task_mgr::BACKGROUND_RUNTIME,
tenant::metadata::TimelineMetadata,
tenant_mgr::{attach_local_tenants, TenantAttachData},
}; };
use crate::{ use crate::{
metrics::{IMAGE_SYNC_TIME, REMAINING_SYNC_ITEMS, REMOTE_INDEX_UPLOAD}, metrics::{IMAGE_SYNC_TIME, REMAINING_SYNC_ITEMS, REMOTE_INDEX_UPLOAD},
@@ -193,7 +188,7 @@ static SYNC_QUEUE: OnceCell<SyncQueue> = OnceCell::new();
/// A timeline status to share with pageserver's sync counterpart, /// A timeline status to share with pageserver's sync counterpart,
/// after comparing local and remote timeline state. /// after comparing local and remote timeline state.
#[derive(Clone, PartialEq, Eq)] #[derive(Clone)]
pub enum LocalTimelineInitStatus { pub enum LocalTimelineInitStatus {
/// The timeline has every remote layer present locally. /// The timeline has every remote layer present locally.
/// There could be some layers requiring uploading, /// There could be some layers requiring uploading,
@@ -316,7 +311,7 @@ impl SyncQueue {
/// A task to run in the async download/upload loop. /// A task to run in the async download/upload loop.
/// Limited by the number of retries, after certain threshold the failing task gets evicted and the timeline disabled. /// Limited by the number of retries, after certain threshold the failing task gets evicted and the timeline disabled.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone)]
enum SyncTask { enum SyncTask {
/// A checkpoint outcome with possible local file updates that need actualization in the remote storage. /// A checkpoint outcome with possible local file updates that need actualization in the remote storage.
/// Not necessary more fresh than the one already uploaded. /// Not necessary more fresh than the one already uploaded.
@@ -427,7 +422,7 @@ impl SyncTaskBatch {
.extend(new_delete.data.deleted_layers.iter().cloned()); .extend(new_delete.data.deleted_layers.iter().cloned());
} }
if let Some(batch_upload) = &mut self.upload { if let Some(batch_upload) = &mut self.upload {
let not_deleted = |layer: &PathBuf, _: &mut LayerFileMetadata| { let not_deleted = |layer: &PathBuf| {
!new_delete.data.layers_to_delete.contains(layer) !new_delete.data.layers_to_delete.contains(layer)
&& !new_delete.data.deleted_layers.contains(layer) && !new_delete.data.deleted_layers.contains(layer)
}; };
@@ -455,35 +450,21 @@ impl SyncTaskBatch {
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
struct LayersUpload { struct LayersUpload {
/// Layer file path in the pageserver workdir, that were added for the corresponding checkpoint. /// Layer file path in the pageserver workdir, that were added for the corresponding checkpoint.
layers_to_upload: HashMap<PathBuf, LayerFileMetadata>, layers_to_upload: HashSet<PathBuf>,
/// Already uploaded layers. Used to store the data about the uploads between task retries /// Already uploaded layers. Used to store the data about the uploads between task retries
/// and to record the data into the remote index after the task got completed or evicted. /// and to record the data into the remote index after the task got completed or evicted.
uploaded_layers: HashMap<PathBuf, LayerFileMetadata>, uploaded_layers: HashSet<PathBuf>,
metadata: Option<TimelineMetadata>, metadata: Option<TimelineMetadata>,
} }
/// A timeline download task. /// A timeline download task.
/// Does not contain the file list to download, to allow other /// Does not contain the file list to download, to allow other
/// parts of the pageserver code to schedule the task /// parts of the pageserver code to schedule the task
/// without using the remote index or any other ways to list the remote timeline files. /// without using the remote index or any other ways to list the remote timeline files.
/// Skips the files that are already downloaded. /// Skips the files that are already downloaded.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
struct LayersDownload { struct LayersDownload {
layers_to_skip: HashSet<PathBuf>, layers_to_skip: HashSet<PathBuf>,
/// Paths which have been downloaded, and had their metadata verified or generated.
///
/// Metadata generation happens when upgrading from a past version of `IndexPart`.
gathered_metadata: HashMap<PathBuf, LayerFileMetadata>,
}
impl LayersDownload {
fn from_skipped_layers(layers_to_skip: HashSet<PathBuf>) -> Self {
LayersDownload {
layers_to_skip,
gathered_metadata: HashMap::default(),
}
}
} }
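// A minimal sketch (made-up types, not the actual LayersUpload) of what the
// upload task carries once per-layer metadata is tracked: every path is paired
// with its known size instead of living in a bare HashSet, and the pair moves
// together when a layer finishes uploading.
use std::collections::HashMap;
use std::path::{Path, PathBuf};

struct UploadSketch {
    layers_to_upload: HashMap<PathBuf, u64>, // path -> known file size
    uploaded_layers: HashMap<PathBuf, u64>,
}

impl UploadSketch {
    fn mark_uploaded(&mut self, path: &Path) {
        // Keep the metadata attached to the path while moving it between maps.
        if let Some(size) = self.layers_to_upload.remove(path) {
            self.uploaded_layers.insert(path.to_path_buf(), size);
        }
    }
}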
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
@@ -505,7 +486,7 @@ struct LayersDeletion {
pub fn schedule_layer_upload( pub fn schedule_layer_upload(
tenant_id: TenantId, tenant_id: TenantId,
timeline_id: TimelineId, timeline_id: TimelineId,
layers_to_upload: HashMap<PathBuf, LayerFileMetadata>, layers_to_upload: HashSet<PathBuf>,
metadata: Option<TimelineMetadata>, metadata: Option<TimelineMetadata>,
) { ) {
let sync_queue = match SYNC_QUEUE.get() { let sync_queue = match SYNC_QUEUE.get() {
@@ -522,7 +503,7 @@ pub fn schedule_layer_upload(
}, },
SyncTask::upload(LayersUpload { SyncTask::upload(LayersUpload {
layers_to_upload, layers_to_upload,
uploaded_layers: HashMap::new(), uploaded_layers: HashSet::new(),
metadata, metadata,
}), }),
); );
@@ -580,44 +561,18 @@ pub fn schedule_layer_download(tenant_id: TenantId, timeline_id: TimelineId) {
tenant_id, tenant_id,
timeline_id, timeline_id,
}, },
SyncTask::download(LayersDownload::from_skipped_layers(HashSet::new())), SyncTask::download(LayersDownload {
layers_to_skip: HashSet::new(),
}),
); );
debug!("Download task for tenant {tenant_id}, timeline {timeline_id} sent") debug!("Download task for tenant {tenant_id}, timeline {timeline_id} sent")
} }
/// Local existing timeline files
///
/// Values of this type carry different meanings in different contexts: on startup, collected
/// timelines come with the full collected file information; when signalling readiness to attach
/// after a completed download, the file information is no longer carried, because
/// it is already merged into [`RemoteTimeline`].
#[derive(Debug)]
pub struct TimelineLocalFiles(TimelineMetadata, HashMap<PathBuf, LayerFileMetadata>);
impl TimelineLocalFiles {
pub fn metadata(&self) -> &TimelineMetadata {
&self.0
}
/// Called during startup, for all of the local files with full metadata.
pub(crate) fn collected(
metadata: TimelineMetadata,
timeline_files: HashMap<PathBuf, LayerFileMetadata>,
) -> TimelineLocalFiles {
TimelineLocalFiles(metadata, timeline_files)
}
/// Called near the end of tenant initialization, to signal readiness to attach tenants.
pub(crate) fn ready(metadata: TimelineMetadata) -> Self {
TimelineLocalFiles(metadata, HashMap::new())
}
}
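// Hypothetical usage of the two constructors above (the layer path and size are
// made up; the surrounding module's imports are assumed): on startup every
// locally present layer is reported together with its LayerFileMetadata, while
// after a completed download only the timeline metadata travels with the
// readiness signal.
fn example_timeline_local_files(local_metadata: TimelineMetadata) -> (TimelineLocalFiles, TimelineLocalFiles) {
    let on_startup = TimelineLocalFiles::collected(
        local_metadata.clone(),
        HashMap::from([(PathBuf::from("layer_1"), LayerFileMetadata::new(123))]),
    );
    let after_download = TimelineLocalFiles::ready(local_metadata);
    (on_startup, after_download)
}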
/// Launch a thread to perform remote storage sync tasks. /// Launch a thread to perform remote storage sync tasks.
/// See module docs for loop step description. /// See module docs for loop step description.
pub fn spawn_storage_sync_task( pub fn spawn_storage_sync_task(
conf: &'static PageServerConf, conf: &'static PageServerConf,
local_timeline_files: HashMap<TenantId, HashMap<TimelineId, TimelineLocalFiles>>, local_timeline_files: TenantTimelineValues<(TimelineMetadata, HashSet<PathBuf>)>,
storage: GenericRemoteStorage, storage: GenericRemoteStorage,
max_concurrent_timelines_sync: NonZeroUsize, max_concurrent_timelines_sync: NonZeroUsize,
max_sync_errors: NonZeroU32, max_sync_errors: NonZeroU32,
@@ -640,7 +595,7 @@ pub fn spawn_storage_sync_task(
let mut keys_for_index_part_downloads = HashSet::new(); let mut keys_for_index_part_downloads = HashSet::new();
let mut timelines_to_sync = HashMap::new(); let mut timelines_to_sync = HashMap::new();
for (tenant_id, timeline_data) in local_timeline_files { for (tenant_id, timeline_data) in local_timeline_files.0 {
if timeline_data.is_empty() { if timeline_data.is_empty() {
info!("got empty tenant {}", tenant_id); info!("got empty tenant {}", tenant_id);
let _ = empty_tenants.0.entry(tenant_id).or_default(); let _ = empty_tenants.0.entry(tenant_id).or_default();
@@ -743,7 +698,7 @@ async fn storage_sync_loop(
"Sync loop step completed, {} new tenant state update(s)", "Sync loop step completed, {} new tenant state update(s)",
updated_tenants.len() updated_tenants.len()
); );
let mut timelines_to_attach = HashMap::new(); let mut timelines_to_attach = TenantTimelineValues::new();
let index_accessor = index.read().await; let index_accessor = index.read().await;
for tenant_id in updated_tenants { for tenant_id in updated_tenants {
let tenant_entry = match index_accessor.tenant_entry(&tenant_id) { let tenant_entry = match index_accessor.tenant_entry(&tenant_id) {
@@ -769,16 +724,12 @@ async fn storage_sync_loop(
// and register them all at once in a tenant for download // and register them all at once in a tenant for download
// to be submitted in a single operation to tenant // to be submitted in a single operation to tenant
// so it can apply them at once to internal timeline map. // so it can apply them at once to internal timeline map.
timelines_to_attach.insert( timelines_to_attach.0.insert(
tenant_id, tenant_id,
TenantAttachData::Ready( tenant_entry
tenant_entry .iter()
.iter() .map(|(&id, entry)| (id, entry.metadata.clone()))
.map(|(&id, entry)| { .collect(),
(id, TimelineLocalFiles::ready(entry.metadata.clone()))
})
.collect(),
),
); );
} }
} }
@@ -1020,27 +971,15 @@ async fn download_timeline_data(
} }
DownloadedTimeline::Successful(mut download_data) => { DownloadedTimeline::Successful(mut download_data) => {
match update_local_metadata(conf, sync_id, current_remote_timeline).await { match update_local_metadata(conf, sync_id, current_remote_timeline).await {
Ok(()) => { Ok(()) => match index.write().await.set_awaits_download(&sync_id, false) {
let mut g = index.write().await; Ok(()) => {
register_sync_status(sync_id, sync_start, TASK_NAME, Some(true));
match g.set_awaits_download(&sync_id, false) { return DownloadStatus::Downloaded;
Ok(()) => { }
let timeline = g Err(e) => {
.timeline_entry_mut(&sync_id) error!("Timeline {sync_id} was expected to be in the remote index after a successful download, but it's absent: {e:?}");
.expect("set_awaits_download verified existence"); }
},
timeline.merge_metadata_from_downloaded(
&download_data.data.gathered_metadata,
);
register_sync_status(sync_id, sync_start, TASK_NAME, Some(true));
return DownloadStatus::Downloaded;
}
Err(e) => {
error!("Timeline {sync_id} was expected to be in the remote index after a successful download, but it's absent: {e:?}");
}
};
}
Err(e) => { Err(e) => {
error!("Failed to update local timeline metadata: {e:?}"); error!("Failed to update local timeline metadata: {e:?}");
download_data.retries += 1; download_data.retries += 1;
@@ -1243,18 +1182,11 @@ async fn update_remote_data(
} }
if upload_failed { if upload_failed {
existing_entry.add_upload_failures( existing_entry.add_upload_failures(
uploaded_data uploaded_data.layers_to_upload.iter().cloned(),
.layers_to_upload
.iter()
.map(|(k, v)| (k.to_owned(), v.to_owned())),
); );
} else { } else {
existing_entry.add_timeline_layers( existing_entry
uploaded_data .add_timeline_layers(uploaded_data.uploaded_layers.iter().cloned());
.uploaded_layers
.iter()
.map(|(k, v)| (k.to_owned(), v.to_owned())),
);
} }
} }
RemoteDataUpdate::Delete(layers_to_remove) => { RemoteDataUpdate::Delete(layers_to_remove) => {
@@ -1274,19 +1206,11 @@ async fn update_remote_data(
}; };
let mut new_remote_timeline = RemoteTimeline::new(new_metadata.clone()); let mut new_remote_timeline = RemoteTimeline::new(new_metadata.clone());
if upload_failed { if upload_failed {
new_remote_timeline.add_upload_failures( new_remote_timeline
uploaded_data .add_upload_failures(uploaded_data.layers_to_upload.iter().cloned());
.layers_to_upload
.iter()
.map(|(k, v)| (k.to_owned(), v.to_owned())),
);
} else { } else {
new_remote_timeline.add_timeline_layers( new_remote_timeline
uploaded_data .add_timeline_layers(uploaded_data.uploaded_layers.iter().cloned());
.uploaded_layers
.iter()
.map(|(k, v)| (k.to_owned(), v.to_owned())),
);
} }
index_accessor.add_timeline_entry(sync_id, new_remote_timeline.clone()); index_accessor.add_timeline_entry(sync_id, new_remote_timeline.clone());
@@ -1334,14 +1258,13 @@ async fn validate_task_retries(
fn schedule_first_sync_tasks( fn schedule_first_sync_tasks(
index: &mut RemoteTimelineIndex, index: &mut RemoteTimelineIndex,
sync_queue: &SyncQueue, sync_queue: &SyncQueue,
local_timeline_files: HashMap<TenantTimelineId, TimelineLocalFiles>, local_timeline_files: HashMap<TenantTimelineId, (TimelineMetadata, HashSet<PathBuf>)>,
) -> TenantTimelineValues<LocalTimelineInitStatus> { ) -> TenantTimelineValues<LocalTimelineInitStatus> {
let mut local_timeline_init_statuses = TenantTimelineValues::new(); let mut local_timeline_init_statuses = TenantTimelineValues::new();
let mut new_sync_tasks = VecDeque::with_capacity(local_timeline_files.len()); let mut new_sync_tasks = VecDeque::with_capacity(local_timeline_files.len());
for (sync_id, local_timeline) in local_timeline_files { for (sync_id, (local_metadata, local_files)) in local_timeline_files {
let TimelineLocalFiles(local_metadata, local_files) = local_timeline;
match index.timeline_entry_mut(&sync_id) { match index.timeline_entry_mut(&sync_id) {
Some(remote_timeline) => { Some(remote_timeline) => {
let (timeline_status, awaits_download) = compare_local_and_remote_timeline( let (timeline_status, awaits_download) = compare_local_and_remote_timeline(
@@ -1385,7 +1308,7 @@ fn schedule_first_sync_tasks(
sync_id, sync_id,
SyncTask::upload(LayersUpload { SyncTask::upload(LayersUpload {
layers_to_upload: local_files, layers_to_upload: local_files,
uploaded_layers: HashMap::new(), uploaded_layers: HashSet::new(),
metadata: Some(local_metadata.clone()), metadata: Some(local_metadata.clone()),
}), }),
)); ));
@@ -1412,46 +1335,20 @@ fn compare_local_and_remote_timeline(
new_sync_tasks: &mut VecDeque<(TenantTimelineId, SyncTask)>, new_sync_tasks: &mut VecDeque<(TenantTimelineId, SyncTask)>,
sync_id: TenantTimelineId, sync_id: TenantTimelineId,
local_metadata: TimelineMetadata, local_metadata: TimelineMetadata,
local_files: HashMap<PathBuf, LayerFileMetadata>, local_files: HashSet<PathBuf>,
remote_entry: &RemoteTimeline, remote_entry: &RemoteTimeline,
) -> (LocalTimelineInitStatus, bool) { ) -> (LocalTimelineInitStatus, bool) {
let _entered = info_span!("compare_local_and_remote_timeline", sync_id = %sync_id).entered(); let _entered = info_span!("compare_local_and_remote_timeline", sync_id = %sync_id).entered();
let needed_to_download_files = remote_entry let remote_files = remote_entry.stored_files();
.stored_files()
.iter()
.filter_map(|(layer_file, remote_metadata)| {
if let Some(local_metadata) = local_files.get(layer_file) {
match (remote_metadata.file_size(), local_metadata.file_size()) {
(Some(x), Some(y)) if x == y => { None },
(None, Some(_)) => {
// upgrading from an earlier IndexPart without metadata
None
},
_ => {
// having to deal with other than (Some(x), Some(y)) where x != y here is a
// bummer, but see #2582 and #2610 for attempts and discussion.
warn!("Redownloading locally existing {layer_file:?} due to size mismatch, size on index: {:?}, on disk: {:?}", remote_metadata.file_size(), local_metadata.file_size());
Some(layer_file)
},
}
} else {
// doesn't exist locally
Some(layer_file)
}
})
.collect::<HashSet<_>>();
let (initial_timeline_status, awaits_download) = if !needed_to_download_files.is_empty() { let number_of_layers_to_download = remote_files.difference(&local_files).count();
let (initial_timeline_status, awaits_download) = if number_of_layers_to_download > 0 {
new_sync_tasks.push_back(( new_sync_tasks.push_back((
sync_id, sync_id,
SyncTask::download(LayersDownload::from_skipped_layers( SyncTask::download(LayersDownload {
local_files layers_to_skip: local_files.clone(),
.keys() }),
.filter(|path| !needed_to_download_files.contains(path))
.cloned()
.collect(),
)),
)); ));
info!("NeedsSync"); info!("NeedsSync");
(LocalTimelineInitStatus::NeedsSync, true) (LocalTimelineInitStatus::NeedsSync, true)
@@ -1466,22 +1363,15 @@ fn compare_local_and_remote_timeline(
}; };
let layers_to_upload = local_files let layers_to_upload = local_files
.iter() .difference(remote_files)
.filter_map(|(local_file, metadata)| { .cloned()
if !remote_entry.stored_files().contains_key(local_file) { .collect::<HashSet<_>>();
Some((local_file.to_owned(), metadata.to_owned()))
} else {
None
}
})
.collect::<HashMap<_, _>>();
if !layers_to_upload.is_empty() { if !layers_to_upload.is_empty() {
new_sync_tasks.push_back(( new_sync_tasks.push_back((
sync_id, sync_id,
SyncTask::upload(LayersUpload { SyncTask::upload(LayersUpload {
layers_to_upload, layers_to_upload,
uploaded_layers: HashMap::new(), uploaded_layers: HashSet::new(),
metadata: Some(local_metadata), metadata: Some(local_metadata),
}), }),
)); ));
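// A hedged restatement of the per-layer decision implied above, as a standalone
// helper (the function name is made up): redownload only when both sides know a
// size and the sizes disagree; a missing remote size (older IndexPart) is
// accepted as-is to avoid pointless redownloads right after an upgrade, and a
// layer with no local file at all is always downloaded.
fn needs_redownload(remote_size: Option<u64>, local_size: Option<u64>) -> bool {
    match (remote_size, local_size) {
        (Some(remote), Some(local)) => remote != local,
        (None, Some(_)) => false, // upgrading from an IndexPart without file sizes
        (_, None) => true,        // no local file (or unknown local size)
    }
}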
@@ -1537,12 +1427,11 @@ mod test_utils {
let timeline_path = harness.timeline_path(&timeline_id); let timeline_path = harness.timeline_path(&timeline_id);
fs::create_dir_all(&timeline_path).await?; fs::create_dir_all(&timeline_path).await?;
let mut layers_to_upload = HashMap::with_capacity(filenames.len()); let mut layers_to_upload = HashSet::with_capacity(filenames.len());
for &file in filenames { for &file in filenames {
let file_path = timeline_path.join(file); let file_path = timeline_path.join(file);
fs::write(&file_path, dummy_contents(file).into_bytes()).await?; fs::write(&file_path, dummy_contents(file).into_bytes()).await?;
let metadata = LayerFileMetadata::new(file_path.metadata()?.len()); layers_to_upload.insert(file_path);
layers_to_upload.insert(file_path, metadata);
} }
fs::write( fs::write(
@@ -1553,7 +1442,7 @@ mod test_utils {
Ok(LayersUpload { Ok(LayersUpload {
layers_to_upload, layers_to_upload,
uploaded_layers: HashMap::new(), uploaded_layers: HashSet::new(),
metadata: Some(metadata), metadata: Some(metadata),
}) })
} }
@@ -1608,13 +1497,12 @@ mod tests {
assert!(sync_id_2 != sync_id_3); assert!(sync_id_2 != sync_id_3);
assert!(sync_id_3 != TEST_SYNC_ID); assert!(sync_id_3 != TEST_SYNC_ID);
let download_task = let download_task = SyncTask::download(LayersDownload {
SyncTask::download(LayersDownload::from_skipped_layers(HashSet::from([ layers_to_skip: HashSet::from([PathBuf::from("sk")]),
PathBuf::from("sk"), });
])));
let upload_task = SyncTask::upload(LayersUpload { let upload_task = SyncTask::upload(LayersUpload {
layers_to_upload: HashMap::from([(PathBuf::from("up"), LayerFileMetadata::new(123))]), layers_to_upload: HashSet::from([PathBuf::from("up")]),
uploaded_layers: HashMap::from([(PathBuf::from("upl"), LayerFileMetadata::new(123))]), uploaded_layers: HashSet::from([PathBuf::from("upl")]),
metadata: Some(dummy_metadata(Lsn(2))), metadata: Some(dummy_metadata(Lsn(2))),
}); });
let delete_task = SyncTask::delete(LayersDeletion { let delete_task = SyncTask::delete(LayersDeletion {
@@ -1658,10 +1546,12 @@ mod tests {
let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap()); let sync_queue = SyncQueue::new(NonZeroUsize::new(100).unwrap());
assert_eq!(sync_queue.len(), 0); assert_eq!(sync_queue.len(), 0);
let download = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk")])); let download = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk")]),
};
let upload = LayersUpload { let upload = LayersUpload {
layers_to_upload: HashMap::from([(PathBuf::from("up"), LayerFileMetadata::new(123))]), layers_to_upload: HashSet::from([PathBuf::from("up")]),
uploaded_layers: HashMap::from([(PathBuf::from("upl"), LayerFileMetadata::new(123))]), uploaded_layers: HashSet::from([PathBuf::from("upl")]),
metadata: Some(dummy_metadata(Lsn(2))), metadata: Some(dummy_metadata(Lsn(2))),
}; };
let delete = LayersDeletion { let delete = LayersDeletion {
@@ -1709,10 +1599,18 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn same_task_id_same_tasks_batch() { async fn same_task_id_same_tasks_batch() {
let sync_queue = SyncQueue::new(NonZeroUsize::new(1).unwrap()); let sync_queue = SyncQueue::new(NonZeroUsize::new(1).unwrap());
let download_1 = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk1")])); let download_1 = LayersDownload {
let download_2 = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk2")])); layers_to_skip: HashSet::from([PathBuf::from("sk1")]),
let download_3 = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk3")])); };
let download_4 = LayersDownload::from_skipped_layers(HashSet::from([PathBuf::from("sk4")])); let download_2 = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk2")]),
};
let download_3 = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk3")]),
};
let download_4 = LayersDownload {
layers_to_skip: HashSet::from([PathBuf::from("sk4")]),
};
let sync_id_2 = TenantTimelineId { let sync_id_2 = TenantTimelineId {
tenant_id: TenantId::from_array(hex!("22223344556677881122334455667788")), tenant_id: TenantId::from_array(hex!("22223344556677881122334455667788")),
@@ -1736,15 +1634,15 @@ mod tests {
Some(SyncTaskBatch { Some(SyncTaskBatch {
download: Some(SyncData { download: Some(SyncData {
retries: 0, retries: 0,
data: LayersDownload::from_skipped_layers( data: LayersDownload {
{ layers_to_skip: {
let mut set = HashSet::new(); let mut set = HashSet::new();
set.extend(download_1.layers_to_skip.into_iter()); set.extend(download_1.layers_to_skip.into_iter());
set.extend(download_2.layers_to_skip.into_iter()); set.extend(download_2.layers_to_skip.into_iter());
set.extend(download_4.layers_to_skip.into_iter()); set.extend(download_4.layers_to_skip.into_iter());
set set
}, },
) }
}), }),
upload: None, upload: None,
delete: None, delete: None,
@@ -1760,148 +1658,4 @@ mod tests {
"Should have one task left out of the batch" "Should have one task left out of the batch"
); );
} }
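// The merge rule exercised by the test above, as a standalone sketch (the helper
// name is made up; it is not the real SyncTaskBatch method): download tasks
// batched for the same timeline union their `layers_to_skip` sets.
fn merged_layers_to_skip(downloads: impl IntoIterator<Item = LayersDownload>) -> HashSet<PathBuf> {
    downloads
        .into_iter()
        .flat_map(|download| download.layers_to_skip.into_iter())
        .collect()
}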
mod local_and_remote_comparisons {
use super::*;
#[test]
fn ready() {
let mut new_sync_tasks = VecDeque::default();
let sync_id = TenantTimelineId::generate();
let local_metadata = dummy_metadata(0x02.into());
let local_files =
HashMap::from([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let mut remote_entry = RemoteTimeline::new(local_metadata.clone());
remote_entry
.add_timeline_layers([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let (status, sync_needed) = compare_local_and_remote_timeline(
&mut new_sync_tasks,
sync_id,
local_metadata.clone(),
local_files,
&remote_entry,
);
assert_eq!(
status,
LocalTimelineInitStatus::LocallyComplete(local_metadata)
);
assert!(!sync_needed);
assert!(new_sync_tasks.is_empty(), "{:?}", new_sync_tasks);
}
#[test]
fn needs_download() {
let mut new_sync_tasks = VecDeque::default();
let sync_id = TenantTimelineId::generate();
let local_metadata = dummy_metadata(0x02.into());
let local_files = HashMap::default();
let mut remote_entry = RemoteTimeline::new(local_metadata.clone());
remote_entry
.add_timeline_layers([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let (status, sync_needed) = compare_local_and_remote_timeline(
&mut new_sync_tasks,
sync_id,
local_metadata,
local_files.clone(),
&remote_entry,
);
assert_eq!(status, LocalTimelineInitStatus::NeedsSync);
assert!(sync_needed);
let new_sync_tasks = new_sync_tasks.into_iter().collect::<Vec<_>>();
assert_eq!(
&new_sync_tasks,
&[(
sync_id,
SyncTask::download(LayersDownload::from_skipped_layers(
local_files.keys().cloned().collect()
))
)]
);
}
#[test]
fn redownload_is_not_needed_on_upgrade() {
// originally the implementation missed the `(None, Some(_))` case in the match, and
// proceeded to always redownload if the remote metadata was not available.
let mut new_sync_tasks = VecDeque::default();
let sync_id = TenantTimelineId::generate();
let local_metadata = dummy_metadata(0x02.into());
// The type system would in general allow LayerFileMetadata to be created with
// file_size: None; however, `LayerFileMetadata::default` is only allowed from tests,
// so everywhere within the system a valid LayerFileMetadata is
// created through `::new`.
let local_files =
HashMap::from([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let mut remote_entry = RemoteTimeline::new(local_metadata.clone());
// RemoteTimeline is constructed out of an older version IndexPart, which didn't carry
// any metadata.
remote_entry
.add_timeline_layers([(PathBuf::from("first_file"), LayerFileMetadata::default())]);
let (status, sync_needed) = compare_local_and_remote_timeline(
&mut new_sync_tasks,
sync_id,
local_metadata.clone(),
local_files,
&remote_entry,
);
assert_eq!(
status,
LocalTimelineInitStatus::LocallyComplete(local_metadata)
);
assert!(!sync_needed);
}
#[test]
fn needs_upload() {
let mut new_sync_tasks = VecDeque::default();
let sync_id = TenantTimelineId::generate();
let local_metadata = dummy_metadata(0x02.into());
let local_files =
HashMap::from([(PathBuf::from("first_file"), LayerFileMetadata::new(123))]);
let mut remote_entry = RemoteTimeline::new(local_metadata.clone());
remote_entry.add_timeline_layers([]);
let (status, sync_needed) = compare_local_and_remote_timeline(
&mut new_sync_tasks,
sync_id,
local_metadata.clone(),
local_files.clone(),
&remote_entry,
);
assert_eq!(
status,
LocalTimelineInitStatus::LocallyComplete(local_metadata.clone())
);
assert!(!sync_needed);
let new_sync_tasks = new_sync_tasks.into_iter().collect::<Vec<_>>();
assert_eq!(
&new_sync_tasks,
&[(
sync_id,
SyncTask::upload(LayersUpload {
layers_to_upload: local_files,
uploaded_layers: HashMap::default(),
metadata: Some(local_metadata),
})
)]
);
}
}
} }

View File

@@ -171,7 +171,7 @@ mod tests {
let local_timeline_path = harness.timeline_path(&TIMELINE_ID); let local_timeline_path = harness.timeline_path(&TIMELINE_ID);
let timeline_upload = let timeline_upload =
create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?; create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?;
for (local_path, _metadata) in timeline_upload.layers_to_upload { for local_path in timeline_upload.layers_to_upload {
let remote_path = let remote_path =
local_storage.resolve_in_storage(&local_storage.remote_object_id(&local_path)?)?; local_storage.resolve_in_storage(&local_storage.remote_object_id(&local_path)?)?;
let remote_parent_dir = remote_path.parent().unwrap(); let remote_parent_dir = remote_path.parent().unwrap();

View File

@@ -16,11 +16,7 @@ use tokio::{
}; };
use tracing::{debug, error, info, warn}; use tracing::{debug, error, info, warn};
use crate::{ use crate::{config::PageServerConf, storage_sync::SyncTask, TEMP_FILE_SUFFIX};
config::PageServerConf,
storage_sync::{index::LayerFileMetadata, SyncTask},
TEMP_FILE_SUFFIX,
};
use utils::{ use utils::{
crashsafe_dir::path_with_suffix_extension, crashsafe_dir::path_with_suffix_extension,
id::{TenantId, TenantTimelineId, TimelineId}, id::{TenantId, TenantTimelineId, TimelineId},
@@ -223,14 +219,8 @@ pub(super) async fn download_timeline_layers<'a>(
let layers_to_download = remote_timeline let layers_to_download = remote_timeline
.stored_files() .stored_files()
.iter() .difference(&download.layers_to_skip)
.filter_map(|(layer_path, metadata)| { .cloned()
if !download.layers_to_skip.contains(layer_path) {
Some((layer_path.to_owned(), metadata.to_owned()))
} else {
None
}
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
debug!("Layers to download: {layers_to_download:?}"); debug!("Layers to download: {layers_to_download:?}");
@@ -243,129 +233,89 @@ pub(super) async fn download_timeline_layers<'a>(
let mut download_tasks = layers_to_download let mut download_tasks = layers_to_download
.into_iter() .into_iter()
.map(|(layer_destination_path, metadata)| async move { .map(|layer_destination_path| async move {
if layer_destination_path.exists() {
debug!(
"Layer already exists locally, skipping download: {}",
layer_destination_path.display()
);
} else {
// Perform a rename inspired by durable_rename from file_utils.c.
// The sequence:
// write(tmp)
// fsync(tmp)
// rename(tmp, new)
// fsync(new)
// fsync(parent)
// For more context about durable_rename check this email from postgres mailing list:
// https://www.postgresql.org/message-id/56583BDD.9060302@2ndquadrant.com
// If pageserver crashes the temp file will be deleted on startup and re-downloaded.
let temp_file_path =
path_with_suffix_extension(&layer_destination_path, TEMP_FILE_SUFFIX);
match layer_destination_path.metadata() { let mut destination_file =
Ok(m) if m.is_file() => { fs::File::create(&temp_file_path).await.with_context(|| {
// the file exists from an earlier round when we failed after renaming it as format!(
// layer_destination_path "Failed to create a destination file for layer '{}'",
let verified = if let Some(expected) = metadata.file_size() { temp_file_path.display()
m.len() == expected )
} else { })?;
// behaviour before recording metadata was to accept any existing
true
};
if verified { let mut layer_download = storage.download_storage_object(None, &layer_destination_path)
debug!( .await
"Layer already exists locally, skipping download: {}", .with_context(|| {
layer_destination_path.display() format!(
); "Failed to initiate the download the layer for {sync_id} into file '{}'",
return Ok((layer_destination_path, LayerFileMetadata::new(m.len()))) temp_file_path.display()
} else { )
// no need to remove it, it will be overwritten by fs::rename })?;
// after successful download io::copy(&mut layer_download.download_stream, &mut destination_file)
warn!("Downloaded layer exists already but layer file metadata mismatches: {}, metadata {:?}", layer_destination_path.display(), metadata); .await
} .with_context(|| {
} format!(
Ok(m) => { "Failed to download the layer for {sync_id} into file '{}'",
return Err(anyhow::anyhow!("Downloaded layer destination exists but is not a file: {m:?}, target needs to be removed/archived manually: {layer_destination_path:?}")); temp_file_path.display()
} )
Err(_) => { })?;
// behave as the file didn't exist
} // Tokio doc here: https://docs.rs/tokio/1.17.0/tokio/fs/struct.File.html states that:
// A file will not be closed immediately when it goes out of scope if there are any IO operations
// that have not yet completed. To ensure that a file is closed immediately when it is dropped,
// you should call flush before dropping it.
//
// From the tokio code I see that it waits for pending operations to complete. There shouldn't be any because
// we assume that `destination_file` file is fully written. I.e there is no pending .write(...).await operations.
// But for additional safety let's check/wait for any pending operations.
destination_file.flush().await.with_context(|| {
format!(
"failed to flush source file at {}",
temp_file_path.display()
)
})?;
// not using sync_data because it can lose file size update
destination_file.sync_all().await.with_context(|| {
format!(
"failed to fsync source file at {}",
temp_file_path.display()
)
})?;
drop(destination_file);
fail::fail_point!("remote-storage-download-pre-rename", |_| {
anyhow::bail!("remote-storage-download-pre-rename failpoint triggered")
});
fs::rename(&temp_file_path, &layer_destination_path).await?;
fsync_path(&layer_destination_path).await.with_context(|| {
format!(
"Cannot fsync layer destination path {}",
layer_destination_path.display(),
)
})?;
} }
Ok::<_, anyhow::Error>(layer_destination_path)
// Perform a rename inspired by durable_rename from file_utils.c.
// The sequence:
// write(tmp)
// fsync(tmp)
// rename(tmp, new)
// fsync(new)
// fsync(parent)
// For more context about durable_rename check this email from postgres mailing list:
// https://www.postgresql.org/message-id/56583BDD.9060302@2ndquadrant.com
// If pageserver crashes the temp file will be deleted on startup and re-downloaded.
let temp_file_path =
path_with_suffix_extension(&layer_destination_path, TEMP_FILE_SUFFIX);
// TODO: this doesn't use the cached fd for some reason?
let mut destination_file =
fs::File::create(&temp_file_path).await.with_context(|| {
format!(
"Failed to create a destination file for layer '{}'",
temp_file_path.display()
)
})?;
let mut layer_download = storage.download_storage_object(None, &layer_destination_path)
.await
.with_context(|| {
format!(
"Failed to initiate the download the layer for {sync_id} into file '{}'",
temp_file_path.display()
)
})?;
let bytes_amount = io::copy(&mut layer_download.download_stream, &mut destination_file)
.await
.with_context(|| {
format!(
"Failed to download the layer for {sync_id} into file '{}'",
temp_file_path.display()
)
})?;
// Tokio doc here: https://docs.rs/tokio/1.17.0/tokio/fs/struct.File.html states that:
// A file will not be closed immediately when it goes out of scope if there are any IO operations
// that have not yet completed. To ensure that a file is closed immediately when it is dropped,
// you should call flush before dropping it.
//
// From the tokio code I see that it waits for pending operations to complete. There shouldn't be any because
// we assume that `destination_file` file is fully written. I.e there is no pending .write(...).await operations.
// But for additional safety let's check/wait for any pending operations.
destination_file.flush().await.with_context(|| {
format!(
"failed to flush source file at {}",
temp_file_path.display()
)
})?;
match metadata.file_size() {
Some(expected) if expected != bytes_amount => {
anyhow::bail!(
"According to layer file metadata should had downloaded {expected} bytes but downloaded {bytes_amount} bytes into file '{}'",
temp_file_path.display()
);
},
Some(_) | None => {
// matches, or upgrading from an earlier IndexPart version
}
}
// not using sync_data because it can lose file size update
destination_file.sync_all().await.with_context(|| {
format!(
"failed to fsync source file at {}",
temp_file_path.display()
)
})?;
drop(destination_file);
fail::fail_point!("remote-storage-download-pre-rename", |_| {
anyhow::bail!("remote-storage-download-pre-rename failpoint triggered")
});
fs::rename(&temp_file_path, &layer_destination_path).await?;
fsync_path(&layer_destination_path).await.with_context(|| {
format!(
"Cannot fsync layer destination path {}",
layer_destination_path.display(),
)
})?;
Ok::<_, anyhow::Error>((layer_destination_path, LayerFileMetadata::new(bytes_amount)))
}) })
.collect::<FuturesUnordered<_>>(); .collect::<FuturesUnordered<_>>();
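// A self-contained sketch of the durable_rename-style sequence described in the
// comments above, using tokio::fs directly (the helper name, temp suffix, and
// error handling are illustrative, not the pageserver's actual utilities; assumes
// the anyhow crate and a Unix filesystem where directories can be opened for fsync).
async fn durable_write_sketch(final_path: &std::path::Path, bytes: &[u8]) -> anyhow::Result<()> {
    use tokio::io::AsyncWriteExt;

    let temp_path = final_path.with_extension("___temp");
    let mut file = tokio::fs::File::create(&temp_path).await?; // write(tmp)
    file.write_all(bytes).await?;
    file.flush().await?;
    file.sync_all().await?; // fsync(tmp)
    drop(file);

    tokio::fs::rename(&temp_path, final_path).await?; // rename(tmp, new)
    tokio::fs::File::open(final_path).await?.sync_all().await?; // fsync(new)
    if let Some(parent) = final_path.parent().filter(|p| !p.as_os_str().is_empty()) {
        tokio::fs::File::open(parent).await?.sync_all().await?; // fsync(parent)
    }
    Ok(())
}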
@@ -374,12 +324,9 @@ pub(super) async fn download_timeline_layers<'a>(
let mut undo = HashSet::new(); let mut undo = HashSet::new();
while let Some(download_result) = download_tasks.next().await { while let Some(download_result) = download_tasks.next().await {
match download_result { match download_result {
Ok((downloaded_path, metadata)) => { Ok(downloaded_path) => {
undo.insert(downloaded_path.clone()); undo.insert(downloaded_path.clone());
download.layers_to_skip.insert(downloaded_path.clone()); download.layers_to_skip.insert(downloaded_path);
// what if the key existed already? ignore, because then we would have
// downloaded a partial file, and had to retry
download.gathered_metadata.insert(downloaded_path, metadata);
} }
Err(e) => { Err(e) => {
errors_happened = true; errors_happened = true;
@@ -402,8 +349,6 @@ pub(super) async fn download_timeline_layers<'a>(
); );
for item in undo { for item in undo {
download.layers_to_skip.remove(&item); download.layers_to_skip.remove(&item);
// intentionally don't clear the gathered_metadata, because it is still needed if fsync_path
// fails on the parent directory
} }
errors_happened = true; errors_happened = true;
} }
@@ -508,9 +453,9 @@ mod tests {
let timeline_upload = let timeline_upload =
create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?; create_local_timeline(&harness, TIMELINE_ID, &layer_files, metadata.clone()).await?;
for local_path in timeline_upload.layers_to_upload.keys() { for local_path in timeline_upload.layers_to_upload {
let remote_path = let remote_path =
local_storage.resolve_in_storage(&storage.remote_object_id(local_path)?)?; local_storage.resolve_in_storage(&storage.remote_object_id(&local_path)?)?;
let remote_parent_dir = remote_path.parent().unwrap(); let remote_parent_dir = remote_path.parent().unwrap();
if !remote_parent_dir.exists() { if !remote_parent_dir.exists() {
fs::create_dir_all(&remote_parent_dir).await?; fs::create_dir_all(&remote_parent_dir).await?;
@@ -528,19 +473,11 @@ mod tests {
let mut remote_timeline = RemoteTimeline::new(metadata.clone()); let mut remote_timeline = RemoteTimeline::new(metadata.clone());
remote_timeline.awaits_download = true; remote_timeline.awaits_download = true;
remote_timeline.add_timeline_layers(layer_files.iter().map(|layer| { remote_timeline.add_timeline_layers(
let layer_path = local_timeline_path.join(layer); layer_files
.iter()
// this could also have been LayerFileMetadata::default(), but since in this test we .map(|layer| local_timeline_path.join(layer)),
// don't do the merge operation done by storage_sync::download_timeline_data, it would );
// not be merged back to timeline.
let metadata_from_upload = timeline_upload
.layers_to_upload
.get(&layer_path)
.expect("layer must exist in previously uploaded paths")
.to_owned();
(layer_path, metadata_from_upload)
}));
let download_data = match download_timeline_layers( let download_data = match download_timeline_layers(
harness.conf, harness.conf,
@@ -550,9 +487,9 @@ mod tests {
sync_id, sync_id,
SyncData::new( SyncData::new(
current_retries, current_retries,
LayersDownload::from_skipped_layers(HashSet::from([ LayersDownload {
local_timeline_path.join("layer_to_skip") layers_to_skip: HashSet::from([local_timeline_path.join("layer_to_skip")]),
])), },
), ),
) )
.await .await
@@ -615,7 +552,12 @@ mod tests {
&sync_queue, &sync_queue,
None, None,
sync_id, sync_id,
SyncData::new(0, LayersDownload::from_skipped_layers(HashSet::new())), SyncData::new(
0,
LayersDownload {
layers_to_skip: HashSet::new(),
},
),
) )
.await; .await;
assert!( assert!(
@@ -634,7 +576,12 @@ mod tests {
&sync_queue, &sync_queue,
Some(&not_expecting_download_remote_timeline), Some(&not_expecting_download_remote_timeline),
sync_id, sync_id,
SyncData::new(0, LayersDownload::from_skipped_layers(HashSet::new())), SyncData::new(
0,
LayersDownload {
layers_to_skip: HashSet::new(),
},
),
) )
.await; .await;
assert!( assert!(

View File

@@ -212,8 +212,8 @@ impl RemoteTimelineIndex {
/// Restored index part data about the timeline, stored in the remote index. /// Restored index part data about the timeline, stored in the remote index.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct RemoteTimeline { pub struct RemoteTimeline {
timeline_layers: HashMap<PathBuf, LayerFileMetadata>, timeline_layers: HashSet<PathBuf>,
missing_layers: HashMap<PathBuf, LayerFileMetadata>, missing_layers: HashSet<PathBuf>,
pub metadata: TimelineMetadata, pub metadata: TimelineMetadata,
pub awaits_download: bool, pub awaits_download: bool,
@@ -222,161 +222,62 @@ pub struct RemoteTimeline {
impl RemoteTimeline { impl RemoteTimeline {
pub fn new(metadata: TimelineMetadata) -> Self { pub fn new(metadata: TimelineMetadata) -> Self {
Self { Self {
timeline_layers: HashMap::default(), timeline_layers: HashSet::new(),
missing_layers: HashMap::default(), missing_layers: HashSet::new(),
metadata, metadata,
awaits_download: false, awaits_download: false,
} }
} }
pub fn add_timeline_layers( pub fn add_timeline_layers(&mut self, new_layers: impl IntoIterator<Item = PathBuf>) {
&mut self, self.timeline_layers.extend(new_layers.into_iter());
new_layers: impl IntoIterator<Item = (PathBuf, LayerFileMetadata)>,
) {
self.timeline_layers.extend(new_layers);
} }
pub fn add_upload_failures( pub fn add_upload_failures(&mut self, upload_failures: impl IntoIterator<Item = PathBuf>) {
&mut self, self.missing_layers.extend(upload_failures.into_iter());
upload_failures: impl IntoIterator<Item = (PathBuf, LayerFileMetadata)>,
) {
self.missing_layers.extend(upload_failures);
} }
pub fn remove_layers(&mut self, layers_to_remove: &HashSet<PathBuf>) { pub fn remove_layers(&mut self, layers_to_remove: &HashSet<PathBuf>) {
self.timeline_layers self.timeline_layers
.retain(|layer, _| !layers_to_remove.contains(layer)); .retain(|layer| !layers_to_remove.contains(layer));
self.missing_layers self.missing_layers
.retain(|layer, _| !layers_to_remove.contains(layer)); .retain(|layer| !layers_to_remove.contains(layer));
} }
/// Lists all layer files in the given remote timeline. Omits the metadata file. /// Lists all layer files in the given remote timeline. Omits the metadata file.
pub fn stored_files(&self) -> &HashMap<PathBuf, LayerFileMetadata> { pub fn stored_files(&self) -> &HashSet<PathBuf> {
&self.timeline_layers &self.timeline_layers
} }
/// Merges metadata gathered or verified while downloading the needed layer files into the metadata on
/// the [`RemoteIndex`], so it can be uploaded later.
pub fn merge_metadata_from_downloaded(
&mut self,
downloaded: &HashMap<PathBuf, LayerFileMetadata>,
) {
downloaded.iter().for_each(|(path, metadata)| {
if let Some(upgraded) = self.timeline_layers.get_mut(path) {
upgraded.merge(metadata);
}
});
}
pub fn from_index_part(timeline_path: &Path, index_part: IndexPart) -> anyhow::Result<Self> { pub fn from_index_part(timeline_path: &Path, index_part: IndexPart) -> anyhow::Result<Self> {
let metadata = TimelineMetadata::from_bytes(&index_part.metadata_bytes)?; let metadata = TimelineMetadata::from_bytes(&index_part.metadata_bytes)?;
let default_metadata = &IndexLayerMetadata::default();
let find_metadata = |key: &RelativePath| -> LayerFileMetadata {
index_part
.layer_metadata
.get(key)
.unwrap_or(default_metadata)
.into()
};
Ok(Self { Ok(Self {
timeline_layers: index_part timeline_layers: to_local_paths(timeline_path, index_part.timeline_layers),
.timeline_layers missing_layers: to_local_paths(timeline_path, index_part.missing_layers),
.iter()
.map(|layer_path| (layer_path.as_path(timeline_path), find_metadata(layer_path)))
.collect(),
missing_layers: index_part
.missing_layers
.iter()
.map(|layer_path| (layer_path.as_path(timeline_path), find_metadata(layer_path)))
.collect(),
metadata, metadata,
awaits_download: false, awaits_download: false,
}) })
} }
} }
/// Metadata gathered for each of the layer files.
///
/// Fields have to be `Option`s because a remote [`IndexPart`] can be from a different version, which
/// might have less or more metadata, depending on whether we are upgrading or rolling back an upgrade.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(test, derive(Default))]
pub struct LayerFileMetadata {
file_size: Option<u64>,
}
impl From<&'_ IndexLayerMetadata> for LayerFileMetadata {
fn from(other: &IndexLayerMetadata) -> Self {
LayerFileMetadata {
file_size: other.file_size,
}
}
}
impl LayerFileMetadata {
pub fn new(file_size: u64) -> Self {
LayerFileMetadata {
file_size: Some(file_size),
}
}
pub fn file_size(&self) -> Option<u64> {
self.file_size
}
/// Metadata has holes due to version upgrades. This method is called to upgrade self with the
/// other value.
///
/// This is called on the possibly outdated version.
pub fn merge(&mut self, other: &Self) {
self.file_size = other.file_size.or(self.file_size);
}
}
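// Hypothetical illustration of the merge semantics above: a record parsed from an
// older IndexPart carries no file size (only constructible via Default in tests),
// and merging in a freshly gathered record fills the hole (a newer known size
// wins over an older one).
#[test]
fn merge_fills_missing_file_size() {
    let mut from_old_index = LayerFileMetadata::default();
    from_old_index.merge(&LayerFileMetadata::new(25_600_000));
    assert_eq!(from_old_index.file_size(), Some(25_600_000));
}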
/// Part of the remote index, corresponding to a certain timeline. /// Part of the remote index, corresponding to a certain timeline.
/// Contains the data about all files in the timeline, present remotely and its metadata. /// Contains the data about all files in the timeline, present remotely and its metadata.
///
/// This type needs to be backwards and forwards compatible. When changing the fields,
/// remember to add a test case for the changed version.
#[serde_as] #[serde_as]
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct IndexPart { pub struct IndexPart {
/// Debugging aid describing the version of this type.
#[serde(default)]
version: usize,
/// Each of the layers present on remote storage.
///
/// Additional metadata might exist in `layer_metadata`.
timeline_layers: HashSet<RelativePath>, timeline_layers: HashSet<RelativePath>,
/// Currently not really used by the pageserver; /// Currently not really used by the pageserver;
/// present to manually keep track of the layer files that the pageserver might never retrieve. /// present to manually keep track of the layer files that the pageserver might never retrieve.
/// ///
/// Such "holes" might appear if any upload task was evicted on an error threshold: /// Such "holes" might appear if any upload task was evicted on an error threshold:
/// this layer will only be rescheduled for upload on pageserver restart. /// this layer will only be rescheduled for upload on pageserver restart.
missing_layers: HashSet<RelativePath>, missing_layers: HashSet<RelativePath>,
/// Per layer file metadata, which can be present for a present or missing layer file.
///
/// Older versions of `IndexPart` will not have this property, or will have only part of the metadata
/// that the latest version stores.
#[serde(default)]
layer_metadata: HashMap<RelativePath, IndexLayerMetadata>,
#[serde_as(as = "DisplayFromStr")] #[serde_as(as = "DisplayFromStr")]
disk_consistent_lsn: Lsn, disk_consistent_lsn: Lsn,
metadata_bytes: Vec<u8>, metadata_bytes: Vec<u8>,
} }
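// A minimal sketch (illustrative types, not the real IndexPart) of why the
// `#[serde(default)]` attributes above keep older index_part.json documents
// parseable: fields absent from a v0 file simply fall back to their defaults.
#[derive(serde::Deserialize)]
struct VersionedPartSketch {
    #[serde(default)]
    version: usize,
    timeline_layers: std::collections::HashSet<String>,
    #[serde(default)]
    layer_metadata: std::collections::HashMap<String, u64>,
}

#[test]
fn v0_document_still_parses() {
    let v0 = r#"{"timeline_layers":["layer_1"]}"#;
    let part: VersionedPartSketch = serde_json::from_str(v0).unwrap();
    assert_eq!(part.version, 0);
    assert_eq!(part.timeline_layers.len(), 1);
    assert!(part.layer_metadata.is_empty());
}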
impl IndexPart { impl IndexPart {
/// When adding or modifying any parts of `IndexPart`, increment the version so that it can be
/// used to understand later versions.
///
/// Version is currently informative only.
const LATEST_VERSION: usize = 1;
pub const FILE_NAME: &'static str = "index_part.json"; pub const FILE_NAME: &'static str = "index_part.json";
#[cfg(test)] #[cfg(test)]
@@ -387,10 +288,8 @@ impl IndexPart {
metadata_bytes: Vec<u8>, metadata_bytes: Vec<u8>,
) -> Self { ) -> Self {
Self { Self {
version: Self::LATEST_VERSION,
timeline_layers, timeline_layers,
missing_layers, missing_layers,
layer_metadata: HashMap::default(),
disk_consistent_lsn, disk_consistent_lsn,
metadata_bytes, metadata_bytes,
} }
@@ -405,68 +304,35 @@ impl IndexPart {
remote_timeline: RemoteTimeline, remote_timeline: RemoteTimeline,
) -> anyhow::Result<Self> { ) -> anyhow::Result<Self> {
let metadata_bytes = remote_timeline.metadata.to_bytes()?; let metadata_bytes = remote_timeline.metadata.to_bytes()?;
let mut layer_metadata = HashMap::new();
let mut missing_layers = HashSet::new();
separate_paths_and_metadata(
timeline_path,
&remote_timeline.missing_layers,
&mut missing_layers,
&mut layer_metadata,
)
.context("Failed to convert missing layers' paths to relative ones")?;
let mut timeline_layers = HashSet::new();
separate_paths_and_metadata(
timeline_path,
&remote_timeline.timeline_layers,
&mut timeline_layers,
&mut layer_metadata,
)
.context("Failed to convert timeline layers' paths to relative ones")?;
Ok(Self { Ok(Self {
version: Self::LATEST_VERSION, timeline_layers: to_relative_paths(timeline_path, remote_timeline.timeline_layers)
timeline_layers, .context("Failed to convert timeline layers' paths to relative ones")?,
missing_layers, missing_layers: to_relative_paths(timeline_path, remote_timeline.missing_layers)
layer_metadata, .context("Failed to convert missing layers' paths to relative ones")?,
disk_consistent_lsn: remote_timeline.metadata.disk_consistent_lsn(), disk_consistent_lsn: remote_timeline.metadata.disk_consistent_lsn(),
metadata_bytes, metadata_bytes,
}) })
} }
} }
/// Serialized form of [`LayerFileMetadata`]. fn to_local_paths(
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)]
pub struct IndexLayerMetadata {
file_size: Option<u64>,
}
impl From<&'_ LayerFileMetadata> for IndexLayerMetadata {
fn from(other: &'_ LayerFileMetadata) -> Self {
IndexLayerMetadata {
file_size: other.file_size,
}
}
}
fn separate_paths_and_metadata(
timeline_path: &Path, timeline_path: &Path,
input: &HashMap<PathBuf, LayerFileMetadata>, paths: impl IntoIterator<Item = RelativePath>,
output: &mut HashSet<RelativePath>, ) -> HashSet<PathBuf> {
layer_metadata: &mut HashMap<RelativePath, IndexLayerMetadata>, paths
) -> anyhow::Result<()> { .into_iter()
for (path, metadata) in input { .map(|path| path.as_path(timeline_path))
let rel_path = RelativePath::new(timeline_path, path)?; .collect()
let metadata = IndexLayerMetadata::from(metadata); }
layer_metadata.insert(rel_path.clone(), metadata); fn to_relative_paths(
output.insert(rel_path); timeline_path: &Path,
} paths: impl IntoIterator<Item = PathBuf>,
Ok(()) ) -> anyhow::Result<HashSet<RelativePath>> {
paths
.into_iter()
.map(|path| RelativePath::new(timeline_path, path))
.collect()
} }
#[cfg(test)] #[cfg(test)]
@@ -491,13 +357,13 @@ mod tests {
DEFAULT_PG_VERSION, DEFAULT_PG_VERSION,
); );
let remote_timeline = RemoteTimeline { let remote_timeline = RemoteTimeline {
timeline_layers: HashMap::from([ timeline_layers: HashSet::from([
(timeline_path.join("layer_1"), LayerFileMetadata::new(1)), timeline_path.join("layer_1"),
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)), timeline_path.join("layer_2"),
]), ]),
missing_layers: HashMap::from([ missing_layers: HashSet::from([
(timeline_path.join("missing_1"), LayerFileMetadata::new(3)), timeline_path.join("missing_1"),
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)), timeline_path.join("missing_2"),
]), ]),
metadata: metadata.clone(), metadata: metadata.clone(),
awaits_download: false, awaits_download: false,
@@ -619,13 +485,13 @@ mod tests {
let conversion_result = IndexPart::from_remote_timeline( let conversion_result = IndexPart::from_remote_timeline(
&timeline_path, &timeline_path,
RemoteTimeline { RemoteTimeline {
timeline_layers: HashMap::from([ timeline_layers: HashSet::from([
(PathBuf::from("bad_path"), LayerFileMetadata::new(1)), PathBuf::from("bad_path"),
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)), timeline_path.join("layer_2"),
]), ]),
missing_layers: HashMap::from([ missing_layers: HashSet::from([
(timeline_path.join("missing_1"), LayerFileMetadata::new(3)), timeline_path.join("missing_1"),
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)), timeline_path.join("missing_2"),
]), ]),
metadata: metadata.clone(), metadata: metadata.clone(),
awaits_download: false, awaits_download: false,
@@ -636,13 +502,13 @@ mod tests {
let conversion_result = IndexPart::from_remote_timeline( let conversion_result = IndexPart::from_remote_timeline(
&timeline_path, &timeline_path,
RemoteTimeline { RemoteTimeline {
timeline_layers: HashMap::from([ timeline_layers: HashSet::from([
(timeline_path.join("layer_1"), LayerFileMetadata::new(1)), timeline_path.join("layer_1"),
(timeline_path.join("layer_2"), LayerFileMetadata::new(2)), timeline_path.join("layer_2"),
]), ]),
missing_layers: HashMap::from([ missing_layers: HashSet::from([
(PathBuf::from("bad_path"), LayerFileMetadata::new(3)), PathBuf::from("bad_path"),
(timeline_path.join("missing_2"), LayerFileMetadata::new(4)), timeline_path.join("missing_2"),
]), ]),
metadata, metadata,
awaits_download: false, awaits_download: false,
@@ -650,63 +516,4 @@ mod tests {
); );
assert!(conversion_result.is_err(), "Should not be able to convert metadata with missing layer paths that are not in the timeline directory"); assert!(conversion_result.is_err(), "Should not be able to convert metadata with missing layer paths that are not in the timeline directory");
} }
#[test]
fn v0_indexpart_is_parsed() {
let example = r#"{
"timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
"missing_layers":["not_a_real_layer_but_adding_coverage"],
"disk_consistent_lsn":"0/16960E8",
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
}"#;
let expected = IndexPart {
version: 0,
timeline_layers: [RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned())].into_iter().collect(),
missing_layers: [RelativePath("not_a_real_layer_but_adding_coverage".to_owned())].into_iter().collect(),
layer_metadata: HashMap::default(),
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
metadata_bytes: [113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0].to_vec(),
};
let part = serde_json::from_str::<IndexPart>(example).unwrap();
assert_eq!(part, expected);
}
#[test]
fn v1_indexpart_is_parsed() {
let example = r#"{
"version":1,
"timeline_layers":["000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9"],
"missing_layers":["not_a_real_layer_but_adding_coverage"],
"layer_metadata":{
"000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9": { "file_size": 25600000 },
"not_a_real_layer_but_adding_coverage": { "file_size": 9007199254741001 }
},
"disk_consistent_lsn":"0/16960E8",
"metadata_bytes":[113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
}"#;
let expected = IndexPart {
// note this is not verified and could be anything, but exists to help humans debug; could be the git version instead?
version: 1,
timeline_layers: [RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned())].into_iter().collect(),
missing_layers: [RelativePath("not_a_real_layer_but_adding_coverage".to_owned())].into_iter().collect(),
layer_metadata: HashMap::from([
(RelativePath("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__0000000001696070-00000000016960E9".to_owned()), IndexLayerMetadata {
file_size: Some(25600000),
}),
(RelativePath("not_a_real_layer_but_adding_coverage".to_owned()), IndexLayerMetadata {
// serde_json should always parse this, but the value might become a double when
// processed with e.g. jq.
file_size: Some(9007199254741001),
})
]),
disk_consistent_lsn: "0/16960E8".parse::<Lsn>().unwrap(),
metadata_bytes: [113,11,159,210,0,54,0,4,0,0,0,0,1,105,96,232,1,0,0,0,0,1,105,96,112,0,0,0,0,0,0,0,0,0,0,0,0,0,1,105,96,112,0,0,0,0,1,105,96,112,0,0,0,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0].to_vec(),
};
let part = serde_json::from_str::<IndexPart>(example).unwrap();
assert_eq!(part, expected);
}
} }

View File

@@ -69,25 +69,14 @@ pub(super) async fn upload_timeline_layers<'a>(
.map(|meta| meta.disk_consistent_lsn()); .map(|meta| meta.disk_consistent_lsn());
let already_uploaded_layers = remote_timeline let already_uploaded_layers = remote_timeline
.map(|timeline| { .map(|timeline| timeline.stored_files())
timeline .cloned()
.stored_files()
.keys()
.cloned()
.collect::<std::collections::HashSet<_>>()
})
.unwrap_or_default(); .unwrap_or_default();
let layers_to_upload = upload let layers_to_upload = upload
.layers_to_upload .layers_to_upload
.iter() .difference(&already_uploaded_layers)
.filter_map(|(k, v)| { .cloned()
if !already_uploaded_layers.contains(k) {
Some((k.to_owned(), v.to_owned()))
} else {
None
}
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if layers_to_upload.is_empty() { if layers_to_upload.is_empty() {
@@ -109,7 +98,7 @@ pub(super) async fn upload_timeline_layers<'a>(
let mut upload_tasks = layers_to_upload let mut upload_tasks = layers_to_upload
.into_iter() .into_iter()
.map(|(source_path, known_metadata)| async move { .map(|source_path| async move {
let source_file = match fs::File::open(&source_path).await.with_context(|| { let source_file = match fs::File::open(&source_path).await.with_context(|| {
format!( format!(
"Failed to upen a source file for layer '{}'", "Failed to upen a source file for layer '{}'",
@@ -120,7 +109,7 @@ pub(super) async fn upload_timeline_layers<'a>(
Err(e) => return Err(UploadError::MissingLocalFile(source_path, e)), Err(e) => return Err(UploadError::MissingLocalFile(source_path, e)),
}; };
let fs_size = source_file let source_size = source_file
.metadata() .metadata()
.await .await
.with_context(|| { .with_context(|| {
@@ -130,24 +119,10 @@ pub(super) async fn upload_timeline_layers<'a>(
) )
}) })
.map_err(UploadError::Other)? .map_err(UploadError::Other)?
.len(); .len() as usize;
// FIXME: this looks bad
if let Some(metadata_size) = known_metadata.file_size() {
if metadata_size != fs_size {
return Err(UploadError::Other(anyhow::anyhow!(
"File {source_path:?} has its current FS size {fs_size} diferent from initially determined {metadata_size}"
)));
}
} else {
// this is a silly state we would like to avoid
}
let fs_size = usize::try_from(fs_size).with_context(|| format!("File {source_path:?} size {fs_size} could not be converted to usize"))
.map_err(UploadError::Other)?;
match storage match storage
.upload_storage_object(Box::new(source_file), fs_size, &source_path) .upload_storage_object(Box::new(source_file), source_size, &source_path)
.await .await
.with_context(|| format!("Failed to upload layer file for {sync_id}")) .with_context(|| format!("Failed to upload layer file for {sync_id}"))
{ {
@@ -161,11 +136,8 @@ pub(super) async fn upload_timeline_layers<'a>(
while let Some(upload_result) = upload_tasks.next().await { while let Some(upload_result) = upload_tasks.next().await {
match upload_result { match upload_result {
Ok(uploaded_path) => { Ok(uploaded_path) => {
let metadata = upload upload.layers_to_upload.remove(&uploaded_path);
.layers_to_upload upload.uploaded_layers.insert(uploaded_path);
.remove(&uploaded_path)
.expect("metadata should always exist, assuming no double uploads");
upload.uploaded_layers.insert(uploaded_path, metadata);
} }
Err(e) => match e { Err(e) => match e {
UploadError::Other(e) => { UploadError::Other(e) => {
@@ -290,7 +262,7 @@ mod tests {
assert_eq!( assert_eq!(
upload upload
.uploaded_layers .uploaded_layers
.keys() .iter()
.cloned() .cloned()
.collect::<BTreeSet<_>>(), .collect::<BTreeSet<_>>(),
layer_files layer_files
@@ -385,7 +357,7 @@ mod tests {
assert_eq!( assert_eq!(
upload upload
.uploaded_layers .uploaded_layers
.keys() .iter()
.cloned() .cloned()
.collect::<BTreeSet<_>>(), .collect::<BTreeSet<_>>(),
layer_files layer_files
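The upload hunks above drop the per-layer metadata map in favour of plain path sets, so "what still needs uploading" becomes a set difference. A small sketch with hypothetical inputs (names are illustrative, not the real pageserver types):

use std::collections::HashSet;
use std::path::PathBuf;

// Stand-ins for `upload.layers_to_upload` and the remote timeline's stored files.
fn layers_to_upload(local: &HashSet<PathBuf>, remote: &HashSet<PathBuf>) -> Vec<PathBuf> {
    // Everything present locally but not yet uploaded, cloned into owned
    // paths for the upload futures, as in the new code.
    local.difference(remote).cloned().collect()
}

fn main() {
    let local = HashSet::from([PathBuf::from("layer_a"), PathBuf::from("layer_b")]);
    let remote = HashSet::from([PathBuf::from("layer_a")]);
    assert_eq!(layers_to_upload(&local, &remote), vec![PathBuf::from("layer_b")]);
}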

View File

@@ -59,14 +59,13 @@ pub mod block_io;
mod delta_layer; mod delta_layer;
mod disk_btree; mod disk_btree;
pub(crate) mod ephemeral_file; pub(crate) mod ephemeral_file;
pub mod filename; mod filename;
mod image_layer; mod image_layer;
mod inmemory_layer; mod inmemory_layer;
pub mod layer_map; mod layer_map;
pub mod metadata; pub mod metadata;
mod par_fsync; mod par_fsync;
pub mod storage_layer; mod storage_layer;
mod timeline; mod timeline;
@@ -145,18 +144,17 @@ impl Tenant {
/// Lists timelines the tenant contains. /// Lists timelines the tenant contains.
/// Up to the tenant's implementation to omit certain timelines that are not considered ready for use. /// Up to the tenant's implementation to omit certain timelines that are not considered ready for use.
pub fn list_timelines(&self) -> Vec<Arc<Timeline>> { pub fn list_timelines(&self) -> Vec<(TimelineId, Arc<Timeline>)> {
self.timelines self.timelines
.lock() .lock()
.unwrap() .unwrap()
.values() .iter()
.map(Arc::clone) .map(|(timeline_id, timeline_entry)| (*timeline_id, Arc::clone(timeline_entry)))
.collect() .collect()
} }
/// This is used to create the initial 'main' timeline during bootstrapping, /// Create a new, empty timeline. The caller is responsible for loading data into it
/// or when importing a new base backup. The caller is expected to load an /// Initdb lsn is provided for timeline impl to be able to perform checks for some operations against it.
/// initial image of the datadir to the new timeline after this.
pub fn create_empty_timeline( pub fn create_empty_timeline(
&self, &self,
new_timeline_id: TimelineId, new_timeline_id: TimelineId,
@@ -347,7 +345,7 @@ impl Tenant {
ensure!( ensure!(
!children_exist, !children_exist,
"Cannot delete timeline which has child timelines" "Cannot detach timeline which has child timelines"
); );
let timeline_entry = match timelines.entry(timeline_id) { let timeline_entry = match timelines.entry(timeline_id) {
Entry::Occupied(e) => e, Entry::Occupied(e) => e,
@@ -908,7 +906,6 @@ impl Tenant {
Ok(totals) Ok(totals)
} }
/// Branch an existing timeline
fn branch_timeline( fn branch_timeline(
&self, &self,
src: TimelineId, src: TimelineId,
@@ -984,7 +981,7 @@ impl Tenant {
dst_prev, dst_prev,
Some(src), Some(src),
start_lsn, start_lsn,
*src_timeline.latest_gc_cutoff_lsn.read(), // FIXME: should we hold onto this guard longer? *src_timeline.latest_gc_cutoff_lsn.read(),
src_timeline.initdb_lsn, src_timeline.initdb_lsn,
src_timeline.pg_version, src_timeline.pg_version,
); );
@@ -1097,22 +1094,12 @@ impl Tenant {
/// Create the cluster temporarily in 'initdbpath' directory inside the repository /// Create the cluster temporarily in 'initdbpath' directory inside the repository
/// to get bootstrap data for timeline initialization. /// to get bootstrap data for timeline initialization.
fn run_initdb( fn run_initdb(conf: &'static PageServerConf, initdbpath: &Path, pg_version: u32) -> Result<()> {
conf: &'static PageServerConf, info!("running initdb in {}... ", initdbpath.display());
initdb_target_dir: &Path,
pg_version: u32,
) -> Result<()> {
let initdb_bin_path = conf.pg_bin_dir(pg_version).join("initdb");
let initdb_lib_dir = conf.pg_lib_dir(pg_version);
info!(
"running {} in {}, libdir: {}",
initdb_bin_path.display(),
initdb_target_dir.display(),
initdb_lib_dir.display(),
);
let initdb_output = Command::new(initdb_bin_path) let initdb_path = conf.pg_bin_dir(pg_version).join("initdb");
.args(&["-D", &initdb_target_dir.to_string_lossy()]) let initdb_output = Command::new(initdb_path)
.args(&["-D", &initdbpath.to_string_lossy()])
.args(&["-U", &conf.superuser]) .args(&["-U", &conf.superuser])
.args(&["-E", "utf8"]) .args(&["-E", "utf8"])
.arg("--no-instructions") .arg("--no-instructions")
@@ -1120,8 +1107,8 @@ fn run_initdb(
// so no need to fsync it // so no need to fsync it
.arg("--no-sync") .arg("--no-sync")
.env_clear() .env_clear()
.env("LD_LIBRARY_PATH", &initdb_lib_dir) .env("LD_LIBRARY_PATH", conf.pg_lib_dir(pg_version))
.env("DYLD_LIBRARY_PATH", &initdb_lib_dir) .env("DYLD_LIBRARY_PATH", conf.pg_lib_dir(pg_version))
.stdout(Stdio::null()) .stdout(Stdio::null())
.output() .output()
.context("failed to execute initdb")?; .context("failed to execute initdb")?;

View File

@@ -556,7 +556,7 @@ impl DeltaLayer {
/// Create a DeltaLayer struct representing an existing file on disk. /// Create a DeltaLayer struct representing an existing file on disk.
/// ///
/// This variant is only used for debugging purposes, by the 'pageserver_binutils' binary. /// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
pub fn new_for_path<F>(path: &Path, file: F) -> Result<Self> pub fn new_for_path<F>(path: &Path, file: F) -> Result<Self>
where where
F: FileExt, F: FileExt,

View File

@@ -177,7 +177,7 @@ impl fmt::Display for ImageFileName {
/// ///
/// This is used by DeltaLayer and ImageLayer. Normally, this holds a reference to the /// This is used by DeltaLayer and ImageLayer. Normally, this holds a reference to the
/// global config, and paths to layer files are constructed using the tenant/timeline /// global config, and paths to layer files are constructed using the tenant/timeline
/// path from the config. But in the 'pageserver_binutils' binary, we need to construct a Layer /// path from the config. But in the 'dump_layerfile' binary, we need to construct a Layer
/// struct for a file on disk, without having a page server running, so that we have no /// struct for a file on disk, without having a page server running, so that we have no
/// config. In that case, we use the Path variant to hold the full path to the file on /// config. In that case, we use the Path variant to hold the full path to the file on
/// disk. /// disk.

View File

@@ -357,7 +357,7 @@ impl ImageLayer {
/// Create an ImageLayer struct representing an existing file on disk. /// Create an ImageLayer struct representing an existing file on disk.
/// ///
/// This variant is only used for debugging purposes, by the 'pageserver_binutils' binary. /// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
pub fn new_for_path<F>(path: &Path, file: F) -> Result<ImageLayer> pub fn new_for_path<F>(path: &Path, file: F) -> Result<ImageLayer>
where where
F: std::os::unix::prelude::FileExt, F: std::os::unix::prelude::FileExt,

View File

@@ -15,19 +15,25 @@ use crate::repository::Key;
use crate::tenant::inmemory_layer::InMemoryLayer; use crate::tenant::inmemory_layer::InMemoryLayer;
use crate::tenant::storage_layer::Layer; use crate::tenant::storage_layer::Layer;
use crate::tenant::storage_layer::{range_eq, range_overlaps}; use crate::tenant::storage_layer::{range_eq, range_overlaps};
use amplify_num::i256;
use anyhow::Result; use anyhow::Result;
use num_traits::identities::{One, Zero}; use std::collections::{BTreeMap, VecDeque};
use num_traits::{Bounded, Num, Signed};
use rstar::{RTree, RTreeObject, AABB};
use std::cmp::Ordering;
use std::collections::VecDeque;
use std::ops::Range; use std::ops::Range;
use std::ops::{Add, Div, Mul, Neg, Rem, Sub};
use std::sync::Arc; use std::sync::Arc;
use tracing::*; use tracing::*;
use utils::lsn::Lsn; use utils::lsn::Lsn;
#[derive(Debug, Clone, PartialEq, PartialOrd, Ord, Eq)]
struct BTreeKey {
lsn: Lsn,
seq: usize,
}
impl BTreeKey {
fn new(lsn: Lsn) -> BTreeKey {
BTreeKey { lsn, seq: 0 }
}
}
/// ///
/// LayerMap tracks what layers exist on a timeline. /// LayerMap tracks what layers exist on a timeline.
/// ///
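The new `BTreeKey` orders layers by start LSN, with an insertion sequence number as a tie-breaker, so the map can be range-scanned by LSN. A self-contained sketch under those assumptions (simplified `Lsn`, string labels standing in for `Arc<dyn Layer>`):

use std::collections::BTreeMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Lsn(u64);

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct BTreeKey {
    lsn: Lsn,
    seq: usize, // tie-breaker so two layers starting at the same LSN both fit
}

fn main() {
    let mut historic: BTreeMap<BTreeKey, &str> = BTreeMap::new();
    historic.insert(BTreeKey { lsn: Lsn(10), seq: 0 }, "delta 10..20");
    historic.insert(BTreeKey { lsn: Lsn(10), seq: 1 }, "image at 10");
    historic.insert(BTreeKey { lsn: Lsn(30), seq: 2 }, "delta 30..40");

    // Range scan up to (but excluding) end_lsn = 30, newest first: the same
    // access pattern `search` uses via `iter.next_back()`.
    let start = BTreeKey { lsn: Lsn(0), seq: 0 };
    let end = BTreeKey { lsn: Lsn(30), seq: 0 };
    let newest_first: Vec<_> = historic.range(start..end).rev().map(|(_, v)| *v).collect();
    assert_eq!(newest_first, vec!["image at 10", "delta 10..20"]);
}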
@@ -53,175 +59,14 @@ pub struct LayerMap {
pub frozen_layers: VecDeque<Arc<InMemoryLayer>>, pub frozen_layers: VecDeque<Arc<InMemoryLayer>>,
/// All the historic layers are kept here /// All the historic layers are kept here
historic_layers: RTree<LayerRTreeObject>, historic_layers: BTreeMap<BTreeKey, Arc<dyn Layer>>,
layers_seqno: usize,
/// L0 layers have key range Key::MIN..Key::MAX, and locating them using R-Tree search is very inefficient. /// L0 layers have key range Key::MIN..Key::MAX, and locating them using R-Tree search is very inefficient.
/// So L0 layers are held in l0_delta_layers vector, in addition to the R-tree. /// So L0 layers are held in l0_delta_layers vector, in addition to the R-tree.
l0_delta_layers: Vec<Arc<dyn Layer>>, l0_delta_layers: Vec<Arc<dyn Layer>>,
} }
struct LayerRTreeObject {
layer: Arc<dyn Layer>,
envelope: AABB<[IntKey; 2]>,
}
// Representation of Key as numeric type.
// We cannot use the native i128 implementation, because rstar::RTree
// doesn't properly handle integer overflow during area calculation: sum(Xi*Yi).
// Overflow would cause a panic in debug mode and an incorrect area calculation in release mode,
// which leads to a non-optimally balanced R-Tree (but doesn't affect the correctness of R-Tree operations).
// By using i256 as the type, even though all the actual values would fit in i128, we can be
// sure that multiplication doesn't overflow.
//
#[derive(Clone, PartialEq, Eq, PartialOrd, Debug)]
struct IntKey(i256);
impl Copy for IntKey {}
impl IntKey {
fn from(i: i128) -> Self {
IntKey(i256::from(i))
}
}
impl Bounded for IntKey {
fn min_value() -> Self {
IntKey(i256::MIN)
}
fn max_value() -> Self {
IntKey(i256::MAX)
}
}
impl Signed for IntKey {
fn is_positive(&self) -> bool {
self.0 > i256::ZERO
}
fn is_negative(&self) -> bool {
self.0 < i256::ZERO
}
fn signum(&self) -> Self {
match self.0.cmp(&i256::ZERO) {
Ordering::Greater => IntKey(i256::ONE),
Ordering::Less => IntKey(-i256::ONE),
Ordering::Equal => IntKey(i256::ZERO),
}
}
fn abs(&self) -> Self {
IntKey(self.0.abs())
}
fn abs_sub(&self, other: &Self) -> Self {
if self.0 <= other.0 {
IntKey(i256::ZERO)
} else {
IntKey(self.0 - other.0)
}
}
}
impl Neg for IntKey {
type Output = Self;
fn neg(self) -> Self::Output {
IntKey(-self.0)
}
}
impl Rem for IntKey {
type Output = Self;
fn rem(self, rhs: Self) -> Self::Output {
IntKey(self.0 % rhs.0)
}
}
impl Div for IntKey {
type Output = Self;
fn div(self, rhs: Self) -> Self::Output {
IntKey(self.0 / rhs.0)
}
}
impl Add for IntKey {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
IntKey(self.0 + rhs.0)
}
}
impl Sub for IntKey {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
IntKey(self.0 - rhs.0)
}
}
impl Mul for IntKey {
type Output = Self;
fn mul(self, rhs: Self) -> Self::Output {
IntKey(self.0 * rhs.0)
}
}
impl One for IntKey {
fn one() -> Self {
IntKey(i256::ONE)
}
}
impl Zero for IntKey {
fn zero() -> Self {
IntKey(i256::ZERO)
}
fn is_zero(&self) -> bool {
self.0 == i256::ZERO
}
}
impl Num for IntKey {
type FromStrRadixErr = <i128 as Num>::FromStrRadixErr;
fn from_str_radix(str: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
Ok(IntKey(i256::from(i128::from_str_radix(str, radix)?)))
}
}
impl PartialEq for LayerRTreeObject {
fn eq(&self, other: &Self) -> bool {
// FIXME: ptr_eq might fail to return true for 'dyn'
// references. Clippy complains about this. In practice it
// seems to work, the assertion below would be triggered
// otherwise but this ought to be fixed.
#[allow(clippy::vtable_address_comparisons)]
Arc::ptr_eq(&self.layer, &other.layer)
}
}
impl RTreeObject for LayerRTreeObject {
type Envelope = AABB<[IntKey; 2]>;
fn envelope(&self) -> Self::Envelope {
self.envelope
}
}
impl LayerRTreeObject {
fn new(layer: Arc<dyn Layer>) -> Self {
let key_range = layer.get_key_range();
let lsn_range = layer.get_lsn_range();
let envelope = AABB::from_corners(
[
IntKey::from(key_range.start.to_i128()),
IntKey::from(lsn_range.start.0 as i128),
],
[
IntKey::from(key_range.end.to_i128() - 1),
IntKey::from(lsn_range.end.0 as i128 - 1),
], // AABB::upper is inclusive, while `key_range.end` and `lsn_range.end` are exclusive
);
LayerRTreeObject { layer, envelope }
}
}
/// Return value of LayerMap::search /// Return value of LayerMap::search
pub struct SearchResult { pub struct SearchResult {
pub layer: Arc<dyn Layer>, pub layer: Arc<dyn Layer>,
@@ -244,23 +89,17 @@ impl LayerMap {
// linear search // linear search
// Find the latest image layer that covers the given key // Find the latest image layer that covers the given key
let mut latest_img: Option<Arc<dyn Layer>> = None; let mut latest_img: Option<Arc<dyn Layer>> = None;
let mut latest_img_lsn: Option<Lsn> = None; let mut latest_img_lsn = Lsn(0);
let envelope = AABB::from_corners( let mut iter = self
[IntKey::from(key.to_i128()), IntKey::from(0i128)],
[
IntKey::from(key.to_i128()),
IntKey::from(end_lsn.0 as i128 - 1),
],
);
for e in self
.historic_layers .historic_layers
.locate_in_envelope_intersecting(&envelope) .range(BTreeKey::new(Lsn(0))..BTreeKey::new(end_lsn));
{ while let Some((_key, l)) = iter.next_back() {
let l = &e.layer;
if l.is_incremental() { if l.is_incremental() {
continue; continue;
} }
assert!(l.get_key_range().contains(&key)); if !l.get_key_range().contains(&key) {
continue;
}
let img_lsn = l.get_lsn_range().start; let img_lsn = l.get_lsn_range().start;
assert!(img_lsn < end_lsn); assert!(img_lsn < end_lsn);
if Lsn(img_lsn.0 + 1) == end_lsn { if Lsn(img_lsn.0 + 1) == end_lsn {
@@ -270,23 +109,23 @@ impl LayerMap {
lsn_floor: img_lsn, lsn_floor: img_lsn,
})); }));
} }
if img_lsn > latest_img_lsn.unwrap_or(Lsn(0)) { latest_img = Some(Arc::clone(l));
latest_img = Some(Arc::clone(l)); latest_img_lsn = img_lsn;
latest_img_lsn = Some(img_lsn); break;
}
} }
// Search the delta layers // Search the delta layers
let mut latest_delta: Option<Arc<dyn Layer>> = None; let mut latest_delta: Option<Arc<dyn Layer>> = None;
for e in self let mut iter = self
.historic_layers .historic_layers
.locate_in_envelope_intersecting(&envelope) .range(BTreeKey::new(Lsn(0))..BTreeKey::new(end_lsn));
{ while let Some((_key, l)) = iter.next_back() {
let l = &e.layer;
if !l.is_incremental() { if !l.is_incremental() {
continue; continue;
} }
assert!(l.get_key_range().contains(&key)); if !l.get_key_range().contains(&key) {
continue;
}
if l.get_lsn_range().start >= end_lsn { if l.get_lsn_range().start >= end_lsn {
info!( info!(
"Candidate delta layer {}..{} is too new for lsn {}", "Candidate delta layer {}..{} is too new for lsn {}",
@@ -296,6 +135,9 @@ impl LayerMap {
); );
} }
assert!(l.get_lsn_range().start < end_lsn); assert!(l.get_lsn_range().start < end_lsn);
if l.get_lsn_range().end <= latest_img_lsn {
continue;
}
if l.get_lsn_range().end >= end_lsn { if l.get_lsn_range().end >= end_lsn {
// this layer contains the requested point in the key/lsn space. // this layer contains the requested point in the key/lsn space.
// No need to search any further // No need to search any further
@@ -321,10 +163,7 @@ impl LayerMap {
"found (old) layer {} for request on {key} at {end_lsn}", "found (old) layer {} for request on {key} at {end_lsn}",
l.filename().display(), l.filename().display(),
); );
let lsn_floor = std::cmp::max( let lsn_floor = std::cmp::max(Lsn(latest_img_lsn.0 + 1), l.get_lsn_range().start);
Lsn(latest_img_lsn.unwrap_or(Lsn(0)).0 + 1),
l.get_lsn_range().start,
);
Ok(Some(SearchResult { Ok(Some(SearchResult {
lsn_floor, lsn_floor,
layer: l, layer: l,
@@ -332,7 +171,7 @@ impl LayerMap {
} else if let Some(l) = latest_img { } else if let Some(l) = latest_img {
trace!("found img layer and no deltas for request on {key} at {end_lsn}"); trace!("found img layer and no deltas for request on {key} at {end_lsn}");
Ok(Some(SearchResult { Ok(Some(SearchResult {
lsn_floor: latest_img_lsn.unwrap(), lsn_floor: latest_img_lsn,
layer: l, layer: l,
})) }))
} else { } else {
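The two passes above boil down to: take the newest image at or below the request, take the newest overlapping delta above it, and set the LSN floor to max(image LSN + 1, delta start). A simplified, hypothetical sketch of just that decision (not the real search code, which also filters by key range and incremental flag):

#[derive(Debug, PartialEq)]
enum Pick {
    Delta { lsn_floor: u64 },
    Image { lsn_floor: u64 },
    None,
}

// `latest_delta` is assumed to already be newer than the image, as guaranteed
// by the `lsn_range().end <= latest_img_lsn` skip in the hunk above.
fn pick(latest_img_lsn: Option<u64>, latest_delta: Option<(u64, u64)>) -> Pick {
    match latest_delta {
        Some((start, _end)) => Pick::Delta {
            // Replay stops just above the image, or at the delta's own start.
            lsn_floor: std::cmp::max(latest_img_lsn.map_or(0, |l| l + 1), start),
        },
        None => match latest_img_lsn {
            Some(lsn) => Pick::Image { lsn_floor: lsn },
            None => Pick::None,
        },
    }
}

fn main() {
    // Image at LSN 100, delta covering 90..120: floor is 101, read the delta.
    assert_eq!(pick(Some(100), Some((90, 120))), Pick::Delta { lsn_floor: 101 });
    // Only an image: read it back to its own LSN.
    assert_eq!(pick(Some(100), None), Pick::Image { lsn_floor: 100 });
}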
@@ -348,7 +187,14 @@ impl LayerMap {
if layer.get_key_range() == (Key::MIN..Key::MAX) { if layer.get_key_range() == (Key::MIN..Key::MAX) {
self.l0_delta_layers.push(layer.clone()); self.l0_delta_layers.push(layer.clone());
} }
self.historic_layers.insert(LayerRTreeObject::new(layer)); self.historic_layers.insert(
BTreeKey {
lsn: layer.get_lsn_range().start,
seq: self.layers_seqno,
},
layer,
);
self.layers_seqno += 1;
NUM_ONDISK_LAYERS.inc(); NUM_ONDISK_LAYERS.inc();
} }
@@ -370,10 +216,26 @@ impl LayerMap {
.retain(|other| !Arc::ptr_eq(other, &layer)); .retain(|other| !Arc::ptr_eq(other, &layer));
assert_eq!(self.l0_delta_layers.len(), len_before - 1); assert_eq!(self.l0_delta_layers.len(), len_before - 1);
} }
assert!(self let len_before = self.historic_layers.len();
.historic_layers #[allow(clippy::vtable_address_comparisons)]
.remove(&LayerRTreeObject::new(layer)) self.historic_layers
.is_some()); .retain(|_key, other| !Arc::ptr_eq(other, &layer));
if self.historic_layers.len() != len_before - 1 {
assert!(self.historic_layers.len() == len_before);
error!(
"Failed to remove {} layer: {}..{}__{}..{}",
if layer.is_incremental() {
"inremental"
} else {
"image"
},
layer.get_key_range().start,
layer.get_key_range().end,
layer.get_lsn_range().start,
layer.get_lsn_range().end
);
}
assert!(self.historic_layers.len() == len_before - 1);
NUM_ONDISK_LAYERS.dec(); NUM_ONDISK_LAYERS.dec();
} }
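Removal above relies on `retain` plus `Arc::ptr_eq` to drop exactly one entry by pointer identity; the clippy `vtable_address_comparisons` caveat applies because the real map holds `Arc<dyn Layer>`. A minimal sketch with a concrete `Arc<str>`, where that caveat does not arise:

use std::collections::BTreeMap;
use std::sync::Arc;

fn main() {
    // Remove one entry by pointer identity, the way the hunk above drops a
    // layer from `historic_layers`.
    let target: Arc<str> = Arc::from("layer_b");
    let mut map: BTreeMap<u32, Arc<str>> = BTreeMap::new();
    map.insert(0, Arc::from("layer_a"));
    map.insert(1, Arc::clone(&target));

    let len_before = map.len();
    map.retain(|_k, v| !Arc::ptr_eq(v, &target));
    // Exactly one entry must disappear, mirroring the assertion in the diff.
    assert_eq!(map.len(), len_before - 1);
}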
@@ -390,21 +252,10 @@ impl LayerMap {
loop { loop {
let mut made_progress = false; let mut made_progress = false;
let envelope = AABB::from_corners( for (_key, l) in self
[
IntKey::from(range_remain.start.to_i128()),
IntKey::from(lsn_range.start.0 as i128),
],
[
IntKey::from(range_remain.end.to_i128() - 1),
IntKey::from(lsn_range.end.0 as i128 - 1),
],
);
for e in self
.historic_layers .historic_layers
.locate_in_envelope_intersecting(&envelope) .range(BTreeKey::new(lsn_range.start)..BTreeKey::new(lsn_range.end))
{ {
let l = &e.layer;
if l.is_incremental() { if l.is_incremental() {
continue; continue;
} }
@@ -427,39 +278,30 @@ impl LayerMap {
} }
pub fn iter_historic_layers(&self) -> impl '_ + Iterator<Item = Arc<dyn Layer>> { pub fn iter_historic_layers(&self) -> impl '_ + Iterator<Item = Arc<dyn Layer>> {
self.historic_layers.iter().map(|e| e.layer.clone()) self.historic_layers
.iter()
.map(|(_key, layer)| layer.clone())
} }
/// Find the last image layer that covers 'key', ignoring any image layers /// Find the last image layer that covers 'key', ignoring any image layers
/// newer than 'lsn'. /// newer than 'lsn'.
fn find_latest_image(&self, key: Key, lsn: Lsn) -> Option<Arc<dyn Layer>> { fn find_latest_image(&self, key: Key, lsn: Lsn) -> Option<Arc<dyn Layer>> {
let mut candidate_lsn = Lsn(0); let mut iter = self
let mut candidate = None;
let envelope = AABB::from_corners(
[IntKey::from(key.to_i128()), IntKey::from(0)],
[IntKey::from(key.to_i128()), IntKey::from(lsn.0 as i128)],
);
for e in self
.historic_layers .historic_layers
.locate_in_envelope_intersecting(&envelope) .range(BTreeKey::new(Lsn(0))..BTreeKey::new(lsn + 1));
{ while let Some((_key, l)) = iter.next_back() {
let l = &e.layer;
if l.is_incremental() { if l.is_incremental() {
continue; continue;
} }
assert!(l.get_key_range().contains(&key)); if !l.get_key_range().contains(&key) {
let this_lsn = l.get_lsn_range().start;
assert!(this_lsn <= lsn);
if this_lsn < candidate_lsn {
// our previous candidate was better
continue; continue;
} }
candidate_lsn = this_lsn; let this_lsn = l.get_lsn_range().start;
candidate = Some(Arc::clone(l)); assert!(this_lsn <= lsn);
return Some(Arc::clone(l));
} }
None
candidate
} }
/// ///
@@ -476,18 +318,10 @@ impl LayerMap {
lsn: Lsn, lsn: Lsn,
) -> Result<Vec<(Range<Key>, Option<Arc<dyn Layer>>)>> { ) -> Result<Vec<(Range<Key>, Option<Arc<dyn Layer>>)>> {
let mut points = vec![key_range.start]; let mut points = vec![key_range.start];
let envelope = AABB::from_corners( for (_lsn, l) in self
[IntKey::from(key_range.start.to_i128()), IntKey::from(0)],
[
IntKey::from(key_range.end.to_i128()),
IntKey::from(lsn.0 as i128),
],
);
for e in self
.historic_layers .historic_layers
.locate_in_envelope_intersecting(&envelope) .range(BTreeKey::new(Lsn(0))..BTreeKey::new(lsn + 1))
{ {
let l = &e.layer;
assert!(l.get_lsn_range().start <= lsn); assert!(l.get_lsn_range().start <= lsn);
let range = l.get_key_range(); let range = l.get_key_range();
if key_range.contains(&range.start) { if key_range.contains(&range.start) {
@@ -524,26 +358,17 @@ impl LayerMap {
if lsn_range.start >= lsn_range.end { if lsn_range.start >= lsn_range.end {
return Ok(0); return Ok(0);
} }
let envelope = AABB::from_corners( for (_lsn, l) in self
[
IntKey::from(key_range.start.to_i128()),
IntKey::from(lsn_range.start.0 as i128),
],
[
IntKey::from(key_range.end.to_i128() - 1),
IntKey::from(lsn_range.end.0 as i128 - 1),
],
);
for e in self
.historic_layers .historic_layers
.locate_in_envelope_intersecting(&envelope) .range(BTreeKey::new(lsn_range.start)..BTreeKey::new(lsn_range.end))
{ {
let l = &e.layer;
if !l.is_incremental() { if !l.is_incremental() {
continue; continue;
} }
if !range_overlaps(&l.get_key_range(), key_range) {
continue;
}
assert!(range_overlaps(&l.get_lsn_range(), lsn_range)); assert!(range_overlaps(&l.get_lsn_range(), lsn_range));
assert!(range_overlaps(&l.get_key_range(), key_range));
// We ignore level0 delta layers. Unless the whole keyspace fits // We ignore level0 delta layers. Unless the whole keyspace fits
// into one partition // into one partition
@@ -579,8 +404,8 @@ impl LayerMap {
} }
println!("historic_layers:"); println!("historic_layers:");
for e in self.historic_layers.iter() { for (_key, layer) in self.historic_layers.iter() {
e.layer.dump(verbose)?; layer.dump(verbose)?;
} }
println!("End dump LayerMap"); println!("End dump LayerMap");
Ok(()) Ok(())

View File

@@ -52,10 +52,7 @@ use crate::task_mgr::TaskKind;
use crate::walreceiver::{is_etcd_client_initialized, spawn_connection_manager_task}; use crate::walreceiver::{is_etcd_client_initialized, spawn_connection_manager_task};
use crate::walredo::WalRedoManager; use crate::walredo::WalRedoManager;
use crate::CheckpointConfig; use crate::CheckpointConfig;
use crate::{ use crate::{page_cache, storage_sync};
page_cache,
storage_sync::{self, index::LayerFileMetadata},
};
pub struct Timeline { pub struct Timeline {
conf: &'static PageServerConf, conf: &'static PageServerConf,
@@ -478,6 +475,10 @@ impl Timeline {
} }
/// Mutate the timeline with a [`TimelineWriter`]. /// Mutate the timeline with a [`TimelineWriter`].
///
/// FIXME: This ought to return &'a TimelineWriter, where TimelineWriter
/// is a generic type in this trait. But that doesn't currently work in
/// Rust: https://rust-lang.github.io/rfcs/1598-generic_associated_types.html
pub fn writer(&self) -> TimelineWriter<'_> { pub fn writer(&self) -> TimelineWriter<'_> {
TimelineWriter { TimelineWriter {
tl: self, tl: self,
@@ -1193,8 +1194,8 @@ impl Timeline {
self.create_image_layers(&partitioning, self.initdb_lsn, true)? self.create_image_layers(&partitioning, self.initdb_lsn, true)?
} else { } else {
// normal case, write out a L0 delta layer file. // normal case, write out a L0 delta layer file.
let (delta_path, metadata) = self.create_delta_layer(&frozen_layer)?; let delta_path = self.create_delta_layer(&frozen_layer)?;
HashMap::from([(delta_path, metadata)]) HashSet::from([delta_path])
}; };
fail_point!("flush-frozen-before-sync"); fail_point!("flush-frozen-before-sync");
@@ -1220,86 +1221,85 @@ impl Timeline {
// TODO: This perhaps should be done in 'flush_frozen_layers', after flushing // TODO: This perhaps should be done in 'flush_frozen_layers', after flushing
// *all* the layers, to avoid fsyncing the file multiple times. // *all* the layers, to avoid fsyncing the file multiple times.
let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1); let disk_consistent_lsn = Lsn(lsn_range.end.0 - 1);
let old_disk_consistent_lsn = self.disk_consistent_lsn.load(); self.update_disk_consistent_lsn(disk_consistent_lsn, layer_paths_to_upload)?;
// If we were able to advance 'disk_consistent_lsn', save it the metadata file.
// After crash, we will restart WAL streaming and processing from that point.
if disk_consistent_lsn != old_disk_consistent_lsn {
assert!(disk_consistent_lsn > old_disk_consistent_lsn);
self.update_metadata_file(disk_consistent_lsn, layer_paths_to_upload)?;
// Also update the in-memory copy
self.disk_consistent_lsn.store(disk_consistent_lsn);
}
Ok(()) Ok(())
} }
/// Update metadata file /// Update metadata file
fn update_metadata_file( fn update_disk_consistent_lsn(
&self, &self,
disk_consistent_lsn: Lsn, disk_consistent_lsn: Lsn,
layer_paths_to_upload: HashMap<PathBuf, LayerFileMetadata>, layer_paths_to_upload: HashSet<PathBuf>,
) -> Result<()> { ) -> Result<()> {
// We can only save a valid 'prev_record_lsn' value on disk if we // If we were able to advance 'disk_consistent_lsn', save it in the metadata file.
// flushed *all* in-memory changes to disk. We only track // After crash, we will restart WAL streaming and processing from that point.
// 'prev_record_lsn' in memory for the latest processed record, so we let old_disk_consistent_lsn = self.disk_consistent_lsn.load();
// don't remember what the correct value that corresponds to some old if disk_consistent_lsn != old_disk_consistent_lsn {
// LSN is. But if we flush everything, then the value corresponding assert!(disk_consistent_lsn > old_disk_consistent_lsn);
// current 'last_record_lsn' is correct and we can store it on disk.
let RecordLsn {
last: last_record_lsn,
prev: prev_record_lsn,
} = self.last_record_lsn.load();
let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
Some(prev_record_lsn)
} else {
None
};
let ancestor_timeline_id = self // We can only save a valid 'prev_record_lsn' value on disk if we
.ancestor_timeline // flushed *all* in-memory changes to disk. We only track
.as_ref() // 'prev_record_lsn' in memory for the latest processed record, so we
.map(|ancestor| ancestor.timeline_id); // don't remember what the correct value that corresponds to some old
// LSN is. But if we flush everything, then the value corresponding
// current 'last_record_lsn' is correct and we can store it on disk.
let RecordLsn {
last: last_record_lsn,
prev: prev_record_lsn,
} = self.last_record_lsn.load();
let ondisk_prev_record_lsn = if disk_consistent_lsn == last_record_lsn {
Some(prev_record_lsn)
} else {
None
};
let metadata = TimelineMetadata::new( let ancestor_timeline_id = self
disk_consistent_lsn, .ancestor_timeline
ondisk_prev_record_lsn, .as_ref()
ancestor_timeline_id, .map(|ancestor| ancestor.timeline_id);
self.ancestor_lsn,
*self.latest_gc_cutoff_lsn.read(),
self.initdb_lsn,
self.pg_version,
);
fail_point!("checkpoint-before-saving-metadata", |x| bail!( let metadata = TimelineMetadata::new(
"{}", disk_consistent_lsn,
x.unwrap() ondisk_prev_record_lsn,
)); ancestor_timeline_id,
self.ancestor_lsn,
save_metadata( *self.latest_gc_cutoff_lsn.read(),
self.conf, self.initdb_lsn,
self.timeline_id, self.pg_version,
self.tenant_id,
&metadata,
false,
)?;
if self.upload_layers.load(atomic::Ordering::Relaxed) {
storage_sync::schedule_layer_upload(
self.tenant_id,
self.timeline_id,
layer_paths_to_upload,
Some(metadata),
); );
fail_point!("checkpoint-before-saving-metadata", |x| bail!(
"{}",
x.unwrap()
));
save_metadata(
self.conf,
self.timeline_id,
self.tenant_id,
&metadata,
false,
)?;
if self.upload_layers.load(atomic::Ordering::Relaxed) {
storage_sync::schedule_layer_upload(
self.tenant_id,
self.timeline_id,
layer_paths_to_upload,
Some(metadata),
);
}
// Also update the in-memory copy
self.disk_consistent_lsn.store(disk_consistent_lsn);
} }
Ok(()) Ok(())
} }
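The `ondisk_prev_record_lsn` rule carried over into the hunk above can be isolated: the previous-record LSN is only stored when the flush reached the very latest record. A hypothetical standalone sketch of that rule (plain u64s instead of `Lsn`):

fn ondisk_prev_record_lsn(
    disk_consistent_lsn: u64,
    last_record_lsn: u64,
    prev_record_lsn: u64,
) -> Option<u64> {
    if disk_consistent_lsn == last_record_lsn {
        // Everything in memory was flushed, so the tracked prev value is valid.
        Some(prev_record_lsn)
    } else {
        // Flushed up to an older point: we no longer know the record preceding
        // it, so store nothing rather than a wrong value.
        None
    }
}

fn main() {
    assert_eq!(ondisk_prev_record_lsn(200, 200, 180), Some(180));
    assert_eq!(ondisk_prev_record_lsn(150, 200, 180), None);
}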
// Write out the given frozen in-memory layer as a new L0 delta file // Write out the given frozen in-memory layer as a new L0 delta file
fn create_delta_layer( fn create_delta_layer(&self, frozen_layer: &InMemoryLayer) -> Result<PathBuf> {
&self,
frozen_layer: &InMemoryLayer,
) -> Result<(PathBuf, LayerFileMetadata)> {
// Write it out // Write it out
let new_delta = frozen_layer.write_to_disk()?; let new_delta = frozen_layer.write_to_disk()?;
let new_delta_path = new_delta.path(); let new_delta_path = new_delta.path();
@@ -1325,13 +1325,12 @@ impl Timeline {
// update the timeline's physical size // update the timeline's physical size
let sz = new_delta_path.metadata()?.len(); let sz = new_delta_path.metadata()?.len();
self.metrics.current_physical_size_gauge.add(sz); self.metrics.current_physical_size_gauge.add(sz);
// update metrics // update metrics
self.metrics.num_persistent_files_created.inc_by(1); self.metrics.num_persistent_files_created.inc_by(1);
self.metrics.persistent_bytes_written.inc_by(sz); self.metrics.persistent_bytes_written.inc_by(sz);
Ok((new_delta_path, LayerFileMetadata::new(sz))) Ok(new_delta_path)
} }
pub fn compact(&self) -> anyhow::Result<()> { pub fn compact(&self) -> anyhow::Result<()> {
@@ -1397,7 +1396,7 @@ impl Timeline {
storage_sync::schedule_layer_upload( storage_sync::schedule_layer_upload(
self.tenant_id, self.tenant_id,
self.timeline_id, self.timeline_id,
layer_paths_to_upload, HashSet::from_iter(layer_paths_to_upload),
None, None,
); );
} }
@@ -1478,9 +1477,10 @@ impl Timeline {
partitioning: &KeyPartitioning, partitioning: &KeyPartitioning,
lsn: Lsn, lsn: Lsn,
force: bool, force: bool,
) -> Result<HashMap<PathBuf, LayerFileMetadata>> { ) -> Result<HashSet<PathBuf>> {
let timer = self.metrics.create_images_time_histo.start_timer(); let timer = self.metrics.create_images_time_histo.start_timer();
let mut image_layers: Vec<ImageLayer> = Vec::new(); let mut image_layers: Vec<ImageLayer> = Vec::new();
let mut layer_paths_to_upload = HashSet::new();
for partition in partitioning.parts.iter() { for partition in partitioning.parts.iter() {
if force || self.time_for_new_image_layer(partition, lsn)? { if force || self.time_for_new_image_layer(partition, lsn)? {
let img_range = let img_range =
@@ -1502,6 +1502,7 @@ impl Timeline {
} }
} }
let image_layer = image_layer_writer.finish()?; let image_layer = image_layer_writer.finish()?;
layer_paths_to_upload.insert(image_layer.path());
image_layers.push(image_layer); image_layers.push(image_layer);
} }
} }
@@ -1515,25 +1516,15 @@ impl Timeline {
// //
// Compaction creates multiple image layers. It would be better to create them all // Compaction creates multiple image layers. It would be better to create them all
// and fsync them all in parallel. // and fsync them all in parallel.
let all_paths = image_layers let mut all_paths = Vec::from_iter(layer_paths_to_upload.clone());
.iter() all_paths.push(self.conf.timeline_path(&self.timeline_id, &self.tenant_id));
.map(|layer| layer.path())
.chain(std::iter::once(
self.conf.timeline_path(&self.timeline_id, &self.tenant_id),
))
.collect::<Vec<_>>();
par_fsync::par_fsync(&all_paths)?; par_fsync::par_fsync(&all_paths)?;
let mut layer_paths_to_upload = HashMap::with_capacity(image_layers.len());
let mut layers = self.layers.write().unwrap(); let mut layers = self.layers.write().unwrap();
for l in image_layers { for l in image_layers {
let path = l.path(); self.metrics
let metadata = path.metadata()?; .current_physical_size_gauge
.add(l.path().metadata()?.len());
layer_paths_to_upload.insert(path, LayerFileMetadata::new(metadata.len()));
self.metrics.current_physical_size_gauge.add(metadata.len());
layers.insert_historic(Arc::new(l)); layers.insert_historic(Arc::new(l));
} }
drop(layers); drop(layers);
@@ -1784,16 +1775,16 @@ impl Timeline {
} }
let mut layers = self.layers.write().unwrap(); let mut layers = self.layers.write().unwrap();
let mut new_layer_paths = HashMap::with_capacity(new_layers.len()); let mut new_layer_paths = HashSet::with_capacity(new_layers.len());
for l in new_layers { for l in new_layers {
let new_delta_path = l.path(); let new_delta_path = l.path();
let metadata = new_delta_path.metadata()?;
// update the timeline's physical size // update the timeline's physical size
self.metrics.current_physical_size_gauge.add(metadata.len()); self.metrics
.current_physical_size_gauge
.add(new_delta_path.metadata()?.len());
new_layer_paths.insert(new_delta_path, LayerFileMetadata::new(metadata.len())); new_layer_paths.insert(new_delta_path);
layers.insert_historic(Arc::new(l)); layers.insert_historic(Arc::new(l));
} }
@@ -1959,9 +1950,6 @@ impl Timeline {
new_gc_cutoff new_gc_cutoff
); );
write_guard.store_and_unlock(new_gc_cutoff).wait(); write_guard.store_and_unlock(new_gc_cutoff).wait();
// Persist metadata file
self.update_metadata_file(self.disk_consistent_lsn.load(), HashMap::new())?;
} }
info!("GC starting"); info!("GC starting");
@@ -2088,18 +2076,6 @@ impl Timeline {
result.layers_removed += 1; result.layers_removed += 1;
} }
info!(
"GC completed removing {} layers, cuttof {}",
result.layers_removed, new_gc_cutoff
);
if result.layers_removed != 0 {
fail_point!("gc-before-save-metadata", |_| {
info!("Abnormaly terinate pageserver at gc-before-save-metadata fail point");
std::process::abort();
});
return Ok(result);
}
if self.upload_layers.load(atomic::Ordering::Relaxed) { if self.upload_layers.load(atomic::Ordering::Relaxed) {
storage_sync::schedule_layer_delete( storage_sync::schedule_layer_delete(
self.tenant_id, self.tenant_id,

View File

@@ -24,7 +24,7 @@ pub mod defaults {
// This parameter determines L1 layer file size. // This parameter determines L1 layer file size.
pub const DEFAULT_COMPACTION_TARGET_SIZE: u64 = 128 * 1024 * 1024; pub const DEFAULT_COMPACTION_TARGET_SIZE: u64 = 128 * 1024 * 1024;
pub const DEFAULT_COMPACTION_PERIOD: &str = "20 s"; pub const DEFAULT_COMPACTION_PERIOD: &str = "1 s";
pub const DEFAULT_COMPACTION_THRESHOLD: usize = 10; pub const DEFAULT_COMPACTION_THRESHOLD: usize = 10;
pub const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024; pub const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024;

View File

@@ -1,7 +1,7 @@
//! This module acts as a switchboard to access different repositories managed by this //! This module acts as a switchboard to access different repositories managed by this
//! page server. //! page server.
use std::collections::{hash_map, HashMap}; use std::collections::{hash_map, HashMap, HashSet};
use std::ffi::OsStr; use std::ffi::OsStr;
use std::fs; use std::fs;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@@ -14,15 +14,15 @@ use remote_storage::GenericRemoteStorage;
use crate::config::{PageServerConf, METADATA_FILE_NAME}; use crate::config::{PageServerConf, METADATA_FILE_NAME};
use crate::http::models::TenantInfo; use crate::http::models::TenantInfo;
use crate::storage_sync::index::{LayerFileMetadata, RemoteIndex, RemoteTimelineIndex}; use crate::storage_sync::index::{RemoteIndex, RemoteTimelineIndex};
use crate::storage_sync::{self, LocalTimelineInitStatus, SyncStartupData, TimelineLocalFiles}; use crate::storage_sync::{self, LocalTimelineInitStatus, SyncStartupData};
use crate::task_mgr::{self, TaskKind}; use crate::task_mgr::{self, TaskKind};
use crate::tenant::{ use crate::tenant::{
ephemeral_file::is_ephemeral_file, metadata::TimelineMetadata, Tenant, TenantState, ephemeral_file::is_ephemeral_file, metadata::TimelineMetadata, Tenant, TenantState,
}; };
use crate::tenant_config::TenantConfOpt; use crate::tenant_config::TenantConfOpt;
use crate::walredo::PostgresRedoManager; use crate::walredo::PostgresRedoManager;
use crate::TEMP_FILE_SUFFIX; use crate::{TenantTimelineValues, TEMP_FILE_SUFFIX};
use utils::crashsafe_dir::{self, path_with_suffix_extension}; use utils::crashsafe_dir::{self, path_with_suffix_extension};
use utils::id::{TenantId, TimelineId}; use utils::id::{TenantId, TimelineId};
@@ -70,54 +70,34 @@ pub fn init_tenant_mgr(
.remote_storage_config .remote_storage_config
.as_ref() .as_ref()
.expect("remote storage without config"); .expect("remote storage without config");
let mut broken_tenants = HashMap::new();
let mut ready_tenants = HashMap::new();
for (tenant_id, tenant_attach_data) in local_tenant_files.into_iter() {
match tenant_attach_data {
TenantAttachData::Ready(t) => {
ready_tenants.insert(tenant_id, t);
}
TenantAttachData::Broken(e) => {
broken_tenants.insert(tenant_id, TenantAttachData::Broken(e));
}
}
}
let SyncStartupData { let SyncStartupData {
remote_index, remote_index,
local_timeline_init_statuses, local_timeline_init_statuses,
} = storage_sync::spawn_storage_sync_task( } = storage_sync::spawn_storage_sync_task(
conf, conf,
ready_tenants, local_tenant_files,
storage, storage,
storage_config.max_concurrent_syncs, storage_config.max_concurrent_syncs,
storage_config.max_sync_errors, storage_config.max_sync_errors,
) )
.context("Failed to spawn the storage sync thread")?; .context("Failed to spawn the storage sync thread")?;
let n = local_timeline_init_statuses.0.len(); (
let mut synced_timelines = local_timeline_init_statuses.0.into_iter().fold( remote_index,
HashMap::<TenantId, TenantAttachData>::with_capacity(n), local_timeline_init_statuses.filter_map(|init_status| match init_status {
|mut new_values, (tenant_id, old_values)| { LocalTimelineInitStatus::LocallyComplete(metadata) => Some(metadata),
let new_timeline_values = new_values LocalTimelineInitStatus::NeedsSync => None,
.entry(tenant_id) }),
.or_insert_with(|| TenantAttachData::Ready(HashMap::new())); )
if let TenantAttachData::Ready(t) = new_timeline_values {
for (timeline_id, old_value) in old_values {
if let LocalTimelineInitStatus::LocallyComplete(metadata) = old_value {
t.insert(timeline_id, TimelineLocalFiles::ready(metadata));
}
}
}
new_values
},
);
synced_timelines.extend(broken_tenants);
(remote_index, synced_timelines)
} else { } else {
info!("No remote storage configured, skipping storage sync, considering all local timelines with correct metadata files enabled"); info!("No remote storage configured, skipping storage sync, considering all local timelines with correct metadata files enabled");
(RemoteIndex::default(), local_tenant_files) (
RemoteIndex::default(),
local_tenant_files.filter_map(|(metadata, _)| Some(metadata)),
)
}; };
attach_local_tenants(conf, &remote_index, tenants_to_attach); attach_local_tenants(conf, &remote_index, tenants_to_attach);
Ok(remote_index) Ok(remote_index)
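The `filter_map` above keeps only timelines whose local files are already complete, dropping the ones the sync loop still has to download. A sketch with simplified stand-in types (the real code works over `TenantTimelineValues`, not a `Vec`, and carries real metadata):

#[derive(Debug)]
enum LocalTimelineInitStatus {
    LocallyComplete(String), // metadata placeholder
    NeedsSync,
}

fn ready_timelines(statuses: Vec<LocalTimelineInitStatus>) -> Vec<String> {
    statuses
        .into_iter()
        .filter_map(|s| match s {
            LocalTimelineInitStatus::LocallyComplete(metadata) => Some(metadata),
            LocalTimelineInitStatus::NeedsSync => None,
        })
        .collect()
}

fn main() {
    let ready = ready_timelines(vec![
        LocalTimelineInitStatus::LocallyComplete("tl-1".into()),
        LocalTimelineInitStatus::NeedsSync,
    ]);
    assert_eq!(ready, vec!["tl-1".to_string()]);
}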
@@ -137,12 +117,18 @@ pub fn init_tenant_mgr(
pub fn attach_local_tenants( pub fn attach_local_tenants(
conf: &'static PageServerConf, conf: &'static PageServerConf,
remote_index: &RemoteIndex, remote_index: &RemoteIndex,
tenants_to_attach: HashMap<TenantId, TenantAttachData>, tenants_to_attach: TenantTimelineValues<TimelineMetadata>,
) { ) {
let _entered = info_span!("attach_local_tenants").entered(); let _entered = info_span!("attach_local_tenants").entered();
let number_of_tenants = tenants_to_attach.len(); let number_of_tenants = tenants_to_attach.0.len();
for (tenant_id, local_timelines) in tenants_to_attach.0 {
info!(
"Attaching {} timelines for {tenant_id}",
local_timelines.len()
);
debug!("Timelines to attach: {local_timelines:?}");
for (tenant_id, local_timelines) in tenants_to_attach {
let mut tenants_accessor = tenants_state::write_tenants(); let mut tenants_accessor = tenants_state::write_tenants();
let tenant = match tenants_accessor.entry(tenant_id) { let tenant = match tenants_accessor.entry(tenant_id) {
hash_map::Entry::Occupied(o) => { hash_map::Entry::Occupied(o) => {
@@ -151,55 +137,25 @@ pub fn attach_local_tenants(
} }
hash_map::Entry::Vacant(v) => { hash_map::Entry::Vacant(v) => {
info!("Tenant {tenant_id} was not found in pageserver's memory, loading it"); info!("Tenant {tenant_id} was not found in pageserver's memory, loading it");
let tenant = Arc::new(Tenant::new( let tenant = load_local_tenant(conf, tenant_id, remote_index);
conf,
TenantConfOpt::default(),
Arc::new(PostgresRedoManager::new(conf, tenant_id)),
tenant_id,
remote_index.clone(),
conf.remote_storage_config.is_some(),
));
match local_timelines {
TenantAttachData::Broken(_) => {
tenant.set_state(TenantState::Broken);
}
TenantAttachData::Ready(_) => {
match Tenant::load_tenant_config(conf, tenant_id) {
Ok(tenant_conf) => {
tenant.update_tenant_config(tenant_conf);
tenant.activate(false);
}
Err(e) => {
error!("Failed to read config for tenant {tenant_id}, disabling tenant: {e:?}");
tenant.set_state(TenantState::Broken);
}
};
}
}
v.insert(Arc::clone(&tenant)); v.insert(Arc::clone(&tenant));
tenant tenant
} }
}; };
drop(tenants_accessor); drop(tenants_accessor);
match local_timelines {
TenantAttachData::Broken(e) => warn!("{}", e), if tenant.current_state() == TenantState::Broken {
TenantAttachData::Ready(ref timelines) => { warn!("Skipping timeline load for broken tenant {tenant_id}")
info!("Attaching {} timelines for {tenant_id}", timelines.len()); } else {
debug!("Timelines to attach: {local_timelines:?}"); let has_timelines = !local_timelines.is_empty();
let has_timelines = !timelines.is_empty(); match tenant.init_attach_timelines(local_timelines) {
let timelines_to_attach = timelines Ok(()) => {
.iter() info!("successfully loaded local timelines for tenant {tenant_id}");
.map(|(&k, v)| (k, v.metadata().to_owned())) tenant.activate(has_timelines);
.collect(); }
match tenant.init_attach_timelines(timelines_to_attach) { Err(e) => {
Ok(()) => { error!("Failed to attach tenant timelines: {e:?}");
info!("successfully loaded local timelines for tenant {tenant_id}"); tenant.set_state(TenantState::Broken);
tenant.activate(has_timelines);
}
Err(e) => {
error!("Failed to attach tenant timelines: {e:?}");
tenant.set_state(TenantState::Broken);
}
} }
} }
} }
@@ -208,6 +164,44 @@ pub fn attach_local_tenants(
info!("Processed {number_of_tenants} local tenants during attach") info!("Processed {number_of_tenants} local tenants during attach")
} }
fn load_local_tenant(
conf: &'static PageServerConf,
tenant_id: TenantId,
remote_index: &RemoteIndex,
) -> Arc<Tenant> {
let tenant = Arc::new(Tenant::new(
conf,
TenantConfOpt::default(),
Arc::new(PostgresRedoManager::new(conf, tenant_id)),
tenant_id,
remote_index.clone(),
conf.remote_storage_config.is_some(),
));
let tenant_timelines_dir = conf.timelines_path(&tenant_id);
if !tenant_timelines_dir.is_dir() {
error!(
"Tenant {} has no timelines directory at {}",
tenant_id,
tenant_timelines_dir.display()
);
tenant.set_state(TenantState::Broken);
} else {
match Tenant::load_tenant_config(conf, tenant_id) {
Ok(tenant_conf) => {
tenant.update_tenant_config(tenant_conf);
tenant.activate(false);
}
Err(e) => {
error!("Failed to read config for tenant {tenant_id}, disabling tenant: {e:?}");
tenant.set_state(TenantState::Broken);
}
}
}
tenant
}
/// ///
/// Shut down all tenants. This runs as part of pageserver shutdown. /// Shut down all tenants. This runs as part of pageserver shutdown.
/// ///
@@ -481,21 +475,16 @@ pub fn list_tenant_info(remote_index: &RemoteTimelineIndex) -> Vec<TenantInfo> {
.collect() .collect()
} }
#[derive(Debug)]
pub enum TenantAttachData {
Ready(HashMap<TimelineId, TimelineLocalFiles>),
Broken(anyhow::Error),
}
/// Attempts to collect information about all tenant and timelines, existing on the local FS. /// Attempts to collect information about all tenant and timelines, existing on the local FS.
/// If it finds any, deletes all temporary files and directories created before. Also removes empty directories, /// If it finds any, deletes all temporary files and directories created before. Also removes empty directories,
/// that may appear due to such removals. /// that may appear due to such removals.
/// Does not fail on particular timeline or tenant collection errors, rather logging them and ignoring the entities. /// Does not fail on particular timeline or tenant collection errors, rather logging them and ignoring the entities.
fn local_tenant_timeline_files( fn local_tenant_timeline_files(
config: &'static PageServerConf, config: &'static PageServerConf,
) -> anyhow::Result<HashMap<TenantId, TenantAttachData>> { ) -> anyhow::Result<TenantTimelineValues<(TimelineMetadata, HashSet<PathBuf>)>> {
let _entered = info_span!("local_tenant_timeline_files").entered(); let _entered = info_span!("local_tenant_timeline_files").entered();
let mut local_tenant_timeline_files = HashMap::new(); let mut local_tenant_timeline_files = TenantTimelineValues::new();
let tenants_dir = config.tenants_path(); let tenants_dir = config.tenants_path();
for tenants_dir_entry in fs::read_dir(&tenants_dir) for tenants_dir_entry in fs::read_dir(&tenants_dir)
.with_context(|| format!("Failed to list tenants dir {}", tenants_dir.display()))? .with_context(|| format!("Failed to list tenants dir {}", tenants_dir.display()))?
@@ -517,31 +506,19 @@ fn local_tenant_timeline_files(
} }
} else { } else {
match collect_timelines_for_tenant(config, &tenant_dir_path) { match collect_timelines_for_tenant(config, &tenant_dir_path) {
Ok((tenant_id, TenantAttachData::Broken(e))) => { Ok((tenant_id, collected_files)) => {
local_tenant_timeline_files.entry(tenant_id).or_insert(TenantAttachData::Broken(e));
},
Ok((tenant_id, TenantAttachData::Ready(collected_files))) => {
if collected_files.is_empty() { if collected_files.is_empty() {
match remove_if_empty(&tenant_dir_path) { match remove_if_empty(&tenant_dir_path) {
Ok(true) => info!("Removed empty tenant directory {}", tenant_dir_path.display()), Ok(true) => info!("Removed empty tenant directory {}", tenant_dir_path.display()),
Ok(false) => { Ok(false) => {
// insert empty timeline entry: it has some non-temporary files inside that we cannot remove // insert empty timeline entry: it has some non-temporary files inside that we cannot remove
// so make obvious for HTTP API callers, that something exists there and try to load the tenant // so make obvious for HTTP API callers, that something exists there and try to load the tenant
let _ = local_tenant_timeline_files.entry(tenant_id).or_insert_with(|| TenantAttachData::Ready(HashMap::new())); let _ = local_tenant_timeline_files.0.entry(tenant_id).or_default();
}, },
Err(e) => error!("Failed to remove empty tenant directory: {e:?}"), Err(e) => error!("Failed to remove empty tenant directory: {e:?}"),
} }
} else { } else {
match local_tenant_timeline_files.entry(tenant_id) { local_tenant_timeline_files.0.entry(tenant_id).or_default().extend(collected_files.into_iter())
hash_map::Entry::Vacant(entry) => {
entry.insert(TenantAttachData::Ready(collected_files));
}
hash_map::Entry::Occupied(entry) =>{
if let TenantAttachData::Ready(old_timelines) = entry.into_mut() {
old_timelines.extend(collected_files);
}
},
}
} }
}, },
Err(e) => error!( Err(e) => error!(
@@ -564,7 +541,7 @@ fn local_tenant_timeline_files(
info!( info!(
"Collected files for {} tenants", "Collected files for {} tenants",
local_tenant_timeline_files.len(), local_tenant_timeline_files.0.len()
); );
Ok(local_tenant_timeline_files) Ok(local_tenant_timeline_files)
} }
@@ -602,10 +579,14 @@ fn is_temporary(path: &Path) -> bool {
} }
} }
#[allow(clippy::type_complexity)]
fn collect_timelines_for_tenant( fn collect_timelines_for_tenant(
config: &'static PageServerConf, config: &'static PageServerConf,
tenant_path: &Path, tenant_path: &Path,
) -> anyhow::Result<(TenantId, TenantAttachData)> { ) -> anyhow::Result<(
TenantId,
HashMap<TimelineId, (TimelineMetadata, HashSet<PathBuf>)>,
)> {
let tenant_id = tenant_path let tenant_id = tenant_path
.file_name() .file_name()
.and_then(OsStr::to_str) .and_then(OsStr::to_str)
@@ -614,17 +595,6 @@ fn collect_timelines_for_tenant(
.context("Could not parse tenant id out of the tenant dir name")?; .context("Could not parse tenant id out of the tenant dir name")?;
let timelines_dir = config.timelines_path(&tenant_id); let timelines_dir = config.timelines_path(&tenant_id);
if !timelines_dir.as_path().is_dir() {
return Ok((
tenant_id,
TenantAttachData::Broken(anyhow::anyhow!(
"Tenant {} has no timelines directory at {}",
tenant_id,
timelines_dir.display()
)),
));
}
let mut tenant_timelines = HashMap::new(); let mut tenant_timelines = HashMap::new();
for timelines_dir_entry in fs::read_dir(&timelines_dir) for timelines_dir_entry in fs::read_dir(&timelines_dir)
.with_context(|| format!("Failed to list timelines dir entry for tenant {tenant_id}"))? .with_context(|| format!("Failed to list timelines dir entry for tenant {tenant_id}"))?
@@ -647,10 +617,7 @@ fn collect_timelines_for_tenant(
} else { } else {
match collect_timeline_files(&timeline_dir) { match collect_timeline_files(&timeline_dir) {
Ok((timeline_id, metadata, timeline_files)) => { Ok((timeline_id, metadata, timeline_files)) => {
tenant_timelines.insert( tenant_timelines.insert(timeline_id, (metadata, timeline_files));
timeline_id,
TimelineLocalFiles::collected(metadata, timeline_files),
);
} }
Err(e) => { Err(e) => {
error!( error!(
@@ -685,19 +652,15 @@ fn collect_timelines_for_tenant(
debug!("Tenant {tenant_id} has no timelines loaded"); debug!("Tenant {tenant_id} has no timelines loaded");
} }
Ok((tenant_id, TenantAttachData::Ready(tenant_timelines))) Ok((tenant_id, tenant_timelines))
} }
// discover timeline files and extract timeline metadata // discover timeline files and extract timeline metadata
// NOTE: ephemeral files are excluded from the list // NOTE: ephemeral files are excluded from the list
fn collect_timeline_files( fn collect_timeline_files(
timeline_dir: &Path, timeline_dir: &Path,
) -> anyhow::Result<( ) -> anyhow::Result<(TimelineId, TimelineMetadata, HashSet<PathBuf>)> {
TimelineId, let mut timeline_files = HashSet::new();
TimelineMetadata,
HashMap<PathBuf, LayerFileMetadata>,
)> {
let mut timeline_files = HashMap::new();
let mut timeline_metadata_path = None; let mut timeline_metadata_path = None;
let timeline_id = timeline_dir let timeline_id = timeline_dir
@@ -710,9 +673,7 @@ fn collect_timeline_files(
fs::read_dir(&timeline_dir).context("Failed to list timeline dir contents")?; fs::read_dir(&timeline_dir).context("Failed to list timeline dir contents")?;
for entry in timeline_dir_entries { for entry in timeline_dir_entries {
let entry_path = entry.context("Failed to list timeline dir entry")?.path(); let entry_path = entry.context("Failed to list timeline dir entry")?.path();
let metadata = entry_path.metadata()?; if entry_path.is_file() {
if metadata.is_file() {
if entry_path.file_name().and_then(OsStr::to_str) == Some(METADATA_FILE_NAME) { if entry_path.file_name().and_then(OsStr::to_str) == Some(METADATA_FILE_NAME) {
timeline_metadata_path = Some(entry_path); timeline_metadata_path = Some(entry_path);
} else if is_ephemeral_file(&entry_path.file_name().unwrap().to_string_lossy()) { } else if is_ephemeral_file(&entry_path.file_name().unwrap().to_string_lossy()) {
@@ -727,8 +688,7 @@ fn collect_timeline_files(
) )
})?; })?;
} else { } else {
let layer_metadata = LayerFileMetadata::new(metadata.len()); timeline_files.insert(entry_path);
timeline_files.insert(entry_path, layer_metadata);
} }
} }
} }

View File

@@ -70,10 +70,8 @@ async fn compaction_loop(tenant_id: TenantId) {
// Run compaction // Run compaction
let mut sleep_duration = tenant.get_compaction_period(); let mut sleep_duration = tenant.get_compaction_period();
if let Err(e) = tenant.compaction_iteration() { if let Err(e) = tenant.compaction_iteration() {
error!("Compaction failed, retrying: {e:#}");
sleep_duration = wait_duration; sleep_duration = wait_duration;
error!("Compaction failed, retrying in {:?}: {e:#}", sleep_duration);
#[cfg(feature = "testing")]
std::process::abort();
} }
// Sleep // Sleep
@@ -121,10 +119,8 @@ async fn gc_loop(tenant_id: TenantId) {
if gc_horizon > 0 { if gc_horizon > 0 {
if let Err(e) = tenant.gc_iteration(None, gc_horizon, tenant.get_pitr_interval(), false) if let Err(e) = tenant.gc_iteration(None, gc_horizon, tenant.get_pitr_interval(), false)
{ {
error!("Gc failed, retrying: {e:#}");
sleep_duration = wait_duration; sleep_duration = wait_duration;
error!("Gc failed, retrying in {:?}: {e:#}", sleep_duration);
#[cfg(feature = "testing")]
std::process::abort();
} }
} }

View File

@@ -39,8 +39,7 @@ use utils::crashsafe_dir::path_with_suffix_extension;
use utils::{bin_ser::BeSer, id::TenantId, lsn::Lsn, nonblock::set_nonblock}; use utils::{bin_ser::BeSer, id::TenantId, lsn::Lsn, nonblock::set_nonblock};
use crate::metrics::{ use crate::metrics::{
WAL_REDO_BYTES_HISTOGRAM, WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_RECORD_COUNTER, WAL_REDO_TIME, WAL_REDO_RECORDS_HISTOGRAM, WAL_REDO_RECORD_COUNTER, WAL_REDO_TIME, WAL_REDO_WAIT_TIME,
WAL_REDO_WAIT_TIME,
}; };
use crate::pgdatadir_mapping::{key_to_rel_block, key_to_slru_block}; use crate::pgdatadir_mapping::{key_to_rel_block, key_to_slru_block};
use crate::reltag::{RelTag, SlruKind}; use crate::reltag::{RelTag, SlruKind};
@@ -245,23 +244,12 @@ impl PostgresRedoManager {
let end_time = Instant::now(); let end_time = Instant::now();
let duration = end_time.duration_since(lock_time); let duration = end_time.duration_since(lock_time);
let len = records.len();
let nbytes = records.iter().fold(0, |accumulator, record| {
accumulator
+ match &record.1 {
NeonWalRecord::Postgres { rec, .. } => rec.len(),
_ => unreachable!("Only PostgreSQL records are accepted in this batch"),
}
});
WAL_REDO_TIME.observe(duration.as_secs_f64()); WAL_REDO_TIME.observe(duration.as_secs_f64());
WAL_REDO_RECORDS_HISTOGRAM.observe(len as f64); WAL_REDO_RECORDS_HISTOGRAM.observe(records.len() as f64);
WAL_REDO_BYTES_HISTOGRAM.observe(nbytes as f64);
debug!( debug!(
"postgres applied {} WAL records ({} bytes) in {} us to reconstruct page image at LSN {}", "postgres applied {} WAL records in {} us to reconstruct page image at LSN {}",
len, records.len(),
nbytes,
duration.as_micros(), duration.as_micros(),
lsn lsn
); );
@@ -270,9 +258,8 @@ impl PostgresRedoManager {
// next request will launch a new one. // next request will launch a new one.
if result.is_err() { if result.is_err() {
error!( error!(
"error applying {} WAL records ({} bytes) to reconstruct page image at LSN {}", "error applying {} WAL records to reconstruct page image at LSN {}",
records.len(), records.len(),
nbytes,
lsn lsn
); );
let process = process_guard.take().unwrap(); let process = process_guard.take().unwrap();

View File

@@ -10,12 +10,51 @@ struct WalProposerConn
PGconn *pg_conn; PGconn *pg_conn;
bool is_nonblocking; /* whether the connection is non-blocking */ bool is_nonblocking; /* whether the connection is non-blocking */
char *recvbuf; /* last received data from char *recvbuf; /* last received data from
* walprop_async_read */ * libpqprop_async_read */
}; };
/* Prototypes for exported functions */
static char *libpqprop_error_message(WalProposerConn * conn);
static WalProposerConnStatusType libpqprop_status(WalProposerConn * conn);
static WalProposerConn * libpqprop_connect_start(char *conninfo);
static WalProposerConnectPollStatusType libpqprop_connect_poll(WalProposerConn * conn);
static bool libpqprop_send_query(WalProposerConn * conn, char *query);
static WalProposerExecStatusType libpqprop_get_query_result(WalProposerConn * conn);
static pgsocket libpqprop_socket(WalProposerConn * conn);
static int libpqprop_flush(WalProposerConn * conn);
static void libpqprop_finish(WalProposerConn * conn);
static PGAsyncReadResult libpqprop_async_read(WalProposerConn * conn, char **buf, int *amount);
static PGAsyncWriteResult libpqprop_async_write(WalProposerConn * conn, void const *buf, size_t size);
static bool libpqprop_blocking_write(WalProposerConn * conn, void const *buf, size_t size);
static WalProposerFunctionsType PQWalProposerFunctions =
{
libpqprop_error_message,
libpqprop_status,
libpqprop_connect_start,
libpqprop_connect_poll,
libpqprop_send_query,
libpqprop_get_query_result,
libpqprop_socket,
libpqprop_flush,
libpqprop_finish,
libpqprop_async_read,
libpqprop_async_write,
libpqprop_blocking_write,
};
/* Module initialization */
void
pg_init_libpqwalproposer(void)
{
if (WalProposerFunctions != NULL)
elog(ERROR, "libpqwalproposer already loaded");
WalProposerFunctions = &PQWalProposerFunctions;
}
/* Helper function */ /* Helper function */
static bool static bool
ensure_nonblocking_status(WalProposerConn *conn, bool is_nonblocking) ensure_nonblocking_status(WalProposerConn * conn, bool is_nonblocking)
{ {
/* If we're already correctly blocking or nonblocking, all good */ /* If we're already correctly blocking or nonblocking, all good */
if (is_nonblocking == conn->is_nonblocking) if (is_nonblocking == conn->is_nonblocking)
@@ -30,14 +69,14 @@ ensure_nonblocking_status(WalProposerConn *conn, bool is_nonblocking)
} }
/* Exported function definitions */ /* Exported function definitions */
char * static char *
walprop_error_message(WalProposerConn *conn) libpqprop_error_message(WalProposerConn * conn)
{ {
return PQerrorMessage(conn->pg_conn); return PQerrorMessage(conn->pg_conn);
} }
WalProposerConnStatusType static WalProposerConnStatusType
walprop_status(WalProposerConn *conn) libpqprop_status(WalProposerConn * conn)
{ {
switch (PQstatus(conn->pg_conn)) switch (PQstatus(conn->pg_conn))
{ {
@@ -50,8 +89,8 @@ walprop_status(WalProposerConn *conn)
} }
} }
WalProposerConn * static WalProposerConn *
walprop_connect_start(char *conninfo) libpqprop_connect_start(char *conninfo)
{ {
WalProposerConn *conn; WalProposerConn *conn;
PGconn *pg_conn; PGconn *pg_conn;
@@ -80,8 +119,8 @@ walprop_connect_start(char *conninfo)
return conn; return conn;
} }
WalProposerConnectPollStatusType static WalProposerConnectPollStatusType
walprop_connect_poll(WalProposerConn *conn) libpqprop_connect_poll(WalProposerConn * conn)
{ {
WalProposerConnectPollStatusType return_val; WalProposerConnectPollStatusType return_val;
@@ -121,8 +160,8 @@ walprop_connect_poll(WalProposerConn *conn)
return return_val; return return_val;
} }
bool static bool
walprop_send_query(WalProposerConn *conn, char *query) libpqprop_send_query(WalProposerConn * conn, char *query)
{ {
/* /*
* We need to be in blocking mode for sending the query to run without * We need to be in blocking mode for sending the query to run without
@@ -138,8 +177,8 @@ walprop_send_query(WalProposerConn *conn, char *query)
return true; return true;
} }
WalProposerExecStatusType static WalProposerExecStatusType
walprop_get_query_result(WalProposerConn *conn) libpqprop_get_query_result(WalProposerConn * conn)
{ {
PGresult *result; PGresult *result;
WalProposerExecStatusType return_val; WalProposerExecStatusType return_val;
@@ -216,20 +255,20 @@ walprop_get_query_result(WalProposerConn *conn)
return return_val; return return_val;
} }
pgsocket static pgsocket
walprop_socket(WalProposerConn *conn) libpqprop_socket(WalProposerConn * conn)
{ {
return PQsocket(conn->pg_conn); return PQsocket(conn->pg_conn);
} }
int static int
walprop_flush(WalProposerConn *conn) libpqprop_flush(WalProposerConn * conn)
{ {
return (PQflush(conn->pg_conn)); return (PQflush(conn->pg_conn));
} }
void static void
walprop_finish(WalProposerConn *conn) libpqprop_finish(WalProposerConn * conn)
{ {
if (conn->recvbuf != NULL) if (conn->recvbuf != NULL)
PQfreemem(conn->recvbuf); PQfreemem(conn->recvbuf);
@@ -243,8 +282,8 @@ walprop_finish(WalProposerConn *conn)
* On success, the data is placed in *buf. It is valid until the next call * On success, the data is placed in *buf. It is valid until the next call
* to this function. * to this function.
*/ */
PGAsyncReadResult static PGAsyncReadResult
walprop_async_read(WalProposerConn *conn, char **buf, int *amount) libpqprop_async_read(WalProposerConn * conn, char **buf, int *amount)
{ {
int result; int result;
@@ -314,8 +353,8 @@ walprop_async_read(WalProposerConn *conn, char **buf, int *amount)
} }
} }
PGAsyncWriteResult static PGAsyncWriteResult
walprop_async_write(WalProposerConn *conn, void const *buf, size_t size) libpqprop_async_write(WalProposerConn * conn, void const *buf, size_t size)
{ {
int result; int result;
@@ -369,12 +408,8 @@ walprop_async_write(WalProposerConn *conn, void const *buf, size_t size)
} }
} }
/* static bool
* This function is very similar to walprop_async_write. For more libpqprop_blocking_write(WalProposerConn * conn, void const *buf, size_t size)
* information, refer to the comments there.
*/
bool
walprop_blocking_write(WalProposerConn *conn, void const *buf, size_t size)
{ {
int result; int result;
@@ -382,6 +417,10 @@ walprop_blocking_write(WalProposerConn *conn, void const *buf, size_t size)
if (!ensure_nonblocking_status(conn, false)) if (!ensure_nonblocking_status(conn, false))
return false; return false;
/*
* This function is very similar to libpqprop_async_write. For more
* information, refer to the comments there
*/
if ((result = PQputCopyData(conn->pg_conn, buf, size)) == -1) if ((result = PQputCopyData(conn->pg_conn, buf, size)) == -1)
return false; return false;

View File

@@ -32,6 +32,7 @@ void
_PG_init(void) _PG_init(void)
{ {
pg_init_libpagestore(); pg_init_libpagestore();
pg_init_libpqwalproposer();
pg_init_walproposer(); pg_init_walproposer();
EmitWarningsOnPlaceholders("neon"); EmitWarningsOnPlaceholders("neon");

View File

@@ -13,6 +13,7 @@
#define NEON_H #define NEON_H
extern void pg_init_libpagestore(void); extern void pg_init_libpagestore(void);
extern void pg_init_libpqwalproposer(void);
extern void pg_init_walproposer(void); extern void pg_init_walproposer(void);
#endif /* NEON_H */ #endif /* NEON_H */

View File

@@ -79,6 +79,9 @@ bool am_wal_proposer;
char *neon_timeline_walproposer = NULL; char *neon_timeline_walproposer = NULL;
char *neon_tenant_walproposer = NULL; char *neon_tenant_walproposer = NULL;
/* Declared in walproposer.h, defined here, initialized in libpqwalproposer.c */
WalProposerFunctionsType *WalProposerFunctions = NULL;
#define WAL_PROPOSER_SLOT_NAME "wal_proposer_slot" #define WAL_PROPOSER_SLOT_NAME "wal_proposer_slot"
static int n_safekeepers = 0; static int n_safekeepers = 0;
@@ -435,6 +438,10 @@ WalProposerInitImpl(XLogRecPtr flushRecPtr, uint64 systemId)
char *sep; char *sep;
char *port; char *port;
/* Load the libpq-specific functions */
if (WalProposerFunctions == NULL)
elog(ERROR, "libpqwalproposer didn't initialize correctly");
load_file("libpqwalreceiver", false); load_file("libpqwalreceiver", false);
if (WalReceiverFunctions == NULL) if (WalReceiverFunctions == NULL)
elog(ERROR, "libpqwalreceiver didn't initialize correctly"); elog(ERROR, "libpqwalreceiver didn't initialize correctly");
@@ -1464,6 +1471,12 @@ SendProposerElected(Safekeeper *sk)
*/ */
th = &sk->voteResponse.termHistory; th = &sk->voteResponse.termHistory;
/*
* If any WAL is present on the sk, it must be authorized by some term.
* OTOH, without any WAL there are no term switches in the log.
*/
Assert((th->n_entries == 0) ==
(sk->voteResponse.flushLsn == InvalidXLogRecPtr));
/* We must start somewhere. */ /* We must start somewhere. */
Assert(propTermHistory.n_entries >= 1); Assert(propTermHistory.n_entries >= 1);

View File

@@ -446,31 +446,31 @@ typedef enum
} WalProposerConnStatusType; } WalProposerConnStatusType;
/* Re-exported PQerrorMessage */ /* Re-exported PQerrorMessage */
extern char *walprop_error_message(WalProposerConn *conn); typedef char *(*walprop_error_message_fn) (WalProposerConn * conn);
/* Re-exported PQstatus */ /* Re-exported PQstatus */
extern WalProposerConnStatusType walprop_status(WalProposerConn *conn); typedef WalProposerConnStatusType(*walprop_status_fn) (WalProposerConn * conn);
/* Re-exported PQconnectStart */ /* Re-exported PQconnectStart */
extern WalProposerConn * walprop_connect_start(char *conninfo); typedef WalProposerConn * (*walprop_connect_start_fn) (char *conninfo);
/* Re-exported PQconnectPoll */ /* Re-exported PQconnectPoll */
extern WalProposerConnectPollStatusType walprop_connect_poll(WalProposerConn *conn); typedef WalProposerConnectPollStatusType(*walprop_connect_poll_fn) (WalProposerConn * conn);
/* Blocking wrapper around PQsendQuery */ /* Blocking wrapper around PQsendQuery */
extern bool walprop_send_query(WalProposerConn *conn, char *query); typedef bool (*walprop_send_query_fn) (WalProposerConn * conn, char *query);
/* Wrapper around PQconsumeInput + PQisBusy + PQgetResult */ /* Wrapper around PQconsumeInput + PQisBusy + PQgetResult */
extern WalProposerExecStatusType walprop_get_query_result(WalProposerConn *conn); typedef WalProposerExecStatusType(*walprop_get_query_result_fn) (WalProposerConn * conn);
/* Re-exported PQsocket */ /* Re-exported PQsocket */
extern pgsocket walprop_socket(WalProposerConn *conn); typedef pgsocket (*walprop_socket_fn) (WalProposerConn * conn);
/* Wrapper around PQconsumeInput (if socket's read-ready) + PQflush */ /* Wrapper around PQconsumeInput (if socket's read-ready) + PQflush */
extern int walprop_flush(WalProposerConn *conn); typedef int (*walprop_flush_fn) (WalProposerConn * conn);
/* Re-exported PQfinish */ /* Re-exported PQfinish */
extern void walprop_finish(WalProposerConn *conn); typedef void (*walprop_finish_fn) (WalProposerConn * conn);
/* /*
* Ergonomic wrapper around PQgetCopyData * Ergonomic wrapper around PQgetCopyData
@@ -486,7 +486,9 @@ extern void walprop_finish(WalProposerConn *conn);
* performs a bit of extra checking work that's always required and is normally * performs a bit of extra checking work that's always required and is normally
* somewhat verbose. * somewhat verbose.
*/ */
extern PGAsyncReadResult walprop_async_read(WalProposerConn *conn, char **buf, int *amount); typedef PGAsyncReadResult(*walprop_async_read_fn) (WalProposerConn * conn,
char **buf,
int *amount);
/* /*
* Ergonomic wrapper around PQputCopyData + PQflush * Ergonomic wrapper around PQputCopyData + PQflush
@@ -495,14 +497,69 @@ extern PGAsyncReadResult walprop_async_read(WalProposerConn *conn, char **buf, i
* *
* For information on the meaning of return codes, refer to PGAsyncWriteResult. * For information on the meaning of return codes, refer to PGAsyncWriteResult.
*/ */
extern PGAsyncWriteResult walprop_async_write(WalProposerConn *conn, void const *buf, size_t size); typedef PGAsyncWriteResult(*walprop_async_write_fn) (WalProposerConn * conn,
void const *buf,
size_t size);
/* /*
* Blocking equivalent to walprop_async_write_fn * Blocking equivalent to walprop_async_write_fn
* *
* Returns 'true' if successful, 'false' on failure. * Returns 'true' if successful, 'false' on failure.
*/ */
extern bool walprop_blocking_write(WalProposerConn *conn, void const *buf, size_t size); typedef bool (*walprop_blocking_write_fn) (WalProposerConn * conn, void const *buf, size_t size);
/* All libpqwalproposer exported functions collected together. */
typedef struct WalProposerFunctionsType
{
walprop_error_message_fn walprop_error_message;
walprop_status_fn walprop_status;
walprop_connect_start_fn walprop_connect_start;
walprop_connect_poll_fn walprop_connect_poll;
walprop_send_query_fn walprop_send_query;
walprop_get_query_result_fn walprop_get_query_result;
walprop_socket_fn walprop_socket;
walprop_flush_fn walprop_flush;
walprop_finish_fn walprop_finish;
walprop_async_read_fn walprop_async_read;
walprop_async_write_fn walprop_async_write;
walprop_blocking_write_fn walprop_blocking_write;
} WalProposerFunctionsType;
/* Allow the above functions to be "called" with normal syntax */
#define walprop_error_message(conn) \
WalProposerFunctions->walprop_error_message(conn)
#define walprop_status(conn) \
WalProposerFunctions->walprop_status(conn)
#define walprop_connect_start(conninfo) \
WalProposerFunctions->walprop_connect_start(conninfo)
#define walprop_connect_poll(conn) \
WalProposerFunctions->walprop_connect_poll(conn)
#define walprop_send_query(conn, query) \
WalProposerFunctions->walprop_send_query(conn, query)
#define walprop_get_query_result(conn) \
WalProposerFunctions->walprop_get_query_result(conn)
#define walprop_set_nonblocking(conn, arg) \
WalProposerFunctions->walprop_set_nonblocking(conn, arg)
#define walprop_socket(conn) \
WalProposerFunctions->walprop_socket(conn)
#define walprop_flush(conn) \
WalProposerFunctions->walprop_flush(conn)
#define walprop_finish(conn) \
WalProposerFunctions->walprop_finish(conn)
#define walprop_async_read(conn, buf, amount) \
WalProposerFunctions->walprop_async_read(conn, buf, amount)
#define walprop_async_write(conn, buf, size) \
WalProposerFunctions->walprop_async_write(conn, buf, size)
#define walprop_blocking_write(conn, buf, size) \
WalProposerFunctions->walprop_blocking_write(conn, buf, size)
/*
* The runtime location of the libpqwalproposer functions.
*
* This pointer is set by the initializer in libpqwalproposer, so that we
* can use it later.
*/
extern PGDLLIMPORT WalProposerFunctionsType * WalProposerFunctions;
extern uint64 BackpressureThrottlingTime(void); extern uint64 BackpressureThrottlingTime(void);
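
The header above replaces direct extern declarations with a table of function pointers (WalProposerFunctionsType) that libpqwalproposer fills in at init time and that thin walprop_* macros dereference. A hedged Rust sketch of the same indirection, with illustrative names only:

```rust
use once_cell::sync::OnceCell;

struct Conn; // stand-in for WalProposerConn

/// All "libpq" operations collected in one table, mirroring WalProposerFunctionsType.
struct WalProposerFns {
    error_message: fn(&Conn) -> String,
    flush: fn(&Conn) -> i32,
}

static WAL_PROPOSER_FNS: OnceCell<WalProposerFns> = OnceCell::new();

fn libpq_error_message(_conn: &Conn) -> String {
    "no error".to_string()
}

fn libpq_flush(_conn: &Conn) -> i32 {
    0
}

/// The libpq-backed module registers its implementations at init time,
/// like pg_init_libpqwalproposer() does in the C code above.
fn register_libpq_impl() {
    let fns = WalProposerFns {
        error_message: libpq_error_message,
        flush: libpq_flush,
    };
    if WAL_PROPOSER_FNS.set(fns).is_err() {
        panic!("libpqwalproposer already loaded");
    }
}

/// Call sites go through the table, as the walprop_* macros do in C.
fn walprop_flush(conn: &Conn) -> i32 {
    let fns = WAL_PROPOSER_FNS.get().expect("libpqwalproposer didn't initialize");
    (fns.flush)(conn)
}

fn main() {
    register_libpq_impl();
    println!("flush result: {}", walprop_flush(&Conn));
}
```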

View File

@@ -5,11 +5,11 @@ edition = "2021"
[dependencies] [dependencies]
anyhow = "1.0" anyhow = "1.0"
atty = "0.2.14" async-trait = "0.1"
base64 = "0.13.0" base64 = "0.13.0"
bstr = "1.0" bstr = "0.2.17"
bytes = { version = "1.0.1", features = ['serde'] } bytes = { version = "1.0.1", features = ['serde'] }
clap = "4.0" clap = "3.0"
futures = "0.3.13" futures = "0.3.13"
git-version = "0.3.5" git-version = "0.3.5"
hashbrown = "0.12" hashbrown = "0.12"
@@ -22,11 +22,7 @@ once_cell = "1.13.0"
parking_lot = "0.12" parking_lot = "0.12"
pin-project-lite = "0.2.7" pin-project-lite = "0.2.7"
rand = "0.8.3" rand = "0.8.3"
reqwest = { version = "0.11", default-features = false, features = [ reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls-tls"] }
"blocking",
"json",
"rustls-tls",
] }
routerify = "3" routerify = "3"
rustls = "0.20.0" rustls = "0.20.0"
rustls-pemfile = "1" rustls-pemfile = "1"
@@ -37,20 +33,17 @@ sha2 = "0.10.2"
socket2 = "0.4.4" socket2 = "0.4.4"
thiserror = "1.0.30" thiserror = "1.0.30"
tokio = { version = "1.17", features = ["macros"] } tokio = { version = "1.17", features = ["macros"] }
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev = "d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
tokio-rustls = "0.23.0" tokio-rustls = "0.23.0"
tracing = "0.1.36"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
url = "2.2.2" url = "2.2.2"
uuid = { version = "1.2", features = ["v4", "serde"] } uuid = { version = "0.8.2", features = ["v4", "serde"]}
x509-parser = "0.14" x509-parser = "0.13.2"
utils = { path = "../libs/utils" } utils = { path = "../libs/utils" }
metrics = { path = "../libs/metrics" } metrics = { path = "../libs/metrics" }
workspace_hack = { version = "0.1", path = "../workspace_hack" } workspace_hack = { version = "0.1", path = "../workspace_hack" }
[dev-dependencies] [dev-dependencies]
async-trait = "0.1" rcgen = "0.8.14"
rcgen = "0.10" rstest = "0.12"
rstest = "0.15"
tokio-postgres-rustls = "0.9.0" tokio-postgres-rustls = "0.9.0"

View File

@@ -15,7 +15,6 @@ use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::borrow::Cow; use std::borrow::Cow;
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{info, warn};
static CPLANE_WAITERS: Lazy<Waiters<mgmt::ComputeReady>> = Lazy::new(Default::default); static CPLANE_WAITERS: Lazy<Waiters<mgmt::ComputeReady>> = Lazy::new(Default::default);
@@ -172,8 +171,6 @@ impl BackendType<'_, ClientCredentials<'_>> {
// support SNI or other means of passing the project name. // support SNI or other means of passing the project name.
// We now expect to see a very specific payload in the place of password. // We now expect to see a very specific payload in the place of password.
if creds.project().is_none() { if creds.project().is_none() {
warn!("project name not specified, resorting to the password hack auth flow");
let payload = AuthFlow::new(client) let payload = AuthFlow::new(client)
.begin(auth::PasswordHack) .begin(auth::PasswordHack)
.await? .await?
@@ -182,7 +179,6 @@ impl BackendType<'_, ClientCredentials<'_>> {
// Finally we may finish the initialization of `creds`. // Finally we may finish the initialization of `creds`.
// TODO: add missing type safety to ClientCredentials. // TODO: add missing type safety to ClientCredentials.
info!(project = &payload.project, "received missing parameter");
creds.project = Some(payload.project.into()); creds.project = Some(payload.project.into());
let mut config = match &self { let mut config = match &self {
@@ -200,7 +196,6 @@ impl BackendType<'_, ClientCredentials<'_>> {
// We should use a password from payload as well. // We should use a password from payload as well.
config.password(payload.password); config.password(payload.password);
info!("user successfully authenticated (using the password hack)");
return Ok(compute::NodeInfo { return Ok(compute::NodeInfo {
reported_auth_ok: false, reported_auth_ok: false,
config, config,
@@ -208,31 +203,19 @@ impl BackendType<'_, ClientCredentials<'_>> {
} }
} }
let res = match self { match self {
Console(endpoint, creds) => { Console(endpoint, creds) => {
info!(
user = creds.user,
project = creds.project(),
"performing authentication using the console"
);
console::Api::new(&endpoint, extra, &creds) console::Api::new(&endpoint, extra, &creds)
.handle_user(client) .handle_user(client)
.await .await
} }
Postgres(endpoint, creds) => { Postgres(endpoint, creds) => {
info!("performing mock authentication using a local postgres instance");
postgres::Api::new(&endpoint, &creds) postgres::Api::new(&endpoint, &creds)
.handle_user(client) .handle_user(client)
.await .await
} }
// NOTE: this auth backend doesn't use client credentials. // NOTE: this auth backend doesn't use client credentials.
Link(url) => { Link(url) => link::handle_user(&url, client).await,
info!("performing link authentication"); }
link::handle_user(&url, client).await
}
}?;
info!("user successfully authenticated");
Ok(res)
} }
} }

View File

@@ -8,20 +8,35 @@ use crate::{
http, scram, http, scram,
stream::PqStream, stream::PqStream,
}; };
use futures::TryFutureExt;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::future::Future; use std::future::Future;
use thiserror::Error; use thiserror::Error;
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{error, info, info_span};
const REQUEST_FAILED: &str = "Console request failed"; const REQUEST_FAILED: &str = "Console request failed";
#[derive(Debug, Error)] #[derive(Debug, Error)]
#[error("{}", REQUEST_FAILED)] pub enum TransportError {
pub struct TransportError(#[from] std::io::Error); #[error("Console responded with a malformed JSON: {0}")]
BadResponse(#[from] serde_json::Error),
impl UserFacingError for TransportError {} /// HTTP status (other than 200) returned by the console.
#[error("Console responded with an HTTP status: {0}")]
HttpStatus(reqwest::StatusCode),
#[error(transparent)]
Io(#[from] std::io::Error),
}
impl UserFacingError for TransportError {
fn to_string_client(&self) -> String {
use TransportError::*;
match self {
HttpStatus(_) => self.to_string(),
_ => REQUEST_FAILED.to_owned(),
}
}
}
// Helps eliminate graceless `.map_err` calls without introducing another ctor. // Helps eliminate graceless `.map_err` calls without introducing another ctor.
impl From<reqwest::Error> for TransportError { impl From<reqwest::Error> for TransportError {
@@ -133,11 +148,10 @@ impl<'a> Api<'a> {
} }
async fn get_auth_info(&self) -> Result<AuthInfo, GetAuthInfoError> { async fn get_auth_info(&self) -> Result<AuthInfo, GetAuthInfoError> {
let request_id = uuid::Uuid::new_v4().to_string();
let req = self let req = self
.endpoint .endpoint
.get("proxy_get_role_secret") .get("proxy_get_role_secret")
.header("X-Request-ID", &request_id) .header("X-Request-ID", uuid::Uuid::new_v4().to_string())
.query(&[("session_id", self.extra.session_id)]) .query(&[("session_id", self.extra.session_id)])
.query(&[ .query(&[
("application_name", self.extra.application_name), ("application_name", self.extra.application_name),
@@ -146,30 +160,27 @@ impl<'a> Api<'a> {
]) ])
.build()?; .build()?;
let span = info_span!("http", id = request_id, url = req.url().as_str()); // TODO: use a proper logger
info!(parent: &span, "request auth info"); println!("cplane request: {}", req.url());
let msg = self
.endpoint
.checked_execute(req)
.and_then(|r| r.json::<GetRoleSecretResponse>())
.await
.map_err(|e| {
error!(parent: &span, "{e}");
e
})?;
scram::ServerSecret::parse(&msg.role_secret) let resp = self.endpoint.execute(req).await?;
if !resp.status().is_success() {
return Err(TransportError::HttpStatus(resp.status()).into());
}
let response: GetRoleSecretResponse = serde_json::from_str(&resp.text().await?)?;
scram::ServerSecret::parse(&response.role_secret)
.map(AuthInfo::Scram) .map(AuthInfo::Scram)
.ok_or(GetAuthInfoError::BadSecret) .ok_or(GetAuthInfoError::BadSecret)
} }
/// Wake up the compute node and return the corresponding connection info. /// Wake up the compute node and return the corresponding connection info.
pub(super) async fn wake_compute(&self) -> Result<ComputeConnCfg, WakeComputeError> { pub(super) async fn wake_compute(&self) -> Result<ComputeConnCfg, WakeComputeError> {
let request_id = uuid::Uuid::new_v4().to_string();
let req = self let req = self
.endpoint .endpoint
.get("proxy_wake_compute") .get("proxy_wake_compute")
.header("X-Request-ID", &request_id) .header("X-Request-ID", uuid::Uuid::new_v4().to_string())
.query(&[("session_id", self.extra.session_id)]) .query(&[("session_id", self.extra.session_id)])
.query(&[ .query(&[
("application_name", self.extra.application_name), ("application_name", self.extra.application_name),
@@ -177,21 +188,19 @@ impl<'a> Api<'a> {
]) ])
.build()?; .build()?;
let span = info_span!("http", id = request_id, url = req.url().as_str()); // TODO: use a proper logger
info!(parent: &span, "request wake-up"); println!("cplane request: {}", req.url());
let msg = self
.endpoint let resp = self.endpoint.execute(req).await?;
.checked_execute(req) if !resp.status().is_success() {
.and_then(|r| r.json::<GetWakeComputeResponse>()) return Err(TransportError::HttpStatus(resp.status()).into());
.await }
.map_err(|e| {
error!(parent: &span, "{e}"); let response: GetWakeComputeResponse = serde_json::from_str(&resp.text().await?)?;
e
})?;
// Unfortunately, ownership won't let us use `Option::ok_or` here. // Unfortunately, ownership won't let us use `Option::ok_or` here.
let (host, port) = match parse_host_port(&msg.address) { let (host, port) = match parse_host_port(&response.address) {
None => return Err(WakeComputeError::BadComputeAddress(msg.address)), None => return Err(WakeComputeError::BadComputeAddress(response.address)),
Some(x) => x, Some(x) => x,
}; };
@@ -218,18 +227,15 @@ where
GetAuthInfo: Future<Output = Result<AuthInfo, GetAuthInfoError>>, GetAuthInfo: Future<Output = Result<AuthInfo, GetAuthInfoError>>,
WakeCompute: Future<Output = Result<ComputeConnCfg, WakeComputeError>>, WakeCompute: Future<Output = Result<ComputeConnCfg, WakeComputeError>>,
{ {
info!("fetching user's authentication info");
let auth_info = get_auth_info(endpoint).await?; let auth_info = get_auth_info(endpoint).await?;
let flow = AuthFlow::new(client); let flow = AuthFlow::new(client);
let scram_keys = match auth_info { let scram_keys = match auth_info {
AuthInfo::Md5(_) => { AuthInfo::Md5(_) => {
// TODO: decide if we should support MD5 in api v2 // TODO: decide if we should support MD5 in api v2
info!("auth endpoint chooses MD5");
return Err(auth::AuthError::bad_auth_method("MD5")); return Err(auth::AuthError::bad_auth_method("MD5"));
} }
AuthInfo::Scram(secret) => { AuthInfo::Scram(secret) => {
info!("auth endpoint chooses SCRAM");
let scram = auth::Scram(&secret); let scram = auth::Scram(&secret);
Some(compute::ScramKeys { Some(compute::ScramKeys {
client_key: flow.begin(scram).await?.authenticate().await?.as_bytes(), client_key: flow.begin(scram).await?.authenticate().await?.as_bytes(),
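
The console-API hunks above differ in how they reject non-2xx responses and decode the JSON body: one side funnels requests through a checked_execute helper built on error_for_status, the other inspects resp.status() by hand before parsing the text. A small sketch of the error_for_status + json flow, with a made-up endpoint and response type:

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct GetRoleSecretResponse {
    role_secret: String,
}

// Hypothetical helper: fetch and decode a console response, turning any
// non-2xx status into an error before touching the body.
async fn get_role_secret(
    client: &reqwest::Client,
    url: &str,
) -> Result<GetRoleSecretResponse, reqwest::Error> {
    client
        .get(url)
        .header("X-Request-ID", uuid::Uuid::new_v4().to_string())
        .send()
        .await?
        .error_for_status()? // non-2xx becomes Err, like checked_execute above
        .json::<GetRoleSecretResponse>() // decode JSON straight from the response
        .await
}
```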

View File

@@ -1,7 +1,6 @@
use crate::{auth, compute, error::UserFacingError, stream::PqStream, waiters}; use crate::{auth, compute, error::UserFacingError, stream::PqStream, waiters};
use thiserror::Error; use thiserror::Error;
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{info, info_span};
use utils::pq_proto::{BeMessage as Be, BeParameterStatusMessage}; use utils::pq_proto::{BeMessage as Be, BeParameterStatusMessage};
#[derive(Debug, Error)] #[derive(Debug, Error)]
@@ -51,20 +50,17 @@ pub async fn handle_user(
client: &mut PqStream<impl AsyncRead + AsyncWrite + Unpin>, client: &mut PqStream<impl AsyncRead + AsyncWrite + Unpin>,
) -> auth::Result<compute::NodeInfo> { ) -> auth::Result<compute::NodeInfo> {
let psql_session_id = new_psql_session_id(); let psql_session_id = new_psql_session_id();
let span = info_span!("link", psql_session_id = &psql_session_id);
let greeting = hello_message(link_uri, &psql_session_id); let greeting = hello_message(link_uri, &psql_session_id);
let db_info = super::with_waiter(psql_session_id, |waiter| async { let db_info = super::with_waiter(psql_session_id, |waiter| async {
// Give user a URL to spawn a new database. // Give user a URL to spawn a new database
info!(parent: &span, "sending the auth URL to the user");
client client
.write_message_noflush(&Be::AuthenticationOk)? .write_message_noflush(&Be::AuthenticationOk)?
.write_message_noflush(&BeParameterStatusMessage::encoding())? .write_message_noflush(&BeParameterStatusMessage::encoding())?
.write_message(&Be::NoticeResponse(&greeting)) .write_message(&Be::NoticeResponse(&greeting))
.await?; .await?;
// Wait for web console response (see `mgmt`). // Wait for web console response (see `mgmt`)
info!(parent: &span, "waiting for console's reply...");
waiter.await?.map_err(LinkAuthError::AuthFailed) waiter.await?.map_err(LinkAuthError::AuthFailed)
}) })
.await?; .await?;

View File

@@ -3,7 +3,6 @@
use crate::error::UserFacingError; use crate::error::UserFacingError;
use std::borrow::Cow; use std::borrow::Cow;
use thiserror::Error; use thiserror::Error;
use tracing::info;
use utils::pq_proto::StartupMessageParams; use utils::pq_proto::StartupMessageParams;
#[derive(Debug, Error, PartialEq, Eq, Clone)] #[derive(Debug, Error, PartialEq, Eq, Clone)]
@@ -83,13 +82,6 @@ impl<'a> ClientCredentials<'a> {
} }
.transpose()?; .transpose()?;
info!(
user = user,
dbname = dbname,
project = project.as_deref(),
"credentials"
);
Ok(Self { Ok(Self {
user, user,
dbname, dbname,

View File

@@ -4,7 +4,6 @@ use parking_lot::Mutex;
use std::net::SocketAddr; use std::net::SocketAddr;
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio_postgres::{CancelToken, NoTls}; use tokio_postgres::{CancelToken, NoTls};
use tracing::info;
use utils::pq_proto::CancelKeyData; use utils::pq_proto::CancelKeyData;
/// Enables serving `CancelRequest`s. /// Enables serving `CancelRequest`s.
@@ -19,9 +18,8 @@ impl CancelMap {
.lock() .lock()
.get(&key) .get(&key)
.and_then(|x| x.clone()) .and_then(|x| x.clone())
.with_context(|| format!("query cancellation key not found: {key}"))?; .with_context(|| format!("unknown session: {:?}", key))?;
info!("cancelling query per user's request using key {key}");
cancel_closure.try_cancel_query().await cancel_closure.try_cancel_query().await
} }
@@ -43,16 +41,14 @@ impl CancelMap {
self.0 self.0
.lock() .lock()
.try_insert(key, None) .try_insert(key, None)
.map_err(|_| anyhow!("query cancellation key already exists: {key}"))?; .map_err(|_| anyhow!("session already exists: {:?}", key))?;
// This will guarantee that the session gets dropped // This will guarantee that the session gets dropped
// as soon as the future is finished. // as soon as the future is finished.
scopeguard::defer! { scopeguard::defer! {
self.0.lock().remove(&key); self.0.lock().remove(&key);
info!("dropped query cancellation key {key}");
} }
info!("registered new query cancellation key {key}");
let session = Session::new(key, self); let session = Session::new(key, self);
f(session).await f(session).await
} }
@@ -106,13 +102,10 @@ impl<'a> Session<'a> {
fn new(key: CancelKeyData, cancel_map: &'a CancelMap) -> Self { fn new(key: CancelKeyData, cancel_map: &'a CancelMap) -> Self {
Self { key, cancel_map } Self { key, cancel_map }
} }
}
impl Session<'_> {
/// Store the cancel token for the given session. /// Store the cancel token for the given session.
/// This enables query cancellation in [`crate::proxy::handshake`]. /// This enables query cancellation in [`crate::proxy::handshake`].
pub fn enable_query_cancellation(self, cancel_closure: CancelClosure) -> CancelKeyData { pub fn enable_query_cancellation(self, cancel_closure: CancelClosure) -> CancelKeyData {
info!("enabling query cancellation for this session");
self.cancel_map self.cancel_map
.0 .0
.lock() .lock()
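
The cancellation hunks above register a per-session key and rely on scopeguard::defer! to drop it again when the session future completes; a simplified sketch of that idea (key and token types are placeholders):

```rust
use parking_lot::Mutex;
use std::collections::HashMap;

/// Stand-in for the proxy's CancelMap: cancellation key -> token.
#[derive(Default)]
struct CancelMap(Mutex<HashMap<u64, Option<String>>>);

impl CancelMap {
    /// Run `f` with a registered session key and guarantee cleanup afterwards.
    async fn with_session<R>(&self, key: u64, f: impl std::future::Future<Output = R>) -> R {
        // Register the key before the session starts.
        self.0.lock().insert(key, None);

        // Remove it as soon as the future finishes, even on an early return.
        scopeguard::defer! {
            self.0.lock().remove(&key);
        }

        f.await
    }
}

#[tokio::main]
async fn main() {
    let map = CancelMap::default();
    let out = map.with_session(42, async { "session finished" }).await;
    println!("{out}");
}
```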

View File

@@ -5,7 +5,6 @@ use std::{io, net::SocketAddr};
use thiserror::Error; use thiserror::Error;
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio_postgres::NoTls; use tokio_postgres::NoTls;
use tracing::{error, info};
use utils::pq_proto::StartupMessageParams; use utils::pq_proto::StartupMessageParams;
#[derive(Debug, Error)] #[derive(Debug, Error)]
@@ -55,7 +54,6 @@ impl NodeInfo {
use tokio_postgres::config::Host; use tokio_postgres::config::Host;
let connect_once = |host, port| { let connect_once = |host, port| {
info!("trying to connect to a compute node at {host}:{port}");
TcpStream::connect((host, port)).and_then(|socket| async { TcpStream::connect((host, port)).and_then(|socket| async {
let socket_addr = socket.peer_addr()?; let socket_addr = socket.peer_addr()?;
// This prevents load balancer from severing the connection. // This prevents load balancer from severing the connection.
@@ -74,11 +72,7 @@ impl NodeInfo {
if ports.len() > 1 && ports.len() != hosts.len() { if ports.len() > 1 && ports.len() != hosts.len() {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::Other, io::ErrorKind::Other,
format!( format!("couldn't connect: bad compute config, ports and hosts entries' count does not match: {:?}", self.config),
"couldn't connect: bad compute config, \
ports and hosts entries' count does not match: {:?}",
self.config
),
)); ));
} }
@@ -94,7 +88,7 @@ impl NodeInfo {
Ok(socket) => return Ok(socket), Ok(socket) => return Ok(socket),
Err(err) => { Err(err) => {
// We can't throw an error here, as there might be more hosts to try. // We can't throw an error here, as there might be more hosts to try.
error!("failed to connect to a compute node at {host}:{port}: {err}"); println!("failed to connect to compute `{host}:{port}`: {err}");
connection_error = Some(err); connection_error = Some(err);
} }
} }
@@ -166,8 +160,8 @@ impl NodeInfo {
.ok_or(ConnectionError::FailedToFetchPgVersion)? .ok_or(ConnectionError::FailedToFetchPgVersion)?
.into(); .into();
info!("connected to user's compute node at {socket_addr}");
let cancel_closure = CancelClosure::new(socket_addr, client.cancel_token()); let cancel_closure = CancelClosure::new(socket_addr, client.cancel_token());
let db = PostgresConnection { stream, version }; let db = PostgresConnection { stream, version };
Ok((db, cancel_closure)) Ok((db, cancel_closure))
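
The compute-connection hunk above tries each candidate host:port pair in turn and keeps the last error for reporting; a minimal sketch of that strategy with assumed types:

```rust
use std::io;
use tokio::net::TcpStream;

async fn connect_any(hosts: &[(&str, u16)]) -> io::Result<TcpStream> {
    let mut last_err = None;
    for (host, port) in hosts {
        match TcpStream::connect((*host, *port)).await {
            Ok(socket) => return Ok(socket),
            Err(err) => {
                // Keep trying; there may be more hosts to attempt.
                last_err = Some(err);
            }
        }
    }
    Err(last_err.unwrap_or_else(|| io::Error::new(io::ErrorKind::Other, "no hosts to try")))
}
```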

View File

@@ -17,7 +17,6 @@ impl Endpoint {
Self { endpoint, client } Self { endpoint, client }
} }
#[inline(always)]
pub fn url(&self) -> &ApiUrl { pub fn url(&self) -> &ApiUrl {
&self.endpoint &self.endpoint
} }
@@ -37,16 +36,6 @@ impl Endpoint {
) -> Result<reqwest::Response, reqwest::Error> { ) -> Result<reqwest::Response, reqwest::Error> {
self.client.execute(request).await self.client.execute(request).await
} }
/// Execute a [request](reqwest::Request) and raise an error if status != 200.
pub async fn checked_execute(
&self,
request: reqwest::Request,
) -> Result<reqwest::Response, reqwest::Error> {
self.execute(request)
.await
.and_then(|r| r.error_for_status())
}
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -1,7 +1,6 @@
use anyhow::anyhow; use anyhow::anyhow;
use hyper::{Body, Request, Response, StatusCode}; use hyper::{Body, Request, Response, StatusCode};
use std::net::TcpListener; use std::net::TcpListener;
use tracing::info;
use utils::http::{endpoint, error::ApiError, json::json_response, RouterBuilder, RouterService}; use utils::http::{endpoint, error::ApiError, json::json_response, RouterBuilder, RouterService};
async fn status_handler(_: Request<Body>) -> Result<Response<Body>, ApiError> { async fn status_handler(_: Request<Body>) -> Result<Response<Body>, ApiError> {
@@ -13,9 +12,9 @@ fn make_router() -> RouterBuilder<hyper::Body, ApiError> {
router.get("/v1/status", status_handler) router.get("/v1/status", status_handler)
} }
pub async fn task_main(http_listener: TcpListener) -> anyhow::Result<()> { pub async fn thread_main(http_listener: TcpListener) -> anyhow::Result<()> {
scopeguard::defer! { scopeguard::defer! {
info!("http has shut down"); println!("http has shut down");
} }
let service = || RouterService::new(make_router().build()?); let service = || RouterService::new(make_router().build()?);

View File

@@ -23,10 +23,8 @@ use anyhow::{bail, Context};
use clap::{self, Arg}; use clap::{self, Arg};
use config::ProxyConfig; use config::ProxyConfig;
use futures::FutureExt; use futures::FutureExt;
use metrics::set_build_info_metric;
use std::{borrow::Cow, future::Future, net::SocketAddr}; use std::{borrow::Cow, future::Future, net::SocketAddr};
use tokio::{net::TcpListener, task::JoinError}; use tokio::{net::TcpListener, task::JoinError};
use tracing::info;
use utils::project_git_version; use utils::project_git_version;
project_git_version!(GIT_VERSION); project_git_version!(GIT_VERSION);
@@ -40,48 +38,98 @@ async fn flatten_err(
#[tokio::main] #[tokio::main]
async fn main() -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt() let arg_matches = clap::App::new("Neon proxy/router")
.with_ansi(atty::is(atty::Stream::Stdout)) .version(GIT_VERSION)
.with_target(false) .arg(
.init(); Arg::new("proxy")
.short('p')
let arg_matches = cli().get_matches(); .long("proxy")
.takes_value(true)
.help("listen for incoming client connections on ip:port")
.default_value("127.0.0.1:4432"),
)
.arg(
Arg::new("auth-backend")
.long("auth-backend")
.takes_value(true)
.possible_values(["console", "postgres", "link"])
.default_value("link"),
)
.arg(
Arg::new("mgmt")
.short('m')
.long("mgmt")
.takes_value(true)
.help("listen for management callback connection on ip:port")
.default_value("127.0.0.1:7000"),
)
.arg(
Arg::new("http")
.short('h')
.long("http")
.takes_value(true)
.help("listen for incoming http connections (metrics, etc) on ip:port")
.default_value("127.0.0.1:7001"),
)
.arg(
Arg::new("uri")
.short('u')
.long("uri")
.takes_value(true)
.help("redirect unauthenticated users to the given uri in case of link auth")
.default_value("http://localhost:3000/psql_session/"),
)
.arg(
Arg::new("auth-endpoint")
.short('a')
.long("auth-endpoint")
.takes_value(true)
.help("cloud API endpoint for authenticating users")
.default_value("http://localhost:3000/authenticate_proxy_request/"),
)
.arg(
Arg::new("tls-key")
.short('k')
.long("tls-key")
.alias("ssl-key") // backwards compatibility
.takes_value(true)
.help("path to TLS key for client postgres connections"),
)
.arg(
Arg::new("tls-cert")
.short('c')
.long("tls-cert")
.alias("ssl-cert") // backwards compatibility
.takes_value(true)
.help("path to TLS cert for client postgres connections"),
)
.get_matches();
let tls_config = match ( let tls_config = match (
arg_matches.get_one::<String>("tls-key"), arg_matches.value_of("tls-key"),
arg_matches.get_one::<String>("tls-cert"), arg_matches.value_of("tls-cert"),
) { ) {
(Some(key_path), Some(cert_path)) => Some(config::configure_tls(key_path, cert_path)?), (Some(key_path), Some(cert_path)) => Some(config::configure_tls(key_path, cert_path)?),
(None, None) => None, (None, None) => None,
_ => bail!("either both or neither tls-key and tls-cert must be specified"), _ => bail!("either both or neither tls-key and tls-cert must be specified"),
}; };
let proxy_address: SocketAddr = arg_matches.get_one::<String>("proxy").unwrap().parse()?; let proxy_address: SocketAddr = arg_matches.value_of("proxy").unwrap().parse()?;
let mgmt_address: SocketAddr = arg_matches.get_one::<String>("mgmt").unwrap().parse()?; let mgmt_address: SocketAddr = arg_matches.value_of("mgmt").unwrap().parse()?;
let http_address: SocketAddr = arg_matches.get_one::<String>("http").unwrap().parse()?; let http_address: SocketAddr = arg_matches.value_of("http").unwrap().parse()?;
let auth_backend = match arg_matches let auth_backend = match arg_matches.value_of("auth-backend").unwrap() {
.get_one::<String>("auth-backend")
.unwrap()
.as_str()
{
"console" => { "console" => {
let url = arg_matches let url = arg_matches.value_of("auth-endpoint").unwrap().parse()?;
.get_one::<String>("auth-endpoint")
.unwrap()
.parse()?;
let endpoint = http::Endpoint::new(url, reqwest::Client::new()); let endpoint = http::Endpoint::new(url, reqwest::Client::new());
auth::BackendType::Console(Cow::Owned(endpoint), ()) auth::BackendType::Console(Cow::Owned(endpoint), ())
} }
"postgres" => { "postgres" => {
let url = arg_matches let url = arg_matches.value_of("auth-endpoint").unwrap().parse()?;
.get_one::<String>("auth-endpoint")
.unwrap()
.parse()?;
auth::BackendType::Postgres(Cow::Owned(url), ()) auth::BackendType::Postgres(Cow::Owned(url), ())
} }
"link" => { "link" => {
let url = arg_matches.get_one::<String>("uri").unwrap().parse()?; let url = arg_matches.value_of("uri").unwrap().parse()?;
auth::BackendType::Link(Cow::Owned(url)) auth::BackendType::Link(Cow::Owned(url))
} }
other => bail!("unsupported auth backend: {other}"), other => bail!("unsupported auth backend: {other}"),
@@ -92,95 +140,29 @@ async fn main() -> anyhow::Result<()> {
auth_backend, auth_backend,
})); }));
info!("Version: {GIT_VERSION}"); println!("Version: {GIT_VERSION}");
info!("Authentication backend: {}", config.auth_backend); println!("Authentication backend: {}", config.auth_backend);
// Check that we can bind to address before further initialization // Check that we can bind to address before further initialization
info!("Starting http on {http_address}"); println!("Starting http on {}", http_address);
let http_listener = TcpListener::bind(http_address).await?.into_std()?; let http_listener = TcpListener::bind(http_address).await?.into_std()?;
info!("Starting mgmt on {mgmt_address}"); println!("Starting mgmt on {}", mgmt_address);
let mgmt_listener = TcpListener::bind(mgmt_address).await?.into_std()?; let mgmt_listener = TcpListener::bind(mgmt_address).await?.into_std()?;
info!("Starting proxy on {proxy_address}"); println!("Starting proxy on {}", proxy_address);
let proxy_listener = TcpListener::bind(proxy_address).await?; let proxy_listener = TcpListener::bind(proxy_address).await?;
let tasks = [ let tasks = [
tokio::spawn(http::server::task_main(http_listener)), tokio::spawn(http::server::thread_main(http_listener)),
tokio::spawn(proxy::task_main(config, proxy_listener)), tokio::spawn(proxy::thread_main(config, proxy_listener)),
tokio::task::spawn_blocking(move || mgmt::thread_main(mgmt_listener)), tokio::task::spawn_blocking(move || mgmt::thread_main(mgmt_listener)),
] ]
.map(flatten_err); .map(flatten_err);
set_build_info_metric(GIT_VERSION);
// This will block until all tasks have completed. // This will block until all tasks have completed.
// Furthermore, the first one to fail will cancel the rest. // Furthermore, the first one to fail will cancel the rest.
let _: Vec<()> = futures::future::try_join_all(tasks).await?; let _: Vec<()> = futures::future::try_join_all(tasks).await?;
Ok(()) Ok(())
} }
fn cli() -> clap::Command {
clap::Command::new("Neon proxy/router")
.disable_help_flag(true)
.version(GIT_VERSION)
.arg(
Arg::new("proxy")
.short('p')
.long("proxy")
.help("listen for incoming client connections on ip:port")
.default_value("127.0.0.1:4432"),
)
.arg(
Arg::new("auth-backend")
.long("auth-backend")
.value_parser(["console", "postgres", "link"])
.default_value("link"),
)
.arg(
Arg::new("mgmt")
.short('m')
.long("mgmt")
.help("listen for management callback connection on ip:port")
.default_value("127.0.0.1:7000"),
)
.arg(
Arg::new("http")
.long("http")
.help("listen for incoming http connections (metrics, etc) on ip:port")
.default_value("127.0.0.1:7001"),
)
.arg(
Arg::new("uri")
.short('u')
.long("uri")
.help("redirect unauthenticated users to the given uri in case of link auth")
.default_value("http://localhost:3000/psql_session/"),
)
.arg(
Arg::new("auth-endpoint")
.short('a')
.long("auth-endpoint")
.help("cloud API endpoint for authenticating users")
.default_value("http://localhost:3000/authenticate_proxy_request/"),
)
.arg(
Arg::new("tls-key")
.short('k')
.long("tls-key")
.alias("ssl-key") // backwards compatibility
.help("path to TLS key for client postgres connections"),
)
.arg(
Arg::new("tls-cert")
.short('c')
.long("tls-cert")
.alias("ssl-cert") // backwards compatibility
.help("path to TLS cert for client postgres connections"),
)
}
#[test]
fn verify_cli() {
cli().debug_assert();
}
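
The hunk above moves the proxy CLI between clap 3's App/value_of style and clap 4's Command/get_one style plus a debug_assert test; a tiny, self-contained sketch of the clap 4 pattern (toy flag names, not the proxy's real arguments):

```rust
fn cli() -> clap::Command {
    clap::Command::new("example")
        .arg(
            clap::Arg::new("proxy")
                .long("proxy")
                .default_value("127.0.0.1:4432"),
        )
}

fn main() {
    let matches = cli().get_matches();
    // clap 4: typed access via get_one::<String>() instead of value_of().
    let addr: &String = matches.get_one::<String>("proxy").expect("has default");
    println!("listening on {addr}");
}

#[test]
fn verify_cli() {
    // clap 4 can validate the command definition itself.
    cli().debug_assert();
}
```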

View File

@@ -5,7 +5,6 @@ use std::{
net::{TcpListener, TcpStream}, net::{TcpListener, TcpStream},
thread, thread,
}; };
use tracing::{error, info};
use utils::{ use utils::{
postgres_backend::{self, AuthType, PostgresBackend}, postgres_backend::{self, AuthType, PostgresBackend},
pq_proto::{BeMessage, SINGLE_COL_ROWDESC}, pq_proto::{BeMessage, SINGLE_COL_ROWDESC},
@@ -20,7 +19,7 @@ use utils::{
/// ///
pub fn thread_main(listener: TcpListener) -> anyhow::Result<()> { pub fn thread_main(listener: TcpListener) -> anyhow::Result<()> {
scopeguard::defer! { scopeguard::defer! {
info!("mgmt has shut down"); println!("mgmt has shut down");
} }
listener listener
@@ -28,14 +27,14 @@ pub fn thread_main(listener: TcpListener) -> anyhow::Result<()> {
.context("failed to set listener to blocking")?; .context("failed to set listener to blocking")?;
loop { loop {
let (socket, peer_addr) = listener.accept().context("failed to accept a new client")?; let (socket, peer_addr) = listener.accept().context("failed to accept a new client")?;
info!("accepted connection from {peer_addr}"); println!("accepted connection from {}", peer_addr);
socket socket
.set_nodelay(true) .set_nodelay(true)
.context("failed to set client socket option")?; .context("failed to set client socket option")?;
thread::spawn(move || { thread::spawn(move || {
if let Err(err) = handle_connection(socket) { if let Err(err) = handle_connection(socket) {
error!("{err}"); println!("error: {}", err);
} }
}); });
} }
@@ -103,14 +102,14 @@ impl postgres_backend::Handler for MgmtHandler {
let res = try_process_query(pgb, query_string); let res = try_process_query(pgb, query_string);
// intercept and log error message // intercept and log error message
if res.is_err() { if res.is_err() {
error!("mgmt query failed: {res:?}"); println!("Mgmt query failed: #{:?}", res);
} }
res res
} }
} }
fn try_process_query(pgb: &mut PostgresBackend, query_string: &str) -> anyhow::Result<()> { fn try_process_query(pgb: &mut PostgresBackend, query_string: &str) -> anyhow::Result<()> {
info!("got mgmt query [redacted]"); // Content contains password, don't print it println!("Got mgmt query [redacted]"); // Content contains password, don't print it
let resp: PsqlSessionResponse = serde_json::from_str(query_string)?; let resp: PsqlSessionResponse = serde_json::from_str(query_string)?;

View File

@@ -1,14 +1,13 @@
use crate::auth; use crate::auth;
use crate::cancellation::{self, CancelMap}; use crate::cancellation::{self, CancelMap};
use crate::config::{ProxyConfig, TlsConfig}; use crate::config::{ProxyConfig, TlsConfig};
use crate::stream::{MeasuredStream, PqStream, Stream}; use crate::stream::{MetricsStream, PqStream, Stream};
use anyhow::{bail, Context}; use anyhow::{bail, Context};
use futures::TryFutureExt; use futures::TryFutureExt;
use metrics::{register_int_counter, IntCounter}; use metrics::{register_int_counter, IntCounter};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use std::sync::Arc; use std::sync::Arc;
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{error, info, info_span, Instrument};
use utils::pq_proto::{BeMessage as Be, *}; use utils::pq_proto::{BeMessage as Be, *};
const ERR_INSECURE_CONNECTION: &str = "connection is insecure (try using `sslmode=require`)"; const ERR_INSECURE_CONNECTION: &str = "connection is insecure (try using `sslmode=require`)";
@@ -44,17 +43,17 @@ where
F: std::future::Future<Output = anyhow::Result<R>>, F: std::future::Future<Output = anyhow::Result<R>>,
{ {
future.await.map_err(|err| { future.await.map_err(|err| {
error!("{err}"); println!("error: {}", err);
err err
}) })
} }
pub async fn task_main( pub async fn thread_main(
config: &'static ProxyConfig, config: &'static ProxyConfig,
listener: tokio::net::TcpListener, listener: tokio::net::TcpListener,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
scopeguard::defer! { scopeguard::defer! {
info!("proxy has shut down"); println!("proxy has shut down");
} }
// When set for the server socket, the keepalive setting // When set for the server socket, the keepalive setting
@@ -64,29 +63,22 @@ pub async fn task_main(
let cancel_map = Arc::new(CancelMap::default()); let cancel_map = Arc::new(CancelMap::default());
loop { loop {
let (socket, peer_addr) = listener.accept().await?; let (socket, peer_addr) = listener.accept().await?;
info!("accepted postgres client connection from {peer_addr}"); println!("accepted connection from {}", peer_addr);
let session_id = uuid::Uuid::new_v4();
let cancel_map = Arc::clone(&cancel_map); let cancel_map = Arc::clone(&cancel_map);
tokio::spawn( tokio::spawn(log_error(async move {
log_error(async move { socket
info!("spawned a task for {peer_addr}"); .set_nodelay(true)
.context("failed to set socket option")?;
socket handle_client(config, &cancel_map, socket).await
.set_nodelay(true) }));
.context("failed to set socket option")?;
handle_client(config, &cancel_map, session_id, socket).await
})
.instrument(info_span!("client", session = format_args!("{session_id}"))),
);
} }
} }
async fn handle_client( async fn handle_client(
config: &ProxyConfig, config: &ProxyConfig,
cancel_map: &CancelMap, cancel_map: &CancelMap,
session_id: uuid::Uuid,
stream: impl AsyncRead + AsyncWrite + Unpin + Send, stream: impl AsyncRead + AsyncWrite + Unpin + Send,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
// The `closed` counter will increase when this future is destroyed. // The `closed` counter will increase when this future is destroyed.
@@ -96,8 +88,7 @@ async fn handle_client(
} }
let tls = config.tls_config.as_ref(); let tls = config.tls_config.as_ref();
let do_handshake = handshake(stream, tls, cancel_map).instrument(info_span!("handshake")); let (mut stream, params) = match handshake(stream, tls, cancel_map).await? {
let (mut stream, params) = match do_handshake.await? {
Some(x) => x, Some(x) => x,
None => return Ok(()), // it's a cancellation request None => return Ok(()), // it's a cancellation request
}; };
@@ -115,7 +106,7 @@ async fn handle_client(
async { result }.or_else(|e| stream.throw_error(e)).await? async { result }.or_else(|e| stream.throw_error(e)).await?
}; };
let client = Client::new(stream, creds, &params, session_id); let client = Client::new(stream, creds, &params);
cancel_map cancel_map
.with_session(|session| client.connect_to_db(session)) .with_session(|session| client.connect_to_db(session))
.await .await
@@ -136,7 +127,7 @@ async fn handshake<S: AsyncRead + AsyncWrite + Unpin>(
let mut stream = PqStream::new(Stream::from_raw(stream)); let mut stream = PqStream::new(Stream::from_raw(stream));
loop { loop {
let msg = stream.read_startup_packet().await?; let msg = stream.read_startup_packet().await?;
info!("received {msg:?}"); println!("got message: {:?}", msg);
use FeStartupPacket::*; use FeStartupPacket::*;
match msg { match msg {
@@ -173,13 +164,11 @@ async fn handshake<S: AsyncRead + AsyncWrite + Unpin>(
stream.throw_error_str(ERR_INSECURE_CONNECTION).await?; stream.throw_error_str(ERR_INSECURE_CONNECTION).await?;
} }
info!(session_type = "normal", "successful handshake");
break Ok(Some((stream, params))); break Ok(Some((stream, params)));
} }
CancelRequest(cancel_key_data) => { CancelRequest(cancel_key_data) => {
cancel_map.cancel_session(cancel_key_data).await?; cancel_map.cancel_session(cancel_key_data).await?;
info!(session_type = "cancellation", "successful handshake");
break Ok(None); break Ok(None);
} }
} }
@@ -194,8 +183,6 @@ struct Client<'a, S> {
creds: auth::BackendType<'a, auth::ClientCredentials<'a>>, creds: auth::BackendType<'a, auth::ClientCredentials<'a>>,
/// KV-dictionary with PostgreSQL connection params. /// KV-dictionary with PostgreSQL connection params.
params: &'a StartupMessageParams, params: &'a StartupMessageParams,
/// Unique connection ID.
session_id: uuid::Uuid,
} }
impl<'a, S> Client<'a, S> { impl<'a, S> Client<'a, S> {
@@ -204,13 +191,11 @@ impl<'a, S> Client<'a, S> {
stream: PqStream<S>, stream: PqStream<S>,
creds: auth::BackendType<'a, auth::ClientCredentials<'a>>, creds: auth::BackendType<'a, auth::ClientCredentials<'a>>,
params: &'a StartupMessageParams, params: &'a StartupMessageParams,
session_id: uuid::Uuid,
) -> Self { ) -> Self {
Self { Self {
stream, stream,
creds, creds,
params, params,
session_id,
} }
} }
} }
@@ -222,20 +207,17 @@ impl<S: AsyncRead + AsyncWrite + Unpin + Send> Client<'_, S> {
mut stream, mut stream,
creds, creds,
params, params,
session_id,
} = self; } = self;
let extra = auth::ConsoleReqExtra { let extra = auth::ConsoleReqExtra {
session_id, // aka this connection's id // Currently it's OK to generate a new UUID **here**, but
// it might be better to move this to `cancellation::Session`.
session_id: uuid::Uuid::new_v4(),
application_name: params.get("application_name"), application_name: params.get("application_name"),
}; };
// Authenticate and connect to a compute node. // Authenticate and connect to a compute node.
let auth = creds let auth = creds.authenticate(&extra, &mut stream).await;
.authenticate(&extra, &mut stream)
.instrument(info_span!("auth"))
.await;
let node = async { auth }.or_else(|e| stream.throw_error(e)).await?; let node = async { auth }.or_else(|e| stream.throw_error(e)).await?;
let reported_auth_ok = node.reported_auth_ok; let reported_auth_ok = node.reported_auth_ok;
@@ -269,9 +251,8 @@ impl<S: AsyncRead + AsyncWrite + Unpin + Send> Client<'_, S> {
} }
// Starting from here we only proxy the client's traffic. // Starting from here we only proxy the client's traffic.
info!("performing the proxy pass..."); let mut db = MetricsStream::new(db.stream, inc_proxied);
let mut db = MeasuredStream::new(db.stream, inc_proxied); let mut client = MetricsStream::new(stream.into_inner(), inc_proxied);
let mut client = MeasuredStream::new(stream.into_inner(), inc_proxied);
let _ = tokio::io::copy_bidirectional(&mut client, &mut db).await?; let _ = tokio::io::copy_bidirectional(&mut client, &mut db).await?;
Ok(()) Ok(())
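
The proxy hunks above wrap each accepted connection in a tracing span that carries the session id (with nested handshake/auth spans); a small sketch of that tokio::spawn + Instrument pattern:

```rust
use tracing::{info, info_span, Instrument};

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt().with_target(false).init();

    let session_id = uuid::Uuid::new_v4();
    // Attach a per-connection span so every log line in the task carries the id.
    tokio::spawn(
        async move {
            info!("spawned a task for the client");
        }
        .instrument(info_span!("client", session = %session_id)),
    )
    .await
    .unwrap();
}
```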

View File

@@ -231,7 +231,7 @@ impl<S: AsyncRead + AsyncWrite + Unpin> AsyncWrite for Stream<S> {
pin_project! { pin_project! {
/// This stream tracks all writes and calls user provided /// This stream tracks all writes and calls user provided
/// callback when the underlying stream is flushed. /// callback when the underlying stream is flushed.
pub struct MeasuredStream<S, W> { pub struct MetricsStream<S, W> {
#[pin] #[pin]
stream: S, stream: S,
write_count: usize, write_count: usize,
@@ -239,7 +239,7 @@ pin_project! {
} }
} }
impl<S, W> MeasuredStream<S, W> { impl<S, W> MetricsStream<S, W> {
pub fn new(stream: S, inc_write_count: W) -> Self { pub fn new(stream: S, inc_write_count: W) -> Self {
Self { Self {
stream, stream,
@@ -249,7 +249,7 @@ impl<S, W> MeasuredStream<S, W> {
} }
} }
impl<S: AsyncRead + Unpin, W> AsyncRead for MeasuredStream<S, W> { impl<S: AsyncRead + Unpin, W> AsyncRead for MetricsStream<S, W> {
fn poll_read( fn poll_read(
self: Pin<&mut Self>, self: Pin<&mut Self>,
context: &mut task::Context<'_>, context: &mut task::Context<'_>,
@@ -259,7 +259,7 @@ impl<S: AsyncRead + Unpin, W> AsyncRead for MeasuredStream<S, W> {
} }
} }
impl<S: AsyncWrite + Unpin, W: FnMut(usize)> AsyncWrite for MeasuredStream<S, W> { impl<S: AsyncWrite + Unpin, W: FnMut(usize)> AsyncWrite for MetricsStream<S, W> {
fn poll_write( fn poll_write(
self: Pin<&mut Self>, self: Pin<&mut Self>,
context: &mut task::Context<'_>, context: &mut task::Context<'_>,
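
The stream.rs hunk only renames MetricsStream to MeasuredStream; for context, a simplified sketch of what such a wrapper does: count bytes written to the inner stream and report them through a callback on flush (write side only, assumed shape):

```rust
use pin_project_lite::pin_project;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::AsyncWrite;

pin_project! {
    // Counts bytes written through the inner stream and reports them on flush.
    pub struct CountingWrite<S, F> {
        #[pin]
        inner: S,
        pending: usize,
        on_flush: F,
    }
}

impl<S, F> CountingWrite<S, F> {
    pub fn new(inner: S, on_flush: F) -> Self {
        Self { inner, pending: 0, on_flush }
    }
}

impl<S: AsyncWrite, F: FnMut(usize)> AsyncWrite for CountingWrite<S, F> {
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<std::io::Result<usize>> {
        let this = self.project();
        let res = this.inner.poll_write(cx, buf);
        if let Poll::Ready(Ok(n)) = &res {
            *this.pending += *n;
        }
        res
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        let this = self.project();
        let res = this.inner.poll_flush(cx);
        if let Poll::Ready(Ok(())) = &res {
            // Report and reset the counter once the bytes actually hit the wire.
            (this.on_flush)(std::mem::take(this.pending));
        }
        res
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        self.project().inner.poll_shutdown(cx)
    }
}
```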

View File

@@ -13,7 +13,7 @@
# avoid running regular linting script that checks every feature. # avoid running regular linting script that checks every feature.
if [[ "$OSTYPE" == "darwin"* ]]; then if [[ "$OSTYPE" == "darwin"* ]]; then
# no extra features to test currently, add more here when needed # no extra features to test currently, add more here when needed
cargo clippy --locked --all --all-targets --features testing -- -A unknown_lints -D warnings cargo clippy --locked --all --all-targets -- -A unknown_lints -D warnings
else else
# * `-A unknown_lints` do not warn about unknown lint suppressions # * `-A unknown_lints` do not warn about unknown lint suppressions
# that people with newer toolchains might use # that people with newer toolchains might use

View File

@@ -11,7 +11,7 @@ hyper = "0.14"
fs2 = "0.4.3" fs2 = "0.4.3"
serde_json = "1" serde_json = "1"
tracing = "0.1.27" tracing = "0.1.27"
clap = "4.0" clap = "3.0"
daemonize = "0.4.1" daemonize = "0.4.1"
tokio = { version = "1.17", features = ["macros", "fs"] } tokio = { version = "1.17", features = ["macros", "fs"] }
postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } postgres-protocol = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
@@ -22,14 +22,14 @@ humantime = "2.1.0"
url = "2.2.2" url = "2.2.2"
signal-hook = "0.3.10" signal-hook = "0.3.10"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_with = "2.0" serde_with = "1.12.0"
hex = "0.4.3" hex = "0.4.3"
const_format = "0.2.21" const_format = "0.2.21"
tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" } tokio-postgres = { git = "https://github.com/neondatabase/rust-postgres.git", rev="d052ee8b86fff9897c77b0fe89ea9daba0e1fa38" }
git-version = "0.3.5" git-version = "0.3.5"
async-trait = "0.1" async-trait = "0.1"
once_cell = "1.13.0" once_cell = "1.13.0"
toml_edit = { version = "0.14", features = ["easy"] } toml_edit = { version = "0.13", features = ["easy"] }
thiserror = "1" thiserror = "1"
parking_lot = "0.12.1" parking_lot = "0.12.1"

Some files were not shown because too many files have changed in this diff.